@@ -3,13 +3,16 @@
 #include "SemSegNovelty.h"
 
-#include "core/image/FilterT.h"
-#include "gp-hik-exp/GPHIKClassifierNICE.h"
-#include "vislearning/baselib/ICETools.h"
-#include "vislearning/baselib/Globals.h"
-#include "vislearning/features/fpfeatures/SparseVectorFeature.h"
-#include "core/basics/StringTools.h"
-#include "core/basics/Timer.h"
+#include <core/image/FilterT.h>
+#include <core/basics/numerictools.h>
+#include <core/basics/StringTools.h>
+#include <core/basics/Timer.h>
+
+#include <gp-hik-exp/GPHIKClassifierNICE.h>
+#include <vislearning/baselib/ICETools.h>
+#include <vislearning/baselib/Globals.h>
+#include <vislearning/features/fpfeatures/SparseVectorFeature.h>
+
 #include "segmentation/GenericRegionSegmentationMethodSelection.h"
 
 using namespace std;
 
@@ -28,17 +31,39 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
   featExtract = new LFColorWeijer ( conf );
 
-  save_cache = conf->gB ( "FPCPixel", "save_cache", true );
-  read_cache = conf->gB ( "FPCPixel", "read_cache", false );
-  uncertdir = conf->gS("debug", "uncertainty", "uncertainty");
+  this->reuseSegmentation = conf->gB ( "FPCPixel", "reuseSegmentation", true ); //save and read segmentation results from files
+  this->save_classifier = conf->gB ( "FPCPixel", "save_classifier", true ); //save the classifier to a file
+  this->read_classifier = conf->gB ( "FPCPixel", "read_classifier", false ); //read the classifier from a file
+
+  //write uncertainty results in the same folder as done for the segmentation results
+  resultdir = conf->gS("debug", "resultdir", "result");
   cache = conf->gS ( "cache", "root", "" );
 
-  classifier = new GPHIKClassifierNICE ( conf, "ClassiferGPHIK" );;
+
+  //crude workaround for the const attribute
+  Config confCopy = *conf;
+
+  //just to make sure that we do NOT perform an optimization after every iteration step
+  //this would just take a lot of time, which is not desired so far
+  confCopy.sB("ClassifierGPHIK","performOptimizationAfterIncrement",false);
+
+  classifierString = conf->gS ( section, "classifier", "ClassifierGPHIK" );
+  classifier = NULL;
+  vclassifier = NULL;
+  if ( classifierString.compare("ClassifierGPHIK") == 0)
+    classifier = new GPHIKClassifierNICE ( &confCopy, "ClassifierGPHIK" );
+  else
+    vclassifier = GenericClassifierSelection::selectVecClassifier ( conf, classifierString );
+
+
 
   findMaximumUncert = conf->gB(section, "findMaximumUncert", true);
   whs = conf->gI ( section, "window_size", 10 );
-  featdist = conf->gI ( section, "grid", 10 );
+  //distance to next descriptor during training
+  trainWsize = conf->gI ( section, "train_window_size", 10 );
+  //distance to next descriptor during testing
   testWSize = conf->gI (section, "test_window_size", 10);
+  // select your segmentation method here
   string rsMethode = conf->gS ( section, "segmentation", "none" );
 
   if(rsMethode == "none")
@@ -48,7 +73,7 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
   else
   {
     RegionSegmentationMethod *tmpRegionSeg = GenericRegionSegmentationMethodSelection::selectRegionSegmentationMethod(conf, rsMethode);
-    if ( save_cache )
+    if ( reuseSegmentation )
       regionSeg = new RSCache ( conf, tmpRegionSeg );
     else
       regionSeg = tmpRegionSeg;
@@ -56,17 +81,23 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
 
   cn = md->getClassNames ( "train" );
 
-  if ( read_cache )
+  if ( read_classifier )
   {
-    string classifierdst = "/classifier.data";
-    fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );
-
     try
     {
       if ( classifier != NULL )
      {
+        string classifierdst = "/classifier.data";
+        fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );
        classifier->read ( cache + classifierdst );
      }
+      else
+      {
+        string classifierdst = "/veccl.data";
+        fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );
+        vclassifier->read ( cache + classifierdst );
+      }
+
 
      fprintf ( stderr, "SemSegNovelty:: successfully read\n" );
    }
@@ -79,28 +110,90 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
  {
    train ( md );
  }
+
+  //define which measure for "novelty" we want to use
+  noveltyMethodString = conf->gS( section, "noveltyMethod", "gp-variance");
+  if (noveltyMethodString.compare("gp-variance") == 0)  // novel = large variance
+  {
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-uncertainty") == 0) //novel = large uncertainty (mean / var)
+  {
+    this->noveltyMethod = GPUNCERTAINTY;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-mean") == 0)  //novel = small mean
+  {
+    this->noveltyMethod = GPMINMEAN;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-meanRatio") == 0)  //novel = small difference between mean of most plausible class and mean of snd
+                                                              //        most plausible class (not useful in binary settings)
+  {
+    this->noveltyMethod = GPMEANRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("gp-weightAll") == 0) // novel = large weight in alpha vector after updating the model (can be predicted exactly)
+  {
+    this->noveltyMethod = GPWEIGHTALL;
+    this->mostNoveltyWithMaxScores = true;
+  }
+  else if (noveltyMethodString.compare("gp-weightRatio") == 0) // novel = small difference between weights for alpha vectors
+                                                               //        with assumptions of GT label to be the most
+                                                               //        plausible against the second most plausible class
+  {
+    this->noveltyMethod = GPWEIGHTRATIO;
+    this->mostNoveltyWithMaxScores = false;
+    globalMaxUncert = numeric_limits<double>::max();
+  }
+  else if (noveltyMethodString.compare("random") == 0)
+  {
+    initRand();
+    this->noveltyMethod = RANDOM;
+  }
+  else
+  {
+    this->noveltyMethod = GPVARIANCE;
+    this->mostNoveltyWithMaxScores = true;
+  }
+
+  //we have not queried any region so far
+  queriedRegions.clear();
+  visualizeALimages = conf->gB(section, "visualizeALimages", false);
 }
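+
+// Hedged example (an addition for documentation, not part of the original patch): a config
+// sketch for the options read in this constructor, assuming the 'section' variable resolves
+// to "SemSegNovelty"; key names come from the gB/gI/gS calls above, values are illustrative:
+//
+//   [SemSegNovelty]
+//   classifier        = ClassifierGPHIK
+//   noveltyMethod     = gp-variance
+//   findMaximumUncert = true
+//   window_size       = 10
+//   train_window_size = 10
+//   test_window_size  = 10
+//   segmentation      = none
+//   visualizeALimages = false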
 
 SemSegNovelty::~SemSegNovelty()
 {
   if(newTrainExamples.size() > 0)
   {
-    // most uncertain region
-    showImage(maskedImg);
-    //classifier->add(newTrainExamples)
+    // show most uncertain region
+    if (visualizeALimages)
+      showImage(maskedImg);
+
+    //incorporate new information into the classifier
+    if (classifier != NULL)
+      classifier->addMultipleExamples(newTrainExamples);
+
+    //store the classifier so that we can read it again in the next round (if desired)
     classifier->save ( cache + "/classifier.data" );
   }
 
   // clean-up
   if ( classifier != NULL )
     delete classifier;
+  if ( vclassifier != NULL )
+    delete vclassifier;
   if ( featExtract != NULL )
     delete featExtract;
 }
 
 void SemSegNovelty::visualizeRegion(const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage)
 {
-  vector<uchar> color;
+  std::vector<uchar> color;
   color.push_back(255);
   color.push_back(0);
   color.push_back(0);
@@ -140,14 +233,7 @@ void SemSegNovelty::train ( const MultiDataset *md )
   ////////////////////////
   // feature extraction //
   ////////////////////////
-
-  std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
-  if ( forbidden_classes_s == "" )
-  {
-    forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
-  }
-  cn.getSelection ( forbidden_classes_s, forbidden_classes );
-
+
   //check the same thing for the training classes - this is very specific to our setup
   std::string forbidden_classesTrain_s = conf->gS ( "analysis", "donttrainTrain", "" );
   if ( forbidden_classesTrain_s == "" )
@@ -155,7 +241,7 @@ void SemSegNovelty::train ( const MultiDataset *md )
     forbidden_classesTrain_s = conf->gS ( "analysis", "forbidden_classesTrain", "" );
   }
   cn.getSelection ( forbidden_classesTrain_s, forbidden_classesTrain );
-
+
 
   ProgressBar pb ( "Local Feature Extraction" );
   pb.show();
@@ -225,9 +311,9 @@ void SemSegNovelty::train ( const MultiDataset *md )
     feats.calcIntegral ( c );
   }
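+
+// Clarifying note (an added comment, not original patch logic): calcIntegral converts each
+// feature channel into an integral image, so the window sums requested later through
+// getIntegralValue can be read in O(1) with the standard four-corner identity
+//   sum(x1..x2, y1..y2) = I(x2,y2) - I(x1-1,y2) - I(x2,y1-1) + I(x1-1,y1-1).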
 
-  for ( int y = 0; y < ysize; y += featdist )
+    for ( int y = 0; y < ysize; y += trainWsize)
   {
-    for ( int x = 0; x < xsize; x += featdist )
+      for ( int x = 0; x < xsize; x += trainWsize )
     {
 
       int classnoTmp = labels.getPixel ( x, y );
@@ -256,8 +342,12 @@ void SemSegNovelty::train ( const MultiDataset *md )
 
       example.position = imgnb;
       examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
+
     }
   }
+
+
+
 
   delete ce;
   imgnb++;
@@ -288,19 +378,39 @@ void SemSegNovelty::train ( const MultiDataset *md )
   delete f;
 
   if ( classifier != NULL )
+  {
+    std::cerr << "train FP-classifier with " << examples.size() << " examples" << std::endl;
     classifier->train ( fp, examples );
+    std::cerr << "training finished" << std::endl;
+  }
   else
   {
-    cerr << "no classifier selected?!" << endl;
-    exit ( -1 );
-  }
+    LabeledSetVector lvec;
+    convertExamplesToLSet ( examples, lvec );
+    vclassifier->teach ( lvec );
+//     if ( usegmm )
+//       convertLSetToSparseExamples ( examples, lvec );
+//     else
+    std::cerr << "classifierString: " << classifierString << std::endl;
+    if (this->classifierString.compare("nn") == 0)
+    {
+      convertLSetToExamples ( examples, lvec, true /* only remove pointers to the data in the LSet-struct*/);
+    }
+    else
+    {
+      convertLSetToExamples ( examples, lvec, false /* remove all training examples of the LSet-struct */);
+    }
+    vclassifier->finishTeaching();
+  }
 
   fp.destroy();
 
-  if ( save_cache )
+  if ( save_classifier )
   {
     if ( classifier != NULL )
       classifier->save ( cache + "/classifier.data" );
+    else
+      vclassifier->save ( cache + "/veccl.data" );
   }
 
   ////////////
@@ -315,12 +425,16 @@ void SemSegNovelty::train ( const MultiDataset *md )
   cerr << "SemSeg training finished" << endl;
 }
 
+
 void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
-{
+{
   Timer timer;
   timer.start();
 
+  //segResult contains the GT labels when this method is called
+  // we simply store them in labels, to have easy access to the GT information later on
   Image labels = segresult;
+  //just to be sure that we do not have a GT-biased result :)
   segresult.set(0);
 
   int featdim = -1;
@@ -363,237 +477,83 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   {
     feats.calcIntegral ( c );
   }
-
-  FloatImage uncert ( xsize, ysize );
-  uncert.set ( 0.0 );
-
-  FloatImage gpUncertainty ( xsize, ysize );
-  FloatImage gpMean ( xsize, ysize );
-  FloatImage gpMeanRatio ( xsize, ysize );
-  FloatImage gpWeightAll ( xsize, ysize );
-  FloatImage gpWeightRatio ( xsize, ysize );
-
-  gpUncertainty.set ( 0.0 );
-  gpMean.set ( 0.0 );
-  gpMeanRatio.set ( 0.0 );
-  gpWeightAll.set ( 0.0 );
-  gpWeightRatio.set ( 0.0 );
-
-  double maxunc = -numeric_limits<double>::max();
 
-  double maxGPUncertainty = -numeric_limits<double>::max();
-  double maxGPMean = -numeric_limits<double>::max();
-  double maxGPMeanRatio = -numeric_limits<double>::max();
-  double maxGPWeightAll = -numeric_limits<double>::max();
-  double maxGPWeightRatio = -numeric_limits<double>::max();
-
   timer.stop();
-  cout << "first: " << timer.getLastAbsolute() << endl;
+  std::cout << "AL time for preparation: " << timer.getLastAbsolute() << std::endl;
+
+  timer.start();
+  //classification results currently only need to be computed separately if we use the vclassifier, i.e., the nearest neighbor used
+  // for the "novel feature learning" approach
+  //in all other settings, such as active sem seg in general, we do this within the novelty-computation-methods
+  if ( classifier == NULL )
+  {
+    this->computeClassificationResults( feats, segresult, probabilities, xsize, ysize, featdim);
+  }
+//   timer.stop();
+//
+//   std::cerr << "classification results computed" << std::endl;
 
-  //we need this lateron for active learning stuff
-  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+  FloatImage noveltyImage ( xsize, ysize );
+  noveltyImage.set ( 0.0 );
-  timer.start();
-#pragma omp parallel for
-  for ( int y = 0; y < ysize; y += testWSize )
+  switch (noveltyMethod)
   {
-    Example example;
-    example.vec = NULL;
-    example.svec = new SparseVector ( featdim );
-    for ( int x = 0; x < xsize; x += testWSize)
+    case GPVARIANCE:
     {
-      for ( int f = 0; f < featdim; f++ )
-      {
-        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
-        if ( val > 1e-10 )
-          ( *example.svec ) [f] = val;
-      }
-      example.svec->normalize();
-
-      ClassificationResult cr = classifier->classify ( example );
-
-      //we need this if we want to compute GP-AL-measure lateron
-      double minMeanAbs ( numeric_limits<double>::max() );
-      double maxMeanAbs ( 0.0 );
-      double sndMaxMeanAbs ( 0.0 );
-      double maxMean ( -numeric_limits<double>::max() );
-      double sndMaxMean ( -numeric_limits<double>::max() );
-
-      for ( int j = 0 ; j < cr.scores.size(); j++ )
-      {
-        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
-        {
-          continue;
-        }
-
-        //check whether we found a class with higher smaller abs mean than the current minimum
-        if (abs(cr.scores[j]) < minMeanAbs)
-          minMeanAbs = abs(cr.scores[j]);
-        //check for larger abs mean as well
-        if (abs(cr.scores[j]) > maxMeanAbs)
-        {
-          sndMaxMeanAbs = maxMeanAbs;
-          maxMeanAbs = abs(cr.scores[j]);
-        }
-        // and also for the second highest mean of all classes
-        else if (abs(cr.scores[j]) > sndMaxMeanAbs)
-        {
-          sndMaxMeanAbs = abs(cr.scores[j]);
-        }
-        //check for larger mean without abs as well
-        if (cr.scores[j] > maxMean)
-        {
-          sndMaxMean = maxMean;
-          maxMean = cr.scores[j];
-        }
-        // and also for the second highest mean of all classes
-        else if (cr.scores[j] > sndMaxMean)
-        {
-          sndMaxMean = cr.scores[j];
-        }
-      }
-
-      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
-
-      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
-      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
-      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise))
-
-      // compute results when we take the lowest mean value of all classes
-      double gpMeanVal = minMeanAbs;
-
-      //look at the difference in the absolut mean values for the most plausible class
-      // and the second most plausible class
-      double gpMeanRatioVal= maxMean - sndMaxMean;
-
-      double gpWeightAllVal ( 0.0 );
-      double gpWeightRatioVal ( 0.0 );
-
-      if ( numberOfClasses > 2)
-      {
-        //compute the weight in the alpha-vector for every sample after assuming it to be
-        // added to the training set.
-        // Thereby, we measure its "importance" for the current model
-        //
-        //double firstTerm is already computed
-        //
-        //the second term is only needed when computing impacts
-        //double secondTerm; //this is the nasty guy :/
-
-        //--- compute the third term
-        // this is the difference between predicted label and GT label
-        std::vector<double> diffToPositive; diffToPositive.clear();
-        std::vector<double> diffToNegative; diffToNegative.clear();
-        double diffToNegativeSum(0.0);
-
-        for ( int j = 0 ; j < cr.scores.size(); j++ )
-        {
-          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
-          {
-            continue;
-          }
-
-          // look at the difference to plus 1
-          diffToPositive.push_back(abs(cr.scores[j] - 1));
-          // look at the difference to -1
-          diffToNegative.push_back(abs(cr.scores[j] + 1));
-          //sum up the difference to -1
-          diffToNegativeSum += abs(cr.scores[j] - 1);
-        }
-
-        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
-        //and use this as the third term for this specific class.
-        //the final value is obtained by minimizing over all classes
-        //
-        // originally, we minimize over all classes after building the final score
-        // however, the first and the second term do not depend on the choice of
-        // y*, therefore we minimize here already
-        double thirdTerm (numeric_limits<double>::max()) ;
-        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
-        {
-          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt]) );
-          if (tmpVal < thirdTerm)
-            thirdTerm = tmpVal;
-        }
-        gpWeightAllVal = thirdTerm*firstTerm;
-
-        //now look on the ratio of the resulting weights for the most plausible
-        // against the second most plausible class
-        double thirdTermMostPlausible ( 0.0 ) ;
-        double thirdTermSecondMostPlausible ( 0.0 ) ;
-        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
-        {
-          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
-          {
-            thirdTermSecondMostPlausible = thirdTermMostPlausible;
-            thirdTermMostPlausible = diffToPositive[tmpCnt];
-          }
-          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
-          {
-            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
-          }
-        }
-        //compute the resulting score
-        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;
-
-        //finally, look for this feature how it would affect to whole model (summarized by weight-vector alpha), if we would
-        //use it as an additional training example
-        //TODO this would be REALLY computational demanding. Do we really want to do this?
-        //  gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
-        //  gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
-      }
-      else //binary scenario
-      {
-        gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
-        gpWeightAllVal *= firstTerm;
-        gpWeightRatioVal = gpWeightAllVal;
-      }
-
-      int xs = std::max(0, x - testWSize/2);
-      int xe = std::min(xsize - 1, x + testWSize/2);
-      int ys = std::max(0, y - testWSize/2);
-      int ye = std::min(ysize - 1, y + testWSize/2);
-      for (int yl = ys; yl <= ye; yl++)
-      {
-        for (int xl = xs; xl <= xe; xl++)
-        {
-          for ( int j = 0 ; j < cr.scores.size(); j++ )
-          {
-            probabilities ( xl, yl, j ) = cr.scores[j];
-          }
-          segresult ( xl, yl ) = cr.classno;
-          uncert ( xl, yl ) = cr.uncertainty;
-
-          gpUncertainty ( xl, yl ) = gpUncertaintyVal;
-          gpMean ( xl, yl ) = gpMeanVal;
-          gpMeanRatio ( xl, yl ) = gpMeanRatioVal;
-          gpWeightAll ( xl, yl ) = gpWeightAllVal;
-          gpWeightRatio ( xl, yl ) = gpWeightRatioVal;
-        }
-      }
-
-      if (maxunc < cr.uncertainty)
-        maxunc = cr.uncertainty;
-
-      if (maxGPUncertainty < gpUncertaintyVal)
-        maxGPUncertainty = gpUncertaintyVal;
-      if (maxGPMean < gpMeanVal)
-        maxGPMean = gpMeanVal;
-      if (maxGPMeanRatio < gpMeanRatioVal)
-        maxGPMeanRatio = gpMeanRatioVal;
-      if (maxGPWeightAll < gpMeanRatioVal)
-        maxGPWeightAll = gpWeightAllVal;
-      if (maxGPWeightRatio < gpWeightRatioVal)
-        maxGPWeightRatio = gpWeightRatioVal;
-
-      example.svec->clear();
+      this->computeNoveltyByVariance( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
     }
-    delete example.svec;
-    example.svec = NULL;
+    case GPUNCERTAINTY:
+    {
+      this->computeNoveltyByGPUncertainty( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case GPMINMEAN:
+    {
+      std::cerr << "compute novelty using the minimum mean" << std::endl;
+      this->computeNoveltyByGPMean( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case GPMEANRATIO:
+    {
+      this->computeNoveltyByGPMeanRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case GPWEIGHTALL:
+    {
+      this->computeNoveltyByGPWeightAll( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case GPWEIGHTRATIO:
+    {
+      this->computeNoveltyByGPWeightRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case RANDOM:
+    {
+      this->computeNoveltyByRandom( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    default:
+    {
+      //do nothing, keep the image constant at 0.0
+      break;
+    }
+
+  }
+
+  timer.stop();
+  std::cout << "AL time for novelty score computation: " << timer.getLastAbsolute() << std::endl;
+
+  if (visualizeALimages)
+  {
+    ColorImage imgrgbTmp (xsize, ysize);
+    ICETools::convertToRGB ( noveltyImage, imgrgbTmp );
+    showImage(imgrgbTmp, "Novelty Image without Region Segmentation");
   }
-
-  //  std::cerr << "uncertainty: " << gpUncertaintyVal << " minMean: " << gpMeanVal << " gpMeanRatio: " << gpMeanRatioVal << " weightAll: " << gpWeightAllVal << " weightRatio: "<< gpWeightRatioVal << std::endl;
 
+
+  timer.start();
 
   //determine regions
   if(regionSeg != NULL)
@@ -602,12 +562,14 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
    int amountRegions = regionSeg->segRegions ( img, mask );
 
    //compute probs per region
-    vector<vector<double> > regionProb(amountRegions,vector<double>(probabilities.channels(),0.0));
-    vector<double> regionNoveltyMeasure (amountRegions, 0.0);
-    vector<int> regionCounter(amountRegions, 0);
-    for ( int y = 0; y < ysize; y++)
+    std::vector<std::vector<double> > regionProb(amountRegions, std::vector<double>(probabilities.channels(),0.0));
+    std::vector<double> regionNoveltyMeasure (amountRegions, 0.0);
+
+    std::vector<int> regionCounter(amountRegions, 0);
+    std::vector<int> regionCounterNovelty(amountRegions, 0);
+    for ( int y = 0; y < ysize; y += trainWsize) //y++)
    {
-      for (int x = 0; x < xsize; x++)
+      for (int x = 0; x < xsize; x += trainWsize) //x++)
      {
        int r = mask(x,y);
        regionCounter[r]++;
@@ -615,52 +577,99 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
        {
          regionProb[r][j] += probabilities ( x, y, j );
        }
-        regionNoveltyMeasure[r] += uncert(x,y);
+
+        if ( forbidden_classesActiveLearning.find( labels(x,y) ) == forbidden_classesActiveLearning.end() )
+        {
+          //count the amount of "novelty" for the corresponding region
+          regionNoveltyMeasure[r] += noveltyImage(x,y);
+          regionCounterNovelty[r]++;
+        }
      }
    }
 
    //find best class per region
-    vector<int> bestClassPerRegion(amountRegions,0);
+    std::vector<int> bestClassPerRegion(amountRegions,0);
+
+    double maxNoveltyScore = -numeric_limits<double>::max();
+    if (!mostNoveltyWithMaxScores)
+    {
+      maxNoveltyScore = numeric_limits<double>::max();
+    }
 
-    double maxuncert = -numeric_limits<double>::max();
    int maxUncertRegion = -1;
 
+    //loop over all regions and compute averaged novelty scores
    for(int r = 0; r < amountRegions; r++)
    {
+
+      //check for the most plausible class per region
      double maxval = -numeric_limits<double>::max();
+
+      //loop over all classes
      for(int c = 0; c < probabilities.channels(); c++)
      {
        regionProb[r][c] /= regionCounter[r];
-        if(maxval < regionProb[r][c] && regionProb[r][c] != 0.0)
-        {
-          maxval = regionProb[r][c];
-          bestClassPerRegion[r] = c;
+
+        if( (maxval < regionProb[r][c]) ) //&& (regionProb[r][c] != 0.0) )
+        {
+          maxval = regionProb[r][c];
+          bestClassPerRegion[r] = c;
        }
      }
-      regionNoveltyMeasure[r] /= regionCounter[r];
-      if(maxuncert < regionNoveltyMeasure[r])
+
+      //if the region only contains invalid information (e.g., background), skip it
+      if (regionCounterNovelty[r] == 0)
+      {
+        continue;
+      }
+
+      //normalize summed novelty scores to region size
+      regionNoveltyMeasure[r] /= regionCounterNovelty[r];
+
+      //did we find a region that has a higher score than the most novel region known so far within this image?
+      if( ( mostNoveltyWithMaxScores && (maxNoveltyScore < regionNoveltyMeasure[r]) ) // if we look for large novelty scores, e.g., variance
+          || ( !mostNoveltyWithMaxScores && (maxNoveltyScore > regionNoveltyMeasure[r]) ) ) // if we look for small novelty scores, e.g., min mean
      {
-        maxuncert = regionNoveltyMeasure[r];
-        maxUncertRegion = r;
+        //did we already query a region of this image? -- and it was this specific region
+        if ( (queriedRegions.find( currentFile ) != queriedRegions.end() ) && ( queriedRegions[currentFile].find(r) != queriedRegions[currentFile].end() ) )
+        {
+          continue;
+        }
+        else //only accept the region as novel if we never queried it before
+        {
+          maxNoveltyScore = regionNoveltyMeasure[r];
+          maxUncertRegion = r;
+        }
+
      }
+
    }
 
+    // after finding the most novel region for the current image, check whether this region is also the most novel with respect
+    // to all previously seen test images
+    // if so, store the corresponding features, since we want to "actively" query them to incorporate useful information
    if(findMaximumUncert)
    {
-      if(maxuncert > globalMaxUncert)
+      if( ( mostNoveltyWithMaxScores && (maxNoveltyScore > globalMaxUncert) )
+          || ( !mostNoveltyWithMaxScores && (maxNoveltyScore < globalMaxUncert) ) )
      {
-        //save new important features
+        //the current most novel region of the image has a "higher" novelty score than the previous most novel region of all test images worked on so far
+        // -> save new important features of this region
        Examples examples;
-        for ( int y = 0; y < ysize; y += testWSize )
+        for ( int y = 0; y < ysize; y += trainWsize )
        {
-          for ( int x = 0; x < xsize; x += testWSize)
+          for ( int x = 0; x < xsize; x += trainWsize)
          {
            if(mask(x,y) == maxUncertRegion)
            {
+              int classnoTmp = labels(x,y);
+              if ( forbidden_classesActiveLearning.find(classnoTmp) != forbidden_classesActiveLearning.end() )
+                continue;
+
              Example example;
              example.vec = NULL;
              example.svec = new SparseVector ( featdim );
-              int classnoTmp = labels(x,y);
+
              for ( int f = 0; f < featdim; f++ )
              {
                double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
@@ -675,15 +684,27 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
 
        if(examples.size() > 0)
        {
+          std::cerr << "found " << examples.size() << " new examples in the queried region" << std::endl << std::endl;
          newTrainExamples.clear();
          newTrainExamples = examples;
-          globalMaxUncert = maxuncert;
-          visualizeRegion(img,mask,maxUncertRegion,maskedImg);
+          globalMaxUncert = maxNoveltyScore;
+          //prepare for later visualization
+//           if (visualizeALimages)
+            visualizeRegion(img,mask,maxUncertRegion,maskedImg);
+        }
+        else
+        {
+          std::cerr << "the queried region has no valid information" << std::endl << std::endl;
        }
+
+        //save filename and region index
+        currentRegionToQuery.first = currentFile;
+        currentRegionToQuery.second = maxUncertRegion;
      }
    }
 
    //write back best results per region
+    //i.e., write normalized novelty scores for every region into the novelty image
    for ( int y = 0; y < ysize; y++)
    {
      for (int x = 0; x < xsize; x++)
@@ -694,12 +715,17 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
          probabilities ( x, y, j ) = regionProb[r][j];
        }
        segresult(x,y) = bestClassPerRegion[r];
+        // write novelty scores for every segment into the "final" image
+        noveltyImage(x,y) = regionNoveltyMeasure[r];
      }
    }
-  }
-
+  } // if regionSeg != null
+
  timer.stop();
-  cout << "second: " << timer.getLastAbsolute() << endl;
+  std::cout << "AL time for determination of novel regions: " << timer.getLastAbsolute() << std::endl;
+
+//   timer.stop();
+//   cout << "second: " << timer.getLastAbsolute() << endl;
  timer.start();
 
  ColorImage imgrgb ( xsize, ysize );
@@ -707,54 +733,787 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
  std::stringstream out;
  std::vector< std::string > list2;
  StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
-  out << uncertdir << "/" << list2.back();
-
-  uncert.writeRaw(out.str() + ".rawfloat");
-  uncert(0, 0) = 0.0;
-  uncert(0, 1) = 1.0+gpNoise;
-  ICETools::convertToRGB ( uncert, imgrgb );
-  imgrgb.write ( out.str() + "rough.png" );
-
-  //invert images such that large numbers correspond to high impact, high variance, high importance, high novelty, ...
-  for ( int y = 0; y < ysize; y++)
-  {
-    for (int x = 0; x < xsize; x++)
-    {
-      gpUncertainty(x,y) = maxGPUncertainty - gpUncertainty(x,y);
-      gpMean(x,y) = maxGPMean - gpMean(x,y);
-      gpMeanRatio(x,y) = maxGPMeanRatio - gpMeanRatio(x,y);
-      gpWeightRatio(x,y) = maxGPWeightRatio - gpWeightRatio(x,y);
-    }
-  }
-
-
-  //
-  gpUncertainty(0, 0) = 0.0;
-  gpUncertainty(0, 1) = maxGPUncertainty;
-  ICETools::convertToRGB ( gpUncertainty, imgrgb );
-  imgrgb.write ( out.str() + "gpUncertainty.png" );
-  //
-  gpMean(0, 0) = 0.0;
-  gpMean(0, 1) = maxGPMean;
-  ICETools::convertToRGB ( gpMean, imgrgb );
-  imgrgb.write ( out.str() + "gpMean.png" );
-  //
-  gpMeanRatio(0, 0) = 0.0;
-  gpMeanRatio(0, 1) = maxGPMeanRatio;
-  ICETools::convertToRGB ( gpMeanRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpMeanRatio.png" );
-  //
-  gpWeightAll(0, 0) = 0.0;
-  gpWeightAll(0, 1) = maxGPWeightAll;
-  ICETools::convertToRGB ( gpWeightAll, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightAll.png" );
-  //
-  gpWeightRatio(0, 0) = 0.0;
-  gpWeightRatio(0, 1) = maxGPWeightRatio;
-  ICETools::convertToRGB ( gpWeightRatio, imgrgb );
-  imgrgb.write ( out.str() + "gpWeightRatio.png" );
-
+  out << resultdir << "/" << list2.back();
+
+  noveltyImage.writeRaw(out.str() + "_run_" + NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+".rawfloat");
+
+  if (visualizeALimages)
+  {
+    ICETools::convertToRGB ( noveltyImage, imgrgb );
+    showImage(imgrgb, "Novelty Image");
+  }
 
  timer.stop();
-  cout << "last: " << timer.getLastAbsolute() << endl;
+  cout << "AL time for writing the raw novelty image: " << timer.getLastAbsolute() << endl;
+}
+
+inline void SemSegNovelty::computeClassificationResults( const NICE::MultiChannelImageT<double> & feats,
+                                  NICE::Image & segresult,
+                                  NICE::MultiChannelImageT<double> & probabilities,
+                                  const int & xsize,
+                                  const int & ysize,
+                                  const int & featdim
+                                )
+{
+  std::cerr << "featdim: " << featdim << std::endl;
+
+  if ( classifier != NULL )
+  {
+
+    #pragma omp parallel for
+    for ( int y = 0; y < ysize; y += testWSize )
+    {
+      Example example;
+      example.vec = NULL;
+      example.svec = new SparseVector ( featdim );
+      for ( int x = 0; x < xsize; x += testWSize)
+      {
+        for ( int f = 0; f < featdim; f++ )
+        {
+          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+          if ( val > 1e-10 )
+            ( *example.svec ) [f] = val;
+        }
+        example.svec->normalize();
+
+        ClassificationResult cr = classifier->classify ( example );
+
+        int xs = std::max(0, x - testWSize/2);
+        int xe = std::min(xsize - 1, x + testWSize/2);
+        int ys = std::max(0, y - testWSize/2);
+        int ye = std::min(ysize - 1, y + testWSize/2);
+        for (int yl = ys; yl <= ye; yl++)
+        {
+          for (int xl = xs; xl <= xe; xl++)
+          {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            {
+              probabilities ( xl, yl, j ) = cr.scores[j];
+            }
+            segresult ( xl, yl ) = cr.classno;
+          }
+        }
+
+        example.svec->clear();
+      }
+      delete example.svec;
+      example.svec = NULL;
+    }
+  }
+  else //vclassifier
+  {
+    std::cerr << "compute classification results with vclassifier" << std::endl;
+    #pragma omp parallel for
+    for ( int y = 0; y < ysize; y += testWSize )
+    {
+      for ( int x = 0; x < xsize; x += testWSize)
+      {
+        NICE::Vector v(featdim);
+        for ( int f = 0; f < featdim; f++ )
+        {
+          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+          v[f] = val;
+        }
+        v.normalizeL1();
+
+        ClassificationResult cr = vclassifier->classify ( v );
+
+        int xs = std::max(0, x - testWSize/2);
+        int xe = std::min(xsize - 1, x + testWSize/2);
+        int ys = std::max(0, y - testWSize/2);
+        int ye = std::min(ysize - 1, y + testWSize/2);
+        for (int yl = ys; yl <= ye; yl++)
+        {
+          for (int xl = xs; xl <= xe; xl++)
+          {
+            for ( int j = 0 ; j < cr.scores.size(); j++ )
+            {
+              probabilities ( xl, yl, j ) = cr.scores[j];
+            }
+            segresult ( xl, yl ) = cr.classno;
+          }
+        }
+      }
+    }
+
+  }
+}
+
+// compute novelty images depending on the strategy chosen
+
+void SemSegNovelty::computeNoveltyByRandom(         NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+
+      double randVal = randDouble();
+
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = randVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+
+void SemSegNovelty::computeNoveltyByVariance(       NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = cr.uncertainty;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+void SemSegNovelty::computeNoveltyByGPUncertainty(  NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double maxMeanAbs ( 0.0 );
+
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        //check for larger abs mean
+        if (abs(cr.scores[j]) > maxMeanAbs)
+        {
+          maxMeanAbs = abs(cr.scores[j]);
+        }
+
+      }
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(cr.uncertainty+gpNoise)
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpUncertaintyVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+void SemSegNovelty::computeNoveltyByGPMean(         NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double minMeanAbs ( numeric_limits<double>::max() );
+
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+
+        //check whether we found a class with smaller abs mean than the current minimum
+        if (abs(cr.scores[j]) < minMeanAbs)
+        {
+          minMeanAbs = abs(cr.scores[j]);
+        }
+      }
+
+      // compute results when we take the lowest mean value of all classes
+      double gpMeanVal = minMeanAbs;
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpMeanVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+void SemSegNovelty::computeNoveltyByGPMeanRatio(    NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double maxMean ( -numeric_limits<double>::max() );
+      double sndMaxMean ( -numeric_limits<double>::max() );
+
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+
+        //check for larger mean without abs as well
+        if (cr.scores[j] > maxMean)
+        {
+          sndMaxMean = maxMean;
+          maxMean = cr.scores[j];
+        }
+        // and also for the second highest mean of all classes
+        else if (cr.scores[j] > sndMaxMean)
+        {
+          sndMaxMean = cr.scores[j];
+        }
+      }
+
+      //look at the difference in the absolute mean values for the most plausible class
+      // and the second most plausible class
+      double gpMeanRatioVal= maxMean - sndMaxMean;
+
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpMeanRatioVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+void SemSegNovelty::computeNoveltyByGPWeightAll(    NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+      double gpWeightAllVal ( 0.0 );
+
+      if ( numberOfClasses > 2)
+      {
+        //compute the weight in the alpha-vector for every sample after assuming it to be
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        //
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+
+        //--- compute the third term
+        // this is the difference between predicted label and GT label
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }
+
+          // look at the difference to plus 1
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+          // look at the difference to -1
+          diffToNegative.push_back(abs(cr.scores[j] + 1));
+          //sum up the difference to -1
+          diffToNegativeSum += abs(cr.scores[j] + 1);
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+        double thirdTerm (numeric_limits<double>::max()) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt]) );
+          if (tmpVal < thirdTerm)
+            thirdTerm = tmpVal;
+        }
+        gpWeightAllVal = thirdTerm*firstTerm;
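+
+        // Reading of the code above (an added comment, not an authoritative derivation):
+        // thirdTerm is the distance of the score vector to the ideal +1/-1 encoding of
+        // the best-fitting label y*, and firstTerm scales it by the predictive precision,
+        // approximating the alpha-weight the sample would receive if it were added.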
+      }
+      else //binary scenario
+      {
+        gpWeightAllVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+        gpWeightAllVal *= firstTerm;
+      }
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpWeightAllVal;
+        }
+      }
+
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+void SemSegNovelty::computeNoveltyByGPWeightRatio(  NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                                    NICE::Image & segresult,
+                                                    NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize)
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+      double gpWeightRatioVal ( 0.0 );
+
+      if ( numberOfClasses > 2)
+      {
+        //compute the weight in the alpha-vector for every sample after assuming it to be
+        // added to the training set.
+        // Thereby, we measure its "importance" for the current model
+        //
+        //double firstTerm is already computed
+        //
+        //the second term is only needed when computing impacts
+        //double secondTerm; //this is the nasty guy :/
+
+        //--- compute the third term
+        // this is the difference between predicted label and GT label
+        std::vector<double> diffToPositive; diffToPositive.clear();
+        std::vector<double> diffToNegative; diffToNegative.clear();
+        double diffToNegativeSum(0.0);
+
+        for ( int j = 0 ; j < cr.scores.size(); j++ )
+        {
+          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+          {
+            continue;
+          }
+
+          // look at the difference to plus 1
+          diffToPositive.push_back(abs(cr.scores[j] - 1));
+        }
+
+        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+        //and use this as the third term for this specific class.
+        //the final value is obtained by minimizing over all classes
+        //
+        // originally, we minimize over all classes after building the final score
+        // however, the first and the second term do not depend on the choice of
+        // y*, therefore we minimize here already
+
+        //now look at the ratio of the resulting weights for the most plausible
+        // against the second most plausible class
+        double thirdTermMostPlausible ( 0.0 ) ;
+        double thirdTermSecondMostPlausible ( 0.0 ) ;
+        for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+        {
+          if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+          {
+            thirdTermSecondMostPlausible = thirdTermMostPlausible;
+            thirdTermMostPlausible = diffToPositive[tmpCnt];
+          }
+          else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+          {
+            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+          }
+        }
+        //compute the resulting score
+        gpWeightRatioVal = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;
+
+        //finally, look at how this feature would affect the whole model (summarized by weight-vector alpha), if we
+        //used it as an additional training example
+        //TODO this would be REALLY computationally demanding. Do we really want to do this?
+        //  gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+        //  gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
+      }
+      else //binary scenario
+      {
+        gpWeightRatioVal = std::min( abs(cr.scores[*classesInUse.begin()]+1), abs(cr.scores[*classesInUse.begin()]-1) );
+        gpWeightRatioVal *= firstTerm;
+      }
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpWeightRatioVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
+
+void SemSegNovelty::addNewExample(const NICE::Vector& newExample, const int & newClassNo)
+{
+  //accept the new class as valid information
+  if ( forbidden_classesTrain.find ( newClassNo ) != forbidden_classesTrain.end() )
+  {
+    forbidden_classesTrain.erase(newClassNo);
+    numberOfClasses++;
+  }
+  if ( classesInUse.find ( newClassNo ) == classesInUse.end() )
+  {
+    classesInUse.insert( newClassNo );
+  }
+
+
+  //then add it to the classifier used
+  if ( classifier != NULL )
+  {
+    //TODO
+  }
+  else //vclassifier
+  {
+    if (this->classifierString.compare("nn") == 0)
+    {
+      vclassifier->teach ( newClassNo, newExample );
+    }
+  }
+}
+
+void SemSegNovelty::addNovelExamples()
+{
+
+  Timer timer;
+
+  //show the image that contains the most novel region
+  if (visualizeALimages)
+    showImage(maskedImg, "Most novel region");
+
+  timer.start();
+
+  std::stringstream out;
+  std::vector< std::string > list2;
+  StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
+  out << resultdir << "/" << list2.back();
+
+  maskedImg.writePPM ( out.str() + "_run_" + NICE::intToString(this->iterationCountSuffix) + "_" + noveltyMethodString+ "_query.ppm" );
+
+  timer.stop();
+  std::cerr << "AL time for writing queried image: " << timer.getLast() << std::endl;
+
+  timer.start();
+
+  //check which classes will be added using the features from the novel region
+  std::set<int> newClassNumbers;
+  newClassNumbers.clear(); //just to be sure
+  for ( uint i = 0 ; i < newTrainExamples.size() ; i++ )
+  {
+    if (newClassNumbers.find(newTrainExamples[i].first /* classNumber*/) == newClassNumbers.end() )
+    {
+      newClassNumbers.insert(newTrainExamples[i].first );
+    }
+  }
+
+  //accept the new classes as valid information
+  for (std::set<int>::const_iterator clNoIt = newClassNumbers.begin(); clNoIt != newClassNumbers.end(); clNoIt++)
+  {
+    if ( forbidden_classesTrain.find ( *clNoIt ) != forbidden_classesTrain.end() )
+    {
+      forbidden_classesTrain.erase(*clNoIt);
+      numberOfClasses++;
+    }
+    if ( classesInUse.find ( *clNoIt ) == classesInUse.end() )
+    {
+      classesInUse.insert( *clNoIt );
+    }
+  }
+
+  timer.stop();
+  std::cerr << "AL time for accepting possible new classes: " << timer.getLast() << std::endl;
+
+  timer.start();
+  //then add the new features to the classifier used
+  if ( classifier != NULL )
+  {
+    if (this->classifierString.compare("ClassifierGPHIK") == 0)
+    {
+      classifier->addMultipleExamples ( this->newTrainExamples );
+    }
+  }
+  else //vclassifier
+  {
+    //TODO
+  }
+
+  timer.stop();
+  std::cerr << "AL time for actually updating the classifier: " << timer.getLast() << std::endl;
+
+  std::cerr << "the current region to query is: " << currentRegionToQuery.first << " -- " << currentRegionToQuery.second << std::endl;
+
+  //did we already query a region of this image?
+  if ( queriedRegions.find( currentRegionToQuery.first ) != queriedRegions.end() )
+  {
+    queriedRegions[ currentRegionToQuery.first ].insert(currentRegionToQuery.second);
+  }
+  else
+  {
+    std::set<int> tmpSet; tmpSet.insert(currentRegionToQuery.second);
+    queriedRegions.insert(std::pair<std::string,std::set<int> > (currentRegionToQuery.first, tmpSet ) );
+  }
+
+  std::cerr << "Write already queried regions: " << std::endl;
+  for (std::map<std::string,std::set<int> >::const_iterator it = queriedRegions.begin(); it != queriedRegions.end(); it++)
+  {
+    std::cerr << "image: " << it->first << " -- ";
+    for (std::set<int>::const_iterator itReg = it->second.begin(); itReg != it->second.end(); itReg++)
+    {
+      std::cerr << *itReg << " ";
+    }
+    std::cerr << std::endl;
+  }
+
+  //clear the latest results, since one iteration is over
+  globalMaxUncert = -numeric_limits<double>::max();
+  if (!mostNoveltyWithMaxScores)
+    globalMaxUncert = numeric_limits<double>::max();
+}
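+
+// Hedged usage sketch (an added comment for documentation; the driver loop below is an
+// assumption, only the member functions named here are taken from this file):
+//
+//   semseg->semanticseg ( ce, segresult, probabilities ); // also finds the most novel region
+//   const Examples * novel = semseg->getNovelExamples();  // inspect the queried examples
+//   semseg->addNovelExamples();                           // update classifier and bookkeeping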
+
+const Examples * SemSegNovelty::getNovelExamples() const
+{
+  return &(this->newTrainExamples);
+}