@@ -30,15 +30,20 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
   save_cache = conf->gB ( "FPCPixel", "save_cache", true );
   read_cache = conf->gB ( "FPCPixel", "read_cache", false );
-  uncertdir = conf->gS("debug", "uncertainty", "uncertainty");
+//   uncertdir = conf->gS("debug", "uncertainty", "uncertainty");
+  // write the uncertainty results to the same folder as the segmentation results
+  uncertdir = conf->gS("debug", "resultdir", "result");
   cache = conf->gS ( "cache", "root", "" );

   classifier = new GPHIKClassifierNICE ( conf, "ClassiferGPHIK" );;

   findMaximumUncert = conf->gB(section, "findMaximumUncert", true);
   whs = conf->gI ( section, "window_size", 10 );
-  featdist = conf->gI ( section, "grid", 10 );
+  // distance to the next descriptor during training
+  trainWsize = conf->gI ( section, "train_window_size", 10 );
+  // distance to the next descriptor during testing
   testWSize = conf->gI (section, "test_window_size", 10);

+  // select the segmentation method to apply
   string rsMethode = conf->gS ( section, "segmentation", "none" );

   if(rsMethode == "none")
@@ -79,6 +84,40 @@ SemSegNovelty::SemSegNovelty ( const Config *conf,
   {
     train ( md );
   }

+  // define which measure of "novelty" we want to use
+  string noveltyMethodString = conf->gS( section, "noveltyMethod", "gp-variance");
+  if (noveltyMethodString.compare("gp-variance") == 0) // novel = large variance
+  {
+    this->noveltyMethod = GPVARIANCE;
+  }
+  else if (noveltyMethodString.compare("gp-uncertainty") == 0) // novel = large uncertainty (mean / var)
+  {
+    this->noveltyMethod = GPUNCERTAINTY;
+  }
+  else if (noveltyMethodString.compare("gp-mean") == 0) // novel = small mean
+  {
+    this->noveltyMethod = GPMINMEAN;
+  }
+  else if (noveltyMethodString.compare("gp-meanRatio") == 0) // novel = small difference between the mean of the most plausible
+                                                             // class and the mean of the second most plausible class
+                                                             // (not useful in binary settings)
+  {
+    this->noveltyMethod = GPMEANRATIO;
+  }
+  else if (noveltyMethodString.compare("gp-weightAll") == 0) // novel = large weight in the alpha vector after updating the model
+                                                             // (can be predicted exactly)
+  {
+    this->noveltyMethod = GPWEIGHTALL;
+  }
+  else if (noveltyMethodString.compare("gp-weightRatio") == 0) // novel = small difference between the alpha-vector weights when the
+                                                               // GT label is assumed to be the most plausible class versus the
+                                                               // second most plausible class
+  {
+    this->noveltyMethod = GPWEIGHTRATIO;
+  }
+  else
+  {
+    this->noveltyMethod = GPVARIANCE;
+  }
 }

 SemSegNovelty::~SemSegNovelty()
@@ -225,9 +264,9 @@ void SemSegNovelty::train ( const MultiDataset *md )
     feats.calcIntegral ( c );
   }

-  for ( int y = 0; y < ysize; y += featdist )
+  for ( int y = 0; y < ysize; y += trainWsize )
   {
-    for ( int x = 0; x < xsize; x += featdist )
+    for ( int x = 0; x < xsize; x += trainWsize )
     {
       int classnoTmp = labels.getPixel ( x, y );
@@ -364,8 +403,11 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
     feats.calcIntegral ( c );
   }

+  FloatImage noveltyImage ( xsize, ysize );
+  noveltyImage.set ( 0.0 );
+
   FloatImage uncert ( xsize, ysize );
-  uncert.set ( 0.0 );
+  uncert.set ( 0.0 );
   FloatImage gpUncertainty ( xsize, ysize );
   FloatImage gpMean ( xsize, ysize );
@@ -379,6 +421,8 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   gpWeightAll.set ( 0.0 );
   gpWeightRatio.set ( 0.0 );

+  double maxNovelty = -numeric_limits<double>::max();
+
   double maxunc = -numeric_limits<double>::max();

   double maxGPUncertainty = -numeric_limits<double>::max();
@@ -394,6 +438,29 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   double gpNoise = conf->gD("GPHIK", "noise", 0.01);

   timer.start();
+  this->computeClassificationResults( feats, segresult, probabilities, xsize, ysize, featdim);
+  timer.stop();
+
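+  // compute the novelty scores with the measure selected in the config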
+  switch (noveltyMethod)
+  {
+    case GPVARIANCE:
+    {
+      this->computeNoveltyByVariance( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    case GPUNCERTAINTY:
+    {
+      this->computeNoveltyByGPUncertainty( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
+      break;
+    }
+    default:
+    {
+      // do nothing, keep the image constant at 0.0
+      break;
+    }
+  }
+
 #pragma omp parallel for
   for ( int y = 0; y < ysize; y += testWSize )
   {
@@ -604,6 +671,11 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   //compute probs per region
   vector<vector<double> > regionProb(amountRegions,vector<double>(probabilities.channels(),0.0));
   vector<double> regionNoveltyMeasure (amountRegions, 0.0);
+  std::vector<double> regionGPUncertainty (amountRegions, 0.0);
+  std::vector<double> regionGPMean (amountRegions, 0.0);
+  std::vector<double> regionGPMeanRatio (amountRegions, 0.0);
+  std::vector<double> regionGPWeightAll (amountRegions, 0.0);
+  std::vector<double> regionGPWeightRatio (amountRegions, 0.0);
   vector<int> regionCounter(amountRegions, 0);
   for ( int y = 0; y < ysize; y++)
   {
@@ -615,7 +687,15 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
       {
         regionProb[r][j] += probabilities ( x, y, j );
       }
+
+      // accumulate the "novelty" scores of the corresponding region
       regionNoveltyMeasure[r] += uncert(x,y);
+      // ... and likewise for every GP-based measure
+      regionGPUncertainty[r] += gpUncertainty(x,y);
+      regionGPMean[r] += gpMean(x,y);
+      regionGPMeanRatio[r] += gpMeanRatio(x,y);
+      regionGPWeightAll[r] += gpWeightAll(x,y);
+      regionGPWeightRatio[r] += gpWeightRatio(x,y);
     }
   }
@@ -637,7 +717,15 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         bestClassPerRegion[r] = c;
       }
     }
+    // normalize the summed novelty scores by the region size
     regionNoveltyMeasure[r] /= regionCounter[r];
+    // ... and likewise for every GP-based measure
+    regionGPUncertainty[r] /= regionCounter[r];
+    regionGPMean[r] /= regionCounter[r];
+    regionGPMeanRatio[r] /= regionCounter[r];
+    regionGPWeightAll[r] /= regionCounter[r];
+    regionGPWeightRatio[r] /= regionCounter[r];
+
     if(maxuncert < regionNoveltyMeasure[r])
     {
       maxuncert = regionNoveltyMeasure[r];
@@ -694,6 +782,14 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
         probabilities ( x, y, j ) = regionProb[r][j];
       }
       segresult(x,y) = bestClassPerRegion[r];
+      // write the novelty score of every segment into the "final" image
+      uncert(x,y) = regionNoveltyMeasure[r];
+      // ... and likewise for every GP-based measure
+      gpUncertainty(x,y) = regionGPUncertainty[r];
+      gpMean(x,y) = regionGPMean[r];
+      gpMeanRatio(x,y) = regionGPMeanRatio[r];
+      gpWeightAll(x,y) = regionGPWeightAll[r];
+      gpWeightRatio(x,y) = regionGPWeightRatio[r];
     }
   }
 }
@@ -717,54 +813,178 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
   gpWeightAll.writeRaw(out.str() + "_gpWeightAll.rawfloat");
   gpWeightRatio.writeRaw(out.str() + "_gpWeightRatio.rawfloat");

-  //not needed anymore, everything will be done in our nice script :)
-  //
-//   uncert(0, 0) = 0.0;
-//   uncert(0, 1) = 1.0+gpNoise;
-//   ICETools::convertToRGB ( uncert, imgrgb );
-//   imgrgb.write ( out.str() + "rough.png" );
-//
-//   //invert images such that large numbers correspond to high impact, high variance, high importance, high novelty, ...
-//   for ( int y = 0; y < ysize; y++)
-//   {
-//     for (int x = 0; x < xsize; x++)
-//     {
-//       gpUncertainty(x,y) = maxGPUncertainty - gpUncertainty(x,y);
-//       gpMean(x,y) = maxGPMean - gpMean(x,y);
-//       gpMeanRatio(x,y) = maxGPMeanRatio - gpMeanRatio(x,y);
-//       gpWeightRatio(x,y) = maxGPWeightRatio - gpWeightRatio(x,y);
-//     }
-//   }
-//   //actually, this is also done in the post-processing file
-//
-//
-//   //
-//   gpUncertainty(0, 0) = 0.0;
-//   gpUncertainty(0, 1) = maxGPUncertainty;
-//   ICETools::convertToRGB ( gpUncertainty, imgrgb );
-//   imgrgb.write ( out.str() + "gpUncertainty.png" );
-//   //
-//   gpMean(0, 0) = 0.0;
-//   gpMean(0, 1) = maxGPMean;
-//   ICETools::convertToRGB ( gpMean, imgrgb );
-//   imgrgb.write ( out.str() + "gpMean.png" );
-//   //
-//   gpMeanRatio(0, 0) = 0.0;
-//   gpMeanRatio(0, 1) = maxGPMeanRatio;
-//   ICETools::convertToRGB ( gpMeanRatio, imgrgb );
-//   imgrgb.write ( out.str() + "gpMeanRatio.png" );
-//   //
-//   gpWeightAll(0, 0) = 0.0;
-//   gpWeightAll(0, 1) = maxGPWeightAll;
-//   ICETools::convertToRGB ( gpWeightAll, imgrgb );
-//   imgrgb.write ( out.str() + "gpWeightAll.png" );
-//   //
-//   gpWeightRatio(0, 0) = 0.0;
-//   gpWeightRatio(0, 1) = maxGPWeightRatio;
-//   ICETools::convertToRGB ( gpWeightRatio, imgrgb );
-//   imgrgb.write ( out.str() + "gpWeightRatio.png" );
-//

   timer.stop();
   cout << "last: " << timer.getLastAbsolute() << endl;
 }
+
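+// classify every point on a regular grid with spacing testWSize and broadcast
+// the class scores and the predicted label to all pixels of the surrounding window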
+inline void SemSegNovelty::computeClassificationResults( const NICE::MultiChannelImageT<double> & feats,
+                                                         NICE::Image & segresult,
+                                                         NICE::MultiChannelImageT<double> & probabilities,
+                                                         const int & xsize,
+                                                         const int & ysize,
+                                                         const int & featdim )
+{
+  #pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize )
+    {
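+      // aggregate the feature responses of the local window around (x,y) using the integral image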
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
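+      // write the predicted label and the class scores to every pixel of the current test window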
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
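+// novelty measure GPVARIANCE: use the predictive variance of the GP model as the novelty score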
+void SemSegNovelty::computeNoveltyByVariance( NICE::FloatImage & noveltyImage,
+                                              const NICE::MultiChannelImageT<double> & feats,
+                                              NICE::Image & segresult,
+                                              NICE::MultiChannelImageT<double> & probabilities,
+                                              const int & xsize, const int & ysize, const int & featdim )
+{
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize )
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
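+      // cr.uncertainty is used directly as the novelty score (the predictive variance)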
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = cr.uncertainty;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}
+
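+// novelty measure GPUNCERTAINTY: the heuristic of Kapoor et al. (IJCV 2010), i.e., |mean| / sqrt(variance + noise)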
+void SemSegNovelty::computeNoveltyByGPUncertainty( NICE::FloatImage & noveltyImage,
+                                                   const NICE::MultiChannelImageT<double> & feats,
+                                                   NICE::Image & segresult,
+                                                   NICE::MultiChannelImageT<double> & probabilities,
+                                                   const int & xsize, const int & ysize, const int & featdim )
+{
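+  // the same GP noise value as read in semanticseg above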
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
+
+#pragma omp parallel for
+  for ( int y = 0; y < ysize; y += testWSize )
+  {
+    Example example;
+    example.vec = NULL;
+    example.svec = new SparseVector ( featdim );
+    for ( int x = 0; x < xsize; x += testWSize )
+    {
+      for ( int f = 0; f < featdim; f++ )
+      {
+        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
+        if ( val > 1e-10 )
+          ( *example.svec ) [f] = val;
+      }
+      example.svec->normalize();
+
+      ClassificationResult cr = classifier->classify ( example );
+
+      double maxMeanAbs ( 0.0 );
+
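+      // find the largest absolute mean over all classes that were allowed during training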
+      for ( int j = 0 ; j < cr.scores.size(); j++ )
+      {
+        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
+        {
+          continue;
+        }
+        //check for a larger absolute mean
+        if (abs(cr.scores[j]) > maxMeanAbs)
+        {
+          maxMeanAbs = abs(cr.scores[j]);
+        }
+      }
+
+      double firstTerm (1.0 / sqrt(cr.uncertainty+gpNoise));
+
+      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var + gpNoise)
+      double gpUncertaintyVal = maxMeanAbs*firstTerm; //firstTerm = 1.0 / sqrt(cr.uncertainty+gpNoise)
+
+      int xs = std::max(0, x - testWSize/2);
+      int xe = std::min(xsize - 1, x + testWSize/2);
+      int ys = std::max(0, y - testWSize/2);
+      int ye = std::min(ysize - 1, y + testWSize/2);
+      for (int yl = ys; yl <= ye; yl++)
+      {
+        for (int xl = xs; xl <= xe; xl++)
+        {
+          for ( int j = 0 ; j < cr.scores.size(); j++ )
+          {
+            probabilities ( xl, yl, j ) = cr.scores[j];
+          }
+          segresult ( xl, yl ) = cr.classno;
+          noveltyImage ( xl, yl ) = gpUncertaintyVal;
+        }
+      }
+
+      example.svec->clear();
+    }
+    delete example.svec;
+    example.svec = NULL;
+  }
+}