|
@@ -521,17 +521,18 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- MultiChannelImageT<double> feats;
|
|
|
+ // MultiChannelImageT<double> m_CurrentImageFeatures;
|
|
|
|
|
|
// extract features
|
|
|
- featExtract->getFeats ( img, feats );
|
|
|
- featdim = feats.channels();
|
|
|
- feats.addChannel(featdim);
|
|
|
+ m_CurrentImageFeatures.freeData();
|
|
|
+ featExtract->getFeats ( img, m_CurrentImageFeatures );
|
|
|
+ featdim = m_CurrentImageFeatures.channels();
|
|
|
+ m_CurrentImageFeatures.addChannel(featdim);
|
|
|
|
|
|
for (int c = 0; c < featdim; c++)
|
|
|
{
|
|
|
- ImageT<double> tmp = feats[c];
|
|
|
- ImageT<double> tmp2 = feats[c+featdim];
|
|
|
+ ImageT<double> tmp = m_CurrentImageFeatures[c];
|
|
|
+ ImageT<double> tmp2 = m_CurrentImageFeatures[c+featdim];
|
|
|
|
|
|
NICE::FilterT<double, double, double>::gradientStrength (tmp, tmp2);
|
|
|
}
|
|
@@ -540,7 +541,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
|
|
|
// compute integral images
|
|
|
for ( int c = 0; c < featdim; c++ )
|
|
|
{
|
|
|
- feats.calcIntegral ( c );
|
|
|
+ m_CurrentImageFeatures.calcIntegral ( c );
|
|
|
}
|
|
|
|
|
|
timer.stop();
|
|
@@ -552,7 +553,7 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
|
|
|
//in all other settings, such as active sem seg in general, we do this within the novelty-computation-methods
|
|
|
if ( classifier == NULL )
|
|
|
{
|
|
|
- this->computeClassificationResults( feats, segresult, probabilities, xsize, ysize, featdim);
|
|
|
+ this->computeClassificationResults( m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim);
|
|
|
}
|
|
|
// timer.stop();
|
|
|
//
|
|
@@ -565,38 +566,38 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
|
|
|
{
|
|
|
case GPVARIANCE:
|
|
|
{
|
|
|
- this->computeNoveltyByVariance( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByVariance( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case GPUNCERTAINTY:
|
|
|
{
|
|
|
- this->computeNoveltyByGPUncertainty( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByGPUncertainty( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case GPMINMEAN:
|
|
|
{
|
|
|
std::cerr << "compute novelty using the minimum mean" << std::endl;
|
|
|
- this->computeNoveltyByGPMean( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByGPMean( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case GPMEANRATIO:
|
|
|
{
|
|
|
- this->computeNoveltyByGPMeanRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByGPMeanRatio( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case GPWEIGHTALL:
|
|
|
{
|
|
|
- this->computeNoveltyByGPWeightAll( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByGPWeightAll( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case GPWEIGHTRATIO:
|
|
|
{
|
|
|
- this->computeNoveltyByGPWeightRatio( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByGPWeightRatio( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
case RANDOM:
|
|
|
{
|
|
|
- this->computeNoveltyByRandom( noveltyImage, feats, segresult, probabilities, xsize, ysize, featdim );
|
|
|
+ this->computeNoveltyByRandom( noveltyImage, m_CurrentImageFeatures, segresult, probabilities, xsize, ysize, featdim );
|
|
|
break;
|
|
|
}
|
|
|
default:
|
|
@@ -724,35 +725,40 @@ void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NI
|
|
|
//current most novel region of the image has "higher" novelty score then previous most novel region of all test images worked on so far
|
|
|
// -> save new important features of this region
|
|
|
Examples examples;
|
|
|
+
|
|
|
for ( int y = 0; y < ysize; y += testWSize )
|
|
|
{
|
|
|
for ( int x = 0; x < xsize; x += testWSize)
|
|
|
{
|
|
|
- if(mask(x,y) == maxUncertRegion)
|
|
|
- {
|
|
|
- int classnoTmp = labels(x,y);
|
|
|
- if ( forbidden_classesActiveLearning.find(classnoTmp) != forbidden_classesActiveLearning.end() )
|
|
|
- continue;
|
|
|
-
|
|
|
- Example example(NULL, x, y);
|
|
|
- example.vec = NULL;
|
|
|
- example.svec = new SparseVector ( featdim );
|
|
|
-
|
|
|
- for ( int f = 0; f < featdim; f++ )
|
|
|
+ if(mask(x,y) == maxUncertRegion)
|
|
|
{
|
|
|
- double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
|
|
|
- if ( val > 1e-10 )
|
|
|
- ( *example.svec ) [f] = val;
|
|
|
+ int classnoTmp = labels(x,y);
|
|
|
+ if ( forbidden_classesActiveLearning.find(classnoTmp) != forbidden_classesActiveLearning.end() )
|
|
|
+ continue;
|
|
|
+ Example t_Example(NULL, x, y);
|
|
|
+ t_Example.vec = NULL;
|
|
|
+ t_Example.svec = new SparseVector ( featdim );
|
|
|
+ for ( int f = 0; f < featdim; f++ )
|
|
|
+ {
|
|
|
+// double val = ( *t_Example.svec ) [f];
|
|
|
+ double val = m_CurrentImageFeatures.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
|
|
|
+ if ( val > 1e-10 )
|
|
|
+ ( *t_Example.svec ) [f] = val;
|
|
|
+ }
|
|
|
+ examples.push_back ( pair<int, Example> ( classnoTmp, t_Example ) );
|
|
|
}
|
|
|
- example.svec->normalize();
|
|
|
- examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
|
|
|
- }
|
|
|
}
|
|
|
}
|
|
|
|
|
|
if(examples.size() > 0)
|
|
|
{
|
|
|
std::cerr << "found " << examples.size() << " new examples in the queried region" << std::endl << std::endl;
|
|
|
+ // clean up properly: free the sparse vectors of the previous examples before replacing them
|
|
|
+ for( size_t i=0; i< newTrainExamples.size(); i++)
|
|
|
+ {
|
|
|
+ delete newTrainExamples.at(i).second.svec;
|
|
|
+ newTrainExamples.at(i).second.svec = NULL;
|
|
|
+ }
|
|
|
newTrainExamples.clear();
|
|
|
newTrainExamples = examples;
|
|
|
globalMaxUncert = maxNoveltyScore;
|