@@ -11,7 +11,9 @@ using namespace NICE;
 
 using namespace OBJREC;
 
 #undef DEBUG_CSURK
 
-#define UNCERTAINTY
+
+#undef UNCERTAINTY
+// #define UNCERTAINTY
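+// when UNCERTAINTY is defined, per-pixel uncertainty maps and several GP-based
+// active-learning scores are computed below and written out as result images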
 
 SemSegCsurka::SemSegCsurka ( const Config *conf,
                              const MultiDataset *md )
@@ -1480,12 +1482,38 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   }
 
 #ifdef UNCERTAINTY
-  vector<FloatImage> uncert;
+  std::vector<FloatImage> uncert;
+  std::vector<FloatImage> gpUncertainty;
+  std::vector<FloatImage> gpMean;
+  std::vector<FloatImage> gpMeanRatio;
+  std::vector<FloatImage> gpWeightAll;
+  std::vector<FloatImage> gpWeightRatio;
+//   std::vector<FloatImage> gpImpactAll;
+//   std::vector<FloatImage> gpImpactRatio;
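+//   (the impact measures stay commented out: they would additionally need the
+//    expensive second term -- see the TODO near the gpImpact computations below)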
+
+  // pre-allocate storage -- one image per scale and method
   for(int s = 0; s < scalesize; s++)
   {
     uncert.push_back(FloatImage(xsize, ysize));
     uncert[s].set(0.0);
+
+    gpUncertainty.push_back(FloatImage(xsize, ysize));
+    gpMean.push_back(FloatImage(xsize, ysize));
+    gpMeanRatio.push_back(FloatImage(xsize, ysize));
+    gpWeightAll.push_back(FloatImage(xsize, ysize));
+    gpWeightRatio.push_back(FloatImage(xsize, ysize));
+/*     gpImpactAll.push_back(FloatImage(xsize, ysize));
+     gpImpactRatio.push_back(FloatImage(xsize, ysize)); */
+
+    gpUncertainty[s].set(0.0);
+    gpMean[s].set(0.0);
+    gpMeanRatio[s].set(0.0);
+    gpWeightAll[s].set(0.0);
+    gpWeightRatio[s].set(0.0);
+//     gpImpactAll[s].set(0.0);
+//     gpImpactRatio[s].set(0.0);
   }
+
   ColorImage imgrgb ( xsize, ysize );
   std::string s;
   std::stringstream out;
@@ -1495,8 +1523,15 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 
   double maxu = -numeric_limits<double>::max();
   double minu = numeric_limits<double>::max();
+
+  double gpNoise = conf->gD("GPHIK", "noise", 0.01);
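+  // gpNoise is added to the predictive variance r.uncertainty in every
+  // GP-based score computed below (see firstTerm)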
+
 #endif
 
+  #ifdef UNCERTAINTY
+  std::cerr << "compute values for the uncertainty measures as well" << std::endl;
+  #endif
+
   if ( classifier != NULL )
   {
     clog << "[log] SemSegCsurka::classifyregions: creating probability maps: classifier != NULL" << endl;
@@ -1508,6 +1543,13 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     {
       ClassificationResult r = classifier->classify ( pce[i].second );
 
+      #ifdef UNCERTAINTY
+      // we need these if we want to compute the GP-AL measures later on
+      double minMeanAbs ( numeric_limits<double>::max() );
+      double maxMeanAbs ( 0.0 );
+      double sndMaxMeanAbs ( 0.0 );
+      #endif
+
       for ( int j = 0 ; j < r.scores.size(); j++ )
       {
         if ( useclass[j] == 0 )
@@ -1515,6 +1557,23 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 
         fV[j] += r.scores[j];
         preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+
+        #ifdef UNCERTAINTY
+        // check whether we found a class with a smaller abs mean than the current minimum
+        if (abs(r.scores[j]) < minMeanAbs)
+          minMeanAbs = abs(r.scores[j]);
+        // check for a larger abs mean as well
+        if (abs(r.scores[j]) > maxMeanAbs)
+        {
+          sndMaxMeanAbs = maxMeanAbs;
+          maxMeanAbs = abs(r.scores[j]);
+        }
+        // and also track the second largest abs mean of all classes
+        else if (abs(r.scores[j]) > sndMaxMeanAbs)
+        {
+          sndMaxMeanAbs = abs(r.scores[j]);
+        }
+        #endif
       }
 
       /*if(r.uncertainty < 0.0)
@@ -1528,6 +1587,89 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
       maxu = std::max ( r.uncertainty, maxu );
       minu = std::min ( r.uncertainty, minu );
+
+
+      double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+
+      // compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
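+      // in this code, var^2 corresponds to r.uncertainty (the predictive variance)
+      // and gpnoise^2 to gpNoise, i.e. firstTerm = 1 / sqrt(var^2 + gpnoise^2)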
+      gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; // firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise)
+
+      // compute results when we take the lowest mean value of all classes
+      gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+
+      // look at the difference in the absolute mean values for the most plausible class
+      // and the second most plausible class
+      gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+
+
+      // compute the weight in the alpha-vector for every sample after assuming it to be
+      // added to the training set.
+      // Thereby, we measure its "importance" for the current model
+      //
+      // double firstTerm is already computed
+      //
+      // the second term is only needed when computing impacts
+      // double secondTerm; // this is the nasty guy :/
+
+      // --- compute the third term
+      // this is the difference between the predicted label and the GT label
+      std::vector<double> diffToPositive; diffToPositive.clear();
+      std::vector<double> diffToNegative; diffToNegative.clear();
+      double diffToNegativeSum(0.0);
+
+      for ( int j = 0 ; j < r.scores.size(); j++ )
+      {
+        if ( useclass[j] == 0 )
+          continue;
+        // look at the difference to +1
+        diffToPositive.push_back(abs(r.scores[j] - 1));
+        // look at the difference to -1
+        diffToNegative.push_back(abs(r.scores[j] + 1));
+        // sum up the differences to -1
+        diffToNegativeSum += abs(r.scores[j] + 1);
+      }
+
+      // let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+      // and use this as the third term for this specific class.
+      // the final value is obtained by minimizing over all classes
+      //
+      // originally, we minimize over all classes after building the final score;
+      // however, the first and the second term do not depend on the choice of
+      // y*, therefore we minimize here already
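+      // i.e. thirdTerm = min over y* of ( |score(y*) - 1| + sum_{y != y*} |score(y) + 1| )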
+      double thirdTerm (numeric_limits<double>::max()) ;
+      for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+      {
+        double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt]) );
+        if (tmpVal < thirdTerm)
+          thirdTerm = tmpVal;
+      }
+      gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;
+
+      // now look at the ratio of the resulting weights for the most plausible
+      // against the second most plausible class
+      double thirdTermMostPlausible ( 0.0 ) ;
+      double thirdTermSecondMostPlausible ( 0.0 ) ;
+      for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+      {
+        if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+        {
+          thirdTermSecondMostPlausible = thirdTermMostPlausible;
+          thirdTermMostPlausible = diffToPositive[tmpCnt];
+        }
+        else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+        {
+          thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+        }
+      }
+      // compute the resulting score
+      gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;
+
+      // finally, look at how this feature would affect the whole model (summarized by the weight vector alpha)
+      // if we used it as an additional training example
+      // TODO this would be REALLY computationally demanding. Do we really want to do this?
+//       gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//       gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
 #endif
     }
   }
@@ -1541,27 +1683,162 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     for ( int i = s; i < ( int ) pce.size(); i += scalesize )
     {
       ClassificationResult r = vclassifier->classify ( * ( pce[i].second.vec ) );
+
+      #ifdef UNCERTAINTY
+      // we need these if we want to compute the GP-AL measures later on
+      double minMeanAbs ( numeric_limits<double>::max() );
+      double maxMeanAbs ( 0.0 );
+      double sndMaxMeanAbs ( 0.0 );
+      #endif
+
       for ( int j = 0 ; j < ( int ) r.scores.size(); j++ )
       {
         if ( useclass[j] == 0 )
           continue;
         fV[j] += r.scores[j];
         preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
+
+        #ifdef UNCERTAINTY
+        // check whether we found a class with a smaller abs mean than the current minimum
+        if (abs(r.scores[j]) < minMeanAbs)
+          minMeanAbs = abs(r.scores[j]);
+        // check for a larger abs mean as well
+        if (abs(r.scores[j]) > maxMeanAbs)
+        {
+          sndMaxMeanAbs = maxMeanAbs;
+          maxMeanAbs = abs(r.scores[j]);
+        }
+        // and also track the second largest abs mean of all classes
+        else if (abs(r.scores[j]) > sndMaxMeanAbs)
+        {
+          sndMaxMeanAbs = abs(r.scores[j]);
+        }
+        #endif
       }
 #ifdef UNCERTAINTY
       uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
       maxu = std::max ( r.uncertainty, maxu );
       minu = std::min ( r.uncertainty, minu );
+
+
+      double firstTerm (1.0 / sqrt(r.uncertainty+gpNoise));
+
+      // compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
+      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
+      gpUncertainty[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs*firstTerm; // firstTerm = 1.0 / sqrt(r.uncertainty+gpNoise)
+
+      // compute results when we take the lowest mean value of all classes
+      gpMean[s] ( pce[i].second.x, pce[i].second.y ) = minMeanAbs;
+
+      // look at the difference in the absolute mean values for the most plausible class
+      // and the second most plausible class
+      gpMeanRatio[s] ( pce[i].second.x, pce[i].second.y ) = maxMeanAbs - sndMaxMeanAbs;
+
+
+      // compute the weight in the alpha-vector for every sample after assuming it to be
+      // added to the training set.
+      // Thereby, we measure its "importance" for the current model
+      //
+      // double firstTerm is already computed
+      //
+      // the second term is only needed when computing impacts
+      // double secondTerm; // this is the nasty guy :/
+
+      // --- compute the third term
+      // this is the difference between the predicted label and the GT label
+      std::vector<double> diffToPositive; diffToPositive.clear();
+      std::vector<double> diffToNegative; diffToNegative.clear();
+      double diffToNegativeSum(0.0);
+
+      for ( int j = 0 ; j < ( int ) r.scores.size(); j++ )
+      {
+        if ( useclass[j] == 0 )
+          continue;
+        // look at the difference to +1
+        diffToPositive.push_back(abs(r.scores[j] - 1));
+        // look at the difference to -1
+        diffToNegative.push_back(abs(r.scores[j] + 1));
+        // sum up the differences to -1
+        diffToNegativeSum += abs(r.scores[j] + 1);
+      }
+
+      // let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
+      // and use this as the third term for this specific class.
+      // the final value is obtained by minimizing over all classes
+      //
+      // originally, we minimize over all classes after building the final score;
+      // however, the first and the second term do not depend on the choice of
+      // y*, therefore we minimize here already
+      double thirdTerm (numeric_limits<double>::max()) ;
+      for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+      {
+        double tmpVal ( diffToPositive[tmpCnt] + (diffToNegativeSum-diffToNegative[tmpCnt]) );
+        if (tmpVal < thirdTerm)
+          thirdTerm = tmpVal;
+      }
+      gpWeightAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm;
+
+      // now look at the ratio of the resulting weights for the most plausible
+      // against the second most plausible class
+      double thirdTermMostPlausible ( 0.0 ) ;
+      double thirdTermSecondMostPlausible ( 0.0 ) ;
+      for(uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++)
+      {
+        if (diffToPositive[tmpCnt] > thirdTermMostPlausible)
+        {
+          thirdTermSecondMostPlausible = thirdTermMostPlausible;
+          thirdTermMostPlausible = diffToPositive[tmpCnt];
+        }
+        else if (diffToPositive[tmpCnt] > thirdTermSecondMostPlausible)
+        {
+          thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
+        }
+      }
+      // compute the resulting score
+      gpWeightRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm;
+
+      // finally, look at how this feature would affect the whole model (summarized by the weight vector alpha)
+      // if we used it as an additional training example
+      // TODO this would be REALLY computationally demanding. Do we really want to do this?
+//       gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
+//       gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
 #endif
     }
   }
 }
 
+  #ifdef UNCERTAINTY
+  std::cerr << "uncertainty values and derived scores successfully computed" << std::endl;
+  #endif
+
 #ifdef UNCERTAINTY
   cout << "maxvdirect: " << maxu << " minvdirect: " << minu << endl;
+  // pre-allocate the image for the filtering later on
   FloatImage gaussUncert ( xsize, ysize );
+
+  // just store the first scale
   ICETools::convertToRGB ( uncert[0], imgrgb );
-  imgrgb.write ( out.str() + "rough.png" );
+  imgrgb.write ( out.str() + "rough.ppm" );
+
+  // pre-allocate memory for the filtering of the scales
+  FloatImage gaussGPUncertainty ( xsize, ysize );
+  FloatImage gaussGPMean ( xsize, ysize );
+  FloatImage gaussGPMeanRatio ( xsize, ysize );
+  FloatImage gaussGPWeightAll ( xsize, ysize );
+  FloatImage gaussGPWeightRatio ( xsize, ysize );
+
+  // just store the first scale for every method
+  ICETools::convertToRGB ( gpUncertainty[0], imgrgb );
+  imgrgb.write ( out.str() + "gpUncertainty.ppm" );
+  ICETools::convertToRGB ( gpMean[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMean.ppm" );
+  ICETools::convertToRGB ( gpMeanRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatio.ppm" );
+  ICETools::convertToRGB ( gpWeightAll[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAll.ppm" );
+  ICETools::convertToRGB ( gpWeightRatio[0], imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatio.ppm" );
+
 #endif
 
   vector<double> scalesVec;
@@ -1659,6 +1936,19 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
 #ifdef UNCERTAINTY
   filterGaussSigmaApproximate<float, float, float> ( uncert[s], sigma, &gaussUncert );
   uncert[s] = gaussUncert;
+
+  // apply the gauss filtering to all scales of every method
+  filterGaussSigmaApproximate<float, float, float> ( gpUncertainty[s], sigma, &gaussGPUncertainty );
+  filterGaussSigmaApproximate<float, float, float> ( gpMean[s], sigma, &gaussGPMean );
+  filterGaussSigmaApproximate<float, float, float> ( gpMeanRatio[s], sigma, &gaussGPMeanRatio );
+  filterGaussSigmaApproximate<float, float, float> ( gpWeightAll[s], sigma, &gaussGPWeightAll );
+  filterGaussSigmaApproximate<float, float, float> ( gpWeightRatio[s], sigma, &gaussGPWeightRatio );
+
+  gpUncertainty[s] = gaussGPUncertainty;
+  gpMean[s] = gaussGPMean;
+  gpMeanRatio[s] = gaussGPMeanRatio;
+  gpWeightAll[s] = gaussGPWeightAll;
+  gpWeightRatio[s] = gaussGPWeightRatio;
 #endif
 }
@@ -1691,8 +1981,20 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       for ( int s = 0; s < ( int ) scalesize; s++ )
       {
         gaussUncert(x,y) += uncert[s](x,y);
+        // and for the other methods as well
+        gaussGPUncertainty(x,y) += gpUncertainty[s](x,y);
+        gaussGPMean(x,y) += gpMean[s](x,y);
+        gaussGPMeanRatio(x,y) += gpMeanRatio[s](x,y);
+        gaussGPWeightAll(x,y) += gpWeightAll[s](x,y);
+        gaussGPWeightRatio(x,y) += gpWeightRatio[s](x,y);
       }
       gaussUncert(x,y)/=scalesize;
+      // and for the other methods as well
+      gaussGPUncertainty(x,y)/=scalesize;
+      gaussGPMean(x,y)/=scalesize;
+      gaussGPMeanRatio(x,y)/=scalesize;
+      gaussGPWeightAll(x,y)/=scalesize;
+      gaussGPWeightRatio(x,y)/=scalesize;
     }
   }
@@ -1726,8 +2028,19 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   gaussUncert(0,0) = 0.0;
   gaussUncert(0,1) = 0.04;
   ICETools::convertToRGB ( gaussUncert, imgrgb );
-  imgrgb.write ( out.str() + "filtered.png" );
-
+  imgrgb.write ( out.str() + "filtered.ppm" );
+
+  ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertaintyFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatioFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAllFiltered.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatioFiltered.ppm" );
+
 #endif
 
 #undef VISSEMSEG
@@ -1793,7 +2106,13 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   vector<vector <double> > regionprob;
 
 #ifdef UNCERTAINTY
-  vector<double> regionUncert;
+  std::vector<double> regionUncert;
+
+  std::vector<double> regionGPUncertainty;
+  std::vector<double> regionGPMean;
+  std::vector<double> regionGPMeanRatio;
+  std::vector<double> regionGPWeightAll;
+  std::vector<double> regionGPWeightRatio;
 #endif
 
   // initialize the probabilities for each region
@@ -1808,6 +2127,12 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     Regionen.push_back ( pair<int, Example> ( 0, Example() ) );
 #ifdef UNCERTAINTY
     regionUncert.push_back ( 0.0 );
+
+    regionGPUncertainty.push_back ( 0.0 );
+    regionGPMean.push_back ( 0.0 );
+    regionGPMeanRatio.push_back ( 0.0 );
+    regionGPWeightAll.push_back ( 0.0 );
+    regionGPWeightRatio.push_back ( 0.0 );
 #endif
   }
@@ -1827,11 +2152,16 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       }
 #ifdef UNCERTAINTY
       regionUncert[pos] += gaussUncert ( x, y );
+
+      regionGPUncertainty[pos] += gaussGPUncertainty ( x, y );
+      regionGPMean[pos] += gaussGPMean ( x, y );
+      regionGPMeanRatio[pos] += gaussGPMeanRatio ( x, y );
+      regionGPWeightAll[pos] += gaussGPWeightAll ( x, y );
+      regionGPWeightRatio[pos] += gaussGPWeightRatio ( x, y );
 #endif
     }
   }
-
   /*
   cout << "regions: " << regionsize << endl;
   cout << "outfeats: " << endl;
@@ -1875,9 +2205,14 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
     Regionen[i].first = maxpos;
 #ifdef UNCERTAINTY
     regionUncert[i] /= Regionen[i].second.weight;
+
+    regionGPUncertainty[i] /= Regionen[i].second.weight;
+    regionGPMean[i] /= Regionen[i].second.weight;
+    regionGPMeanRatio[i] /= Regionen[i].second.weight;
+    regionGPWeightAll[i] /= Regionen[i].second.weight;
+    regionGPWeightRatio[i] /= Regionen[i].second.weight;
 #endif
   }
-
   // label the pixels of each region
   for ( int y = 0; y < ( int ) mask.cols(); y++ )
   {
@@ -1887,9 +2222,15 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
       segresult.setPixel ( x, y, Regionen[pos].first );
 #ifdef UNCERTAINTY
       gaussUncert ( x, y ) = regionUncert[pos];
+
+      gaussGPUncertainty ( x, y ) = regionGPUncertainty[pos];
+      gaussGPMean ( x, y ) = regionGPMean[pos];
+      gaussGPMeanRatio ( x, y ) = regionGPMeanRatio[pos];
+      gaussGPWeightAll ( x, y ) = regionGPWeightAll[pos];
+      gaussGPWeightRatio ( x, y ) = regionGPWeightRatio[pos];
 #endif
     }
-   }
+  }
 #ifdef UNCERTAINTY
   maxu = -numeric_limits<float>::max();
   minu = numeric_limits<float>::max();
@@ -1907,7 +2248,18 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
   // uncert(0,0) = 1;
   // uncert(0,1) = 0;
   ICETools::convertToRGB ( gaussUncert, imgrgb );
-  imgrgb.write ( out.str() + "region.png" );
+  imgrgb.write ( out.str() + "region.ppm" );
+
+  ICETools::convertToRGB ( gaussGPUncertainty, imgrgb );
+  imgrgb.write ( out.str() + "gpUncertaintyRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPMean, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPMeanRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpMeanRatioRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightAll, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightAllRegion.ppm" );
+  ICETools::convertToRGB ( gaussGPWeightRatio, imgrgb );
+  imgrgb.write ( out.str() + "gpWeightRatioRegion.ppm" );
 #endif
 
 #undef WRITEREGIONS