Browse Source

not yet perfect

Bjoern Froehlich 13 years ago
parent
commit
63bc15e7c9

+ 41 - 42
semseg/SemSegContextTree.cpp

@@ -159,8 +159,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do

  try
  {
-    imgCount = ( int ) feats.size();
-    featdim = feats[0].channels();
+    imgCount = (int)feats.size();
  }
  catch ( Exception )
  {
@@ -189,15 +188,15 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
    if ( forbidden_classes.find ( labelmapback[i] ) != forbidden_classes.end() )
      fraction[i] = 0;
    else
-      fraction[i] = ( ( double ) maxSamples ) / ( ( double ) featcounter * a[i] * a.size() );
+      fraction[i] = ( (double)maxSamples ) / ( (double)featcounter * a[i] * a.size() );
  }

  featcounter = 0;

  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
-    int xsize = ( int ) currentfeats[iCounter].width();
-    int ysize = ( int ) currentfeats[iCounter].height();
+    int xsize = (int)currentfeats[iCounter].width();
+    int ysize = (int)currentfeats[iCounter].height();

    for ( int x = 0; x < xsize; x++ )
    {
@@ -206,7 +205,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
        if ( currentfeats[iCounter].get ( x, y, tree ) == node )
        {
          int cn = labels[iCounter] ( x, y );
-          double randD = ( double ) rand() / ( double ) RAND_MAX;
+          double randD = (double)rand() / (double)RAND_MAX;

          if ( labelmap.find ( cn ) == labelmap.end() )
            continue;
@@ -232,7 +231,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do

  for ( mapit = e.begin() ; mapit != e.end(); mapit++ )
  {
-    double p = ( double ) ( *mapit ).second / ( double ) featcounter;
+    double p = (double)( *mapit ).second / (double)featcounter;
    globent += p * log2 ( p );
  }

@@ -248,7 +247,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
  for ( int i = 0; i < featsPerSplit; i++ )
  {
    int x1, x2, y1, y2;
-    int ft = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ftypes );
+    int ft = (int)( (double)rand() / (double)RAND_MAX * (double)ftypes );

    int tmpws = windowSize;

@@ -260,16 +259,16 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
      tmpws *= 4;
    }

-    x1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
-    x2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
-    y1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
-    y2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
+    x1 = (int)( (double)rand() / (double)RAND_MAX * (double)tmpws ) - tmpws / 2;
+    x2 = (int)( (double)rand() / (double)RAND_MAX * (double)tmpws ) - tmpws / 2;
+    y1 = (int)( (double)rand() / (double)RAND_MAX * (double)tmpws ) - tmpws / 2;
+    y2 = (int)( (double)rand() / (double)RAND_MAX * (double)tmpws ) - tmpws / 2;

    if ( ft == 0 )
    {
-      int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) featdim );
-      int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) featdim );
-      int o = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ops.size() );
+      int f1 = (int)( (double)rand() / (double)RAND_MAX * (double)featdim );
+      int f2 = (int)( (double)rand() / (double)RAND_MAX * (double)featdim );
+      int o = (int)( (double)rand() / (double)RAND_MAX * (double)ops.size() );
      Operation *op = ops[o]->clone();
      op->set ( x1, y1, x2, y2, f1, f2, calcVal[ft] );
      op->setContext ( false );
@@ -277,17 +276,17 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
    }
    else if ( ft == 1 )
    {
-      int opssize = ( int ) ops.size();
+      int opssize = (int)ops.size();
      //opssize = 0;
-      int o = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( ( ( double ) cops.size() ) + ( double ) opssize ) );
+      int o = (int)( (double)rand() / (double)RAND_MAX * ( ( (double)cops.size() ) + (double)opssize ) );

      Operation *op;

      if ( o < opssize )
      {
-        int chans = ( int ) forest[0][0].dist.size();
-        int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
-        int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
+        int chans = (int)forest[0][0].dist.size();
+        int f1 = (int)( (double)rand() / (double)RAND_MAX * (double)chans );
+        int f2 = (int)( (double)rand() / (double)RAND_MAX * (double)chans );
        op = ops[o]->clone();
        op->set ( x1, y1, x2, y2, f1, f2, calcVal[ft] );
        op->setContext ( true );
@@ -295,8 +294,8 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do
      else
      {
        int chans = integralImgs[0].channels();
-        int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
-        int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
+        int f1 = (int)( (double)rand() / (double)RAND_MAX * (double)chans );
+        int f2 = (int)( (double)rand() / (double)RAND_MAX * (double)chans );

        o -= opssize;
        op = cops[o]->clone();
@@ -343,7 +342,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do

    for ( int r = 0; r < randomTests; r++ )
    {
-      splits.push_back ( ( ( double ) rand() / ( double ) RAND_MAX*scale ) + minval );
+      splits.push_back ( ( (double)rand() / (double)RAND_MAX*scale ) + minval );
    }

    for ( int run = 0 ; run < randomTests; run++ )
@@ -378,7 +377,7 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do

      for ( mapit = eL.begin() ; mapit != eL.end(); mapit++ )
      {
-        double p = ( double ) ( *mapit ).second / ( double ) counterL;
+        double p = (double)( *mapit ).second / (double)counterL;
        leftent -= p * log2 ( p );
      }

@@ -386,13 +385,13 @@ double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<do

      for ( mapit = eR.begin() ; mapit != eR.end(); mapit++ )
      {
-        double p = ( double ) ( *mapit ).second / ( double ) counterR;
+        double p = (double)( *mapit ).second / (double)counterR;
        rightent -= p * log2 ( p );
      }

      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;

-      double pl = ( double ) counterL / ( double ) ( counterL + counterR );
+      double pl = (double)counterL / (double)( counterL + counterR );

      double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;

@@ -453,7 +452,7 @@ inline double SemSegContextTree::getMeanProb ( const int &x, const int &y, const
    val += forest[tree][currentfeats.get ( x,y,tree ) ].dist[channel];
  }

-  return val / ( double ) nbTrees;
+  return val / (double)nbTrees;
}

void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<SparseVectorInt> &infeats, NICE::MultiChannelImageT<SparseVectorInt> &integralImage )
@@ -494,7 +493,7 @@ void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<un
  int xsize = currentfeats.width();
  int ysize = currentfeats.height();

-  int channels = ( int ) forest[0][0].dist.size();
+  int channels = (int)forest[0][0].dist.size();
#pragma omp parallel for
  for ( int c = 0; c < channels; c++ )
  {
@@ -524,7 +523,7 @@ void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<un
    }
  }

-  int channels2 = ( int ) lfeats.channels();
+  int channels2 = (int)lfeats.channels();

  xsize = lfeats.width();
  ysize = lfeats.height();
@@ -565,7 +564,7 @@ void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<un

inline double computeWeight ( const double &d, const double &dim )
{
-  return 1.0 / ( pow ( 2, ( double ) ( dim - d + 1 ) ) );
+  return 1.0 / ( pow ( 2, (double)( dim - d + 1 ) ) );
}

void SemSegContextTree::train ( const MultiDataset *md )
@@ -737,8 +736,8 @@ void SemSegContextTree::train ( const MultiDataset *md )

  for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
  {
-    int xsize = ( int ) currentfeats[iCounter].width();
-    int ysize = ( int ) currentfeats[iCounter].height();
+    int xsize = (int)currentfeats[iCounter].width();
+    int ysize = (int)currentfeats[iCounter].height();

    for ( int x = 0; x < xsize; x++ )
    {
@@ -753,13 +752,13 @@ void SemSegContextTree::train ( const MultiDataset *md )
    }
  }

-  for ( int i = 0; i < ( int ) a.size(); i++ )
+  for ( int i = 0; i < (int)a.size(); i++ )
  {
-    a[i] /= ( double ) featcounter;
+    a[i] /= (double)featcounter;
  }

#ifdef DEBUG
-  for ( int i = 0; i < ( int ) a.size(); i++ )
+  for ( int i = 0; i < (int)a.size(); i++ )
  {
    cout << "a[" << i << "]: " << a[i] << endl;
  }
@@ -814,7 +813,7 @@ void SemSegContextTree::train ( const MultiDataset *md )

    for ( int tree = 0; tree < nbTrees; tree++ )
    {
-      int t = ( int ) forest[tree].size();
+      int t = (int)forest[tree].size();
      int s = startnode[tree];
      startnode[tree] = t;
      //TODO maybe parallelize this if the next loop would still be parallelized anyway; that one carries more weight
@@ -1098,7 +1097,7 @@ void SemSegContextTree::train ( const MultiDataset *md )
#ifdef DEBUG
  for ( int tree = 0; tree < nbTrees; tree++ )
  {
-    int t = ( int ) forest[tree].size();
+    int t = (int)forest[tree].size();

    for ( int i = 0; i < t; i++ )
    {
@@ -1108,10 +1107,10 @@ void SemSegContextTree::train ( const MultiDataset *md )
      {
        cout <<  ", feat: " << forest[tree][i].feat->writeInfos() << " ";
        opOverview[forest[tree][i].feat->getOps() ]++;
-        contextOverview[forest[tree][i].depth][ ( int ) forest[tree][i].feat->getContext() ]++;
+        contextOverview[forest[tree][i].depth][ (int)forest[tree][i].feat->getContext() ]++;
      }

-      for ( int d = 0; d < ( int ) forest[tree][i].dist.size(); d++ )
+      for ( int d = 0; d < (int)forest[tree][i].dist.size(); d++ )
      {
        cout << " " << forest[tree][i].dist[d];
      }
@@ -1353,7 +1352,7 @@ void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult
      if ( depth < maxDepth )
      {
        //compute integral image
-        int channels = ( int ) labelmap.size() + feats.channels();
+        int channels = (int)labelmap.size() + feats.channels();

        if ( integralImg.width() == 0 )
        {
@@ -1385,7 +1384,7 @@ void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult
#endif

  string cndir = conf->gS ( "SSContextTree", "cndir", "" );
-  int classes = ( int ) probabilities.channels();
+  int classes = (int)probabilities.channels();
  vector<int> useclass ( classes, 1 );
#ifdef WRITEGLOB

@@ -1485,7 +1484,7 @@ void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult
    }
#undef VISUALIZE
#ifdef VISUALIZE
-    for ( int j = 0 ; j < ( int ) probabilities.numChannels; j++ )
+    for ( int j = 0 ; j < (int)probabilities.numChannels; j++ )
    {
      //cout << "class: " << j << endl;//" " << cn.text ( j ) << endl;


+ 7 - 0
semseg/SemSegCsurka.cpp

@@ -1517,6 +1517,13 @@ void SemSegCsurka::classifyregions ( CachedExample *ce, NICE::Image & segresult,
          preMap.set ( pce[i].second.x, pce[i].second.y, r.scores[j], j + s*klassen );
        }

+        if(r.uncertainty < 0.0)
+        {
+          cerr << "uncertainty: " << r.uncertainty << endl;
+          pce[i].second.svec->store(cerr);
+          cerr << endl;
+          exit(-1);
+        }
#ifdef UNCERTAINTY
        uncert[s] ( pce[i].second.x, pce[i].second.y ) = r.uncertainty;
        maxu = std::max ( r.uncertainty, maxu );

+ 18 - 9
semseg/operations/Operations.cpp

@@ -87,6 +87,15 @@ std::string Operation::writeInfos()
  return ss.str();
}

+double Equality::getVal ( const Features &feats, const int &x, const int &y )
+{
+  int xsize, ysize;
+  getXY ( feats, xsize, ysize );
+  double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
+  return (double)(v1 == v2);
+}
+
double Minus::getVal ( const Features &feats, const int &x, const int &y )
{
  int xsize, ysize;
@@ -141,28 +150,28 @@ double IntegralOps::getVal ( const Features &feats, const int &x, const int &y )
{
  int xsize, ysize;
  getXY ( feats, xsize, ysize );
-  return computeMean ( *feats.integralImg, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
+  return computeMean ( *feats.feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
}

double GlobalFeats::getVal ( const Features &feats, const int &x, const int &y )
{
  int xsize, ysize;
  getXY ( feats, xsize, ysize );
-  return computeMean ( *feats.integralImg, 0, 0, xsize - 1, ysize - 1, channel1 );
+  return computeMean ( *feats.feats, 0, 0, xsize - 1, ysize - 1, channel1 );
}

double IntegralCenteredOps::getVal ( const Features &feats, const int &x, const int &y )
{
  int xsize, ysize;
  getXY ( feats, xsize, ysize );
-  return computeMean ( *feats.integralImg, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
+  return computeMean ( *feats.feats, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
}

double BiIntegralCenteredOps::getVal ( const Features &feats, const int &x, const int &y )
{
  int xsize, ysize;
  getXY ( feats, xsize, ysize );
-  return computeMean ( *feats.integralImg, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 ) - computeMean ( *feats.integralImg, BOUND ( x - x2, 0, xsize - 1 ), BOUND ( y - y2, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
+  return computeMean ( *feats.feats, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 ) - computeMean ( *feats.feats, BOUND ( x - x2, 0, xsize - 1 ), BOUND ( y - y2, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
}

double HaarHorizontal::getVal ( const Features &feats, const int &x, const int &y )
@@ -175,7 +184,7 @@ double HaarHorizontal::getVal ( const Features &feats, const int &x, const int &
  int lrx = BOUND ( x + x1, 0, xsize - 1 );
  int lry = BOUND ( y + y1, 0, ysize - 1 );

-  return computeMean ( *feats.integralImg, tlx, tly, lrx, y, channel1 ) - computeMean ( *feats.integralImg, tlx, y, lrx, lry, channel1 );
+  return computeMean ( *feats.feats, tlx, tly, lrx, y, channel1 ) - computeMean ( *feats.feats, tlx, y, lrx, lry, channel1 );
}

double HaarVertical::getVal ( const Features &feats, const int &x, const int &y )
@@ -188,7 +197,7 @@ double HaarVertical::getVal ( const Features &feats, const int &x, const int &y
  int lrx = BOUND ( x + x1, 0, xsize - 1 );
  int lry = BOUND ( y + y1, 0, ysize - 1 );

-  return computeMean ( *feats.integralImg, tlx, tly, x, lry, channel1 ) - computeMean ( *feats.integralImg, x, tly, lrx, lry, channel1 );
+  return computeMean ( *feats.feats, tlx, tly, x, lry, channel1 ) - computeMean ( *feats.feats, x, tly, lrx, lry, channel1 );
}

double HaarDiag::getVal ( const Features &feats, const int &x, const int &y )
@@ -201,7 +210,7 @@ double HaarDiag::getVal ( const Features &feats, const int &x, const int &y )
  int lrx = BOUND ( x + x1, 0, xsize - 1 );
  int lry = BOUND ( y + y1, 0, ysize - 1 );

-  return computeMean ( *feats.integralImg, tlx, tly, x, y, channel1 ) + computeMean ( *feats.integralImg, x, y, lrx, lry, channel1 ) - computeMean ( *feats.integralImg, tlx, y, x, lry, channel1 ) - computeMean ( *feats.integralImg, x, tly, lrx, y, channel1 );
+  return computeMean ( *feats.feats, tlx, tly, x, y, channel1 ) + computeMean ( *feats.feats, x, y, lrx, lry, channel1 ) - computeMean ( *feats.feats, tlx, y, x, lry, channel1 ) - computeMean ( *feats.feats, x, tly, lrx, y, channel1 );
}

double Haar3Horiz::getVal ( const Features &feats, const int &x, const int &y )
@@ -216,7 +225,7 @@ double Haar3Horiz::getVal ( const Features &feats, const int &x, const int &y )
  int lrx = BOUND ( x + x2, 0, xsize - 1 );
  int lry = BOUND ( y + y2, 0, ysize - 1 );

-  return computeMean ( *feats.integralImg, tlx, tly, lrx, mtly, channel1 ) - computeMean ( *feats.integralImg, tlx, mtly, lrx, mlry, channel1 ) + computeMean ( *feats.integralImg, tlx, mlry, lrx, lry, channel1 );
+  return computeMean ( *feats.feats, tlx, tly, lrx, mtly, channel1 ) - computeMean ( *feats.feats, tlx, mtly, lrx, mlry, channel1 ) + computeMean ( *feats.feats, tlx, mlry, lrx, lry, channel1 );
}

double Haar3Vert::getVal ( const Features &feats, const int &x, const int &y )
@@ -231,7 +240,7 @@ double Haar3Vert::getVal ( const Features &feats, const int &x, const int &y )
  int lrx = BOUND ( x + x2, 0, xsize - 1 );
  int lry = BOUND ( y + y2, 0, ysize - 1 );

-  return computeMean ( *feats.integralImg, tlx, tly, mtlx, lry, channel1 ) - computeMean ( *feats.integralImg, mtlx, tly, mlrx, lry, channel1 ) + computeMean ( *feats.integralImg, mlrx, tly, lrx, lry, channel1 );
+  return computeMean ( *feats.feats, tlx, tly, mtlx, lry, channel1 ) - computeMean ( *feats.feats, mtlx, tly, mlrx, lry, channel1 ) + computeMean ( *feats.feats, mlrx, tly, lrx, lry, channel1 );
}

void IntegralOps::set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
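Note: the IntegralOps, GlobalFeats and Haar* hunks above only swap the image passed to computeMean from feats.integralImg to feats.feats; the computeIntegralImage functions and the removed integralImg member suggest that computeMean averages a rectangle via an integral image (summed-area table). The following is a rough single-channel sketch of that technique, with hypothetical Image / buildIntegral / rectMean names rather than the NICE::MultiChannelImageT API.

#include <cassert>
#include <vector>

// Simplified single-channel stand-in for the multi-channel images used above.
typedef std::vector<std::vector<double> > Image;   // image[y][x]

// integral[y][x] = sum of img[j][i] over all i <= x and j <= y (summed-area table)
Image buildIntegral ( const Image &img )
{
  int h = (int)img.size(), w = (int)img[0].size();
  Image integral ( h, std::vector<double> ( w, 0.0 ) );
  for ( int y = 0; y < h; y++ )
    for ( int x = 0; x < w; x++ )
      integral[y][x] = img[y][x]
                     + ( x > 0 ? integral[y][x-1] : 0.0 )
                     + ( y > 0 ? integral[y-1][x] : 0.0 )
                     - ( x > 0 && y > 0 ? integral[y-1][x-1] : 0.0 );
  return integral;
}

// mean over the inclusive rectangle (tlx,tly)..(lrx,lry) via four table lookups
double rectMean ( const Image &integral, int tlx, int tly, int lrx, int lry )
{
  assert ( tlx <= lrx && tly <= lry );
  double sum = integral[lry][lrx]
             - ( tlx > 0 ? integral[lry][tlx-1] : 0.0 )
             - ( tly > 0 ? integral[tly-1][lrx] : 0.0 )
             + ( tlx > 0 && tly > 0 ? integral[tly-1][tlx-1] : 0.0 );
  int area = ( lrx - tlx + 1 ) * ( lry - tly + 1 );
  return sum / (double)area;
}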

+ 52 - 3
semseg/operations/Operations.h

@@ -44,6 +44,7 @@ enum OperationTypes {
  RELATIVEXPOSITION,
  RELATIVEYPOSITION,
  GLOBALFEATS,
+  EQUALITY,
  NBOPERATIONS
};

@@ -103,9 +104,6 @@ struct Features {

  /** tree nodes */
  std::vector<TreeNode> *tree;
-
-  /** integral images for faster feature computation */
-  NICE::MultiChannelImageT<double> *integralImg;
};

/**
@@ -220,6 +218,7 @@ class ClassificationResultAccess: public ValueAccess
    }
};

+#if 0
/**
 * @brief not finished yet, do we really need sparse feature representation or ClassificationResultAccess sufficient
 **/
@@ -265,6 +264,7 @@ class SparseImageAccess: public ValueAccess
      return CONTEXT;
    }
};
+#endif

/**
 * @brief abstract operation class
@@ -362,6 +362,55 @@ class Operation
    virtual void restore ( std::istream & is );
};

+/**
+ * @brief simple equality check ?(A==B)
+ **/
+class Equality: public Operation
+{
+  public:
+    /**
+     * @brief interface for feature computation
+     * @param feats features
+     * @param cfeats number of tree node for each pixel
+     * @param tree current tree
+     * @param x current x position
+     * @param y current y position
+     * @return double distance
+     **/
+    virtual double getVal ( const Features &feats, const int &x, const int &y );
+
+    /**
+     * @brief clone operation instead of copy constructor (copy constructor does not work)
+     **/
+    virtual Operation* clone()
+    {
+      return new Equality();
+    }
+
+    /**
+     * @brief print some infos about operation extraction type
+     * @return string feature type
+     **/
+    virtual std::string writeInfos()
+    {
+      std::string out = "Equality";
+
+      if ( values != NULL )
+        out += values->writeInfos();
+
+      return out + Operation::writeInfos();
+    }
+
+    /**
+     * @brief return operation type (for store and restor)
+     * @return OperationTypes
+     **/
+    virtual OperationTypes getOps()
+    {
+      return EQUALITY;
+    }
+};
+
/**
 * @brief simple difference operation A-B
  **/
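Note: the new Equality operation added above reads two boundary-clamped feature values and returns 1.0 when they are equal, 0.0 otherwise. Below is a standalone illustration of that test, using plain nested vectors instead of the Features / ValueAccess machinery; the names are hypothetical and it assumes BOUND ( x, lo, hi ) clamps x into [lo, hi].

#include <algorithm>  // std::min, std::max
#include <vector>

// Assumed equivalent of the BOUND macro: clip x into [lo, hi].
static int bound ( int x, int lo, int hi )
{
  return std::max ( lo, std::min ( x, hi ) );
}

// Hypothetical stand-in for Equality::getVal: compare the value at the clamped
// offset (x+x1, y+y1) in channel c1 with the one at (x+x2, y+y2) in channel c2.
double equalityTest ( const std::vector<std::vector<std::vector<double> > > &feats, // feats[channel][y][x]
                      int x, int y, int x1, int y1, int x2, int y2, int c1, int c2 )
{
  int ysize = (int)feats[0].size();
  int xsize = (int)feats[0][0].size();
  double v1 = feats[c1][ bound ( y + y1, 0, ysize - 1 ) ][ bound ( x + x1, 0, xsize - 1 ) ];
  double v2 = feats[c2][ bound ( y + y2, 0, ysize - 1 ) ][ bound ( x + x2, 0, xsize - 1 ) ];
  return (double)( v1 == v2 );  // exact comparison, mirroring the committed code
}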