@@ -82,7 +82,7 @@ SemSegContextTree::SemSegContextTree (
   string section = "SSContextTree";
   string featsec = "Features";

-  this->lfcw = new LFColorWeijer ( conf );
+  this->lfcw = NULL;
   this->firstiteration = true;
   this->run3Dseg = conf->gB ( section, "run_3dseg", false );
   this->maxSamples = conf->gI ( section, "max_samples", 2000 );
@@ -109,10 +109,13 @@ SemSegContextTree::SemSegContextTree (
   this->pixelWiseLabeling = conf->gB ( section, "pixelWiseLabeling", false );

   if ( useCategorization && cndir == "" )
-    this->fasthik = new GPHIKClassifier ( conf );
+    this->fasthik = new GPHIKClassifierNICE ( conf );
   else
     this->fasthik = NULL;

+  if ( useWeijer )
+    this->lfcw = new LocalFeatureColorWeijer ( conf );
+
   this->classnames = md->getClassNames ( "train" );

   // feature types
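For context, the constructor now leaves lfcw at NULL and allocates the color-name features only when useWeijer is set, and the classifier backend only when categorization is requested. A minimal standalone sketch of this guarded-initialization pattern (plain C++; `Sketch` and `Helper` are hypothetical stand-ins, not types from the patch):

    #include <cstddef>

    struct Config { bool useWeijer; };
    struct Helper { };   // hypothetical stand-in for LocalFeatureColorWeijer

    class Sketch
    {
    public:
      explicit Sketch ( const Config &conf )
          : lfcw ( NULL )            // disabled by default
      {
        if ( conf.useWeijer )
          lfcw = new Helper();       // allocate only when the feature is requested
      }

      ~Sketch ()
      {
        delete lfcw;                 // delete on NULL is a no-op
      }

    private:
      Helper *lfcw;
    };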
@@ -725,7 +728,7 @@ void SemSegContextTree::computeRayFeatImage (
     }
 }

-void SemSegContextTree::computeIntegralImage (
+void SemSegContextTree::updateProbabilityMaps (
   const NICE::MultiChannelImage3DT<unsigned short int> &nodeIndices,
   NICE::MultiChannelImage3DT<double> &feats,
   int firstChannel )
@@ -734,34 +737,9 @@ void SemSegContextTree::computeIntegralImage (
   int ysize = feats.height();
   int zsize = feats.depth();

-  // integral images for raw channels
-  if ( firstiteration && (useFeat3) )
-  {
-#pragma omp parallel for
-    for ( int it = 0; it < ( int ) integralMap.size(); it++ )
-    {
-      int corg = integralMap[it].first;
-      int cint = integralMap[it].second;
-
-      for ( int z = 0; z < zsize; z++ )
-      {
-        for ( int y = 0; y < ysize; y++ )
-        {
-          for ( int x = 0; x < xsize; x++ )
-          {
-            feats ( x, y, z, cint ) = feats ( x, y, z, corg );
-          }
-        }
-      }
-      feats.calcIntegral ( cint );
-    }
-  }
-
   int classes = ( int ) forest[0][0].dist.size();

   // integral images for context channels (probability maps for each class)
-  if (useFeat3 || useFeat4)
-  {
 #pragma omp parallel for
   for ( int c = 0; c < classes; c++ )
   {
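The renamed updateProbabilityMaps keeps only the second half of the old computeIntegralImage: for each class it writes the per-voxel class probability, looked up from the distribution stored at the voxel's current tree node, into a context channel and then integrates that channel. A simplified 2D sketch of the fill step (plain std::vector containers; the names and the omitted integration are illustrative, not the NICE::MultiChannelImage3DT API):

    #include <vector>
    using std::vector;

    // dist[node][c]  = class distribution stored at a tree node
    // nodeIdx[y][x]  = node the pixel currently falls into
    // Returns one probability map per class (before integration).
    vector<vector<vector<double> > > probabilityMaps (
        const vector<vector<int> > &nodeIdx,
        const vector<vector<double> > &dist,
        int classes )
    {
      int h = ( int ) nodeIdx.size();
      int w = ( int ) nodeIdx[0].size();
      vector<vector<vector<double> > > maps (
          classes, vector<vector<double> > ( h, vector<double> ( w, 0.0 ) ) );

      for ( int c = 0; c < classes; c++ )
        for ( int y = 0; y < h; y++ )
          for ( int x = 0; x < w; x++ )
            maps[c][y][x] = dist[ nodeIdx[y][x] ][c];

      // in the real code each map is then turned into an integral image
      // (feats.calcIntegral), so rectangle features over the class
      // probabilities later cost O(1) per query
      return maps;
    }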
@@ -793,7 +771,6 @@ void SemSegContextTree::computeIntegralImage (
     feats.calcIntegral ( firstChannel + c );
   }
-  }
 }

 inline double computeWeight ( const int &d, const int &dim )
@@ -934,7 +911,7 @@ void SemSegContextTree::train ( const LabeledSet * trainp )
     // allfeats.push_back ( feats );

     int amountRegions;
-    // read image and do some simple transformations
+    // convert color to L*a*b, add selected feature channels
     addFeatureMaps ( imgData, filelist, amountRegions );
     allfeats.push_back(imgData);
@@ -1010,14 +987,8 @@ void SemSegContextTree::train ( const LabeledSet * trainp )
   // Type 2: rectangular and Haar-like features on gray value integral channels
   if ( useFeat2 )
-  {
-    integralMap.clear();
     for ( int i = 0; i < rawChannels; i++ )
-    {
       channelType.push_back ( 2 );
-      integralMap.push_back ( pair<int, int> ( i, i + rawChannels + shift ) );
-    }
-  }

   // Type 3: type 2 features on context channels
   if ( useFeat3 )
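Type 2 features evaluate rectangular and Haar-like responses on the integral copies of the raw channels. As a reminder of why the integral channels pay off, a minimal 2D sketch (plain vectors, not the MultiChannelImage3DT interface; coordinates are assumed to stay inside the image): once the integral image exists, any box sum, and therefore any two-box Haar-like feature, needs only a handful of lookups.

    #include <vector>
    using std::vector;

    // ii[y][x] = sum of img over [0,x) x [0,y)  (one extra row/column of zeros)
    vector<vector<double> > integralImage ( const vector<vector<double> > &img )
    {
      int h = ( int ) img.size(), w = ( int ) img[0].size();
      vector<vector<double> > ii ( h + 1, vector<double> ( w + 1, 0.0 ) );
      for ( int y = 0; y < h; y++ )
        for ( int x = 0; x < w; x++ )
          ii[y+1][x+1] = img[y][x] + ii[y][x+1] + ii[y+1][x] - ii[y][x];
      return ii;
    }

    // sum of img over [x0,x1) x [y0,y1) in O(1)
    double boxSum ( const vector<vector<double> > &ii, int x0, int y0, int x1, int y1 )
    {
      return ii[y1][x1] - ii[y0][x1] - ii[y1][x0] + ii[y0][x0];
    }

    // Haar-like "left minus right" response around (x, y), half-width w, height h
    double haarLeftRight ( const vector<vector<double> > &ii, int x, int y, int w, int h )
    {
      return boxSum ( ii, x - w, y, x, y + h ) - boxSum ( ii, x, y, x + w, y + h );
    }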
@@ -1128,42 +1099,18 @@ void SemSegContextTree::train ( const LabeledSet * trainp )
     vector<vector<vector<double> > > lastRegionProbs = regionProbs;

     if ( useFeat1 )
-    {
       for ( int i = 0; i < imgCounter; i++ )
       {
         int numRegions = (int) regionProbs[i].size();
         for ( int r = 0; r < numRegions; r++ )
-        {
           for ( int c = 0; c < classes; c++ )
-          {
             regionProbs[i][r][c] = 0.0;
-          }
-        }
       }
-    }
-
-    //compute integral & ray images
-    int multi = 0, multi2 = 0;
-    if (useFeat3) multi=1;
-    if (useFeat4) multi=2;
-    if (useFeat5) multi2 = 1;
-
-    if ( firstiteration && ( useFeat2 || useFeat3 || useFeat4) )
-      for ( int i = 0; i < imgCounter; i++ )
-        allfeats[i].addChannel ( (multi*classes) + rawChannels );
-
-    if ( firstiteration && useFeat5 )
-      for ( int i = 0; i < imgCounter; i++ )
-        allfeats[i].addChannel ( 24 );
-
-    int firstChannel = channelType.size() - (multi*classes) - (multi2*24);
-    if ( useFeat2 || useFeat3 || useFeat4 )
-      for ( int i = 0; i < imgCounter; i++ )
-        computeIntegralImage ( nodeIndices[i], allfeats[i], firstChannel );
-
-    if ( firstiteration && useFeat5 )
-      for ( int i = 0; i < imgCounter; i++ )
-        computeRayFeatImage( allfeats[i], channelType.size()-24);
+    // initialize & update context channels
+    for ( int i = 0; i < imgCounter; i++)
+      if ( useFeat3 || useFeat4 )
+        this->updateProbabilityMaps ( nodeIndices[i], allfeats[i], rawChannels + shift );

 #ifdef VERBOSE
     Timer timerDepth;
@@ -1291,50 +1238,50 @@ void SemSegContextTree::train ( const LabeledSet * trainp )
       }
     }

-//     assert ( lcounter > 0 && rcounter > 0 );
+    assert ( lcounter > 0 && rcounter > 0 );

-    if ( lcounter <= 0 || rcounter <= 0 )
-    {
-      cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
-      cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
-      cout << "bestig: " << bestig << endl;
+//     if ( lcounter <= 0 || rcounter <= 0 )
+//     {
+//       cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
+//       cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
+//       cout << "bestig: " << bestig << endl;
-      for ( int i = 0; i < imgCounter; i++ )
-      {
-        int xsize = nodeIndices[i].width();
-        int ysize = nodeIndices[i].height();
-        int zsize = nodeIndices[i].depth();
-        int counter = 0;
-
-        for ( int x = 0; x < xsize; x++ )
-        {
-          for ( int y = 0; y < ysize; y++ )
-          {
-            for ( int z = 0; z < zsize; z++ )
-            {
-              if ( lastNodeIndices[i].get ( x, y, tree ) == node )
-              {
-                if ( ++counter > 30 )
-                  break;
-
-                Features feat;
-                feat.feats = &allfeats[i];
-                feat.cTree = tree;
-                feat.tree = &forest[tree];
-                feat.rProbs = &lastRegionProbs[i];
-
-                double val = splitfeat->getVal ( feat, x, y, z );
-                if ( !isfinite ( val ) ) val = 0.0;
-
-                cout << "splitval: " << splitval << " val: " << val << endl;
-              }
-            }
-          }
-        }
-      }
+//       for ( int i = 0; i < imgCounter; i++ )
+//       {
+//         int xsize = nodeIndices[i].width();
+//         int ysize = nodeIndices[i].height();
+//         int zsize = nodeIndices[i].depth();
+//         int counter = 0;
+
+//         for ( int x = 0; x < xsize; x++ )
+//         {
+//           for ( int y = 0; y < ysize; y++ )
+//           {
+//             for ( int z = 0; z < zsize; z++ )
+//             {
+//               if ( lastNodeIndices[i].get ( x, y, tree ) == node )
+//               {
+//                 if ( ++counter > 30 )
+//                   break;
+
+//                 Features feat;
+//                 feat.feats = &allfeats[i];
+//                 feat.cTree = tree;
+//                 feat.tree = &forest[tree];
+//                 feat.rProbs = &lastRegionProbs[i];
+
+//                 double val = splitfeat->getVal ( feat, x, y, z );
+//                 if ( !isfinite ( val ) ) val = 0.0;
+
+//                 cout << "splitval: " << splitval << " val: " << val << endl;
+//               }
+//             }
+//           }
+//         }
+//       }
-      assert ( lcounter > 0 && rcounter > 0 );
-    }
+//       assert ( lcounter > 0 && rcounter > 0 );
+//     }

     for ( int c = 0; c < classes; c++ )
     {
@@ -1438,7 +1385,7 @@ void SemSegContextTree::train ( const LabeledSet * trainp )
       }
     }

-    fasthik->train ( globalCategorFeats, ys );
+    fasthik->train( reinterpret_cast<vector<const NICE::SparseVector *>&>(globalCategorFeats), ys);

     timer.stop();
     cerr << "Time for Categorization: " << timer.getLastAbsolute() << " seconds\n" << endl;
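The reinterpret_cast above reuses the existing vector<NICE::SparseVector*> as a vector of pointers-to-const without copying; strictly speaking the two vector instantiations are unrelated types. A conforming alternative (assuming the training interface can accept the examples by value or const reference) is to copy the pointers, which is cheap since only pointer-sized elements are copied:

    #include <vector>
    using std::vector;

    struct SparseVector { };   // stand-in for NICE::SparseVector

    // Build a pointer-to-const view of the training examples without any cast.
    vector<const SparseVector *> asConstPointers ( const vector<SparseVector *> &in )
    {
      return vector<const SparseVector *> ( in.begin(), in.end() );
    }

    // hypothetical usage:
    //   fasthik->train ( asConstPointers ( globalCategorFeats ), ys );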
@@ -1661,9 +1608,11 @@ void SemSegContextTree::addFeatureMaps (
     }
   }

+  // region feature (unsupervised segmentation)
+  int shift = 0;
   if ( useFeat1 )
   {
-    //using segmentation
+    shift = 1;
     MultiChannelImageT<int> regions;
     regions.reInit( xsize, ysize, zsize );
     amountRegions = segmentation->segRegions ( imgData, regions, imagetype );
@@ -1678,6 +1627,40 @@ void SemSegContextTree::addFeatureMaps (
   }

+  // integral images of raw channels
+  if ( useFeat2 )
+  {
+    imgData.addChannel ( rawChannels );
+
+#pragma omp parallel for
+    for ( int i = 0; i < rawChannels; i++ )
+    {
+      int corg = i;
+      int cint = i + rawChannels + shift;
+
+      for ( int z = 0; z < zsize; z++ )
+        for ( int y = 0; y < ysize; y++ )
+          for ( int x = 0; x < xsize; x++ )
+            imgData ( x, y, z, cint ) = imgData ( x, y, z, corg );
+
+      imgData.calcIntegral ( cint );
+    }
+  }
+
+  int classes = labelmapback.size();
+
+  if ( useFeat3 )
+    imgData.addChannel ( classes );
+
+  if ( useFeat4 )
+    imgData.addChannel ( classes );
+
+  if ( useFeat5 )
+  {
+    imgData.addChannel ( 24 );
+    this->computeRayFeatImage( imgData, imgData.channels()-24);
+  }
+
 }

 void SemSegContextTree::classify (
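addFeatureMaps now appends the integral copies of the raw channels once, right after the feature maps are built, instead of redoing this inside every integral-image update. For reference, a 3D integral volume such as the one produced by calcIntegral can be written as three cumulative sums, one per axis; a standalone sketch with flat std::vector indexing (the IDX helper is illustrative, not the MultiChannelImage3DT API):

    #include <vector>
    using std::vector;

    // In-place 3D integral volume: afterwards v[IDX(x,y,z)] holds the sum of
    // the original values over [0..x] x [0..y] x [0..z].
    void calcIntegral3D ( vector<double> &v, int xsize, int ysize, int zsize )
    {
      // hypothetical row-major index helper
      #define IDX(x,y,z) ( ( (z) * ysize + (y) ) * xsize + (x) )

      for ( int z = 0; z < zsize; z++ )        // cumulative sum along x
        for ( int y = 0; y < ysize; y++ )
          for ( int x = 1; x < xsize; x++ )
            v[IDX(x,y,z)] += v[IDX(x-1,y,z)];

      for ( int z = 0; z < zsize; z++ )        // then along y
        for ( int y = 1; y < ysize; y++ )
          for ( int x = 0; x < xsize; x++ )
            v[IDX(x,y,z)] += v[IDX(x,y-1,z)];

      for ( int z = 1; z < zsize; z++ )        // then along z
        for ( int y = 0; y < ysize; y++ )
          for ( int x = 0; x < xsize; x++ )
            v[IDX(x,y,z)] += v[IDX(x,y,z-1)];

      #undef IDX
    }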
@@ -1727,8 +1710,10 @@ void SemSegContextTree::classify (
   addFeatureMaps ( imgData, filelist, amountRegions );

   vector<int> rSize;
+  int shift = 0;
   if ( useFeat1 )
   {
+    shift = 1;
     regionProbs = vector<vector<double> > ( amountRegions, vector<double> ( classes, 0.0 ) );
     rSize = vector<int> ( amountRegions, 0 );
     for ( int z = 0; z < zsize; z++ )
@@ -1752,39 +1737,19 @@ void SemSegContextTree::classify (
     depth++;
     vector<vector<double> > lastRegionProbs = regionProbs;

-    int multi = 0, multi2 = 0;
-    if (useFeat3) multi=1;
-    if (useFeat4) multi=2;
-    if (useFeat5) multi2=1;
-
-    if ( depth < maxDepth )
-    {
-      //compute integral images
-      if ( firstiteration && ( useFeat2 || useFeat3 || useFeat4 ) )
-        imgData.addChannel ( (multi*classes) + rawChannels );
-
-      //compute canny image
-      if ( firstiteration && useFeat5 )
-        imgData.addChannel ( 24 );
-
-      int firstChannel = channelType.size() - (multi*classes) - (multi2*24);
-      if ( useFeat2 || useFeat3 || useFeat4 )
-        computeIntegralImage ( nodeIndices, imgData, firstChannel );
-
-      if ( firstiteration && useFeat5 )
-        computeRayFeatImage( imgData, channelType.size()-24 );
-    }
-
     if ( useFeat1 )
     {
       int numRegions = ( int ) regionProbs.size();
       for ( int r = 0; r < numRegions; r++ )
-      {
         for ( int c = 0; c < classes; c++ )
-        {
           regionProbs[r][c] = 0.0;
-        }
-      }
+    }
+
+    if ( depth < maxDepth )
+    {
+      int firstChannel = rawChannels + shift;
+      if ( useFeat3 || useFeat4 )
+        this->updateProbabilityMaps ( nodeIndices, imgData, firstChannel );
     }

     double weight = computeWeight ( depth, maxDepth )
@@ -1913,13 +1878,11 @@ void SemSegContextTree::classify (
   {
     globalCategorFeat->setDim ( uniquenumber );
     globalCategorFeat->normalize();
-    int result;
-    SparseVector scores;
-    fasthik->classify( globalCategorFeat, result, scores );
+    ClassificationResult cr = fasthik->classify( globalCategorFeat);
     for ( uint i = 0; i < ( uint ) classes; i++ )
     {
-      cerr << scores.get(i) << " ";
-      if ( scores.get(i) > 0.0/*-0.3*/ )
+      cerr << cr.scores[i] << " ";
+      if ( cr.scores[i] > 0.0/*-0.3*/ )
       {
         classesInImg.push_back ( i );
       }
@@ -2201,12 +2164,6 @@ void SemSegContextTree::store ( std::ostream & os, int format ) const
   }
   os << endl;

-  os << integralMap.size() << endl;
-  for ( int i = 0; i < ( int ) integralMap.size(); i++ )
-  {
-    os << integralMap[i].first << " " << integralMap[i].second << endl;
-  }
-
   os << rawChannels << endl;

   os << uniquenumber << endl;
@@ -2304,20 +2261,32 @@ void SemSegContextTree::restore ( std::istream & is, int format )
   {
     int tmp;
     is >> tmp;
+    switch (tmp)
+    {
+      case 0: useFeat0 = true; break;
+      case 1: useFeat1 = true; break;
+      case 2: useFeat2 = true; break;
+      case 3: useFeat3 = true; break;
+      case 4: useFeat4 = true; break;
+      case 5: useFeat5 = true; break;
+    }
     channelType.push_back ( tmp );
   }

-  integralMap.clear();
-  int iMapSize;
-  is >> iMapSize;
-  for ( int i = 0; i < iMapSize; i++ )
-  {
-    int first;
-    int second;
-    is >> first;
-    is >> second;
-    integralMap.push_back ( pair<int, int> ( first, second ) );
-  }
+  // integralMap is deprecated but kept in RESTORE
+  // for backwards compatibility!
+//   std::vector<std::pair<int, int> > integralMap;
+//   integralMap.clear();
+//   int iMapSize;
+//   is >> iMapSize;
+//   for ( int i = 0; i < iMapSize; i++ )
+//   {
+//     int first;
+//     int second;
+//     is >> first;
+//     is >> second;
+//     integralMap.push_back ( pair<int, int> ( first, second ) );
+//   }

   is >> rawChannels;
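The restore() path above rebuilds the useFeat* switches directly from the serialized channelType entries, so the flags themselves never have to be written separately by store(). A small self-contained sketch of that round trip (plain iostreams; the serialization layout and the useFeat array are illustrative stand-ins for the actual members):

    #include <iostream>
    #include <sstream>
    #include <vector>
    using namespace std;

    int main ()
    {
      // writing: the channel count followed by one feature-type id per channel
      vector<int> channelType;
      channelType.push_back ( 0 );   // raw channel
      channelType.push_back ( 2 );   // integral raw channel
      channelType.push_back ( 3 );   // context channel

      ostringstream os;
      os << channelType.size() << endl;
      for ( size_t i = 0; i < channelType.size(); i++ )
        os << channelType[i] << " ";

      // reading: restore the types and switch the corresponding flags back on
      istringstream is ( os.str() );
      size_t n;
      is >> n;

      bool useFeat[6] = { false, false, false, false, false, false };
      vector<int> restoredChannelType;

      for ( size_t i = 0; i < n; i++ )
      {
        int tmp;
        is >> tmp;
        if ( tmp >= 0 && tmp <= 5 )
          useFeat[tmp] = true;
        restoredChannelType.push_back ( tmp );
      }

      cout << "useFeat2 = " << useFeat[2] << ", useFeat3 = " << useFeat[3] << endl;
      return 0;
    }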