- #include "SemSegContextTree.h"
- #include "vislearning/baselib/Globals.h"
- #include "vislearning/baselib/ProgressBar.h"
- #include "core/basics/StringTools.h"
- #include "vislearning/cbaselib/CachedExample.h"
- #include "vislearning/cbaselib/PascalResults.h"
- #include "vislearning/baselib/ColorSpace.h"
- #include "objrec/segmentation/RSMeanShift.h"
- #include "objrec/segmentation/RSGraphBased.h"
- #include "core/basics/numerictools.h"
- #include "core/basics/Timer.h"
- #include <omp.h>
- #include <iostream>
- #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
- #undef LOCALFEATS
- //#define LOCALFEATS
- using namespace OBJREC;
- using namespace std;
- using namespace NICE;
- class MCImageAccess: public ValueAccess
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y, const int &channel )
- {
- return feats.feats->get( x, y, channel );
- }
- virtual string writeInfos()
- {
- return "raw";
- }
- };
- class ClassificationResultAccess: public ValueAccess
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y, const int &channel )
- {
- return ( *feats.tree )[feats.cfeats->get( x,y,feats.cTree )].dist[channel];
- }
- virtual string writeInfos()
- {
- return "context";
- }
- };
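- // Two ways of reading feature values: MCImageAccess returns the raw (pixel) feature
- // channels, while ClassificationResultAccess returns the class distribution stored in the
- // tree node the pixel is currently assigned to, i.e. the semantic context from the
- // previous level of the forest.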
- class Minus: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
- return v1 - v2;
- }
- virtual Operation* clone()
- {
- return new Minus();
- }
- virtual string writeInfos()
- {
- string out = "Minus";
- if ( values != NULL )
- out += values->writeInfos();
- return out;
- }
- virtual OperationTypes getOps()
- {
- return MINUS;
- }
- };
- class MinusAbs: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
- return fabs( v1 - v2 );
- }
- virtual Operation* clone()
- {
- return new MinusAbs();
- }
- virtual string writeInfos()
- {
- string out = "MinusAbs";
- if ( values != NULL )
- out += values->writeInfos();
- return out;
- }
- virtual OperationTypes getOps()
- {
- return MINUSABS;
- }
- };
- class Addition: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal( feats, BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel2 );
- return v1 + v2;
- }
- virtual Operation* clone()
- {
- return new Addition();
- }
- virtual string writeInfos()
- {
- string out = "Addition";
- if ( values != NULL )
- out += values->writeInfos();
- return out;
- }
- virtual OperationTypes getOps()
- {
- return ADDITION;
- }
- };
- class Only1: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- double v1 = values->getVal( feats, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
- return v1;
- }
- virtual Operation* clone()
- {
- return new Only1();
- }
- virtual string writeInfos()
- {
- string out = "Only1";
- if ( values != NULL )
- out += values->writeInfos();
- return out;
- }
- virtual OperationTypes getOps()
- {
- return ONLY1;
- }
- };
- class RelativeXPosition: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return ( double )x / ( double )xsize;
- }
- virtual Operation* clone()
- {
- return new RelativeXPosition();
- }
- virtual string writeInfos()
- {
- return "RelativeXPosition";
- }
- virtual OperationTypes getOps()
- {
- return RELATIVEXPOSITION;
- }
- };
- class RelativeYPosition: public Operation
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return ( double )y / ( double )ysize;
- }
- virtual Operation* clone()
- {
- return new RelativeYPosition();
- }
- virtual string writeInfos()
- {
- return "RelativeYPosition";
- }
- virtual OperationTypes getOps()
- {
- return RELATIVEYPOSITION;
- }
- };
- // uses mean of classification in window given by (x1,y1) (x2,y2)
- class IntegralOps: public Operation
- {
- public:
- virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = min( _x1, _x2 );
- y1 = min( _y1, _y2 );
- x2 = max( _x1, _x2 );
- y2 = max( _y1, _y2 );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return computeMean( *feats.integralImg, BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel1 );
- }
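- // mean over the rectangle [uLx,lRx] x [uLy,lRy], computed from the integral image:
- // sum = I(lRx,lRy) - I(lRx,uLy) - I(uLx,lRy) + I(uLx,uLy), divided by the rectangle area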
- inline double computeMean( const NICE::MultiChannelImageT<double> &intImg, const int &uLx, const int &uLy, const int &lRx, const int &lRy, const int &chan )
- {
- double val1 = intImg.get( uLx, uLy, chan );
- double val2 = intImg.get( lRx, uLy, chan );
- double val3 = intImg.get( uLx, lRy, chan );
- double val4 = intImg.get( lRx, lRy, chan );
- double area = ( lRx - uLx ) * ( lRy - uLy );
- if ( area == 0 )
- return 0.0;
- return ( val1 + val4 - val2 - val3 ) / area;
- }
- virtual Operation* clone()
- {
- return new IntegralOps();
- }
- virtual string writeInfos()
- {
- return "IntegralOps";
- }
- virtual OperationTypes getOps()
- {
- return INTEGRAL;
- }
- };
- //like a global bag of words to model the current appearance of classes in an image without local context
- class GlobalFeats: public IntegralOps
- {
- public:
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return computeMean( *feats.integralImg, 0, 0, xsize - 1, ysize - 1, channel1 );
- }
- virtual Operation* clone()
- {
- return new GlobalFeats();
- }
- virtual string writeInfos()
- {
- return "GlobalFeats";
- }
- virtual OperationTypes getOps()
- {
- return GLOBALFEATS;
- }
- };
- //uses the mean of the integral image over a window of half width x1 and half height y1 centered on the current pixel
- class IntegralCenteredOps: public IntegralOps
- {
- public:
- virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = abs( _x1 );
- y1 = abs( _y1 );
- x2 = abs( _x2 );
- y2 = abs( _y2 );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return computeMean( *feats.integralImg, BOUND( x - x1, 0, xsize - 1 ), BOUND( y - y1, 0, ysize - 1 ), BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 );
- }
- virtual Operation* clone()
- {
- return new IntegralCenteredOps();
- }
- virtual string writeInfos()
- {
- return "IntegralCenteredOps";
- }
- virtual OperationTypes getOps()
- {
- return INTEGRALCENT;
- }
- };
- //uses the difference of the means of two windows centered on the current pixel, where (x1,y1) is the half width/height of window 1 and (x2,y2) that of window 2
- class BiIntegralCenteredOps: public IntegralCenteredOps
- {
- public:
- virtual void set( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = min( abs( _x1 ), abs( _x2 ) );
- y1 = min( abs( _y1 ), abs( _y2 ) );
- x2 = max( abs( _x1 ), abs( _x2 ) );
- y2 = max( abs( _y1 ), abs( _y2 ) );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- return computeMean( *feats.integralImg, BOUND( x - x1, 0, xsize - 1 ), BOUND( y - y1, 0, ysize - 1 ), BOUND( x + x1, 0, xsize - 1 ), BOUND( y + y1, 0, ysize - 1 ), channel1 ) - computeMean( *feats.integralImg, BOUND( x - x2, 0, xsize - 1 ), BOUND( y - y2, 0, ysize - 1 ), BOUND( x + x2, 0, xsize - 1 ), BOUND( y + y2, 0, ysize - 1 ), channel1 );
- }
- virtual Operation* clone()
- {
- return new BiIntegralCenteredOps();
- }
- virtual string writeInfos()
- {
- return "BiIntegralCenteredOps";
- }
- virtual OperationTypes getOps()
- {
- return BIINTEGRALCENT;
- }
- };
- /** horizontal Haar features
- * ++
- * --
- */
- class HaarHorizontal: public IntegralCenteredOps
- {
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- int tlx = BOUND( x - x1, 0, xsize - 1 );
- int tly = BOUND( y - y1, 0, ysize - 1 );
- int lrx = BOUND( x + x1, 0, xsize - 1 );
- int lry = BOUND( y + y1, 0, ysize - 1 );
- return computeMean( *feats.integralImg, tlx, tly, lrx, y, channel1 ) - computeMean( *feats.integralImg, tlx, y, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarHorizontal();
- }
- virtual string writeInfos()
- {
- return "HaarHorizontal";
- }
- virtual OperationTypes getOps()
- {
- return HAARHORIZ;
- }
- };
- /** vertical Haar features
- * +-
- * +-
- */
- class HaarVertical: public IntegralCenteredOps
- {
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- int tlx = BOUND( x - x1, 0, xsize - 1 );
- int tly = BOUND( y - y1, 0, ysize - 1 );
- int lrx = BOUND( x + x1, 0, xsize - 1 );
- int lry = BOUND( y + y1, 0, ysize - 1 );
- return computeMean( *feats.integralImg, tlx, tly, x, lry, channel1 ) - computeMean( *feats.integralImg, x, tly, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarVertical();
- }
- virtual string writeInfos()
- {
- return "HaarVertical";
- }
- virtual OperationTypes getOps()
- {
- return HAARVERT;
- }
- };
- /** diagonal Haar features
- * +-
- * -+
- */
- class HaarDiag: public IntegralCenteredOps
- {
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- int tlx = BOUND( x - x1, 0, xsize - 1 );
- int tly = BOUND( y - y1, 0, ysize - 1 );
- int lrx = BOUND( x + x1, 0, xsize - 1 );
- int lry = BOUND( y + y1, 0, ysize - 1 );
- return computeMean( *feats.integralImg, tlx, tly, x, y, channel1 ) + computeMean( *feats.integralImg, x, y, lrx, lry, channel1 ) - computeMean( *feats.integralImg, tlx, y, x, lry, channel1 ) - computeMean( *feats.integralImg, x, tly, lrx, y, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarDiag();
- }
- virtual string writeInfos()
- {
- return "HaarDiag";
- }
- virtual OperationTypes getOps()
- {
- return HAARDIAG;
- }
- };
- /** horizontal Haar features
- * +++
- * ---
- * +++
- */
- class Haar3Horiz: public BiIntegralCenteredOps
- {
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- int tlx = BOUND( x - x2, 0, xsize - 1 );
- int tly = BOUND( y - y2, 0, ysize - 1 );
- int mtly = BOUND( y - y1, 0, ysize - 1 );
- int mlry = BOUND( y + y1, 0, ysize - 1 );
- int lrx = BOUND( x + x2, 0, xsize - 1 );
- int lry = BOUND( y + y2, 0, ysize - 1 );
- return computeMean( *feats.integralImg, tlx, tly, lrx, mtly, channel1 ) - computeMean( *feats.integralImg, tlx, mtly, lrx, mlry, channel1 ) + computeMean( *feats.integralImg, tlx, mlry, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new Haar3Horiz();
- }
- virtual string writeInfos()
- {
- return "Haar3Horiz";
- }
- virtual OperationTypes getOps()
- {
- return HAAR3HORIZ;
- }
- };
- /** vertical Haar features
- * +-+
- * +-+
- * +-+
- */
- class Haar3Vert: public BiIntegralCenteredOps
- {
- virtual double getVal( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY( feats, xsize, ysize );
- int tlx = BOUND( x - x2, 0, xsize - 1 );
- int tly = BOUND( y - y2, 0, ysize - 1 );
- int mtlx = BOUND( x - x1, 0, xsize - 1 );
- int mlrx = BOUND( x + x1, 0, xsize - 1 );
- int lrx = BOUND( x + x2, 0, xsize - 1 );
- int lry = BOUND( y + y2, 0, ysize - 1 );
- return computeMean( *feats.integralImg, tlx, tly, mtlx, lry, channel1 ) - computeMean( *feats.integralImg, mtlx, tly, mlrx, lry, channel1 ) + computeMean( *feats.integralImg, mlrx, tly, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new Haar3Vert();
- }
- virtual string writeInfos()
- {
- return "Haar3Vert";
- }
- virtual OperationTypes getOps()
- {
- return HAAR3VERT;
- }
- };
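- // The constructor reads all parameters from the config section "SSContextTree", sets up
- // the pools of split operations (ops: raw-feature operations, cops: integral/context
- // operations), chooses the segmentation backend used for the final labeling and then
- // directly trains the forest on the training set.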
- SemSegContextTree::SemSegContextTree( const Config *conf, const MultiDataset *md )
- : SemanticSegmentation( conf, &( md->getClassNames( "train" ) ) )
- {
- this->conf = conf;
- string section = "SSContextTree";
- lfcw = new LFColorWeijer( conf );
- grid = conf->gI( section, "grid", 10 );
- maxSamples = conf->gI( section, "max_samples", 2000 );
- minFeats = conf->gI( section, "min_feats", 50 );
- maxDepth = conf->gI( section, "max_depth", 10 );
- windowSize = conf->gI( section, "window_size", 16 );
- featsPerSplit = conf->gI( section, "feats_per_split", 200 );
- useShannonEntropy = conf->gB( section, "use_shannon_entropy", true );
- nbTrees = conf->gI( section, "amount_trees", 1 );
- string segmentationtype = conf->gS( section, "segmentation_type", "meanshift" );
- useGaussian = conf->gB( section, "use_gaussian", true );
- if ( useGaussian )
- throw( "there something wrong with using gaussian! first fix it!" );
- pixelWiseLabeling = false;
- if ( segmentationtype == "meanshift" )
- segmentation = new RSMeanShift( conf );
- else if ( segmentationtype == "none" )
- {
- segmentation = NULL;
- pixelWiseLabeling = true;
- }
- else if ( segmentationtype == "felzenszwalb" )
- segmentation = new RSGraphBased( conf );
- else
- throw( "no valid segmenation_type\n please choose between none, meanshift and felzenszwalb\n" );
- ftypes = conf->gI( section, "features", 2 );
- ops.push_back( new Minus() );
- ops.push_back( new MinusAbs() );
- ops.push_back( new Addition() );
- ops.push_back( new Only1() );
- ops.push_back( new RelativeXPosition() );
- ops.push_back( new RelativeYPosition() );
- cops.push_back( new BiIntegralCenteredOps() );
- cops.push_back( new IntegralCenteredOps() );
- cops.push_back( new IntegralOps() );
- cops.push_back( new HaarHorizontal() );
- cops.push_back( new HaarVertical() );
- cops.push_back( new HaarDiag() );
- cops.push_back( new Haar3Horiz() );
- cops.push_back( new Haar3Vert() );
- cops.push_back( new GlobalFeats() );
- opOverview = vector<int>( NBOPERATIONS, 0 );
- calcVal.push_back( new MCImageAccess() );
- calcVal.push_back( new ClassificationResultAccess() );
- classnames = md->getClassNames( "train" );
- ///////////////////////////////////
- // Train Segmentation Context Trees
- ///////////////////////////////////
- train( md );
- }
- SemSegContextTree::~SemSegContextTree()
- {
- }
- double SemSegContextTree::getBestSplit( std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree )
- {
- int imgCount = 0, featdim = 0;
- try
- {
- imgCount = ( int )feats.size();
- featdim = feats[0].channels();
- }
- catch ( Exception )
- {
- cerr << "no features computed?" << endl;
- }
- double bestig = -numeric_limits< double >::max();
- splitop = NULL;
- splitval = -1.0;
- set<vector<int> >selFeats;
- map<int, int> e;
- int featcounter = forest[tree][node].featcounter;
-
- if ( featcounter < minFeats )
- {
- //cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
- return 0.0;
- }
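- // class balancing: a pixel of class i is sampled with probability
- // fraction[i] = maxSamples / (featcounter * a[i] * #classes),
- // where a[i] is the relative frequency of class i, so rare classes are kept more often;
- // forbidden classes are never sampled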
- vector<double> fraction( a.size(), 0.0 );
- for ( uint i = 0; i < fraction.size(); i++ )
- {
- if ( forbidden_classes.find( labelmapback[i] ) != forbidden_classes.end() )
- fraction[i] = 0;
- else
- fraction[i] = (( double )maxSamples ) / (( double )featcounter * a[i] * a.size() );
- //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
- }
- featcounter = 0;
- for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
- {
- int xsize = ( int )currentfeats[iCounter].width();
- int ysize = ( int )currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( currentfeats[iCounter].get( x, y, tree ) == node )
- {
- int cn = labels[iCounter]( x, y );
- double randD = ( double )rand() / ( double )RAND_MAX;
- if ( randD < fraction[labelmap[cn]] )
- {
- vector<int> tmp( 3, 0 );
- tmp[0] = iCounter;
- tmp[1] = x;
- tmp[2] = y;
- featcounter++;
- selFeats.insert( tmp );
- e[cn]++;
- }
- }
- }
- }
- }
- //cout << "size: " << selFeats.size() << endl;
- //getchar();
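- // entropy of the class distribution of the sampled pixels in this node:
- // H = -sum_c p_c * log2(p_c); nodes with H < 0.5 are not split any further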
- map<int, int>::iterator mapit;
- double globent = 0.0;
- for ( mapit = e.begin() ; mapit != e.end(); mapit++ )
- {
- //cout << "class: " << mapit->first << ": " << mapit->second << endl;
- double p = ( double )( *mapit ).second / ( double )featcounter;
- globent += p * log2( p );
- }
- globent = -globent;
- if ( globent < 0.5 )
- {
- //cout << "globent to small: " << globent << endl;
- return 0.0;
- }
- int classes = ( int )forest[tree][0].dist.size();
- featsel.clear();
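- // generate featsPerSplit random split candidates: ft == 0 draws an operation on the raw
- // feature channels, ft == 1 draws either an operation on the current class distributions
- // or an integral-image based context operation (with a 4x larger window); the pixel
- // offsets are drawn uniformly from [-w/2, w/2] (or from a Gaussian)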
- for ( int i = 0; i < featsPerSplit; i++ )
- {
- int x1, x2, y1, y2;
- int ft = ( int )(( double )rand() / ( double )RAND_MAX * ( double )ftypes );
- int tmpws = windowSize;
- if ( integralImgs[0].width() == 0 )
- ft = 0;
- if ( ft > 0 )
- {
- tmpws *= 4;
- }
- if ( useGaussian )
- {
- double sigma = ( double )tmpws / 2.0;
- x1 = randGaussDouble( sigma ) * ( double )tmpws;
- x2 = randGaussDouble( sigma ) * ( double )tmpws;
- y1 = randGaussDouble( sigma ) * ( double )tmpws;
- y2 = randGaussDouble( sigma ) * ( double )tmpws;
- }
- else
- {
- x1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
- x2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
- y1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
- y2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )tmpws ) - tmpws / 2;
- }
- if ( ft == 0 )
- {
- int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )featdim );
- int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )featdim );
- int o = ( int )(( double )rand() / ( double )RAND_MAX * ( double )ops.size() );
- Operation *op = ops[o]->clone();
- op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- featsel.push_back( op );
- }
- else if ( ft == 1 )
- {
- int opssize = ( int )ops.size();
- //opssize = 0;
- int o = ( int )(( double )rand() / ( double )RAND_MAX * ((( double )cops.size() ) + ( double )opssize ) );
- Operation *op;
- if ( o < opssize )
- {
- int chans = ( int )forest[0][0].dist.size();
- int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
- int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
- op = ops[o]->clone();
- op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- }
- else
- {
- int chans = integralImgs[0].channels();
- int f1 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
- int f2 = ( int )(( double )rand() / ( double )RAND_MAX * ( double )chans );
- o -= opssize;
- op = cops[o]->clone();
- op->set( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- }
- featsel.push_back( op );
- }
- }
-
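- // evaluate all split candidates in parallel: for every candidate feature compute its value
- // on each sampled pixel, try every such value as threshold and keep the threshold with the
- // highest information gain; the globally best (feature, threshold) pair is selected in the
- // critical section below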
- #pragma omp parallel for private(mapit)
- for ( int f = 0; f < featsPerSplit; f++ )
- {
- double l_bestig = -numeric_limits< double >::max();
- double l_splitval = -1.0;
- set<vector<int> >::iterator it;
- vector<double> vals;
- for ( it = selFeats.begin() ; it != selFeats.end(); it++ )
- {
- Features feat;
- feat.feats = &feats[( *it )[0]];
- feat.cfeats = &currentfeats[( *it )[0]];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[( *it )[0]];
- vals.push_back( featsel[f]->getVal( feat, ( *it )[1], ( *it )[2] ) );
- }
- int counter = 0;
- for ( it = selFeats.begin() ; it != selFeats.end(); it++ , counter++ )
- {
- set<vector<int> >::iterator it2;
- double val = vals[counter];
- map<int, int> eL, eR;
- int counterL = 0, counterR = 0;
- int counter2 = 0;
- for ( it2 = selFeats.begin() ; it2 != selFeats.end(); it2++, counter2++ )
- {
- int cn = labels[( *it2 )[0]](( *it2 )[1], ( *it2 )[2] );
- //cout << "vals[counter2] " << vals[counter2] << " val: " << val << endl;
- if ( vals[counter2] < val )
- {
- //left entropie:
- eL[cn] = eL[cn] + 1;
- counterL++;
- }
- else
- {
- //right entropie:
- eR[cn] = eR[cn] + 1;
- counterR++;
- }
- }
- double leftent = 0.0;
- for ( mapit = eL.begin() ; mapit != eL.end(); mapit++ )
- {
- double p = ( double )( *mapit ).second / ( double )counterL;
- leftent -= p * log2( p );
- }
- double rightent = 0.0;
- for ( mapit = eR.begin() ; mapit != eR.end(); mapit++ )
- {
- double p = ( double )( *mapit ).second / ( double )counterR;
- rightent -= p * log2( p );
- }
- //cout << "rightent: " << rightent << " leftent: " << leftent << endl;
- double pl = ( double )counterL / ( double )( counterL + counterR );
- double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;
- //double ig = globent - rightent - leftent;
- if ( useShannonEntropy )
- {
- double esplit = - ( pl * log( pl ) + ( 1 - pl ) * log( 1 - pl ) );
- ig = 2 * ig / ( globent + esplit );
- }
- if ( ig > l_bestig )
- {
- l_bestig = ig;
- l_splitval = val;
- }
- }
- #pragma omp critical
- {
- //cout << "globent: " << globent << " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
- //cout << "globent: " << globent << " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
- //cout << "p: " << featsubset[f] << endl;
- if ( l_bestig > bestig )
- {
- bestig = l_bestig;
- splitop = featsel[f];
- splitval = l_splitval;
- }
- }
- }
- //getchar();
- //splitop->writeInfos();
- //cout<< "ig: " << bestig << endl;
- //FIXME: delete all features!
- /*for(int i = 0; i < featsPerSplit; i++)
- {
- if(featsel[i] != splitop)
- delete featsel[i];
- }*/
- #ifdef debug
- cout << "globent: " << globent << " bestig " << bestig << " splitval: " << splitval << endl;
- #endif
- return bestig;
- }
- inline double SemSegContextTree::getMeanProb( const int &x, const int &y, const int &channel, const MultiChannelImageT<int> &currentfeats )
- {
- double val = 0.0;
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- val += forest[tree][currentfeats.get( x,y,tree )].dist[channel];
- }
- return val / ( double )nbTrees;
- }
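- // builds the integral image used by the context operations: the first 'classes' channels
- // integrate the per-pixel mean class probabilities of the forest, the remaining channels
- // integrate the raw features (these are only filled while they are still empty);
- // standard recurrence I(x,y) = f(x,y) + I(x-1,y) + I(x,y-1) - I(x-1,y-1)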
- void SemSegContextTree::computeIntegralImage( const NICE::MultiChannelImageT<int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage )
- {
- int xsize = currentfeats.width();
- int ysize = currentfeats.height();
- int channels = ( int )forest[0][0].dist.size();
- #pragma omp parallel for
- for ( int c = 0; c < channels; c++ )
- {
- integralImage.set( 0, 0, getMeanProb( 0, 0, c, currentfeats ), c );
- //first column
- for ( int y = 1; y < ysize; y++ )
- {
- integralImage.set( 0, y, getMeanProb( 0, y, c, currentfeats ) + integralImage.get( 0, y - 1, c ), c );
- }
- //first row
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage.set( x, 0, getMeanProb( x, 0, c, currentfeats ) + integralImage.get( x - 1, 0, c ), c );
- }
- //rest
- for ( int y = 1; y < ysize; y++ )
- {
- for ( int x = 1; x < xsize; x++ )
- {
- double val = getMeanProb( x, y, c, currentfeats ) + integralImage.get( x, y - 1, c ) + integralImage.get( x - 1, y, c ) - integralImage.get( x - 1, y - 1, c );
- integralImage.set( x, y, val, c );
- }
- }
- }
- int channels2 = ( int )lfeats.channels();
- xsize = lfeats.width();
- ysize = lfeats.height();
- if ( integralImage.get( xsize - 1, ysize - 1, channels ) == 0.0 )
- {
- #pragma omp parallel for
- for ( int c1 = 0; c1 < channels2; c1++ )
- {
- int c = channels + c1;
- integralImage.set( 0, 0, lfeats.get( 0, 0, c1 ), c );
- //first column
- for ( int y = 1; y < ysize; y++ )
- {
- integralImage.set( 0, y, lfeats.get( 0, y, c1 ) + integralImage.get( 0, y - 1, c ), c );
- }
- //first row
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage.set( x, 0, lfeats.get( x, 0, c1 ) + integralImage.get( x - 1, 0, c ), c );
- }
- //rest
- for ( int y = 1; y < ysize; y++ )
- {
- for ( int x = 1; x < xsize; x++ )
- {
- double val = lfeats.get( x, y, c1 ) + integralImage.get( x, y - 1, c ) + integralImage.get( x - 1, y, c ) - integralImage.get( x - 1, y - 1, c );
- integralImage.set( x, y, val, c );
- }
- }
- }
- }
- }
- void SemSegContextTree::train( const MultiDataset *md )
- {
- const LabeledSet train = * ( *md )["train"];
- const LabeledSet *trainp = &train;
- ProgressBar pb( "compute feats" );
- pb.show();
- //TODO: this eats a lot of memory! would a sparse representation pay off?
- vector<MultiChannelImageT<double> > allfeats;
- vector<MultiChannelImageT<int> > currentfeats;
- vector<MatrixT<int> > labels;
- std::string forbidden_classes_s = conf->gS( "analysis", "donttrain", "" );
- if ( forbidden_classes_s == "" )
- {
- forbidden_classes_s = conf->gS( "analysis", "forbidden_classes", "" );
- }
- classnames.getSelection( forbidden_classes_s, forbidden_classes );
- int imgcounter = 0;
- /*
- MultiChannelImageT<int> ttmp2(0,0,0);
- MultiChannelImageT<double> ttmp1(100,100,1);
- MultiChannelImageT<double> tint(100,100,1);
- ttmp1.setAll(1.0);
- tint.setAll(0.0);
- computeIntegralImage(ttmp2,ttmp1,tint);
- for(int i = 0; i < cops.size(); i++)
- {
- Features feats;
- feats.feats = &tint;
- feats.cfeats = &ttmp2;
- feats.cTree = 0;
- feats.tree = new vector<TreeNode>;
- feats.integralImg = &tint;
- cops[i]->set(-10, -6, 8, 9, 0, 0, new MCImageAccess());
- cout << "for: " << cops[i]->writeInfos() << endl;
- int y = 50;
- for(int x = 40; x < 44; x++)
- {
- cout << "x: " << x << " val: " << cops[i]->getVal(feats, x, y) << endl;
- }
- }
- getchar();*/
- int amountPixels = 0;
-
- LOOP_ALL_S( *trainp )
- {
- EACH_INFO( classno, info );
- NICE::ColorImage img;
- std::string currentFile = info.img();
- CachedExample *ce = new CachedExample( currentFile );
- const LocalizationResult *locResult = info.localization();
- if ( locResult->size() <= 0 )
- {
- fprintf( stderr, "WARNING: NO ground truth polygons found for %s !\n",
- currentFile.c_str() );
- continue;
- }
- fprintf( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );
- int xsize, ysize;
- ce->getImageSize( xsize, ysize );
- amountPixels += xsize*ysize;
- MatrixT<int> tmpMat( xsize, ysize );
- currentfeats.push_back( MultiChannelImageT<int>( xsize, ysize, nbTrees ) );
- currentfeats[imgcounter].setAll( 0 );
- labels.push_back( tmpMat );
- try {
- img = ColorImage( currentFile );
- } catch ( Exception ) {
- cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
- continue;
- }
- Globals::setCurrentImgFN( currentFile );
- //TODO: resize image?!
- MultiChannelImageT<double> feats;
- allfeats.push_back( feats );
- #ifdef LOCALFEATS
- lfcw->getFeats( img, allfeats[imgcounter] );
- #else
- allfeats[imgcounter].reInit( xsize, ysize, 3, true );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- for ( int r = 0; r < 3; r++ )
- {
- allfeats[imgcounter].set( x, y, img.getPixel( x, y, r ), r );
- }
- }
- }
- allfeats[imgcounter] = ColorSpace::rgbtolab(allfeats[imgcounter]);
- #endif
- // getting groundtruth
- NICE::Image pixelLabels( xsize, ysize );
- pixelLabels.set( 0 );
- locResult->calcLabeledImage( pixelLabels, ( *classNames ).getBackgroundClass() );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- classno = pixelLabels.getPixel( x, y );
- labels[imgcounter]( x, y ) = classno;
- if ( forbidden_classes.find( classno ) != forbidden_classes.end() )
- continue;
- labelcounter[classno]++;
- }
- }
- imgcounter++;
- pb.update( trainp->count() );
- delete ce;
- }
- pb.hide();
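- // map the original class numbers that occur in the training labels to contiguous indices
- // 0..classes-1 (labelmap) and store the inverse mapping (labelmapback); the dense indices
- // are used for the class distributions stored in the tree nodes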
- map<int, int>::iterator mapit;
- int classes = 0;
- for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
- {
- labelmap[mapit->first] = classes;
- labelmapback[classes] = mapit->first;
- classes++;
- }
- //balancing
- int featcounter = 0;
- a = vector<double>( classes, 0.0 );
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = ( int )currentfeats[iCounter].width();
- int ysize = ( int )currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- featcounter++;
- int cn = labels[iCounter]( x, y );
- a[labelmap[cn]] ++;
- }
- }
- }
- for ( int i = 0; i < ( int )a.size(); i++ )
- {
- a[i] /= ( double )featcounter;
- }
- #ifdef DEBUG
- for ( int i = 0; i < ( int )a.size(); i++ )
- {
- cout << "a[" << i << "]: " << a[i] << endl;
- }
- cout << "a.size: " << a.size() << endl;
- #endif
- int depth = 0;
- for ( int t = 0; t < nbTrees; t++ )
- {
- vector<TreeNode> tree;
- tree.push_back( TreeNode() );
- tree[0].dist = vector<double>( classes, 0.0 );
- tree[0].depth = depth;
- tree[0].featcounter = amountPixels;
- forest.push_back( tree );
- }
- vector<int> startnode( nbTrees, 0 );
- bool allleaf = false;
- //int baseFeatSize = allfeats[0].size();
- vector<MultiChannelImageT<double> > integralImgs( imgcounter, MultiChannelImageT<double>() );
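- // grow all trees breadth-first, one depth level per iteration: startnode[t] marks the first
- // node of the current level in tree t, every non-leaf node of that level receives its best
- // split, the pixels are propagated to the new children, and afterwards the integral images
- // of the class probabilities are recomputed so the next level can use them as context features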
- while ( !allleaf && depth < maxDepth )
- {
- allleaf = true;
- vector<MultiChannelImageT<int> > lastfeats = currentfeats;
- #if 1
- Timer timer;
- timer.start();
- #endif
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- int t = ( int ) forest[tree].size();
- int s = startnode[tree];
- startnode[tree] = t;
- //TODO: maybe parallelize this, provided the loop below still gets parallelized as well; that one carries more weight
- //#pragma omp parallel for
- #if 0
- timer.stop();
- cout << "time before tree: " << timer.getLast() << endl;
- timer.start();
- #endif
- for ( int i = s; i < t; i++ )
- {
- if ( !forest[tree][i].isleaf && forest[tree][i].left < 0 )
- {
- #if 0
- timer.stop();
- cout << "time 1: " << timer.getLast() << endl;
- timer.start();
- #endif
- Operation *splitfeat = NULL;
- double splitval;
- double bestig = getBestSplit( allfeats, lastfeats, integralImgs, labels, i, splitfeat, splitval, tree );
- #if 0
- timer.stop();
- cout << "time 2: " << timer.getLast() << endl;
- timer.start();
- #endif
- forest[tree][i].feat = splitfeat;
- forest[tree][i].decision = splitval;
- if ( splitfeat != NULL )
- {
- allleaf = false;
- int left = forest[tree].size();
- forest[tree].push_back( TreeNode() );
- forest[tree].push_back( TreeNode() );
- int right = left + 1;
- forest[tree][i].left = left;
- forest[tree][i].right = right;
- forest[tree][left].dist = vector<double>( classes, 0.0 );
- forest[tree][right].dist = vector<double>( classes, 0.0 );
- forest[tree][left].depth = depth + 1;
- forest[tree][right].depth = depth + 1;
- forest[tree][left].featcounter = 0;
- forest[tree][right].featcounter = 0;
- #if 0
- timer.stop();
- cout << "time 3: " << timer.getLast() << endl;
- timer.start();
- #endif
- #pragma omp parallel for
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = currentfeats[iCounter].width();
- int ysize = currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( currentfeats[iCounter].get( x, y, tree ) == i )
- {
- Features feat;
- feat.feats = &allfeats[iCounter];
- feat.cfeats = &lastfeats[iCounter];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[iCounter];
- double val = splitfeat->getVal( feat, x, y );
- if ( val < splitval )
- {
- currentfeats[iCounter].set( x, y, left, tree );
- forest[tree][left].dist[labelmap[labels[iCounter]( x, y )]]++;
- forest[tree][left].featcounter++;
- }
- else
- {
- currentfeats[iCounter].set( x, y, right, tree );
- forest[tree][right].dist[labelmap[labels[iCounter]( x, y )]]++;
- }
- }
- }
- }
- }
- #if 0
- timer.stop();
- cout << "time 4: " << timer.getLast() << endl;
- timer.start();
- #endif
- forest[tree][right].featcounter = forest[tree][i].featcounter - forest[tree][left].featcounter;
-
- double lcounter = 0.0, rcounter = 0.0;
- for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
- {
- if ( forbidden_classes.find( labelmapback[d] ) != forbidden_classes.end() )
- {
- forest[tree][left].dist[d] = 0;
- forest[tree][right].dist[d] = 0;
- }
- else
- {
- forest[tree][left].dist[d] /= a[d];
- lcounter += forest[tree][left].dist[d];
- forest[tree][right].dist[d] /= a[d];
- rcounter += forest[tree][right].dist[d];
- }
- }
- #if 0
- timer.stop();
- cout << "time 5: " << timer.getLast() << endl;
- timer.start();
- #endif
- if ( lcounter <= 0 || rcounter <= 0 )
- {
- cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
- cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
- cout << "bestig: " << bestig << endl;
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = currentfeats[iCounter].width();
- int ysize = currentfeats[iCounter].height();
- int counter = 0;
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( lastfeats[iCounter].get( x, y, tree ) == i )
- {
- if ( ++counter > 30 )
- break;
- Features feat;
- feat.feats = &allfeats[iCounter];
- feat.cfeats = &lastfeats[iCounter];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[iCounter];
- double val = splitfeat->getVal( feat, x, y );
- cout << "splitval: " << splitval << " val: " << val << endl;
- }
- }
- }
- }
- assert( lcounter > 0 && rcounter > 0 );
- }
- for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
- {
- forest[tree][left].dist[d] /= lcounter;
- forest[tree][right].dist[d] /= rcounter;
- }
- }
- else
- {
- forest[tree][i].isleaf = true;
- }
- }
- }
- #if 0
- timer.stop();
- cout << "time after tree: " << timer.getLast() << endl;
- timer.start();
- #endif
- }
- //TODO: recompute the features!
- //compute integral image
- int channels = classes + allfeats[0].channels();
- #if 0
- timer.stop();
- cout << "time for part0: " << timer.getLast() << endl;
- timer.start();
- #endif
-
- if ( integralImgs[0].width() == 0 )
- {
- for ( int i = 0; i < imgcounter; i++ )
- {
- int xsize = allfeats[i].width();
- int ysize = allfeats[i].height();
- integralImgs[i].reInit( xsize, ysize, channels );
- integralImgs[i].setAll( 0.0 );
- }
- }
- #if 0
- timer.stop();
- cout << "time for part1: " << timer.getLast() << endl;
- timer.start();
- #endif
- #pragma omp parallel for
- for ( int i = 0; i < imgcounter; i++ )
- {
- computeIntegralImage( currentfeats[i], allfeats[i], integralImgs[i] );
- }
- #if 1
- timer.stop();
- cout << "time for depth " << depth << ": " << timer.getLast() << endl;
- #endif
- depth++;
- #ifdef DEBUG
- cout << "depth: " << depth << endl;
- #endif
- }
- #ifdef DEBUG
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- int t = ( int ) forest[tree].size();
- for ( int i = 0; i < t; i++ )
- {
- printf( "tree[%i]: left: %i, right: %i", i, forest[tree][i].left, forest[tree][i].right );
- if ( !forest[tree][i].isleaf && forest[tree][i].left != -1 )
- {
- cout << ", feat: " << forest[tree][i].feat->writeInfos() << " ";
- opOverview[forest[tree][i].feat->getOps()]++;
- }
- for ( int d = 0; d < ( int )forest[tree][i].dist.size(); d++ )
- {
- cout << " " << forest[tree][i].dist[d];
- }
- cout << endl;
- }
- }
- for ( uint c = 0; c < ops.size(); c++ )
- {
- cout << ops[c]->writeInfos() << ": " << opOverview[ops[c]->getOps()] << endl;
- }
- for ( uint c = 0; c < cops.size(); c++ )
- {
- cout << cops[c]->writeInfos() << ": " << opOverview[cops[c]->getOps()] << endl;
- }
- #endif
- }
- void SemSegContextTree::semanticseg( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
- {
- int xsize;
- int ysize;
- ce->getImageSize( xsize, ysize );
- int numClasses = classNames->numClasses();
- fprintf( stderr, "ContextTree classification !\n" );
- probabilities.reInit( xsize, ysize, numClasses, true );
- probabilities.setAll( 0 );
- NICE::ColorImage img;
- std::string currentFile = Globals::getCurrentImgFN();
- try {
- img = ColorImage( currentFile );
- } catch ( Exception ) {
- cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
- return;
- }
- //TODO: resize image?!
- MultiChannelImageT<double> feats;
- #ifdef LOCALFEATS
- lfcw->getFeats( img, feats );
- #else
- feats.reInit( xsize, ysize, 3, true );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- for ( int r = 0; r < 3; r++ )
- {
- feats.set( x, y, img.getPixel( x, y, r ), r );
- }
- }
- }
-
- feats = ColorSpace::rgbtolab(feats);
- #endif
- bool allleaf = false;
- MultiChannelImageT<double> integralImg;
- MultiChannelImageT<int> currentfeats( xsize, ysize, nbTrees );
- currentfeats.setAll( 0 );
- int depth = 0;
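- // inference mirrors the training procedure: all pixels are pushed down the trees one level
- // per iteration, and after each level the integral image of the current class probabilities
- // (plus the raw features) is recomputed to provide the context features for the next level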
- while ( !allleaf )
- {
- allleaf = true;
- //TODO: maybe parallelize this if the loop below would also still be parallelized; that one carries more weight
- //#pragma omp parallel for
- MultiChannelImageT<int> lastfeats = currentfeats;
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- int t = currentfeats.get( x, y, tree );
- if ( forest[tree][t].left > 0 )
- {
- allleaf = false;
- Features feat;
- feat.feats = &feats;
- feat.cfeats = &lastfeats;
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImg;
- double val = forest[tree][t].feat->getVal( feat, x, y );
- if ( val < forest[tree][t].decision )
- {
- currentfeats.set( x, y, forest[tree][t].left, tree );
- }
- else
- {
- currentfeats.set( x, y, forest[tree][t].right, tree );
- }
- }
- }
- }
- //compute integral image
- int channels = ( int )labelmap.size() + feats.channels();
- if ( integralImg.width() == 0 )
- {
- int xsize = feats.width();
- int ysize = feats.height();
- integralImg.reInit( xsize, ysize, channels );
- }
- }
- computeIntegralImage( currentfeats, feats, integralImg );
- depth++;
- }
- if ( pixelWiseLabeling )
- {
- //final labeling:
- long int offset = 0;
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++, offset++ )
- {
- double maxvalue = - numeric_limits<double>::max(); //TODO: this only needs to be done per node, not per pixel
- int maxindex = 0;
- uint s = forest[0][0].dist.size();
- for ( uint i = 0; i < s; i++ )
- {
- probabilities.data[labelmapback[i]][offset] = getMeanProb( x, y, i, currentfeats );
- if ( probabilities.data[labelmapback[i]][offset] > maxvalue )
- {
- maxvalue = probabilities.data[labelmapback[i]][offset];
- maxindex = labelmapback[i];
- }
- }
- segresult.setPixel( x, y, maxindex );
- if ( maxvalue > 1 )
- cout << "maxvalue: " << maxvalue << endl;
- }
- }
- }
- else
- {
- //final labeling using segmentation
- //TODO: segmentation
- Matrix regions;
- int regionNumber = segmentation->segRegions( img, regions );
- cout << "regions: " << regionNumber << endl;
- int dSize = forest[0][0].dist.size();
- vector<vector<double> > regionProbs( regionNumber, vector<double>( dSize, 0.0 ) );
- vector<int> bestlabels( regionNumber, 0 );
- /*
- for(int r = 0; r < regionNumber; r++)
- {
- Image over(img.width(), img.height());
- for(int y = 0; y < img.height(); y++)
- {
- for(int x = 0; x < img.width(); x++)
- {
- if(((int)regions(x,y)) == r)
- over.setPixel(x,y,1);
- else
- over.setPixel(x,y,0);
- }
- }
- cout << "r: " << r << endl;
- showImageOverlay(img, over);
- }
- */
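- // segmentation-based labeling: accumulate the mean class probabilities of all pixels in each
- // region, take the argmax per region and map it back to the original class number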
- for ( int y = 0; y < img.height(); y++ )
- {
- for ( int x = 0; x < img.width(); x++ )
- {
- int cregion = regions( x, y );
- for ( int d = 0; d < dSize; d++ )
- {
- regionProbs[cregion][d] += getMeanProb( x, y, d, currentfeats );
- }
- }
- }
- int roi = 38;
- for ( int r = 0; r < regionNumber; r++ )
- {
- double maxval = regionProbs[r][0];
- bestlabels[r] = 0;
- if ( roi == r )
- {
- cout << "r: " << r << endl;
- cout << "0: " << regionProbs[r][0] << endl;
- }
- for ( int d = 1; d < dSize; d++ )
- {
- if ( maxval < regionProbs[r][d] )
- {
- maxval = regionProbs[r][d];
- bestlabels[r] = d;
- }
- if ( roi == r )
- {
- cout << d << ": " << regionProbs[r][d] << endl;
- }
- }
- if ( roi == r )
- {
- cout << "bestlabel: " << bestlabels[r] << " danach: " << labelmapback[bestlabels[r]] << endl;
- }
- bestlabels[r] = labelmapback[bestlabels[r]];
- }
- for ( int y = 0; y < img.height(); y++ )
- {
- for ( int x = 0; x < img.width(); x++ )
- {
- segresult.setPixel( x, y, bestlabels[regions( x,y )] );
- }
- }
- }
- }