- #include "SemSegContextTree.h"
- #include "vislearning/baselib/Globals.h"
- #include "vislearning/baselib/ProgressBar.h"
- #include "core/basics/StringTools.h"
- #include "vislearning/cbaselib/CachedExample.h"
- #include "vislearning/cbaselib/PascalResults.h"
- #include "vislearning/baselib/ColorSpace.h"
- #include "objrec/segmentation/RSMeanShift.h"
- #include "objrec/segmentation/RSGraphBased.h"
- #include "core/basics/numerictools.h"
- #include "core/basics/Timer.h"
- #include "core/basics/vectorio.h"
- #include <omp.h>
- #include <iostream>
- #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
- #undef LOCALFEATS
- //#define LOCALFEATS
- using namespace OBJREC;
- using namespace std;
- using namespace NICE;
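- // The ValueAccess implementations below decouple feature lookup from the split
- // operations: MCImageAccess reads raw multi-channel pixel values, while
- // ClassificationResultAcess reads the class distribution of the tree node a
- // pixel currently belongs to, which provides the semantic context features.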
- class MCImageAccess: public ValueAccess
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
- {
- return feats.feats->get ( x, y, channel );
- }
- virtual string writeInfos()
- {
- return "raw";
- }
- virtual ValueTypes getType()
- {
- return RAWFEAT;
- }
- };
- class ClassificationResultAcess: public ValueAccess
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
- {
- return ( *feats.tree ) [feats.cfeats->get ( x,y,feats.cTree ) ].dist[channel];
- }
- virtual string writeInfos()
- {
- return "context";
- }
- virtual ValueTypes getType()
- {
- return CONTEXT;
- }
- };
- class SparseImageAcess: public ValueAccess
- {
- private:
- double scale;
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y, const int &channel )
- {
- //MultiChannelImageT<SparseVector> textonMap;
- //TODO: implement access
- return 0.0; // placeholder so the double-returning function is well defined
- }
- virtual string writeInfos()
- {
- return "context";
- }
- virtual ValueTypes getType()
- {
- return CONTEXT;
- }
- };
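- // Operations are serialized as plain text: the six integers x1 x2 y1 y2
- // channel1 channel2, followed by the ValueTypes code of the attached
- // ValueAccess (or -1 if none), which restore() uses to recreate the accessor.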
- void Operation::restore ( std::istream &is )
- {
- is >> x1;
- is >> x2;
- is >> y1;
- is >> y2;
- is >> channel1;
- is >> channel2;
- int tmp;
- is >> tmp;
- if ( tmp >= 0 )
- {
- if ( tmp == RAWFEAT )
- {
- values = new MCImageAccess();
- }
- else if ( tmp == CONTEXT )
- {
- values = new ClassificationResultAcess();
- }
- else
- {
- throw ( "no valid ValueAccess" );
- }
- }
- else
- {
- values = NULL;
- }
- }
- std::string Operation::writeInfos()
- {
- std::stringstream ss;
- ss << " x1: " << x1 << " y1: " << y1 << " x2: " << x2 << " y2: " << y2 << " c1: " << channel1 << " c2: " << channel2;
- return ss.str();
- }
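- // set() draws a random feature type (raw, context or sparse texton, depending
- // on how many channel groups are available), widens the sampling window for
- // context features and then draws the two pixel offsets either from a Gaussian
- // or uniformly from [-ws/2, ws/2).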
- void Operation::set ( int ws, int c1size, int c2size, int c3size, bool useGaussian )
- {
- int types = 1;
- if ( c2size > 0 )
- {
- types++;
- }
- if ( c3size > 0 )
- {
- types++;
- }
-
- types = std::min(types, maxtypes);
- int ft = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) types );
- if ( ft > 0 )
- {
- ws *= 4;
- }
- if ( useGaussian )
- {
- double sigma = ( double ) ws * 2.0;
- x1 = randGaussDouble ( sigma ) * ( double ) ws;
- x2 = randGaussDouble ( sigma ) * ( double ) ws;
- y1 = randGaussDouble ( sigma ) * ( double ) ws;
- y2 = randGaussDouble ( sigma ) * ( double ) ws;
- }
- else
- {
- x1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ws ) - ws / 2;
- x2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ws ) - ws / 2;
- y1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ws ) - ws / 2;
- y2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ws ) - ws / 2;
- }
- if ( ft == RAWFEAT )
- {
- values = new MCImageAccess();
- }
- else if ( ft == CONTEXT )
- {
- values = new ClassificationResultAcess();
- }
- else
- {
- values = new SparseImageAcess();
- }
- }
- class Minus: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
- return v1 - v2;
- }
- virtual Operation* clone()
- {
- return new Minus();
- }
- virtual string writeInfos()
- {
- string out = "Minus";
- if ( values != NULL )
- out += values->writeInfos();
- return out + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return MINUS;
- }
- };
- class MinusAbs: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
- return fabs ( v1 - v2 ); // fabs: avoid the integer abs() overload on doubles
- }
- virtual Operation* clone()
- {
- return new MinusAbs();
- };
- virtual string writeInfos()
- {
- string out = "MinusAbs";
- if ( values != NULL )
- out += values->writeInfos();
- return out + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return MINUSABS;
- }
- };
- class Addition: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
- double v2 = values->getVal ( feats, BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel2 );
- return v1 + v2;
- }
- virtual Operation* clone()
- {
- return new Addition();
- }
- virtual string writeInfos()
- {
- string out = "Addition";
- if ( values != NULL )
- out += values->writeInfos();
- return out + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return ADDITION;
- }
- };
- class Only1: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- double v1 = values->getVal ( feats, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
- return v1;
- }
- virtual Operation* clone()
- {
- return new Only1();
- }
- virtual string writeInfos()
- {
- string out = "Only1";
- if ( values != NULL )
- out += values->writeInfos();
- return out + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return ONLY1;
- }
- };
- class RelativeXPosition: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return ( double ) x / ( double ) xsize;
- }
- virtual Operation* clone()
- {
- return new RelativeXPosition();
- }
- virtual string writeInfos()
- {
- return "RelativeXPosition" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return RELATIVEXPOSITION;
- }
- };
- class RelativeYPosition: public Operation
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return ( double ) y / ( double ) ysize;
- }
- virtual Operation* clone()
- {
- return new RelativeYPosition();
- }
- virtual string writeInfos()
- {
- return "RelativeYPosition" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return RELATIVEYPOSITION;
- }
- };
- // uses mean of classification in window given by (x1,y1) (x2,y2)
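- // computeMean() below uses the standard inclusion-exclusion identity on the
- // integral image I: the sum over the box with upper-left corner (uLx, uLy) and
- // lower-right corner (lRx, lRy) is approximately
- // I(lRx, lRy) - I(lRx, uLy) - I(uLx, lRy) + I(uLx, uLy),
- // which is then divided by the box area (lRx - uLx) * (lRy - uLy).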
- class IntegralOps: public Operation
- {
- public:
- virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = min ( _x1, _x2 );
- y1 = min ( _y1, _y2 );
- x2 = max ( _x1, _x2 );
- y2 = max ( _y1, _y2 );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return computeMean ( *feats.integralImg, BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
- }
- inline double computeMean ( const NICE::MultiChannelImageT<double> &intImg, const int &uLx, const int &uLy, const int &lRx, const int &lRy, const int &chan )
- {
- double val1 = intImg.get ( uLx, uLy, chan );
- double val2 = intImg.get ( lRx, uLy, chan );
- double val3 = intImg.get ( uLx, lRy, chan );
- double val4 = intImg.get ( lRx, lRy, chan );
- double area = ( lRx - uLx ) * ( lRy - uLy );
- if ( area == 0 )
- return 0.0;
- return ( val1 + val4 - val2 - val3 ) / area;
- }
- virtual Operation* clone()
- {
- return new IntegralOps();
- }
- virtual string writeInfos()
- {
- return "IntegralOps" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return INTEGRAL;
- }
- };
- //like a global bag of words to model the current appearance of classes in an image without local context
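- // In practice this averages channel1 of the integral image over the whole
- // image, independent of the query pixel position.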
- class GlobalFeats: public IntegralOps
- {
- public:
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return computeMean ( *feats.integralImg, 0, 0, xsize - 1, ysize - 1, channel1 );
- }
- virtual Operation* clone()
- {
- return new GlobalFeats();
- }
- virtual string writeInfos()
- {
- return "GlobalFeats" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return GLOBALFEATS;
- }
- };
- //uses the integral-image mean over a window of half-extent (x1, y1) centered at the current pixel
- class IntegralCenteredOps: public IntegralOps
- {
- public:
- virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = abs ( _x1 );
- y1 = abs ( _y1 );
- x2 = abs ( _x2 );
- y2 = abs ( _y2 );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return computeMean ( *feats.integralImg, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 );
- }
- virtual Operation* clone()
- {
- return new IntegralCenteredOps();
- }
- virtual string writeInfos()
- {
- return "IntegralCenteredOps" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return INTEGRALCENT;
- }
- };
- //uses the difference of integral-image means over two centered windows, where (x1, y1) are the half-extents of window 1 and (x2, y2) those of window 2
- class BiIntegralCenteredOps: public IntegralCenteredOps
- {
- public:
- virtual void set ( int _x1, int _y1, int _x2, int _y2, int _channel1, int _channel2, ValueAccess *_values )
- {
- x1 = min ( abs ( _x1 ), abs ( _x2 ) );
- y1 = min ( abs ( _y1 ), abs ( _y2 ) );
- x2 = max ( abs ( _x1 ), abs ( _x2 ) );
- y2 = max ( abs ( _y1 ), abs ( _y2 ) );
- channel1 = _channel1;
- channel2 = _channel2;
- values = _values;
- }
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- return computeMean ( *feats.integralImg, BOUND ( x - x1, 0, xsize - 1 ), BOUND ( y - y1, 0, ysize - 1 ), BOUND ( x + x1, 0, xsize - 1 ), BOUND ( y + y1, 0, ysize - 1 ), channel1 ) - computeMean ( *feats.integralImg, BOUND ( x - x2, 0, xsize - 1 ), BOUND ( y - y2, 0, ysize - 1 ), BOUND ( x + x2, 0, xsize - 1 ), BOUND ( y + y2, 0, ysize - 1 ), channel1 );
- }
- virtual Operation* clone()
- {
- return new BiIntegralCenteredOps();
- }
- virtual string writeInfos()
- {
- return "BiIntegralCenteredOps" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return BIINTEGRALCENT;
- }
- };
- /** horizontal Haar features
- * ++
- * --
- */
- class HaarHorizontal: public IntegralCenteredOps
- {
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- int tlx = BOUND ( x - x1, 0, xsize - 1 );
- int tly = BOUND ( y - y1, 0, ysize - 1 );
- int lrx = BOUND ( x + x1, 0, xsize - 1 );
- int lry = BOUND ( y + y1, 0, ysize - 1 );
- return computeMean ( *feats.integralImg, tlx, tly, lrx, y, channel1 ) - computeMean ( *feats.integralImg, tlx, y, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarHorizontal();
- }
- virtual string writeInfos()
- {
- return "HaarHorizontal" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return HAARHORIZ;
- }
- };
- /** vertical Haar features
- * +-
- * +-
- */
- class HaarVertical: public IntegralCenteredOps
- {
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- int tlx = BOUND ( x - x1, 0, xsize - 1 );
- int tly = BOUND ( y - y1, 0, ysize - 1 );
- int lrx = BOUND ( x + x1, 0, xsize - 1 );
- int lry = BOUND ( y + y1, 0, ysize - 1 );
- return computeMean ( *feats.integralImg, tlx, tly, x, lry, channel1 ) - computeMean ( *feats.integralImg, x, tly, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarVertical();
- }
- virtual string writeInfos()
- {
- return "HaarVertical" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return HAARVERT;
- }
- };
- /** diagonal Haar features
- * +-
- * -+
- */
- class HaarDiag: public IntegralCenteredOps
- {
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- int tlx = BOUND ( x - x1, 0, xsize - 1 );
- int tly = BOUND ( y - y1, 0, ysize - 1 );
- int lrx = BOUND ( x + x1, 0, xsize - 1 );
- int lry = BOUND ( y + y1, 0, ysize - 1 );
- return computeMean ( *feats.integralImg, tlx, tly, x, y, channel1 ) + computeMean ( *feats.integralImg, x, y, lrx, lry, channel1 ) - computeMean ( *feats.integralImg, tlx, y, x, lry, channel1 ) - computeMean ( *feats.integralImg, x, tly, lrx, y, channel1 );
- }
- virtual Operation* clone()
- {
- return new HaarDiag();
- }
- virtual string writeInfos()
- {
- return "HaarDiag" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return HAARDIAG;
- }
- };
- /** horizontal Haar features
- * +++
- * ---
- * +++
- */
- class Haar3Horiz: public BiIntegralCenteredOps
- {
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- int tlx = BOUND ( x - x2, 0, xsize - 1 );
- int tly = BOUND ( y - y2, 0, ysize - 1 );
- int mtly = BOUND ( y - y1, 0, ysize - 1 );
- int mlry = BOUND ( y + y1, 0, ysize - 1 );
- int lrx = BOUND ( x + x2, 0, xsize - 1 );
- int lry = BOUND ( y + y2, 0, ysize - 1 );
- return computeMean ( *feats.integralImg, tlx, tly, lrx, mtly, channel1 ) - computeMean ( *feats.integralImg, tlx, mtly, lrx, mlry, channel1 ) + computeMean ( *feats.integralImg, tlx, mlry, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new Haar3Horiz();
- }
- virtual string writeInfos()
- {
- return "Haar3Horiz" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return HAAR3HORIZ;
- }
- };
- /** vertical Haar features
- * +-+
- * +-+
- * +-+
- */
- class Haar3Vert: public BiIntegralCenteredOps
- {
- virtual double getVal ( const Features &feats, const int &x, const int &y )
- {
- int xsize, ysize;
- getXY ( feats, xsize, ysize );
- int tlx = BOUND ( x - x2, 0, xsize - 1 );
- int tly = BOUND ( y - y2, 0, ysize - 1 );
- int mtlx = BOUND ( x - x1, 0, xsize - 1 );
- int mlrx = BOUND ( x + x1, 0, xsize - 1 );
- int lrx = BOUND ( x + x2, 0, xsize - 1 );
- int lry = BOUND ( y + y2, 0, ysize - 1 );
- return computeMean ( *feats.integralImg, tlx, tly, mtlx, lry, channel1 ) - computeMean ( *feats.integralImg, mtlx, tly, mlrx, lry, channel1 ) + computeMean ( *feats.integralImg, mlrx, tly, lrx, lry, channel1 );
- }
- virtual Operation* clone()
- {
- return new Haar3Vert();
- }
- virtual string writeInfos()
- {
- return "Haar3Vert" + Operation::writeInfos();
- }
- virtual OperationTypes getOps()
- {
- return HAAR3VERT;
- }
- };
- SemSegContextTree::SemSegContextTree ( const Config *conf, const MultiDataset *md )
- : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
- {
- this->conf = conf;
- string section = "SSContextTree";
- lfcw = new LFColorWeijer ( conf );
- grid = conf->gI ( section, "grid", 10 );
- maxSamples = conf->gI ( section, "max_samples", 2000 );
- minFeats = conf->gI ( section, "min_feats", 50 );
- maxDepth = conf->gI ( section, "max_depth", 10 );
- windowSize = conf->gI ( section, "window_size", 16 );
- featsPerSplit = conf->gI ( section, "feats_per_split", 200 );
- useShannonEntropy = conf->gB ( section, "use_shannon_entropy", true );
- nbTrees = conf->gI ( section, "amount_trees", 1 );
- string segmentationtype = conf->gS ( section, "segmentation_type", "meanshift" );
- useGaussian = conf->gB ( section, "use_gaussian", true );
- randomTests = conf->gI ( section, "random_tests", 10 );
- bool saveLoadData = conf->gB ( "debug", "save_load_data", false );
- string fileLocation = conf->gS ( "debug", "datafile", "tmp.txt" );
- if ( useGaussian )
- throw ( "there something wrong with using gaussian! first fix it!" );
- pixelWiseLabeling = false;
- if ( segmentationtype == "meanshift" )
- segmentation = new RSMeanShift ( conf );
- else if ( segmentationtype == "none" )
- {
- segmentation = NULL;
- pixelWiseLabeling = true;
- }
- else if ( segmentationtype == "felzenszwalb" )
- segmentation = new RSGraphBased ( conf );
- else
- throw ( "no valid segmenation_type\n please choose between none, meanshift and felzenszwalb\n" );
- ftypes = conf->gI ( section, "features", 2 );
- string featsec = "Features";
- if ( conf->gB ( featsec, "minus", true ) )
- ops.push_back ( new Minus() );
- if ( conf->gB ( featsec, "minus_abs", true ) )
- ops.push_back ( new MinusAbs() );
- if ( conf->gB ( featsec, "addition", true ) )
- ops.push_back ( new Addition() );
- if ( conf->gB ( featsec, "only1", true ) )
- ops.push_back ( new Only1() );
- if ( conf->gB ( featsec, "rel_x", true ) )
- ops.push_back ( new RelativeXPosition() );
- if ( conf->gB ( featsec, "rel_y", true ) )
- ops.push_back ( new RelativeYPosition() );
- if ( conf->gB ( featsec, "bi_int_cent", true ) )
- cops.push_back ( new BiIntegralCenteredOps() );
- if ( conf->gB ( featsec, "int_cent", true ) )
- cops.push_back ( new IntegralCenteredOps() );
- if ( conf->gB ( featsec, "int", true ) )
- cops.push_back ( new IntegralOps() );
- if ( conf->gB ( featsec, "haar_horz", true ) )
- cops.push_back ( new HaarHorizontal() );
- if ( conf->gB ( featsec, "haar_vert", true ) )
- cops.push_back ( new HaarVertical() );
- if ( conf->gB ( featsec, "haar_diag", true ) )
- cops.push_back ( new HaarDiag() );
- if ( conf->gB ( featsec, "haar3_horz", true ) )
- cops.push_back ( new Haar3Horiz() );
- if ( conf->gB ( featsec, "haar3_vert", true ) )
- cops.push_back ( new Haar3Vert() );
- if ( conf->gB ( featsec, "glob", true ) )
- cops.push_back ( new GlobalFeats() );
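- // Illustrative config sketch (values are the defaults used above; the exact
- // file layout depends on the NICE::Config format in use):
- //   [SSContextTree]
- //   grid = 10
- //   max_samples = 2000
- //   min_feats = 50
- //   max_depth = 10
- //   window_size = 16
- //   feats_per_split = 200
- //   use_shannon_entropy = true
- //   amount_trees = 1
- //   segmentation_type = meanshift   ; none | meanshift | felzenszwalb
- //   use_gaussian = false            ; the code above throws if this is true
- //   random_tests = 10
- //   features = 2
- //   [Features]
- //   minus = true
- //   haar_horz = true
- //   ; ... one boolean switch per operation registered above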
- opOverview = vector<int> ( NBOPERATIONS, 0 );
- contextOverview = vector<vector<double> > ( maxDepth, vector<double> ( 2, 0.0 ) );
- calcVal.push_back ( new MCImageAccess() );
- calcVal.push_back ( new ClassificationResultAcess() );
- classnames = md->getClassNames ( "train" );
- ///////////////////////////////////
- // Train Segmentation Context Trees
- ///////////////////////////////////
- if ( saveLoadData )
- {
- if ( FileMgt::fileExists ( fileLocation ) )
- read ( fileLocation );
- else
- {
- train ( md );
- write ( fileLocation );
- }
- }
- else
- {
- train ( md );
- }
- }
- SemSegContextTree::~SemSegContextTree()
- {
- }
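- // getBestSplit: subsamples pixels of the given node (per-class acceptance
- // probabilities derived from the balancing weights a[]), computes the node
- // entropy, draws featsPerSplit random operations, and for each of them tries
- // randomTests random thresholds. The score is the information gain
- // IG = H(node) - pl * H(left) - (1 - pl) * H(right); with useShannonEntropy it
- // is normalized to 2 * IG / (H(node) + H(split)). The best (operation,
- // threshold) pair is returned via splitop / splitval.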
- double SemSegContextTree::getBestSplit ( std::vector<NICE::MultiChannelImageT<double> > &feats, std::vector<NICE::MultiChannelImageT<unsigned short int> > &currentfeats, std::vector<NICE::MultiChannelImageT<double> > &integralImgs, const std::vector<NICE::MatrixT<int> > &labels, int node, Operation *&splitop, double &splitval, const int &tree )
- {
- Timer t;
- t.start();
- int imgCount = 0, featdim = 0;
- try
- {
- imgCount = ( int ) feats.size();
- featdim = feats[0].channels();
- }
- catch ( Exception )
- {
- cerr << "no features computed?" << endl;
- }
- double bestig = -numeric_limits< double >::max();
- splitop = NULL;
- splitval = -1.0;
- set<vector<int> >selFeats;
- map<int, int> e;
- int featcounter = forest[tree][node].featcounter;
- if ( featcounter < minFeats )
- {
- //cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
- return 0.0;
- }
- vector<double> fraction ( a.size(), 0.0 );
- for ( uint i = 0; i < fraction.size(); i++ )
- {
- if ( forbidden_classes.find ( labelmapback[i] ) != forbidden_classes.end() )
- fraction[i] = 0;
- else
- fraction[i] = ( ( double ) maxSamples ) / ( ( double ) featcounter * a[i] * a.size() );
- //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
- }
- featcounter = 0;
- for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
- {
- int xsize = ( int ) currentfeats[iCounter].width();
- int ysize = ( int ) currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( currentfeats[iCounter].get ( x, y, tree ) == node )
- {
- int cn = labels[iCounter] ( x, y );
- double randD = ( double ) rand() / ( double ) RAND_MAX;
- if ( labelmap.find ( cn ) == labelmap.end() )
- continue;
- if ( randD < fraction[labelmap[cn]] )
- {
- vector<int> tmp ( 3, 0 );
- tmp[0] = iCounter;
- tmp[1] = x;
- tmp[2] = y;
- featcounter++;
- selFeats.insert ( tmp );
- e[cn]++;
- }
- }
- }
- }
- }
- //cout << "size: " << selFeats.size() << endl;
- //getchar();
- map<int, int>::iterator mapit;
- double globent = 0.0;
- for ( mapit = e.begin() ; mapit != e.end(); mapit++ )
- {
- //cout << "class: " << mapit->first << ": " << mapit->second << endl;
- double p = ( double ) ( *mapit ).second / ( double ) featcounter;
- globent += p * log2 ( p );
- }
- globent = -globent;
- if ( globent < 0.5 )
- {
- //cout << "globent to small: " << globent << endl;
- return 0.0;
- }
- int classes = ( int ) forest[tree][0].dist.size();
- featsel.clear();
- for ( int i = 0; i < featsPerSplit; i++ )
- {
- int x1, x2, y1, y2;
- int ft = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ftypes );
- int tmpws = windowSize;
- if ( integralImgs[0].width() == 0 )
- ft = 0;
- if ( ft > 0 )
- {
- tmpws *= 4;
- }
- if ( useGaussian )
- {
- double sigma = ( double ) tmpws * 2.0;
- x1 = randGaussDouble ( sigma ) * ( double ) tmpws;
- x2 = randGaussDouble ( sigma ) * ( double ) tmpws;
- y1 = randGaussDouble ( sigma ) * ( double ) tmpws;
- y2 = randGaussDouble ( sigma ) * ( double ) tmpws;
- }
- else
- {
- x1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
- x2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
- y1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
- y2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) tmpws ) - tmpws / 2;
- }
- if ( ft == 0 )
- {
- int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) featdim );
- int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) featdim );
- int o = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) ops.size() );
- Operation *op = ops[o]->clone();
- op->set ( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- op->setContext ( false );
- featsel.push_back ( op );
- }
- else if ( ft == 1 )
- {
- int opssize = ( int ) ops.size();
- //opssize = 0;
- int o = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( ( ( double ) cops.size() ) + ( double ) opssize ) );
- Operation *op;
- if ( o < opssize )
- {
- int chans = ( int ) forest[0][0].dist.size();
- int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
- int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
- op = ops[o]->clone();
- op->set ( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- op->setContext ( true );
- }
- else
- {
- int chans = integralImgs[0].channels();
- int f1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
- int f2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) chans );
- o -= opssize;
- op = cops[o]->clone();
- op->set ( x1, y1, x2, y2, f1, f2, calcVal[ft] );
- if ( f1 < forest[0][0].dist.size() )
- op->setContext ( true );
- else
- op->setContext ( false );
- }
- featsel.push_back ( op );
- }
- }
- #pragma omp parallel for private(mapit)
- for ( int f = 0; f < featsPerSplit; f++ )
- {
- double l_bestig = -numeric_limits< double >::max();
- double l_splitval = -1.0;
- set<vector<int> >::iterator it;
- vector<double> vals;
- double maxval = -numeric_limits<double>::max();
- double minval = numeric_limits<double>::max();
- for ( it = selFeats.begin() ; it != selFeats.end(); it++ )
- {
- Features feat;
- feat.feats = &feats[ ( *it ) [0]];
- feat.cfeats = &currentfeats[ ( *it ) [0]];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[ ( *it ) [0]];
- double val = featsel[f]->getVal ( feat, ( *it ) [1], ( *it ) [2] );
- vals.push_back ( val );
- maxval = std::max ( val, maxval );
- minval = std::min ( val, minval );
- }
- if ( minval == maxval )
- continue;
- double scale = maxval - minval;
- vector<double> splits;
- for ( int r = 0; r < randomTests; r++ )
- {
- splits.push_back ( ( ( double ) rand() / ( double ) RAND_MAX*scale ) + minval );
- }
- for ( int run = 0 ; run < randomTests; run++ )
- {
- set<vector<int> >::iterator it2;
- double val = splits[run];
- map<int, int> eL, eR;
- int counterL = 0, counterR = 0;
- int counter2 = 0;
- for ( it2 = selFeats.begin() ; it2 != selFeats.end(); it2++, counter2++ )
- {
- int cn = labels[ ( *it2 ) [0]] ( ( *it2 ) [1], ( *it2 ) [2] );
- //cout << "vals[counter2] " << vals[counter2] << " val: " << val << endl;
- if ( vals[counter2] < val )
- {
- //left entropy:
- eL[cn] = eL[cn] + 1;
- counterL++;
- }
- else
- {
- //right entropy:
- eR[cn] = eR[cn] + 1;
- counterR++;
- }
- }
- double leftent = 0.0;
- for ( mapit = eL.begin() ; mapit != eL.end(); mapit++ )
- {
- double p = ( double ) ( *mapit ).second / ( double ) counterL;
- leftent -= p * log2 ( p );
- }
- double rightent = 0.0;
- for ( mapit = eR.begin() ; mapit != eR.end(); mapit++ )
- {
- double p = ( double ) ( *mapit ).second / ( double ) counterR;
- rightent -= p * log2 ( p );
- }
- //cout << "rightent: " << rightent << " leftent: " << leftent << endl;
- double pl = ( double ) counterL / ( double ) ( counterL + counterR );
- double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;
- //double ig = globent - rightent - leftent;
- if ( useShannonEntropy )
- {
- double esplit = - ( pl * log ( pl ) + ( 1 - pl ) * log ( 1 - pl ) );
- ig = 2 * ig / ( globent + esplit );
- }
- if ( ig > l_bestig )
- {
- l_bestig = ig;
- l_splitval = val;
- }
- }
- #pragma omp critical
- {
- //cout << "globent: " << globent << " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
- //cout << "globent: " << globent << " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
- //cout << "p: " << featsubset[f] << endl;
- if ( l_bestig > bestig )
- {
- bestig = l_bestig;
- splitop = featsel[f];
- splitval = l_splitval;
- }
- }
- }
- //getchar();
- //splitop->writeInfos();
- //cout<< "ig: " << bestig << endl;
- //FIXME: delete all features!
- /*for(int i = 0; i < featsPerSplit; i++)
- {
- if(featsel[i] != splitop)
- delete featsel[i];
- }*/
- #ifdef debug
- cout << "globent: " << globent << " bestig " << bestig << " splitval: " << splitval << endl;
- #endif
- return bestig;
- }
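- // getMeanProb averages, over all trees, the class probability stored in the
- // node that pixel (x, y) currently occupies.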
- inline double SemSegContextTree::getMeanProb ( const int &x, const int &y, const int &channel, const MultiChannelImageT<unsigned short int> &currentfeats )
- {
- double val = 0.0;
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- val += forest[tree][currentfeats.get ( x,y,tree ) ].dist[channel];
- }
- return val / ( double ) nbTrees;
- }
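- // Builds a 2D integral "image" of sparse texton histograms using the usual
- // recurrence I(x, y) = f(x, y) + I(x - 1, y) + I(x, y - 1) - I(x - 1, y - 1),
- // with the first row and column accumulated separately.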
- void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<SparseVector> &infeats, NICE::MultiChannelImageT<SparseVector> &integralImage )
- {
- int xsize = infeats.width();
- int ysize = infeats.height();
- integralImage ( 0, 0 ).add ( infeats.get ( 0, 0 ) );
- //first column
- for ( int y = 1; y < ysize; y++ )
- {
- integralImage ( 0, y ).add ( infeats.get ( 0, y ) );
- integralImage ( 0, y ).add ( integralImage ( 0, y - 1 ) );
- }
- //first row
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage ( x, 0 ).add ( infeats.get ( x, 0 ) );
- integralImage ( x, 0 ).add ( integralImage ( x - 1, 0 ) );
- }
- //rest
- for ( int y = 1; y < ysize; y++ )
- {
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage ( x, y ).add ( infeats.get ( x, y ) );
- integralImage ( x, y ).add ( integralImage ( x, y - 1 ) );
- integralImage ( x, y ).add ( integralImage ( x - 1, y ) );
- integralImage ( x, y ).sub ( integralImage ( x - 1, y - 1 ) );
- }
- }
- }
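- // Same recurrence for dense channels: the first 'classes' channels hold the
- // integral of the current per-pixel class probabilities (recomputed every
- // level), the remaining channels the integral of the raw local features,
- // which is filled only once (guarded by the zero test below).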
- void SemSegContextTree::computeIntegralImage ( const NICE::MultiChannelImageT<unsigned short int> &currentfeats, const NICE::MultiChannelImageT<double> &lfeats, NICE::MultiChannelImageT<double> &integralImage )
- {
- int xsize = currentfeats.width();
- int ysize = currentfeats.height();
- int channels = ( int ) forest[0][0].dist.size();
- #pragma omp parallel for
- for ( int c = 0; c < channels; c++ )
- {
- integralImage.set ( 0, 0, getMeanProb ( 0, 0, c, currentfeats ), c );
- //first column
- for ( int y = 1; y < ysize; y++ )
- {
- integralImage.set ( 0, y, getMeanProb ( 0, y, c, currentfeats ) + integralImage.get ( 0, y - 1, c ), c );
- }
- //first row
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage.set ( x, 0, getMeanProb ( x, 0, c, currentfeats ) + integralImage.get ( x - 1, 0, c ), c );
- }
- //rest
- for ( int y = 1; y < ysize; y++ )
- {
- for ( int x = 1; x < xsize; x++ )
- {
- double val = getMeanProb ( x, y, c, currentfeats ) + integralImage.get ( x, y - 1, c ) + integralImage.get ( x - 1, y, c ) - integralImage.get ( x - 1, y - 1, c );
- integralImage.set ( x, y, val, c );
- }
- }
- }
- int channels2 = ( int ) lfeats.channels();
- xsize = lfeats.width();
- ysize = lfeats.height();
- if ( integralImage.get ( xsize - 1, ysize - 1, channels ) == 0.0 )
- {
- #pragma omp parallel for
- for ( int c1 = 0; c1 < channels2; c1++ )
- {
- int c = channels + c1;
- integralImage.set ( 0, 0, lfeats.get ( 0, 0, c1 ), c );
- //first column
- for ( int y = 1; y < ysize; y++ )
- {
- integralImage.set ( 0, y, lfeats.get ( 0, y, c1 ) + integralImage.get ( 0, y - 1, c ), c );
- }
- //first row
- for ( int x = 1; x < xsize; x++ )
- {
- integralImage.set ( x, 0, lfeats.get ( x, 0, c1 ) + integralImage.get ( x - 1, 0, c ), c );
- }
- //rest
- for ( int y = 1; y < ysize; y++ )
- {
- for ( int x = 1; x < xsize; x++ )
- {
- double val = lfeats.get ( x, y, c1 ) + integralImage.get ( x, y - 1, c ) + integralImage.get ( x - 1, y, c ) - integralImage.get ( x - 1, y - 1, c );
- integralImage.set ( x, y, val, c );
- }
- }
- }
- }
- }
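- // Training outline: collect per-image features, labels and (sub-sampled)
- // texton maps, build the label <-> class-index maps and the balancing weights
- // a[], then grow all trees breadth first. At every depth the non-leaf frontier
- // nodes get their best split from getBestSplit(), pixels are routed to the new
- // children, and the class-probability / texton integral images are rebuilt so
- // that the next level can use them as context features.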
- void SemSegContextTree::train ( const MultiDataset *md )
- {
- const LabeledSet train = * ( *md ) ["train"];
- const LabeledSet *trainp = &train;
- ProgressBar pb ( "compute feats" );
- pb.show();
- //TODO: memory hog! would a sparse representation pay off?
- vector<MultiChannelImageT<double> > allfeats;
- vector<MultiChannelImageT<unsigned short int> > currentfeats;
- vector<MatrixT<int> > labels;
- vector<MultiChannelImageT<SparseVector> > textonMap;
- vector<MultiChannelImageT<SparseVector> > integralTexton;
- std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
- if ( forbidden_classes_s == "" )
- {
- forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
- }
- classnames.getSelection ( forbidden_classes_s, forbidden_classes );
- int imgcounter = 0;
- int amountPixels = 0;
- LOOP_ALL_S ( *trainp )
- {
- EACH_INFO ( classno, info );
- NICE::ColorImage img;
- std::string currentFile = info.img();
- CachedExample *ce = new CachedExample ( currentFile );
- const LocalizationResult *locResult = info.localization();
- if ( locResult->size() <= 0 )
- {
- fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
- currentFile.c_str() );
- continue;
- }
- fprintf ( stderr, "SemSegCsurka: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );
- int xsize, ysize;
- ce->getImageSize ( xsize, ysize );
- amountPixels += xsize * ysize;
- MatrixT<int> tmpMat ( xsize, ysize );
- currentfeats.push_back ( MultiChannelImageT<unsigned short int> ( xsize, ysize, nbTrees ) );
- currentfeats[imgcounter].setAll ( 0 );
- textonMap.push_back ( MultiChannelImageT<SparseVector> ( xsize / grid + 1, ysize / grid + 1, 1 ) );
- integralTexton.push_back ( MultiChannelImageT<SparseVector> ( xsize / grid + 1, ysize / grid + 1, 1 ) );
- labels.push_back ( tmpMat );
- try {
- img = ColorImage ( currentFile );
- } catch ( Exception ) {
- cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
- continue;
- }
- Globals::setCurrentImgFN ( currentFile );
- //TODO: resize image?!
- MultiChannelImageT<double> feats;
- allfeats.push_back ( feats );
- #ifdef LOCALFEATS
- lfcw->getFeats ( img, allfeats[imgcounter] );
- #else
- allfeats[imgcounter].reInit ( xsize, ysize, 3, true );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- for ( int r = 0; r < 3; r++ )
- {
- allfeats[imgcounter].set ( x, y, img.getPixel ( x, y, r ), r );
- }
- }
- }
- allfeats[imgcounter] = ColorSpace::rgbtolab ( allfeats[imgcounter] );
- #endif
- // getting groundtruth
- NICE::Image pixelLabels ( xsize, ysize );
- pixelLabels.set ( 0 );
- locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- classno = pixelLabels.getPixel ( x, y );
- labels[imgcounter] ( x, y ) = classno;
- if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
- continue;
- labelcounter[classno]++;
- }
- }
- imgcounter++;
- pb.update ( trainp->count() );
- delete ce;
- }
- pb.hide();
- map<int, int>::iterator mapit;
- int classes = 0;
- for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
- {
- labelmap[mapit->first] = classes;
- labelmapback[classes] = mapit->first;
- classes++;
- }
- //balancing
- int featcounter = 0;
- a = vector<double> ( classes, 0.0 );
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = ( int ) currentfeats[iCounter].width();
- int ysize = ( int ) currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- featcounter++;
- int cn = labels[iCounter] ( x, y );
- if ( labelmap.find ( cn ) == labelmap.end() )
- continue;
- a[labelmap[cn]] ++;
- }
- }
- }
- for ( int i = 0; i < ( int ) a.size(); i++ )
- {
- a[i] /= ( double ) featcounter;
- }
- #ifdef DEBUG
- for ( int i = 0; i < ( int ) a.size(); i++ )
- {
- cout << "a[" << i << "]: " << a[i] << endl;
- }
- cout << "a.size: " << a.size() << endl;
- #endif
- depth = 0;
- int uniquenumber = 0;
- for ( int t = 0; t < nbTrees; t++ )
- {
- vector<TreeNode> tree;
- tree.push_back ( TreeNode() );
- tree[0].dist = vector<double> ( classes, 0.0 );
- tree[0].depth = depth;
- tree[0].featcounter = amountPixels;
- tree[0].nodeNumber = uniquenumber;
- uniquenumber++;
- forest.push_back ( tree );
- }
- vector<int> startnode ( nbTrees, 0 );
- bool allleaf = false;
- //int baseFeatSize = allfeats[0].size();
- vector<MultiChannelImageT<double> > integralImgs ( imgcounter, MultiChannelImageT<double>() );
- while ( !allleaf && depth < maxDepth )
- {
- #ifdef DEBUG
- cout << "depth: " << depth << endl;
- #endif
- allleaf = true;
- vector<MultiChannelImageT<unsigned short int> > lastfeats = currentfeats;
- #if 1
- Timer timer;
- timer.start();
- #endif
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- int t = ( int ) forest[tree].size();
- int s = startnode[tree];
- startnode[tree] = t;
- //TODO: maybe parallelize this, provided the next loop would still be parallelized as well; that one carries more weight
- //#pragma omp parallel for
- for ( int i = s; i < t; i++ )
- {
- if ( !forest[tree][i].isleaf && forest[tree][i].left < 0 )
- {
- #if 0
- timer.stop();
- cout << "time 1: " << timer.getLast() << endl;
- timer.start();
- #endif
- Operation *splitfeat = NULL;
- double splitval;
- double bestig = getBestSplit ( allfeats, lastfeats, integralImgs, labels, i, splitfeat, splitval, tree );
- #if 0
- timer.stop();
- double tl = timer.getLast();
- if ( tl > 10.0 )
- {
- cout << "time 2: " << tl << endl;
- cout << "slow split: " << splitfeat->writeInfos() << endl;
- getchar();
- }
- timer.start();
- #endif
- forest[tree][i].feat = splitfeat;
- forest[tree][i].decision = splitval;
- if ( splitfeat != NULL )
- {
- allleaf = false;
- int left = forest[tree].size();
- forest[tree].push_back ( TreeNode() );
- forest[tree].push_back ( TreeNode() );
- int right = left + 1;
- forest[tree][i].left = left;
- forest[tree][i].right = right;
- forest[tree][left].dist = vector<double> ( classes, 0.0 );
- forest[tree][right].dist = vector<double> ( classes, 0.0 );
- forest[tree][left].depth = depth + 1;
- forest[tree][right].depth = depth + 1;
- forest[tree][left].featcounter = 0;
- forest[tree][right].featcounter = 0;
- forest[tree][left].nodeNumber = uniquenumber;
- int leftu = uniquenumber;
- uniquenumber++;
- forest[tree][right].nodeNumber = uniquenumber;
- int rightu = uniquenumber;
- uniquenumber++;
- forest[tree][right].featcounter = 0;
- #if 0
- timer.stop();
- cout << "time 3: " << timer.getLast() << endl;
- timer.start();
- #endif
- #pragma omp parallel for
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = currentfeats[iCounter].width();
- int ysize = currentfeats[iCounter].height();
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( currentfeats[iCounter].get ( x, y, tree ) == i )
- {
- Features feat;
- feat.feats = &allfeats[iCounter];
- feat.cfeats = &lastfeats[iCounter];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[iCounter];
- double val = splitfeat->getVal ( feat, x, y );
- int subx = x / grid;
- int suby = y / grid;
- #pragma omp critical
- if ( val < splitval )
- {
- currentfeats[iCounter].set ( x, y, left, tree );
- if ( labelmap.find ( labels[iCounter] ( x, y ) ) != labelmap.end() )
- forest[tree][left].dist[labelmap[labels[iCounter] ( x, y ) ]]++;
- forest[tree][left].featcounter++;
- SparseVector v;
- v.insert ( pair<int, double> ( leftu, 1.0 ) );
- textonMap[iCounter] ( subx, suby ).add ( v );
- }
- else
- {
- currentfeats[iCounter].set ( x, y, right, tree );
- if ( labelmap.find ( labels[iCounter] ( x, y ) ) != labelmap.end() )
- forest[tree][right].dist[labelmap[labels[iCounter] ( x, y ) ]]++;
- forest[tree][right].featcounter++;
- //find the cell in the subsampled texton map and increment the count for the right child there
- SparseVector v;
- v.insert ( pair<int, double> ( rightu, 1.0 ) );
- textonMap[iCounter] ( subx, suby ).add ( v );
- }
- }
- }
- }
- }
- #if 0
- timer.stop();
- cout << "time 4: " << timer.getLast() << endl;
- timer.start();
- #endif
- // forest[tree][right].featcounter = forest[tree][i].featcounter - forest[tree][left].featcounter;
- double lcounter = 0.0, rcounter = 0.0;
- for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
- {
- if ( forbidden_classes.find ( labelmapback[d] ) != forbidden_classes.end() )
- {
- forest[tree][left].dist[d] = 0;
- forest[tree][right].dist[d] = 0;
- }
- else
- {
- forest[tree][left].dist[d] /= a[d];
- lcounter += forest[tree][left].dist[d];
- forest[tree][right].dist[d] /= a[d];
- rcounter += forest[tree][right].dist[d];
- }
- }
- #if 0
- timer.stop();
- cout << "time 5: " << timer.getLast() << endl;
- timer.start();
- #endif
- if ( lcounter <= 0 || rcounter <= 0 )
- {
- cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
- cout << "splitval: " << splitval << " splittype: " << splitfeat->writeInfos() << endl;
- cout << "bestig: " << bestig << endl;
- for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
- {
- int xsize = currentfeats[iCounter].width();
- int ysize = currentfeats[iCounter].height();
- int counter = 0;
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- if ( lastfeats[iCounter].get ( x, y, tree ) == i )
- {
- if ( ++counter > 30 )
- break;
- Features feat;
- feat.feats = &allfeats[iCounter];
- feat.cfeats = &lastfeats[iCounter];
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImgs[iCounter];
- double val = splitfeat->getVal ( feat, x, y );
- cout << "splitval: " << splitval << " val: " << val << endl;
- }
- }
- }
- }
- assert ( lcounter > 0 && rcounter > 0 );
- }
- for ( uint d = 0; d < forest[tree][left].dist.size(); d++ )
- {
- forest[tree][left].dist[d] /= lcounter;
- forest[tree][right].dist[d] /= rcounter;
- }
- }
- else
- {
- forest[tree][i].isleaf = true;
- }
- }
- }
- #if 0
- timer.stop();
- cout << "time after tree: " << timer.getLast() << endl;
- timer.start();
- #endif
- }
- //compute integral image
- int channels = classes + allfeats[0].channels();
- #if 0
- timer.stop();
- cout << "time for part0: " << timer.getLast() << endl;
- timer.start();
- #endif
- if ( integralImgs[0].width() == 0 )
- {
- for ( int i = 0; i < imgcounter; i++ )
- {
- int xsize = allfeats[i].width();
- int ysize = allfeats[i].height();
- integralImgs[i].reInit ( xsize, ysize, channels );
- integralImgs[i].setAll ( 0.0 );
- }
- }
- #if 0
- timer.stop();
- cout << "time for part1: " << timer.getLast() << endl;
- timer.start();
- #endif
- #pragma omp parallel for
- for ( int i = 0; i < imgcounter; i++ )
- {
- computeIntegralImage ( currentfeats[i], allfeats[i], integralImgs[i] );
- computeIntegralImage ( textonMap[i], integralTexton[i] );
- }
- #if 1
- timer.stop();
- cout << "time for depth " << depth << ": " << timer.getLast() << endl;
- #endif
- depth++;
- }
- #ifdef DEBUG
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- int t = ( int ) forest[tree].size();
- for ( int i = 0; i < t; i++ )
- {
- printf ( "tree[%i]: left: %i, right: %i", i, forest[tree][i].left, forest[tree][i].right );
- if ( !forest[tree][i].isleaf && forest[tree][i].left != -1 )
- {
- cout << ", feat: " << forest[tree][i].feat->writeInfos() << " ";
- opOverview[forest[tree][i].feat->getOps() ]++;
- contextOverview[forest[tree][i].depth][ ( int ) forest[tree][i].feat->getContext() ]++;
- }
- for ( int d = 0; d < ( int ) forest[tree][i].dist.size(); d++ )
- {
- cout << " " << forest[tree][i].dist[d];
- }
- cout << endl;
- }
- }
- for ( uint c = 0; c < ops.size(); c++ )
- {
- cout << ops[c]->writeInfos() << ": " << opOverview[ops[c]->getOps() ] << endl;
- }
- for ( uint c = 0; c < cops.size(); c++ )
- {
- cout << cops[c]->writeInfos() << ": " << opOverview[cops[c]->getOps() ] << endl;
- }
- for ( int d = 0; d < maxDepth; d++ )
- {
- double sum = contextOverview[d][0] + contextOverview[d][1];
- contextOverview[d][0] /= sum;
- contextOverview[d][1] /= sum;
- cout << "depth: " << d << " woContext: " << contextOverview[d][0] << " wContext: " << contextOverview[d][1] << endl;
- }
- #endif
- }
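- // Inference mirrors training: the raw features are recomputed for the test
- // image, every pixel is pushed down all trees one level per iteration, and the
- // integral and texton images are rebuilt after each level so that context
- // features refer to the current predictions. The final labeling is either
- // per pixel (argmax of the mean leaf distribution) or per segmentation region.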
- void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
- {
- int xpos = 8;
- //int xpos = 15;
- int ypos = 78;
- int xsize;
- int ysize;
- ce->getImageSize ( xsize, ysize );
- int numClasses = classNames->numClasses();
- fprintf ( stderr, "ContextTree classification !\n" );
- probabilities.reInit ( xsize, ysize, numClasses, true );
- probabilities.setAll ( 0 );
- NICE::ColorImage img;
- std::string currentFile = Globals::getCurrentImgFN();
- try {
- img = ColorImage ( currentFile );
- } catch ( Exception ) {
- cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
- return;
- }
- //TODO: resize image?!
- MultiChannelImageT<double> feats;
- MultiChannelImageT<SparseVector> textonMap ( xsize / grid + 1, ysize / grid + 1, 1 );
- MultiChannelImageT<SparseVector> integralTexton ( xsize / grid + 1, ysize / grid + 1, 1 );
- #ifdef LOCALFEATS
- lfcw->getFeats ( img, feats );
- #else
- feats.reInit ( xsize, ysize, 3, true );
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- for ( int r = 0; r < 3; r++ )
- {
- feats.set ( x, y, img.getPixel ( x, y, r ), r );
- }
- }
- }
- feats = ColorSpace::rgbtolab ( feats );
- #endif
- bool allleaf = false;
- MultiChannelImageT<double> integralImg;
- MultiChannelImageT<unsigned short int> currentfeats ( xsize, ysize, nbTrees );
- currentfeats.setAll ( 0 );
- depth = 0;
- for ( int d = 0; d < maxDepth && !allleaf; d++ )
- {
- allleaf = true;
- //TODO: maybe parallelize this, provided the next loop is also parallelized; that one carries more weight
- //#pragma omp parallel for
- MultiChannelImageT<unsigned short int> lastfeats = currentfeats;
- for ( int tree = 0; tree < nbTrees; tree++ )
- {
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++ )
- {
- int t = currentfeats.get ( x, y, tree );
- if ( forest[tree][t].left > 0 )
- {
- allleaf = false;
- Features feat;
- feat.feats = &feats;
- feat.cfeats = &lastfeats;
- feat.cTree = tree;
- feat.tree = &forest[tree];
- feat.integralImg = &integralImg;
- double val = forest[tree][t].feat->getVal ( feat, x, y );
- int subx = x / grid;
- int suby = y / grid;
- if ( val < forest[tree][t].decision )
- {
- currentfeats.set ( x, y, forest[tree][t].left, tree );
- SparseVector v;
- v.insert ( pair<int, double> ( forest[tree][forest[tree][t].left].nodeNumber, 1.0 ) );
- textonMap ( subx, suby ).add ( v );
- }
- else
- {
- currentfeats.set ( x, y, forest[tree][t].right, tree );
- SparseVector v;
- v.insert ( pair<int, double> ( forest[tree][forest[tree][t].right].nodeNumber, 1.0 ) );
- textonMap ( subx, suby ).add ( v );
- }
- if ( x == xpos && y == ypos )
- {
- cout << "val: " << val << " decision: " << forest[tree][t].decision << " details: " << forest[tree][t].feat->writeInfos() << endl;
- }
- }
- }
- }
- //compute integral image
- int channels = ( int ) labelmap.size() + feats.channels();
- if ( integralImg.width() == 0 )
- {
- int xsize = feats.width();
- int ysize = feats.height();
- integralImg.reInit ( xsize, ysize, channels );
- integralImg.setAll ( 0.0 );
- }
- }
- computeIntegralImage ( currentfeats, feats, integralImg );
- computeIntegralImage ( textonMap, integralTexton );
- depth++;
- }
- cout << forest[0][currentfeats.get ( xpos, ypos, 0 ) ].dist << endl;
- if ( pixelWiseLabeling )
- {
- //final labeling:
- long int offset = 0;
- for ( int x = 0; x < xsize; x++ )
- {
- for ( int y = 0; y < ysize; y++, offset++ )
- {
- double maxvalue = - numeric_limits<double>::max(); //TODO: this only needs to be done per node, not per pixel
- int maxindex = 0;
- uint s = forest[0][0].dist.size();
- for ( uint i = 0; i < s; i++ )
- {
- probabilities.data[labelmapback[i]][offset] = getMeanProb ( x, y, i, currentfeats );
- if ( probabilities.data[labelmapback[i]][offset] > maxvalue )
- {
- maxvalue = probabilities.data[labelmapback[i]][offset];
- maxindex = labelmapback[i];
- }
- }
- segresult.setPixel ( x, y, maxindex ); // write the argmax label once per pixel
- if ( maxvalue > 1 )
- cout << "maxvalue: " << maxvalue << endl;
- }
- }
- }
- else
- {
- //final labeling using segmentation
- Matrix regions;
- //showImage(img);
- int regionNumber = segmentation->segRegions ( img, regions );
- cout << "regions: " << regionNumber << endl;
- int dSize = forest[0][0].dist.size();
- vector<vector<double> > regionProbs ( regionNumber, vector<double> ( dSize, 0.0 ) );
- vector<int> bestlabels ( regionNumber, 0 );
- /*
- for(int r = 0; r < regionNumber; r++)
- {
- Image over(img.width(), img.height());
- for(int y = 0; y < img.height(); y++)
- {
- for(int x = 0; x < img.width(); x++)
- {
- if(((int)regions(x,y)) == r)
- over.setPixel(x,y,1);
- else
- over.setPixel(x,y,0);
- }
- }
- cout << "r: " << r << endl;
- showImageOverlay(img, over);
- }
- */
- for ( int y = 0; y < img.height(); y++ )
- {
- for ( int x = 0; x < img.width(); x++ )
- {
- int cregion = regions ( x, y );
- for ( int d = 0; d < dSize; d++ )
- {
- regionProbs[cregion][d] += getMeanProb ( x, y, d, currentfeats );
- }
- }
- }
- for ( int r = 0; r < regionNumber; r++ )
- {
- double maxval = regionProbs[r][0];
- bestlabels[r] = 0;
- for ( int d = 1; d < dSize; d++ )
- {
- if ( maxval < regionProbs[r][d] )
- {
- maxval = regionProbs[r][d];
- bestlabels[r] = d;
- }
- }
- bestlabels[r] = labelmapback[bestlabels[r]];
- }
- for ( int y = 0; y < img.height(); y++ )
- {
- for ( int x = 0; x < img.width(); x++ )
- {
- segresult.setPixel ( x, y, bestlabels[regions ( x,y ) ] );
- }
- }
- }
- cout << "segmentation finished" << endl;
- }
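- // On-disk format written by store() and read by restore(): number of trees,
- // the class names, both label maps (size followed by key/value pairs), then
- // for every tree its node count and per node the fields
- // left right decision isleaf depth featcounter nodeNumber, the class
- // distribution, and the operation type id plus its serialized parameters
- // (-1 if the node has no feature).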
- void SemSegContextTree::store ( std::ostream & os, int format ) const
- {
- os << nbTrees << endl;
- classnames.store ( os );
- map<int, int>::const_iterator it;
- os << labelmap.size() << endl;
- for ( it = labelmap.begin() ; it != labelmap.end(); it++ )
- os << ( *it ).first << " " << ( *it ).second << endl;
- os << labelmapback.size() << endl;
- for ( it = labelmapback.begin() ; it != labelmapback.end(); it++ )
- os << ( *it ).first << " " << ( *it ).second << endl;
- int trees = forest.size();
- os << trees << endl;
- for ( int t = 0; t < trees; t++ )
- {
- int nodes = forest[t].size();
- os << nodes << endl;
- for ( int n = 0; n < nodes; n++ )
- {
- os << forest[t][n].left << " " << forest[t][n].right << " " << forest[t][n].decision << " " << forest[t][n].isleaf << " " << forest[t][n].depth << " " << forest[t][n].featcounter << " " << forest[t][n].nodeNumber << endl;
- os << forest[t][n].dist << endl;
- if ( forest[t][n].feat == NULL )
- os << -1 << endl;
- else
- {
- os << forest[t][n].feat->getOps() << endl;
- forest[t][n].feat->store ( os );
- }
- }
- }
- }
- void SemSegContextTree::restore ( std::istream & is, int format )
- {
- is >> nbTrees;
- classnames.restore ( is );
- int lsize;
- is >> lsize;
- labelmap.clear();
- for ( int l = 0; l < lsize; l++ )
- {
- int first, second;
- is >> first;
- is >> second;
- labelmap[first] = second;
- }
- is >> lsize;
- labelmapback.clear();
- for ( int l = 0; l < lsize; l++ )
- {
- int first, second;
- is >> first;
- is >> second;
- labelmapback[first] = second;
- }
- int trees;
- is >> trees;
- forest.clear();
- for ( int t = 0; t < trees; t++ )
- {
- vector<TreeNode> tmptree;
- forest.push_back ( tmptree );
- int nodes;
- is >> nodes;
- //cout << "nodes: " << nodes << endl;
- for ( int n = 0; n < nodes; n++ )
- {
- TreeNode tmpnode;
- forest[t].push_back ( tmpnode );
- is >> forest[t][n].left;
- is >> forest[t][n].right;
- is >> forest[t][n].decision;
- is >> forest[t][n].isleaf;
- is >> forest[t][n].depth;
- is >> forest[t][n].featcounter;
- is >> forest[t][n].nodeNumber;
- is >> forest[t][n].dist;
- int feattype;
- is >> feattype;
- assert ( feattype < NBOPERATIONS );
- forest[t][n].feat = NULL;
- if ( feattype >= 0 )
- {
- for ( int o = 0; o < ops.size(); o++ )
- {
- if ( ops[o]->getOps() == feattype )
- {
- forest[t][n].feat = ops[o]->clone();
- break;
- }
- }
- if ( forest[t][n].feat == NULL )
- {
- for ( int o = 0; o < cops.size(); o++ )
- {
- if ( cops[o]->getOps() == feattype )
- {
- forest[t][n].feat = cops[o]->clone();
- break;
- }
- }
- }
- assert ( forest[t][n].feat != NULL );
- forest[t][n].feat->restore ( is );
- }
- }
- }
- }
|