SemSegContextTree.cpp

#include <objrec/nice.h>
#include <iostream>
#include <limits>
#include <cmath>

#include "SemSegContextTree.h"

#include "objrec/baselib/Globals.h"
#include "objrec/baselib/ProgressBar.h"
#include "objrec/baselib/StringTools.h"

#include "objrec/cbaselib/CachedExample.h"
#include "objrec/cbaselib/PascalResults.h"

#include <omp.h>

#define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))

using namespace OBJREC;
using namespace std;
using namespace NICE;
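
// The Operation subclasses below implement the split tests of the context tree:
// each one reads (or compares) feature values at two pixel offsets (x1,y1)/(x2,y2)
// and channels channel1/channel2, set via Operation::set() (see SemSegContextTree.h).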
class Minus: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return v1 - v2;
  }

  virtual Operation* clone()
  {
    return new Minus();
  }
};
class MinusAbs: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return fabs ( v1 - v2 );
  }

  virtual Operation* clone()
  {
    return new MinusAbs();
  }
};
class Addition: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return v1 + v2;
  }

  virtual Operation* clone()
  {
    return new Addition();
  }
};

class Only1: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    return v1;
  }

  virtual Operation* clone()
  {
    return new Only1();
  }
};
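
// Constructor: reads the "SSContextTree" configuration section, registers the
// available split operations and immediately trains the context tree on the
// training part of the dataset.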
SemSegContextTree::SemSegContextTree ( const Config *conf, const MultiDataset *md )
    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
{
  this->conf = conf;

  string section = "SSContextTree";
  lfcw = new LFColorWeijer ( conf );

  grid = conf->gI ( section, "grid", 10 );
  maxSamples = conf->gI ( section, "max_samples", 2000 );
  minFeats = conf->gI ( section, "min_feats", 50 );
  maxDepth = conf->gI ( section, "max_depth", 10 );
  windowSize = conf->gI ( section, "window_size", 16 );
  featsPerSplit = conf->gI ( section, "feats_per_split", 200 );
  useShannonEntropy = conf->gB ( section, "use_shannon_entropy", true );

  ops.push_back ( new Minus() );
  ops.push_back ( new MinusAbs() );
  ops.push_back ( new Addition() );
  ops.push_back ( new Only1() );

  classnames = md->getClassNames ( "train" );

  ///////////////////////////////////
  // Train Segmentation Context Trees
  ///////////////////////////////////
  train ( md );
}

SemSegContextTree::~SemSegContextTree()
{
}
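
// Finds the best split for a tree node: a random, class-balanced subset of the
// pixels that currently reach the node is sampled (using the class fractions
// in a), featsPerSplit random operations are generated, and the
// (operation, threshold) pair with the highest information gain is returned in
// splitop / splitval. splitop stays NULL if the node should become a leaf.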
void SemSegContextTree::getBestSplit ( const vector<vector<vector<vector<double> > > > &feats, vector<vector<vector<int> > > &currentfeats, const vector<vector<vector<int> > > &labels, int node, Operation *&splitop, double &splitval )
{
  splitop = NULL;
  splitval = -1.0;

  int imgCount = 0, featdim = 0;
  try
  {
    imgCount = (int)feats.size();
    featdim = feats[0][0][0].size();
  }
  catch ( Exception )
  {
    cerr << "no features computed?" << endl;
    return;
  }

  double bestig = -numeric_limits< double >::max();

  set<vector<int> > selFeats;
  map<int,int> e;
  int featcounter = 0;

  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          featcounter++;
        }
      }
    }
  }

  if ( featcounter < minFeats )
  {
    cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
    return;
  }
  vector<double> fraction ( a.size(), 0.0 );
  for ( uint i = 0; i < fraction.size(); i++ )
  {
    if ( forbidden_classes.find ( labelmapback[i] ) != forbidden_classes.end() )
      fraction[i] = 0;
    else
      fraction[i] = ( (double)maxSamples ) / ( (double)featcounter * a[i] * a.size() );
    //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
  }
  //cout << "a.size(): " << a.size() << endl;
  //getchar();

  featcounter = 0;
  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          int cn = labels[iCounter][x][y];
          double randD = (double)rand() / (double)RAND_MAX;
          if ( randD < fraction[labelmap[cn]] )
          {
            vector<int> tmp ( 3, 0 );
            tmp[0] = iCounter;
            tmp[1] = x;
            tmp[2] = y;
            featcounter++;
            selFeats.insert ( tmp );
            e[cn]++;
          }
        }
      }
    }
  }
  //cout << "size: " << selFeats.size() << endl;
  //getchar();

  map<int,int>::iterator mapit;
  double globent = 0.0;
  for ( mapit = e.begin(); mapit != e.end(); mapit++ )
  {
    //cout << "class: " << mapit->first << ": " << mapit->second << endl;
    double p = (double) ( *mapit ).second / (double)featcounter;
    globent += p * log2 ( p );
  }
  globent = -globent;

  if ( globent < 0.5 )
  {
    cout << "globent too small: " << globent << endl;
    return;
  }
  featsel.clear();
  for ( int i = 0; i < featsPerSplit; i++ )
  {
    int x1 = (int) ( (double)rand() / (double)RAND_MAX * (double)windowSize ) - windowSize / 2;
    int x2 = (int) ( (double)rand() / (double)RAND_MAX * (double)windowSize ) - windowSize / 2;
    int y1 = (int) ( (double)rand() / (double)RAND_MAX * (double)windowSize ) - windowSize / 2;
    int y2 = (int) ( (double)rand() / (double)RAND_MAX * (double)windowSize ) - windowSize / 2;
    // dividing by RAND_MAX+1.0 keeps the channel/operation index strictly below
    // featdim resp. ops.size(), even if rand() returns RAND_MAX
    int f1 = (int) ( (double)rand() / ( (double)RAND_MAX + 1.0 ) * (double)featdim );
    int f2 = (int) ( (double)rand() / ( (double)RAND_MAX + 1.0 ) * (double)featdim );
    int o = (int) ( (double)rand() / ( (double)RAND_MAX + 1.0 ) * (double)ops.size() );
    Operation *op = ops[o]->clone();
    op->set ( x1, y1, x2, y2, f1, f2 );
    featsel.push_back ( op );
  }
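
  // Evaluate all candidate operations in parallel: every sampled pixel's value
  // serves as a threshold candidate, and the split with the highest
  // (optionally Shannon-normalized) information gain wins.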
#pragma omp parallel for private(mapit)
  for ( int f = 0; f < featsPerSplit; f++ )
  {
    double l_bestig = -numeric_limits< double >::max();
    double l_splitval = -1.0;
    set<vector<int> >::iterator it;
    vector<double> vals;
    for ( it = selFeats.begin(); it != selFeats.end(); it++ )
    {
      vals.push_back ( featsel[f]->getVal ( feats[ ( *it ) [0]], ( *it ) [1], ( *it ) [2] ) );
    }

    int counter = 0;
    for ( it = selFeats.begin(); it != selFeats.end(); it++, counter++ )
    {
      set<vector<int> >::iterator it2;
      double val = vals[counter];

      map<int,int> eL, eR;
      int counterL = 0, counterR = 0;
      int counter2 = 0;
      for ( it2 = selFeats.begin(); it2 != selFeats.end(); it2++, counter2++ )
      {
        int cn = labels[ ( *it2 ) [0]][ ( *it2 ) [1]][ ( *it2 ) [2]];
        //cout << "vals[counter2] " << vals[counter2] << " val: " << val << endl;
        if ( vals[counter2] < val )
        {
          //left entropy:
          eL[cn] = eL[cn] + 1;
          counterL++;
        }
        else
        {
          //right entropy:
          eR[cn] = eR[cn] + 1;
          counterR++;
        }
      }

      double leftent = 0.0;
      for ( mapit = eL.begin(); mapit != eL.end(); mapit++ )
      {
        double p = (double) ( *mapit ).second / (double)counterL;
        leftent -= p * log2 ( p );
      }

      double rightent = 0.0;
      for ( mapit = eR.begin(); mapit != eR.end(); mapit++ )
      {
        double p = (double) ( *mapit ).second / (double)counterR;
        rightent -= p * log2 ( p );
      }
      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;

      double pl = (double)counterL / (double) ( counterL + counterR );
      double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;
      //double ig = globent - rightent - leftent;

      if ( useShannonEntropy )
      {
        double esplit = - ( pl * log ( pl ) + ( 1 - pl ) * log ( 1 - pl ) );
        ig = 2 * ig / ( globent + esplit );
      }

      if ( ig > l_bestig )
      {
        l_bestig = ig;
        l_splitval = val;
      }
    }

#pragma omp critical
    {
      //cout << "globent: " << globent << " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
      //cout << "globent: " << globent << " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
      //cout << "p: " << featsubset[f] << endl;
      if ( l_bestig > bestig )
      {
        bestig = l_bestig;
        splitop = featsel[f];
        splitval = l_splitval;
      }
    }
  }

  /*for(int i = 0; i < featsPerSplit; i++)
  {
    if(featsel[i] != splitop)
      delete featsel[i];
  }*/
#ifdef DEBUG
  cout << "globent: " << globent << " bestig " << bestig << " splitval: " << splitval << endl;
#endif
}
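
// Training: loads all training images, computes a per-pixel feature cube
// (here simply the RGB values, the LFColorWeijer features are disabled via #if 0),
// collects the ground-truth labels, estimates the class frequencies in a for
// balancing, and then grows the context tree breadth-first, calling
// getBestSplit() for every open node of the current level.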
void SemSegContextTree::train ( const MultiDataset *md )
{
  const LabeledSet train = * ( *md ) ["train"];
  const LabeledSet *trainp = &train;

  ProgressBar pb ( "compute feats" );
  pb.show();

  //TODO: memory hog! would a sparse representation pay off?
  vector<vector<vector<vector<double> > > > allfeats;
  vector<vector<vector<int> > > currentfeats;
  vector<vector<vector<int> > > labels;

  std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
  if ( forbidden_classes_s == "" )
  {
    forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
  }
  classnames.getSelection ( forbidden_classes_s, forbidden_classes );

  int imgcounter = 0;

  LOOP_ALL_S ( *trainp )
  {
    EACH_INFO ( classno, info );
    NICE::ColorImage img;

    std::string currentFile = info.img();
    CachedExample *ce = new CachedExample ( currentFile );

    const LocalizationResult *locResult = info.localization();
    if ( locResult->size() <= 0 )
    {
      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
                currentFile.c_str() );
      continue;
    }
    fprintf ( stderr, "SemSegContextTree: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );

    int xsize, ysize;
    ce->getImageSize ( xsize, ysize );

    vector<vector<int> > tmp = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
    currentfeats.push_back ( tmp );
    labels.push_back ( tmp );

    try {
      img = ColorImage ( currentFile );
    } catch ( Exception ) {
      cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
      continue;
    }
    Globals::setCurrentImgFN ( currentFile );

    //TODO: resize image?!
    vector<vector<vector<double> > > feats;
#if 0
    lfcw->getFeats ( img, feats );
#else
    feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        for ( int r = 0; r < 3; r++ )
        {
          feats[x][y][r] = img.getPixel ( x, y, r );
        }
      }
    }
#endif
    allfeats.push_back ( feats );

    // getting ground truth
    NICE::Image pixelLabels ( xsize, ysize );
    pixelLabels.set ( 0 );
    locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );

    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        classno = pixelLabels.getPixel ( x, y );
        labels[imgcounter][x][y] = classno;

        if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
          continue;

        labelcounter[classno]++;
      }
    }
    imgcounter++;
    pb.update ( trainp->count() );
    delete ce;
  }
  pb.hide();
  map<int,int>::iterator mapit;
  int classes = 0;
  for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
  {
    labelmap[mapit->first] = classes;
    labelmapback[classes] = mapit->first;
    classes++;
  }

  //balancing
  int featcounter = 0;
  a = vector<double> ( classes, 0.0 );
  for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        featcounter++;
        int cn = labels[iCounter][x][y];
        a[labelmap[cn]]++;
      }
    }
  }

  for ( int i = 0; i < (int)a.size(); i++ )
  {
    a[i] /= (double)featcounter;
  }

#ifdef DEBUG
  for ( int i = 0; i < (int)a.size(); i++ )
  {
    cout << "a[" << i << "]: " << a[i] << endl;
  }
  cout << "a.size: " << a.size() << endl;
#endif
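
  // Grow the tree breadth-first: all nodes created on the previous level
  // (indices startnode .. tree.size()-1) are split in turn until either every
  // node is a leaf or maxDepth is reached.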
  tree.push_back ( Node() );
  tree[0].dist = vector<double> ( classes, 0.0 );
  int depth = 0;
  tree[0].depth = depth;
  int startnode = 0;
  bool allleaf = false;

  while ( !allleaf && depth < maxDepth )
  {
    allleaf = true;
    //TODO: could perhaps be parallelized as well, but the next loop carries more weight as long as it still gets parallelized
    int t = (int) tree.size();
    int s = startnode;
    startnode = t;
    //#pragma omp parallel for
    for ( int i = s; i < t; i++ )
    {
      if ( !tree[i].isleaf && tree[i].left < 0 )
      {
        Operation *splitfeat = NULL;
        double splitval;
        getBestSplit ( allfeats, currentfeats, labels, i, splitfeat, splitval );
        tree[i].feat = splitfeat;
        tree[i].decision = splitval;

        if ( splitfeat != NULL )
        {
          allleaf = false;
          int left = tree.size();
          tree.push_back ( Node() );
          tree.push_back ( Node() );
          int right = left + 1;
          tree[i].left = left;
          tree[i].right = right;
          tree[left].dist = vector<double> ( classes, 0.0 );
          tree[right].dist = vector<double> ( classes, 0.0 );
          tree[left].depth = depth + 1;
          tree[right].depth = depth + 1;

#pragma omp parallel for
          for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
          {
            int xsize = currentfeats[iCounter].size();
            int ysize = currentfeats[iCounter][0].size();
            for ( int x = 0; x < xsize; x++ )
            {
              for ( int y = 0; y < ysize; y++ )
              {
                if ( currentfeats[iCounter][x][y] == i )
                {
                  double val = splitfeat->getVal ( allfeats[iCounter], x, y );
                  if ( val < splitval )
                  {
                    currentfeats[iCounter][x][y] = left;
                    // guard the shared class histogram against concurrent updates
#pragma omp atomic
                    tree[left].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                  else
                  {
                    currentfeats[iCounter][x][y] = right;
#pragma omp atomic
                    tree[right].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                }
              }
            }
          }

          double lcounter = 0.0, rcounter = 0.0;
          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            if ( forbidden_classes.find ( labelmapback[d] ) != forbidden_classes.end() )
            {
              tree[left].dist[d] = 0;
              tree[right].dist[d] = 0;
            }
            else
            {
              tree[left].dist[d] /= a[d];
              lcounter += tree[left].dist[d];
              tree[right].dist[d] /= a[d];
              rcounter += tree[right].dist[d];
            }
          }

          if ( lcounter <= 0 || rcounter <= 0 )
          {
            cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
            cout << "splitval: " << splitval << endl;
            assert ( lcounter > 0 && rcounter > 0 );
          }

          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            tree[left].dist[d] /= lcounter;
            tree[right].dist[d] /= rcounter;
          }
        }
        else
        {
          tree[i].isleaf = true;
        }
      }
    }
    //TODO: recompute the features here!
    depth++;
#ifdef DEBUG
    cout << "depth: " << depth << endl;
#endif
  }

#ifdef DEBUG
  int t = (int) tree.size();
  for ( int i = 0; i < t; i++ )
  {
    printf ( "tree[%i]: left: %i, right: %i ", i, tree[i].left, tree[i].right );
    for ( int d = 0; d < (int)tree[i].dist.size(); d++ )
    {
      cout << " " << tree[i].dist[d];
    }
    cout << endl;
  }
#endif
}
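
// Classification: computes the same per-pixel features as in training, routes
// every pixel down the trained tree, writes the class distribution of the
// reached leaf into probabilities and the most probable class into segresult.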
void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
{
  int xsize;
  int ysize;
  ce->getImageSize ( xsize, ysize );

  int numClasses = classNames->numClasses();

  fprintf ( stderr, "ContextTree classification !\n" );

  probabilities.reInit ( xsize, ysize, numClasses, true );
  probabilities.setAll ( 0 );

  NICE::ColorImage img;
  std::string currentFile = Globals::getCurrentImgFN();
  try {
    img = ColorImage ( currentFile );
  } catch ( Exception ) {
    cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
    return;
  }

  //TODO: resize image?!
  vector<vector<vector<double> > > feats;
#if 0
  lfcw->getFeats ( img, feats );
#else
  feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++ )
    {
      for ( int r = 0; r < 3; r++ )
      {
        feats[x][y][r] = img.getPixel ( x, y, r );
      }
    }
  }
#endif
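
  // Route every pixel down the tree level by level until all pixels have
  // reached a leaf node.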
  bool allleaf = false;
  vector<vector<int> > currentfeats = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
  int depth = 0;
  while ( !allleaf )
  {
    allleaf = true;
    //TODO: could perhaps be parallelized if the next loop were also parallelized; that one carries more weight
    //#pragma omp parallel for
    int t = (int) tree.size();
    for ( int i = 0; i < t; i++ )
    {
      for ( int x = 0; x < xsize; x++ )
      {
        for ( int y = 0; y < ysize; y++ )
        {
          int node = currentfeats[x][y];
          if ( tree[node].left > 0 )
          {
            allleaf = false;
            double val = tree[node].feat->getVal ( feats, x, y );
            if ( val < tree[node].decision )
            {
              currentfeats[x][y] = tree[node].left;
            }
            else
            {
              currentfeats[x][y] = tree[node].right;
            }
          }
        }
      }
    }
    //TODO: recompute the features here, analogous to training
    depth++;
  }
  // final labeling:
  long int offset = 0;
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++, offset++ )
    {
      int t = currentfeats[x][y];
      double maxvalue = -numeric_limits<double>::max(); //TODO: this only needs to be done once per node, not per pixel
      int maxindex = 0;
      for ( uint i = 0; i < tree[t].dist.size(); i++ )
      {
        probabilities.data[labelmapback[i]][offset] = tree[t].dist[i];
        if ( tree[t].dist[i] > maxvalue )
        {
          maxvalue = tree[t].dist[i];
          maxindex = labelmapback[i];
        }
      }
      segresult.setPixel ( x, y, maxindex );
    }
  }
}