SemSegContextTree.cpp

#include <objrec/nice.h>
#include <iostream>
#include <cmath>

#include "SemSegContextTree.h"

#include "objrec/baselib/Globals.h"
#include "objrec/baselib/ProgressBar.h"
#include "objrec/baselib/StringTools.h"

#include "objrec/cbaselib/CachedExample.h"
#include "objrec/cbaselib/PascalResults.h"

#include <omp.h>

#define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))

using namespace OBJREC;
using namespace std;
using namespace NICE;
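
/* The following classes implement the split feature tests of the context tree.
   Each Operation compares (at most) two feature channels at two offset
   positions around the query pixel; the offsets (x1,y1), (x2,y2) and the
   channels channel1/channel2 are members of the Operation base class, which is
   presumably declared in SemSegContextTree.h together with set() for
   configuring them. BOUND clamps the offset positions to the image borders. */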
/** subtracts two feature values */
class Minus: public Operation
{
  public:
    virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
    {
      int xsize = feats.size();
      int ysize = feats[0].size();
      double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
      double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
      return v1 - v2;
    }

    virtual Operation* clone()
    {
      return new Minus();
    }
};

/** absolute difference of two feature values */
class MinusAbs: public Operation
{
  public:
    virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
    {
      int xsize = feats.size();
      int ysize = feats[0].size();
      double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
      double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
      return fabs ( v1 - v2 );
    }

    virtual Operation* clone()
    {
      return new MinusAbs();
    }
};

/** adds two feature values */
class Addition: public Operation
{
  public:
    virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
    {
      int xsize = feats.size();
      int ysize = feats[0].size();
      double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
      double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
      return v1 + v2;
    }

    virtual Operation* clone()
    {
      return new Addition();
    }
};

/** uses only the first feature value */
class Only1: public Operation
{
  public:
    virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
    {
      int xsize = feats.size();
      int ysize = feats[0].size();
      double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
      return v1;
    }

    virtual Operation* clone()
    {
      return new Only1();
    }
};
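
/* Reads the context-tree parameters (sampling grid, number of sampled pixels,
   minimum pixels per node, maximum depth, offset window size and number of
   split candidates per node) from the "SSContextTree" config section and
   immediately starts training on the given dataset. */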
SemSegContextTree::SemSegContextTree ( const Config *conf, const MultiDataset *md )
    : SemanticSegmentation ( conf, &(md->getClassNames("train")) )
{
  string section = "SSContextTree";
  lfcw = new LFColorWeijer ( conf );

  grid = conf->gI ( section, "grid", 10 );
  maxSamples = conf->gI ( section, "max_samples", 2000 );
  minFeats = conf->gI ( section, "min_feats", 50 );
  maxDepth = conf->gI ( section, "max_depth", 10 );
  windowSize = conf->gI ( section, "window_size", 16 );
  featsPerSplit = conf->gI ( section, "feats_per_split", 200 );
  useShannonEntropy = conf->gB ( section, "use_shannon_entropy", false );

  // available feature operations on pixel pairs
  ops.push_back ( new Minus() );
  ops.push_back ( new MinusAbs() );
  ops.push_back ( new Addition() );
  ops.push_back ( new Only1() );

  ///////////////////////////////////
  // Train Segmentation Context Trees
  ///////////////////////////////////
  train ( md );
}
SemSegContextTree::~SemSegContextTree()
{
}
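
/* Searches the best split for the given node: it counts the pixels assigned to
   the node, draws a class-balanced random subset of at most maxSamples of
   them, computes the entropy of their class distribution and then evaluates
   featsPerSplit randomly generated Operations (random offsets, channels and
   operation type) in parallel. For every candidate, each sampled feature value
   is tried as a threshold and the split with the highest information gain is
   returned via splitop/splitval; splitop stays NULL if the node should become
   a leaf (too few pixels or an almost pure class distribution). */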
void SemSegContextTree::getBestSplit ( const vector<vector<vector<vector<double> > > > &feats, vector<vector<vector<int> > > &currentfeats, const vector<vector<vector<int> > > &labels, int node, Operation *&splitop, double &splitval )
{
  int imgCount = 0, featdim = 0;
  try
  {
    imgCount = (int)feats.size();
    featdim = (int)feats[0][0][0].size();
  }
  catch ( Exception )
  {
    cerr << "no features computed?" << endl;
  }

  double bestig = -numeric_limits< double >::max();
  splitop = NULL;
  splitval = -1.0;

  set<vector<int> > selFeats;
  map<int,int> e;

  // count the pixels currently assigned to this node
  int featcounter = 0;
  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          featcounter++;
        }
      }
    }
  }

  if ( featcounter < minFeats )
  {
    cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
    return;
  }

  // class-dependent sampling fractions, so that roughly maxSamples pixels
  // are drawn and all classes are equally represented
  vector<double> fraction ( a.size(), 0.0 );
  for ( uint i = 0; i < fraction.size(); i++ )
  {
    fraction[i] = ((double)maxSamples) / ((double)featcounter*a[i]*a.size());
    //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
  }
  //cout << "a.size(): " << a.size() << endl;
  //getchar();
  // randomly subsample pixels of this node, balanced over classes
  featcounter = 0;
  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          int cn = labels[iCounter][x][y];
          double randD = (double)rand() / (double)RAND_MAX;
          if ( randD < fraction[labelmap[cn]] )
          {
            vector<int> tmp ( 3, 0 );
            tmp[0] = iCounter;
            tmp[1] = x;
            tmp[2] = y;
            featcounter++;
            selFeats.insert ( tmp );
            e[cn]++;
          }
        }
      }
    }
  }
  //cout << "size: " << selFeats.size() << endl;
  //getchar();

  // entropy of the class distribution in this node
  map<int,int>::iterator mapit;
  double globent = 0.0;
  for ( mapit = e.begin(); mapit != e.end(); mapit++ )
  {
    //cout << "class: " << mapit->first << ": " << mapit->second << endl;
    double p = (double)(*mapit).second / (double)featcounter;
    globent += p*log2(p);
  }
  globent = -globent;

  if ( globent < 0.5 )
  {
    cout << "globent too small: " << globent << endl;
    return;
  }

  // draw featsPerSplit random split candidates (offsets, channels, operation)
  featsel.clear();
  for ( int i = 0; i < featsPerSplit; i++ )
  {
    int x1 = (int)((double)rand() / (double)RAND_MAX*(double)windowSize) - windowSize/2;
    int x2 = (int)((double)rand() / (double)RAND_MAX*(double)windowSize) - windowSize/2;
    int y1 = (int)((double)rand() / (double)RAND_MAX*(double)windowSize) - windowSize/2;
    int y2 = (int)((double)rand() / (double)RAND_MAX*(double)windowSize) - windowSize/2;
    int f1 = (int)((double)rand() / (double)RAND_MAX*(double)featdim);
    int f2 = (int)((double)rand() / (double)RAND_MAX*(double)featdim);
    int o = (int)((double)rand() / (double)RAND_MAX*(double)ops.size());
    Operation *op = ops[o]->clone();
    op->set ( x1, y1, x2, y2, f1, f2 );
    featsel.push_back ( op );
  }
  // evaluate all candidates in parallel and keep the one with the highest information gain
#pragma omp parallel for private(mapit)
  for ( int f = 0; f < featsPerSplit; f++ )
  {
    double l_bestig = -numeric_limits< double >::max();
    double l_splitval = -1.0;
    set<vector<int> >::iterator it;
    vector<double> vals;
    for ( it = selFeats.begin(); it != selFeats.end(); it++ )
    {
      vals.push_back ( featsel[f]->getVal ( feats[(*it)[0]], (*it)[1], (*it)[2] ) );
    }

    // try each sampled feature value as threshold
    int counter = 0;
    for ( it = selFeats.begin(); it != selFeats.end(); it++, counter++ )
    {
      set<vector<int> >::iterator it2;
      double val = vals[counter];
      map<int,int> eL, eR;
      int counterL = 0, counterR = 0;
      int counter2 = 0;
      for ( it2 = selFeats.begin(); it2 != selFeats.end(); it2++, counter2++ )
      {
        int cn = labels[(*it2)[0]][(*it2)[1]][(*it2)[2]];
        //cout << "vals[counter2] " << vals[counter2] << " val: " << val << endl;
        if ( vals[counter2] < val )
        {
          //left entropy:
          eL[cn] = eL[cn]+1;
          counterL++;
        }
        else
        {
          //right entropy:
          eR[cn] = eR[cn]+1;
          counterR++;
        }
      }

      double leftent = 0.0;
      for ( mapit = eL.begin(); mapit != eL.end(); mapit++ )
      {
        double p = (double)(*mapit).second / (double)counterL;
        leftent -= p*log2(p);
      }

      double rightent = 0.0;
      for ( mapit = eR.begin(); mapit != eR.end(); mapit++ )
      {
        double p = (double)(*mapit).second / (double)counterR;
        rightent -= p*log2(p);
      }
      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;

      double pl = (double)counterL / (double)(counterL+counterR);
      double ig = globent - (1.0-pl)*rightent - pl*leftent;
      //double ig = globent - rightent - leftent;

      if ( useShannonEntropy )
      {
        // normalize the gain by the split entropy (gain ratio)
        double esplit = - ( pl*log(pl) + (1-pl)*log(1-pl) );
        ig = 2*ig / ( globent + esplit );
      }

      if ( ig > l_bestig )
      {
        l_bestig = ig;
        l_splitval = val;
      }
    }

#pragma omp critical
    {
      //cout << "globent: " << globent << " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
      //cout << "globent: " << globent << " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
      //cout << "p: " << featsubset[f] << endl;
      if ( l_bestig > bestig )
      {
        bestig = l_bestig;
        splitop = featsel[f];
        splitval = l_splitval;
      }
    }
  }

  /*for(int i = 0; i < featsPerSplit; i++)
  {
    if(featsel[i] != splitop)
      delete featsel[i];
  }*/

#ifdef DEBUG
  cout << "globent: " << globent << " bestig " << bestig << " splitval: " << splitval << endl;
#endif
}
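
/* Training: extracts per-pixel features and ground-truth labels for all
   training images, builds the label maps and class frequencies used for
   balancing, and then grows a single context tree breadth first. The vector
   currentfeats stores for every pixel the index of the tree node it is
   currently assigned to; each level of the growing loop splits all expandable
   nodes of the previous level. */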
void SemSegContextTree::train ( const MultiDataset *md )
{
  const LabeledSet train = * ( *md ) ["train"];
  const LabeledSet *trainp = &train;

  ProgressBar pb ( "compute feats" );
  pb.show();

  //TODO: memory hog! would a sparse representation pay off?
  vector<vector<vector<vector<double> > > > allfeats;
  vector<vector<vector<int> > > currentfeats;
  vector<vector<vector<int> > > labels;

  int imgcounter = 0;

  LOOP_ALL_S ( *trainp )
  {
    EACH_INFO ( classno, info );
    NICE::ColorImage img;

    std::string currentFile = info.img();
    CachedExample *ce = new CachedExample ( currentFile );

    const LocalizationResult *locResult = info.localization();
    if ( locResult->size() <= 0 )
    {
      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
                currentFile.c_str() );
      delete ce;
      continue;
    }

    fprintf ( stderr, "SemSegContextTree: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );

    try {
      img = ColorImage ( currentFile );
    } catch ( Exception ) {
      cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
      delete ce;
      continue;
    }

    int xsize, ysize;
    ce->getImageSize ( xsize, ysize );

    vector<vector<int> > tmp = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
    currentfeats.push_back ( tmp );
    labels.push_back ( tmp );

    Globals::setCurrentImgFN ( currentFile );

    //TODO: resize image?!
    vector<vector<vector<double> > > feats;
#if 0
    lfcw->getFeats ( img, feats );
#else
    // plain RGB values as features
    feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        for ( int r = 0; r < 3; r++ )
        {
          feats[x][y][r] = img.getPixel ( x, y, r );
        }
      }
    }
#endif
    allfeats.push_back ( feats );

    // getting ground truth
    NICE::Image pixelLabels ( xsize, ysize );
    pixelLabels.set ( 0 );
    locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );

    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        classno = pixelLabels.getPixel ( x, y );
        labels[imgcounter][x][y] = classno;
        labelcounter[classno]++;
        //if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
        //  continue;
      }
    }

    imgcounter++;
    pb.update ( trainp->count() );
    delete ce;
  }
  pb.hide();
  /* exhaustive enumeration of all split candidates (currently unused; random
     sampling in getBestSplit is used instead)
  int opsize = (int)ops.size();
  int featdim = (int)allfeats[0][0][0].size();
  for(int x1 = -windowSize/2; x1 < windowSize/2+1; x1++)
  {
    for(int y1 = -windowSize/2; y1 < windowSize/2+1; y1++)
    {
      for(int x2 = -windowSize/2; x2 < windowSize/2+1; x2++)
      {
        for(int y2 = -windowSize/2; y2 < windowSize/2+1; y2++)
        {
          for(int f = 0; f < featdim; f++)
          {
            for(int o = 0; o < opsize; o++)
            {
              vector<int> tmp(6,0);
              tmp[0] = x1;
              tmp[1] = y1;
              tmp[2] = x2;
              tmp[3] = y2;
              tmp[4] = f;
              tmp[5] = o;
              featsel.push_back(tmp);
            }
          }
        }
      }
    }
  }*/
  // map original class numbers to consecutive indices 0..classes-1
  map<int,int>::iterator mapit;
  int classes = 0;
  for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
  {
    labelmap[mapit->first] = classes;
    labelmapback[classes] = mapit->first;
    classes++;
  }

  // balancing: a[i] holds the relative frequency of class i in the training pixels
  int featcounter = 0;
  a = vector<double> ( classes, 0.0 );
  for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
  {
    int xsize = (int)currentfeats[iCounter].size();
    int ysize = (int)currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        featcounter++;
        int cn = labels[iCounter][x][y];
        a[labelmap[cn]]++;
      }
    }
  }

  for ( int i = 0; i < (int)a.size(); i++ )
  {
    a[i] /= (double)featcounter;
  }

#ifdef DEBUG
  for ( int i = 0; i < (int)a.size(); i++ )
  {
    cout << "a[" << i << "]: " << a[i] << endl;
  }
#endif
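
  /* Grow the tree level by level: the nodes with indices in [startnode, t) form
     the current frontier. For each frontier node a split is searched; if one is
     found, two children are appended, the node's pixels are redistributed to
     them and the children's class distributions are accumulated and normalized.
     Otherwise the node becomes a leaf. Growth stops when no node was split or
     maxDepth is reached. */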
  // the root node gets all pixels (currentfeats is initialized with 0)
  tree.push_back ( Node() );
  tree[0].dist = vector<double> ( classes, 0.0 );
  int depth = 0;
  tree[0].depth = depth;

  int startnode = 0;
  bool allleaf = false;

  while ( !allleaf && depth < maxDepth )
  {
    allleaf = true;
    //TODO: maybe parallelize this loop as well, although the inner loop carries more weight as long as it stays parallelized
    int t = (int)tree.size();
    int s = startnode;
    startnode = t;
    //#pragma omp parallel for
    for ( int i = s; i < t; i++ )
    {
      if ( !tree[i].isleaf && tree[i].left < 0 )
      {
        Operation *splitfeat = NULL;
        double splitval;
        getBestSplit ( allfeats, currentfeats, labels, i, splitfeat, splitval );
        tree[i].feat = splitfeat;
        tree[i].decision = splitval;

        if ( splitfeat != NULL )
        {
          allleaf = false;
          int left = tree.size();
          tree.push_back ( Node() );
          tree.push_back ( Node() );
          int right = left+1;
          tree[i].left = left;
          tree[i].right = right;
          tree[left].dist = vector<double> ( classes, 0.0 );
          tree[right].dist = vector<double> ( classes, 0.0 );
          tree[left].depth = depth+1;
          tree[right].depth = depth+1;

          // reassign the pixels of node i to its new children
#pragma omp parallel for
          for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
          {
            int xsize = (int)currentfeats[iCounter].size();
            int ysize = (int)currentfeats[iCounter][0].size();
            for ( int x = 0; x < xsize; x++ )
            {
              for ( int y = 0; y < ysize; y++ )
              {
                if ( currentfeats[iCounter][x][y] == i )
                {
                  double val = splitfeat->getVal ( allfeats[iCounter], x, y );
                  if ( val < splitval )
                  {
                    currentfeats[iCounter][x][y] = left;
                    // guard the shared class histogram against concurrent increments
#pragma omp atomic
                    tree[left].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                  else
                  {
                    currentfeats[iCounter][x][y] = right;
#pragma omp atomic
                    tree[right].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                }
              }
            }
          }

          double lcounter = 0.0, rcounter = 0.0;
          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            //tree[left].dist[d] /= a[d];
            lcounter += tree[left].dist[d];
            //tree[right].dist[d] /= a[d];
            rcounter += tree[right].dist[d];
          }

          if ( lcounter <= 0 || rcounter <= 0 )
          {
            cout << "lcounter : " << lcounter << " rcounter: " << rcounter << endl;
            cout << "splitval: " << splitval << endl;
            assert ( lcounter > 0 && rcounter > 0 );
          }

          // normalize the child class distributions
          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            tree[left].dist[d] /= lcounter;
            tree[right].dist[d] /= rcounter;
          }
        }
        else
        {
          tree[i].isleaf = true;
        }
      }
    }
    //TODO: recompute the features!
    depth++;
#ifdef DEBUG
    cout << "depth: " << depth << endl;
#endif
  }

#ifdef DEBUG
  int t = (int)tree.size();
  for ( int i = 0; i < t; i++ )
  {
    printf ( "tree[%i]: left: %i, right: %i ", i, tree[i].left, tree[i].right );
    for ( int d = 0; d < (int)tree[i].dist.size(); d++ )
    {
      cout << " " << tree[i].dist[d];
    }
    cout << endl;
  }
#endif
}
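
/* Classification: recomputes the same per-pixel features as in training, sends
   every pixel down the tree until it reaches a leaf, and writes the leaf's
   class distribution into the probability maps; the pixel label is the class
   with the highest probability. */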
void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities )
{
  int xsize;
  int ysize;
  ce->getImageSize ( xsize, ysize );

  int numClasses = classNames->numClasses();

  fprintf ( stderr, "ContextTree classification !\n" );

  probabilities.reInit ( xsize, ysize, numClasses, true );
  probabilities.setAll ( 0 );

  NICE::ColorImage img;
  std::string currentFile = Globals::getCurrentImgFN();
  try {
    img = ColorImage ( currentFile );
  } catch ( Exception ) {
    cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
    return;
  }

  //TODO: resize image?!
  vector<vector<vector<double> > > feats;
#if 0
  lfcw->getFeats ( img, feats );
#else
  // plain RGB values as features (must match the training features)
  feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++ )
    {
      for ( int r = 0; r < 3; r++ )
      {
        feats[x][y][r] = img.getPixel ( x, y, r );
      }
    }
  }
#endif
  // descend every pixel through the tree, one level per iteration
  bool allleaf = false;
  vector<vector<int> > currentfeats = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
  int depth = 0;

  while ( !allleaf )
  {
    allleaf = true;
    //TODO: maybe parallelize; as in training, the inner loop carries more weight
    //#pragma omp parallel for
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        int t = currentfeats[x][y];
        if ( tree[t].left > 0 )
        {
          allleaf = false;
          double val = tree[t].feat->getVal ( feats, x, y );
          if ( val < tree[t].decision )
          {
            currentfeats[x][y] = tree[t].left;
          }
          else
          {
            currentfeats[x][y] = tree[t].right;
          }
        }
      }
    }
    //TODO: recompute the features, analogous to training
    depth++;
  }
  // final labeling: write each pixel's leaf distribution into the probability
  // maps and assign the most likely class
  long int offset = 0;
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++, offset++ )
    {
      int t = currentfeats[x][y];
      double maxvalue = -numeric_limits<double>::max(); //TODO: this only needs to be done once per node, not per pixel
      int maxindex = 0;
      for ( uint i = 0; i < tree[t].dist.size(); i++ )
      {
        probabilities.data[labelmapback[i]][offset] = tree[t].dist[i];
        if ( tree[t].dist[i] > maxvalue )
        {
          maxvalue = tree[t].dist[i];
          maxindex = labelmapback[i];
        }
      }
      segresult.setPixel ( x, y, maxindex );
    }
  }
}