/**
 * @file SemSegContextTree.cpp
 * @brief Semantic segmentation with context trees
 */
#include <objrec/nice.h>
#include <iostream>
#include <cmath>    // log, log2, fabs
#include <cstdlib>  // rand, RAND_MAX
#include <limits>   // numeric_limits
#include <cassert>

#include "SemSegContextTree.h"
#include "objrec/baselib/Globals.h"
#include "objrec/baselib/ProgressBar.h"
#include "objrec/baselib/StringTools.h"
#include "objrec/cbaselib/CachedExample.h"
#include "objrec/cbaselib/PascalResults.h"

#include <omp.h>

#define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))

using namespace OBJREC;
using namespace std;
using namespace NICE;
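
// Candidate split features: every Operation (declared in the included header)
// compares up to two feature channels at the fixed offsets (x1,y1) and (x2,y2)
// around the query pixel. The offsets and channels are chosen randomly in
// getBestSplit() via Operation::set(); BOUND clamps the coordinates to the
// image borders.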
/** difference of two feature values */
class Minus: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return v1 - v2;
  }

  virtual Operation* clone()
  {
    return new Minus();
  }
};

/** absolute difference of two feature values */
class MinusAbs: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return fabs ( v1 - v2 ); // fabs instead of abs: the values are doubles
  }

  virtual Operation* clone()
  {
    return new MinusAbs();
  }
};

/** sum of two feature values */
class Addition: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    double v2 = feats[BOUND(x+x2,0,xsize-1)][BOUND(y+y2,0,ysize-1)][channel2];
    return v1 + v2;
  }

  virtual Operation* clone()
  {
    return new Addition();
  }
};

/** value of the first point only */
class Only1: public Operation
{
public:
  virtual double getVal ( const vector<vector<vector<double> > > &feats, const int &x, const int &y )
  {
    int xsize = feats.size();
    int ysize = feats[0].size();
    double v1 = feats[BOUND(x+x1,0,xsize-1)][BOUND(y+y1,0,ysize-1)][channel1];
    return v1;
  }

  virtual Operation* clone()
  {
    return new Only1();
  }
};
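
// Reads the "SSContextTree" section of the configuration (sampling grid,
// maximum number of sampled pixels per split, minimum number of pixels per
// node, maximum tree depth, offset window size, number of random tests per
// split, Shannon-entropy normalization), registers the available feature
// operations and immediately starts training on the given dataset.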
SemSegContextTree::SemSegContextTree ( const Config *conf, const MultiDataset *md )
    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
{
  string section = "SSContextTree";
  lfcw = new LFColorWeijer ( conf );

  grid = conf->gI ( section, "grid", 10 );
  maxSamples = conf->gI ( section, "max_samples", 2000 );
  minFeats = conf->gI ( section, "min_feats", 50 );
  maxDepth = conf->gI ( section, "max_depth", 10 );
  windowSize = conf->gI ( section, "window_size", 16 );
  featsPerSplit = conf->gI ( section, "feats_per_split", 200 );
  useShannonEntropy = conf->gB ( section, "use_shannon_entropy", true );

  ops.push_back ( new Minus() );
  ops.push_back ( new MinusAbs() );
  ops.push_back ( new Addition() );
  ops.push_back ( new Only1() );

  ///////////////////////////////////
  // Train Segmentation Context Trees
  ///////////////////////////////////
  train ( md );
}

SemSegContextTree::~SemSegContextTree()
{
}
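
// Searches the best split for tree node 'node': the pixels currently assigned
// to the node are subsampled (class-balanced via the frequencies in a), a set
// of featsPerSplit random Operations is drawn, and every candidate threshold
// is scored by its information gain over the sampled labels (optionally
// normalized by the split entropy if use_shannon_entropy is set). The winning
// test is returned through splitop/splitval; splitop stays NULL if the node
// should become a leaf.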
void SemSegContextTree::getBestSplit ( const vector<vector<vector<vector<double> > > > &feats, vector<vector<vector<int> > > &currentfeats, const vector<vector<vector<int> > > &labels, int node, Operation *&splitop, double &splitval )
{
  int imgCount = 0, featdim = 0;
  try
  {
    imgCount = ( int ) feats.size();
    featdim = feats[0][0][0].size();
  }
  catch ( Exception& )
  {
    cerr << "no features computed?" << endl;
    return; // without features there is nothing to split on
  }

  double bestig = -numeric_limits< double >::max();
  splitop = NULL;
  splitval = -1.0;

  set<vector<int> > selFeats;
  map<int, int> e;
  int featcounter = 0;

  // count the pixels currently assigned to this node
  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = ( int ) currentfeats[iCounter].size();
    int ysize = ( int ) currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          featcounter++;
        }
      }
    }
  }

  // class-balanced sampling probabilities, roughly maxSamples pixels in total
  vector<double> fraction ( a.size(), 0.0 );
  for ( uint i = 0; i < fraction.size(); i++ )
  {
    fraction[i] = ( ( double ) maxSamples ) / ( ( double ) featcounter * a[i] * a.size() );
    //cout << "fraction["<<i<<"]: "<< fraction[i] << " a[" << i << "]: " << a[i] << endl;
  }
  //cout << "a.size(): " << a.size() << endl;
  //getchar();

  featcounter = 0;
  for ( int iCounter = 0; iCounter < imgCount; iCounter++ )
  {
    int xsize = ( int ) currentfeats[iCounter].size();
    int ysize = ( int ) currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        if ( currentfeats[iCounter][x][y] == node )
        {
          int cn = labels[iCounter][x][y];
          double randD = ( double ) rand() / ( double ) RAND_MAX;
          if ( randD < fraction[labelmap[cn]] )
          {
            vector<int> tmp ( 3, 0 );
            tmp[0] = iCounter;
            tmp[1] = x;
            tmp[2] = y;
            featcounter++;
            selFeats.insert ( tmp );
            e[cn]++;
          }
        }
      }
    }
  }

  cout << "size: " << selFeats.size() << endl;
  //getchar();

  // entropy of the class distribution in this node
  map<int, int>::iterator mapit;
  double globent = 0.0;
  for ( mapit = e.begin(); mapit != e.end(); mapit++ )
  {
    //cout << "class: " << mapit->first << ": " << mapit->second << endl;
    double p = ( double ) ( *mapit ).second / ( double ) featcounter;
    globent += p * log2 ( p );
  }
  globent = -globent;

  if ( globent < 0.5 )
  {
    cout << "globent too small: " << globent << endl;
    return;
  }

  if ( featcounter < minFeats )
  {
    cout << "only " << featcounter << " feats in current node -> it's a leaf" << endl;
    return;
  }

  // draw featsPerSplit random candidate operations
  featsel.clear();
  for ( int i = 0; i < featsPerSplit; i++ )
  {
    int x1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) windowSize ) - windowSize / 2;
    int x2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) windowSize ) - windowSize / 2;
    int y1 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) windowSize ) - windowSize / 2;
    int y2 = ( int ) ( ( double ) rand() / ( double ) RAND_MAX * ( double ) windowSize ) - windowSize / 2;
    int f1 = rand() % featdim;          // modulo keeps the channel indices in range
    int f2 = rand() % featdim;
    int o = rand() % ( int ) ops.size();
    Operation *op = ops[o]->clone();
    op->set ( x1, y1, x2, y2, f1, f2 );
    featsel.push_back ( op );
  }

#pragma omp parallel for private(mapit)
  for ( int f = 0; f < featsPerSplit; f++ )
  {
    double l_bestig = -numeric_limits< double >::max();
    double l_splitval = -1.0;
    set<vector<int> >::iterator it;
    vector<double> vals;
    for ( it = selFeats.begin(); it != selFeats.end(); it++ )
    {
      vals.push_back ( featsel[f]->getVal ( feats[ (*it)[0] ], (*it)[1], (*it)[2] ) );
    }

    int counter = 0;
    for ( it = selFeats.begin(); it != selFeats.end(); it++, counter++ )
    {
      set<vector<int> >::iterator it2;
      double val = vals[counter];

      map<int, int> eL, eR;
      int counterL = 0, counterR = 0;
      int counter2 = 0;
      for ( it2 = selFeats.begin(); it2 != selFeats.end(); it2++, counter2++ )
      {
        int cn = labels[ (*it2)[0] ][ (*it2)[1] ][ (*it2)[2] ];
        //cout << "vals[counter2] " << vals[counter2] << " val: " << val << endl;
        if ( vals[counter2] < val )
        {
          // left entropy:
          eL[cn] = eL[cn] + 1;
          counterL++;
        }
        else
        {
          // right entropy:
          eR[cn] = eR[cn] + 1;
          counterR++;
        }
      }

      double leftent = 0.0;
      for ( mapit = eL.begin(); mapit != eL.end(); mapit++ )
      {
        double p = ( double ) ( *mapit ).second / ( double ) counterL;
        leftent -= p * log2 ( p );
      }

      double rightent = 0.0;
      for ( mapit = eR.begin(); mapit != eR.end(); mapit++ )
      {
        double p = ( double ) ( *mapit ).second / ( double ) counterR;
        rightent -= p * log2 ( p );
      }
      //cout << "rightent: " << rightent << " leftent: " << leftent << endl;

      // information gain; pl must be computed in floating point, not as an integer division
      double pl = ( double ) counterL / ( double ) ( counterL + counterR );
      double ig = globent - ( 1.0 - pl ) * rightent - pl * leftent;

      if ( useShannonEntropy )
      {
        double esplit = - ( pl * log ( pl ) + ( 1 - pl ) * log ( 1 - pl ) );
        ig = 2 * ig / ( globent + esplit );
      }

      if ( ig > l_bestig )
      {
        l_bestig = ig;
        l_splitval = val;
      }
    }

#pragma omp critical
    {
      //cout << "globent: " << globent << " bestig " << bestig << " splitfeat: " << splitfeat << " splitval: " << splitval << endl;
      //cout << "globent: " << globent << " l_bestig " << l_bestig << " f: " << p << " l_splitval: " << l_splitval << endl;
      //cout << "p: " << featsubset[f] << endl;
      if ( l_bestig > bestig )
      {
        bestig = l_bestig;
        splitop = featsel[f];
        splitval = l_splitval;
      }
    }
  }

  /*for(int i = 0; i < featsPerSplit; i++)
  {
    if(featsel[i] != splitop)
      delete featsel[i];
  }*/

  cout << "globent: " << globent << " bestig " << bestig << " splitval: " << splitval << endl;
}
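
// Training: per-pixel features and labels are collected for every training
// image (currently the raw RGB values; the LFColorWeijer features are disabled
// via #if 0), the class frequencies a[] are estimated for balancing, and the
// context tree is grown breadth-first: for every open node a split is searched
// with getBestSplit(), the pixels are routed to the two new children and the
// children's class distributions are normalized.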
void SemSegContextTree::train ( const MultiDataset *md )
{
  const LabeledSet train = * ( *md ) ["train"];
  const LabeledSet *trainp = &train;

  ProgressBar pb ( "compute feats" );
  pb.show();

  //TODO: memory hog! would a sparse representation pay off?
  vector<vector<vector<vector<double> > > > allfeats;
  vector<vector<vector<int> > > currentfeats;
  vector<vector<vector<int> > > labels;
  int imgcounter = 0;

  LOOP_ALL_S ( *trainp )
  {
    EACH_INFO ( classno, info );

    NICE::ColorImage img;
    std::string currentFile = info.img();
    CachedExample *ce = new CachedExample ( currentFile );

    const LocalizationResult *locResult = info.localization();
    if ( locResult->size() <= 0 )
    {
      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
                currentFile.c_str() );
      continue;
    }

    fprintf ( stderr, "SemSegContextTree: Collecting pixel examples from localization info: %s\n", currentFile.c_str() );

    int xsize, ysize;
    ce->getImageSize ( xsize, ysize );

    vector<vector<int> > tmp = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
    currentfeats.push_back ( tmp );
    labels.push_back ( tmp );

    try {
      img = ColorImage ( currentFile );
    } catch ( Exception& ) {
      cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
      continue;
    }

    Globals::setCurrentImgFN ( currentFile );

    //TODO: resize image?!
    vector<vector<vector<double> > > feats;
#if 0
    lfcw->getFeats ( img, feats );
#else
    // raw RGB values as features
    feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        for ( int r = 0; r < 3; r++ )
        {
          feats[x][y][r] = img.getPixel ( x, y, r );
        }
      }
    }
#endif
    allfeats.push_back ( feats );

    // get the ground-truth labels
    NICE::Image pixelLabels ( xsize, ysize );
    pixelLabels.set ( 0 );
    locResult->calcLabeledImage ( pixelLabels, ( *classNames ).getBackgroundClass() );

    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        classno = pixelLabels.getPixel ( x, y );
        labels[imgcounter][x][y] = classno;
        labelcounter[classno]++;
        //if ( forbidden_classes.find ( classno ) != forbidden_classes.end() )
        //continue;
      }
    }

    imgcounter++;
    pb.update ( trainp->count() );
    delete ce;
  }
  pb.hide();

  /*int opsize = (int)ops.size();
  int featdim = (int)allfeats[0][0][0].size();
  for(int x1 = -windowSize/2; x1 < windowSize/2+1; x1++)
  {
    for(int y1 = -windowSize/2; y1 < windowSize/2+1; y1++)
    {
      for(int x2 = -windowSize/2; x2 < windowSize/2+1; x2++)
      {
        for(int y2 = -windowSize/2; y2 < windowSize/2+1; y2++)
        {
          for(int f = 0; f < featdim; f++)
          {
            for(int o = 0; o < opsize; o++)
            {
              vector<int> tmp(6,0);
              tmp[0] = x1;
              tmp[1] = y1;
              tmp[2] = x2;
              tmp[3] = y2;
              tmp[4] = f;
              tmp[5] = o;
              featsel.push_back(tmp);
            }
          }
        }
      }
    }
  }*/

  // map the (sparse) class numbers to consecutive indices and back
  map<int, int>::iterator mapit;
  int classes = 0;
  for ( mapit = labelcounter.begin(); mapit != labelcounter.end(); mapit++ )
  {
    labelmap[mapit->first] = classes;
    labelmapback[classes] = mapit->first;
    classes++;
  }

  // class balancing: a[c] is the relative frequency of class c
  int featcounter = 0;
  a = vector<double> ( classes, 0.0 );
  for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
  {
    int xsize = ( int ) currentfeats[iCounter].size();
    int ysize = ( int ) currentfeats[iCounter][0].size();
    for ( int x = 0; x < xsize; x++ )
    {
      for ( int y = 0; y < ysize; y++ )
      {
        featcounter++;
        int cn = labels[iCounter][x][y];
        a[labelmap[cn]]++;
      }
    }
  }

  for ( int i = 0; i < ( int ) a.size(); i++ )
  {
    a[i] /= ( double ) featcounter;
    cout << "a[" << i << "]: " << a[i] << endl;
  }

  // grow the tree breadth-first
  tree.push_back ( Node() );
  tree[0].dist = vector<double> ( classes, 0.0 );
  int depth = 0;
  tree[0].depth = depth;
  bool allleaf = false;

  while ( !allleaf && depth < maxDepth )
  {
    allleaf = true;
    //TODO: maybe parallelize this loop as well, if the next loop would still be parallelized; that one carries more weight
    int t = ( int ) tree.size();
    //#pragma omp parallel for
    for ( int i = 0; i < t; i++ )
    {
      if ( !tree[i].isleaf && tree[i].left < 0 )
      {
        Operation *splitfeat = NULL;
        double splitval;
        getBestSplit ( allfeats, currentfeats, labels, i, splitfeat, splitval );

        tree[i].feat = splitfeat;
        tree[i].decision = splitval;

        if ( splitfeat != NULL )
        {
          allleaf = false;
          int left = tree.size();
          tree.push_back ( Node() );
          tree.push_back ( Node() );
          int right = left + 1;
          tree[i].left = left;
          tree[i].right = right;
          tree[left].dist = vector<double> ( classes, 0.0 );
          tree[right].dist = vector<double> ( classes, 0.0 );
          tree[left].depth = depth + 1;
          tree[right].depth = depth + 1;

          // route the pixels of node i to its new children
#pragma omp parallel for
          for ( int iCounter = 0; iCounter < imgcounter; iCounter++ )
          {
            int xsize = currentfeats[iCounter].size();
            int ysize = currentfeats[iCounter][0].size();
            for ( int x = 0; x < xsize; x++ )
            {
              for ( int y = 0; y < ysize; y++ )
              {
                if ( currentfeats[iCounter][x][y] == i )
                {
                  double val = splitfeat->getVal ( allfeats[iCounter], x, y );
                  if ( val < splitval )
                  {
                    currentfeats[iCounter][x][y] = left;
                    // atomic: several image threads may update the same class bin
#pragma omp atomic
                    tree[left].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                  else
                  {
                    currentfeats[iCounter][x][y] = right;
#pragma omp atomic
                    tree[right].dist[labelmap[labels[iCounter][x][y]]]++;
                  }
                }
              }
            }
          }

          double lcounter = 0.0, rcounter = 0.0;
          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            //tree[left].dist[d]/=a[d];
            lcounter += tree[left].dist[d];
            //tree[right].dist[d]/=a[d];
            rcounter += tree[right].dist[d];
          }
          assert ( lcounter > 0 && rcounter > 0 );
          for ( uint d = 0; d < tree[left].dist.size(); d++ )
          {
            tree[left].dist[d] /= lcounter;
            tree[right].dist[d] /= rcounter;
          }
        }
        else
        {
          tree[i].isleaf = true;
        }
      }
    }
    //TODO: recompute the features!
    depth++;
    cout << "d: " << depth << endl;
  }

  // print the final tree
  int t = ( int ) tree.size();
  for ( int i = 0; i < t; i++ )
  {
    printf ( "tree[%i]: left: %i, right: %i ", i, tree[i].left, tree[i].right );
    for ( int d = 0; d < ( int ) tree[i].dist.size(); d++ )
    {
      cout << " " << tree[i].dist[d];
    }
    cout << endl;
  }
}
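
// Classification: the per-pixel features of the current image are recomputed
// exactly as in training, every pixel is routed down the trained tree until it
// reaches a leaf, and the leaf's class distribution is written to
// probabilities while the arg-max class goes into segresult.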
void SemSegContextTree::semanticseg ( CachedExample *ce, NICE::Image & segresult, GenericImage<double> & probabilities )
{
  int xsize;
  int ysize;
  ce->getImageSize ( xsize, ysize );

  int numClasses = classNames->numClasses();

  fprintf ( stderr, "ContextTree classification !\n" );

  probabilities.reInit ( xsize, ysize, numClasses, true );
  probabilities.setAll ( 0 );

  NICE::ColorImage img;
  std::string currentFile = Globals::getCurrentImgFN();
  try {
    img = ColorImage ( currentFile );
  } catch ( Exception& ) {
    cerr << "SemSeg: error opening image file <" << currentFile << ">" << endl;
    return;
  }

  //TODO: resize image?!
  vector<vector<vector<double> > > feats;
#if 0
  lfcw->getFeats ( img, feats );
#else
  // raw RGB values as features (same as in training)
  feats = vector<vector<vector<double> > > ( xsize, vector<vector<double> > ( ysize, vector<double> ( 3, 0.0 ) ) );
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++ )
    {
      for ( int r = 0; r < 3; r++ )
      {
        feats[x][y][r] = img.getPixel ( x, y, r );
      }
    }
  }
#endif

  // route every pixel down the tree until all pixels sit in a leaf
  bool allleaf = false;
  vector<vector<int> > currentfeats = vector<vector<int> > ( xsize, vector<int> ( ysize, 0 ) );
  int depth = 0;
  while ( !allleaf )
  {
    allleaf = true;
    //TODO: maybe parallelize, if the next loop would still be parallelized as well; that one carries more weight
    //#pragma omp parallel for
    int t = ( int ) tree.size();
    // note: i is not used as a node index here; every pass over the image
    // advances each pixel one level down the tree
    for ( int i = 0; i < t; i++ )
    {
      for ( int x = 0; x < xsize; x++ )
      {
        for ( int y = 0; y < ysize; y++ )
        {
          int node = currentfeats[x][y];
          if ( tree[node].left > 0 )
          {
            allleaf = false;
            double val = tree[node].feat->getVal ( feats, x, y );
            if ( val < tree[node].decision )
            {
              currentfeats[x][y] = tree[node].left;
            }
            else
            {
              currentfeats[x][y] = tree[node].right;
            }
          }
        }
      }
    }
    //TODO: recompute the features, analogous to training
    depth++;
  }

  // final labeling: copy the leaf distributions and take the arg-max class
  long int offset = 0;
  for ( int x = 0; x < xsize; x++ )
  {
    for ( int y = 0; y < ysize; y++, offset++ )
    {
      int node = currentfeats[x][y];
      double maxvalue = -numeric_limits<double>::max(); //TODO: this only has to be done once per node, not per pixel
      int maxindex = 0;
      for ( uint i = 0; i < tree[node].dist.size(); i++ )
      {
        probabilities.data[labelmapback[i]][offset] = tree[node].dist[i];
        if ( tree[node].dist[i] > maxvalue )
        {
          maxvalue = tree[node].dist[i];
          maxindex = labelmapback[i];
        }
      }
      segresult.setPixel ( x, y, maxindex );
    }
  }
}