SemSegNovelty.cpp

#include <sstream>
#include <iostream>

#include "SemSegNovelty.h"

#include "core/image/FilterT.h"
#include "gp-hik-exp/GPHIKClassifierNICE.h"
#include "vislearning/baselib/ICETools.h"
#include "vislearning/baselib/Globals.h"
#include "vislearning/features/fpfeatures/SparseVectorFeature.h"

#include "core/basics/StringTools.h"
#include "core/basics/Timer.h"

#include "segmentation/GenericRegionSegmentationMethodSelection.h"

using namespace std;
using namespace NICE;
using namespace OBJREC;
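/**
 * Constructor: reads all settings from the given Config (feature extractor,
 * GP-HIK classifier, window sizes, optional region segmentation) and either
 * loads a cached classifier from disk or trains a new one on the "train" set.
 */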
SemSegNovelty::SemSegNovelty ( const Config *conf,
                               const MultiDataset *md )
    : SemanticSegmentation ( conf, & ( md->getClassNames ( "train" ) ) )
{
  this->conf = conf;
  globalMaxUncert = -numeric_limits<double>::max();

  string section = "SemSegNovelty";

  featExtract = new LFColorWeijer ( conf );

  save_cache = conf->gB ( "FPCPixel", "save_cache", true );
  read_cache = conf->gB ( "FPCPixel", "read_cache", false );
  uncertdir = conf->gS ( "debug", "uncertainty", "uncertainty" );
  cache = conf->gS ( "cache", "root", "" );

  classifier = new GPHIKClassifierNICE ( conf, "ClassiferGPHIK" );

  findMaximumUncert = conf->gB ( section, "findMaximumUncert", true );
  whs = conf->gI ( section, "window_size", 10 );
  featdist = conf->gI ( section, "grid", 10 );
  testWSize = conf->gI ( section, "test_window_size", 10 );

  string rsMethode = conf->gS ( section, "segmentation", "none" );

  if ( rsMethode == "none" )
  {
    regionSeg = NULL;
  }
  else
  {
    RegionSegmentationMethod *tmpRegionSeg = GenericRegionSegmentationMethodSelection::selectRegionSegmentationMethod ( conf, rsMethode );
    if ( save_cache )
      regionSeg = new RSCache ( conf, tmpRegionSeg );
    else
      regionSeg = tmpRegionSeg;
  }

  cn = md->getClassNames ( "train" );

  if ( read_cache )
  {
    string classifierdst = "/classifier.data";
    fprintf ( stderr, "SemSegNovelty:: Reading classifier data from %s\n", ( cache + classifierdst ).c_str() );

    try
    {
      if ( classifier != NULL )
      {
        classifier->read ( cache + classifierdst );
      }

      fprintf ( stderr, "SemSegNovelty:: successfully read\n" );
    }
    catch ( char *str )
    {
      cerr << "error reading data: " << str << endl;
    }
  }
  else
  {
    train ( md );
  }
}
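// Destructor: if novel training examples were collected during testing,
// display the most uncertain region and save the classifier; afterwards
// free the classifier and the feature extractor.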
SemSegNovelty::~SemSegNovelty()
{
  if ( newTrainExamples.size() > 0 )
  {
    // most uncertain region
    showImage ( maskedImg );
    //classifier->add(newTrainExamples)
    classifier->save ( cache + "/classifier.data" );
  }

  // clean-up
  if ( classifier != NULL )
    delete classifier;
  if ( featExtract != NULL )
    delete featExtract;
}
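// Copies img into outimage and paints all pixels belonging to the given
// region id (according to the region matrix) in red.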
void SemSegNovelty::visualizeRegion ( const NICE::ColorImage &img, const NICE::Matrix &regions, int region, NICE::ColorImage &outimage )
{
  vector<uchar> color;
  color.push_back ( 255 );
  color.push_back ( 0 );
  color.push_back ( 0 );

  int width = img.width();
  int height = img.height();

  outimage.resize ( width, height );

  for ( int y = 0; y < height; y++ )
  {
    for ( int x = 0; x < width; x++ )
    {
      if ( regions ( x, y ) == region )
      {
        for ( int c = 0; c < 3; c++ )
        {
          outimage ( x, y, c ) = color[c];
        }
      }
      else
      {
        for ( int c = 0; c < 3; c++ )
        {
          outimage ( x, y, c ) = img ( x, y, c );
        }
      }
    }
  }
}
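// Trains the classifier: extracts color and gradient-strength features on a
// regular grid over all training images (skipping forbidden classes),
// accumulates them via integral images into sparse window descriptors, and
// trains the GP-HIK classifier on the resulting examples (optionally caching
// the model to disk).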
void SemSegNovelty::train ( const MultiDataset *md )
{
  const LabeledSet train = * ( *md ) ["train"];
  const LabeledSet *trainp = &train;

  ////////////////////////
  // feature extraction //
  ////////////////////////

  std::string forbidden_classes_s = conf->gS ( "analysis", "donttrain", "" );
  if ( forbidden_classes_s == "" )
  {
    forbidden_classes_s = conf->gS ( "analysis", "forbidden_classes", "" );
  }
  cn.getSelection ( forbidden_classes_s, forbidden_classes );

  // check the same thing for the training classes - this is very specific to our setup
  std::string forbidden_classesTrain_s = conf->gS ( "analysis", "donttrainTrain", "" );
  if ( forbidden_classesTrain_s == "" )
  {
    forbidden_classesTrain_s = conf->gS ( "analysis", "forbidden_classesTrain", "" );
  }
  cn.getSelection ( forbidden_classesTrain_s, forbidden_classesTrain );

  ProgressBar pb ( "Local Feature Extraction" );
  pb.show();

  int imgnb = 0;

  Examples examples;
  examples.filename = "training";

  int featdim = -1;

  classesInUse.clear();

  LOOP_ALL_S ( *trainp )
  {
    //EACH_S(classno, currentFile);
    EACH_INFO ( classno, info );

    std::string currentFile = info.img();

    CachedExample *ce = new CachedExample ( currentFile );

    const LocalizationResult *locResult = info.localization();
    if ( locResult->size() <= 0 )
    {
      fprintf ( stderr, "WARNING: NO ground truth polygons found for %s !\n",
                currentFile.c_str() );
      continue;
    }

    int xsize, ysize;
    ce->getImageSize ( xsize, ysize );

    Image labels ( xsize, ysize );
    labels.set ( 0 );
    locResult->calcLabeledImage ( labels, ( *classNames ).getBackgroundClass() );

    NICE::ColorImage img;
    try {
      img = ColorImage ( currentFile );
    } catch ( Exception ) {
      cerr << "SemSegNovelty: error opening image file <" << currentFile << ">" << endl;
      continue;
    }

    Globals::setCurrentImgFN ( currentFile );

    MultiChannelImageT<double> feats;

    // extract features
    featExtract->getFeats ( img, feats );
    featdim = feats.channels();
    feats.addChannel ( featdim );

    for ( int c = 0; c < featdim; c++ )
    {
      ImageT<double> tmp = feats[c];
      ImageT<double> tmp2 = feats[c + featdim];
      NICE::FilterT<double, double, double>::gradientStrength ( tmp, tmp2 );
    }
    featdim += featdim;

    // compute integral images
    for ( int c = 0; c < featdim; c++ )
    {
      feats.calcIntegral ( c );
    }

    for ( int y = 0; y < ysize; y += featdist )
    {
      for ( int x = 0; x < xsize; x += featdist )
      {
        int classnoTmp = labels.getPixel ( x, y );

        if ( forbidden_classesTrain.find ( classnoTmp ) != forbidden_classesTrain.end() )
        {
          continue;
        }

        if ( classesInUse.find ( classnoTmp ) == classesInUse.end() )
        {
          classesInUse.insert ( classnoTmp );
        }

        Example example;
        example.vec = NULL;
        example.svec = new SparseVector ( featdim );

        for ( int f = 0; f < featdim; f++ )
        {
          double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
          if ( val > 1e-10 )
            ( *example.svec ) [f] = val;
        }

        example.svec->normalize();
        example.position = imgnb;
        examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
      }
    }

    delete ce;
    imgnb++;
    pb.update ( trainp->count() );
  }

  numberOfClasses = classesInUse.size();
  std::cerr << "numberOfClasses: " << numberOfClasses << std::endl;
  std::cerr << "classes in use: " << std::endl;
  for ( std::set<int>::const_iterator it = classesInUse.begin(); it != classesInUse.end(); it++ )
  {
    std::cerr << *it << " ";
  }
  std::cerr << std::endl;

  pb.hide();

  //////////////////////
  // train classifier //
  //////////////////////

  FeaturePool fp;

  Feature *f = new SparseVectorFeature ( featdim );
  f->explode ( fp );
  delete f;

  if ( classifier != NULL )
    classifier->train ( fp, examples );
  else
  {
    cerr << "no classifier selected?!" << endl;
    exit ( -1 );
  }

  fp.destroy();

  if ( save_cache )
  {
    if ( classifier != NULL )
      classifier->save ( cache + "/classifier.data" );
  }

  ////////////
  //clean up//
  ////////////

  for ( int i = 0; i < ( int ) examples.size(); i++ )
  {
    examples[i].second.clean();
  }
  examples.clear();

  cerr << "SemSeg training finished" << endl;
}
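// Classifies a test image window-wise: for each grid position the window
// descriptor is classified with the GP-HIK model, per-class scores and the
// predictive uncertainty are written back, and several novelty measures
// (GP uncertainty, GP mean, GP mean ratio, GP weight) are computed.
// If a region segmentation is configured, scores and novelty are averaged
// per region, and the globally most uncertain region is remembered as a
// candidate for new training examples. Raw novelty maps are written to disk.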
void SemSegNovelty::semanticseg ( CachedExample *ce, NICE::Image & segresult, NICE::MultiChannelImageT<double> & probabilities )
{
  Timer timer;
  timer.start();

  Image labels = segresult;
  segresult.set ( 0 );

  int featdim = -1;

  std::string currentFile = Globals::getCurrentImgFN();

  int xsize, ysize;
  ce->getImageSize ( xsize, ysize );

  probabilities.reInit ( xsize, ysize, cn.getMaxClassno() + 1 );
  probabilities.setAll ( 0.0 );

  NICE::ColorImage img;
  try {
    img = ColorImage ( currentFile );
  } catch ( Exception ) {
    cerr << "SemSegNovelty: error opening image file <" << currentFile << ">" << endl;
    return;
  }

  MultiChannelImageT<double> feats;

  // extract features
  featExtract->getFeats ( img, feats );
  featdim = feats.channels();
  feats.addChannel ( featdim );

  for ( int c = 0; c < featdim; c++ )
  {
    ImageT<double> tmp = feats[c];
    ImageT<double> tmp2 = feats[c + featdim];
    NICE::FilterT<double, double, double>::gradientStrength ( tmp, tmp2 );
  }
  featdim += featdim;

  // compute integral images
  for ( int c = 0; c < featdim; c++ )
  {
    feats.calcIntegral ( c );
  }
  FloatImage uncert ( xsize, ysize );
  uncert.set ( 0.0 );

  FloatImage gpUncertainty ( xsize, ysize );
  FloatImage gpMean ( xsize, ysize );
  FloatImage gpMeanRatio ( xsize, ysize );
  FloatImage gpWeightAll ( xsize, ysize );
  FloatImage gpWeightRatio ( xsize, ysize );

  gpUncertainty.set ( 0.0 );
  gpMean.set ( 0.0 );
  gpMeanRatio.set ( 0.0 );
  gpWeightAll.set ( 0.0 );
  gpWeightRatio.set ( 0.0 );

  double maxunc = -numeric_limits<double>::max();
  double maxGPUncertainty = -numeric_limits<double>::max();
  double maxGPMean = -numeric_limits<double>::max();
  double maxGPMeanRatio = -numeric_limits<double>::max();
  double maxGPWeightAll = -numeric_limits<double>::max();
  double maxGPWeightRatio = -numeric_limits<double>::max();

  timer.stop();
  cout << "first: " << timer.getLastAbsolute() << endl;

  //we need this later on for the active learning stuff
  double gpNoise = conf->gD ( "GPHIK", "noise", 0.01 );

  timer.start();

  #pragma omp parallel for
  for ( int y = 0; y < ysize; y += testWSize )
  {
    Example example;
    example.vec = NULL;
    example.svec = new SparseVector ( featdim );

    for ( int x = 0; x < xsize; x += testWSize )
    {
      for ( int f = 0; f < featdim; f++ )
      {
        double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
        if ( val > 1e-10 )
          ( *example.svec ) [f] = val;
      }
      example.svec->normalize();

      ClassificationResult cr = classifier->classify ( example );

      //we need these statistics if we want to compute the GP-AL measures later on
      double minMeanAbs ( numeric_limits<double>::max() );
      double maxMeanAbs ( 0.0 );
      double sndMaxMeanAbs ( 0.0 );
      double maxMean ( -numeric_limits<double>::max() );
      double sndMaxMean ( -numeric_limits<double>::max() );

      for ( int j = 0 ; j < cr.scores.size(); j++ )
      {
        if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
        {
          continue;
        }

        //check whether we found a class with a smaller absolute mean than the current minimum
        if ( abs ( cr.scores[j] ) < minMeanAbs )
          minMeanAbs = abs ( cr.scores[j] );

        //check for a larger absolute mean as well
        if ( abs ( cr.scores[j] ) > maxMeanAbs )
        {
          sndMaxMeanAbs = maxMeanAbs;
          maxMeanAbs = abs ( cr.scores[j] );
        }
        // and also for the second highest absolute mean of all classes
        else if ( abs ( cr.scores[j] ) > sndMaxMeanAbs )
        {
          sndMaxMeanAbs = abs ( cr.scores[j] );
        }

        //check for a larger mean (without abs) as well
        if ( cr.scores[j] > maxMean )
        {
          sndMaxMean = maxMean;
          maxMean = cr.scores[j];
        }
        // and also for the second highest mean of all classes
        else if ( cr.scores[j] > sndMaxMean )
        {
          sndMaxMean = cr.scores[j];
        }
      }
      double firstTerm ( 1.0 / sqrt ( cr.uncertainty + gpNoise ) );

      //compute the heuristic GP-UNCERTAINTY, as proposed by Kapoor et al. in IJCV 2010
      // GP-UNCERTAINTY : |mean| / sqrt(var^2 + gpnoise^2)
      double gpUncertaintyVal = maxMeanAbs * firstTerm; //firstTerm = 1.0 / sqrt(cr.uncertainty+gpNoise)

      // compute results when we take the lowest mean value of all classes
      double gpMeanVal = minMeanAbs;

      //look at the difference between the mean value of the most plausible class
      // and the second most plausible class
      double gpMeanRatioVal = maxMean - sndMaxMean;

      double gpWeightAllVal ( 0.0 );
      double gpWeightRatioVal ( 0.0 );

      if ( numberOfClasses > 2 )
      {
        //compute the weight in the alpha-vector for every sample after assuming it to be
        // added to the training set.
        // Thereby, we measure its "importance" for the current model
        //
        //double firstTerm is already computed
        //
        //the second term is only needed when computing impacts
        //double secondTerm; //this is the nasty guy :/

        //--- compute the third term
        // this is the difference between predicted label and GT label
        std::vector<double> diffToPositive; diffToPositive.clear();
        std::vector<double> diffToNegative; diffToNegative.clear();
        double diffToNegativeSum ( 0.0 );

        for ( int j = 0 ; j < cr.scores.size(); j++ )
        {
          if ( forbidden_classesTrain.find ( j ) != forbidden_classesTrain.end() )
          {
            continue;
          }

          // look at the difference to plus 1
          diffToPositive.push_back ( abs ( cr.scores[j] - 1 ) );
          // look at the difference to -1
          diffToNegative.push_back ( abs ( cr.scores[j] + 1 ) );
          //sum up the differences to -1
          diffToNegativeSum += abs ( cr.scores[j] + 1 );
        }

        //let's subtract for every class its diffToNegative from the sum, add its diffToPositive,
        //and use this as the third term for this specific class.
        //the final value is obtained by minimizing over all classes
        //
        // originally, we minimize over all classes after building the final score
        // however, the first and the second term do not depend on the choice of
        // y*, therefore we minimize here already
        double thirdTerm ( numeric_limits<double>::max() );
        for ( uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++ )
        {
          double tmpVal ( diffToPositive[tmpCnt] + ( diffToNegativeSum - diffToNegative[tmpCnt] ) );
          if ( tmpVal < thirdTerm )
            thirdTerm = tmpVal;
        }
        gpWeightAllVal = thirdTerm * firstTerm;

        //now look at the ratio of the resulting weights for the most plausible
        // against the second most plausible class
        double thirdTermMostPlausible ( 0.0 );
        double thirdTermSecondMostPlausible ( 0.0 );
        for ( uint tmpCnt = 0; tmpCnt < diffToPositive.size(); tmpCnt++ )
        {
          if ( diffToPositive[tmpCnt] > thirdTermMostPlausible )
          {
            thirdTermSecondMostPlausible = thirdTermMostPlausible;
            thirdTermMostPlausible = diffToPositive[tmpCnt];
          }
          else if ( diffToPositive[tmpCnt] > thirdTermSecondMostPlausible )
          {
            thirdTermSecondMostPlausible = diffToPositive[tmpCnt];
          }
        }
        //compute the resulting score
        gpWeightRatioVal = ( thirdTermMostPlausible - thirdTermSecondMostPlausible ) * firstTerm;

        //finally, evaluate how this feature would affect the whole model (summarized by the weight vector alpha)
        //if we used it as an additional training example
        //TODO this would be REALLY computationally demanding. Do we really want to do this?
        //   gpImpactAll[s] ( pce[i].second.x, pce[i].second.y ) = thirdTerm*firstTerm*secondTerm;
        //   gpImpactRatio[s] ( pce[i].second.x, pce[i].second.y ) = (thirdTermMostPlausible - thirdTermSecondMostPlausible)*firstTerm*secondTerm;
      }
      else //binary scenario
      {
        gpWeightAllVal = std::min ( abs ( cr.scores[*classesInUse.begin()] + 1 ), abs ( cr.scores[*classesInUse.begin()] - 1 ) );
        gpWeightAllVal *= firstTerm;
        gpWeightRatioVal = gpWeightAllVal;
      }
      int xs = std::max ( 0, x - testWSize / 2 );
      int xe = std::min ( xsize - 1, x + testWSize / 2 );
      int ys = std::max ( 0, y - testWSize / 2 );
      int ye = std::min ( ysize - 1, y + testWSize / 2 );

      for ( int yl = ys; yl <= ye; yl++ )
      {
        for ( int xl = xs; xl <= xe; xl++ )
        {
          for ( int j = 0 ; j < cr.scores.size(); j++ )
          {
            probabilities ( xl, yl, j ) = cr.scores[j];
          }
          segresult ( xl, yl ) = cr.classno;
          uncert ( xl, yl ) = cr.uncertainty;

          gpUncertainty ( xl, yl ) = gpUncertaintyVal;
          gpMean ( xl, yl ) = gpMeanVal;
          gpMeanRatio ( xl, yl ) = gpMeanRatioVal;
          gpWeightAll ( xl, yl ) = gpWeightAllVal;
          gpWeightRatio ( xl, yl ) = gpWeightRatioVal;
        }
      }

      if ( maxunc < cr.uncertainty )
        maxunc = cr.uncertainty;

      if ( maxGPUncertainty < gpUncertaintyVal )
        maxGPUncertainty = gpUncertaintyVal;
      if ( maxGPMean < gpMeanVal )
        maxGPMean = gpMeanVal;
      if ( maxGPMeanRatio < gpMeanRatioVal )
        maxGPMeanRatio = gpMeanRatioVal;
      if ( maxGPWeightAll < gpWeightAllVal )
        maxGPWeightAll = gpWeightAllVal;
      if ( maxGPWeightRatio < gpWeightRatioVal )
        maxGPWeightRatio = gpWeightRatioVal;

      example.svec->clear();
    }
    delete example.svec;
    example.svec = NULL;
  }
  // std::cerr << "uncertainty: " << gpUncertaintyVal << " minMean: " << gpMeanVal << " gpMeanRatio: " << gpMeanRatioVal << " weightAll: " << gpWeightAllVal << " weightRatio: "<< gpWeightRatioVal << std::endl;

  // determine regions
  if ( regionSeg != NULL )
  {
    NICE::Matrix mask;
    int amountRegions = regionSeg->segRegions ( img, mask );

    //compute probs per region
    vector<vector<double> > regionProb ( amountRegions, vector<double> ( probabilities.channels(), 0.0 ) );
    vector<double> regionNoveltyMeasure ( amountRegions, 0.0 );
    vector<int> regionCounter ( amountRegions, 0 );

    for ( int y = 0; y < ysize; y++ )
    {
      for ( int x = 0; x < xsize; x++ )
      {
        int r = mask ( x, y );
        regionCounter[r]++;
        for ( int j = 0; j < probabilities.channels(); j++ )
        {
          regionProb[r][j] += probabilities ( x, y, j );
        }
        regionNoveltyMeasure[r] += uncert ( x, y );
      }
    }

    //find best class per region
    vector<int> bestClassPerRegion ( amountRegions, 0 );

    double maxuncert = -numeric_limits<double>::max();
    int maxUncertRegion = -1;

    for ( int r = 0; r < amountRegions; r++ )
    {
      double maxval = -numeric_limits<double>::max();
      for ( int c = 0; c < probabilities.channels(); c++ )
      {
        regionProb[r][c] /= regionCounter[r];
        if ( maxval < regionProb[r][c] && regionProb[r][c] != 0.0 )
        {
          maxval = regionProb[r][c];
          bestClassPerRegion[r] = c;
        }
      }
      regionNoveltyMeasure[r] /= regionCounter[r];
      if ( maxuncert < regionNoveltyMeasure[r] )
      {
        maxuncert = regionNoveltyMeasure[r];
        maxUncertRegion = r;
      }
    }
    if ( findMaximumUncert )
    {
      if ( maxuncert > globalMaxUncert )
      {
        //save new important features
        Examples examples;
        for ( int y = 0; y < ysize; y += testWSize )
        {
          for ( int x = 0; x < xsize; x += testWSize )
          {
            if ( mask ( x, y ) == maxUncertRegion )
            {
              Example example;
              example.vec = NULL;
              example.svec = new SparseVector ( featdim );
              int classnoTmp = labels ( x, y );

              for ( int f = 0; f < featdim; f++ )
              {
                double val = feats.getIntegralValue ( x - whs, y - whs, x + whs, y + whs, f );
                if ( val > 1e-10 )
                  ( *example.svec ) [f] = val;
              }
              example.svec->normalize();

              examples.push_back ( pair<int, Example> ( classnoTmp, example ) );
            }
          }
        }

        if ( examples.size() > 0 )
        {
          newTrainExamples.clear();
          newTrainExamples = examples;
          globalMaxUncert = maxuncert;
          visualizeRegion ( img, mask, maxUncertRegion, maskedImg );
        }
      }
    }

    //write back best results per region
    for ( int y = 0; y < ysize; y++ )
    {
      for ( int x = 0; x < xsize; x++ )
      {
        int r = mask ( x, y );
        for ( int j = 0; j < probabilities.channels(); j++ )
        {
          probabilities ( x, y, j ) = regionProb[r][j];
        }
        segresult ( x, y ) = bestClassPerRegion[r];
      }
    }
  }
  timer.stop();
  cout << "second: " << timer.getLastAbsolute() << endl;

  timer.start();

  ColorImage imgrgb ( xsize, ysize );

  std::stringstream out;
  std::vector< std::string > list2;
  StringTools::split ( Globals::getCurrentImgFN (), '/', list2 );
  out << uncertdir << "/" << list2.back();

  uncert.writeRaw ( out.str() + ".rawfloat" );
  gpUncertainty.writeRaw ( out.str() + "_gpUncertainty.rawfloat" );
  gpMean.writeRaw ( out.str() + "_gpMean.rawfloat" );
  gpMeanRatio.writeRaw ( out.str() + "_gpMeanRatio.rawfloat" );
  gpWeightAll.writeRaw ( out.str() + "_gpWeightAll.rawfloat" );
  gpWeightRatio.writeRaw ( out.str() + "_gpWeightRatio.rawfloat" );

  //not needed anymore, everything will be done in our nice script :)
  //
  //   uncert(0, 0) = 0.0;
  //   uncert(0, 1) = 1.0+gpNoise;
  //   ICETools::convertToRGB ( uncert, imgrgb );
  //   imgrgb.write ( out.str() + "rough.png" );
  //
  //   //invert images such that large numbers correspond to high impact, high variance, high importance, high novelty, ...
  //   for ( int y = 0; y < ysize; y++)
  //   {
  //     for (int x = 0; x < xsize; x++)
  //     {
  //       gpUncertainty(x,y) = maxGPUncertainty - gpUncertainty(x,y);
  //       gpMean(x,y) = maxGPMean - gpMean(x,y);
  //       gpMeanRatio(x,y) = maxGPMeanRatio - gpMeanRatio(x,y);
  //       gpWeightRatio(x,y) = maxGPWeightRatio - gpWeightRatio(x,y);
  //     }
  //   }
  //   //actually, this is also done in the post-processing file
  //
  //
  //   //
  //   gpUncertainty(0, 0) = 0.0;
  //   gpUncertainty(0, 1) = maxGPUncertainty;
  //   ICETools::convertToRGB ( gpUncertainty, imgrgb );
  //   imgrgb.write ( out.str() + "gpUncertainty.png" );
  //   //
  //   gpMean(0, 0) = 0.0;
  //   gpMean(0, 1) = maxGPMean;
  //   ICETools::convertToRGB ( gpMean, imgrgb );
  //   imgrgb.write ( out.str() + "gpMean.png" );
  //   //
  //   gpMeanRatio(0, 0) = 0.0;
  //   gpMeanRatio(0, 1) = maxGPMeanRatio;
  //   ICETools::convertToRGB ( gpMeanRatio, imgrgb );
  //   imgrgb.write ( out.str() + "gpMeanRatio.png" );
  //   //
  //   gpWeightAll(0, 0) = 0.0;
  //   gpWeightAll(0, 1) = maxGPWeightAll;
  //   ICETools::convertToRGB ( gpWeightAll, imgrgb );
  //   imgrgb.write ( out.str() + "gpWeightAll.png" );
  //   //
  //   gpWeightRatio(0, 0) = 0.0;
  //   gpWeightRatio(0, 1) = maxGPWeightRatio;
  //   ICETools::convertToRGB ( gpWeightRatio, imgrgb );
  //   imgrgb.write ( out.str() + "gpWeightRatio.png" );
  //

  timer.stop();
  cout << "last: " << timer.getLastAbsolute() << endl;
}