/**
* @file FMKGPHyperparameterOptimization.cpp
* @brief Heart of the framework to set up everything, perform optimization, incremental updates, classification, variance prediction (Implementation)
* @author Erik Rodner, Alexander Freytag
* @date 01/02/2012
*/

#include <iostream>
#include <map>

#include <core/algebra/ILSConjugateGradients.h>
#include <core/algebra/ILSConjugateGradientsLanczos.h>
#include <core/algebra/ILSSymmLqLanczos.h>
#include <core/algebra/ILSMinResLanczos.h>
#include <core/algebra/ILSPlainGradient.h>
#include <core/algebra/EigValuesTRLAN.h>
#include <core/algebra/CholeskyRobust.h>
#include <core/vector/Algorithms.h>
#include <core/vector/Eigen.h>
#include <core/basics/Timer.h>
#include <core/basics/ResourceStatistics.h>

#include "core/optimization/blackbox/DownhillSimplexOptimizer.h"

#include "FMKGPHyperparameterOptimization.h"
#include "FastMinKernel.h"
#include "GMHIKernel.h"
#include "IKMNoise.h"

using namespace NICE;
using namespace std;

FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization()
{
  pf = NULL;
  eig = NULL;
  linsolver = NULL;
  fmk = NULL;
  q = NULL;
  precomputedTForVarEst = NULL;
  verbose = false;
  verboseTime = false;
  debug = false;

  // dummy default values for the binary label encoding
  binaryLabelPositive = -1;
  binaryLabelNegative = -2;
}

FMKGPHyperparameterOptimization::FMKGPHyperparameterOptimization ( const Config *_conf, ParameterizedFunction *_pf, FastMinKernel *_fmk, const string & _confSection )
{
  // default settings, which may be overwritten later on
  pf = NULL;
  eig = NULL;
  linsolver = NULL;
  fmk = NULL;
  q = NULL;
  precomputedTForVarEst = NULL;

  // dummy default values for the binary label encoding
  binaryLabelPositive = -1;
  binaryLabelNegative = -2;

  if ( _fmk == NULL )
    this->initialize ( _conf, _pf ); // then the confSection is also the default value
  //TODO not needed anymore, only for backward compatibility
  // else if ( _confSection.compare ( "HIKGP" ) == 0 )
  //   this->initialize ( _conf, _pf, _fmk );
  else
    this->initialize ( _conf, _pf, _fmk, _confSection );
}

FMKGPHyperparameterOptimization::~FMKGPHyperparameterOptimization()
{
  // pf is deleted by the outer program

  if ( this->eig != NULL )
    delete this->eig;
  if ( this->linsolver != NULL )
    delete this->linsolver;
  if ( this->fmk != NULL )
    delete this->fmk;
  if ( this->q != NULL )
    delete this->q;

  // the LUTs are C-style arrays, so we have to use delete []
  for ( std::map<int, double *>::iterator it = precomputedT.begin(); it != precomputedT.end(); it++ )
    delete [] ( it->second );

  if ( precomputedTForVarEst != NULL )
    delete [] precomputedTForVarEst;

  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
    delete it->second;
}

void FMKGPHyperparameterOptimization::initialize ( const Config *_conf, ParameterizedFunction *_pf, FastMinKernel *_fmk, const std::string & _confSection )
{
  if ( this->fmk != NULL )
    delete this->fmk;
  if ( _fmk != NULL )
    this->fmk = _fmk;
  this->pf = _pf;

  std::cerr << "------------" << std::endl;
  std::cerr << "| set-up |" << std::endl;
  std::cerr << "------------" << std::endl;

  this->eig = new EVArnoldi ( _conf->gB ( _confSection, "eig_verbose", false ) /* verbose flag */, 10 );
  // this->eig = new EigValuesTRLAN();
  // My time measurements show that both methods take roughly the same time;
  // a comparison of their numerical performance has not been done yet.

  this->parameterUpperBound = _conf->gD ( _confSection, "parameter_upper_bound", 2.5 );
  this->parameterLowerBound = _conf->gD ( _confSection, "parameter_lower_bound", 1.0 );
  this->parameterStepSize = _conf->gD ( _confSection, "parameter_step_size", 0.1 );

  this->verifyApproximation = _conf->gB ( _confSection, "verify_approximation", false );
  this->nrOfEigenvaluesToConsider = _conf->gI ( _confSection, "nrOfEigenvaluesToConsider", 1 );
  this->nrOfEigenvaluesToConsiderForVarApprox = _conf->gI ( _confSection, "nrOfEigenvaluesToConsiderForVarApprox", 2 );

  this->verbose = _conf->gB ( _confSection, "verbose", false );
  this->verboseTime = _conf->gB ( _confSection, "verboseTime", false );
  this->debug = _conf->gB ( _confSection, "debug", false );

  bool useQuantization = _conf->gB ( _confSection, "use_quantization", false );
  std::cerr << "_confSection: " << _confSection << std::endl;
  std::cerr << "use_quantization: " << useQuantization << std::endl;

  if ( useQuantization )
  {
    int numBins = _conf->gI ( _confSection, "num_bins", 100 );
    if ( verbose )
      cerr << "FMKGPHyperparameterOptimization: quantization initialized with " << numBins << " bins." << endl;
    this->q = new Quantization ( numBins );
  }
  else
  {
    this->q = NULL;
  }
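
  // The quantization object discretizes feature values into numBins bins. This later
  // allows replacing exact kernel evaluations with simple lookup tables (LUTs, cf.
  // hik_prepare_alpha_multiplications_fast below), trading a small quantization error
  // for constant-time score evaluations per dimension; memory consumption grows
  // roughly with numBins times the number of feature dimensions.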

  bool ils_verbose = _conf->gB ( _confSection, "ils_verbose", false );
  ils_max_iterations = _conf->gI ( _confSection, "ils_max_iterations", 1000 );
  if ( verbose )
    cerr << "FMKGPHyperparameterOptimization: maximum number of iterations is " << ils_max_iterations << endl;

  double ils_min_delta = _conf->gD ( _confSection, "ils_min_delta", 1e-7 );
  double ils_min_residual = _conf->gD ( _confSection, "ils_min_residual", 1e-7 /*1e-2*/ );

  string ils_method = _conf->gS ( _confSection, "ils_method", "CG" );
  if ( ils_method.compare ( "CG" ) == 0 )
  {
    if ( verbose )
      std::cerr << "We use CG with " << ils_max_iterations << " iterations, " << ils_min_delta << " as min delta, and " << ils_min_residual << " as min res " << std::endl;
    this->linsolver = new ILSConjugateGradients ( ils_verbose, ils_max_iterations, ils_min_delta, ils_min_residual );
    if ( verbose )
      cerr << "FMKGPHyperparameterOptimization: using ILS ConjugateGradients" << endl;
  }
  else if ( ils_method.compare ( "CGL" ) == 0 )
  {
    this->linsolver = new ILSConjugateGradientsLanczos ( ils_verbose, ils_max_iterations );
    if ( verbose )
      cerr << "FMKGPHyperparameterOptimization: using ILS ConjugateGradients (Lanczos)" << endl;
  }
  else if ( ils_method.compare ( "SYMMLQ" ) == 0 )
  {
    this->linsolver = new ILSSymmLqLanczos ( ils_verbose, ils_max_iterations );
    if ( verbose )
      cerr << "FMKGPHyperparameterOptimization: using ILS SYMMLQ" << endl;
  }
  else if ( ils_method.compare ( "MINRES" ) == 0 )
  {
    this->linsolver = new ILSMinResLanczos ( ils_verbose, ils_max_iterations );
    if ( verbose )
      cerr << "FMKGPHyperparameterOptimization: using ILS MINRES" << endl;
  }
  else
  {
    cerr << "FMKGPHyperparameterOptimization: " << _confSection << ":ils_method (" << ils_method << ") does not match any type (CG, CGL, SYMMLQ, MINRES), I will use CG" << endl;
    this->linsolver = new ILSConjugateGradients ( ils_verbose, ils_max_iterations, ils_min_delta, ils_min_residual );
  }

  this->usePreviousAlphas = _conf->gB ( _confSection, "usePreviousAlphas", true );

  string optimizationMethod_s = _conf->gS ( _confSection, "optimization_method", "greedy" );
  if ( optimizationMethod_s == "greedy" )
    optimizationMethod = OPT_GREEDY;
  else if ( optimizationMethod_s == "downhillsimplex" )
    optimizationMethod = OPT_DOWNHILLSIMPLEX;
  else if ( optimizationMethod_s == "none" )
    optimizationMethod = OPT_NONE;
  else
    fthrow ( Exception, "Optimization method " << optimizationMethod_s << " is not known." );

  if ( verbose )
    cerr << "Using optimization method: " << optimizationMethod_s << endl;

  downhillSimplexMaxIterations = _conf->gI ( _confSection, "downhillsimplex_max_iterations", 20 );
  // do not run longer than a day :)
  downhillSimplexTimeLimit = _conf->gD ( _confSection, "downhillsimplex_time_limit", 24 * 60 * 60 );
  downhillSimplexParamTol = _conf->gD ( _confSection, "downhillsimplex_delta", 0.01 );

  learnBalanced = _conf->gB ( _confSection, "learn_balanced", false );
  std::cerr << "balanced learning: " << learnBalanced << std::endl;

  optimizeNoise = _conf->gB ( _confSection, "optimize_noise", false );
  if ( verbose )
    cerr << "Optimize noise: " << ( optimizeNoise ? "on" : "off" ) << endl;

  std::cerr << "------------" << std::endl;
  std::cerr << "| start |" << std::endl;
  std::cerr << "------------" << std::endl;
}

void FMKGPHyperparameterOptimization::setParameterUpperBound ( const double & _parameterUpperBound )
{
  parameterUpperBound = _parameterUpperBound;
}

void FMKGPHyperparameterOptimization::setParameterLowerBound ( const double & _parameterLowerBound )
{
  parameterLowerBound = _parameterLowerBound;
}
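
// Sets up one GPLikelihoodApprox object per binary task: in the balanced setting we
// create a separate likelihood object (with its own single-class label vector) for
// every ikmsum-object, whereas in the standard setting a single likelihood object
// (stored with key 0) handles all binary label vectors at once.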
void FMKGPHyperparameterOptimization::setupGPLikelihoodApprox ( std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, NICE::Vector> & binaryLabels, std::map<int, uint> & parameterVectorSizes )
{
  if ( learnBalanced )
  {
    if ( verbose )
    {
      std::cerr << "FMKGPHyperparameterOptimization::setupGPLikelihoodApprox -- balanced setting" << std::endl;
      std::cerr << "number of ikmsum-objects: " << ikmsums.size() << std::endl;
    }
    for ( std::map<int, IKMLinearCombination*>::const_iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
    {
      map<int, NICE::Vector> binaryLabelsSingle;
      binaryLabelsSingle.insert ( *binaryLabels.find ( it->first ) );
      GPLikelihoodApprox *gplike = new GPLikelihoodApprox ( binaryLabelsSingle, it->second, linsolver, eig, verifyApproximation, nrOfEigenvaluesToConsider );
      gplike->setUsePreviousAlphas ( usePreviousAlphas );
      gplike->setDebug ( debug );
      gplike->setVerbose ( verbose );
      gplikes.insert ( std::pair<int, GPLikelihoodApprox * > ( it->first, gplike ) );
      parameterVectorSizes.insert ( std::pair<int, uint> ( it->first, it->second->getNumParameters() ) );
    }
    if ( verbose )
      std::cerr << "resulting number of gplike-objects: " << gplikes.size() << std::endl;
  }
  else
  {
    GPLikelihoodApprox *gplike = new GPLikelihoodApprox ( binaryLabels, ikmsums.begin()->second, linsolver, eig, verifyApproximation, nrOfEigenvaluesToConsider );
    gplike->setUsePreviousAlphas ( usePreviousAlphas );
    gplike->setDebug ( debug );
    gplike->setVerbose ( verbose );
    gplikes.insert ( std::pair<int, GPLikelihoodApprox * > ( 0, gplike ) );
    parameterVectorSizes.insert ( std::pair<int, uint> ( 0, ikmsums.begin()->second->getNumParameters() ) );
  }
}

void FMKGPHyperparameterOptimization::updateEigenVectors()
{
  if ( verbose )
  {
    std::cerr << "FMKGPHyperparameterOptimization::updateEigenVectors -- size of ikmsums: " << ikmsums.size() << std::endl;
    std::cerr << "class of first object: " << ikmsums.begin()->first << std::endl;
  }
  if ( learnBalanced )
  {
    // compute the eigenvalues and eigenvectors of every class-specific kernel matrix
    // for the fine approximation of predictive uncertainties
    std::map<int, IKMLinearCombination * >::iterator ikmsumsIt;
    eigenMax.resize ( ikmsums.size() );
    eigenMaxVectors.resize ( ikmsums.size() );
    int classCnt ( 0 );
    for ( ikmsumsIt = ikmsums.begin(); ikmsumsIt != ikmsums.end(); ikmsumsIt++, classCnt++ )
    {
      eig->getEigenvalues ( * ikmsumsIt->second, eigenMax[classCnt], eigenMaxVectors[classCnt], nrOfEigenvaluesToConsiderForVarApprox );
    }
  }
  else
  {
    std::cerr << "not balanced, considering " << nrOfEigenvaluesToConsiderForVarApprox << " eigenvalues for the variance approximation" << std::endl;
    std::cerr << "and " << nrOfEigenvaluesToConsider << " for the simple approximation" << std::endl;
    if ( nrOfEigenvaluesToConsiderForVarApprox > 1 )
      nrOfEigenvaluesToConsiderForVarApprox = 1;
    // compute the largest eigenvalue of K + noise
    eigenMax.resize ( 1 );
    eigenMaxVectors.resize ( 1 );
    eig->getEigenvalues ( * ( ikmsums.begin()->second ), eigenMax[0], eigenMaxVectors[0], nrOfEigenvaluesToConsiderForVarApprox );
  }
}
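
// Optimizes the kernel hyperparameters with the strategy selected via optimization_method:
//   OPT_GREEDY          -- evaluate the likelihood on a regular grid between the
//                          parameter bounds (only sensible for a single parameter),
//   OPT_DOWNHILLSIMPLEX -- derivative-free downhill simplex (Nelder-Mead) search,
//   OPT_NONE            -- evaluate once with a fixed parameter value.
// roughOptimization indicates a first-time optimization; otherwise the simplex is
// configured with smaller step parameters, assuming we already start close to a good
// solution (e.g., after an incremental update).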
void FMKGPHyperparameterOptimization::performOptimization ( std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, uint> & parameterVectorSizes, const bool & roughOptimization )
{
  if ( verbose )
    std::cerr << "perform optimization" << std::endl;

  if ( optimizationMethod == OPT_GREEDY )
  {
    if ( verbose )
      std::cerr << "OPT_GREEDY!!! " << std::endl;
    // simple greedy strategy
    if ( ikmsums.begin()->second->getNumParameters() != 1 )
      fthrow ( Exception, "Reduce size of the parameter vector or use downhill simplex!" );

    Vector lB = ikmsums.begin()->second->getParameterLowerBounds();
    Vector uB = ikmsums.begin()->second->getParameterUpperBounds();
    if ( verbose )
      cerr << "lower bound " << lB << " upper bound " << uB << endl;

    if ( learnBalanced )
    {
      if ( lB[0] == uB[0] ) // do we already know a specific parameter?
      {
        for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
        {
          if ( verbose )
            std::cerr << "Optimizing class " << gpLikeIt->first << std::endl;
          OPTIMIZATION::matrix_type hyperp ( 1, 1, lB[0] );
          gpLikeIt->second->evaluate ( hyperp );
        }
      }
      else
      {
        fthrow ( Exception, "HYPERPARAMETER OPTIMIZATION SHOULD NOT BE USED TOGETHER WITH BALANCED LEARNING IN THIS FRAMEWORK!!!" );
      }
    }
    else
    {
      for ( double mypara = lB[0]; mypara <= uB[0]; mypara += this->parameterStepSize )
      {
        OPTIMIZATION::matrix_type hyperp ( 1, 1, mypara );
        gplikes.begin()->second->evaluate ( hyperp );
      }
    }
  }
  else if ( optimizationMethod == OPT_DOWNHILLSIMPLEX )
  {
    if ( learnBalanced )
    {
      if ( verbose )
        std::cerr << "DOWNHILLSIMPLEX WITH BALANCED LEARNING!!! " << std::endl;
      fthrow ( Exception, "HYPERPARAMETER OPTIMIZATION SHOULD NOT BE USED TOGETHER WITH BALANCED LEARNING IN THIS FRAMEWORK!!!" );
      // unfortunately, we suffer from the fact that we only have a single fmk-object;
      // therefore, we would either have to copy the fmk-object once per class or do some averaging or whatsoever
    }
    else
    { // standard as before, normal optimization
      if ( verbose )
        std::cerr << "DOWNHILLSIMPLEX WITHOUT BALANCED LEARNING!!! " << std::endl;

      // downhill simplex strategy
      OPTIMIZATION::DownhillSimplexOptimizer optimizer;
      OPTIMIZATION::matrix_type initialParams ( parameterVectorSizes.begin()->second, 1 );
      Vector currentParameters;
      ikmsums.begin()->second->getParameters ( currentParameters );
      for ( uint i = 0 ; i < parameterVectorSizes.begin()->second; i++ )
        initialParams ( i, 0 ) = currentParameters[ i ];

      if ( verbose )
        std::cerr << "Initial parameters: " << initialParams << std::endl;

      // OPTIMIZATION::matrix_type scales ( parameterVectorSizes.begin()->second, 1 );
      if ( roughOptimization ) // should be used when we perform the optimization for the first time
      {
        // scales.Set(1.0);
      }
      else // should be used when we optimize in an incremental learning scenario and already have a good guess
      {
        // scales.Set(1.0);
        // for ( uint i = 0 ; i < parameterVectorSizes.begin()->second; i++ )
        //   scales[i][0] = currentParameters[ i ];
        optimizer.setDownhillParams ( 0.2 /* default: 1.0 */, 0.1 /* default: 0.5 */, 0.2 /* default: 1.0 */ );
      }

      // the scales object does not really matter in the actual implementation of Downhill Simplex
      OPTIMIZATION::SimpleOptProblem optProblem ( gplikes.begin()->second, initialParams, initialParams /* scales */ );

      // cerr << "OPT: " << mypara << " " << nlikelihood << " " << logdet << " " << dataterm << endl;
      optimizer.setMaxNumIter ( true, downhillSimplexMaxIterations );
      optimizer.setTimeLimit ( true, downhillSimplexTimeLimit );
      optimizer.setParamTol ( true, downhillSimplexParamTol );
      optimizer.optimizeProb ( optProblem );
    }
  }
  else if ( optimizationMethod == OPT_NONE )
  {
    if ( verbose )
      std::cerr << "NO OPTIMIZATION!!! " << std::endl;

    // without optimization
    if ( optimizeNoise )
      fthrow ( Exception, "Deactivate optimize_noise!" );
    if ( verbose )
      std::cerr << "Optimization is deactivated!" << std::endl;

    double value ( 1.0 );
    if ( this->parameterLowerBound == this->parameterUpperBound )
      value = this->parameterLowerBound;
    pf->setParameterLowerBounds ( NICE::Vector ( 1, value ) );
    pf->setParameterUpperBounds ( NICE::Vector ( 1, value ) );

    // we use the standard value
    if ( learnBalanced )
    {
      for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
      {
        OPTIMIZATION::matrix_type hyperp ( 1, 1, value );
        gpLikeIt->second->setParameterLowerBound ( value );
        gpLikeIt->second->setParameterUpperBound ( value );
        gpLikeIt->second->evaluate ( hyperp );
      }
    }
    else
    {
      OPTIMIZATION::matrix_type hyperp ( 1, 1, value );
      gplikes.begin()->second->setParameterLowerBound ( value );
      gplikes.begin()->second->setParameterUpperBound ( value );
      gplikes.begin()->second->evaluate ( hyperp );
    }
  }

  if ( learnBalanced )
  {
    lastAlphas.clear();
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      if ( verbose )
        std::cerr << "Optimal hyperparameter for class " << gpLikeIt->first << " was: " << gpLikeIt->second->getBestParameters() << std::endl;
      // collect the optimal alphas of every binary task (note: reading them from the
      // current gplike-object is assumed to be the intended behavior here)
      const std::map<int, NICE::Vector> & bestAlphas = gpLikeIt->second->getBestAlphas();
      lastAlphas.insert ( bestAlphas.begin(), bestAlphas.end() );
    }
  }
  else
  {
    if ( verbose )
      std::cerr << "Optimal hyperparameter was: " << gplikes.begin()->second->getBestParameters() << std::endl;
    lastAlphas.clear();
    lastAlphas = gplikes.begin()->second->getBestAlphas();
  }
}

void FMKGPHyperparameterOptimization::transformFeaturesWithOptimalParameters ( const std::map<int, GPLikelihoodApprox * > & gplikes, const std::map<int, uint> & parameterVectorSizes )
{
  if ( verbose )
    std::cerr << "FMKGPHyperparameterOptimization::transformFeaturesWithOptimalParameters" << std::endl;

  // transform all features with the "optimal" parameter
  if ( learnBalanced )
  {
    if ( verbose )
      std::cerr << "learn Balanced" << std::endl;

    double meanValue ( 0.0 );
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      meanValue += gpLikeIt->second->getBestParameters() [0];
    }
    meanValue /= gplikes.size();
    NICE::Vector averagedParams ( parameterVectorSizes.begin()->second, meanValue );
    if ( verbose )
      std::cerr << "averaged Params: " << averagedParams << std::endl;

    // since we only have a single fmk-object, we only have to modify our data once
    ikmsums.begin()->second->setParameters ( averagedParams );
  }
  else
  {
    if ( verbose )
    {
      std::cerr << "learn not Balanced" << std::endl;
      std::cerr << "previous best parameters: " << gplikes.begin()->second->getBestParameters() << std::endl;
      // std::cerr << "previous best alphas: " << gplikes.begin()->second->getBestAlphas() << std::endl;
    }
    ikmsums.begin()->second->setParameters ( gplikes.begin()->second->getBestParameters() );
  }
}
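
// Precomputes for every class the matrices A and B from the corresponding optimal
// alpha vector (via hik_prepare_alpha_multiplications). They allow evaluating the
// score k(x*,.)^T alpha for a test example x* without touching all training examples
// again. If quantization is enabled, an additional lookup table T is prepared, which
// further reduces this to one table lookup per non-zero dimension of x*.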
void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const std::map<int, GPLikelihoodApprox * > & gplikes )
{
  precomputedA.clear();
  precomputedB.clear();

  if ( learnBalanced )
  {
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      map<int, Vector>::const_iterator i = gpLikeIt->second->getBestAlphas().begin();
      PrecomputedType A;
      PrecomputedType B;
      // std::cerr << "computeMatricesAndLUTs -- alpha: " << i->second << std::endl;
      fmk->hik_prepare_alpha_multiplications ( i->second, A, B );
      A.setIoUntilEndOfFile ( false );
      B.setIoUntilEndOfFile ( false );
      precomputedA[ gpLikeIt->first ] = A;
      precomputedB[ gpLikeIt->first ] = B;

      if ( q != NULL )
      {
        double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, *q, pf );
        // just to be sure that we do not waste space here
        if ( precomputedT[ gpLikeIt->first ] != NULL )
          delete [] precomputedT[ gpLikeIt->first ];
        precomputedT[ gpLikeIt->first ] = T;
      }
    }
  }
  else
  { // no GP rebalancing
    for ( map<int, Vector>::const_iterator i = gplikes.begin()->second->getBestAlphas().begin(); i != gplikes.begin()->second->getBestAlphas().end(); i++ )
    {
      PrecomputedType A;
      PrecomputedType B;
      // std::cerr << "computeMatricesAndLUTs -- alpha: " << i->second << std::endl;
      fmk->hik_prepare_alpha_multiplications ( i->second, A, B );
      A.setIoUntilEndOfFile ( false );
      B.setIoUntilEndOfFile ( false );
      precomputedA[ i->first ] = A;
      precomputedB[ i->first ] = B;

      if ( q != NULL )
      {
        double *T = fmk->hik_prepare_alpha_multiplications_fast ( A, B, *q, pf );
        // just to be sure that we do not waste space here
        if ( precomputedT[ i->first ] != NULL )
          delete [] precomputedT[ i->first ];
        precomputedT[ i->first ] = T;
      }
    }
  }
}

#ifdef NICE_USELIB_MATIO
void FMKGPHyperparameterOptimization::optimizeBinary ( const sparse_t & data, const NICE::Vector & yl, const std::set<int> & positives, const std::set<int> & negatives, double noise )
{
  map<int, int> examples;
  Vector y ( yl.size() );
  int ind = 0;
  for ( uint i = 0 ; i < yl.size(); i++ )
  {
    if ( positives.find ( i ) != positives.end() ) {
      y[ examples.size() ] = 1.0;
      examples.insert ( pair<int, int> ( i, ind ) );
      ind++;
    } else if ( negatives.find ( i ) != negatives.end() ) {
      y[ examples.size() ] = -1.0;
      examples.insert ( pair<int, int> ( i, ind ) );
      ind++;
    }
  }
  y.resize ( examples.size() );
  cerr << "Examples: " << examples.size() << endl;

  optimize ( data, y, examples, noise );
}

void FMKGPHyperparameterOptimization::optimize ( const sparse_t & data, const NICE::Vector & y, const std::map<int, int> & examples, double noise )
{
  Timer t;
  t.start();
  cerr << "Initializing data structure ..." << std::endl;
  if ( fmk != NULL ) delete fmk;
  fmk = new FastMinKernel ( data, noise, examples );
  t.stop();
  if ( verboseTime )
    std::cerr << "Time used for initializing the FastMinKernel structure: " << t.getLast() << std::endl;

  optimize ( y );
}
#endif
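
// Converts the raw label vector y into one binary (+1/-1) label vector per relevant
// class and returns the effective number of classes: unchanged for more than two
// classes (one-vs-all encoding), reduced to 1 in the binary case (a single vector
// suffices), and reduced to 0 in the one-class (OCC) setting.
// Example: y = (1, 3, 1, 2) yields binaryLabels[1] = (+1, -1, +1, -1),
// binaryLabels[2] = (-1, -1, -1, +1), and binaryLabels[3] = (-1, +1, -1, -1).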
int FMKGPHyperparameterOptimization::prepareBinaryLabels ( map<int, NICE::Vector> & binaryLabels, const NICE::Vector & y , std::set<int> & myClasses )
{
  myClasses.clear();
  for ( NICE::Vector::const_iterator it = y.begin(); it != y.end(); it++ )
    if ( myClasses.find ( *it ) == myClasses.end() )
    {
      myClasses.insert ( *it );
    }

  // count how many different classes appear in our data
  int nrOfClasses = myClasses.size();

  binaryLabels.clear();
  // compute the corresponding binary label vectors
  if ( nrOfClasses > 2 )
  {
    // resize every label vector and set all entries to -1.0
    for ( set<int>::const_iterator k = myClasses.begin(); k != myClasses.end(); k++ )
    {
      binaryLabels[ *k ].resize ( y.size() );
      binaryLabels[ *k ].set ( -1.0 );
    }
    // now look at every example and set the entry of its corresponding label vector to 1.0
    // proper existence should not be a problem
    for ( int i = 0 ; i < ( int ) y.size(); i++ )
      binaryLabels[ y[i] ][i] = 1.0;
  }
  else if ( nrOfClasses == 2 )
  {
    // std::cerr << "binary setting -- prepare two binary label vectors with opposite signs" << std::endl;
    Vector yb ( y );

    binaryLabelNegative = *(myClasses.begin());
    std::set<int>::const_iterator classIt = myClasses.begin(); classIt++;
    binaryLabelPositive = *classIt;
    // std::cerr << "positiveClass : " << binaryLabelPositive << " negativeClass: " << binaryLabelNegative << std::endl;

    for ( uint i = 0 ; i < yb.size() ; i++ )
      yb[i] = ( y[i] == binaryLabelNegative ) ? -1.0 : 1.0;

    binaryLabels[ binaryLabelPositive ] = yb;
    //binaryLabels[ 1 ] = yb;

    // uncomment the following, if you want to perform real binary computations with 2 classes
    // // we only need one vector, which already contains +1 and -1, so we need only one computation too
    // binaryLabels[ negativeClass ] = yb;
    // binaryLabels[ negativeClass ] *= -1.0;
    // std::cerr << "binaryLabels.size(): " << binaryLabels.size() << std::endl;
    // binaryLabels[ 0 ] = yb;
    // binaryLabels[ 0 ] *= -1.0;

    // comment the following, if you want to do a real binary computation. It should be senseless, but let's see...
    // we do no real binary computation, but an implicit one with only a single object
    nrOfClasses--;
    std::set<int>::iterator it = myClasses.begin(); it++;
    myClasses.erase ( it );
  }
  else // OCC setting
  {
    // we set the labels to 1, independent of the previously given class number
    // however, the original class numbers are stored and returned during classification
    Vector yNew ( y.size(), 1 );
    myClasses.clear();
    myClasses.insert ( 1 );
    // we have to indicate that we are in an OCC setting
    nrOfClasses--;
  }

  return nrOfClasses;
}

void FMKGPHyperparameterOptimization::optimize ( const NICE::Vector & y )
{
  if ( fmk == NULL )
    fthrow ( Exception, "FastMinKernel object was not initialized!" );

  this->labels = y;

  std::map<int, NICE::Vector> binaryLabels;
  std::set<int> classesToUse;
  prepareBinaryLabels ( binaryLabels, y, classesToUse );

  // now call the main function :)
  this->optimize ( binaryLabels );
}

void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & binaryLabels )
{
  Timer t;
  t.start();

  // how many different classes do we have right now?
  int nrOfClasses = binaryLabels.size();
  std::set<int> classesToUse;
  classesToUse.clear();
  for ( std::map<int, NICE::Vector>::const_iterator clIt = binaryLabels.begin(); clIt != binaryLabels.end(); clIt++ )
  {
    classesToUse.insert ( clIt->first );
  }

  if ( verbose )
  {
    std::cerr << "Initial noise level: " << fmk->getNoise() << endl;
    std::cerr << "Number of classes (=1 means we have a binary setting): " << nrOfClasses << std::endl;
    std::cerr << "Effective number of classes (neglecting classes without positive examples): " << classesToUse.size() << std::endl;
  }

  // combine standard model and noise model
  ikmsums.clear();

  Timer t1;
  t1.start();
  // set up the kernel combination
  if ( learnBalanced )
  {
    for ( std::set<int>::const_iterator clIt = classesToUse.begin(); clIt != classesToUse.end(); clIt++ )
    {
      IKMLinearCombination *ikmsum = new IKMLinearCombination ();
      ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( *clIt, ikmsum ) );
    }
  }
  else
  {
    IKMLinearCombination *ikmsum = new IKMLinearCombination ();
    ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( 0, ikmsum ) );
  }

  if ( verbose )
  {
    std::cerr << "ikmsums.size(): " << ikmsums.size() << std::endl;
    std::cerr << "binaryLabels.size(): " << binaryLabels.size() << std::endl;
  }

  // First model: noise
  if ( learnBalanced )
  {
    int cnt ( 0 );
    for ( std::set<int>::const_iterator clIt = classesToUse.begin(); clIt != classesToUse.end(); clIt++, cnt++ )
    {
      ikmsums.find ( *clIt )->second->addModel ( new IKMNoise ( binaryLabels[*clIt], fmk->getNoise(), optimizeNoise ) );
    }
  }
  else
  {
    ikmsums.find ( 0 )->second->addModel ( new IKMNoise ( fmk->get_n(), fmk->getNoise(), optimizeNoise ) );
  }

  // set the built-in noise very low, because we explicitly add the noise with the IKMNoise object
  fmk->setNoise ( 0.0 );

  //NOTE The GMHIKernel is always the last model to be added (this is necessary for easy store and restore functionality)
  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
  {
    it->second->addModel ( new GMHIKernel ( fmk, pf, NULL /* no quantization */ ) );
  }
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;

  std::map<int, GPLikelihoodApprox * > gplikes;
  std::map<int, uint> parameterVectorSizes;

  t1.start();
  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;

  if ( verbose )
  {
    std::cerr << "parameterVectorSizes: " << std::endl;
    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
    {
      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
    }
    std::cerr << std::endl;
  }

  t1.start();
  this->updateEigenVectors();
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;

  if ( verbose )
    std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;

  t1.start();
  this->performOptimization ( gplikes, parameterVectorSizes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;

  if ( verbose )
    cerr << "Preparing classification ..." << endl;

  t1.start();
  this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;

  t1.start();
  this->computeMatricesAndLUTs ( gplikes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the A and B objects: " << t1.getLast() << std::endl;

  t.stop();

  ResourceStatistics rs;
  std::cerr << "Time used for learning: " << t.getLast() << std::endl;
  long maxMemory;
  rs.getMaximumMemory ( maxMemory );
  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;

  // don't waste memory
  if ( learnBalanced )
  {
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      delete gpLikeIt->second;
    }
  }
  else
  {
    delete gplikes.begin()->second;
  }
}
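
// Updates the model after a single new example x was added to the training set: the
// example is passed to all kernel combination objects, previous alpha vectors are
// (optionally) extended and reused as a warm start, the eigendecomposition is
// recomputed, and the matrices A, B, and the LUTs are rebuilt from scratch.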
void FMKGPHyperparameterOptimization::optimizeAfterSingleIncrement ( const NICE::SparseVector & x, const bool & performOptimizationAfterIncrement )
{
  Timer t;
  t.start();
  if ( fmk == NULL )
    fthrow ( Exception, "FastMinKernel object was not initialized!" );

  map<int, NICE::Vector> binaryLabels;
  set<int> classesToUse;
  prepareBinaryLabels ( binaryLabels, labels, classesToUse );
  if ( verbose )
    std::cerr << "labels.size() after increment: " << labels.size() << std::endl;

  Timer t1;
  t1.start();
  // update the kernel combinations
  std::map<int, NICE::Vector>::const_iterator labelIt = binaryLabels.begin();
  // note that if we only have a single ikmsum-object, the label vector will not be used at all by the internal objects (it is only relevant for IKMNoise)
  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
  {
    it->second->addExample ( x, labelIt->second );
    labelIt++;
  }

  // we have to reset the fmk explicitly
  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
  {
    ( ( GMHIKernel* ) it->second->getModel ( it->second->getNumberOfModels() - 1 ) )->setFastMinKernel ( this->fmk );
  }

  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;

  std::map<int, GPLikelihoodApprox * > gplikes;
  std::map<int, uint> parameterVectorSizes;

  t1.start();
  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;

  if ( verbose )
  {
    std::cerr << "parameterVectorSizes: " << std::endl;
    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
    {
      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
    }
    std::cerr << std::endl;
  }

  t1.start();
  if ( usePreviousAlphas )
  {
    std::map<int, NICE::Vector>::const_iterator binaryLabelsIt = binaryLabels.begin();
    std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
    for ( std::map<int, NICE::Vector>::iterator lastAlphaIt = lastAlphas.begin(); lastAlphaIt != lastAlphas.end(); lastAlphaIt++ )
    {
      int oldSize ( lastAlphaIt->second.size() );
      lastAlphaIt->second.resize ( oldSize + 1 );

      // We initialize the new entry with the same values as used in GPLikelihoodApprox in batch training.
      // Default in GPLikelihoodApprox for the first time:
      //   alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
      double maxEigenValue ( 1.0 );
      if ( (*eigenMaxIt).size() > 0 )
        maxEigenValue = (*eigenMaxIt)[0];
      double factor ( 1.0 / maxEigenValue );

      if ( binaryLabelsIt->second[oldSize] > 0 ) // we only have +1 and -1, so this might be beneficial in terms of speed
        lastAlphaIt->second[oldSize] = factor;
      else
        lastAlphaIt->second[oldSize] = -factor; // we follow the initialization as done in previous steps
      //lastAlphaIt->second[oldSize] = 0.0; // following the suggestion of Yeh and Darrell

      binaryLabelsIt++;
      if ( learnBalanced )
      {
        eigenMaxIt++;
      }
    }

    for ( std::map<int, GPLikelihoodApprox * >::iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      gpLikeIt->second->setLastAlphas ( &lastAlphas );
    }
  }
  // if we do not use previous alphas, we do not have to set up anything here
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the alpha-objects: " << t1.getLast() << std::endl;

  t1.start();
  this->updateEigenVectors();
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;

  if ( verbose )
    std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;

  // we can reuse the already given performOptimization-method:
  // OPT_GREEDY
  //   for this strategy we can't reuse any of the previously computed scores,
  //   so come on, let's do the whole thing again...
  // OPT_DOWNHILLSIMPLEX
  //   here we can benefit from previous results, when we use them as initialization for our optimizer
  //   ikmsums.begin()->second->getParameters ( currentParameters ); uses the previously computed optimal parameters
  //   as initialization
  // OPT_NONE
  //   nothing to do, obviously
  //NOTE we could skip this if we do not want to change our parameters given new examples
  if ( performOptimizationAfterIncrement )
  {
    t1.start();
    this->performOptimization ( gplikes, parameterVectorSizes, false /* initialize not with default values but using the last solution */ );
    t1.stop();
    if ( verboseTime )
      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;

    if ( verbose )
      cerr << "Preparing after retraining for classification ..." << endl;

    t1.start();
    this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
    t1.stop();
    if ( verboseTime )
      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
  }
  else
  {
    t1.start();
    t1.stop();
    std::cerr << "skip optimization" << std::endl;
    if ( verboseTime )
      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
    std::cerr << "skip feature transformation" << std::endl;
    if ( verboseTime )
      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
  }

  //NOTE unfortunately, the whole vector alpha differs, and not only its last entry.
  // If we knew any method that could update the solution efficiently, we could also compute A and B more efficiently by updating them.
  // Since we are not aware of any such method, we have to compute them completely anew.
  t1.start();
  this->computeMatricesAndLUTs ( gplikes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the A and B objects: " << t1.getLast() << std::endl;

  t.stop();

  ResourceStatistics rs;
  std::cerr << "Time used for re-learning: " << t.getLast() << std::endl;
  long maxMemory;
  rs.getMaximumMemory ( maxMemory );
  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;

  // don't waste memory
  if ( learnBalanced )
  {
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      delete gpLikeIt->second;
    }
  }
  else
  {
    delete gplikes.begin()->second;
  }
}
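
// Same procedure as in optimizeAfterSingleIncrement, but for a whole batch of new
// examples x: every example is added to the kernel combinations, and the previous
// alpha vectors are extended by x.size() warm-start entries before retraining.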
void FMKGPHyperparameterOptimization::optimizeAfterMultipleIncrements ( const std::vector<const NICE::SparseVector*> & x, const bool & performOptimizationAfterIncrement )
{
  Timer t;
  t.start();
  if ( fmk == NULL )
    fthrow ( Exception, "FastMinKernel object was not initialized!" );

  map<int, NICE::Vector> binaryLabels;
  set<int> classesToUse;
  prepareBinaryLabels ( binaryLabels, labels, classesToUse );
  if ( verbose )
    std::cerr << "labels.size() after increment: " << labels.size() << std::endl;

  Timer t1;
  t1.start();
  // update the kernel combinations
  std::map<int, NICE::Vector>::const_iterator labelIt = binaryLabels.begin();
  // note that if we only have a single ikmsum-object, the label vector will not be used at all by the internal objects (it is only relevant for IKMNoise)
  //TODO
  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
  {
    for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = x.begin(); exampleIt != x.end(); exampleIt++ )
    {
      it->second->addExample ( **exampleIt, labelIt->second );
    }
    labelIt++;
  }

  // we have to reset the fmk explicitly
  for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
  {
    ( ( GMHIKernel* ) it->second->getModel ( it->second->getNumberOfModels() - 1 ) )->setFastMinKernel ( this->fmk );
  }

  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;

  std::map<int, GPLikelihoodApprox * > gplikes;
  std::map<int, uint> parameterVectorSizes;

  t1.start();
  this->setupGPLikelihoodApprox ( gplikes, binaryLabels, parameterVectorSizes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;

  if ( verbose )
  {
    std::cerr << "parameterVectorSizes: " << std::endl;
    for ( std::map<int, uint>::const_iterator pvsIt = parameterVectorSizes.begin(); pvsIt != parameterVectorSizes.end(); pvsIt++ )
    {
      std::cerr << pvsIt->first << " " << pvsIt->second << " ";
    }
    std::cerr << std::endl;
  }

  t1.start();
  if ( usePreviousAlphas )
  {
    std::map<int, NICE::Vector>::const_iterator binaryLabelsIt = binaryLabels.begin();
    std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
    for ( std::map<int, NICE::Vector>::iterator lastAlphaIt = lastAlphas.begin(); lastAlphaIt != lastAlphas.end(); lastAlphaIt++ )
    {
      int oldSize ( lastAlphaIt->second.size() );
      lastAlphaIt->second.resize ( oldSize + x.size() );

      // We initialize the new entries with the same values as used in GPLikelihoodApprox in batch training.
      // Default in GPLikelihoodApprox for the first time:
      //   alpha = (binaryLabels[classCnt] * (1.0 / eigenmax[0]) );
      double maxEigenValue ( 1.0 );
      if ( (*eigenMaxIt).size() > 0 )
        maxEigenValue = (*eigenMaxIt)[0];
      double factor ( 1.0 / maxEigenValue );

      for ( uint i = 0; i < x.size(); i++ )
      {
        if ( binaryLabelsIt->second[oldSize+i] > 0 ) // we only have +1 and -1, so this might be beneficial in terms of speed
          lastAlphaIt->second[oldSize+i] = factor;
        else
          lastAlphaIt->second[oldSize+i] = -factor; // we follow the initialization as done in previous steps
        //lastAlphaIt->second[oldSize+i] = 0.0; // following the suggestion of Yeh and Darrell
      }

      binaryLabelsIt++;
      if ( learnBalanced )
      {
        eigenMaxIt++;
      }
    }

    for ( std::map<int, GPLikelihoodApprox * >::iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      gpLikeIt->second->setLastAlphas ( &lastAlphas );
    }
  }
  // if we do not use previous alphas, we do not have to set up anything here
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the alpha-objects: " << t1.getLast() << std::endl;

  t1.start();
  this->updateEigenVectors();
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;

  if ( verbose )
    std::cerr << "resulting eigenvalues of first class: " << eigenMax[0] << std::endl;

  // we can reuse the already given performOptimization-method:
  // OPT_GREEDY
  //   for this strategy we can't reuse any of the previously computed scores,
  //   so come on, let's do the whole thing again...
  // OPT_DOWNHILLSIMPLEX
  //   here we can benefit from previous results, when we use them as initialization for our optimizer
  //   ikmsums.begin()->second->getParameters ( currentParameters ); uses the previously computed optimal parameters
  //   as initialization
  // OPT_NONE
  //   nothing to do, obviously
  //NOTE we could skip this if we do not want to change our parameters given new examples
  if ( performOptimizationAfterIncrement )
  {
    t1.start();
    this->performOptimization ( gplikes, parameterVectorSizes, false /* initialize not with default values but using the last solution */ );
    t1.stop();
    if ( verboseTime )
      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;

    t1.start();
    this->transformFeaturesWithOptimalParameters ( gplikes, parameterVectorSizes );
    t1.stop();
    if ( verboseTime )
      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
  }
  else
  {
    t1.start();
    t1.stop();
    std::cerr << "skip optimization" << std::endl;
    if ( verboseTime )
      std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;
    std::cerr << "skip feature transformation" << std::endl;
    if ( verboseTime )
      std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;
    std::cerr << "skip computation of A, B and LUTs" << std::endl;
    if ( verboseTime )
      std::cerr << "Time used for setting up the A and B objects: " << t1.getLast() << std::endl;
  }

  if ( verbose )
    cerr << "Preparing after retraining for classification ..." << endl;

  //NOTE unfortunately, the whole vector alpha differs, and not only its last entries.
  // If we knew any method that could update the solution efficiently, we could also compute A and B more efficiently by updating them.
  // Since we are not aware of any such method, we have to compute them completely anew.
  t1.start();
  this->computeMatricesAndLUTs ( gplikes );
  t1.stop();
  if ( verboseTime )
    std::cerr << "Time used for setting up the A and B objects: " << t1.getLast() << std::endl;

  t.stop();

  ResourceStatistics rs;
  std::cerr << "Time used for re-learning: " << t.getLast() << std::endl;
  long maxMemory;
  rs.getMaximumMemory ( maxMemory );
  std::cerr << "Maximum memory used: " << maxMemory << " KB" << std::endl;

  // don't waste memory
  if ( learnBalanced )
  {
    for ( std::map<int, GPLikelihoodApprox*>::const_iterator gpLikeIt = gplikes.begin(); gpLikeIt != gplikes.end(); gpLikeIt++ )
    {
      delete gpLikeIt->second;
    }
  }
  else
  {
    delete gplikes.begin()->second;
  }
}
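
// Precomputes the quantities needed for the rough predictive variance approximation:
// the matrix AVar for evaluating the (squared) norm of the kernel vector k* (the KVN
// approximation) and, if quantization is used, the corresponding lookup table.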
void FMKGPHyperparameterOptimization::prepareVarianceApproximation()
{
  PrecomputedType AVar;
  fmk->hikPrepareKVNApproximation ( AVar );

  precomputedAForVarEst = AVar;
  precomputedAForVarEst.setIoUntilEndOfFile ( false );

  if ( q != NULL )
  {
    // do we have results from previous runs but called this method nonetheless?
    // then delete the old LUT and compute it again
    if ( precomputedTForVarEst != NULL )
      delete [] precomputedTForVarEst;

    double *T = fmk->hikPrepareLookupTableForKVNApproximation ( *q, pf );
    precomputedTForVarEst = T;
  }
}
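
// Classifies a sparse test example and returns the predicted class number; the
// per-class scores are written to the given SparseVector. A minimal usage sketch,
// assuming a trained object fmkgp and a test example xstar (both hypothetical names):
//
//   NICE::SparseVector scores;
//   int predictedClass = fmkgp.classify ( xstar, scores );
//
// In the binary setting, the score of the negative class is simply the negated score
// of the positive class.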
int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar, NICE::SparseVector & scores ) const
{
  // loop through all classes
  if ( precomputedA.size() == 0 )
  {
    fthrow ( Exception, "The precomputation vector is zero...have you trained this classifier?" );
  }

  uint maxClassNo = 0;
  for ( map<int, PrecomputedType>::const_iterator i = precomputedA.begin() ; i != precomputedA.end(); i++ )
  {
    uint classno = i->first;
    maxClassNo = std::max ( maxClassNo, classno );
    double beta;

    if ( q != NULL ) {
      map<int, double *>::const_iterator j = precomputedT.find ( classno );
      double *T = j->second;
      fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
    } else {
      const PrecomputedType & A = i->second;
      map<int, PrecomputedType>::const_iterator j = precomputedB.find ( classno );
      const PrecomputedType & B = j->second;

      // fmk->hik_kernel_sum ( A, B, xstar, beta ); if A, B are of type Matrix
      // Giving the transformation pf as an additional argument is necessary for the
      // following reason: FeatureMatrixT is sorted according to the original values,
      // therefore searching for upper and lower bounds ( findFirst... functions )
      // requires original feature values as inputs. However, for the calculation we
      // need the transformed feature values.
      fmk->hik_kernel_sum ( A, B, xstar, beta, pf );
    }
    scores[ classno ] = beta;
  }
  scores.setDim ( maxClassNo + 1 );

  if ( precomputedA.size() > 1 ) {
    // multi-class classification
    return scores.maxElement();
  } else {
    // binary setting
    // FIXME: not really flexible for every situation
    scores[binaryLabelNegative] = -scores[binaryLabelPositive];
    return scores[ binaryLabelPositive ] <= 0.0 ? binaryLabelNegative : binaryLabelPositive;
  }
}
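
// Rough predictive variance approximation: the variance of a test example x* is
// approximated by
//   sigma^2(x*) ~ k(x*,x*) - (1 / lambda_max) * ||k*||^2 ,
// where lambda_max is the largest eigenvalue of the (regularized) kernel matrix and
// k* is the kernel vector between x* and all training examples. Below, kSelf computes
// k(x*,x*) for the parameterized min kernel and normKStar the squared norm term.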
void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
{
  double kSelf ( 0.0 );
  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
  {
    kSelf += pf->f ( 0, it->second );
    // if weighted dimensions:
    //kSelf += pf->f(it->first,it->second);
  }

  double normKStar;
  if ( q != NULL )
  {
    if ( precomputedTForVarEst == NULL )
    {
      fthrow ( Exception, "The precomputed LUT for uncertainty prediction is NULL...have you prepared the uncertainty prediction?" );
    }
    fmk->hikComputeKVNApproximationFast ( precomputedTForVarEst, *q, x, normKStar );
  }
  else
  {
    fmk->hikComputeKVNApproximation ( precomputedAForVarEst, x, normKStar, pf );
  }

  predVariances.clear();
  predVariances.resize ( eigenMax.size() );

  // for the balanced setting, we get approximations for every binary task
  int cnt ( 0 );
  for ( std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin(); eigenMaxIt != eigenMax.end(); eigenMaxIt++, cnt++ )
  {
    predVariances[cnt] = kSelf - ( 1.0 / (*eigenMaxIt)[0] ) * normKStar;
  }
}
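
// NOTE (added documentation, a hedged sketch of the underlying math): the
// "fine" approximation uses the first k = nrOfEigenvaluesToConsiderForVarApprox
// eigenpairs ( lambda_i, v_i ) of the regularized kernel matrix:
//   k_*^T K~^{-1} k_* \approx \sum_{i=1}^{k-1} ( 1/lambda_i ) ( v_i^T k_* )^2
//                           + ( 1/lambda_k ) ( ||k_*||^2 - \sum_{i=1}^{k-1} ( v_i^T k_* )^2 ),
// i.e., the remaining energy of k_* outside the leading eigenspace is charged
// at the k-th eigenvalue. The predicted variance is then kSelf minus this
// second term.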
void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
{
  // ---------------- compute the first term --------------------
//   Timer t;
//   t.start();
  double kSelf ( 0.0 );
  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
  {
    kSelf += pf->f ( 0, it->second );
    // if weighted dimensions:
    //kSelf += pf->f(it->first,it->second);
  }

  // ---------------- compute the approximation of the second term --------------------
//   t.stop();
//   std::cerr << "ApproxFine -- time for first term: " << t.getLast() << std::endl;
//   t.start();
  NICE::Vector kStar;
  fmk->hikComputeKernelVector ( x, kStar );
//   t.stop();
//   std::cerr << "ApproxFine -- time for kernel vector: " << t.getLast() << std::endl;

  std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();

  predVariances.clear();
  predVariances.resize ( eigenMax.size() );

  int classIdx ( 0 );
  // for the balanced setting, we get approximations for every binary task
  for ( std::vector<NICE::Matrix>::const_iterator eigenMaxVectorIt = eigenMaxVectors.begin(); eigenMaxVectorIt != eigenMaxVectors.end(); eigenMaxVectorIt++, eigenMaxIt++, classIdx++ )
  {
    double currentSecondTerm ( 0.0 );
    double sumOfProjectionLengths ( 0.0 );

    if ( ( kStar.size() != (*eigenMaxVectorIt).rows() ) || ( kStar.size() <= 0 ) )
    {
      //NOTE should we emit a warning about the dimension mismatch here?
    }

//     NICE::Vector multiplicationResults; // will contain nrOfEigenvaluesToConsiderForVarApprox many entries
//     multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true/* transpose */ );
    NICE::Vector multiplicationResults ( nrOfEigenvaluesToConsiderForVarApprox, 0.0 );
    // There seems to be a nasty issue in multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true /* transpose */ ),
    // which makes it take aeons -- so we compute the product ourselves.
    for ( uint tmpI = 0; tmpI < kStar.size(); tmpI++ )
    {
      double kStarI ( kStar[tmpI] );
      for ( int tmpJ = 0; tmpJ < nrOfEigenvaluesToConsiderForVarApprox; tmpJ++ )
      {
        multiplicationResults[tmpJ] += kStarI * (*eigenMaxVectorIt) ( tmpI, tmpJ );
      }
    }

    double projectionLength ( 0.0 );
    int cnt ( 0 );
    NICE::Vector::const_iterator it = multiplicationResults.begin();

    while ( cnt < ( nrOfEigenvaluesToConsiderForVarApprox - 1 ) )
    {
      projectionLength = ( *it );
      currentSecondTerm += ( 1.0 / (*eigenMaxIt)[cnt] ) * pow ( projectionLength, 2 );
      sumOfProjectionLengths += pow ( projectionLength, 2 );
      it++;
      cnt++;
    }

    double normKStar ( pow ( kStar.normL2 (), 2 ) );
    currentSecondTerm += ( 1.0 / (*eigenMaxIt)[nrOfEigenvaluesToConsiderForVarApprox-1] ) * ( normKStar - sumOfProjectionLengths );

    if ( ( normKStar - sumOfProjectionLengths ) < 0 )
    {
//       std::cerr << "Attention: normKStar - sumOfProjectionLengths is smaller than zero -- strange!" << std::endl;
    }
    predVariances[classIdx] = kSelf - currentSecondTerm;
  }
}
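
// NOTE (added documentation, hedged): the exact variant computes the second
// term k_*^T ( K + sigma_n^2 I )^{-1} k_* without any low-rank shortcut, by
// solving the linear system ( K + sigma_n^2 I ) alpha = k_* with the
// configured iterative linear solver (Jacobi-preconditioned conjugate
// gradients where available) and accumulating k_*^T alpha. This is the most
// accurate but also the most expensive of the three variants.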
void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NICE::SparseVector & x, NICE::Vector & predVariances ) const
{
  Timer t;
//   t.start();
  // ---------------- compute the first term --------------------
  double kSelf ( 0.0 );
  for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
  {
    kSelf += pf->f ( 0, it->second );
    // if weighted dimensions:
    //kSelf += pf->f(it->first,it->second);
  }

  // ---------------- compute the second term --------------------
//   t.stop();
//   std::cerr << "ApproxExact -- time for first term: " << t.getLast() << std::endl;
//   t.start();
  NICE::Vector kStar;
  fmk->hikComputeKernelVector ( x, kStar );
//   t.stop();
//   std::cerr << "ApproxExact -- time for kernel vector: " << t.getLast() << std::endl;

  // for the balanced setting, we get uncertainties for every binary task
  std::vector<NICE::Vector>::const_iterator eigenMaxIt = eigenMax.begin();
  predVariances.clear();
  predVariances.resize ( eigenMax.size() );
  int cnt ( 0 );
  for ( std::map<int, IKMLinearCombination * >::const_iterator ikmSumIt = ikmsums.begin(); ikmSumIt != ikmsums.end(); ikmSumIt++, eigenMaxIt++, cnt++ )
  {
    //now run the ILS method
    NICE::Vector diagonalElements;
    ikmSumIt->second->getDiagonalElements ( diagonalElements );

//     t.start();
    // init simple Jacobi pre-conditioning
    ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( linsolver );

    //perform pre-conditioning
    if ( linsolver_cg != NULL )
      linsolver_cg->setJacobiPreconditioner ( diagonalElements );

    Vector beta;

    /** About finding a good initial solution (see also GPLikelihoodApproximation)
      * K~ = K + sigma^2 I
      *
      * K~ \approx lambda_max v v^T
      * \lambda_max v v^T alpha = k_*     | multiply with v^T from the left
      * => \lambda_max v^T alpha = v^T k_*
      * => alpha = k_* / lambda_max could be a good initial start
      * Plugging this back into the first equation shows that the choice is
      * exact when v is parallel to k_*.
      * In practice this reduces the number of iterations by 5 to 8.
      */
    beta = ( kStar * ( 1.0 / (*eigenMaxIt)[0] ) );
/*    t.stop();
    std::cerr << "ApproxExact -- time for preconditioning etc: " << t.getLast() << std::endl;
    t.start();*/
    linsolver->solveLin ( * ( ikmSumIt->second ), kStar, beta );
//     t.stop();
//     std::cerr << "ApproxExact -- time for lin solve: " << t.getLast() << std::endl;

    beta *= kStar; // element-wise product; the sum below yields k_*^T alpha
    double currentSecondTerm ( beta.Sum() );
    predVariances[cnt] = kSelf - currentSecondTerm;
  }
}
// ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
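
// NOTE (added documentation, hedged): store and restore use a plain-text
// format and must stay in sync token by token. The order is: the FastMinKernel
// object, the class name, learnBalanced, the precomputed maps A and B, the
// LUTs T, the variance-estimation structures, eigenvalues and eigenvectors,
// the ikmsums models, and finally the binary class labels. Any change to one
// function has to be mirrored in the other.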
void FMKGPHyperparameterOptimization::restore ( std::istream & is, int format )
{
  if ( is.good() )
  {
    //load the underlying data
    if ( fmk != NULL )
      delete fmk;
    fmk = new FastMinKernel;
    fmk->restore ( is, format );

    //now set up the GHIK-things in ikmsums
    for ( std::map<int, IKMLinearCombination * >::iterator it = ikmsums.begin(); it != ikmsums.end(); it++ )
    {
      it->second->addModel ( new GMHIKernel ( fmk, this->pf, this->q ) );
    }

    is.precision ( numeric_limits<double>::digits10 + 1 );

    string tmp;
    is >> tmp; //class name
    is >> tmp;
    is >> learnBalanced;

    is >> tmp; //precomputedA:
    is >> tmp; //size:
    int preCompSize ( 0 );
    is >> preCompSize;
    precomputedA.clear();
    std::cerr << "precomputedA.size(): " << preCompSize << std::endl;
    for ( int i = 0; i < preCompSize; i++ )
    {
      int nr;
      is >> nr;
      PrecomputedType pct;
      pct.setIoUntilEndOfFile ( false );
      pct.restore ( is, format );
      precomputedA.insert ( std::pair<int, PrecomputedType> ( nr, pct ) );
    }

    is >> tmp; //precomputedB:
    is >> tmp; //size:
    is >> preCompSize;
    precomputedB.clear();
    for ( int i = 0; i < preCompSize; i++ )
    {
      int nr;
      is >> nr;
      PrecomputedType pct;
      pct.setIoUntilEndOfFile ( false );
      pct.restore ( is, format );
      precomputedB.insert ( std::pair<int, PrecomputedType> ( nr, pct ) );
    }

    is >> tmp;
    int precomputedTSize;
    is >> precomputedTSize;
    precomputedT.clear();
    if ( precomputedTSize > 0 )
    {
      is >> tmp;
      int sizeOfLUT;
      is >> sizeOfLUT;
      for ( int i = 0; i < precomputedTSize; i++ )
      {
        is >> tmp;
        int index;
        is >> index;
        double * array = new double [ sizeOfLUT ];
        for ( int j = 0; j < sizeOfLUT; j++ )
        {
          is >> array[j];
        }
        precomputedT.insert ( std::pair<int, double*> ( index, array ) );
      }
    }

    //now restore the things we need for the variance computation
    is >> tmp;
    int sizeOfAForVarEst;
    is >> sizeOfAForVarEst;
    if ( sizeOfAForVarEst > 0 )
    {
      // we have to consume the stream content here in any case,
      // otherwise all subsequent reads would be misaligned
      precomputedAForVarEst.setIoUntilEndOfFile ( false );
      precomputedAForVarEst.restore ( is, format );
    }

    is >> tmp; //precomputedTForVarEst
    is >> tmp; // NOTNULL or NULL
    if ( tmp.compare ( "NOTNULL" ) == 0 )
    {
      int sizeOfLUT;
      is >> sizeOfLUT;
      precomputedTForVarEst = new double [ sizeOfLUT ];
      for ( int i = 0; i < sizeOfLUT; i++ )
      {
        is >> precomputedTForVarEst[i];
      }
    }
    else
    {
      if ( precomputedTForVarEst != NULL )
      {
        delete [] precomputedTForVarEst; // allocated with new[], so use delete[]
        precomputedTForVarEst = NULL;
      }
    }

    //restore eigenvalues and eigenvectors
    is >> tmp; //eigenMax.size():
    int eigenMaxSize;
    is >> eigenMaxSize;
    eigenMax.clear(); // do not append to the results of a previous restore
    for ( int i = 0; i < eigenMaxSize; i++ )
    {
      NICE::Vector eigenMaxEntry;
      is >> eigenMaxEntry;
      eigenMax.push_back ( eigenMaxEntry );
    }

    is >> tmp; //eigenMaxVectors.size():
    int eigenMaxVectorsSize;
    is >> eigenMaxVectorsSize;
    eigenMaxVectors.clear();
    for ( int i = 0; i < eigenMaxVectorsSize; i++ )
    {
      NICE::Matrix eigenMaxVectorsEntry;
      is >> eigenMaxVectorsEntry;
      eigenMaxVectors.push_back ( eigenMaxVectorsEntry );
    }

    is >> tmp; //ikmsums:
    is >> tmp; //size:
    int ikmSumsSize ( 0 );
    is >> ikmSumsSize;
    ikmsums.clear();
    for ( int i = 0; i < ikmSumsSize; i++ )
    {
      int clNr ( 0 );
      is >> clNr;
      IKMLinearCombination *ikmsum = new IKMLinearCombination ();
      int nrOfModels ( 0 );
      is >> tmp;
      is >> nrOfModels;
      //the first model is always our noise model
      IKMNoise * ikmnoise = new IKMNoise ();
      ikmnoise->restore ( is, format );
      ikmsum->addModel ( ikmnoise );
      //NOTE if you added further models, restore them here in the correct order
      ikmsums.insert ( std::pair<int, IKMLinearCombination*> ( clNr, ikmsum ) );
      //the last model is the GHIK -- we do not have to restore it, but simply reset it later on
    }

    //restore the class numbers for binary settings (in multi-class settings these values are negative by default)
    is >> tmp; // "binaryLabelPositive: "
    is >> binaryLabelPositive;
    is >> tmp; // " binaryLabelNegative: "
    is >> binaryLabelNegative;
  }
  else
  {
    std::cerr << "InStream not initialized - restoring not possible!" << std::endl;
  }
}
void FMKGPHyperparameterOptimization::store ( std::ostream & os, int format ) const
{
  if ( os.good() )
  {
    fmk->store ( os, format );

    os.precision ( numeric_limits<double>::digits10 + 1 );

    os << "FMKGPHyperparameterOptimization" << std::endl;
    os << "learnBalanced: " << learnBalanced << std::endl;

    //we only have to store what we computed -- the remaining settings come from the config file afterwards
    os << "precomputedA: size: " << precomputedA.size() << std::endl;
    std::map< int, PrecomputedType >::const_iterator preCompIt = precomputedA.begin();
    for ( uint i = 0; i < precomputedA.size(); i++ )
    {
      os << preCompIt->first << std::endl;
      ( preCompIt->second ).store ( os, format );
      preCompIt++;
    }

    os << "precomputedB: size: " << precomputedB.size() << std::endl;
    preCompIt = precomputedB.begin();
    for ( uint i = 0; i < precomputedB.size(); i++ )
    {
      os << preCompIt->first << std::endl;
      ( preCompIt->second ).store ( os, format );
      preCompIt++;
    }

    os << "precomputedT.size(): " << precomputedT.size() << std::endl;
    if ( precomputedT.size() > 0 )
    {
      int sizeOfLUT ( 0 );
      if ( q != NULL )
        sizeOfLUT = q->size() * this->fmk->get_d();
      os << "SizeOfLUTs: " << sizeOfLUT << std::endl;
      for ( std::map< int, double * >::const_iterator it = precomputedT.begin(); it != precomputedT.end(); it++ )
      {
        os << "index: " << it->first << std::endl;
        for ( int i = 0; i < sizeOfLUT; i++ )
        {
          os << ( it->second )[i] << " ";
        }
        os << std::endl;
      }
    }

    //now store the things needed for the variance estimation
    os << "precomputedAForVarEst.size(): " << precomputedAForVarEst.size() << std::endl;
    if ( precomputedAForVarEst.size() > 0 )
    {
      precomputedAForVarEst.store ( os, format );
      os << std::endl;
    }

    if ( precomputedTForVarEst != NULL )
    {
      os << "precomputedTForVarEst NOTNULL" << std::endl;
      int sizeOfLUT ( 0 );
      if ( q != NULL )
        sizeOfLUT = q->size() * this->fmk->get_d();
      os << sizeOfLUT << std::endl;
      for ( int i = 0; i < sizeOfLUT; i++ )
      {
        os << precomputedTForVarEst[i] << " ";
      }
      os << std::endl;
    }
    else
    {
      os << "precomputedTForVarEst NULL" << std::endl;
    }

    //store the eigenvalues and eigenvectors
    os << "eigenMax.size(): " << std::endl;
    os << eigenMax.size() << std::endl;
    for ( std::vector<NICE::Vector>::const_iterator it = this->eigenMax.begin(); it != this->eigenMax.end(); it++ )
    {
      os << *it << std::endl;
    }
    os << "eigenMaxVectors.size(): " << std::endl;
    os << eigenMaxVectors.size() << std::endl;
    for ( std::vector<NICE::Matrix>::const_iterator it = eigenMaxVectors.begin(); it != eigenMaxVectors.end(); it++ )
    {
      os << *it << std::endl;
    }

    os << "ikmsums: size: " << ikmsums.size() << std::endl;
    std::map<int, IKMLinearCombination * >::const_iterator ikmSumIt = ikmsums.begin();
    for ( uint i = 0; i < ikmsums.size(); i++ )
    {
      os << ikmSumIt->first << std::endl;
      os << "numberOfModels: " << ( ikmSumIt->second )->getNumberOfModels() << std::endl;
      //the last model is always the GHIK, which we do not have to store
      for ( int j = 0; j < ( ikmSumIt->second )->getNumberOfModels() - 1; j++ )
      {
        ( ( ikmSumIt->second )->getModel ( j ) )->store ( os, format );
      }
      ikmSumIt++;
    }

    //store the class numbers for binary settings (in multi-class settings these values are negative by default)
    os << "binaryLabelPositive: " << binaryLabelPositive << " binaryLabelNegative: " << binaryLabelNegative << std::endl;
  }
  else
  {
    std::cerr << "OutStream not initialized - storing not possible!" << std::endl;
  }
}
void FMKGPHyperparameterOptimization::clear ( ) {}
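
// NOTE (added documentation, a hedged usage sketch, not part of the original
// file): in an incremental-learning loop a caller would first train the
// classifier and then feed new examples one by one or in batches, e.g.
//
//   FMKGPHyperparameterOptimization fmkgp ( &conf );
//   fmkgp.optimize ( examples, labels );                        // initial training
//   fmkgp.addExample ( newExample, newLabel, true );            // single increment
//   fmkgp.addMultipleExamples ( newExamples, newLabels, true ); // batch increment
//
// The exact constructor and optimize signatures above are assumptions. Both
// add* methods below append the labels, push the features into the
// FastMinKernel structure, and then re-run the hyperparameter optimization.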
void FMKGPHyperparameterOptimization::addExample ( const NICE::SparseVector & x, const double & label, const bool & performOptimizationAfterIncrement )
{
  this->labels.append ( label );

  // add the new example to our data structure
  // It is necessary to do this already here and not later on, for internal reasons (see GMHIKernel for more details)
  Timer t;
  t.start();
  fmk->addExample ( x, pf );
  t.stop();
  if ( verboseTime )
    std::cerr << "Time used for adding the data to the fmk object: " << t.getLast() << std::endl;

  // re-run the optimization using the previously known solutions as initialization
  // and update the corresponding matrices A, B and lookup tables T
  optimizeAfterSingleIncrement ( x, performOptimizationAfterIncrement );
}
void FMKGPHyperparameterOptimization::addMultipleExamples ( const std::vector<const NICE::SparseVector*> & newExamples, const NICE::Vector & _labels, const bool & performOptimizationAfterIncrement )
{
  int oldSize ( this->labels.size() );
  this->labels.resize ( this->labels.size() + _labels.size() );
  for ( uint i = 0; i < _labels.size(); i++ )
  {
    this->labels[i+oldSize] = _labels[i];
  }

  // add the new examples to our data structure
  // It is necessary to do this already here and not later on, for internal reasons (see GMHIKernel for more details)
  Timer t;
  t.start();
  for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = newExamples.begin(); exampleIt != newExamples.end(); exampleIt++ )
  {
    fmk->addExample ( **exampleIt, pf );
  }
  t.stop();
  if ( verboseTime )
    std::cerr << "Time used for adding the data to the fmk object: " << t.getLast() << std::endl;

  Timer tVar;
  tVar.start();
  //do we need to update our matrices?
  if ( precomputedAForVarEst.size() != 0 )
  {
    //this computes everything from scratch
    this->prepareVarianceApproximation();
    //the following would perform a more sophisticated update,
    //but unfortunately there is a bug somewhere
    //TODO fix me!
//     std::cerr << "update the LUTs needed for variance computation" << std::endl;
//     for ( std::vector<const NICE::SparseVector*>::const_iterator exampleIt = newExamples.begin(); exampleIt != newExamples.end(); exampleIt++ )
//     {
//       std::cerr << "new example: " << std::endl;
//       (**exampleIt).store(std::cerr);
//       std::cerr << "now update the LUT for var est" << std::endl;
//       fmk->updatePreparationForKVNApproximation( **exampleIt, precomputedAForVarEst, pf );
//       if ( q != NULL )
//       {
//         fmk->updateLookupTableForKVNApproximation( **exampleIt, precomputedTForVarEst, *q, pf );
//       }
//     }
//     std::cerr << "update of LUTs for variance computation done" << std::endl;
  }
  tVar.stop();
  if ( verboseTime )
    std::cerr << "Time used for computing the Variance Matrix and LUT: " << tVar.getLast() << std::endl;

  // re-run the optimization using the previously known solutions as initialization
  // and update the corresponding matrices A, B and lookup tables T
  optimizeAfterMultipleIncrements ( newExamples, performOptimizationAfterIncrement );
}