123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696 |
- /**
- * @file IL_AL.cpp
- * @brief Incrementally train the GP HIK classifier using the predictive variance and its approximations to select new samples
- * @author Alexander Freytag
- * @date 09-05-2012
- */
- #include <vector>
- #include <stdlib.h>
- #include <time.h>
- #include <set>
- #include <core/basics/Config.h>
- #include <core/basics/StringTools.h>
- #include <core/vector/SparseVectorT.h>
- #include <core/vector/VectorT.h>
- //----------
- #include "vislearning/baselib/ProgressBar.h"
- #include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
- #include <vislearning/classifier/fpclassifier/gphik/FPCGPHIK.h>
- #include "vislearning/cbaselib/MultiDataset.h"
- #include <vislearning/cbaselib/LabeledSet.h>
- #include "vislearning/cbaselib/ClassificationResults.h"
- #include <vislearning/baselib/Globals.h>
- #include <vislearning/math/kernels/KernelData.h>
- //----------
- #include "gp-hik-exp/progs/datatools.h"
- //----------
- // #include <incrementallearning/IL_Framework_Generic.h>
- //
- using namespace std;
- using namespace NICE;
- using namespace OBJREC;
// Verbosity levels controlling how much diagnostic output main() prints.
enum verbose_level {NONE = 0, LOW = 1, MEDIUM = 2, EVERYTHING = 3};

// Active-learning query strategies for picking the next unlabeled examples.
enum QueryStrategy{
    RANDOM = 0,  // pick examples uniformly at random
    GPMEAN,      // pick examples with smallest |GP mean| score (minimum margin)
    GPPREDVAR,   // pick examples with largest GP predictive variance
    GPHEURISTIC  // pick examples minimizing |mean| / sqrt(noise^2 + variance)
};
-
/**
 * @brief Convert an integer to its decimal string representation.
 *
 * Used to build per-run dataset keys such as "train0", "test1", ...
 *
 * @param number value to convert
 * @return decimal representation of @p number
 */
std::string convertInt(int number)
{
  // An output-only stream is sufficient here; the fully qualified name
  // avoids depending on "using namespace std" and transitive includes.
  std::ostringstream ss;
  ss << number;
  return ss.str();
}
- /**
- Computes from randomly or deterministically choosen trainimages kernelmatrizes and evaluates their performance, using ROI-optimization
- */
/**
 * @brief Entry point: runs num_runs active-learning experiments. Each run
 *        trains a GP-HIK classifier (FPCGPHIK) on trainExPerClass examples
 *        per class, then performs nrOfIncrements incremental-learning steps,
 *        each querying incrementalAddSize new examples according to the
 *        configured strategy (random / gpMean / gpPredVar / gpHeuristic),
 *        and optionally evaluates recognition rate and timings after every
 *        step. Finally, per-step means and standard deviations over all
 *        runs are printed to stderr.
 *
 * @param argc,argv forwarded to NICE::Config for configuration parsing
 * @return 0 on success
 */
int main ( int argc, char **argv )
{
  std::cout.precision ( 10 );
  std::cerr.precision ( 10 );

  NICE::Config conf ( argc, argv );

  // ----- experiment parameters from the "GP_IL" config section -----
  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );
  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);
  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);
  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );
  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );

  // sigma^2 of the GP observation noise; used only by the gpHeuristic strategy
  double squaredNoise = pow( conf.gD("FPCGPHIK", "noise", 0.01) , 2);

  // map the configured strategy name onto the QueryStrategy enum;
  // unknown strings silently fall back to RANDOM
  string queryStrategyString = conf.gS( "main", "queryStrategy", "random");
  QueryStrategy queryStrategy;
  if (queryStrategyString.compare("gpMean") == 0)
  {
    queryStrategy = GPMEAN;
  }
  else if (queryStrategyString.compare("gpPredVar") == 0)
  {
    queryStrategy = GPPREDVAR;
  }
  else if (queryStrategyString.compare("gpHeuristic") == 0)
  {
    queryStrategy = GPHEURISTIC;
  }
  else
  {
    queryStrategy = RANDOM;
  }

  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
  verbose_level verbose ( NONE );
  switch ( verbose_int )
  {
    case 0:
      verbose = NONE;
      break;
    case 1:
      verbose = LOW;
      break;
    case 2:
      verbose = MEDIUM;
      break;
    case 3:
      verbose = EVERYTHING;
      break;
  }

  /* initialize random seed: */
  srand ( time ( NULL ) ); //with 0 for reproductive results
  // srand ( 0 ); //with 0 for reproductive results

  // =========================== INIT ===========================

  // One inner vector per increment step (index 0 = initial training set for
  // recognition/classification stats); each inner vector collects one entry
  // per run, aggregated into mean/std-dev at the end.
  std::vector<std::vector<double> > recognitions_rates(nrOfIncrements+1);
  std::vector<std::vector<float> > classification_times(nrOfIncrements+1);
  std::vector<std::vector<float> > IL_training_times(nrOfIncrements);

  int nrOfClassesUsed;

  for ( int run = 0; run < num_runs; run++ )
  {
    std::cerr << "run: " << run << std::endl;

    //15-scenes settings
    std::string ext = conf.gS("main", "ext", ".txt");
    std::cerr << "Using cache extension: " << ext << std::endl;

    OBJREC::MultiDataset md ( &conf );

    std::cerr << "now read the dataset" << std::endl;

    // read training set for this run ("train0", "train1", ...)
    vector< NICE::Vector > trainDataOrig;
    Vector y;
    string trainRun ( "train" + convertInt( run ) );
    std::cerr << "look for " << trainRun << std::endl;
    const LabeledSet *train = md[ trainRun ]; //previously, we only selected "train", no we select the permutation for this run

    // remember image filenames in permutation order so we can later print
    // WHICH examples were queried by the active-learning step
    LabeledSet::Permutation orderTrain;
    train->getPermutation(orderTrain);
    std::vector<string> filenamesTraining;
    for ( LabeledSet::Permutation::const_iterator i = orderTrain.begin(); i != orderTrain.end(); i++)
    {
      string filename((i->second)->img());
      filenamesTraining.push_back(filename);
    }

    readData< std::vector< NICE::Vector >, NICE::Vector > ( conf, *train, trainDataOrig, y, ext );

    // collect the set of distinct class labels present in the training data
    std::set<int> classesAvailable;
    for ( uint i = 0; i < y.size(); i++)
    {
      //automatically check for duplicates
      classesAvailable.insert( y[i] );
    }

    int numberOfClasses = classesAvailable.size();

    std::map<int,int> nrExamplesPerClassInDataset;
    std::map<int,std::vector<int> > examplesPerClassInDataset;

    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
    {
      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
    }

    // group example indices by class label
    for ( uint i = 0; i < y.size(); i++ )
    {
      (examplesPerClassInDataset.find( y[i] )->second).push_back(i);
    }

    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
    {
      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
    }

    // print per-class example counts
    for ( std::map<int,int>::const_iterator it = nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
    {
      cerr << it->first << ": " << it->second << endl;
    }

    Examples examples;

    //count how many examples of every class we have while actively selecting new examples
    //NOTE works only if we have subsequent class numbers
    NICE::Vector pickedExamplesPerClass( classesAvailable.size(), trainExPerClass);

    // working copy; chosen indices are erased from it below
    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);

    //chose examples for every class used for training
    //we will always use the first examples from each class, since the dataset comes already randomly ordered
    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++)
    {
      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
      std::cerr << "pick training examples for class " << *clIt << std::endl;

      for (int i = 0; i < trainExPerClass; i++)
      {
        std::cerr << "i: " << i << std::endl;
        int exampleIndex ( 0 ); //old: rand() % ( exIt->second.size() ) );
        std::cerr << "pick example " << exIt->second[exampleIndex] << " - " << y[exIt->second[exampleIndex] ] << " -- " << filenamesTraining[exIt->second[exampleIndex]] << std::endl;

        Example example;
        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
        example.svec = new SparseVector(xTrain); // owned here; freed at the end of the run
        //let's take this example and its corresponding label (which should be *clIt)
        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
        //
        exIt->second.erase(exIt->second.begin()+exampleIndex);
      }
    }

    // NOTE: unlabeledExamples[k] and filenamesUnlabeled[k] must stay in sync
    // (parallel arrays); every erase below has to be applied to both.
    std::vector<string> filenamesUnlabeled;
    filenamesUnlabeled.clear();

    //which examples are left to be actively chosen lateron?
    std::vector<int> unlabeledExamples( y.size() - trainExPerClass*classesAvailable.size() );
    int exCnt( 0 );
    for (std::set<int>::const_iterator clIt = classesAvailable.begin(); clIt != classesAvailable.end(); clIt++ )
    {
      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
      //list all examples of this specific class
      for (std::vector<int>::const_iterator it = exIt->second.begin(); it != exIt->second.end(); it++)
      {
        unlabeledExamples[exCnt] = *it;
        exCnt++;
        filenamesUnlabeled.push_back( filenamesTraining[*it] );
      }
    }

    // ----- initial training -----
    // NOTE(review): clock() returns clock_t, not time_t — works in practice
    // on common platforms, but the type is misleading; confirm intent.
    time_t prep_start_time = clock();
    FPCGPHIK * classifier = new FPCGPHIK( &conf );

    FeaturePool fp; // will be ignored
    classifier->train ( fp, examples );
    float time_preparation = ( float ) ( clock() - prep_start_time ) ;
    std::cerr << "Time for initial training: " << time_preparation / CLOCKS_PER_SEC << std::endl;

    nrOfClassesUsed = classesAvailable.size();

    // ------------------ TESTING
    string testRun ( "test" + convertInt( run ) );
    const LabeledSet *test = md[ testRun ]; //previously, we only selected "test", no we select the permutation for this run
    VVector testData;
    Vector yTest;
    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );

    NICE::Matrix confusionMatrix ( numberOfClasses, numberOfClasses );
    confusionMatrix.set ( 0.0 );

    time_t start_time = clock();
    std::vector<int> chosen_examples_per_class ( numberOfClasses );

    std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;

    // classify the full test set with the initial model (increment step 0)
    if ( do_classification )
    {
      for ( uint i = 0 ; i < testData.size(); i++ )
      {
        Example example;
        const Vector & xstar = testData[i];
        SparseVector xstar_sparse ( xstar ); // stack-owned; not freed via svec
        OBJREC::ClassificationResult result;
        example.svec = &xstar_sparse;

        result = classifier->classify( example );
        // cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;

        result.classno_groundtruth = yTest[i];
        confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
      }
      float time_classification = ( float ) ( clock() - start_time ) ;
      if ( verbose >= LOW )
        cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;
      ( classification_times[0] ).push_back ( time_classification / CLOCKS_PER_SEC );

      // row-normalize so diagonal entries become per-class recognition rates
      confusionMatrix.normalizeRowsL1();
      double avg_recognition_rate = 0.0;
      for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
      {
        if ( verbose >= MEDIUM )
        {
          std::cerr << "Class no: " << i << " : " << confusionMatrix ( i, i ) << std::endl;
        }
        avg_recognition_rate += confusionMatrix ( i, i );
      }
      avg_recognition_rate /= confusionMatrix.rows();
      std::cerr << confusionMatrix;
      std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << examples.size() << " training examples used" << std::endl << std::endl;
      recognitions_rates[0].push_back ( avg_recognition_rate*100 );
    }

    //Now start the Incremental-Learning-Part

    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
    {
      //chose examples for every class used for training
      Examples newExamples;

      //simply count how many possible example we have
      int nrOfPossibleExamples( unlabeledExamples.size() );

      if (queryStrategy == RANDOM)
      {
        std::cerr << "print chosen examples: " << std::endl;
        for (int i = 0; i < incrementalAddSize; i++)
        {
          int exampleIndex ( rand() % ( unlabeledExamples.size() ) );

          Example newExample;
          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exampleIndex] ];
          newExample.svec = new SparseVector( xTrain ); // owned until addMultipleExamples + delete below
          int label( y[ unlabeledExamples[exampleIndex] ] );
          newExamples.push_back ( pair<int, Example> ( label, newExample ) );
          unlabeledExamples.erase( unlabeledExamples.begin()+exampleIndex );
          // NOTE(review): this prints the random index (exampleIndex+1) as the
          // progress counter instead of i+1 — looks unintended; confirm.
          std::cerr << exampleIndex+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ exampleIndex ] << std::endl;
          filenamesUnlabeled.erase( filenamesUnlabeled.begin()+exampleIndex );
          pickedExamplesPerClass[label]++;
        }
      }// end computation for RANDOM
      else if ( (queryStrategy == GPMEAN) || (queryStrategy == GPPREDVAR) || (queryStrategy == GPHEURISTIC) )
      {
        //compute uncertainty values for all examples according to the query strategy
        // scores: (index into unlabeledExamples, score)
        std::vector<std::pair<int,double> > scores;
        scores.clear();
        time_t unc_pred_start_time = clock();
        // std::cerr << "possible examples to query: " << unlabeledExamples.size() << std::endl;
        for (uint exIndex = 0; exIndex < unlabeledExamples.size(); exIndex++)
        {
          Example example;
          NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[exIndex] ];
          SparseVector xTrainSparse ( xTrain ); // stack-owned, only borrowed by example
          example.svec = &xTrainSparse;

          if (queryStrategy == GPMEAN)
          {
            // minimum-margin: smallest absolute mean score over all classes
            ClassificationResult r = classifier->classify( example );
            double bestScore( numeric_limits<double>::max() );
            for( int clCnt = 0; clCnt < nrOfClassesUsed; clCnt++)
            {
              if ( fabs(r.scores[clCnt]) < bestScore )
                bestScore = fabs(r.scores[clCnt]);
            }
            scores.push_back( std::pair<int,double> ( exIndex, bestScore ) );
          }
          else if (queryStrategy == GPPREDVAR)
          {
            double singleUncertainty;
            //use the pred variance computation specified in the config file
            classifier->predictUncertainty( example, singleUncertainty );
            //take the maximum of the scores for the predictive variance
            scores.push_back( std::pair<int,double> ( exIndex, singleUncertainty ) );
          }
          else if (queryStrategy == GPHEURISTIC)
          {
            double singleUncertainty;
            //use the pred variance computation specified in the config file
            classifier->predictUncertainty( example, singleUncertainty );
            //compute the mean values for every class
            ClassificationResult r = classifier->classify( example );
            NICE::Vector heuristicValues ( r.scores.size(), 0);
            for ( int tmp = 0; tmp < heuristicValues.size(); tmp++ )
            {
              // |mean| / sqrt(sigma^2 + predictive variance)
              heuristicValues[tmp] = fabs(r.scores[tmp]) / sqrt( squaredNoise + singleUncertainty );
            }
            //take the minimum of the scores for the heuristic measure
            scores.push_back( std::pair<int,double> ( exIndex, heuristicValues.Min()) );
          }
        }
        float time_score_computation = ( float ) ( clock() - unc_pred_start_time ) ;

        //pick the ones with best score
        //we could speed this up using a more sophisticated search method

        if (queryStrategy == GPPREDVAR) //take the maximum of the scores for the predictive variance
        {
          std::set<int> chosenExamplesForThisRun;
          chosenExamplesForThisRun.clear();
          for (int i = 0; i < incrementalAddSize; i++)
          {
            // linear search for the current maximum score
            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();

            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
            {
              if (jIt->second > bestExample->second)
                bestExample = jIt;
            }
            Example newExample;
            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
            newExample.svec = new SparseVector( xTrain );
            //actually this is the ACTIVE LEARNING step (query a label)
            int label( y[ unlabeledExamples[bestExample->first] ] );
            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
            //remember the index, to safely remove this example afterwards from unlabeledExamples
            chosenExamplesForThisRun.insert(bestExample->first);
            scores.erase(bestExample);
            pickedExamplesPerClass[label]++;
          }

          std::cerr << "print chosen examples: " << std::endl;
          int tmpCnt(0);
          for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
          {
            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ *it ] << std::endl;
          }

          //delete the queried examples from the set of unlabeled ones
          //do this in an decreasing order in terms of indices to ensure valid access
          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
          {
            // NOTE(review): unlike the min-score branch below, this does NOT
            // erase from filenamesUnlabeled, so the parallel filename array
            // drifts out of sync with unlabeledExamples after the first
            // GPPREDVAR increment — likely a bug; confirm.
            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );
          }
        }
        else //take the minimum of the scores for the heuristic and the gp mean (minimum margin)
        {
          std::set<int> chosenExamplesForThisRun;
          chosenExamplesForThisRun.clear();
          for (int i = 0; i < incrementalAddSize; i++)
          {
            // linear search for the current minimum score
            std::vector<std::pair<int,double> >::iterator bestExample = scores.begin();

            for (std::vector<std::pair<int,double> >::iterator jIt = scores.begin(); jIt !=scores.end(); jIt++)
            {
              if (jIt->second < bestExample->second)
                bestExample = jIt;
            }
            Example newExample;
            NICE::Vector & xTrain = trainDataOrig[ unlabeledExamples[bestExample->first] ];
            newExample.svec = new SparseVector( xTrain );
            //actually this is the ACTIVE LEARNING step (query a label)
            int label( y[ unlabeledExamples[bestExample->first] ] );
            newExamples.push_back ( pair<int, Example> ( label, newExample ) );
            //remember the index, to safely remove this example afterwards from unlabeledExamples
            chosenExamplesForThisRun.insert(bestExample->first);
            scores.erase(bestExample);
            pickedExamplesPerClass[label]++;
          }

          std::cerr << "print chosen examples: " << std::endl;
          int tmpCnt(0);
          for (std::set<int>::const_iterator it = chosenExamplesForThisRun.begin(); it != chosenExamplesForThisRun.end(); it++, tmpCnt++)
          {
            std::cerr << tmpCnt+1 << " / " << incrementalAddSize << " : " << filenamesUnlabeled[ *it ] << std::endl;
          }

          //delete the queried example from the set of unlabeled ones
          //do this in an decreasing order in terms of indices to ensure valid access
          for (std::set<int>::const_reverse_iterator it = chosenExamplesForThisRun.rbegin(); it != chosenExamplesForThisRun.rend(); it++)
          {
            unlabeledExamples.erase( unlabeledExamples.begin()+(*it) );
            filenamesUnlabeled.erase( filenamesUnlabeled.begin()+(*it) );
          }
        }

        std::cerr << "Time used to compute query-scores for " << nrOfPossibleExamples << " examples: " << time_score_computation / CLOCKS_PER_SEC << " [s]" << std::endl;
      } // end computation for GPMEAN, GPPREDVAR, or GPHEURISTIC

      std::cerr << "Current statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;

      // ----- incrementally add the queried examples to the classifier -----
      time_t IL_add_start_time = clock();
      classifier->addMultipleExamples( newExamples );

      //remove the memory used in newExamples
      // (assumes the classifier copied the data in addMultipleExamples —
      // TODO confirm against FPCGPHIK)
      for ( uint tmp = 0; tmp < newExamples.size(); tmp++ )
      {
        delete newExamples[tmp].second.svec;
        newExamples[tmp].second.svec = NULL;
      }

      float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
      std::cerr << "Time for IL-adding of " << incrementalAddSize << " examples to already " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*incrementationStep << " training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
      IL_training_times[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);

      //do the classification for evaluating the benefit of new examples
      if ( do_classification )
      {
        confusionMatrix.set( 0.0 );
        for ( uint i = 0 ; i < testData.size(); i++ )
        {
          Example example;
          const Vector & xstar = testData[i];
          SparseVector xstar_sparse ( xstar );
          example.svec = &xstar_sparse;
          OBJREC::ClassificationResult result;

          result = classifier->classify( example );

          result.classno_groundtruth = yTest[i];
          confusionMatrix ( result.classno_groundtruth , result.classno ) ++;
        }
        // NOTE(review): start_time was set once BEFORE the initial
        // classification round and is never reset, so this measurement
        // accumulates the initial classification, all previous increments,
        // and the IL-training time — likely should be re-initialized at the
        // start of this loop body; confirm.
        float time_classification = ( float ) ( clock() - start_time ) ;
        if ( verbose >= LOW )
          std::cerr << "Time for Classification with " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;
        ( classification_times[incrementationStep+1] ).push_back ( time_classification / CLOCKS_PER_SEC );

        confusionMatrix.normalizeRowsL1();
        double avg_recognition_rate = 0.0;
        for ( int i = 0 ; i < ( int ) confusionMatrix.rows(); i++ )
        {
          if ( verbose >= MEDIUM )
          {
            std::cerr << "Class no: " << i << " : " << confusionMatrix ( i, i ) << std::endl;
          }
          avg_recognition_rate += confusionMatrix ( i, i );
        }
        avg_recognition_rate /= confusionMatrix.rows();
        std::cerr << confusionMatrix;
        std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " % -- " << nrOfClassesUsed*trainExPerClass+incrementalAddSize*(incrementationStep+1) << " training examples used" << std::endl << std::endl;
        recognitions_rates[incrementationStep+1].push_back ( avg_recognition_rate*100 );
      } //classification after IL adding
    } //IL adding of different classes

    std::cerr << "Final statistic about picked examples per class: " << pickedExamplesPerClass << std::endl;

    //don't waste memory!
    delete classifier;
    for ( int tmp = 0; tmp < examples.size(); tmp++ )
    {
      delete examples[tmp].second.svec;
      examples[tmp].second.svec = NULL;
    }
  }//runs

  std::cerr << "no of classes used: " << nrOfClassesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;

  // ================= EVALUATION ========================0
  // For each statistic: dump raw per-run values, then print mean and
  // (population) standard deviation per increment step.
  if ( do_classification )
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "content of classification_times: " << std::endl;
    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
    {
      for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }
    std::vector<float> mean_classification_times;
    std::vector<float> std_dev_classification_times;
    for ( std::vector<std::vector<float> >::const_iterator it = classification_times.begin(); it != classification_times.end(); it++ )
    {
      float mean_classification_time ( 0.0 );
      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        mean_classification_time += *itRun;
      }
      mean_classification_time /= it->size();
      mean_classification_times.push_back ( mean_classification_time );
      double std_dev_classification_time ( 0.0 );
      for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        std_dev_classification_time += pow ( *itRun - mean_classification_time, 2 );
      }
      std_dev_classification_time /= it->size();
      std_dev_classification_time = sqrt ( std_dev_classification_time );
      std_dev_classification_times.push_back ( std_dev_classification_time );
    }

    int datasize ( nrOfClassesUsed*trainExPerClass );
    for ( uint i = 0; i < mean_classification_times.size(); i++)
    {
      std::cerr << "size: " << datasize << " mean classification time: " << mean_classification_times[i] << " std_dev classification time: " << std_dev_classification_times[i] << std::endl;
      datasize += incrementalAddSize ;
    }
  }
  else
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "No classification done therefor no classification times available." << std::endl;
  }

  std::cerr << "========================" << std::endl;
  std::cerr << "content of IL_training_times: " << std::endl;
  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
  {
    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
    {
      std::cerr << *jt << " ";
    }
    std::cerr << std::endl;
  }
  std::vector<float> mean_IL_training_times;
  std::vector<float> std_dev_IL_training_times;
  for ( std::vector<std::vector<float> >::const_iterator it = IL_training_times.begin(); it != IL_training_times.end(); it++ )
  {
    float mean_IL_training_time ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      mean_IL_training_time += *itRun;
    }
    mean_IL_training_time /= it->size();
    mean_IL_training_times.push_back ( mean_IL_training_time );
    double std_dev_IL_training_time ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      std_dev_IL_training_time += pow ( *itRun - mean_IL_training_time, 2 );
    }
    std_dev_IL_training_time /= it->size();
    std_dev_IL_training_time = sqrt ( std_dev_IL_training_time );
    std_dev_IL_training_times.push_back ( std_dev_IL_training_time );
  }
  int datasize ( nrOfClassesUsed*trainExPerClass );
  for ( uint i = 0; i < mean_IL_training_times.size(); i++)
  {
    cerr << "size: " << datasize << " and adding " << incrementalAddSize << " mean IL_training time: " << mean_IL_training_times[i] << " std_dev IL_training time: " << std_dev_IL_training_times[i] << endl;
    datasize += incrementalAddSize ;
  }

  if ( do_classification )
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "content of recognition_rates: " << std::endl;
    for ( std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
    {
      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }
    std::cerr << "calculating final results " << std::endl;
    std::vector<double> mean_recs;
    std::vector<double> std_dev_recs;
    for (std::vector<std::vector<double> >::const_iterator it = recognitions_rates.begin(); it != recognitions_rates.end(); it++ )
    {
      double mean_rec ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        mean_rec += *itRun;
      }
      mean_rec /= it->size();
      mean_recs.push_back ( mean_rec );
      double std_dev_rec ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        std_dev_rec += pow ( *itRun - mean_rec, 2 );
      }
      std_dev_rec /= it->size();
      std_dev_rec = sqrt ( std_dev_rec );
      std_dev_recs.push_back ( std_dev_rec );
    }
    int datasize ( nrOfClassesUsed*trainExPerClass );
    for ( uint i = 0; i < recognitions_rates.size(); i++)
    {
      std::cerr << "size: " << datasize << " mean_IL: " << mean_recs[i] << " std_dev_IL: " << std_dev_recs[i] << std::endl;
      datasize += incrementalAddSize ;
    }
  }
  else
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "No classification done therefor no classification times available." << std::endl;
  }

  return 0;
}
|