123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571 |
/**
 * @file IL_NewExamples_Comparison.cpp
 * @brief Large GP-IL test setup
 * @author Alexander Freytag
 * @date 09-05-2012
 */
- #include <vector>
- #include <stdlib.h>
- #include <time.h>
- #include <set>
- #include <iostream>
- #include <math.h>
- #include <core/basics/Config.h>
- #include <core/basics/StringTools.h>
- #include <core/vector/SparseVectorT.h>
- #include <core/vector/VectorT.h>
- //----------
- #include "vislearning/baselib/ProgressBar.h"
- #include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
- #include "vislearning/cbaselib/MultiDataset.h"
- #include <vislearning/cbaselib/LabeledSet.h>
- #include "vislearning/cbaselib/ClassificationResults.h"
- #include <vislearning/baselib/Globals.h>
- #include <vislearning/math/kernels/KernelData.h>
- //----------
- #include "gp-hik-exp/progs/datatools.h"
- #include "gp-hik-exp/GPHIKClassifierNICE.h"
- //----------
- // #include <incrementallearning/IL_Framework_Generic.h>
- //
- using namespace std;
- using namespace NICE;
- using namespace OBJREC;
// Verbosity levels for console output; compared ordinally (e.g. verbose >= LOW) in main.
enum verbose_level {NONE = 0, LOW = 1, MEDIUM = 2, EVERYTHING = 3};
/**
 * Computes kernel matrices from randomly or deterministically chosen training
 * images and evaluates their performance, using ROI optimization.
 */
/**
 * Experiment driver: compares incremental learning (IL) against full batch
 * retraining for a GP-HIK classifier.
 *
 * Flow (repeated num_runs times with fresh random example selection):
 *   1. Pick trainExPerClass examples per class and train an IL classifier.
 *   2. Optionally classify the test set to get a baseline recognition rate.
 *   3. For nrOfIncrements steps: add incrementalAddSize examples per class
 *      via classifierIL->addMultipleExamples(), retrain a second classifier
 *      from scratch on the same enlarged set, and time/evaluate both.
 * After all runs, mean and standard deviation of training times and
 * recognition rates are printed for both strategies.
 *
 * @param argc/argv forwarded to NICE::Config; parameters are read from the
 *                  "GP_IL" and "main" config sections.
 * @return 0 on completion.
 */
int main ( int argc, char **argv )
{
  std::cout.precision ( 5 );
  std::cerr.precision ( 5 );

  NICE::Config conf ( argc, argv );

  // --- experiment parameters (config section "GP_IL") ---
  int trainExPerClass = conf.gI ( "GP_IL", "trainExPerClass", 10 );      // initial training examples per class
  int incrementalAddSize = conf.gI("GP_IL", "incrementalAddSize", 1);    // examples added per class in each increment
  int nrOfIncrements = conf.gI("GP_IL", "nrOfIncrements", 9);            // number of incremental-learning steps
  int num_runs = conf.gI ( "GP_IL", "num_runs", 10 );                    // repetitions for mean / std-dev statistics
  bool do_classification = conf.gB ( "GP_IL", "do_classification", true );

  // NOTE(review): featureLocation is read but never used below — the data
  // location comes from MultiDataset / ext instead; confirm whether this is dead.
  string featureLocation = conf.gS( "GP_IL", "featureLocation", "toyExampleLargeLargeScale.data");

  // map the integer config value onto the verbosity enum
  int verbose_int = conf.gI ( "GP_IL", "verbose", 0 );
  verbose_level verbose ( NONE );
  switch ( verbose_int )
  {
    case 0:
      verbose = NONE;
      break;
    case 1:
      verbose = LOW;
      break;
    case 2:
      verbose = MEDIUM;
      break;
    case 3:
      verbose = EVERYTHING;
      break;
  }

  /* initialize random seed: */
  srand ( time ( NULL ) ); //with 0 for reproductive results
  // srand ( 0 ); //with 0 for reproductive results

  // =========================== INIT ===========================

  //these classes are the basic knowledge we have at the beginning
  set<int> classesForTraining;
  classesForTraining.insert(0);
  classesForTraining.insert(1);
  classesForTraining.insert(2);
  classesForTraining.insert(3);
  classesForTraining.insert(4);
  classesForTraining.insert(5);
  classesForTraining.insert(6);
  classesForTraining.insert(7);
  classesForTraining.insert(8);
  classesForTraining.insert(9);
  classesForTraining.insert(10);
  classesForTraining.insert(11);
  classesForTraining.insert(12);
  classesForTraining.insert(13);
  classesForTraining.insert(14);

  // //these classes will be added iteratively to our training set
  // std::set<int> classesForIncrementalTraining;

  // Per-increment result collectors: index 0 holds the initial state, index
  // k+1 the state after increment k; each inner vector gathers one value per run.
  std::vector<std::vector<double> > recognitionsRatesBatch(nrOfIncrements+1);
  std::vector<std::vector<double> > recognitionsRatesIL(nrOfIncrements+1);

  // NOTE(review): these are also sized nrOfIncrements+1 but only indices
  // 0..nrOfIncrements-1 are ever filled below, so the last slot stays empty
  // and its mean/std-dev computation divides by zero — confirm intent.
  std::vector<std::vector<float> > trainingTimesBatch(nrOfIncrements+1);
  std::vector<std::vector<float> > trainingTimesIL(nrOfIncrements+1);

  for ( int run = 0; run < num_runs; run++ )
  {
    std::cerr << "run: " << run << std::endl;

    //15-scenes settings
    std::string ext = conf.gS("main", "ext", ".txt");
    std::cerr << "Using cache extension: " << ext << std::endl;

    OBJREC::MultiDataset md ( &conf );
    const ClassNames & classNamesTrain = md.getClassNames("train");

    // read training set
    vector< NICE::Vector > trainDataOrig;
    Vector y;                                      // labels, parallel to trainDataOrig
    const LabeledSet *train = md["train"];
    readData< std::vector< NICE::Vector >, NICE::Vector > ( conf, *train, trainDataOrig, y, ext );

    std::vector<double> labelsStd;                 // NOTE(review): unused
    int datasize_all ( trainDataOrig.size() );     // NOTE(review): unused

    // collect the distinct class labels present in the training data
    std::set<int> classesAvailable;
    for ( uint i = 0; i < y.size(); i++)
    {
      //automatically check for duplicates
      classesAvailable.insert(y[i]);
    }

    int numberOfClasses = classesAvailable.size();

    // build, per class: its example count and the indices of its examples
    std::map<int,int> nrExamplesPerClassInDataset;
    std::map<int,std::vector<int> > examplesPerClassInDataset;

    for (std::set<int>::const_iterator it = classesAvailable.begin(); it != classesAvailable.end(); it++)
    {
      nrExamplesPerClassInDataset.insert(std::pair<int,int>(*it,0));
      examplesPerClassInDataset.insert(std::pair<int,std::vector<int> >(*it,std::vector<int>(0)));
    }

    for ( uint i = 0; i < y.size(); i++ )
    {
      (examplesPerClassInDataset.find(y[i])->second).push_back(i);
    }

    for (std::map<int,std::vector<int> >::const_iterator it = examplesPerClassInDataset.begin(); it != examplesPerClassInDataset.end(); it++)
    {
      nrExamplesPerClassInDataset.find(it->first)->second = it->second.size();
    }

    // print the class histogram
    for ( std::map<int,int>::const_iterator it = nrExamplesPerClassInDataset.begin(); it != nrExamplesPerClassInDataset.end(); it++)
    {
      cerr << it->first << ": " << it->second << endl;
    }

    Examples examples;

    // working copy: chosen indices are erased so the same example is never picked twice
    std::map<int,std::vector<int> > examplesPerClassInDatasetTmp (examplesPerClassInDataset);

    //chose examples for every class used for training
    for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
    {
      std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);
      // std::cerr << "pick training examples for class " << *clIt << std::endl;

      for (int i = 0; i < trainExPerClass; i++)
      {
        // std::cerr << "i: " << i << std::endl;
        int exampleIndex ( rand() % ( exIt->second.size() ) );   // uniform pick among the remaining examples
        // std::cerr << "exampleIndex: " << exampleIndex << std::endl;

        Example example;
        NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex]];
        // NOTE(review): heap-allocated SparseVectors are never explicitly
        // freed in this function — presumably Examples cleanup owns them; verify.
        example.svec = new SparseVector(xTrain);
        examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );

        exIt->second.erase(exIt->second.begin()+exampleIndex);   // consume the chosen index
      }
    }

    std::cerr << "start training " << std::endl;
    // NOTE(review): clock() returns clock_t but is stored in time_t here
    // (and below) — works on common platforms, but the type is misleading.
    time_t prep_start_time = clock();

    // NOTE(review): both classifiers are allocated per run and never deleted
    // before the run loop restarts — leaks one pair per run; confirm.
    GPHIKClassifierNICE * classifierBatch = new GPHIKClassifierNICE( &conf ); //we don't need this one in the first round
    GPHIKClassifierNICE * classifierIL = new GPHIKClassifierNICE( &conf );

    FeaturePool fp; // will be ignored
    classifierIL->train ( fp, examples );
    float time_preparation = ( float ) ( clock() - prep_start_time ) ;   // NOTE(review): computed but never printed/used

    int classesUsed(classesForTraining.size());

    std::cerr << "training done " << std::endl;

    // ------------------ TESTING
    const LabeledSet *test = md["test"];
    VVector testData;
    Vector yTest;
    readData< VVector, Vector > ( conf, *test, testData, yTest, ext );

    NICE::Matrix confusionMatrixBatch ( numberOfClasses, numberOfClasses );
    NICE::Matrix confusionMatrixIL ( numberOfClasses, numberOfClasses );
    confusionMatrixBatch.set ( 0.0 );
    confusionMatrixIL.set ( 0.0 );

    time_t start_time = clock();
    std::vector<int> chosen_examples_per_class ( numberOfClasses );   // NOTE(review): unused

    // baseline evaluation before any incremental additions
    if ( do_classification )
    {
      for ( uint i = 0 ; i < testData.size(); i++ )
      {
        Example example;
        const Vector & xstar = testData[i];
        SparseVector xstar_sparse ( xstar );   // stack-owned; example.svec borrows it
        OBJREC::ClassificationResult result;
        example.svec = &xstar_sparse;

        result = classifierIL->classify( example );
        cerr << "[" << i << " / " << testData.size() << "] " << result.classno << " " << yTest[i] << std::endl;

        result.classno_groundtruth = yTest[i];
        confusionMatrixIL ( result.classno_groundtruth , result.classno ) ++;
      }

      float time_classification = ( float ) ( clock() - start_time ) ;
      if ( verbose >= LOW )
        cerr << "Time for Classification with " << classesUsed*trainExPerClass << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << endl;

      // average recognition rate = mean of the diagonal after row normalization
      confusionMatrixIL.normalizeRowsL1();
      double avg_recognition_rate = 0.0;
      for ( int i = 0 ; i < ( int ) confusionMatrixIL.rows(); i++ )
      {
        if ( verbose >= MEDIUM )
        {
          cerr << "Class no: " << i << " : " << confusionMatrixIL ( i, i ) << endl;
        }
        avg_recognition_rate += confusionMatrixIL ( i, i );
      }
      avg_recognition_rate /= confusionMatrixIL.rows();

      std::cerr << confusionMatrixIL << std::endl;
      std::cerr << "avg recognition rate " << avg_recognition_rate*100 << " %" << std::endl;

      // before any increment both strategies share the same model, so the
      // same baseline rate is recorded for both
      recognitionsRatesBatch[0].push_back ( avg_recognition_rate*100 );
      recognitionsRatesIL[0].push_back ( avg_recognition_rate*100 );
    }

    //Now start the Incremental-Learning-Part
    for (int incrementationStep = 0; incrementationStep < nrOfIncrements; incrementationStep++)
    {
      uint oldSize = examples.size();   // NOTE(review): only used by the commented-out variant below
      //chose examples for every class used for training
      int cnt(0);
      Examples newExamples;   // only the examples added in this step (fed to the IL classifier)
      for (std::set<int>::const_iterator clIt = classesForTraining.begin(); clIt != classesForTraining.end(); clIt++)
      {
        std::map<int,std::vector<int> >::iterator exIt = examplesPerClassInDatasetTmp.find(*clIt);

        for (int i = 0; i < incrementalAddSize; i++)
        {
          std::cerr << "i: " << cnt << std::endl;
          Example example;

          int exampleIndex ( rand() % ( exIt->second.size() ) );
          NICE::Vector & xTrain = trainDataOrig[exIt->second[exampleIndex] ];
          example.svec = new SparseVector(xTrain);
          // the same Example goes into both containers: the cumulative set
          // (for batch retraining) and the fresh batch (for IL adding)
          examples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
          newExamples.push_back ( pair<int, Example> ( y[exIt->second[exampleIndex] ], example ) );
          exIt->second.erase(exIt->second.begin()+exampleIndex);
          cnt++;
        }
      }

      // --- IL update: add only the new examples to the existing model ---
      std::cerr << "Incremental, but not batch" << std::endl;
      time_t IL_add_start_time = clock();
      // for ( uint i = oldSize ; i < examples.size() ; i++ )
      // {
      //   Example & example = examples[i].second;
      //   int classno = examples[i].first;
      //
      //   //skip the optimization for the first k examples
      //   classifierIL->addExample( example, (double) classno, true );
      // }
      // for ( uint i = examples.size()-1 ; i < examples.size() ; i++ )
      // {
      //   Example & example = examples[i].second;
      //   int classno = examples[i].first;
      //   //perform the optimization
      //   classifierIL->addExample( example, (double) classno, true );
      // }
      classifierIL->addMultipleExamples( newExamples );
      float time_IL_add = ( float ) ( clock() - IL_add_start_time ) ;
      std::cerr << "Time for IL-adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << " training-examples: " << time_IL_add / CLOCKS_PER_SEC << " [s]" << std::endl;
      trainingTimesIL[incrementationStep].push_back(time_IL_add / CLOCKS_PER_SEC);

      // --- batch update: retrain a fresh classifier on the full cumulative set ---
      std::cerr << "start batch retraining" << std::endl;
      time_t batch_add_start_time = clock();
      //
      if (classifierBatch != NULL)
        delete classifierBatch;
      classifierBatch = new GPHIKClassifierNICE( &conf );
      classifierBatch->train( fp, examples );
      //
      float time_batch_add = ( float ) ( clock() - batch_add_start_time ) ;
      std::cerr << "Time for batch relearning after adding of " << incrementalAddSize*classesForTraining.size() << " examples to already " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*incrementationStep << " training-examples: " << time_batch_add / CLOCKS_PER_SEC << " [s]" << std::endl;
      trainingTimesBatch[incrementationStep].push_back(time_batch_add / CLOCKS_PER_SEC);

      //do the classification for evaluating the benefit of new examples
      if ( do_classification )
      {
        std::cerr << "do classification" << std::endl;
        for ( uint i = 0 ; i < testData.size(); i++ )
        {
          Example example;
          const Vector & xstar = testData[i];
          SparseVector xstar_sparse ( xstar );
          example.svec = &xstar_sparse;
          OBJREC::ClassificationResult resultBatch;
          OBJREC::ClassificationResult resultIL;

          resultBatch = classifierBatch->classify( example );
          resultIL = classifierIL->classify( example );

          std::cerr << "Batch: [" << i << " / " << testData.size() << "] " << resultBatch.classno << " " << yTest[i] << std::endl;
          std::cerr << "IL: [" << i << " / " << testData.size() << "] " << resultIL.classno << " " << yTest[i] << std::endl;

          resultBatch.classno_groundtruth = yTest[i];
          resultIL.classno_groundtruth = yTest[i];

          confusionMatrixBatch ( resultBatch.classno_groundtruth , resultBatch.classno ) ++;
          confusionMatrixIL ( resultIL.classno_groundtruth , resultIL.classno ) ++;
        }

        // NOTE(review): start_time is from before the baseline evaluation and
        // is never reset, so this "classification time" accumulates across
        // increments — confirm whether that is intended.
        float time_classification = ( float ) ( clock() - start_time ) ;
        if ( verbose >= LOW )
          std::cerr << "Time for Classification with " << classesUsed*trainExPerClass+classesUsed*incrementalAddSize*(incrementationStep+1) << " training-examples: " << time_classification / CLOCKS_PER_SEC << " [s]" << std::endl;

        // NOTE(review): the confusion matrices are not reset between
        // increments and are row-normalized repeatedly — later steps mix
        // counts and already-normalized values; verify this is deliberate.
        confusionMatrixBatch.normalizeRowsL1();
        confusionMatrixIL.normalizeRowsL1();

        double ARRBatch = 0.0;
        double ARRIL = 0.0;
        for ( int i = 0 ; i < ( int ) confusionMatrixBatch.rows(); i++ )
        {
          if ( verbose >= MEDIUM )
          {
            std::cerr << "Batch Class no: " << i << " : " << confusionMatrixBatch ( i, i ) << std::endl;
            std::cerr << "IL Class no: " << i << " : " << confusionMatrixIL ( i, i ) << std::endl;
          }
          ARRBatch += confusionMatrixBatch ( i, i );
          ARRIL += confusionMatrixIL ( i, i );
        }
        ARRBatch /= confusionMatrixBatch.rows();
        ARRIL /= confusionMatrixIL.rows();

        std::cerr << "Batch matrix and results: " << std::endl;
        std::cerr << confusionMatrixBatch << std::endl;
        std::cerr << "ARRBatch " << ARRBatch*100 << " %" << std::endl;

        std::cerr << "IL matrix and results: " << std::endl;
        std::cerr << confusionMatrixIL << std::endl;
        std::cerr << "ARRIL " << ARRIL*100 << " %" << std::endl;

        recognitionsRatesBatch[incrementationStep+1].push_back ( ARRBatch*100 );
        recognitionsRatesIL[incrementationStep+1].push_back ( ARRIL*100 );
      } //classification after IL adding
    } //IL adding of different classes
  }//runs

  // ================= FINAL STATISTICS OVER ALL RUNS =================
  int classesUsed(classesForTraining.size());
  std::cerr << "classes used: " << classesUsed << " incrementalAddSize: " << incrementalAddSize << std::endl;

  // --- raw IL training times ---
  std::cerr << "========================" << std::endl;
  std::cerr << "content of trainingTimesIL: " << std::endl;
  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
  {
    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
    {
      std::cerr << *jt << " ";
    }
    std::cerr << std::endl;
  }

  // mean and (population) std-dev of the IL training time per increment
  std::vector<float> trainingTimesILMean;
  std::vector<float> trainingTimesILStdDev;
  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesIL.begin(); it != trainingTimesIL.end(); it++ )
  {
    float trainingTimeILMean ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      trainingTimeILMean += *itRun;
    }
    trainingTimeILMean /= it->size();   // NOTE(review): division by zero if a slot is empty (see sizing note above)
    trainingTimesILMean.push_back ( trainingTimeILMean );

    double trainingTimeILStdDev ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      trainingTimeILStdDev += pow ( *itRun - trainingTimeILMean, 2 );
    }
    trainingTimeILStdDev /= it->size();
    trainingTimeILStdDev = sqrt ( trainingTimeILStdDev );
    trainingTimesILStdDev.push_back ( trainingTimeILStdDev );
  }

  // print IL timing statistics together with the training-set size they refer to
  int datasize ( classesUsed*trainExPerClass );
  for ( uint i = 0; i < trainingTimesILMean.size(); i++)
  {
    cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesILMean: " << trainingTimesILMean[i] << " trainingTimesILStdDev: " << trainingTimesILStdDev[i] << endl;
    datasize += classesUsed*incrementalAddSize ;
  }

  // --- raw batch training times ---
  std::cerr << "========================" << std::endl;
  std::cerr << "content of trainingTimesBatch: " << std::endl;
  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
  {
    for ( std::vector<float> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
    {
      std::cerr << *jt << " ";
    }
    std::cerr << std::endl;
  }

  // mean and std-dev of the batch retraining time per increment
  std::vector<float> trainingTimesBatchMean;
  std::vector<float> trainingTimesBatchStdDev;
  for ( std::vector<std::vector<float> >::const_iterator it = trainingTimesBatch.begin(); it != trainingTimesBatch.end(); it++ )
  {
    float trainingTimeBatchMean ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      trainingTimeBatchMean += *itRun;
    }
    trainingTimeBatchMean /= it->size();
    trainingTimesBatchMean.push_back ( trainingTimeBatchMean );

    double trainingTimeBatchStdDev ( 0.0 );
    for ( std::vector<float>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
    {
      trainingTimeBatchStdDev += pow ( *itRun - trainingTimeBatchMean, 2 );
    }
    trainingTimeBatchStdDev /= it->size();
    trainingTimeBatchStdDev = sqrt ( trainingTimeBatchStdDev );
    trainingTimesBatchStdDev.push_back ( trainingTimeBatchStdDev );
  }

  datasize = classesUsed*trainExPerClass;
  for ( uint i = 0; i < trainingTimesBatchMean.size(); i++)
  {
    cerr << "size: " << datasize << " and adding " << classesUsed*incrementalAddSize << " trainingTimesBatchMean: " << trainingTimesBatchMean[i] << " trainingTimesBatchStdDev: " << trainingTimesBatchStdDev[i] << endl;
    datasize += classesUsed*incrementalAddSize ;
  }

  if ( do_classification )
  {
    // --- raw IL recognition rates ---
    std::cerr << "========================" << std::endl;
    std::cerr << "content of recognitionsRatesIL: " << std::endl;
    for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
    {
      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }

    std::cerr << "calculating final IL results " << std::endl;
    std::vector<double> recRatesILMean;
    std::vector<double> recRatesILStdDev;
    for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesIL.begin(); it != recognitionsRatesIL.end(); it++ )
    {
      double recRateILMean ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        recRateILMean += *itRun;
      }
      recRateILMean /= it->size();
      recRatesILMean.push_back ( recRateILMean );

      double recRateILStdDev ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        recRateILStdDev += pow ( *itRun - recRateILMean, 2 );
      }
      recRateILStdDev /= it->size();
      recRateILStdDev = sqrt ( recRateILStdDev );
      recRatesILStdDev.push_back ( recRateILStdDev );
    }

    // NOTE(review): this local shadows the outer 'datasize' declared above.
    int datasize ( classesUsed*trainExPerClass);
    for ( uint i = 0; i < recRatesILMean.size(); i++)
    {
      std::cerr << "size: " << datasize << " recRatesILMean: " << recRatesILMean[i] << " recRatesILStdDev: " << recRatesILStdDev[i] << std::endl;
      datasize += classesUsed*incrementalAddSize ;
    }

    // --- raw batch recognition rates ---
    std::cerr << "========================" << std::endl;
    std::cerr << "content of recognitionsRatesBatch: " << std::endl;
    for ( std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
    {
      for ( std::vector<double> ::const_iterator jt = ( *it ).begin(); jt != ( *it ).end(); jt++ )
      {
        std::cerr << *jt << " ";
      }
      std::cerr << std::endl;
    }

    std::cerr << "calculating final batch results " << std::endl;
    std::vector<double> recRatesBatchMean;
    std::vector<double> recRatesBatchStdDev;
    for (std::vector<std::vector<double> >::const_iterator it = recognitionsRatesBatch.begin(); it != recognitionsRatesBatch.end(); it++ )
    {
      double recRateBatchMean ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        recRateBatchMean += *itRun;
      }
      recRateBatchMean /= it->size();
      recRatesBatchMean.push_back ( recRateBatchMean );

      double recRateBatchStdDev ( 0.0 );
      for ( std::vector<double>::const_iterator itRun = it->begin(); itRun != it->end(); itRun++ )
      {
        recRateBatchStdDev += pow ( *itRun - recRateBatchMean, 2 );
      }
      recRateBatchStdDev /= it->size();
      recRateBatchStdDev = sqrt ( recRateBatchStdDev );
      recRatesBatchStdDev.push_back ( recRateBatchStdDev );
    }

    datasize = classesUsed*trainExPerClass;
    for ( uint i = 0; i < recRatesBatchMean.size(); i++)
    {
      std::cerr << "size: " << datasize << " recRatesBatchMean: " << recRatesBatchMean[i] << " recRatesBatchStdDev: " << recRatesBatchStdDev[i] << std::endl;
      datasize += classesUsed*incrementalAddSize ;
    }
  }
  else
  {
    std::cerr << "========================" << std::endl;
    std::cerr << "No classification done therefor no classification times available." << std::endl;
  }

  return 0;
}
|