@@ -0,0 +1,536 @@
+#ifdef NICE_USELIB_CPPUNIT
+
+#include <string>
+#include <exception>
+#include <iostream>
+#include <fstream>
+#include <cmath> // for fabs, used in the score comparisons below
+
+//----------
+
+#include <core/basics/Timer.h>
+
+//----------
+
+#include <vislearning/cbaselib/ClassificationResults.h>
+#include <vislearning/classifier/kernelclassifier/KCGPRegOneVsAll.h>
+
+//----------
+
+#include "gp-hik-exp/GPHIKClassifierNICE.h"
+
+//----------
+
+#include "TestGPHIKClassifier.h"
+
+
+const bool verbose = false;
+const bool verboseStartEnd = true;
+
+using namespace OBJREC;
+using namespace NICE;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION( TestGPHIKClassifier );
+
+void TestGPHIKClassifier::setUp() {
+}
+
+void TestGPHIKClassifier::tearDown() {
+}
+
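+// Trains the given classifier on the odd-indexed rows of mX (each row is expanded to a
+// 3-dimensional sparse feature vector that sums to 1.0) and classifies the even-indexed
+// rows, printing scores and predicted uncertainties when 'verbose' is enabled.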
+void myClassifierTest( GPHIKClassifierNICE & classifier, const Matrix & mX, const Vector & vY )
+{
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierTest ===================== " << std::endl;
+
+  Examples examples;
+
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose )
+    std::cerr << "learning ..." << std::endl;
+  classifier.train ( fp, examples );
+
+  if ( verbose )
+    std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult r = classifier.classify ( example );
+      if (verbose)
+      {
+        r.scores >> std::cerr;
+        std::cerr << "predicted uncertainty: " << r.uncertainty << std::endl;
+      }
+      delete example.svec; // test examples are not stored in 'examples', so free them here
+    }
+
+  examples.clean();
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierTest done ===================== " << std::endl;
+
+}
+
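+// Trains the given classifier, serializes it to /tmp/GPHIK_store.txt via store(), restores it
+// into a fresh GPHIKClassifierNICE via restore(), and asserts that the original and the restored
+// classifier produce (almost) identical scores on the held-out even-indexed examples.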
+void myClassifierStoreRestoreTest( GPHIKClassifierNICE & classifier, const Matrix & mX, const Vector & vY )
+{
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierStoreRestoreTest ===================== " << std::endl;
+
+  Examples examples;
+
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose )
+    std::cerr << "learning ..." << std::endl;
+  classifier.train ( fp, examples );
+
+  if ( verbose )
+    std::cerr << "storing ..." << std::endl;
+  // test the store functionality
+  string destination("/tmp/GPHIK_store.txt");
+
+  std::filebuf fb;
+  fb.open (destination.c_str(),ios::out);
+  std::ostream os(&fb);
+
+  classifier.store(os);
+
+  fb.close();
+
+  if ( verbose )
+    std::cerr << "loading ..." << std::endl;
+
+  Config confTmp;
+  GPHIKClassifierNICE classifierRestored(&confTmp);
+
+  std::filebuf fbIn;
+  fbIn.open (destination.c_str(),ios::in);
+  std::istream is(&fbIn);
+
+  classifierRestored.restore(is);
+
+  fbIn.close();
+
+  if ( verbose )
+    std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult rOrig = classifier.classify ( example );
+      ClassificationResult rRestored = classifierRestored.classify ( example );
+
+      // scores are of type FullVector
+      // we use the [] operator, since there are no iterators given in FullVector.h
+      bool equal(true);
+      for (int j = 0; j < rOrig.scores.size(); j++)
+      {
+        if ( fabs(rOrig.scores[j] - rRestored.scores[j]) > 1e-6 )
+        {
+          equal = false;
+          break;
+        }
+      }
+
+      CPPUNIT_ASSERT_EQUAL ( equal, true );
+
+      delete example.svec; // test examples are not stored in 'examples', so free them here
+    }
+
+  examples.clean();
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierStoreRestoreTest done ===================== " << std::endl;
+
+}
+
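+// Compares incremental learning against batch retraining: classifierIL is trained on a quarter
+// of the data and then extended via addExample(), while classifierRetrain is trained from scratch
+// on the same enlarged example set; both are expected to yield (almost) identical scores afterwards.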
+void myClassifierILTest( GPHIKClassifierNICE & classifierRetrain, GPHIKClassifierNICE & classifierIL, const Matrix & mX, const Vector & vY )
+{
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierILTest ===================== " << std::endl;
+
+  Examples examples;
+
+  if (verbose)
+    std::cerr << "vY: " << vY << std::endl;
+
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+  {
+    if ( i % 4 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+  }
+
+  if (verbose)
+    std::cerr << "examples.size(): " << examples.size() << std::endl;
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation done." << std::endl;
+
+  if ( verbose )
+    std::cerr << "learning ..." << std::endl;
+  classifierIL.train ( fp, examples );
+
+  // choose next example(s)
+
+  Examples newExamples;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+  {
+    if ( i % 4 == 3 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      newExamples.push_back ( pair<int, Example> ( vY[i], example ) );
+    }
+  }
+
+//  if ( verbose )
+  std::cerr << std::endl << " =============== " << std::endl << "incremental learning ..." << std::endl;
+
+  // add them to classifierIL
+//  std::cerr << "We add several new examples" << std::endl;
+  Timer t;
+  t.start();
+//  for (uint i = 0; i < newExamples.size(); i++)
+  for (uint i = 0; i < 1; i++)
+  {
+    classifierIL.addExample( newExamples[i].second, newExamples[i].first);
+  }
+
+  t.stop();
+  std::cerr << "Time used for incremental training: " << t.getLast() << std::endl;
+
+  // add the new features to the example set needed for batch training
+  // (the same single example that was added incrementally above)
+//  for (uint i = 0; i < newExamples.size(); i++)
+  for (uint i = 0; i < 1; i++)
+  {
+    examples.push_back( newExamples[i] );
+  }
+
+  std::cerr << std::endl << " =============== " << std::endl << "We train the second classifier from scratch with the additional new example" << std::endl;
+  t.start();
+
+  classifierRetrain.train ( fp, examples );
+
+  t.stop();
+  std::cerr << "Time used for batch training: " << t.getLast() << std::endl;
+
+  // evaluate both classifiers and compare the resulting scores
+//  if ( verbose )
+  std::cerr << "testing ..." << std::endl;
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult resultIL = classifierIL.classify ( example );
+      ClassificationResult resultBatch = classifierRetrain.classify ( example );
+
+      if (verbose)
+      {
+        std::cerr << "result of IL classifier: " << std::endl;
+        resultIL.scores >> std::cerr;
+
+        std::cerr << "result of batch classifier: " << std::endl;
+        resultBatch.scores >> std::cerr;
+      }
+
+      // scores are of type FullVector
+      // we use the [] operator, since there are no iterators given in FullVector.h
+      bool equal(true);
+      for (int j = 0; j < resultIL.scores.size(); j++)
+      {
+        if ( fabs(resultIL.scores[j] - resultBatch.scores[j]) > 10e-3)
+        {
+          equal = false;
+          break;
+        }
+      }
+
+      CPPUNIT_ASSERT_EQUAL ( equal, true );
+
+      delete example.svec; // test examples are not stored in 'examples', so free them here
+    }
+
+  examples.clean();
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::myClassifierILTest done ===================== " << std::endl;
+}
+
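+// Unit test entry point: runs the binary and multi-class variants of the classification,
+// store/restore, and incremental-learning tests defined above on a toy data set.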
+void TestGPHIKClassifier::testGPHIKClassifier()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKClassifier ===================== " << std::endl;
+
+  Config conf;
+  conf.sD( "GPHIKClassifier", "noise", 0.01 );
+  conf.sD( "GPHIKClassifier", "parameter_lower_bound", 0.5 );
+  conf.sD( "GPHIKClassifier", "parameter_upper_bound", 3.5 );
+  conf.sI( "GPHIKClassifier", "uncertaintyPrediction", 1);
+//  conf.sS( "GPHIKClassifier", "optimization_method", "none");
+  conf.sS( "GPHIKClassifier", "optimization_method", "downhillsimplex");
+  conf.sB( "GPHIKClassifier", "uncertaintyPredictionForClassification", true);
+
+  GPHIKClassifierNICE * classifier = new GPHIKClassifierNICE ( &conf );
+
+  Matrix mX;
+  Vector vY;
+  Vector vY_multi;
+
+//  ifstream ifs ("toyExample1.data", ios::in);
+//  ifstream ifs ("toyExampleLargeScale.data", ios::in);
+  ifstream ifs ("toyExampleLargeLargeScale.data", ios::in);
+  CPPUNIT_ASSERT ( ifs.good() );
+  ifs >> mX;
+  ifs >> vY;
+  ifs >> vY_multi;
+  ifs.close();
+
+  if (verbose)
+  {
+    std::cerr << "data loaded: mX" << std::endl;
+    std::cerr << mX << std::endl;
+    std::cerr << "vY: " << std::endl;
+    std::cerr << vY << std::endl;
+    std::cerr << "vY_multi: " << std::endl;
+    std::cerr << vY_multi << std::endl;
+  }
+
+  if ( verbose )
+    std::cerr << "Binary classification test " << std::endl;
+
+  myClassifierTest ( *classifier, mX, vY );
+
+  // ... we remove nothing here since we are only interested in store and restore :)
+  myClassifierStoreRestoreTest ( *classifier, mX, vY );
+
+  // ... remove previously computed things and start again, this time with incremental settings
+  if (classifier != NULL)
+    delete classifier;
+
+  classifier = new GPHIKClassifierNICE ( &conf );
+  GPHIKClassifierNICE * classifierBatch = new GPHIKClassifierNICE ( &conf );
+
+  myClassifierILTest( *classifierBatch, *classifier, mX, vY );
+
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;
+
+  classifier = new GPHIKClassifierNICE ( &conf );
+  classifierBatch = new GPHIKClassifierNICE ( &conf );
+
+  if ( verbose )
+    std::cerr << "Multi-class classification test " << std::endl;
+  myClassifierTest ( *classifier, mX, vY_multi );
+
+  // ... we remove nothing here since we are only interested in store and restore :)
+//  myClassifierStoreRestoreTest ( classifier, mX, vY_multi );
+
+  // ... remove previously computed things and start again, this time with incremental settings
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;
+
+  classifier = new GPHIKClassifierNICE ( &conf );
+  classifierBatch = new GPHIKClassifierNICE ( &conf );
+
+  myClassifierILTest( *classifierBatch, *classifier, mX, vY_multi );
+
+  if (classifier != NULL)
+    delete classifier;
+  if (classifierBatch != NULL)
+    delete classifierBatch;
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKClassifier done ===================== " << std::endl;
+
+}
+
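+// Checks the predictive variance computation: with the parameter bounds fixed to 1.0 (standard HIK),
+// the rough approximation must stay below 1.0 + noise, and the estimated uncertainties must become
+// tighter from rough over approximate_fine (1 and 2 eigenvalues) down to the exact computation.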
+void TestGPHIKClassifier::testGPHIKVariance()
+{
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKVariance ===================== " << std::endl;
+
+  double noise (0.01);
+
+  Config conf;
+  conf.sD( "GPHIKClassifier", "noise", noise );
+  conf.sD( "GPHIKClassifier", "parameter_lower_bound", 1.0 );
+  conf.sD( "GPHIKClassifier", "parameter_upper_bound", 1.0 );
+  conf.sS( "GPHIKClassifier", "varianceApproximation", "approximate_rough");
+  conf.sB( "GPHIKClassifier", "learn_balanced", true);
+  conf.sB( "GPHIKClassifier", "uncertaintyPredictionForClassification", true);
+
+  GPHIKClassifierNICE classifier ( &conf );
+
+  Config confVarApproxQuant(conf);
+  confVarApproxQuant.sB( "GPHIKClassifier", "use_quantization", true );
+  GPHIKClassifierNICE classifierQuant ( &confVarApproxQuant );
+
+  Config confVarApproxFine1(conf);
+  confVarApproxFine1.sS( "GPHIKClassifier", "varianceApproximation", "approximate_fine");
+  confVarApproxFine1.sI( "GPHIKClassifier", "nrOfEigenvaluesToConsiderForVarApprox", 1);
+
+  GPHIKClassifierNICE classifierVarApproxFine1 ( &confVarApproxFine1 );
+
+  Config confVarApproxFine2(conf);
+  confVarApproxFine2.sS( "GPHIKClassifier", "varianceApproximation", "approximate_fine");
+  confVarApproxFine2.sI( "GPHIKClassifier", "nrOfEigenvaluesToConsiderForVarApprox", 2);
+
+  GPHIKClassifierNICE classifierVarApproxFine2 ( &confVarApproxFine2 );
+
+  Config confExact(conf);
+  confExact.sS( "GPHIKClassifier", "varianceApproximation", "exact");
+
+  GPHIKClassifierNICE classifierVarExact ( &confExact );
+
+  NICE::Matrix mX;
+  NICE::Vector vY;
+  NICE::Vector vY_multi;
+
+  ifstream ifs ("toyExample2.data", ios::in);
+  CPPUNIT_ASSERT ( ifs.good() );
+  ifs >> mX;
+  ifs >> vY;
+  ifs >> vY_multi;
+  ifs.close();
+
+  if (verbose)
+  {
+    std::cerr << "data loaded: mX" << std::endl;
+    std::cerr << mX << std::endl;
+    std::cerr << "vY: " << std::endl;
+    std::cerr << vY << std::endl;
+    std::cerr << "vY_multi: " << std::endl;
+    std::cerr << vY_multi << std::endl;
+  }
+
+  Examples examples;
+
+  for ( uint i = 0 ; i < vY.size() ; i++ )
+    if ( i % 2 == 0 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      examples.push_back ( pair<int, Example> ( vY_multi[i], example ) );
+    }
+
+  FeaturePool fp; // will be ignored
+
+  if ( verbose )
+    std::cerr << "preparation for variance testing done." << std::endl;
+
+  if ( verbose )
+    std::cerr << "learning for variance testing ..." << std::endl;
+  classifier.train ( fp, examples );
+  classifierQuant.train ( fp, examples );
+  classifierVarApproxFine1.train ( fp, examples );
+  classifierVarApproxFine2.train ( fp, examples );
+  classifierVarExact.train ( fp, examples );
+
+  if ( verbose )
+    std::cerr << "testing for variance testing ..." << std::endl;
+
+  for ( uint i = 0 ; i < vY_multi.size() ; i++ )
+    if ( i % 2 == 1 )
+    {
+      Example example;
+      example.svec = new SparseVector;
+      example.svec->setDim(3);
+      example.svec->set ( 0, mX(i,0) );
+      example.svec->set ( 1, mX(i,1) );
+      example.svec->set ( 2, 1.0-mX(i,0)-mX(i,1) );
+      ClassificationResult r = classifier.classify ( example );
+      ClassificationResult rQuant = classifierQuant.classify ( example );
+      ClassificationResult rVarApproxFine1 = classifierVarApproxFine1.classify ( example );
+      ClassificationResult rVarApproxFine2 = classifierVarApproxFine2.classify ( example );
+      ClassificationResult rExact = classifierVarExact.classify ( example );
+
+      if (verbose)
+      {
+        std::cerr << "approxUnc: " << r.uncertainty << " approxUncQuant: " << rQuant.uncertainty << " approxUncFine1: " << rVarApproxFine1.uncertainty << " approxUncFine2: " << rVarApproxFine2.uncertainty << " exactUnc: " << rExact.uncertainty << std::endl;
+      }
+
+      CPPUNIT_ASSERT ( r.uncertainty <= (1.0 + noise) ); // using the "standard" HIK, this is the upper bound
+      CPPUNIT_ASSERT ( r.uncertainty > rVarApproxFine1.uncertainty);
+      CPPUNIT_ASSERT ( rQuant.uncertainty > rVarApproxFine1.uncertainty);
+      CPPUNIT_ASSERT ( rVarApproxFine1.uncertainty > rVarApproxFine2.uncertainty);
+      CPPUNIT_ASSERT ( rVarApproxFine2.uncertainty > rExact.uncertainty);
+
+      delete example.svec; // test examples are not stored in 'examples', so free them here
+    }
+
+  examples.clean();
+
+  if (verboseStartEnd)
+    std::cerr << "================== TestGPHIKClassifier::testGPHIKVariance done ===================== " << std::endl;
+
+}
+
+#endif