@@ -661,15 +661,17 @@ inline void FMKGPHyperparameterOptimization::setupGPLikelihoodApprox ( GPLikelih
gplike = new GPLikelihoodApprox ( binaryLabels, ikmsum, linsolver, eig, verifyApproximation, nrOfEigenvaluesToConsider );
gplike->setDebug( this->b_debug );
gplike->setVerbose( this->b_verbose );
- parameterVectorSize = ikmsum->getNumParameters();
+ parameterVectorSize = this->ikmsum->getNumParameters();
}

-void FMKGPHyperparameterOptimization::updateEigenDecomposition( const int & i_noEigenValues )
+void FMKGPHyperparameterOptimization::updateEigenDecomposition( const int & _noEigenValues )
{
//compute the largest eigenvalue of K + noise
+
+ std::cerr << "IKM rows: " << ikmsum->rows() << " cols: " << ikmsum->cols() << std::endl;
try
{
- eig->getEigenvalues ( *ikmsum, eigenMax, eigenMaxVectors, i_noEigenValues );
+ this->eig->getEigenvalues ( *ikmsum, eigenMax, eigenMaxVectors, _noEigenValues );
}
catch ( char const* exceptionMsg)
{
@@ -683,7 +685,7 @@ void FMKGPHyperparameterOptimization::updateEigenDecomposition( const int & i_no

void FMKGPHyperparameterOptimization::performOptimization ( GPLikelihoodApprox & gplike, const uint & parameterVectorSize )
{
- if (verbose)
+ if ( this->b_verbose )
std::cerr << "perform optimization" << std::endl;

if ( optimizationMethod == OPT_GREEDY )
@@ -855,7 +857,7 @@ void FMKGPHyperparameterOptimization::optimize ( const sparse_t & data, const NI
if ( fmk != NULL ) delete fmk;
fmk = new FastMinKernel ( data, noise, examples );
t.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for initializing the FastMinKernel structure: " << t.getLast() << std::endl;

optimize ( y );
@@ -905,7 +907,10 @@ int FMKGPHyperparameterOptimization::prepareBinaryLabels ( std::map<int, NICE::V
this->i_binaryLabelPositive = *classIt;

if ( this->b_verbose )
+ {
std::cerr << "positiveClass : " << this->i_binaryLabelPositive << " negativeClass: " << this->i_binaryLabelNegative << std::endl;
+ std::cerr << " all labels: " << y << std::endl << std::endl;
+ }

for ( uint i = 0 ; i < yb.size() ; i++ )
yb[i] = ( y[i] == this->i_binaryLabelNegative ) ? -1.0 : 1.0;
@@ -967,12 +972,12 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b
//how many different classes do we have right now?
int nrOfClasses = binaryLabels.size();

- if (verbose)
+ if ( this->b_verbose )
{
- std::cerr << "Initial noise level: " << fmk->getNoise() << std::endl;
+ std::cerr << "Initial noise level: " << this->fmk->getNoise() << std::endl;

std::cerr << "Number of classes (=1 means we have a binary setting):" << nrOfClasses << std::endl;
- std::cerr << "Effective number of classes (neglecting classes without positive examples): " << knownClasses.size() << std::endl;
+ std::cerr << "Effective number of classes (neglecting classes without positive examples): " << this->knownClasses.size() << std::endl;
}

// combine standard model and noise model
@@ -981,7 +986,7 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b

t1.start();
//setup the kernel combination
- ikmsum = new IKMLinearCombination ();
+ this->ikmsum = new IKMLinearCombination ();

if ( this->b_verbose )
{
@@ -989,15 +994,15 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b
}

//First model: noise
- ikmsum->addModel ( new IKMNoise ( fmk->get_n(), fmk->getNoise(), optimizeNoise ) );
+ this->ikmsum->addModel ( new IKMNoise ( this->fmk->get_n(), this->fmk->getNoise(), this->optimizeNoise ) );

// set pretty low built-in noise, because we explicitely add the noise with the IKMNoise
- fmk->setNoise ( 0.0 );
+ this->fmk->setNoise ( 0.0 );

- ikmsum->addModel ( new GMHIKernel ( fmk, pf, NULL /* no quantization */ ) );
+ this->ikmsum->addModel ( new GMHIKernel ( this->fmk, this->pf, NULL /* no quantization */ ) );

t1.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for setting up the ikm-objects: " << t1.getLast() << std::endl;

GPLikelihoodApprox * gplike;
@@ -1007,10 +1012,10 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b
this->setupGPLikelihoodApprox ( gplike, binaryLabels, parameterVectorSize );
t1.stop();

- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for setting up the gplike-objects: " << t1.getLast() << std::endl;

- if (verbose)
+ if ( this->b_verbose )
{
std::cerr << "parameterVectorSize: " << parameterVectorSize << std::endl;
}
@@ -1018,18 +1023,21 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b
t1.start();
// we compute all needed eigenvectors for standard classification and variance prediction at ones.
// nrOfEigenvaluesToConsiderForVarApprox should NOT be larger than 1 if a method different than approximate_fine is used!
+ std::cerr << "EV for Arnoldi: " << std::max ( this->nrOfEigenvaluesToConsider, this->nrOfEigenvaluesToConsiderForVarApprox) << std::endl;
+
this->updateEigenDecomposition( std::max ( this->nrOfEigenvaluesToConsider, this->nrOfEigenvaluesToConsiderForVarApprox) );
+
t1.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for setting up the eigenvectors-objects: " << t1.getLast() << std::endl;

if ( this->b_verbose )
- std::cerr << "resulting eigenvalues for first class: " << eigenMax[0] << std::endl;
+ std::cerr << "resulting eigenvalues for first class: " << this->eigenMax[0] << std::endl;

t1.start();
this->performOptimization ( *gplike, parameterVectorSize );
t1.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for performing the optimization: " << t1.getLast() << std::endl;

if ( this->b_verbose )
@@ -1038,13 +1046,13 @@ void FMKGPHyperparameterOptimization::optimize ( std::map<int, NICE::Vector> & b
t1.start();
this->transformFeaturesWithOptimalParameters ( *gplike, parameterVectorSize );
t1.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for transforming features with optimal parameters: " << t1.getLast() << std::endl;

t1.start();
this->computeMatricesAndLUTs ( *gplike );
t1.stop();
- if (verboseTime)
+ if ( this->b_verboseTime )
std::cerr << "Time used for setting up the A'nB -objects: " << t1.getLast() << std::endl;

t.stop();
@@ -1093,20 +1101,20 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar
fthrow ( Exception, "The precomputation vector is zero...have you trained this classifier?" );
}

- uint maxClassNo = 0;
- for ( std::map<int, PrecomputedType>::const_iterator i = precomputedA.begin() ; i != precomputedA.end(); i++ )
+ int maxClassNo = 0;
+ for ( std::map<int, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
{
- uint classno = i->first;
+ int classno = i->first;
maxClassNo = std::max ( maxClassNo, classno );
double beta;

- if ( q != NULL ) {
- std::map<int, double *>::const_iterator j = precomputedT.find ( classno );
+ if ( this->q != NULL ) {
+ std::map<int, double *>::const_iterator j = this->precomputedT.find ( classno );
double *T = j->second;
- fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
+ this->fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
} else {
const PrecomputedType & A = i->second;
- std::map<int, PrecomputedType>::const_iterator j = precomputedB.find ( classno );
+ std::map<int, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
const PrecomputedType & B = j->second;

// fmk->hik_kernel_sum ( A, B, xstar, beta ); if A, B are of type Matrix
@@ -1116,14 +1124,14 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar
// searching for upper and lower bounds ( findFirst... functions ) require original feature
// values as inputs. However, for calculation we need the transformed features values.

- fmk->hik_kernel_sum ( A, B, xstar, beta, pf );
+ this->fmk->hik_kernel_sum ( A, B, xstar, beta, pf );
}

scores[ classno ] = beta;
}
scores.setDim ( maxClassNo + 1 );

- if ( precomputedA.size() > 1 )
+ if ( this->precomputedA.size() > 1 )
{ // multi-class classification
return scores.maxElement();
}
@@ -1140,26 +1148,32 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::SparseVector & xstar

int FMKGPHyperparameterOptimization::classify ( const NICE::Vector & xstar, NICE::SparseVector & scores ) const
{
+
// loop through all classes
- if ( precomputedA.size() == 0 )
+ if ( this->precomputedA.size() == 0 )
{
fthrow ( Exception, "The precomputation vector is zero...have you trained this classifier?" );
}

- uint maxClassNo = 0;
- for ( std::map<int, PrecomputedType>::const_iterator i = precomputedA.begin() ; i != precomputedA.end(); i++ )
+ int maxClassNo = -std::numeric_limits<int>::max();
+ for ( std::map<int, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
{
- uint classno = i->first;
+ int classno = i->first;
+ std::cerr << " classno: " << classno << std::endl;
maxClassNo = std::max ( maxClassNo, classno );
+ std::cerr << " current max class number: " << maxClassNo << std::endl;
double beta;

- if ( q != NULL ) {
- std::map<int, double *>::const_iterator j = precomputedT.find ( classno );
+ if ( this->q != NULL )
+ {
+ std::map<int, double *>::const_iterator j = this->precomputedT.find ( classno );
double *T = j->second;
- fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
- } else {
+ this->fmk->hik_kernel_sum_fast ( T, *q, xstar, beta );
+ }
+ else
+ {
const PrecomputedType & A = i->second;
- std::map<int, PrecomputedType>::const_iterator j = precomputedB.find ( classno );
+ std::map<int, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
const PrecomputedType & B = j->second;

// fmk->hik_kernel_sum ( A, B, xstar, beta ); if A, B are of type Matrix
@@ -1169,20 +1183,24 @@ int FMKGPHyperparameterOptimization::classify ( const NICE::Vector & xstar, NICE
// searching for upper and lower bounds ( findFirst... functions ) require original feature
// values as inputs. However, for calculation we need the transformed features values.

- fmk->hik_kernel_sum ( A, B, xstar, beta, pf );
+ std::cerr << " call this->fmk->hik_kernel_sum ( A, B, xstar, beta, pf ); " << std::endl;
+ this->fmk->hik_kernel_sum ( A, B, xstar, beta, this->pf );
}

+ std::cerr << "score for " << classno << " : " << beta << std::endl;
scores[ classno ] = beta;
}
scores.setDim ( maxClassNo + 1 );

- if ( precomputedA.size() > 1 )
+
+ if ( this->precomputedA.size() > 1 )
{ // multi-class classification
return scores.maxElement();
}
else if ( this->knownClasses.size() == 2 ) // binary setting
{
scores[ this->i_binaryLabelNegative ] = -scores[ this->i_binaryLabelPositive ];
+
return scores[ this->i_binaryLabelPositive ] <= 0.0 ? this->i_binaryLabelNegative : this->i_binaryLabelPositive;
}
else //OCC or regression setting
@@ -1312,7 +1330,7 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine (
void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NICE::SparseVector & x, double & predVariance ) const
{
// security check!
- if ( ikmsum->getNumberOfModels() == 0 )
+ if ( this->ikmsum->getNumberOfModels() == 0 )
{
fthrow ( Exception, "ikmsum is empty... have you trained this classifer? Aborting..." );
}
@@ -1323,7 +1341,7 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NIC
double kSelf ( 0.0 );
for ( NICE::SparseVector::const_iterator it = x.begin(); it != x.end(); it++ )
{
- kSelf += pf->f ( 0, it->second );
+ kSelf += this->pf->f ( 0, it->second );
// if weighted dimensions:
//kSelf += pf->f(it->first,it->second);
}
@@ -1412,10 +1430,12 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateRough
predVariance = kSelf - ( 1.0 / eigenMax[0] )* normKStar;
}

-void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine ( const NICE::Vector & x, double & predVariance ) const
+void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine ( const NICE::Vector & _x,
+ double & _predVariance
+ ) const
{
// security check!
- if ( eigenMaxVectors.rows() == 0 )
+ if ( this->eigenMaxVectors.rows() == 0 )
{
fthrow ( Exception, "eigenMaxVectors is empty...have you trained this classifer? Aborting..." );
}
@@ -1423,16 +1443,16 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine (
// ---------------- compute the first term --------------------

double kSelf ( 0.0 );
- int dim ( 0 );
- for ( NICE::Vector::const_iterator it = x.begin(); it != x.end(); it++, dim++ )
+ uint dim ( 0 );
+ for ( NICE::Vector::const_iterator it = _x.begin(); it != _x.end(); it++, dim++ )
{
- kSelf += pf->f ( 0, *it );
+ kSelf += this->pf->f ( 0, *it );
// if weighted dimensions:
//kSelf += pf->f(dim,*it);
}
// ---------------- compute the approximation of the second term --------------------
NICE::Vector kStar;
- fmk->hikComputeKernelVector ( x, kStar );
+ this->fmk->hikComputeKernelVector ( _x, kStar );


//ok, there seems to be a nasty thing in computing multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true/* transpose */ );
@@ -1441,9 +1461,9 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine (
// NICE::Vector multiplicationResults; // will contain nrOfEigenvaluesToConsiderForVarApprox many entries
// multiplicationResults.multiply ( *eigenMaxVectorIt, kStar, true/* transpose */ );

- NICE::Vector multiplicationResults( nrOfEigenvaluesToConsiderForVarApprox-1, 0.0 );
- NICE::Matrix::const_iterator eigenVecIt = eigenMaxVectors.begin();
- for ( int tmpJ = 0; tmpJ < nrOfEigenvaluesToConsiderForVarApprox-1; tmpJ++)
+ NICE::Vector multiplicationResults( this->nrOfEigenvaluesToConsiderForVarApprox-1, 0.0 );
+ NICE::Matrix::const_iterator eigenVecIt = this->eigenMaxVectors.begin();
+ for ( int tmpJ = 0; tmpJ < this->nrOfEigenvaluesToConsiderForVarApprox-1; tmpJ++)
{
for ( NICE::Vector::const_iterator kStarIt = kStar.begin(); kStarIt != kStar.end(); kStarIt++,eigenVecIt++)
{
@@ -1457,10 +1477,10 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine (
int cnt ( 0 );
NICE::Vector::const_iterator it = multiplicationResults.begin();

- while ( cnt < ( nrOfEigenvaluesToConsiderForVarApprox - 1 ) )
+ while ( cnt < ( this->nrOfEigenvaluesToConsiderForVarApprox - 1 ) )
{
projectionLength = ( *it );
- currentSecondTerm += ( 1.0 / eigenMax[cnt] ) * pow ( projectionLength, 2 );
+ currentSecondTerm += ( 1.0 / this->eigenMax[cnt] ) * pow ( projectionLength, 2 );
sumOfProjectionLengths += pow ( projectionLength, 2 );

it++;
@@ -1470,43 +1490,48 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceApproximateFine (

double normKStar ( pow ( kStar.normL2 (), 2 ) );

- currentSecondTerm += ( 1.0 / eigenMax[nrOfEigenvaluesToConsiderForVarApprox-1] ) * ( normKStar - sumOfProjectionLengths );
+ currentSecondTerm += ( 1.0 / this->eigenMax[nrOfEigenvaluesToConsiderForVarApprox-1] ) * ( normKStar - sumOfProjectionLengths );


if ( ( normKStar - sumOfProjectionLengths ) < 0 )
{
std::cerr << "Attention: normKStar - sumOfProjectionLengths is smaller than zero -- strange!" << std::endl;
}
- predVariance = kSelf - currentSecondTerm;
+ _predVariance = kSelf - currentSecondTerm;
}

-void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NICE::Vector & x, double & predVariance ) const
+void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NICE::Vector & _x,
+ double & _predVariance
+ ) const
{
- if ( ikmsum->getNumberOfModels() == 0 )
+ if ( this->ikmsum->getNumberOfModels() == 0 )
{
fthrow ( Exception, "ikmsum is empty... have you trained this classifer? Aborting..." );
}

// ---------------- compute the first term --------------------
double kSelf ( 0.0 );
- int dim ( 0 );
- for ( NICE::Vector::const_iterator it = x.begin(); it != x.end(); it++, dim++ )
+ uint dim ( 0 );
+ for ( NICE::Vector::const_iterator it = _x.begin(); it != _x.end(); it++, dim++ )
{
- kSelf += pf->f ( 0, *it );
+ kSelf += this->pf->f ( 0, *it );
// if weighted dimensions:
//kSelf += pf->f(dim,*it);
}
+

// ---------------- compute the second term --------------------
NICE::Vector kStar;
- fmk->hikComputeKernelVector ( x, kStar );
+ this->fmk->hikComputeKernelVector ( _x, kStar );

+ std::cerr << " kStar: " << kStar << std::endl;
+
//now run the ILS method
NICE::Vector diagonalElements;
- ikmsum->getDiagonalElements ( diagonalElements );
+ this->ikmsum->getDiagonalElements ( diagonalElements );

// init simple jacobi pre-conditioning
- ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( linsolver );
+ ILSConjugateGradients *linsolver_cg = dynamic_cast<ILSConjugateGradients *> ( this->linsolver );


//perform pre-conditioning
@@ -1527,13 +1552,15 @@ void FMKGPHyperparameterOptimization::computePredictiveVarianceExact ( const NIC
* v = k_*
* This reduces the number of iterations by 5 or 8
*/
- beta = (kStar * (1.0 / eigenMax[0]) );
- linsolver->solveLin ( *ikmsum, kStar, beta );
+ beta = (kStar * (1.0 / this->eigenMax[0]) );
+ this->linsolver->solveLin ( *ikmsum, kStar, beta );

beta *= kStar;

double currentSecondTerm( beta.Sum() );
- predVariance = kSelf - currentSecondTerm;
+
+ std::cerr << "kSelf: " << kSelf << " currentSecondTerm: " << currentSecondTerm << std::endl;
+ _predVariance = kSelf - currentSecondTerm;
}

///////////////////// INTERFACE PERSISTENT /////////////////////
@@ -1604,13 +1631,13 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
///////////////////////////////////
if ( tmp.compare("verbose") == 0 )
{
- _is >> verbose;
+ _is >> this->b_verbose;
_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
}
else if ( tmp.compare("verboseTime") == 0 )
{
- _is >> verboseTime;
+ _is >> this->b_verboseTime;
_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
}
@@ -1625,36 +1652,36 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
//////////////////////////////////////
else if ( tmp.compare("b_performRegression") == 0 )
{
- _is >> b_performRegression;
+ _is >> this->b_performRegression;
_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
}
else if ( tmp.compare("fmk") == 0 )
{
- if ( fmk != NULL )
- delete fmk;
- fmk = new FastMinKernel();
- fmk->restore( _is, _format );
+ if ( this->fmk != NULL )
+ delete this->fmk;
+ this->fmk = new FastMinKernel();
+ this->fmk->restore( _is, _format );

_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
}
else if ( tmp.compare("q") == 0 )
{
- std::string _isNull;
+ std::string isNull;
_is >> isNull; // NOTNULL or NULL
if (isNull.compare("NOTNULL") == 0)
{
- if ( q != NULL )
- delete q;
- q = new Quantization();
- q->restore ( _is, _format );
+ if ( this->q != NULL )
+ delete this->q;
+ this->q = new Quantization();
+ this->q->restore ( _is, _format );
}
else
{
- if ( q != NULL )
- delete q;
- q = NULL;
+ if ( this->q != NULL )
+ delete this->q;
+ this->q = NULL;
}
_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
@@ -1693,7 +1720,7 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
fthrow(Exception, "Transformation type is unknown " << transform);
}

- pf->restore(is, _format);
+ this->pf->restore( _is, _format);

_is >> tmp; // end of block
tmp = this->removeEndTag ( tmp );
@@ -2407,7 +2434,7 @@ void FMKGPHyperparameterOptimization::addExample( const NICE::SparseVector * exa
// could be dealt with implicitely.
// Therefore, we insert its label here...
if ( (newClasses.size() > 0 ) && ( (this->knownClasses.size() - newClasses.size() ) == 2 ) )
- newClasses.insert( binaryLabelNegative );
+ newClasses.insert( this->i_binaryLabelNegative );

// add the new example to our data structure
// It is necessary to do this already here and not lateron for internal reasons (see GMHIKernel for more details)
@@ -2490,7 +2517,7 @@ void FMKGPHyperparameterOptimization::addMultipleExamples( const std::vector< co
// could be dealt with implicitely.
// Therefore, we insert its label here...
if ( (newClasses.size() > 0 ) && ( (this->knownClasses.size() - newClasses.size() ) == 2 ) )
- newClasses.insert( binaryLabelNegative );
+ newClasses.insert( this->i_binaryLabelNegative );

}
// in a regression setting, we do not have to remember any "class labels"
|