/** 
* @file KCMinimumEnclosingBall.cpp
* @brief One-class classification via the (soft) Minimum Enclosing Ball in kernel feature space
* @author Erik Rodner + Mi.Ke.
* @date 12/03/2010

*/
#include <iostream>
#include <typeinfo>

#include "KCMinimumEnclosingBall.h"

using namespace std;
using namespace NICE;
using namespace OBJREC;


/**
 * Constructor: reads the outlier fraction nu from the config.
 * nu is an upper bound on the fraction of training points allowed outside
 * the ball (soft MEB); nu == 0 selects the hard MEB formulation in teach().
 *
 * @param conf    configuration to read "outlier_fraction" from
 * @param kernel  kernel passed on to the KernelClassifier base
 * @param section config section containing the parameters
 */
KCMinimumEnclosingBall::KCMinimumEnclosingBall( const Config *conf, Kernel *kernel, const string & section ) 
	: KernelClassifier ( conf, kernel )
{
	//specify parameters, if any
	nu = conf->gD(section,"outlier_fraction");
	// validate before announcing the value, so an invalid setting is not
	// echoed as if it were accepted
	if(nu<0 || nu>1){
		fprintf(stderr,"ERROR: Outlier fraction 'nu' (outlier_fraction) specified must be in [0,1] !!!\n\n");
		exit(-1);
	}
	fprintf(stderr,"\nUsing (upper bound) outlier fraction of nu=%f (upper bound is so far guaranteed only for stationary kernels)\n",nu);
}

/**
 * Copy constructor: duplicates the trained model state (dual coefficients,
 * radius, cached QP matrices) along with the KernelClassifier base.
 */
KCMinimumEnclosingBall::KCMinimumEnclosingBall( const KCMinimumEnclosingBall & src )
	: KernelClassifier ( src ),
	  alpha ( src.alpha ),
	  radius ( src.radius ),
	  aKa ( src.aKa ),
	  nu ( src.nu ),
	  trainingSize ( src.trainingSize ),
	  TwoK ( src.TwoK ),
	  Eye ( src.Eye ),
	  Ones ( src.Ones ),
	  ones ( src.ones ),
	  b ( src.b ),
	  minusDiagK ( src.minusDiagK ),
	  minusOne ( src.minusOne ),
	  zeros ( src.zeros )
{
}

// Destructor: nothing to release explicitly; all members clean up themselves.
KCMinimumEnclosingBall::~KCMinimumEnclosingBall()
{
}


// Train the (soft) minimum enclosing ball in kernel feature space.
//
// Dual QP being set up here (SVDD/MEB style):
//   min_alpha  alpha' K alpha - diag(K)' alpha
//   s.t.       sum_i alpha_i = 1
//              alpha_i >= 0                        (hard MEB, nu == 0)
//              0 <= alpha_i <= 1/(nu*n)            (soft MEB, nu > 0)
//
// Mapped onto the QuadProg++ convention
//   min 0.5 x'G x + g0'x  s.t.  CE'x + ce0 = 0,  CI'x + ci0 >= 0
// via G = TwoK = 2K, g0 = minusDiagK, CE = Ones / ce0 = minusOne
// (sum alpha - 1 = 0), and CI = Eye / ci0 = zeros; in the soft case CI is
// [I, -I] and ci0 carries the 1/(nu*n) upper bounds.
//
// Note: the label vector y is not used — this is a one-class method.
void KCMinimumEnclosingBall::teach ( KernelData *kernelData, const NICE::Vector & y )
{
	NICE::Matrix kernelMatrix = kernelData->getKernelMatrix();
	trainingSize=kernelMatrix.rows();
	int n=trainingSize;

	if(nu==0.0){ //no outlier, i.e. hard minimum enclosing ball algorithm
		TwoK.resize(n,n);
		Eye.resize(n,n);
		minusDiagK.resize(n);
		Ones.resize(n,1);
		minusOne.resize(1);
		zeros.resize(n);
		alpha.resize(n);
		for(int i=0;i<n;i++){
			for(int j=0;j<n;j++){
				Eye[i][j]= ( i==j ? 1 : 0 );
				TwoK[i][j]=2*kernelMatrix(i,j);
			}
			minusDiagK[i]=-kernelMatrix(i,i);
			Ones[i][0]=1;
			zeros[i]=0;
		}
		minusOne[0]=-1;
	}else{	//soft MEB problem (more complicated since we must introduce further inequalities)
		TwoK.resize(n,n);
		Eye.resize(n,2*n);	// instead of Id, we also insert -Id (to the right of Id)
		minusDiagK.resize(n);
		Ones.resize(n,1);
		minusOne.resize(1);
		zeros.resize(2*n);	// instead of zeros we also insert 1/(v*n)
		alpha.resize(n);
		for(int i=0;i<n;i++){
			for(int j=0;j<n;j++){
				Eye[i][j]= ( i==j ? 1 : 0 );
				Eye[i][n+j]= ( i==j ? -1 : 0 );
				TwoK[i][j]=2*kernelMatrix(i,j);
			}
			minusDiagK[i]=-kernelMatrix(i,i);
			Ones[i][0]=1;
			zeros[i]=0;
			zeros[n+i]=1.0/(nu*double(n));	// upper-bound slack for -alpha_i + 1/(nu*n) >= 0
		}
		minusOne[0]=-1;

	}

	// solve for the dual coefficients alpha
	QuadProgPP::solve_quadprog(TwoK, minusDiagK, Ones, minusOne, Eye, zeros, alpha);
	radius=0.0;
	aKa=0.0;
	// aKa accumulates alpha' K alpha (squared norm of the ball center);
	// tmp accumulates -2*(K alpha)_i, the cross term of point i's squared
	// distance to the center.
	double rad2=-1e+100,tmp;
	for(int i=0;i<n;i++){
		tmp=0.0;
		for(int j=0;j<n;j++){
			aKa+=0.5*alpha[i]*TwoK[i][j]*alpha[j];
			tmp+=-TwoK[i][j]*alpha[j];
		}
		// Unbounded support vectors (0 < alpha_i < 1/(nu*n)) lie exactly on
		// the ball surface; take the max of K_ii - 2*(K alpha)_i over them.
		// NOTE(review): for nu == 0 the upper bound 1/(n*nu) evaluates to
		// IEEE +inf, making the second test vacuously true — presumably
		// intentional, so every alpha_i > 0 counts in the hard-MEB case.
		if( (alpha[i]>1e-10) && (alpha[i]<1/(n*nu)+1e-10) ){
			tmp+=0.5*TwoK[i][i];
			rad2=(rad2>tmp ? rad2 : tmp);
		}
		//fprintf(stderr,"alpha_%d=%f\t 1/vl=%f\t 0>alpha<1/vl? %d => R_%d=aKa+%f\n",i,alpha[i],1.0/(nu*n),((alpha[i]>1e-10)&&alpha[i]<1/(n*nu)+1e-10),i,tmp);
	}
	// squared radius R^2 = aKa + max_i (K_ii - 2*(K alpha)_i)
	radius=aKa+rad2;
	//fprintf(stderr,"\nfinal radius R=aKa+%f\n",rad2);
}
	
/**
 * Classify a test point given its kernel values against the training set.
 * Squared distance of x to the ball center c is
 *   ||phi(x) - c||^2 = aKa - 2 * k(x)' alpha + k(x,x);
 * the score is radius minus that distance: positive inside the ball
 * (class 1, inlier), negative outside (class 0, outlier).
 *
 * @param kernelVector kernel evaluations k(x, x_i) against all training points
 * @param kernelSelf   self-similarity k(x, x)
 * @return ClassificationResult with scores[1] = inlierness margin
 */
ClassificationResult KCMinimumEnclosingBall::classifyKernel ( const NICE::Vector & kernelVector, double kernelSelf ) const
{
	// cross term -2 * k(x)' alpha, plus the self-similarity k(x,x)
	double crossTerm = 0.0;
	for ( int idx = 0; idx < trainingSize; idx++ )
		crossTerm -= 2 * kernelVector[idx] * alpha[idx];
	crossTerm += kernelSelf;

	const double inlierness = radius - ( aKa + crossTerm );

	FullVector scores ( 2 );
	scores[0] = 0.0;
	scores[1] = inlierness;

	ClassificationResult r ( inlierness < 0 ? 0 : 1, scores );
	return r;
}

/** Create an independent copy of this classifier (caller owns the result). */
KCMinimumEnclosingBall *KCMinimumEnclosingBall::clone() const
{
	// delegate the deep copy to the copy constructor
	KCMinimumEnclosingBall *copy = new KCMinimumEnclosingBall ( *this );
	return copy;
}