#!/usr/bin/python
import numpy
import pickle
import sys
import os

# make the parent directory importable so the shared helpers and the
# linear-GP prototype module can be found
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir))
import helperFunctions
import activeLearningLinGPprototype

class Classifier(activeLearningLinGPprototype.ClassifierPrototype):
    """Active learning with EMOC (expected model output change) scores
    on top of the linear GP classifier prototype."""

    def __init__(self,
                 sigmaN=0.00178,
                 usePde=True,
                 configFile=None):
        activeLearningLinGPprototype.ClassifierPrototype.__init__(self, sigmaN=sigmaN, configFile=configFile)
        # usePde: if True, EMOC scores are additionally weighted by a simple
        # density term (see calcEMOCpde); can be overridden via the config file
        self.usePde = helperFunctions.getConfig(configFile, 'activeLearning', 'usePde', usePde, 'bool', True)
    # x.shape = (number of samples, feat dim)
    def calcEMOC(self, x, allX=None):
        # EMOC scores for the n candidate samples in x (feature dim d);
        # allX is the pool over which the change of the model output is
        # measured (labeled data plus candidates by default). All arrays
        # are numpy.matrix objects, so * denotes a matrix product.
        if allX is None:
            allX = numpy.append(self.X, x, axis=0)
        # a label of -1 among the known classes marks the noise/reject class
        containsNoise = (self.yUni == -1).any()
        tmpVec1 = self.invCreg * x.T                                # (d, n)
        tmpVec2 = numpy.sum(numpy.multiply(x.T, tmpVec1), axis=0)   # (1, n)
        sigmaF = self.calcSigmaF(x, tmpVec2)
        infY = self.infer(x, containsNoise)
        probs = self.calcProbs(x, infY, sigmaF)
        term1 = 1.0 / (1.0 + tmpVec2)
        term2 = numpy.sum(numpy.absolute(allX * tmpVec1), axis=0)
        # change of the one-vs-rest outputs if a candidate were labeled as
        # class c: the output for c moves towards +1 (pro), all other
        # classes towards -1 (contra); term3 takes the expectation over
        # the predicted class probabilities
        pro = numpy.absolute(infY - 1)
        contra = numpy.absolute(infY + 1)
        diff = numpy.repeat(numpy.sum(contra, axis=1), contra.shape[1], axis=1)
        diff = diff - contra + pro
        term3 = numpy.sum(numpy.multiply(probs, diff), axis=1)      # (n, 1)
        return numpy.multiply(numpy.multiply(term1, term2).T, term3)
    # x.shape = (number of samples, feat dim)
    def calcEMOCpde(self, x):
        # EMOC scores weighted by a simple density term: the inner product
        # of each candidate with the mean of all (labeled + candidate) data
        allX = numpy.append(self.X, x, axis=0)
        scores = self.calcEMOC(x, allX)
        density = x * numpy.mean(allX, axis=0).T
        return numpy.multiply(scores, density)
    # x.shape = (number of samples, feat dim)
    def calcAlScores(self, x):
        # active-learning scores used to rank candidates for labeling
        if self.usePde:
            return self.calcEMOCpde(x)
        else:
            return self.calcEMOC(x)
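

if __name__ == "__main__":
    # Minimal standalone sketch (illustration only): it mimics the density
    # weighting applied in calcEMOCpde on random stand-in data, using plain
    # ndarrays and an explicit dot product instead of the numpy.matrix
    # semantics assumed by the class above; labeledX and candX are
    # hypothetical placeholders, not data from the pipeline.
    rng = numpy.random.RandomState(0)
    labeledX = rng.rand(50, 8)                     # stand-in for self.X, shape (N, d)
    candX = rng.rand(5, 8)                         # candidate batch, shape (n, d)
    allX = numpy.append(labeledX, candX, axis=0)   # pooled data, shape (N + n, d)
    # each candidate is weighted by its inner product with the pooled mean,
    # the crude density proxy used in calcEMOCpde
    density = candX.dot(numpy.mean(allX, axis=0))  # shape (n,)
    print(density)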