#! /usr/bin/python
# activeLearningGPLinKemoc.py
import numpy
import sys
import os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),os.pardir))
import helperFunctions
import activeLearningGPLinKprototype
class Classifier(activeLearningGPLinKprototype.ClassifierPrototype):
    """GP classifier (linear kernel) that ranks unlabeled samples for active learning.

    Extends ClassifierPrototype with an EMOC-style (expected model output
    change) acquisition score, optionally weighted by sample density.
    The kernel is the plain dot product (see ``numpy.dot(allX, allX.T)``
    in calcEMOC/calcAlScores), i.e. a linear kernel.
    """

    def __init__(self,
                 sigmaN = 0.00178,
                 useDensity = True,
                 configFile = None):
        # sigmaN: forwarded to the prototype — presumably the GP observation
        # noise variance; TODO confirm against ClassifierPrototype.
        # useDensity: if True, calcAlScores multiplies the EMOC score by the
        # per-sample density (mean similarity to all points).
        # configFile: optional config; getConfig lets it override the
        # 'activeLearning'/'useDensity' default given here.
        activeLearningGPLinKprototype.ClassifierPrototype.__init__(self, sigmaN=sigmaN, configFile=configFile)
        self.useDensity = helperFunctions.getConfig(configFile, 'activeLearning', 'useDensity', useDensity, 'bool', True)

    # x.shape = (number of samples, feat dim)
    def calcEMOC(self, x, allX=None, kAll=None, sigmaF=None):
        """Return an (n_candidates, 1) matrix of EMOC scores for candidates x.

        allX/kAll/sigmaF may be passed in to reuse work already done by the
        caller (calcAlScores does this); each is recomputed here when None.
        """
        if allX is None:
            # Stack training data and candidates row-wise: rows are samples.
            allX = numpy.append(self.X, x, axis=0)
        if kAll is None:
            # Full linear-kernel Gram matrix over [training; candidates].
            kAll = numpy.dot(allX,allX.T)
        # Cross-kernel block: training rows vs. candidate columns.
        k = kAll[0:self.X.shape[0],self.X.shape[0]:]
        if sigmaF is None:
            # Self-similarity (diagonal) of each candidate, as a column vector.
            selfK = numpy.asmatrix(numpy.diag(kAll[self.X.shape[0]:,self.X.shape[0]:])).T
            # Presumably the predictive variance of each candidate — defined
            # in the prototype; TODO confirm.
            sigmaF = self.calcSigmaF(x, k, selfK)
        # True if -1 occurs among the unique labels; presumably a special
        # noise/rejection class — confirm against the prototype's label scheme.
        containsNoise = (self.yUni == -1).any()
        infY = self.infer(x, containsNoise, k)          # per-class GP mean outputs
        probs = self.calcProbs(x, infY, sigmaF)         # per-class membership probabilities
        # term1: scalar weight per candidate, 1 / (noise + predictive variance).
        term1 = 1.0 / (self.sigmaN + sigmaF)
        # term2: hypothetical change direction of the dual coefficients if a
        # candidate were added. Last row (the new point itself) stays -1; the
        # training rows get (K + sigmaN*I)^-1 k.
        term2 = numpy.asmatrix(numpy.ones((self.X.shape[0] + 1,x.shape[0]))) * (-1.0)
        term2[0:self.X.shape[0],:] = numpy.linalg.solve(numpy.add(self.K, numpy.identity(self.X.shape[0], dtype=numpy.float32)*self.sigmaN), k)
        # Distances of the inferred outputs to the +1 ("pro") and -1 ("contra")
        # one-vs-all regression targets.
        pro = numpy.absolute(infY - 1)
        contra = numpy.absolute(infY + 1)
        # term3[i, c] = sum_j contra[i, j] - contra[i, c] + pro[i, c]:
        # total target change over all classes if class c were the true label.
        term3 = numpy.repeat(numpy.sum(contra,axis=1),contra.shape[1],axis=1)
        term3 = numpy.add(numpy.subtract(term3,contra),pro)
        scores = numpy.asmatrix(numpy.zeros((x.shape[0],1)))
        for cls in range(infY.shape[1]):
            # Coefficient change if the candidate were labeled as `cls` ...
            diffAlpha = numpy.multiply(numpy.repeat(numpy.multiply(term1,term3[:,cls]), term2.shape[0], axis=1),term2.T)
            # ... propagated through the kernel to get the change of the model
            # output on ALL points (training part plus the candidate row).
            change = numpy.dot(diffAlpha[:,:-1],kAll[0:self.X.shape[0],:])
            change = numpy.add(change, numpy.multiply(numpy.repeat(diffAlpha[:,-1], kAll.shape[0], axis=1),kAll[self.X.shape[0]:,:]))
            # Expected (probability-weighted) L1 norm of the output change.
            scores = numpy.add(scores, numpy.multiply(probs[:,cls],numpy.sum(numpy.absolute(change),axis=1)))
        return scores

    def getDensity(self, sim):
        # Mean similarity of each row to all columns (sum / count).
        return numpy.sum(sim, axis=1) / float(sim.shape[1])

    def getDiversity(self, sim):
        # Reciprocal of the largest similarity per row.
        # NOTE(review): defined but not called anywhere in this file.
        return 1.0 / numpy.max(sim, axis=1)

    # NOTE(review): this comment originally claimed x.shape = (feat dim,
    # number of samples), but the code appends x to self.X along axis 0
    # exactly as calcEMOC does, so rows must be samples — most likely
    # x.shape = (number of samples, feat dim); confirm against callers.
    def calcAlScores(self, x):
        """Return active-learning scores for candidates x: EMOC, optionally
        multiplied element-wise by each candidate's density."""
        allX = numpy.append(self.X, x, axis=0)
        kAll = numpy.dot(allX,allX.T)
        k = kAll[0:self.X.shape[0],self.X.shape[0]:]
        selfK = numpy.asmatrix(numpy.diag(kAll[self.X.shape[0]:,self.X.shape[0]:])).T
        sigmaF = self.calcSigmaF(x, k, selfK)
        # Reuse allX/kAll/sigmaF so calcEMOC does not recompute them.
        alScores = self.calcEMOC(x, allX=allX, kAll=kAll, sigmaF=sigmaF)
        if self.useDensity:
            # Density over ALL samples (training + candidates) for each candidate row.
            density = self.getDensity(kAll[self.X.shape[0]:,:])
            alScores = numpy.multiply(alScores, density)
        return alScores