activeLearningLinGPprototype.py

#! /usr/bin/python
import numpy
import pickle
import sys
import os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir))
import helperFunctions


class ClassifierPrototype:

    def __init__(self, sigmaN=0.00178, configFile=None):
        self.sigmaN = helperFunctions.getConfig(configFile, 'activeLearning', 'sigmaN', sigmaN, 'float', True)
        self.invCreg = []
        self.X = []
        self.yBin = []  # .shape = (number of samples, number of unique classes)
        self.yUni = []
        self.w = []     # .shape = (feat dim, number of unique classes)

    def checkModel(self):
        if not numpy.all(numpy.isfinite(self.invCreg)):
            raise Exception('not numpy.all(numpy.isfinite(self.invCreg))')
        if not numpy.all(numpy.isfinite(self.w)):
            raise Exception('not numpy.all(numpy.isfinite(self.w))')
        if not numpy.all(numpy.isfinite(self.X)):
            raise Exception('not numpy.all(numpy.isfinite(self.X))')
        if not numpy.all(numpy.isfinite(self.yUni)):
            raise Exception('not numpy.all(numpy.isfinite(self.yUni))')
        # the binary one-vs-rest labels may only contain the values -1 and +1
        if len(numpy.unique(self.yBin)) > 2:
            raise Exception('len(numpy.unique(self.yBin)) > 2')

    def train2(self, X, y, sigmaN=None):
        self.train(X[0,:], y[0,:], sigmaN)
        for idx in range(1, X.shape[0]):
            self.update(X[idx,:], y[idx,:])

    # X.shape = (number of samples, feat dim), y.shape = (number of samples, 1)
    def train(self, X, y, sigmaN=None):
        if sigmaN is not None:
            self.sigmaN = sigmaN
        # get all known classes
        self.yUni = numpy.asmatrix(numpy.unique(numpy.asarray(y)))
        # save stuff
        self.X = X
        # calculate inverse of C_reg
        self.invCreg = numpy.linalg.inv((X.T*X) + numpy.identity(X.shape[1])*self.sigmaN)
        # get binary labels for each one-vs-rest (ovr) classifier
        self.yBin = numpy.asmatrix(numpy.empty((y.shape[0], self.yUni.shape[1])), dtype=int)
        for cls in range(self.yUni.shape[1]):
            self.yBin[:,cls] = 2*(y == self.yUni[0,cls]) - 1
        # calculate w
        self.w = self.invCreg*X.T*self.yBin
        self.checkModel()
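
    # Note: train() is the closed-form ridge-regression / linear-kernel GP
    # solution w = (X^T X + sigmaN*I)^{-1} X^T yBin, with one weight column per
    # one-vs-rest class; infer() then returns the predictive mean x*w.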

    # x.shape = (1, feat dim), y.shape = (1, 1)
    def update(self, x, y):
        # update w and C_reg
        tmpVec = self.invCreg*x.T
        tmpScalar = 1.0 + x*tmpVec
        self.w = self.w + tmpVec*((numpy.asmatrix(2*(y == self.yUni) - 1) - x*self.w)/tmpScalar)
        self.invCreg = self.invCreg - ((tmpVec*tmpVec.T)/tmpScalar)
        # update samples
        self.X = numpy.append(self.X, x, axis=0)
        # update binary labels for known classes
        self.yBin = numpy.append(self.yBin, 2*(y == self.yUni) - 1, axis=0)
        # create labels and w for new class
        if not (self.yUni == y).any():
            # get insertion idx
            idx = numpy.searchsorted(numpy.ravel(numpy.asarray(self.yUni)), y[0,0])
            # store new label and find new class index
            self.yUni = numpy.insert(self.yUni, [idx], y, axis=1)
            ## add new binary label vector for new class
            self.yBin = numpy.insert(self.yBin, [idx], numpy.zeros((self.yBin.shape[0],1), dtype=int), axis=1)
            self.yBin[:-1,idx] = -1
            self.yBin[-1,idx] = 1
            ## train new w for new class
            self.w = numpy.insert(self.w, [idx], self.invCreg*self.X.T*self.yBin[:,idx], axis=1)
        self.checkModel()
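
    # Note: the model is kept consistent without retraining via a
    # Sherman-Morrison rank-one update of invCreg = (X^T X + sigmaN*I)^{-1},
    #   (A + x^T x)^{-1} = A^{-1} - (A^{-1} x^T x A^{-1}) / (1 + x A^{-1} x^T),
    # combined with the matching recursive least-squares update of w above.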

    # x.shape = (number of samples, feat dim), loNoise = {0,1}
    def infer(self, x, loNoise=False):
        loNoise = loNoise and (self.yUni == -1).any()
        # division by number of training samples is a hack to prevent huge scores
        return numpy.asmatrix(x*self.w[:,int(loNoise):])  # / float(self.X.shape[0])

    # x.shape = (number of samples, feat dim)
    def test(self, x, loNoise=False):
        loNoise = loNoise and (self.yUni == -1).any()
        return self.yUni[0, numpy.argmax(self.infer(x, loNoise), axis=1) + int(loNoise)]

    # x.shape = (number of samples, feat dim)
    def calcSigmaF(self, x, tmpVec=None):
        if tmpVec is None:
            return numpy.sum(numpy.multiply(x, (self.sigmaN*self.invCreg*x.T).T), axis=1)
        else:
            return self.sigmaN*tmpVec.T
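
    # Note: for tmpVec=None this is the predictive variance of the linear-kernel
    # GP, sigmaF = sigmaN * diag(x * invCreg * x.T), returned as one value per
    # row of x, i.e. shape (number of samples, 1).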

    # x.shape = (number of samples, feat dim)
    def calcProbs(self, x, mu=None, sigmaF=None, nmb=1000):
        # get mu and sigma
        if mu is None:
            mu = self.infer(x)
        if sigmaF is None:
            sigmaF = self.calcSigmaF(x)
        # prepare
        probs = numpy.asmatrix(numpy.zeros(mu.shape))
        for idx in range(nmb):
            draws = numpy.asmatrix(numpy.random.randn(mu.shape[0], mu.shape[1]))
            draws = numpy.multiply(draws, numpy.repeat(sigmaF, draws.shape[1], axis=1)) + mu
            maxIdx = numpy.argmax(draws, axis=1)
            idxs = (range(len(maxIdx)), numpy.squeeze(maxIdx))
            probs[idxs] = probs[idxs] + 1
        # convert absolute to relative amount
        return probs/float(nmb)
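
    # Note: calcProbs() is a Monte-Carlo estimate of the class posteriors: for
    # each sample it draws nmb score vectors around mu with spread sigmaF and
    # returns the relative frequency with which each class attains the maximum.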

    # x.shape = (number of samples, feat dim)
    def calcAlScores(self, x):
        # to be overridden by concrete active learning strategies
        return None

    # x.shape = (number of samples, feat dim)
    def getAlScores(self, x):
        alScores = self.calcAlScores(x)
        if not numpy.all(numpy.isfinite(alScores)):
            raise Exception('not numpy.all(numpy.isfinite(alScores))')
        if alScores.shape[0] != x.shape[0] or alScores.shape[1] != 1:
            raise Exception('alScores.shape[0] != x.shape[0] or alScores.shape[1] != 1')
        return alScores

    # x.shape = (number of samples, feat dim)
    def chooseSample(self, x):
        return numpy.argmax(self.getAlScores(x), axis=0).item(0)
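

# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original prototype).
# ClassifierPrototype.calcAlScores() returns None, so sample selection needs a
# subclass that implements it; the variance-based subclass, the random toy data
# and the assumption that helperFunctions.getConfig() falls back to the passed
# default when configFile is None are all assumptions made for this example.
class VarianceAlDemo(ClassifierPrototype):

    # score each candidate by its predictive variance; shape (number of samples, 1)
    # as required by getAlScores()
    def calcAlScores(self, x):
        return self.calcSigmaF(x)


if __name__ == '__main__':
    numpy.random.seed(0)
    feats = numpy.asmatrix(numpy.random.randn(20, 5))              # 20 samples, 5 feature dims
    labels = numpy.asmatrix(numpy.random.randint(0, 3, (20, 1)))   # labels in {0, 1, 2}

    clf = VarianceAlDemo(sigmaN=0.1)
    clf.train(feats[:10, :], labels[:10, :])    # batch training on the first 10 samples
    clf.update(feats[10, :], labels[10, :])     # incremental update with one more sample

    pool = feats[11:, :]                        # remaining "unlabeled" candidate pool
    queryIdx = clf.chooseSample(pool)           # index of the most uncertain candidate
    print('query sample:', queryIdx)
    print('predicted labels:', clf.test(pool))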