% learnWithGivenWhitening.m
  1. function modelNew = learnWithGivenWhitening(model,R, neg, features, i_truncDim)
  2. % function modelNew = learnWithGivenWhitening(model,R, neg, features, i_truncDim)
  3. % BRIEF:
  4. % Learn model by linear discriminant analysis with already given
  5. % negative mean and covariance matrix
  6. %
  7. % INPUT:
  8. % model -- struct, previously untrained model, contains at
  9. % least the following fields:
  10. % .w -- previous (most likely empty) weight vector of
  11. % model, relevant for determining the correct size
  12. % .i_numCells -- number of cells, specified per dimension (copied
  13. % only)
  14. % .i_binSize -- number of pixel of every cell in both directions
  15. % (copied only)
  16. % .interval -- how many octaves are used for feature extraction
  17. % (copied only)
  18. % .d_detectionThreshold -- minimum scores for accepting a response
  19. %
  20. % R -- covariance matrix learned previously
  21. % neg -- negative mean learned previously
  22. % features -- features of positive examples FIXME write
  23. % dimensionality!
  24. % i_truncDim -- int, indicating which dimension, if any, serves as
  25. % truncation feature by being constant to zero ( as
  26. % done for DPM HOG features )
  27. %
  28. % OUTPUT:
  29. % modelNew -- struct, learned model, with fields 'w',
  30. % 'i_numCells', 'i_binSize', 'interval', 'd_detectionThreshold
  31. %
  32. % author: Alexander Freytag
  33. % last time modified: 27-02-2014 (dd-mm-yyyy)
  34. %num is the number of blocks we have for this cache
  35. numSamples = length(features);
  36. assert (numSamples >= 1, 'LDA - No features for model training provided');
  37. model.d_detectionThreshold = 0; %FIXME
  38. % we assume that all features of the positive class are of same size!
  39. [ny,nx,nf] = size(features{1});
  40. if ( i_truncDim > 0 )
  41. nf = nf - 1;
  42. end
  43. % flatten features into single vectors
  44. feats = zeros(ny*nx*nf,numSamples);
  45. for i = 1:numSamples
  46. % get current feature
  47. feat = features{i};
  48. % possibly remove unneeded truncation feature
  49. if ( i_truncDim > 0 )
  50. feat = feat(:, :, 1:end~=i_truncDim );
  51. end
  52. % flatten vector and store in feats-struct
  53. feats(:,i) = feat(:);
  54. end
  55. % average all features together
  56. pos = mean(feats,2);
  57. % perform actual LDA training
  58. w=R\(R'\(pos-neg));
  59. % bring weight vector into correct layout
  60. w = reshape(w,[ny nx nf]);
  61. if ( i_truncDim > 0 )
  62. % Add in occlusion feature
  63. %note: might only be troublesome if very first dim is the td...
  64. w = cat( 3, w(:,:,1:(i_truncDim-1) ), ...
  65. zeros ( size(w,1),size(w,2),1,class(w) ), ...
  66. w(:, :, i_truncDim:end) );
  67. end
  68. modelNew.w = w;
  69. % size of root filter in HOG cells
  70. modelNew.i_numCells = [ny nx];
  71. % size of each cell in pixel
  72. modelNew.i_binSize = model.i_binSize;
  73. % strange interval
  74. modelNew.interval = model.interval;
  75. %threshold to reject detection with score lower than that
  76. modelNew.d_detectionThreshold = model.d_detectionThreshold;
  77. end