Browse Source

Learning is more robust with respect to memory issues; less redundant code.

Alexander Freytag 9 years ago
parent
commit
b656903ee5
3 changed files with 108 additions and 53 deletions
  1. 23 14
      learn/learnWithGivenWhitening.m
  2. 2 2
      learn/trainBGwithArbitraryFeatures.m
  3. 83 37
      learn/whitenWithDropout.m

+ 23 - 14
learn/learnWithGivenWhitening.m

@@ -1,5 +1,5 @@
-function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
-% function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
+function modelNew = learnWithGivenWhitening(model,R, neg, features, i_truncDim)
+% function modelNew = learnWithGivenWhitening(model,R, neg, features, i_truncDim)
 % BRIEF:
 %    Learn model by linear discriminant analysis with already given
 %    negative mean and covariance matrix
@@ -19,7 +19,8 @@ function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
 % 
 %    R               -- covariance matrix learned previously
 %    neg             -- negative mean learned previously
-%    pyraFeats       -- features of positive examples
+%    features        -- features of positive examples FIXME write
+%                       dimensionality!
 %    i_truncDim      -- int, indicating which dimension, if any,  serves as 
 %                       truncation feature by being constant to zero ( as
 %                       done for DPM HOG features )
@@ -32,23 +33,29 @@ function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
 % last time modified:   27-02-2014 (dd-mm-yyyy)
 
 
+    %num is the number of blocks we have for this cache
+    numSamples  = length(features);
+    
+    assert (numSamples >= 1, 'LDA - No features for model training provided');
+
+    model.d_detectionThreshold = 0; %FIXME
 
-    [ny,nx,nf] = size(model.w);
+    % we assume that all features of the positive class are of same size!
+    [ny,nx,nf] = size(features{1});
     
     if ( i_truncDim > 0 )
         nf = nf - 1;
     end
 
 
-    %num is the number of blocks we have for this cache
-    num  = length(pyraFeats);
-    feats = zeros(ny*nx*nf,num);
-
     % flatten features into single vectors
-    for i = 1:num
+    feats = zeros(ny*nx*nf,numSamples);
+    
+    for i = 1:numSamples      
+        
         % get current feature
-        feat = pyraFeats(i).feature;
-      
+        feat = features{i};
+        
         % possibly remove unneeded truncation feature
         if ( i_truncDim > 0 )
             feat = feat(:, :, 1:end~=i_truncDim );
@@ -65,8 +72,8 @@ function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
     w=R\(R'\(pos-neg));
     
     % bring weight vector into correct layout
-    w = reshape(w,[ny nx nf]);
-
+    w = reshape(w,[ny nx nf]);     
+    
     if ( i_truncDim > 0 )
         % Add in occlusion feature
         %note: might only be troublesome if very first dim is the td...
@@ -75,9 +82,11 @@ function modelNew = learnWithGivenWhitening(model,R, neg, pyraFeats, i_truncDim)
                     w(:, :, i_truncDim:end) ); 
     end
     
+   
+    
     modelNew.w                    = w;
     % size of root filter in HOG cells
-    modelNew.i_numCells           = model.i_numCells;
+    modelNew.i_numCells           = [ny nx];
     % size of each cell in pixel
     modelNew.i_binSize            = model.i_binSize;    
     % strange interval

+ 2 - 2
learn/trainBGwithArbitraryFeatures.m

@@ -81,7 +81,7 @@ function BG = trainBGwithArbitraryFeatures( allImages, settings )
     for i = 1:length(allImages)
         
       % progressbar-like output  
-      if(rem(i,100)==0)
+      if( rem(i,10)==1 )
           fprintf('%d / %d\n', i,length(allImages) );
       end
    
@@ -150,7 +150,7 @@ function BG = trainBGwithArbitraryFeatures( allImages, settings )
     for i = 1:length(allImages)
                 
         % progressbar-like output
-        if( rem(i,100)==0 )
+        if( rem(i,10)==1 )
             fprintf('%d / %d\n', i,length(allImages) );
         end
         im        = readImage( allImages{i} );

+ 83 - 37
learn/whitenWithDropout.m

@@ -7,10 +7,35 @@
 % bg.cov: covariance for k spatial offsets (nf by nf by k)
 % bg.dxy: k spatial offsets (k by 2)
 % lda.lambda: regularizer
-function [R,neg] = whitenWithDropout(bg,lda,nx,ny)
-  neg = repmat(bg.neg',ny*nx,1);
-  neg = neg(:);
-  p=1;
+function [R,neg,b_success] = whitenWithDropout(bg,lda,nx,ny)
+
+    if ( nargout > 2 )
+        b_success = true;
+    end
+  
+%     % the following line is intended to prevent memory overwhelming due to
+%     % impossibly large covariance matrices which could be desired for large
+%     % selected regions...
+%     %
+%     % size(bg.cov,1) -> number of feature dimensions
+%     % nx -> size of desired filter x dimension
+%     % ny -> size of desired filter y dimension
+%     %
+%     if ( ((nx*ny)^2 * size(bg.cov,1)^2 )> 5*10e8 )
+%         if ( nargout > 2 )
+%             R         = [];
+%             neg       = [];
+%             b_success = false;
+%             return;
+%         end        
+%     end
+
+    % now start the actual whitening
+    neg = repmat(bg.neg',ny*nx,1);
+    neg = neg(:);
+    p   = 1;
+  
+
   
   
 %   if ( ~isfield(lda,'b_noiseDropOut') || isempty(lda.b_noiseDropOut) )
@@ -28,7 +53,15 @@ function [R,neg] = whitenWithDropout(bg,lda,nx,ny)
 
   
   while(p~=0)
-      sig = reconstructSig(nx,ny,bg.cov,bg.dxy);
+      [sig, b_success] = reconstructSig(nx,ny,bg.cov,bg.dxy);
+      if ( ~b_success)
+            R         = [];
+            neg       = [];
+            if ( nargout > 2 )
+                b_success = false;
+            end
+            return;
+      end
 
       % drop-out like noise model, as described by Chen et al. (Marginalized Denoising Autoencoders for Domain Adaptation)
       if ( lda.b_noiseDropOut )
@@ -42,52 +75,65 @@ function [R,neg] = whitenWithDropout(bg,lda,nx,ny)
 
       [R,p] = chol(sig);
       if p ~= 0,
-        disp('Increasing lambda');
-        lda.lambda = lda.lambda+0.01;
+        %disp('Increasing lambda');
+        lda.lambda = lda.lambda*5;
         %display('Sig is not positive definite, add a larger regularizer');
         %keyboard;
       end
   end
 end
   
-function w = reconstructSig(nx,ny,ww,dxy)
+function [w, b_success] = reconstructSig(nx,ny,ww,dxy)
 % W = reconstructSig(nx,ny,ww,dxy)
 % W = n x n 
 % n = ny * nx * nf
 
+  if ( nargout > 1 )
+    b_success = true;
+  end
+
   k  = size(dxy,1);
   nf = size(ww,1);
   n  = ny*nx;  
-  w  = zeros(nf,nf,n,n);
-  
-  for x1 = 1:nx,
-    for y1 = 1:ny,
-      i1 = (x1-1)*ny + y1;
-      for i = 1:k,
-        x = dxy(i,1);
-        y = dxy(i,2);
-        x2 = x1 + x;        
-        y2 = y1 + y;
-        if x2 >= 1 && x2 <= nx && y2 >= 1 && y2 <= ny,
-          i2 = (x2-1)*ny + y2;
-          w(:,:,i1,i2) = ww(:,:,i); 
-        end
-        x2 = x1 - x;        
-        y2 = y1 - y;
-        if x2 >= 1 && x2 <= nx && y2 >= 1 && y2 <= ny,
-          i2 = (x2-1)*ny + y2; 
-          w(:,:,i1,i2) = ww(:,:,i)'; 
+  try
+      
+      w  = zeros(nf,nf,n,n);
+
+      for x1 = 1:nx,
+        for y1 = 1:ny,
+          i1 = (x1-1)*ny + y1;
+          for i = 1:k,
+            x = dxy(i,1);
+            y = dxy(i,2);
+            x2 = x1 + x;        
+            y2 = y1 + y;
+            if x2 >= 1 && x2 <= nx && y2 >= 1 && y2 <= ny,
+              i2 = (x2-1)*ny + y2;
+              w(:,:,i1,i2) = ww(:,:,i); 
+            end
+            x2 = x1 - x;        
+            y2 = y1 - y;
+            if x2 >= 1 && x2 <= nx && y2 >= 1 && y2 <= ny,
+              i2 = (x2-1)*ny + y2; 
+              w(:,:,i1,i2) = ww(:,:,i)'; 
+            end
+          end
         end
       end
-    end
+
+      % Permute [nf nf n n] to [n nf n nf]
+      w = permute(w,[3 1 4 2]);
+      w = reshape(w,n*nf,n*nf);
+
+      % Make sure returned matrix is close to symmetric
+      assert(sum(sum(abs(w - w'))) < 1e-5);
+
+      w = (w+w')/2;
+  catch err      
+      w = [];
+      if ( nargout > 1 )
+        b_success = false;
+      end
+      return;
   end
-  
-  % Permute [nf nf n n] to [n nf n nf]
-  w = permute(w,[3 1 4 2]);
-  w = reshape(w,n*nf,n*nf);
-  
-  % Make sure returned matrix is close to symmetric
-  assert(sum(sum(abs(w - w'))) < 1e-5);
-  
-  w = (w+w')/2;
 end