
Merge /home/luetz/code/nice/vislearning

Erik Rodner 13 years ago
commit b86e24518f
61 files changed with 4353 additions and 2769 deletions
  1. baselib/ColorSpace.cpp (+1 -1)
  2. baselib/ColorSpace.tcc (+96 -98)
  3. baselib/FastFilter.h (+9 -0)
  4. baselib/FastFilter.tcc (+47 -0)
  5. cbaselib/BoundingBox.cpp (+2 -1)
  6. cbaselib/CachedExample.cpp (+29 -52)
  7. cbaselib/CachedExample.h (+3 -3)
  8. cbaselib/Feature.h (+20 -19)
  9. cbaselib/ImageInfo.cpp (+12 -11)
  10. cbaselib/ImageInfo.h (+1 -1)
  11. cbaselib/LabeledFileList.cpp (+218 -218)
  12. cbaselib/LabeledFileList.h (+35 -35)
  13. cbaselib/MultiDataset.h (+28 -28)
  14. cbaselib/progs/testCachedExample.cpp (+49 -47)
  15. cbaselib/progs/testLabeledSet.cpp (+51 -57)
  16. classifier/classifiercombination/VCPreRandomForest.cpp (+2 -0)
  17. classifier/classifiercombination/VCPreRandomForest.h (+0 -2)
  18. classifier/classifierinterfaces/VCFeaturePool.cpp (+2 -0)
  19. classifier/classifierinterfaces/VCFeaturePool.h (+0 -2)
  20. classifier/fpclassifier/logisticregression/FPCSMLR.cpp (+3 -0)
  21. classifier/fpclassifier/logisticregression/FPCSMLR.h (+0 -2)
  22. classifier/fpclassifier/logisticregression/SLR.cpp (+3 -0)
  23. classifier/fpclassifier/logisticregression/SLR.h (+0 -2)
  24. classifier/fpclassifier/randomforest/DecisionNode.cpp (+3 -0)
  25. classifier/fpclassifier/randomforest/DecisionNode.h (+1 -2)
  26. classifier/fpclassifier/randomforest/FPCRandomForests.cpp (+3 -0)
  27. classifier/fpclassifier/randomforest/FPCRandomForests.h (+0 -2)
  28. classifier/genericClassifierSelection.h (+8 -6)
  29. classifier/kernelclassifier/KCGPApproxOneClass.cpp (+173 -0)
  30. classifier/kernelclassifier/KCGPApproxOneClass.h (+64 -0)
  31. classifier/vclassifier/VCDTSVM.cpp (+2 -0)
  32. classifier/vclassifier/VCDTSVM.h (+0 -2)
  33. classifier/vclassifier/VCNearestClassMean.cpp (+110 -0)
  34. classifier/vclassifier/VCNearestClassMean.h (+55 -0)
  35. classifier/vclassifier/VCOneVsAll.cpp (+3 -0)
  36. classifier/vclassifier/VCOneVsAll.h (+0 -2)
  37. features/fpfeatures/ColorHistogramFeature.cpp (+98 -102)
  38. features/fpfeatures/ColorHistogramFeature.h (+34 -37)
  39. features/fpfeatures/EOHFeature.cpp (+162 -174)
  40. features/fpfeatures/EOHFeature.h (+49 -46)
  41. features/fpfeatures/FIGradients.cpp (+57 -51)
  42. features/fpfeatures/FIGradients.h (+8 -8)
  43. features/fpfeatures/FIHistograms.cpp (+80 -80)
  44. features/fpfeatures/HOGFeature.cpp (+191 -187)
  45. features/fpfeatures/HaarFeature.cpp (+8 -4)
  46. features/fpfeatures/HistFeature.cpp (+163 -168)
  47. features/fpfeatures/HistFeature.h (+53 -58)
  48. features/fpfeatures/PixelPairFeature.cpp (+180 -173)
  49. features/fpfeatures/PixelPairFeature.h (+53 -53)
  50. features/fpfeatures/SemanticFeature.cpp (+121 -117)
  51. features/fpfeatures/SemanticFeature.h (+47 -47)
  52. features/localfeatures/LFColorWeijer.cpp (+1 -1)
  53. image/GenericImageTools.h (+2 -5)
  54. image/GenericImageTools.tcc (+66 -48)
  55. math/kernels/KernelData.cpp (+694 -763)
  56. math/pdf/tests/TestPDF.cpp (+54 -54)
  57. progs/ImagenetBinary.conf (+27 -0)
  58. progs/libdepend.inc (+9 -0)
  59. progs/testImageNetBinary.cpp (+149 -0)
  60. progs/testImageNetBinaryBruteForce.cpp (+733 -0)
  61. progs/testImageNetBinaryGPBaseline.cpp (+281 -0)

+ 1 - 1
baselib/ColorSpace.cpp

@@ -22,7 +22,7 @@ using namespace NICE;
 //bad position for this function
 void ColorSpace::ColorImagetoMultiChannelImage(  const NICE::ColorImage &imgrgb, NICE::MultiChannelImageT<double> &genimg )
 {
-  genimg.reInit( imgrgb.width(), imgrgb.height(), 3, true );
+  genimg.reInit( imgrgb.width(), imgrgb.height(), 3);
   for ( int y = 0;y < imgrgb.height();y++ )
   {
     for ( int x = 0;x < imgrgb.width();x++ )

+ 96 - 98
baselib/ColorSpace.tcc

@@ -4,111 +4,109 @@
 
 namespace OBJREC {
 
-template<class SrcPixelType,class DstPixelType>
+template<class SrcPixelType, class DstPixelType>
 void ColorSpace::convert ( NICE::MultiChannelImageT<DstPixelType> & dst, const NICE::MultiChannelImageT<SrcPixelType> & src,
-	      int dstColorSpace, int srcColorSpace, double dstM, double srcM )
+                           int dstColorSpace, int srcColorSpace, double dstM, double srcM )
 {
-    assert ( (srcColorSpace >= 0) && (srcColorSpace < NUM_COLORSPACES) );
-    assert ( (dstColorSpace >= 0) && (dstColorSpace < NUM_COLORSPACES) );
+  assert ( ( srcColorSpace >= 0 ) && ( srcColorSpace < NUM_COLORSPACES ) );
+  assert ( ( dstColorSpace >= 0 ) && ( dstColorSpace < NUM_COLORSPACES ) );
 
-    dst.reInitFrom ( src, true );
+  dst.reInitFrom ( src );
 
-    if ( (srcColorSpace == COLORSPACE_RGB) && (dstColorSpace == COLORSPACE_HSL) )
-    {
-	 assert ( dst.numChannels == 3 );
-	 assert ( src.numChannels == 3 );
+  if ( ( srcColorSpace == COLORSPACE_RGB ) && ( dstColorSpace == COLORSPACE_HSL ) )
+  {
+    assert ( dst.channels() == 3 );
+    assert ( src.channels() == 3 );
 
-	 long k = 0;
-	 for ( int y = 0 ; y < src.ysize ; y++ )
-	     for ( int x = 0 ; x < src.xsize ; x++,k++ )
-	     {
-	       double R,G,B,H,S,L;
-	       R=(double)src.data[0][k]/srcM;
-	       G=(double)src.data[1][k]/srcM;
-	       B=(double)src.data[2][k]/srcM;
-	       ColorConversion::ccRGBtoHSL(R,G,B,&H,&S,&L);
-	       dst.data[0][k] = (DstPixelType)(H*dstM);
-	       dst.data[1][k] = (DstPixelType)(S*dstM);
-	       dst.data[2][k] = (DstPixelType)(L*dstM);
-	    }
-    } 
-    else if ( (srcColorSpace == COLORSPACE_RGB) && (dstColorSpace == COLORSPACE_LAB) )
-     {
-          long k = 0;
-          for ( int y = 0 ; y < src.ysize ; y++ )
-               for ( int x = 0 ; x < src.xsize ; x++,k++ )
-          {
-               double R,G,B,X,Y,Z,L,a,b;
-               R=(double)src.data[0][k]/255.0;
-			   G=(double)src.data[1][k]/255.0;
-			   B=(double)src.data[2][k]/255.0;
-               ColorConversion::ccRGBtoXYZ(R,G,B,&X,&Y,&Z,0);
-               ColorConversion::ccXYZtoCIE_Lab(X,Y,Z,&L,&a,&b,0);
-               dst.data[0][k] = (DstPixelType)(L);
-               dst.data[1][k] = (DstPixelType)(a);
-               dst.data[2][k] = (DstPixelType)(b);
-          }
-     }
-	 else if ( (srcColorSpace == COLORSPACE_LAB) && (dstColorSpace == COLORSPACE_RGB) )
-	 {
-		 long k = 0;
-		 for ( int y = 0 ; y < src.ysize ; y++ )
-			 for ( int x = 0 ; x < src.xsize ; x++,k++ )
-		 {
-			 double R,G,B,X,Y,Z,L,a,b;
-			 L=(double)src.data[0][k];
-			 a=(double)src.data[1][k];
-			 b=(double)src.data[2][k];
-			 ColorConversion::ccCIE_LabtoXYZ(L,a,b,&X,&Y,&Z,0);
-			 ColorConversion::ccXYZtoRGB(X,Y,Z,&R,&G,&B,0);
-			 dst.data[0][k] = (DstPixelType)(R);
-			 dst.data[1][k] = (DstPixelType)(G);
-			 dst.data[2][k] = (DstPixelType)(B);
-		 }		 
-	 }
-     else if ( (srcColorSpace == COLORSPACE_RGB) && (dstColorSpace == COLORSPACE_LMS) )
-     {
-          long k = 0;
-          for ( int y = 0 ; y < src.ysize ; y++ )
-               for ( int x = 0 ; x < src.xsize ; x++,k++ )
-          {
-               double R,G,B,X,Y,Z,L,M,S;
-               R=(double)src.data[0][k]/srcM;
-               G=(double)src.data[1][k]/srcM;
-               B=(double)src.data[2][k]/srcM;
-               ColorConversion::ccRGBtoXYZ(R,G,B,&X,&Y,&Z,0);
-               ColorConversion::ccXYZtoLMS(X,Y,Z,&L,&M,&S);
-               dst.data[0][k] = (DstPixelType)(L);
-               dst.data[1][k] = (DstPixelType)(M);
-               dst.data[2][k] = (DstPixelType)(S);
-          }
-     }
-     else if ( (srcColorSpace == COLORSPACE_RGB) && (dstColorSpace == COLORSPACE_OPP) )
-{
-          long k = 0;
-          for ( int y = 0 ; y < src.ysize ; y++ )
-               for ( int x = 0 ; x < src.xsize ; x++,k++ )
-{
-               double R,G,B,X,Y,Z,L,M,S,Lum,LM,SLM;
-               R=(double)src.data[0][k]/srcM;
-               G=(double)src.data[1][k]/srcM;
-               B=(double)src.data[2][k]/srcM;
-               ColorConversion::ccRGBtoXYZ(R,G,B,&X,&Y,&Z,0);
-               ColorConversion::ccXYZtoLMS(X,Y,Z,&L,&M,&S);
-               ColorConversion::ccLMStoOPP(L,M,S,&Lum,&LM,&SLM);
-               
-               dst.data[0][k] = (DstPixelType)(Lum);
-               dst.data[1][k] = (DstPixelType)(LM);
-               dst.data[2][k] = (DstPixelType)(SLM);
+    for ( int y = 0 ; y < src.height() ; y++ )
+      for ( int x = 0 ; x < src.width() ; x++)
+      {
+        double R, G, B, H, S, L;
+        R = ( double ) src.get(x,y,0) / srcM;
+        G = ( double ) src.get(x,y,1) / srcM;
+        B = ( double ) src.get(x,y,2) / srcM;
+        ColorConversion::ccRGBtoHSL ( R, G, B, &H, &S, &L );
+        dst[0](x,y) = ( DstPixelType ) ( H * dstM );
+        dst[1](x,y) = ( DstPixelType ) ( S * dstM );
+        dst[2](x,y) = ( DstPixelType ) ( L * dstM );
+      }
+  }
+  else if ( ( srcColorSpace == COLORSPACE_RGB ) && ( dstColorSpace == COLORSPACE_LAB ) )
+  {
+    for ( int y = 0 ; y < src.height() ; y++ )
+      for ( int x = 0 ; x < src.width() ; x++ )
+      {
+        double R, G, B, X, Y, Z, L, a, b;
+        R = ( double ) src.get(x,y,0) / 255.0;
+        G = ( double ) src.get(x,y,1) / 255.0;
+        B = ( double ) src.get(x,y,2) / 255.0;
+        ColorConversion::ccRGBtoXYZ ( R, G, B, &X, &Y, &Z, 0 );
+        ColorConversion::ccXYZtoCIE_Lab ( X, Y, Z, &L, &a, &b, 0 );
+        dst[0](x,y) = ( DstPixelType ) ( L );
+        dst[1](x,y) = ( DstPixelType ) ( a );
+        dst[2](x,y) = ( DstPixelType ) ( b );
+      }
+  }
+  else if ( ( srcColorSpace == COLORSPACE_LAB ) && ( dstColorSpace == COLORSPACE_RGB ) )
+  {
+    long k = 0;
+    for ( int y = 0 ; y < src.height() ; y++ )
+      for ( int x = 0 ; x < src.width() ; x++, k++ )
+      {
+        double R, G, B, X, Y, Z, L, a, b;
+        L = ( double ) src.get(x,y,0);
+        a = ( double ) src.get(x,y,1);
+        b = ( double ) src.get(x,y,2);
+        ColorConversion::ccCIE_LabtoXYZ ( L, a, b, &X, &Y, &Z, 0 );
+        ColorConversion::ccXYZtoRGB ( X, Y, Z, &R, &G, &B, 0 );
+        dst[0](x,y) = ( DstPixelType ) ( R );
+        dst[1](x,y) = ( DstPixelType ) ( G );
+        dst[2](x,y) = ( DstPixelType ) ( B );
+      }
+  }
+  else if ( ( srcColorSpace == COLORSPACE_RGB ) && ( dstColorSpace == COLORSPACE_LMS ) )
+  {
+    long k = 0;
+    for ( int y = 0 ; y < src.height() ; y++ )
+      for ( int x = 0 ; x < src.width() ; x++, k++ )
+      {
+        double R, G, B, X, Y, Z, L, M, S;
+        R = ( double ) src.get(x,y,0) / srcM;
+        G = ( double ) src.get(x,y,1) / srcM;
+        B = ( double ) src.get(x,y,2) / srcM;
+        ColorConversion::ccRGBtoXYZ ( R, G, B, &X, &Y, &Z, 0 );
+        ColorConversion::ccXYZtoLMS ( X, Y, Z, &L, &M, &S );
+        dst[0](x,y) = ( DstPixelType ) ( L );
+        dst[1](x,y) = ( DstPixelType ) ( M );
+        dst[2](x,y) = ( DstPixelType ) ( S );
+      }
+  }
+  else if ( ( srcColorSpace == COLORSPACE_RGB ) && ( dstColorSpace == COLORSPACE_OPP ) )
+  {
+    long k = 0;
+    for ( int y = 0 ; y < src.height() ; y++ )
+      for ( int x = 0 ; x < src.width() ; x++, k++ )
+      {
+        double R, G, B, X, Y, Z, L, M, S, Lum, LM, SLM;
+        R = ( double ) src.get(x,y,0) / srcM;
+        G = ( double ) src.get(x,y,1) / srcM;
+        B = ( double ) src.get(x,y,2) / srcM;
+        ColorConversion::ccRGBtoXYZ ( R, G, B, &X, &Y, &Z, 0 );
+        ColorConversion::ccXYZtoLMS ( X, Y, Z, &L, &M, &S );
+        ColorConversion::ccLMStoOPP ( L, M, S, &Lum, &LM, &SLM );
+
+        dst[0](x,y) = ( DstPixelType ) ( Lum );
+        dst[1](x,y) = ( DstPixelType ) ( LM );
+        dst[2](x,y) = ( DstPixelType ) ( SLM );
 #ifdef DEBUG
-               fprintf(stderr,"R:%.4f G:%.4f B:%.4f X:%.4f Y:%.4f Z:%.4f L:%.4f M:%.4f S:%.4f Lum:%.4f LM:%.4f SLM:%.4f\n",R,G,B,X,Y,Z,L,M,S,Lum,LM,SLM);
+        fprintf ( stderr, "R:%.4f G:%.4f B:%.4f X:%.4f Y:%.4f Z:%.4f L:%.4f M:%.4f S:%.4f Lum:%.4f LM:%.4f SLM:%.4f\n", R, G, B, X, Y, Z, L, M, S, Lum, LM, SLM );
 #endif
-}
-}
-     else {
-          fprintf (stderr, "ColorSpace::convert(): not yet implemented - SrcColorSpace %d -> DstColorSpace %d!!\n",srcColorSpace,dstColorSpace);
-	exit(-1);
-    }
+      }
+  }
+  else {
+    fprintf ( stderr, "ColorSpace::convert(): not yet implemented - SrcColorSpace %d -> DstColorSpace %d!!\n", srcColorSpace, dstColorSpace );
+    exit ( -1 );
+  }
 
 }
 

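A minimal calling sketch for the reworked converter above (RGB to HSL, both representations scaled to [0,255]); only the ColorImagetoMultiChannelImage and convert signatures are taken from the code shown here, while the include paths and the ColorSpace:: scoping of the COLORSPACE_* constants are assumptions:

// Sketch only: convert an RGB MultiChannelImageT to HSL with the updated API.
#include "core/image/ColorImage.h"            // assumed path
#include "core/image/MultiChannelImageT.h"    // assumed path
#include "vislearning/baselib/ColorSpace.h"   // assumed path

void rgbToHslExample ( const NICE::ColorImage & imgrgb )
{
  // channel-wise copy into a double image, as done by ColorImagetoMultiChannelImage above
  NICE::MultiChannelImageT<double> rgb;
  OBJREC::ColorSpace::ColorImagetoMultiChannelImage ( imgrgb, rgb );

  // dstM = srcM = 255.0 keeps both color spaces in the [0,255] value range
  NICE::MultiChannelImageT<double> hsl;
  OBJREC::ColorSpace::convert ( hsl, rgb,
                                OBJREC::ColorSpace::COLORSPACE_HSL,
                                OBJREC::ColorSpace::COLORSPACE_RGB,
                                255.0, 255.0 );
}
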
+ 9 - 0
baselib/FastFilter.h

@@ -65,6 +65,15 @@ class FastFilter
 	template <class SrcValueType, class DstValueType>
 	static void calcGradientX ( const SrcValueType *img, int xsize, int ysize, DstValueType *d );
 
+	///lazy attempt for realizing fast histogram of oriented gradients
+	///since (neg.) double values are allowed, fast filtering based on look-up tables is no longer possible
+	template <class GrayValueType, class OrientedGradientHistogramType>
+	static void calcOrientedGradientHistogram ( const GrayValueType *gray, 
+						    int xsize, int ysize,
+						    OrientedGradientHistogramType *hog, 
+						    int numBins, 
+						    bool usesigned );
+
 };
 
 } // namespace

+ 47 - 0
baselib/FastFilter.tcc

@@ -171,3 +171,50 @@ void FastFilter::calcGradient ( const GrayValueType *gray,
     delete [] gy;
 }
 
+
+//lazy attempt for realizing fast histogram of oriented gradients
+//since (neg.) double values are allowed, fast filtering based on look-up tables is no longer possible
+template <class GrayValueType, class OrientedGradientHistogramType>
+void FastFilter::calcOrientedGradientHistogram ( const GrayValueType *gray, 
+						 int xsize, int ysize,
+						 OrientedGradientHistogramType *hog, 
+						 int numBins, 
+						 bool usesigned )
+{
+    double *gx = new double[xsize*ysize];
+    calcGradientX ( gray, xsize, ysize, gx );
+    double *gy = new double[xsize*ysize];
+    calcGradientY ( gray, xsize, ysize, gy );
+    
+    double binq = usesigned ? 2*M_PI / numBins : M_PI / numBins;
+
+    for(int i=0; i<numBins; i++) hog[i] = (OrientedGradientHistogramType) 0.0;
+
+    int k = 0;
+    for ( int y = 0 ; y < ysize ; y++ )
+	for ( int x = 0 ; x < xsize ; x++,k++ )
+	{
+	    double rx = gx[k];
+	    double ry = gy[k];
+	    //GradientMagnitudeType g  = (GradientMagnitudeType)sqrt ( rx*rx + ry*ry );
+	    GrayValueType g  = (GrayValueType) sqrt(rx*rx + ry*ry);
+
+	    double angle = atan2(ry,rx);
+
+	    if ( usesigned ) {
+		if ( angle < 0 )
+		    angle = 2*M_PI + angle;
+	    } else {
+		if ( angle < 0 )
+		    angle = M_PI + angle;
+	    }
+
+	    uint bin = (uint)(angle / binq);
+            bin = bin < numBins ? bin : numBins - 1;
+	    hog[bin]+= (OrientedGradientHistogramType) g;
+	    
+	}
+
+    delete [] gx;
+    delete [] gy;
+}

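The new calcOrientedGradientHistogram fills a caller-provided array of numBins entries with gradient-magnitude-weighted orientation counts; with usesigned == false the angles are folded into [0, pi). A minimal calling sketch, assuming FastFilter lives in namespace OBJREC and the include path follows this repository's layout; the bin count is an illustrative choice:

// Sketch only: 9-bin unsigned orientation histogram for a row-major
// gray-value buffer of size xsize*ysize, using the template added above.
#include <vector>
#include "vislearning/baselib/FastFilter.h"   // assumed path

void orientationHistogramExample ( const double *gray, int xsize, int ysize )
{
  const int numBins = 9;                      // illustrative choice, not mandated by the code
  std::vector<double> hog ( numBins, 0.0 );   // output histogram

  // usesigned = false: gradient directions are treated modulo pi
  OBJREC::FastFilter::calcOrientedGradientHistogram ( gray, xsize, ysize,
                                                      &hog[0], numBins, false );
}
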
+ 2 - 1
cbaselib/BoundingBox.cpp

@@ -132,6 +132,7 @@ BoundingBox::setID(const int &anID)
 		return;
 		/* NOTREACHED */
 	}
+	id_ = anID;
 }
 
 //! returns top left coordinate of the bounding box
@@ -191,7 +192,7 @@ BoundingBox::isValid() const
 		bottom_right_.x < 0 ||
 		bottom_right_.y < 0 ||
 		bottom_right_.y <= top_left_.y ||
-		top_left_.x <= bottom_right_.x)
+		top_left_.x >= bottom_right_.x)
 	{
 		return false;
 		/* NOTREACHED */

+ 29 - 52
cbaselib/CachedExample.cpp

@@ -59,15 +59,9 @@ CachedExample::CachedExample ( const NICE::Image & _img )
 
   oxsize = _img.width();
   oysize = _img.height();
-  int *gray = new int [ oxsize*oysize ];
-  int k = 0;
-  for ( int y = 0 ; y < oysize ; y++ )
-    for ( int x = 0 ; x < oxsize ; x++, k++ )
-      gray[k] = _img.getPixel ( x, y );
-
-  ichannels[I_GRAYVALUES].reInit ( oxsize, oysize, 1, false );
-  ichannels[I_GRAYVALUES].setImage ( gray, oxsize, oysize, 0 );
-
+  
+  ichannels[I_GRAYVALUES].freeData();
+  ichannels[I_GRAYVALUES].addChannel(_img);
   hasColorInformation = false;
 }
 
@@ -87,31 +81,18 @@ CachedExample::CachedExample ( const NICE::ColorImage & img, bool disableGrayCon
     NICE::Image imggray;
     ICETools::calcGrayImage ( img, imggray );
 
-    ichannels[I_GRAYVALUES].reInit ( oxsize, oysize, 1, true );
-    int *gray = ichannels[I_GRAYVALUES].data[0];
-    int k = 0;
-    for ( int y = 0 ; y < oysize ; y++ )
-      for ( int x = 0 ; x < oxsize ; x++, k++ )
-        // refactor-nice.pl: check this substitution
-        // old: gray[k] = GetVal(imggray,x,y);
-        gray[k] = imggray.getPixel ( x, y );
+    ichannels[I_GRAYVALUES].freeData();
+    ichannels[I_GRAYVALUES].addChannel(imggray);
   }
 
-  ichannels[I_COLOR].reInit ( oxsize, oysize, 3, true );
+  ichannels[I_COLOR].reInit ( oxsize, oysize, 3);
 
-  long int k = 0;
   for ( int y = 0 ; y < oysize ; y++ )
-    for ( int x = 0 ; x < oxsize ; x++, k++ )
+    for ( int x = 0 ; x < oxsize ; x++ )
     {
-      // refactor-nice.pl: check this substitution
-      // old: ichannels[I_COLOR].data[0][k] = GetVal(img.RedImage(), x, y);
-      ichannels[I_COLOR].data[0][k] = img.getPixel ( x, y, 0 );
-      // refactor-nice.pl: check this substitution
-      // old: ichannels[I_COLOR].data[1][k] = GetVal(img.GreenImage(), x, y);
-      ichannels[I_COLOR].data[1][k] = img.getPixel ( x, y, 1 );
-      // refactor-nice.pl: check this substitution
-      // old: ichannels[I_COLOR].data[2][k] = GetVal(img.BlueImage(), x, y);
-      ichannels[I_COLOR].data[2][k] = img.getPixel ( x, y, 2 );
+      ichannels[I_COLOR](x,y,0) = img.getPixel ( x, y, 0 );
+      ichannels[I_COLOR](x,y,1) = img.getPixel ( x, y, 1 );
+      ichannels[I_COLOR](x,y,2) = img.getPixel ( x, y, 2 );
     }
 
   hasColorInformation = true;
@@ -173,12 +154,8 @@ void CachedExample::readImageData ()
   oxsize = imggray.width();
   oysize = imggray.height();
 
-  ichannels[I_GRAYVALUES].reInit ( oxsize, oysize, 1, true );
-  int *gray = ichannels[I_GRAYVALUES].data[0];
-  int k = 0;
-  for ( int y = 0 ; y < oysize ; y++ )
-    for ( int x = 0 ; x < oxsize ; x++, k++ )
-      gray[k] = imggray.getPixel ( x, y );
+  ichannels[I_GRAYVALUES].freeData();
+  ichannels[I_GRAYVALUES].addChannel(imggray);
 }
 
 void CachedExample::readImageDataRGB ()
@@ -199,40 +176,40 @@ void CachedExample::readImageDataRGB ()
 
   hasColorInformation = true;
 
-  ichannels[I_COLOR].reInit ( oxsize, oysize, 3, true );
-
-  long k = 0;
+  ichannels[I_COLOR].reInit ( oxsize, oysize, 3);
+  
   for ( int y = 0 ; y < oysize ; y++ )
-    for ( int x = 0 ; x < oxsize ; x++, k++ )
+    for ( int x = 0 ; x < oxsize ; x++)
     {
-      ichannels[I_COLOR].data[0][k] = img.getPixel ( x, y, 0 );
-      ichannels[I_COLOR].data[1][k] = img.getPixel ( x, y, 1 );
-      ichannels[I_COLOR].data[2][k] = img.getPixel ( x, y, 2 );
+      ichannels[I_COLOR](x,y,0) = img.getPixel ( x, y, 0 );
+      ichannels[I_COLOR](x,y,1) = img.getPixel ( x, y, 1 );
+      ichannels[I_COLOR](x,y,2) = img.getPixel ( x, y, 2 );
     }
 }
 
 void CachedExample::calcIntegralImage ()
 {
-  if ( ichannels[I_GRAYVALUES].xsize == 0 )
+  if ( ichannels[I_GRAYVALUES].width() == 0 )
   {
     readImageData ();
-    if ( ichannels[I_GRAYVALUES].xsize == 0 )
+    if ( ichannels[I_GRAYVALUES].width() == 0 )
     {
       fprintf ( stderr, "CachedExample::getChannel: unable to recover data channel\n" );
       exit ( -1 );
     }
   }
 
-  lchannels[L_INTEGRALIMAGE].reInit ( ichannels[I_GRAYVALUES].xsize,
-                                      ichannels[I_GRAYVALUES].ysize,
-                                      1, true );
+  lchannels[L_INTEGRALIMAGE].reInit ( ichannels[I_GRAYVALUES].width(),
+                                      ichannels[I_GRAYVALUES].height(),
+                                      1);
 
-  GenericImageTools::calcIntegralImage (
-    lchannels[L_INTEGRALIMAGE].data[0],
-    ichannels[I_GRAYVALUES].data[0],
-    ichannels[I_GRAYVALUES].xsize,
-    ichannels[I_GRAYVALUES].ysize );
+  ImageT<long int> tmp = lchannels[L_INTEGRALIMAGE][0];
 
+  GenericImageTools::calcIntegralImage (
+    tmp,
+    ichannels[I_GRAYVALUES][0],
+    ichannels[I_GRAYVALUES].width(),
+    ichannels[I_GRAYVALUES].height() );
 }
 
 void CachedExample::buildIntegralSV ( int svchannel,

+ 3 - 3
cbaselib/CachedExample.h

@@ -220,7 +220,7 @@ inline NICE::MultiChannelImageT<double> & CachedExample::getDChannel ( int chann
 {
   assert ( ( channel >= 0 ) && ( channel < D_NUMCHANNELS ) );
 
-  if ( dchannels[channel].data == NULL )
+  if ( dchannels[channel].channels() == 0 )
   {
     std::map<int, std::string>::const_iterator j = dtemps.find ( channel );
     if ( j == dtemps.end() )
@@ -248,7 +248,7 @@ inline NICE::MultiChannelImageT<int> & CachedExample::getIChannel ( int channel
 {
   assert ( ( channel >= 0 ) && ( channel < I_NUMCHANNELS ) );
 
-  if ( ( ichannels[channel].data == NULL ) )
+  if ( ( ichannels[channel].channels() == 0 ) )
   {
     if ( ( imgfn != "" ) && ( channel == I_GRAYVALUES ) )
     {
@@ -282,7 +282,7 @@ inline NICE::MultiChannelImageT<long> & CachedExample::getLChannel ( int channel
 {
   assert ( ( channel >= 0 ) && ( channel < L_NUMCHANNELS ) );
 
-  if ( lchannels[channel].data == NULL )
+  if ( lchannels[channel].channels() == 0 )
   {
     std::map<int, std::string>::const_iterator j = ltemps.find ( channel );
     if ( j == ltemps.end() )

+ 20 - 19
cbaselib/Feature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file Feature.h
 * @brief abstraction of a feature
 * @author Erik Rodner
@@ -38,29 +38,29 @@ class FeatureStorageUnsorted : public std::map< int, FeatureValuesUnsorted > {};
 class Feature : public NICE::Persistent
 {
 
-    protected:
+protected:
 
-    public:
-  
-	/** simple constructor */
-	Feature();
-      
-	/** simple destructor */
-	virtual ~Feature();
+public:
 
-	virtual double val( const Example *example ) const = 0;
+    /** simple constructor */
+    Feature();
 
-	virtual Feature *clone() const = 0;
+    /** simple destructor */
+    virtual ~Feature();
 
-	virtual void explode ( FeaturePool & featurePool, bool variableWindow = true ) const = 0;
+    virtual double val( const Example *example ) const = 0;
 
-	virtual void calcFeatureValues ( const Examples & examples,
-				    std::vector<int> & examples_selection,
-				    FeatureValuesUnsorted & values ) const;
-    
-        virtual void calcFeatureValues ( const Examples & examples,
-				    std::vector<int> & examples_selection,
-				    FeatureValues & values ) const;
+    virtual Feature *clone() const = 0;
+
+    virtual void explode ( FeaturePool & featurePool, bool variableWindow = true ) const = 0;
+
+    virtual void calcFeatureValues ( const Examples & examples,
+                                     std::vector<int> & examples_selection,
+                                     FeatureValuesUnsorted & values ) const;
+
+    virtual void calcFeatureValues ( const Examples & examples,
+                                     std::vector<int> & examples_selection,
+                                     FeatureValues & values ) const;
 
 };
 
@@ -70,3 +70,4 @@ class Feature : public NICE::Persistent
 #undef ROADWORKS
 
 #endif
+

+ 12 - 11
cbaselib/ImageInfo.cpp

@@ -99,20 +99,18 @@ ImageInfo::loadImageInfo(const string &aFilename)
 			/* path to the segmented image */
 			if (element.tagName() == "segmented") {
 				string_buffer = element.text();
-				if (string_buffer.isEmpty()) {
-					continue;
+				if ( !string_buffer.isEmpty() ) {
+					QByteArray array = string_buffer.toAscii();
+					segmented_image_path_ = string(array.data());
 				}
-				QByteArray array = string_buffer.toAscii();
-				segmented_image_path_ = string(array.data());
 			}
 			/* image description */
 			else if (element.tagName() == "description") {
 				string_buffer = element.text();
-				if (string_buffer.isEmpty()) {
-					continue;
+				if ( !string_buffer.isEmpty()) {
+					QByteArray array = string_buffer.toAscii();
+					image_description_ = string(array.data());
 				}
-				QByteArray array = string_buffer.toAscii();
-				image_description_ = string(array.data());
 			}
 			/* tags */
 			else if (element.tagName() == "tags") {
@@ -157,8 +155,8 @@ ImageInfo::loadImageInfo(const string &aFilename)
 					string_buffer = subElement.text();
 
 					if (subElement.tagName() == "bbox") {
-						BoundingBox bbox = BBoxFromData(&string_buffer);
-						bbox.setID(id);
+						BoundingBox bbox = BBoxFromData(&string_buffer, id);
+						//bbox.setID(id);
 						bboxes_.push_back(bbox);
 					}
 					if (subElement.tagName() == "poly") {
@@ -320,10 +318,13 @@ ImageInfo::loadCategoryInfo(QDomElement *anElement)
  */
 BoundingBox
 ImageInfo::BBoxFromData(
-	QString *aBBoxData
+	QString *aBBoxData,
+	int &id
 )
 {
 	BoundingBox bbox;
+	bbox.setID(id);
+
 	QString buffer;
 	int startPos = 0;
 	bool ok = 1;

+ 1 - 1
cbaselib/ImageInfo.h

@@ -40,7 +40,7 @@ class ImageInfo
 #ifdef NICE_USELIB_QT4_XML
 
     Polygon polyFromData ( QString *aPolyData );
-    BoundingBox BBoxFromData ( QString *aBBoxData );
+    BoundingBox BBoxFromData ( QString *aBBoxData, int &id );
     void loadLegendFromElement ( QDomElement *anElement );
     bool loadCategoryInfo ( QDomElement *anElement );
     NICE::ImageT< unsigned int > imageTFromData (

+ 218 - 218
cbaselib/LabeledFileList.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file LabeledFileList.cpp
 * @brief reads images from directory
 * @author Erik Rodner
@@ -22,9 +22,9 @@ using namespace OBJREC;
 using namespace std;
 using namespace NICE;
 
-LabeledFileList::LabeledFileList() 
+LabeledFileList::LabeledFileList()
 {
-    debug_dataset = false;
+  debug_dataset = false;
 }
 
 LabeledFileList::~LabeledFileList()
@@ -33,247 +33,247 @@ LabeledFileList::~LabeledFileList()
 
 
 LocalizationResult *LabeledFileList::getLocalizationInfo ( const ClassNames & classnames,
-					    int classno,
-					    const std::string & file,
-					    const Config & conf ) const
+    int classno,
+    const std::string & file,
+    const Config & conf ) const
 {
-    /*
-    localization_pattern = image
-    localization_subst   = mask
-    localization_format  = image
-    */
-    std::string format = conf.gS("main", "localization_format", "unknown");
-    if ( format == "unknown" )
-	return NULL;
-
-    std::string pattern = conf.gS("main", "localization_pattern" );
-    std::string subst   = conf.gS("main", "localization_subst" );
-
-    std::string lfile = file;
-    if ( ! StringTools::regexSubstitute ( lfile, pattern, subst ) )
-    {
-	fprintf (stderr, "Unable to substitute using pattern #%s# and string #%s#\n",
-	    pattern.c_str(), lfile.c_str() );
-	exit(-1);
+  /*
+  localization_pattern = image
+  localization_subst   = mask
+  localization_format  = image
+  */
+  std::string format = conf.gS ( "main", "localization_format", "unknown" );
+  if ( format == "unknown" )
+    return NULL;
+
+  std::string pattern = conf.gS ( "main", "localization_pattern" );
+  std::string subst   = conf.gS ( "main", "localization_subst" );
+
+  std::string lfile = file;
+  if ( ! StringTools::regexSubstitute ( lfile, pattern, subst ) )
+  {
+    fprintf ( stderr, "Unable to substitute using pattern #%s# and string #%s#\n",
+              pattern.c_str(), lfile.c_str() );
+    exit ( -1 );
+  }
+
+  if ( ! FileMgt::fileExists ( lfile ) ) return NULL;
+  if ( debug_dataset )
+  {
+    fprintf ( stderr, "LabeledFileList: reading localization information %s\n", lfile.c_str() );
+  }
+
+  LocalizationResult *lr = NULL;
+
+  if ( format == "image" )
+  {
+    NICE::Image mask;
+    try {
+      mask.read ( lfile );
+    } catch ( ImageException & ) {
+      fprintf ( stderr, "WARNING: unable to open file %s (no localization info provided)\n",
+                lfile.c_str() );
+      return NULL;
     }
 
-    if ( ! FileMgt::fileExists(lfile) ) return NULL;
-    if ( debug_dataset )
-    {
-	fprintf (stderr, "LabeledFileList: reading localization information %s\n", lfile.c_str() );
+    lr = new LocalizationResult ( &classnames, mask, classno );
+
+  } else if ( format == "imagergb" ) {
+    NICE::ColorImage mask;
+    try {
+      mask.read ( lfile );
+    } catch ( ImageException &e ) {
+      fprintf ( stderr, "WARNING: unable to open file %s (no localization info provided)\n",
+                lfile.c_str() );
+      fprintf ( stderr, "Error: %s\n", e.what() );
+      return NULL;
     }
+    lr = new LocalizationResult ( &classnames, mask );
 
-    LocalizationResult *lr = NULL;
+  } else if ( format == "polygon" ) {
+    lr = new LocalizationResult ( &classnames );
 
-    if ( format == "image" )
-    {
-		NICE::Image mask;
-		try {
-			mask.read(lfile);
-		} catch (ImageException &) {
-			fprintf (stderr, "WARNING: unable to open file %s (no localization info provided)\n",
-			lfile.c_str() );
-			return NULL;
-		}
-		
-		lr = new LocalizationResult ( &classnames, mask, classno );
-
-    } else if ( format == "imagergb" ) {
-		NICE::ColorImage mask;
-		try {
-			mask.read( lfile );
-		} catch (ImageException &e) {
-			fprintf (stderr, "WARNING: unable to open file %s (no localization info provided)\n",
-			lfile.c_str() );
-			fprintf (stderr, "Error: %s\n", e.what() );
-			return NULL;
-		}
-		lr = new LocalizationResult ( &classnames, mask );
-
-    } else if ( format == "polygon" ) {
-		lr = new LocalizationResult ( &classnames );
-
-		lr->read ( lfile, LocalizationResult::FILEFORMAT_POLYGON );
-
-		if ( debug_dataset )
-		   fprintf (stderr, "LabeledFileList: object localization %d\n", (int)lr->size() );
-		} else {
-			fthrow(Exception, "Localization format not yet supported !!\n");
-		}
-   
-	if ( debug_dataset )
-	    if ( lr != NULL )
-			fprintf (stderr, "%s (%d objects)\n", lfile.c_str(), (int)lr->size() );
-
-    return lr;
+    lr->read ( lfile, LocalizationResult::FILEFORMAT_POLYGON );
+
+    if ( debug_dataset )
+      fprintf ( stderr, "LabeledFileList: object localization %d\n", ( int ) lr->size() );
+  } else {
+    fthrow ( Exception, "Localization format not yet supported !!\n" );
+  }
+
+  if ( debug_dataset )
+    if ( lr != NULL )
+      fprintf ( stderr, "%s (%d objects)\n", lfile.c_str(), ( int ) lr->size() );
+
+  return lr;
 }
 
 void LabeledFileList::getFromPattern (
-    const std::string & dir,
-    const Config & datasetconf,
-    const ClassNames & classnames, 
-    LabeledSet & ls,
-    bool localizationInfoDisabled ) const
+  const std::string & dir,
+  const Config & datasetconf,
+  const ClassNames & classnames,
+  LabeledSet & ls,
+  bool localizationInfoDisabled ) const
 {
-    std::string filemask;
-    
-    if ( dir.substr(dir.length()-1,1) != "/" )
-		filemask = dir + "/" + datasetconf.gS("main", "pattern");
-    else
-		filemask = dir + datasetconf.gS("main", "pattern");
-
-    std::vector<string> files;
-
-    int classnameField = datasetconf.gI("main", "classname_field", 1);
-    std::string fixedClassname = datasetconf.gS("main", "fixed_classname", "");
-    
-    files.clear();
-    FileMgt::DirectoryRecursive ( files, dir );
-    fprintf (stderr, "LabeledFileList: Files: %d\n", (int)files.size());
-
-    sort ( files.begin(), files.end() );
-
-    for ( vector<string>::const_iterator i  = files.begin();
-					 i != files.end(); 
-					 i++ ) 
+  std::string filemask;
+
+  if ( dir.substr ( dir.length() - 1, 1 ) != "/" )
+    filemask = dir + "/" + datasetconf.gS ( "main", "pattern" );
+  else
+    filemask = dir + datasetconf.gS ( "main", "pattern" );
+
+  std::vector<string> files;
+
+  int classnameField = datasetconf.gI ( "main", "classname_field", 1 );
+  std::string fixedClassname = datasetconf.gS ( "main", "fixed_classname", "" );
+
+  files.clear();
+  FileMgt::DirectoryRecursive ( files, dir );
+  fprintf ( stderr, "LabeledFileList: Files: %d\n", ( int ) files.size() );
+
+  sort ( files.begin(), files.end() );
+
+  for ( vector<string>::const_iterator i  = files.begin();
+        i != files.end();
+        i++ )
+  {
+    vector<string> submatches;
+    // refactor-nice.pl: check this substitution
+    // old: const string & file = *i;
+    const std::string & file = *i;
+    if ( debug_dataset )
+      fprintf ( stderr, "LabeledFileList: next file: %s\n", file.c_str() );
+
+    bool match = StringTools::regexMatch ( file, filemask, submatches );
+
+    if ( ( fixedClassname == "" ) && ( ( int ) submatches.size() <= classnameField ) ) match = false;
+
+    if ( ! match )
     {
-		vector<string> submatches;
-		// refactor-nice.pl: check this substitution
-		// old: const string & file = *i;
-		const std::string & file = *i;
-		if ( debug_dataset ) 
-			fprintf (stderr, "LabeledFileList: next file: %s\n", file.c_str() );
-
-		bool match = StringTools::regexMatch ( file, filemask, submatches );
-
-		if ( (fixedClassname == "") && ((int)submatches.size() <= classnameField) ) match = false;
-
-		if ( ! match  )
-		{
-			if ( debug_dataset )
-			fprintf (stderr, "LabeledFileList: WARNING: %s does not match filemask: %s!!\n", file.c_str(), filemask.c_str() );
-		} else {
-			std::string classcode = ( fixedClassname == "" ) ? submatches[classnameField] : fixedClassname;
-
-			if ( classnames.existsClassCode(classcode) ) {
-				int classno = classnames.classno(classcode);
-				LocalizationResult *lr  = NULL;
-				
-				if ( ! localizationInfoDisabled )
-					lr = getLocalizationInfo ( 
-					classnames, classno, file, datasetconf);
-
-				if ( debug_dataset )
-					fprintf (stderr, "LabeledFileList: LabeledSet: add %s (%d)\n", file.c_str(), classno );
-				if ( lr == NULL ) 
-				{
-					ls.add ( classno, new ImageInfo(file) );
-				} else {
-					ls.add ( classno, new ImageInfo(file, lr) );
-					if ( debug_dataset )
-					fprintf (stderr, "LabeledFileList: LocalizationResult added!\n");
-
-				}
-			} else {
-				if ( debug_dataset )
-				{
-					for ( vector<string>::iterator i = submatches.begin();
-								   i != submatches.end();
-								   i++ )
-					{
-						fprintf (stderr, "LabeledFileList: submatch: %s\n", i->c_str() );
-					}
-					fprintf (stderr, "LabeledFileList: WARNING: code %s ignored !\n", classcode.c_str() );
-				}
-			}
-		}
-		if ( debug_dataset )
-			fprintf (stderr, "LabeledFileList: filename processed\n");
-	}
-
-    cerr << "directory " << dir << " loaded..." << endl;
-    ls.printInformation();
+      if ( debug_dataset )
+        fprintf ( stderr, "LabeledFileList: WARNING: %s does not match filemask: %s!!\n", file.c_str(), filemask.c_str() );
+    } else {
+      std::string classcode = ( fixedClassname == "" ) ? submatches[classnameField] : fixedClassname;
+
+      if ( classnames.existsClassCode ( classcode ) ) {
+        int classno = classnames.classno ( classcode );
+        LocalizationResult *lr  = NULL;
+
+        if ( ! localizationInfoDisabled )
+          lr = getLocalizationInfo (
+                 classnames, classno, file, datasetconf );
+
+        if ( debug_dataset )
+          fprintf ( stderr, "LabeledFileList: LabeledSet: add %s (%d)\n", file.c_str(), classno );
+        if ( lr == NULL )
+        {
+          ls.add ( classno, new ImageInfo ( file ) );
+        } else {
+          ls.add ( classno, new ImageInfo ( file, lr ) );
+          if ( debug_dataset )
+            fprintf ( stderr, "LabeledFileList: LocalizationResult added!\n" );
+
+        }
+      } else {
+        if ( debug_dataset )
+        {
+          for ( vector<string>::iterator i = submatches.begin();
+                i != submatches.end();
+                i++ )
+          {
+            fprintf ( stderr, "LabeledFileList: submatch: %s\n", i->c_str() );
+          }
+          fprintf ( stderr, "LabeledFileList: WARNING: code %s ignored !\n", classcode.c_str() );
+        }
+      }
+    }
+    if ( debug_dataset )
+      fprintf ( stderr, "LabeledFileList: filename processed\n" );
+  }
+
+  cerr << "directory " << dir << " loaded..." << endl;
+  ls.printInformation();
 
 }
 
 void LabeledFileList::getFromList (
-    const std::string & filelist,
-    const Config & datasetconf,
-    const ClassNames & classnames, 
-    LabeledSet & ls,
-    bool localizationInfoDisabled) const
+  const std::string & filelist,
+  const Config & datasetconf,
+  const ClassNames & classnames,
+  LabeledSet & ls,
+  bool localizationInfoDisabled ) const
 {
-	if ( debug_dataset ) 
-		fprintf (stderr, "Reading file list: %s\n", filelist.c_str() );
+  if ( debug_dataset )
+    fprintf ( stderr, "Reading file list: %s\n", filelist.c_str() );
 
-    ifstream ifs ( filelist.c_str(), ios::in );
-	if ( ! ifs.good() ) 
-		fthrow(IOException, "File list " << filelist << " not found !");
-    
-    std::string fixedClassname = datasetconf.gS("main", "fixed_classname", "");
+  ifstream ifs ( filelist.c_str(), ios::in );
+  if ( ! ifs.good() )
+    fthrow ( IOException, "File list " << filelist << " not found !" );
 
-    while ( ! ifs.eof() )
-    {
-		std::string classcode;
-		std::string file;
-
-		if ( fixedClassname == "" ) {
-			if ( ! (ifs >> classcode) ) break;
-		} else {
-			classcode = fixedClassname;
-		}
-
-		if ( ! (ifs >> file) ) break;
-
-		if ( classnames.existsClassCode(classcode) ) {
-			int classno = classnames.classno(classcode);
-			
-			LocalizationResult *lr  = NULL;
-			
-			if ( ! localizationInfoDisabled )
-				lr = getLocalizationInfo ( classnames, classno, file, datasetconf);
-
-			if ( debug_dataset )
-				cerr << "Adding file " << file << " with classno " << classno << endl;
-
-			if ( lr == NULL ) 
-				ls.add ( classno, new ImageInfo(file) );
-			else
-				ls.add ( classno, new ImageInfo(file, lr) );
-		} else {
-			if ( debug_dataset )
-				fprintf (stderr, "WARNING: code %s ignored !\n", classcode.c_str() );
-		}
+  std::string fixedClassname = datasetconf.gS ( "main", "fixed_classname", "" );
+
+  while ( ! ifs.eof() )
+  {
+    std::string classcode;
+    std::string file;
 
+    if ( fixedClassname == "" ) {
+      if ( ! ( ifs >> classcode ) ) break;
+    } else {
+      classcode = fixedClassname;
     }
 
-	if ( debug_dataset )
-	    ls.printInformation();
-}
+    if ( ! ( ifs >> file ) ) break;
 
+    if ( classnames.existsClassCode ( classcode ) ) {
+      int classno = classnames.classno ( classcode );
 
-void LabeledFileList::get ( 
-    const std::string & dir,
-    const Config & datasetconf,
-    const ClassNames & classnames, 
-    LabeledSet & ls, 
-    bool localizationInfoDisabled,
-    bool debugDataset ) 
-{
-    std::string pattern = datasetconf.gS("main", "pattern", "");
-    std::string filelist = datasetconf.gS("main", "filelist", "");
-    this->debug_dataset = debugDataset;
+      LocalizationResult *lr  = NULL;
 
-    if ( pattern.size() > 0 ) 
-		getFromPattern ( dir, datasetconf, classnames, ls, localizationInfoDisabled );
-    else if ( filelist.size() > 0 ) {
+      if ( ! localizationInfoDisabled )
+        lr = getLocalizationInfo ( classnames, classno, file, datasetconf );
 
-		std::string cfilelist = datasetconf.gS("main", "filelist");
-		std::string filelist = ( cfilelist.substr(0,1) == "/" ) ? cfilelist : dir + "/" + cfilelist;
+      if ( debug_dataset )
+        cerr << "Adding file " << file << " with classno " << classno << endl;
 
-		getFromList ( filelist, datasetconf, classnames, ls, localizationInfoDisabled );
+      if ( lr == NULL )
+        ls.add ( classno, new ImageInfo ( file ) );
+      else
+        ls.add ( classno, new ImageInfo ( file, lr ) );
     } else {
-		fprintf (stderr, "LabeledFileList: Unable to obtain labeled file list\n");
-		exit(-1);
+      if ( debug_dataset )
+        fprintf ( stderr, "WARNING: code %s ignored !\n", classcode.c_str() );
     }
+
+  }
+
+  if ( debug_dataset )
+    ls.printInformation();
+}
+
+
+void LabeledFileList::get (
+  const std::string & dir,
+  const Config & datasetconf,
+  const ClassNames & classnames,
+  LabeledSet & ls,
+  bool localizationInfoDisabled,
+  bool debugDataset )
+{
+  std::string pattern = datasetconf.gS ( "main", "pattern", "" );
+  std::string filelist = datasetconf.gS ( "main", "filelist", "" );
+  this->debug_dataset = debugDataset;
+
+  if ( pattern.size() > 0 )
+    getFromPattern ( dir, datasetconf, classnames, ls, localizationInfoDisabled );
+  else if ( filelist.size() > 0 ) {
+
+    std::string cfilelist = datasetconf.gS ( "main", "filelist" );
+    std::string filelist = ( cfilelist.substr ( 0, 1 ) == "/" ) ? cfilelist : dir + "/" + cfilelist;
+
+    getFromList ( filelist, datasetconf, classnames, ls, localizationInfoDisabled );
+  } else {
+    fprintf ( stderr, "LabeledFileList: Unable to obtain labeled file list\n" );
+    exit ( -1 );
+  }
 }

+ 35 - 35
cbaselib/LabeledFileList.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file LabeledFileList.h
 * @brief reads images from directory
 * @author Erik Rodner
@@ -21,40 +21,40 @@ namespace OBJREC {
 /** reads images from directory */
 class LabeledFileList
 {
-    private:
-	bool debug_dataset;
-
-    public:
-  
-	/** simple constructor */
-	LabeledFileList(); 
-      
-	/** simple destructor */
-	virtual ~LabeledFileList();
- 
-	LocalizationResult *getLocalizationInfo ( const ClassNames & classnames,
-					    int classno,
-					    const std::string & file,
-					    const NICE::Config & conf ) const;
-
-	void get ( const std::string & dir,
-		   const NICE::Config & datasetconf,
-		   const ClassNames & classnames, 
-		   LabeledSet & ls,
-		   bool localizationInfoDisabled = false,
-		   bool debugDataset = false );
-		   
-	void getFromPattern ( const std::string & dir,
-		   const NICE::Config & datasetconf,
-		   const ClassNames & classnames, 
-		   LabeledSet & ls,
-		   bool localizationInfoDisabled = false ) const;
-
-	void getFromList ( const std::string & filelist,
-		   const NICE::Config & datasetconf,
-		   const ClassNames & classnames, 
-		   LabeledSet & ls,
-		   bool localizationInfoDisabled = false ) const;
+  private:
+    bool debug_dataset;
+
+  public:
+
+    /** simple constructor */
+    LabeledFileList();
+
+    /** simple destructor */
+    virtual ~LabeledFileList();
+
+    LocalizationResult *getLocalizationInfo ( const ClassNames & classnames,
+        int classno,
+        const std::string & file,
+        const NICE::Config & conf ) const;
+
+    void get ( const std::string & dir,
+               const NICE::Config & datasetconf,
+               const ClassNames & classnames,
+               LabeledSet & ls,
+               bool localizationInfoDisabled = false,
+               bool debugDataset = false );
+
+    void getFromPattern ( const std::string & dir,
+                          const NICE::Config & datasetconf,
+                          const ClassNames & classnames,
+                          LabeledSet & ls,
+                          bool localizationInfoDisabled = false ) const;
+
+    void getFromList ( const std::string & filelist,
+                       const NICE::Config & datasetconf,
+                       const ClassNames & classnames,
+                       LabeledSet & ls,
+                       bool localizationInfoDisabled = false ) const;
 
 };
 

+ 28 - 28
cbaselib/MultiDataset.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file MultiDataset.h
 * @brief multiple datasets
 * @author Erik Rodner
@@ -27,33 +27,33 @@ namespace OBJREC {
 class MultiDataset
 {
 
-    protected:
-	LabeledFileList lfl;
-	std::map<std::string, ClassNames> classnames;
-	std::map<std::string, LabeledSet> datasets;
-
-	void selectExamples ( const std::string & examples_command,
-		      const LabeledSet & base,
-		      LabeledSet & positives,
-		      LabeledSet & negatives,
-		      const ClassNames & cn ) const;
-
-    public:
-  
-	/** simple constructor */
-	MultiDataset( const NICE::Config *conf );
-      
-	/** simple destructor */
-	virtual ~MultiDataset();
-    
-  /** access class information, e.g., md.getClassNames("train") */
-	const ClassNames & getClassNames ( const std::string & key ) const;
-
-  /** access stored dataset by name, e.g., md["train"], if you have a [train] section
-    * in your config file */
-	const LabeledSet * operator[] ( const std::string & key ) const;
-
-	const LabeledSet * at ( const std::string & key ) const;
+protected:
+    LabeledFileList lfl;
+    std::map<std::string, ClassNames> classnames;
+    std::map<std::string, LabeledSet> datasets;
+
+    void selectExamples ( const std::string & examples_command,
+                          const LabeledSet & base,
+                          LabeledSet & positives,
+                          LabeledSet & negatives,
+                          const ClassNames & cn ) const;
+
+public:
+
+    /** simple constructor */
+    MultiDataset( const NICE::Config *conf );
+
+    /** simple destructor */
+    virtual ~MultiDataset();
+
+    /** access class information, e.g., md.getClassNames("train") */
+    const ClassNames & getClassNames ( const std::string & key ) const;
+
+    /** access stored dataset by name, e.g., md["train"], if you have a [train] section
+      * in your config file */
+    const LabeledSet * operator[] ( const std::string & key ) const;
+
+    const LabeledSet * at ( const std::string & key ) const;
 
 };
 

+ 49 - 47
cbaselib/progs/testCachedExample.cpp

@@ -1,9 +1,8 @@
-/** 
+/**
 * @file testCachedExample.cpp
 * @brief test cached example implementation
 * @author Erik Rodner
 * @date 09/12/2008
-
 */
 
 #include <core/imagedisplay/ImageDisplay.h>
@@ -19,61 +18,64 @@ using namespace NICE;
 using namespace std;
 
 
-int main (int argc, char **argv)
-{   
-    std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+int main ( int argc, char **argv )
+{
+  std::set_terminate ( __gnu_cxx::__verbose_terminate_handler );
 
-    char configfile [300];
+  char configfile [300];
 
-    struct CmdLineOption options[] = {
-	{"config", "use config file", NULL, "%s", configfile},
-	{NULL, NULL, NULL, NULL, NULL} 
-    };
-    int ret;
-    char *more_options[argc];
-    ret = parse_arguments( argc, (const char**)argv, options, more_options);
+  struct CmdLineOption options[] = {
+    {"config", "use config file", NULL, "%s", configfile},
+    {NULL, NULL, NULL, NULL, NULL}
+  };
+  int ret;
+  char *more_options[argc];
+  ret = parse_arguments ( argc, ( const char** ) argv, options, more_options );
 
-    if ( ret != 0 )
-    {
-	if ( ret != 1 ) fprintf (stderr, "Error parsing command line !\n");
-	exit (-1);
-    }
+  if ( ret != 0 )
+  {
+    if ( ret != 1 ) fprintf ( stderr, "Error parsing command line !\n" );
+    exit ( -1 );
+  }
 
-    Config conf ( configfile );
-    
-    int i = 0;
-    while ( more_options[i] != NULL )
-    {
+  Config conf ( configfile );
 
-	std::string filename ( more_options[i] );
-	fprintf (stderr, "Filename: %s\n", filename.c_str() );
-	CachedExample ce ( filename );
+  int i = 0;
+  while ( more_options[i] != NULL )
+  {
 
-	NICE::MultiChannelImageT<int> & img = ce.getIChannel(CachedExample::I_COLOR);
-	NICE::MultiChannelImageT<double> & imgc = ce.getDChannel(CachedExample::D_INTEGRALCOLOR);
+    std::string filename ( more_options[i] );
+    fprintf ( stderr, "Filename: %s\n", filename.c_str() );
+    CachedExample ce ( filename );
 
-	imgc.reInitFrom ( img, true );
-	for ( uint j = 0 ; j < img.numChannels; j++ )
-	    GenericImageTools::calcIntegralImage ( imgc.data[j], img.data[j], img.xsize, img.ysize );
+    NICE::MultiChannelImageT<int> & img = ce.getIChannel ( CachedExample::I_COLOR );
+    NICE::MultiChannelImageT<double> & imgc = ce.getDChannel ( CachedExample::D_INTEGRALCOLOR );
+
+    imgc.reInitFrom ( img );
+    for ( uint j = 0 ; j < img.channels(); j++ )
+    {
+      ImageT<double> tmp = imgc[j];
+      GenericImageTools::calcIntegralImage ( tmp, img[j], img.width(), img.height() );
+    }
 
-	Image visimg = imgc.getChannel(0);
-	showImage(visimg);
+    Image visimg = imgc.getChannel ( 0 );
+    showImage ( visimg );
 
-	ce.dropPreCached();
+    ce.dropPreCached();
 
 #ifndef NOVISUAL
-	getchar();
+    getchar();
 #endif
-	
-	imgc = ce.getDChannel ( CachedExample::D_INTEGRALCOLOR );
-	
-	visimg = imgc.getChannel(0);
-	showImage(visimg);
-	
-
-	i++;
-    }
-    
-    
-    return 0;
+
+    imgc = ce.getDChannel ( CachedExample::D_INTEGRALCOLOR );
+
+    visimg = imgc.getChannel ( 0 );
+    showImage ( visimg );
+
+
+    i++;
+  }
+
+
+  return 0;
 }

+ 51 - 57
cbaselib/progs/testLabeledSet.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file testLabeledSet.cpp
 * @brief test multidataset
 * @author Erik Rodner
@@ -15,65 +15,59 @@
 #include <vislearning/cbaselib/MultiDataset.h>
 
 using namespace OBJREC;
-
-
-
-// refactor-nice.pl: check this substitution
-// old: using namespace ice;
 using namespace NICE;
 using namespace std;
 
 
-/** 
-    
-    test multidataset 
-    
+/**
+
+    test multidataset
+
 */
-int main (int argc, char **argv)
-{   
-    std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
-
-    char configfile [300];
-
-    struct CmdLineOption options[] = {
-	{"config", "use config file", NULL, "%s", configfile},
-	{NULL, NULL, NULL, NULL, NULL} 
-    };
-    int ret;
-    char *more_options[argc];
-    ret = parse_arguments( argc, (const char**)argv, options, more_options);
-
-    if ( ret != 0 )
-    {
-	if ( ret != 1 ) fprintf (stderr, "Error parsing command line !\n");
-	exit (-1);
-    }
-
-    Config conf ( configfile );
-    
-    
-    MultiDataset md ( &conf );
-
-    const LabeledSet *train = md["train"];
-    const LabeledSet *test = md["test"];
-    const ClassNames & cn = md.getClassNames ( "train" );
-    
-    
-    fprintf (stderr, "Training Dataset\n");
-    LOOP_ALL_S( *train )
-    {
-	EACH_S(classno, fn);
-	fprintf (stderr, "%s %s\n", cn.text(classno).c_str(), fn.c_str() );
-    }
- 
-    fprintf (stderr, "Test Dataset\n");
-    LOOP_ALL_S( *test )
-    {
-	EACH_S(classno, fn);
-	fprintf (stderr, "%s %s\n", cn.text(classno).c_str(), fn.c_str() );
-    }
-   
-    
-    
-    return 0;
+int main ( int argc, char **argv )
+{
+  std::set_terminate ( __gnu_cxx::__verbose_terminate_handler );
+
+  char configfile [300];
+
+  struct CmdLineOption options[] = {
+    {"config", "use config file", NULL, "%s", configfile},
+    {NULL, NULL, NULL, NULL, NULL}
+  };
+  int ret;
+  char *more_options[argc];
+  ret = parse_arguments ( argc, ( const char** ) argv, options, more_options );
+
+  if ( ret != 0 )
+  {
+    if ( ret != 1 ) fprintf ( stderr, "Error parsing command line !\n" );
+    exit ( -1 );
+  }
+
+  Config conf ( configfile );
+
+  MultiDataset md ( &conf );
+
+  const LabeledSet *train = md["train"];
+  const LabeledSet *test = md["test"];
+  const ClassNames & cn = md.getClassNames ( "train" );
+
+
+  fprintf ( stderr, "Training Dataset\n" );
+  LOOP_ALL_S ( *train )
+  {
+    EACH_S ( classno, fn );
+    fprintf ( stderr, "%s %s\n", cn.text ( classno ).c_str(), fn.c_str() );
+  }
+
+  fprintf ( stderr, "Test Dataset\n" );
+  LOOP_ALL_S ( *test )
+  {
+    EACH_S ( classno, fn );
+    fprintf ( stderr, "%s %s\n", cn.text ( classno ).c_str(), fn.c_str() );
+  }
+
+
+
+  return 0;
 }

+ 2 - 0
classifier/classifiercombination/VCPreRandomForest.cpp

@@ -12,6 +12,8 @@
 
 #include <vislearning/cbaselib/VectorFeature.h>
 
+#include "core/image/ImageT.h"
+//#include "core/imagedisplay/ImageDisplay.h"
 
 using namespace OBJREC;
 using namespace std;

+ 0 - 2
classifier/classifiercombination/VCPreRandomForest.h

@@ -9,8 +9,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include <map>
 

+ 2 - 0
classifier/classifierinterfaces/VCFeaturePool.cpp

@@ -9,6 +9,8 @@
 
 #include "VCFeaturePool.h"
 #include "vislearning/cbaselib/VectorFeature.h"
+#include "core/image/ImageT.h"
+//#include "core/imagedisplay/ImageDisplay.h"
 
 using namespace OBJREC;
 

+ 0 - 2
classifier/classifierinterfaces/VCFeaturePool.h

@@ -10,8 +10,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include <string>
 

+ 3 - 0
classifier/fpclassifier/logisticregression/FPCSMLR.cpp

@@ -1,6 +1,9 @@
 #include "vislearning/classifier/fpclassifier/logisticregression/FPCSMLR.h"
 #include "vislearning/cbaselib/FeaturePool.h"
 
+#include "core/image/ImageT.h"
+//#include "core/imagedisplay/ImageDisplay.h"
+
 #include <iostream>
 
 using namespace OBJREC;

+ 0 - 2
classifier/fpclassifier/logisticregression/FPCSMLR.h

@@ -10,8 +10,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
 #include "vislearning/classifier/fpclassifier/logisticregression/SLR.h"

+ 3 - 0
classifier/fpclassifier/logisticregression/SLR.cpp

@@ -5,6 +5,9 @@
 #include "vislearning/classifier/fpclassifier/logisticregression/SLR.h"
 #include "vislearning/cbaselib/FeaturePool.h"
 
+#include "core/image/ImageT.h"
+//#include "core/imagedisplay/ImageDisplay.h"
+
 #include <iostream>
 
 #define SLRDEBUG

+ 0 - 2
classifier/fpclassifier/logisticregression/SLR.h

@@ -9,8 +9,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
 #include "vislearning/cbaselib/FeaturePool.h"

+ 3 - 0
classifier/fpclassifier/randomforest/DecisionNode.cpp

@@ -7,6 +7,9 @@
 */
 #include <iostream>
 
+#include "core/image/ImageT.h"
+#include "core/imagedisplay/ImageDisplay.h"
+
 #include "vislearning/classifier/fpclassifier/randomforest/DecisionNode.h"
 
 using namespace OBJREC;

+ 1 - 2
classifier/fpclassifier/randomforest/DecisionNode.h

@@ -10,8 +10,7 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
+
 #include <map>
 #include <limits>
 

+ 3 - 0
classifier/fpclassifier/randomforest/FPCRandomForests.cpp

@@ -12,6 +12,9 @@
 
 #include <iostream>
 
+#include "core/image/ImageT.h"
+#include "core/imagedisplay/ImageDisplay.h"
+
 #include "vislearning/classifier/fpclassifier/randomforest/FPCRandomForests.h"
 #include "vislearning/classifier/fpclassifier/randomforest/DTBStandard.h"
 #include "vislearning/classifier/fpclassifier/randomforest/DTBRandom.h"

+ 0 - 2
classifier/fpclassifier/randomforest/FPCRandomForests.h

@@ -12,8 +12,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include "vislearning/classifier/classifierbase/FeaturePoolClassifier.h"
 #include "vislearning/cbaselib/FeaturePool.h"

+ 8 - 6
classifier/genericClassifierSelection.h

@@ -4,6 +4,7 @@
 #include "core/basics/StringTools.h"
 
 #include "vislearning/classifier/vclassifier/VCAmitSVM.h"
+#include "vislearning/classifier/vclassifier/VCNearestClassMean.h"
 #include "vislearning/classifier/vclassifier/VCSimpleGaussian.h"
 #include "vislearning/classifier/vclassifier/VCNearestNeighbour.h"
 #include "vislearning/classifier/vclassifier/VCCrossGeneralization.h"
@@ -60,12 +61,13 @@ class GenericClassifierSelection
 
       if ( classifier_type == "amit" ) {
         classifier = new VCAmitSVM ( conf );
-
-      } else if ( classifier_type == "nn" ) {
-        classifier = new VCNearestNeighbour ( conf, new NICE::EuclidianDistance<double>() );
-#ifdef NICE_USELIB_ICE
-      } else if ( classifier_type == "gauss" ) {
-        classifier = new VCSimpleGaussian ( conf );
+     } else if ( classifier_type == "nn" ) {
+	classifier = new VCNearestNeighbour( conf, new NICE::EuclidianDistance<double>() );
+#ifdef NICE_USELIB_ICE 
+     } else if ( classifier_type == "gauss" ) {
+        classifier = new VCSimpleGaussian( conf );
+     } else if ( classifier_type == "nearest_classmean" ) {
+        classifier = new VCNearestClassMean( conf, new NICE::EuclidianDistance<double>() );
 #endif
       } else if ( classifier_type == "random_forest" ) {
         FeaturePoolClassifier *fpc = new FPCRandomForests ( conf, "RandomForest" );

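With the selection code above, the classifier-type string "nearest_classmean" now instantiates the new VCNearestClassMean (only when NICE_USELIB_ICE is defined, as the #ifdef shows). A direct-construction sketch using the same constructor arguments; the include paths and the exact Config pointer type are assumptions:

// Sketch only: build the new classifier directly, mirroring the
// "nearest_classmean" branch above. Include paths are assumed.
#include "core/basics/Config.h"                                      // assumed path
#include "core/vector/Distance.h"                                    // assumed path
#include "vislearning/classifier/vclassifier/VCNearestClassMean.h"

OBJREC::VCNearestClassMean *buildNearestClassMean ( const NICE::Config *conf )
{
  // same arguments as in GenericClassifierSelection above
  return new OBJREC::VCNearestClassMean ( conf, new NICE::EuclidianDistance<double>() );
}
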
+ 173 - 0
classifier/kernelclassifier/KCGPApproxOneClass.cpp

@@ -0,0 +1,173 @@
+/** 
+* @file KCGPApproxOneClass.cpp
+* @brief One-Class Gaussian Process Regression for Classification: we approximate the inverse of the regularized kernel matrix using a diagonal matrix
+* @author Alexander Lütz
+* @date 22-05-2012 (dd-mm-yyyy)
+
+*/
+#include <iostream>
+#include <typeinfo>
+#include <cstring>
+
+#include "core/vector/Algorithms.h"
+#include "core/vector/VVector.h"
+
+#include "KCGPApproxOneClass.h"
+
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+KCGPApproxOneClass::KCGPApproxOneClass( const Config *conf, Kernel *kernel, const string & section ) : KernelClassifier ( conf, kernel )
+{
+//   this->kernelFunction = kernel;  
+  //overwrite the default optimization options, since we don't want to perform standard loo or marginal likelihood stuff
+  Config config(*conf);
+  string modestr = config.gS(section,"detection_mode"); 
+
+  if(strcmp("mean",modestr.c_str())==0){
+    this->mode=MEAN_DETECTION_MODE;cerr << "One-class classification via GP predictive _mean_ !!!"<<endl;
+  }
+  if(strcmp("variance",modestr.c_str())==0){
+    mode=VARIANCE_DETECTION_MODE;cerr << "One-class classification via GP predictive _variance_ !!!"<<endl;
+  }
+
+  this->staticNoise = conf->gD(section, "static_noise", 0.0);
+}
+
+
+
+KCGPApproxOneClass::KCGPApproxOneClass( const KCGPApproxOneClass & src ) : KernelClassifier ( src )
+{
+  this->matrixDInv = src.matrixDInv;
+  this->InvDY = src.InvDY;
+  this->mode = src.mode;
+  this->staticNoise = src.staticNoise;
+}
+
+KCGPApproxOneClass::~KCGPApproxOneClass()
+{
+}
+
+
+void KCGPApproxOneClass::teach ( KernelData *kernelData, const NICE::Vector & y )
+{
+    fthrow( Exception, "KCGPApproxOneClass::teach: this method is not implemented for this specific type of classifier. Please use the second teach-method." );  
+}
+
+void KCGPApproxOneClass::teach (const LabeledSetVector &teachSet)
+{
+  if ( this->kernelFunction == NULL )
+    fthrow( Exception, "KernelClassifier::teach: To use this function, you have to specify a kernel function using the constructor" );  
+  
+  //we do not have to allocate new storage here since these variables come from the interface KernelClassifier
+//   NICE::VVector vecSet;
+
+  teachSet.getFlatRepresentation (this->vecSet, this->vecSetLabels);
+    
+  if ( (this->vecSetLabels.Min() != 1) || (this->vecSetLabels.Max() != 1) ) {
+    fthrow(Exception, "This classifier is suitable only for one-class classification problems, i.e. max(y) = min(y) = 1");
+  }  
+
+  this->matrixDInv.resize(this->vecSetLabels.size());
+ 
+  //compute D 
+  //start with adding some noise, if necessary
+  if (this->staticNoise != 0.0)
+    this->matrixDInv.set(this->staticNoise);
+  else
+    this->matrixDInv.set(0.0);
+  
+  //now sum up all entries of each row in the original kernel matrix
+  double kernelScore(0.0);
+  for (int i = 0; i < this->vecSetLabels.size(); i++)
+  {
+    for (int j = i; j < this->vecSetLabels.size(); j++)
+    {
+      kernelScore = this->kernelFunction->K(vecSet[i],vecSet[j]);
+      this->matrixDInv[i] += kernelScore;
+      if (i != j)
+        this->matrixDInv[j] += kernelScore; 
+    }
+  }  
+  
+  //compute its inverse
+  for (int i = 0; i < this->vecSetLabels.size(); i++)
+  {
+    this->matrixDInv[i] = 1.0 / this->matrixDInv[i];
+  }
+  
+  //and multiply it from right with the label vector (precalculation for mean computation)
+  if(this->mode==MEAN_DETECTION_MODE)
+  {
+    this->InvDY.resize ( this->vecSetLabels.size() );
+    for (int i = 0; i < this->vecSetLabels.size(); i++)
+    {
+      this->InvDY[i] = this->vecSetLabels[i] * this->matrixDInv[i];
+    }
+  }  
+}
+
+ClassificationResult KCGPApproxOneClass::classifyKernel ( const NICE::Vector & kernelVector, double kernelSelf ) const
+{
+  FullVector scores ( 2 );
+  scores[0] = 0.0;
+
+  if(this->mode==MEAN_DETECTION_MODE)
+  {
+    // kernelSelf is not needed for the regression type of GP
+
+    if ( kernelVector.size() != this->vecSetLabels.size() ) 
+      fthrow(Exception, "KCGPApproxOneClass::classifyKernel: size of kernel value vector " << 
+        kernelVector.size() << " does not match number of training points " << this->vecSetLabels.size() );
+      
+    double yEstimate = kernelVector.scalarProduct ( InvDY );    
+    scores[1] = yEstimate;
+  }
+  if(this->mode==VARIANCE_DETECTION_MODE)
+  {
+    if ( kernelVector.size() != this->vecSetLabels.size() ) 
+      fthrow(Exception, "KCGPApproxOneClass::classifyKernel: size of kernel value vector " << 
+        kernelVector.size() << " does not match number of training points " << this->vecSetLabels.size() );
+      
+    NICE::Vector rightPart (this->vecSetLabels.size());
+    for (int i = 0; i < this->vecSetLabels.size(); i++)
+    {
+      rightPart[i] = kernelVector[i] * this->matrixDInv[i];
+    }
+
+    double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+    scores[1] = 1.0 - uncertainty;
+
+  }
+  ClassificationResult r ( scores[1]<0.5 ? 0 : 1, scores );
+
+  return r;
+}
+
+KCGPApproxOneClass *KCGPApproxOneClass::clone() const
+{
+  return new KCGPApproxOneClass ( *this );
+}
+
+void KCGPApproxOneClass::store(std::ostream& ofs, int type) const
+{
+  ofs << this->matrixDInv << std::endl;
+  ofs << this->InvDY << std::endl;
+  ofs << this->mode << std::endl;
+  ofs << this->staticNoise << std::endl;
+}
+
+void KCGPApproxOneClass::restore(std::istream& ifs, int type)
+{
+  ifs >> this->matrixDInv;
+  ifs >> this->InvDY;
+  ifs >> this->mode;
+  ifs >> this->staticNoise;
+} 
+
+void KCGPApproxOneClass::clear()
+{
+}
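Restated compactly (this only summarizes the added code above; y_i = 1 throughout, since only one-class problems are accepted), the diagonal approximation is

    D_{ii} = \sigma_{\mathrm{noise}} + \sum_{j=1}^{n} K(x_i, x_j), \qquad (K + \sigma_{\mathrm{noise}} I)^{-1} \approx D^{-1}

    \text{mean mode:}\quad s(x_*) = k_*^{\top} D^{-1} y, \qquad
    \text{variance mode:}\quad s(x_*) = 1 - \bigl( k_{**} - k_*^{\top} D^{-1} k_* \bigr)

where k_* is the kernelVector argument of classifyKernel and k_{**} is kernelSelf.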

+ 64 - 0
classifier/kernelclassifier/KCGPApproxOneClass.h

@@ -0,0 +1,64 @@
+/** 
+* @file KCGPApproxOneClass.h
+* @brief One-Class Gaussian Process Regression for Classification: we approximate the inverse of the regularized kernel matrix using a diagonal matrix
+* @author Alexander Lütz
+* @date 22-05-2012 (dd-mm-yyyy)
+
+*/
+#ifndef KCGPAPPROXONECLASSINCLUDE
+#define KCGPAPPROXONECLASSINCLUDE
+
+#include "vislearning/cbaselib/ClassificationResult.h"
+
+#include "vislearning/classifier/classifierbase/KernelClassifier.h"
+
+#include "vislearning/math/kernels/Kernel.h"
+
+
+#define VARIANCE_DETECTION_MODE 1
+#define MEAN_DETECTION_MODE 2
+
+namespace OBJREC {
+ 
+  class KCGPApproxOneClass : public KernelClassifier
+  {
+
+      protected:
+        NICE::Vector matrixDInv;
+        NICE::Vector InvDY;
+        int mode;
+        double staticNoise;
+      
+
+      public:
+    
+        /** simple constructor */
+        KCGPApproxOneClass( const NICE::Config *conf, Kernel *kernel = NULL, const std::string & section = "OneClassGP" );
+
+        /** copy constructor */
+        KCGPApproxOneClass( const KCGPApproxOneClass & src );
+            
+        /** simple destructor */
+        virtual ~KCGPApproxOneClass();
+          
+        /** teach the classifier with a kernel matrix and the corresponding class labels @param y ! */
+        void teach ( KernelData *kernelData, const NICE::Vector & y );
+        
+        void teach (const LabeledSetVector &teachSet);
+          
+        /** classify an example by using its kernel values with the training set,
+          be careful with the order in @param kernelVector */
+        virtual ClassificationResult classifyKernel ( const NICE::Vector & kernelVector, double kernelSelf ) const;
+        
+          /** clone this object */
+        KCGPApproxOneClass *clone() const;
+
+        void restore(std::istream&, int);
+        void store(std::ostream&, int) const;
+        void clear();
+
+  };
+
+}
+
+#endif
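A minimal usage sketch for the new classifier (illustrative only and not part of the change; conf, kernelFunction and the kernel values are assumed to be provided by the caller, and the helper function below is hypothetical):

    // hedged usage sketch for KCGPApproxOneClass
    #include "vislearning/cbaselib/LabeledSet.h"
    #include "vislearning/classifier/kernelclassifier/KCGPApproxOneClass.h"

    using namespace OBJREC;

    ClassificationResult scoreNovelExample ( const NICE::Config *conf,
                                             Kernel *kernelFunction,
                                             const LabeledSetVector & positives,
                                             const NICE::Vector & kernelVector,
                                             double kernelSelf )
    {
      // "detection_mode" in section [OneClassGP] selects "mean" or "variance"
      KCGPApproxOneClass oneClassGP ( conf, kernelFunction, "OneClassGP" );

      // one-class training set: every label in positives has to be 1
      oneClassGP.teach ( positives );

      // kernelVector holds K(x_*, x_i) for all training examples, kernelSelf is K(x_*, x_*)
      return oneClassGP.classifyKernel ( kernelVector, kernelSelf );
    }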

+ 2 - 0
classifier/vclassifier/VCDTSVM.cpp

@@ -2,6 +2,8 @@
 
 #include "vislearning/classifier/vclassifier/VCDTSVM.h"
 
+#include "core/image/ImageT.h"
+//#include "core/imagedisplay/ImageDisplay.h"
 #include "core/basics/StringTools.h"
 
 #undef WRITE

+ 0 - 2
classifier/vclassifier/VCDTSVM.h

@@ -9,8 +9,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include "vislearning/cbaselib/LabeledSet.h"
 #include "vislearning/classifier/classifierbase/VecClassifier.h"

+ 110 - 0
classifier/vclassifier/VCNearestClassMean.cpp

@@ -0,0 +1,110 @@
+#ifdef NICE_USELIB_ICE
+
+#include <iostream>
+
+#include "vislearning/classifier/vclassifier/VCNearestClassMean.h"
+
+using namespace OBJREC;
+
+using namespace std;
+
+using namespace NICE;
+
+VCNearestClassMean::VCNearestClassMean( const Config *_conf, NICE::VectorDistance<double> *_distancefunc  ) 
+: VecClassifier ( _conf ), distancefunc (_distancefunc)
+{    
+    if ( _distancefunc == NULL )
+		distancefunc = new EuclidianDistance<double>();
+}
+
+VCNearestClassMean::~VCNearestClassMean()
+{
+    clear();
+}
+
+/** classify using simple vector */
+
+ClassificationResult VCNearestClassMean::classify ( const NICE::Vector & x ) const
+{
+     double min_distance= std::numeric_limits<double>::max();
+     int min_class = -1;
+     FullVector scores ( classNo.size() );
+     
+     for(uint i=0;i<this->classNo.size();i++)
+     {
+          double distance = distancefunc->calculate ( x, means[i] );
+          scores[i] = - distance;
+          if ( distance < min_distance)
+          {
+               min_distance = distance;
+               min_class = classNo[i];
+          }
+     }
+     
+     return ClassificationResult ( min_class, scores );
+}
+
+
+void VCNearestClassMean::teach ( const LabeledSetVector & _teachSet )
+{
+
+    _teachSet.getClasses ( this->classNo );
+
+    //initialize means
+    NICE::Vector zero( _teachSet.dimension() );
+    for(uint d=0;d<zero.size();d++) zero[d]=0.0;
+    for(uint c=0;c<this->classNo.size();c++)
+    {
+	means.push_back(zero);
+    }
+
+    //add all class-specific vectors 
+    int index=0;
+    LOOP_ALL(_teachSet)
+    {
+	EACH(classno,x);
+	//determine index
+	for(uint c=0;c<this->classNo.size();c++)
+        {
+		if(classno==classNo[c]) index=c;
+        }
+	for(uint d=0;d<zero.size();d++)
+        {
+	   means[index][d]+=x[d];
+        }
+    }
+
+    //normalize vectors
+    for(uint c=0;c<this->classNo.size();c++)
+    {
+	for(uint d=0;d<zero.size();d++)
+        {
+	   means[c][d]/=_teachSet.count(this->classNo[c]);
+        }
+    }
+
+}
+
+void VCNearestClassMean::finishTeaching()
+{
+//nothing more to do
+}
+
+void VCNearestClassMean::clear ()
+{
+//nothing to do
+}
+
+void VCNearestClassMean::store ( std::ostream & os, int format ) const
+{
+    fprintf (stderr, "NOT YET IMPLEMENTED\n");
+    exit(-1);
+}
+
+void VCNearestClassMean::restore ( std::istream & is, int format )
+{
+    fprintf (stderr, "NOT YET IMPLEMENTED\n");
+    exit(-1);
+}
+
+#endif
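Restated briefly, the classifier above stores one mean vector per class and scores a test vector by its negated distance to each mean:

    \mu_c = \frac{1}{N_c} \sum_{i : y_i = c} x_i, \qquad
    \mathrm{score}_c(x) = -\, d(x, \mu_c), \qquad
    \hat{c} = \arg\min_c d(x, \mu_c)

with d the configured VectorDistance (Euclidean by default).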

+ 55 - 0
classifier/vclassifier/VCNearestClassMean.h

@@ -0,0 +1,55 @@
+/** 
+* @file VCNearestClassMean.h
+* @brief Nearest Class Mean Classifier (naive Bayes with identity covariance matrix for all classes); code is based on VCSimpleGaussian and VCNearestNeighbour
+* @author Erik Rodner + Mi.Ke
+* @date 12/05/2007
+
+*/
+
+#ifndef VCNEARESTCLASSMEANINCLUDE
+#define VCNEARESTCLASSMEANINCLUDE
+
+#include "vislearning/classifier/classifierbase/VecClassifier.h"
+#include <core/vector/Distance.h>
+
+#include <image_nonvis.h>
+
+namespace OBJREC {
+
+/** Nearest class mean classifier */
+class VCNearestClassMean : public VecClassifier
+{
+    public:
+	std::vector<NICE::Vector> means;
+	std::vector<int> classNo;
+	NICE::VectorDistance<double> *distancefunc;
+	std::map<int, ice::Statistics *> statistics;
+
+    public:
+  
+	/** simple constructor */
+	VCNearestClassMean( const NICE::Config *conf, NICE::VectorDistance<double> *distancefunc = NULL );
+      
+	/** simple destructor */
+	virtual ~VCNearestClassMean();
+ 
+	/** classify using simple vector */
+	ClassificationResult classify ( const NICE::Vector & x ) const;
+
+	/** classify using a simple vector */
+	void teach ( const LabeledSetVector & teachSet );
+	
+	void finishTeaching();
+
+	void clear ();
+
+	void store ( std::ostream & os, int format = 0 ) const;
+
+	void restore ( std::istream & is, int format = 0 );
+
+};
+
+
+} // namespace
+
+#endif

+ 3 - 0
classifier/vclassifier/VCOneVsAll.cpp

@@ -6,6 +6,9 @@
 */
 #include <iostream>
 
+#include "core/image/ImageT.h"
+#include "core/imagedisplay/ImageDisplay.h"
+
 #include "vislearning/classifier/vclassifier/VCOneVsAll.h"
 
 

+ 0 - 2
classifier/vclassifier/VCOneVsAll.h

@@ -10,8 +10,6 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
-#include "core/image/ImageT.h"
-#include "core/imagedisplay/ImageDisplay.h"
 
 #include "vislearning/classifier/classifierbase/VecClassifier.h"
 #include "core/basics/triplet.h"

+ 98 - 102
features/fpfeatures/ColorHistogramFeature.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file ColorHistogramFeature.cpp
  * @brief simple color histograms
 * @author Erik Rodner
@@ -19,17 +19,17 @@ using namespace NICE;
 const double epsilon = 10e-8;
 
 /** simple constructor */
-ColorHistogramFeature::ColorHistogramFeature( const Config *conf )
+ColorHistogramFeature::ColorHistogramFeature ( const Config *conf )
 {
-    window_size_x = conf->gI("ColorHistogramFeature", "window_size_x", 21 );
-    window_size_y = conf->gI("ColorHistogramFeature", "window_size_y", 21 );
-    scaleStep = conf->gD("ColorHistogramFeature", "scale_step", sqrt(2) );
-    numScales = conf->gI("ColorHistogramFeature", "num_scales", 5 );
-
-    int numBinsH = conf->gI("ColorHistogramFeature", "num_bins_h", 4);
-    int numBinsS = conf->gI("ColorHistogramFeature", "num_bins_s", 2);
-    int numBinsV = conf->gI("ColorHistogramFeature", "num_bins_v", 2);
-    numBins = numBinsH*numBinsS*numBinsV;
+  window_size_x = conf->gI ( "ColorHistogramFeature", "window_size_x", 21 );
+  window_size_y = conf->gI ( "ColorHistogramFeature", "window_size_y", 21 );
+  scaleStep = conf->gD ( "ColorHistogramFeature", "scale_step", sqrt ( 2 ) );
+  numScales = conf->gI ( "ColorHistogramFeature", "num_scales", 5 );
+
+  int numBinsH = conf->gI ( "ColorHistogramFeature", "num_bins_h", 4 );
+  int numBinsS = conf->gI ( "ColorHistogramFeature", "num_bins_s", 2 );
+  int numBinsV = conf->gI ( "ColorHistogramFeature", "num_bins_v", 2 );
+  numBins = numBinsH * numBinsS * numBinsV;
 }
 
 /** simple destructor */
@@ -37,124 +37,120 @@ ColorHistogramFeature::~ColorHistogramFeature()
 {
 }
 
-double ColorHistogramFeature::val( const Example *example ) const
+double ColorHistogramFeature::val ( const Example *example ) const
 {
-    const NICE::MultiChannelImageT<double> & img = 
-	example->ce->getDChannel ( CachedExample::D_INTEGRALCOLOR );
-    int tm_xsize = img.xsize;
-    int tm_ysize = img.ysize;
-
-    int xsize;
-    int ysize;
-    example->ce->getImageSize ( xsize, ysize );
-
-    int wsx2, wsy2;
-    int exwidth = example->width;
-    if ( exwidth == 0 ) {
-	wsx2 = window_size_x * tm_xsize / (2*xsize);
-	wsy2 = window_size_y * tm_ysize / (2*ysize);
-    } else {
-	int exheight = example->height;
-	wsx2 = exwidth * tm_xsize / (2*xsize);
-	wsy2 = exheight * tm_ysize / (2*ysize);
-    }
-	
-    int xx, yy;
-    xx = ( example->x ) * tm_xsize / xsize;
-    yy = ( example->y ) * tm_ysize / ysize;
-
-    assert ( (wsx2 > 0) && (wsy2 > 0) );
-
-    int xtl = xx - wsx2;
-    int ytl = yy - wsy2;
-    int xrb = xx + wsx2;
-    int yrb = yy + wsy2;
+  const NICE::MultiChannelImageT<double> & img =
+    example->ce->getDChannel ( CachedExample::D_INTEGRALCOLOR );
+  int tm_xsize = img.width();
+  int tm_ysize = img.height();
+
+  int xsize;
+  int ysize;
+  example->ce->getImageSize ( xsize, ysize );
+
+  int wsx2, wsy2;
+  int exwidth = example->width;
+  if ( exwidth == 0 ) {
+    wsx2 = window_size_x * tm_xsize / ( 2 * xsize );
+    wsy2 = window_size_y * tm_ysize / ( 2 * ysize );
+  } else {
+    int exheight = example->height;
+    wsx2 = exwidth * tm_xsize / ( 2 * xsize );
+    wsy2 = exheight * tm_ysize / ( 2 * ysize );
+  }
+
+  int xx, yy;
+  xx = ( example->x ) * tm_xsize / xsize;
+  yy = ( example->y ) * tm_ysize / ysize;
+
+  assert ( ( wsx2 > 0 ) && ( wsy2 > 0 ) );
+
+  int xtl = xx - wsx2;
+  int ytl = yy - wsy2;
+  int xrb = xx + wsx2;
+  int yrb = yy + wsy2;
 
 #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
-    xtl = BOUND ( xtl, 0, tm_xsize - 1 );
-    ytl = BOUND ( ytl, 0, tm_ysize - 1 );
-    xrb = BOUND ( xrb, 0, tm_xsize - 1 );
-    yrb = BOUND ( yrb, 0, tm_ysize - 1 );
+  xtl = BOUND ( xtl, 0, tm_xsize - 1 );
+  ytl = BOUND ( ytl, 0, tm_ysize - 1 );
+  xrb = BOUND ( xrb, 0, tm_xsize - 1 );
+  yrb = BOUND ( yrb, 0, tm_ysize - 1 );
 #undef BOUND
 
-    assert ( bin < (int)img.numChannels );
-    assert ( img.data[bin] != NULL );
-
-    long kA = xtl + ytl * tm_xsize;
-    long kB = xrb + ytl * tm_xsize;
-    long kC = xtl + yrb * tm_xsize;
-    long kD = xrb + yrb * tm_xsize;
-    double A,B,C,D;
-    A = img.data[bin][ kA ];
-    B = img.data[bin][ kB ];
-    C = img.data[bin][ kC ];
-    D = img.data[bin][ kD ];
-
-    double val1 =  (D - B - C + A);
-    double sum = val1*val1;
-    for ( int b = 0 ; b < (int)img.numChannels ; b++)
-    {
-	if ( b == bin ) continue;
-	A = img.data[b][ kA ];
-	B = img.data[b][ kB ];
-	C = img.data[b][ kC ];
-	D = img.data[b][ kD ];
-	double val = ( D - B - C + A );
-	sum += val*val;
-    }
-    sum = sqrt(sum);
-    return ( val1 + epsilon ) / ( sum + epsilon );
+  assert ( bin < ( int ) img.channels() );
+
+  double A, B, C, D;
+  A = img.get ( xtl, ytl, bin );
+  B = img.get ( xrb, ytl, bin );
+  C = img.get ( xtl, yrb, bin );
+  D = img.get ( xrb, yrb, bin );
+
+  double val1 = ( D - B - C + A );
+  double sum = val1 * val1;
+  for ( int b = 0 ; b < ( int ) img.channels() ; b++ )
+  {
+    if ( b == bin ) 
+      continue;
+    A = img.get ( xtl, ytl, b );
+    B = img.get ( xrb, ytl, b );
+    C = img.get ( xtl, yrb, b );
+    D = img.get ( xrb, yrb, b );
+    double val = ( D - B - C + A );
+    sum += val * val;
+  }
+  sum = sqrt ( sum );
+  return ( val1 + epsilon ) / ( sum + epsilon );
 }
 
 void ColorHistogramFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-    int nScales = (variableWindow ? numScales : 1 );
+  int nScales = ( variableWindow ? numScales : 1 );
 
-    for ( int i = 0 ; i < nScales ; i++ )
+  for ( int i = 0 ; i < nScales ; i++ )
+  {
+    int wsy = window_size_y;
+    int wsx = window_size_x;
+    for ( int _bin = 0 ; _bin < numBins ; _bin++ )
     {
-	int wsy = window_size_y;
-	int wsx = window_size_x;
-	for ( int _bin = 0 ; _bin < numBins ; _bin++ )
-	{
-	    ColorHistogramFeature *f = new ColorHistogramFeature();
-	    f->window_size_x = wsx;
-	    f->window_size_y = wsy;
-	    f->bin = _bin;
-	    featurePool.addFeature ( f, 1.0 / ( numBins * nScales ) ); 
-	}
-	wsx = (int) (scaleStep * wsx);
-	wsy = (int) (scaleStep * wsy);
+      ColorHistogramFeature *f = new ColorHistogramFeature();
+      f->window_size_x = wsx;
+      f->window_size_y = wsy;
+      f->bin = _bin;
+      featurePool.addFeature ( f, 1.0 / ( numBins * nScales ) );
     }
+    wsx = ( int ) ( scaleStep * wsx );
+    wsy = ( int ) ( scaleStep * wsy );
+  }
 }
 
 Feature *ColorHistogramFeature::clone() const
 {
-    ColorHistogramFeature *f = new ColorHistogramFeature();
-    f->window_size_x = window_size_x;
-    f->window_size_y = window_size_y;
-    f->bin = bin;
+  ColorHistogramFeature *f = new ColorHistogramFeature();
+  f->window_size_x = window_size_x;
+  f->window_size_y = window_size_y;
+  f->bin = bin;
 
-    return f;
+  return f;
 }
 
 Feature *ColorHistogramFeature::generateFirstParameter () const
 {
-    return clone();
+  return clone();
 }
 
-void ColorHistogramFeature::restore (istream & is, int format)
+void ColorHistogramFeature::restore ( istream & is, int format )
 {
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> bin;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> bin;
 }
 
-void ColorHistogramFeature::store (ostream & os, int format) const
+void ColorHistogramFeature::store ( ostream & os, int format ) const
 {
-    os << "ColorHistogramFeature "
-       << window_size_x << " "
-       << window_size_y << " "
-       << bin;
+  os << "ColorHistogramFeature "
+  << window_size_x << " "
+  << window_size_y << " "
+  << bin;
 }
 
 void ColorHistogramFeature::clear ()

+ 34 - 37
features/fpfeatures/ColorHistogramFeature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file ColorHistogramFeature.h
 * @brief simple color histograms
 * @author Erik Rodner
@@ -10,7 +10,7 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
- 
+
 #include "core/basics/Config.h"
 #include "vislearning/cbaselib/Feature.h"
 
@@ -21,44 +21,41 @@ namespace OBJREC {
 class ColorHistogramFeature : public Feature
 {
 
-    protected:
-
-	/** @{ feature parameter */
-	int window_size_x;
-	int window_size_y;
-
-	int bin;
-	/** @} */
-
-	/** @{ parameter for feature generation */
-	int numScales;
-	int numBins;
-	double scaleStep;
-	/** @} */
-
-    public:
-  
-	/** simple constructor */
-	ColorHistogramFeature( const NICE::Config *conf );
-
-	/** internally used by ColorHistogramFeature::explode */
-	ColorHistogramFeature () {};
-      
-	/** simple destructor */
-	virtual ~ColorHistogramFeature();
-     
-	double val( const Example *example ) const;
-	void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
-	Feature *clone() const;
-	Feature *generateFirstParameter () const;
-
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
-};
+  protected:
+
+    /** @{ feature parameter */
+    int window_size_x;
+    int window_size_y;
+
+    int bin;
+    /** @} */
+
+    /** @{ parameter for feature generation */
+    int numScales;
+    int numBins;
+    double scaleStep;
+    /** @} */
 
+  public:
 
+    /** simple constructor */
+    ColorHistogramFeature ( const NICE::Config *conf );
 
+    /** internally used by ColorHistogramFeature::explode */
+    ColorHistogramFeature () {};
+
+    /** simple destructor */
+    virtual ~ColorHistogramFeature();
+
+    double val ( const Example *example ) const;
+    void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
+    Feature *clone() const;
+    Feature *generateFirstParameter () const;
+
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
+};
 
 } // namespace
 

+ 162 - 174
features/fpfeatures/EOHFeature.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file EOHFeature.cpp
 * @brief edge orientation histograms (Levi and Weiss 2004)
 * @author Erik Rodner
@@ -19,17 +19,17 @@ using namespace NICE;
 
 
 /** simple constructor */
-EOHFeature::EOHFeature( const Config *conf )
+EOHFeature::EOHFeature ( const Config *conf )
 {
-    window_size_x = conf->gI("EOHFeature", "window_size_x", 21 );
-    window_size_y = conf->gI("EOHFeature", "window_size_y", 21 );
-    scaleStep = conf->gD("EOHFeature", "scale_step", sqrt(2) );
-    numScales = conf->gI("EOHFeature", "num_scales", 5 );
-    numBins = conf->gI("EOHFeature", "num_bins", 9 );
-
-    bin  = 0;
-    bin2 = 0;
-    type = EOH_VALUE;
+  window_size_x = conf->gI ( "EOHFeature", "window_size_x", 21 );
+  window_size_y = conf->gI ( "EOHFeature", "window_size_y", 21 );
+  scaleStep = conf->gD ( "EOHFeature", "scale_step", sqrt ( 2 ) );
+  numScales = conf->gI ( "EOHFeature", "num_scales", 5 );
+  numBins = conf->gI ( "EOHFeature", "num_bins", 9 );
+
+  bin  = 0;
+  bin2 = 0;
+  type = EOH_VALUE;
 }
 
 /** simple destructor */
@@ -37,197 +37,185 @@ EOHFeature::~EOHFeature()
 {
 }
 
-double EOHFeature::val( const Example *example ) const
+double EOHFeature::val ( const Example *example ) const
 {
-    const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel (
-	CachedExample::D_INTEGRALEOH );
-
-    int xsize;
-    int ysize;
-    example->ce->getImageSize ( xsize, ysize );
-    int tm_xsize = img.xsize;
-    int tm_ysize = img.ysize;
-
-#if 0
-    int xtl = example->x - window_size_x/2;
-    int ytl = example->y - window_size_y/2;
-    int xrb = example->x + window_size_x/2;
-    int yrb = example->y + window_size_y/2;
-
-    xtl = xtl * tm_xsize / xsize;
-    ytl = ytl * tm_ysize / ysize;
-    xrb = xrb * tm_xsize / xsize;
-    yrb = yrb * tm_ysize / ysize;
-#endif
-
-    
-    int wsx2, wsy2;
-    int xx, yy;
-
-    int exwidth = example->width;
-    if ( exwidth == 0 ) {
-	wsx2 = window_size_x * tm_xsize / (2*xsize);
-	wsy2 = window_size_y * tm_ysize / (2*ysize);
-    } else {
-	int exheight = example->height;
-	wsx2 = exwidth * tm_xsize / (2*xsize);
-	wsy2 = exheight * tm_ysize / (2*ysize);
-    }
-	
-    xx = ( example->x ) * tm_xsize / xsize;
-    yy = ( example->y ) * tm_ysize / ysize;
-    
-    int xtl = xx - wsx2;
-    int ytl = yy - wsy2;
-    int xrb = xx + wsx2;
-    int yrb = yy + wsy2;
+  const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel (
+        CachedExample::D_INTEGRALEOH );
+
+  int xsize;
+  int ysize;
+  example->ce->getImageSize ( xsize, ysize );
+  int tm_xsize = img.width();
+  int tm_ysize = img.height();
+
+  int wsx2, wsy2;
+  int xx, yy;
+
+  int exwidth = example->width;
+  if ( exwidth == 0 ) {
+    wsx2 = window_size_x * tm_xsize / ( 2 * xsize );
+    wsy2 = window_size_y * tm_ysize / ( 2 * ysize );
+  } else {
+    int exheight = example->height;
+    wsx2 = exwidth * tm_xsize / ( 2 * xsize );
+    wsy2 = exheight * tm_ysize / ( 2 * ysize );
+  }
+
+  xx = ( example->x ) * tm_xsize / xsize;
+  yy = ( example->y ) * tm_ysize / ysize;
+
+  int xtl = xx - wsx2;
+  int ytl = yy - wsy2;
+  int xrb = xx + wsx2;
+  int yrb = yy + wsy2;
 
 #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
-    xtl = BOUND ( xtl, 0, tm_xsize - 1 );
-    ytl = BOUND ( ytl, 0, tm_ysize - 1 );
-    xrb = BOUND ( xrb, 0, tm_xsize - 1 );
-    yrb = BOUND ( yrb, 0, tm_ysize - 1 );
+  xtl = BOUND ( xtl, 0, tm_xsize - 1 );
+  ytl = BOUND ( ytl, 0, tm_ysize - 1 );
+  xrb = BOUND ( xrb, 0, tm_xsize - 1 );
+  yrb = BOUND ( yrb, 0, tm_ysize - 1 );
 #undef BOUND
 
-    double A,B,C,D;
-
-    assert ( bin < (int)img.numChannels );
-    assert ( img.data[bin] != NULL );
-
-    int kA = xtl + ytl * tm_xsize;
-    int kB = xrb + ytl * tm_xsize;
-    int kC = xtl + yrb * tm_xsize;
-    int kD = xrb + yrb * tm_xsize;
-    A = img.data[bin][ kA ];
-    B = img.data[bin][ kB ];
-    C = img.data[bin][ kC ];
-    D = img.data[bin][ kD ];
-
-    if ( type == EOH_VALUE ) { 
-	int area = (xrb - xtl)*(yrb - ytl);
-	
-	if ( area == 0 ) 
-	    return 0.0;
-	else {        
-	   /* A B 
-	      C D  */
-	    double value = (D - B - C + A) / area;
-
-	    return value;
-	}
-    } else if ( type == EOH_RATIO ) {
-	assert ( bin2 < (int)img.numChannels );
-
-	double val1 =  (D - B - C + A);
-	A = img.data[bin2][ kA ];
-	B = img.data[bin2][ kB ];
-	C = img.data[bin2][ kC ];
-	D = img.data[bin2][ kD ];
-
-	double val2 = ( D - B - C + A );
-
-	return ( val1 + epsilon ) / ( val2 + epsilon );
-    } else if ( type == EOH_DOMINANT_ORIENTATION ) {
-	double val1 =  (D - B - C + A);
-	double sum = val1;
-	for ( int b = 0 ; b < (int)img.numChannels ; b++)
-	{
-	    if ( b == bin ) continue;
-	    A = img.data[b][ kA ];
-	    B = img.data[b][ kB ];
-	    C = img.data[b][ kC ];
-	    D = img.data[b][ kD ];
-	    sum += ( D - B - C + A );
-	}
-
-	return ( val1 + epsilon ) / ( sum + epsilon );
+  double A, B, C, D;
+
+  assert ( bin < ( int ) img.channels() );
+
+  A = img.get ( xtl, ytl, bin );
+  B = img.get ( xrb, ytl, bin );
+  C = img.get ( xtl, yrb, bin );
+  D = img.get ( xrb, yrb, bin );
+
+  if ( type == EOH_VALUE )
+  {
+    int area = ( xrb - xtl ) * ( yrb - ytl );
+
+    if ( area == 0 )
+      return 0.0;
+    else {
+      /* A B
+         C D  */
+      double value = ( D - B - C + A ) / area;
+
+      return value;
+    }
+  }
+  else if ( type == EOH_RATIO )
+  {
+    assert ( bin2 < ( int ) img.channels() );
+
+    double val1 = ( D - B - C + A );
+    A = img.get ( xtl, ytl, bin2 );
+    B = img.get ( xrb, ytl, bin2 );
+    C = img.get ( xtl, yrb, bin2 );
+    D = img.get ( xrb, yrb, bin2 );
+
+    double val2 = ( D - B - C + A );
+
+    return ( val1 + epsilon ) / ( val2 + epsilon );
+  }
+  else if ( type == EOH_DOMINANT_ORIENTATION )
+  {
+    double val1 = ( D - B - C + A );
+    double sum = val1;
+    for ( int b = 0 ; b < ( int ) img.channels() ; b++ )
+    {
+      if ( b == bin ) 
+        continue;
+      A = img.get ( xtl, ytl, b );
+      B = img.get ( xrb, ytl, b );
+      C = img.get ( xtl, yrb, b );
+      D = img.get ( xrb, yrb, b );
+      sum += ( D - B - C + A );
     }
 
-    assert ( 1 == 0 );
+    return ( val1 + epsilon ) / ( sum + epsilon );
+  }
+
+  assert ( 1 == 0 );
 }
 
 void EOHFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-    int nScales = (variableWindow ? numScales : 1 );
+  int nScales = ( variableWindow ? numScales : 1 );
 
-    for ( int i = 0 ; i < nScales ; i++ )
+  for ( int i = 0 ; i < nScales ; i++ )
+  {
+    int wsy = window_size_y;
+    int wsx = window_size_x;
+    for ( int _type = 0 ; _type < EOH_NUMTYPES; _type++ )
     {
-	int wsy = window_size_y;
-	int wsx = window_size_x;
-	for ( int _type = 0 ; _type < EOH_NUMTYPES; _type++ )
-	{
-	    if ( (_type == EOH_VALUE) || (_type == EOH_DOMINANT_ORIENTATION) )
-	    {
-		for ( int _bin = 0 ; _bin < numBins ; _bin++ )
-		{
-		    EOHFeature *f = new EOHFeature();
-		    f->window_size_x = wsx;
-		    f->window_size_y = wsy;
-		    f->bin = _bin;
-		    f->type = _type;
-		    featurePool.addFeature ( f, 1.0 / ( EOH_NUMTYPES * numBins * nScales ) ); 
-		}
-	    }
-	    
-	    if ( (_type == EOH_RATIO) )
-	    {
-		for ( int _bin = 0 ; _bin < numBins ; _bin++ )
-		{
-		    for ( int _bin2 = 0 ; _bin2 < numBins ; _bin2++ )
-		    {
-			if ( bin == bin2 ) continue;
-
-			EOHFeature *f = new EOHFeature();
-			f->window_size_x = wsx;
-			f->window_size_y = wsy;
-			f->bin = _bin;
-			f->bin2 = _bin2;
-			f->type = _type;
-			featurePool.addFeature ( f, 1.0 / (EOH_NUMTYPES * (numBins - 1) * numBins * nScales ) ); 
-		    }
-		}
-	    }
-	}
-
-	wsx = (int) (scaleStep * wsx);
-	wsy = (int) (scaleStep * wsy);
+      if ( ( _type == EOH_VALUE ) || ( _type == EOH_DOMINANT_ORIENTATION ) )
+      {
+        for ( int _bin = 0 ; _bin < numBins ; _bin++ )
+        {
+          EOHFeature *f = new EOHFeature();
+          f->window_size_x = wsx;
+          f->window_size_y = wsy;
+          f->bin = _bin;
+          f->type = _type;
+          featurePool.addFeature ( f, 1.0 / ( EOH_NUMTYPES * numBins * nScales ) );
+        }
+      }
+
+      if ( ( _type == EOH_RATIO ) )
+      {
+        for ( int _bin = 0 ; _bin < numBins ; _bin++ )
+        {
+          for ( int _bin2 = 0 ; _bin2 < numBins ; _bin2++ )
+          {
+            if ( bin == bin2 ) continue;
+
+            EOHFeature *f = new EOHFeature();
+            f->window_size_x = wsx;
+            f->window_size_y = wsy;
+            f->bin = _bin;
+            f->bin2 = _bin2;
+            f->type = _type;
+            featurePool.addFeature ( f, 1.0 / ( EOH_NUMTYPES * ( numBins - 1 ) * numBins * nScales ) );
+          }
+        }
+      }
     }
+
+    wsx = ( int ) ( scaleStep * wsx );
+    wsy = ( int ) ( scaleStep * wsy );
+  }
 }
 
 Feature *EOHFeature::clone() const
 {
-    EOHFeature *f = new EOHFeature();
-    f->window_size_x = window_size_x;
-    f->window_size_y = window_size_y;
-    f->bin = bin;
-    f->bin2 = bin2;
-    f->type = type;
-
-    return f;
+  EOHFeature *f = new EOHFeature();
+  f->window_size_x = window_size_x;
+  f->window_size_y = window_size_y;
+  f->bin = bin;
+  f->bin2 = bin2;
+  f->type = type;
+
+  return f;
 }
 
 Feature *EOHFeature::generateFirstParameter () const
 {
-    return clone();
+  return clone();
 }
 
-void EOHFeature::restore (istream & is, int format)
+void EOHFeature::restore ( istream & is, int format )
 {
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> type;
-    is >> bin;
-    is >> bin2;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> type;
+  is >> bin;
+  is >> bin2;
 }
 
-void EOHFeature::store (ostream & os, int format) const
+void EOHFeature::store ( ostream & os, int format ) const
 {
-    os << "EOHFEATURE "
-       << window_size_x << " "
-       << window_size_y << " "
-       << type << " "
-       << bin << " "
-       << bin2;
+  os << "EOHFEATURE "
+  << window_size_x << " "
+  << window_size_y << " "
+  << type << " "
+  << bin << " "
+  << bin2;
 }
 
 void EOHFeature::clear ()
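Written out, the three feature types computed above are (v_b again denoting the integral-image box sum of orientation bin b over the example window):

    \mathrm{EOH\_VALUE} = \frac{v_{\mathrm{bin}}}{\mathrm{area}}, \qquad
    \mathrm{EOH\_RATIO} = \frac{v_{\mathrm{bin}} + \varepsilon}{v_{\mathrm{bin2}} + \varepsilon}, \qquad
    \mathrm{EOH\_DOMINANT\_ORIENTATION} = \frac{v_{\mathrm{bin}} + \varepsilon}{\sum_b v_b + \varepsilon}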

+ 49 - 46
features/fpfeatures/EOHFeature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file EOHFeature.h
 * @brief edge orientation histogram (Levi and Weiss, 2004)
 * @author Erik Rodner
@@ -10,7 +10,7 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
- 
+
 #include "core/basics/Config.h"
 #include "vislearning/cbaselib/Feature.h"
 
@@ -21,50 +21,53 @@ namespace OBJREC {
 class EOHFeature : public Feature
 {
 
-    protected:
-
-	enum {
-	    EOH_VALUE = 0,
-	    EOH_RATIO,
-	    EOH_DOMINANT_ORIENTATION,
-	    EOH_NUMTYPES
-	};
-
-	/** @{ feature parameter */
-	int window_size_x;
-	int window_size_y;
-	int bin;
-	int bin2; // used for EOH_RATIO
-	int type;
-	/** @} */
-
-
-	/** @{ parameter for feature generation */
-	int numScales;
-	int numBins;
-	double scaleStep;
-	int maxdepth;
-	/** @} */
-
-    public:
-  
-	/** simple constructor */
-	EOHFeature( const NICE::Config *conf );
-
-	/** internally used by EOHFeature::explode */
-	EOHFeature () { bin = bin2 = 0; type = EOH_VALUE; };
-      
-	/** simple destructor */
-	virtual ~EOHFeature();
-     
-	double val( const Example *example ) const;
-	void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
-	Feature *clone() const;
-	Feature *generateFirstParameter () const;
-
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
+  protected:
+
+    enum {
+      EOH_VALUE = 0,
+      EOH_RATIO,
+      EOH_DOMINANT_ORIENTATION,
+      EOH_NUMTYPES
+    };
+
+    /** @{ feature parameter */
+    int window_size_x;
+    int window_size_y;
+    int bin;
+    int bin2; // used for EOH_RATIO
+    int type;
+    /** @} */
+
+
+    /** @{ parameter for feature generation */
+    int numScales;
+    int numBins;
+    double scaleStep;
+    int maxdepth;
+    /** @} */
+
+  public:
+
+    /** simple constructor */
+    EOHFeature ( const NICE::Config *conf );
+
+    /** internally used by EOHFeature::explode */
+    EOHFeature () {
+      bin = bin2 = 0;
+      type = EOH_VALUE;
+    };
+
+    /** simple destructor */
+    virtual ~EOHFeature();
+
+    double val ( const Example *example ) const;
+    void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
+    Feature *clone() const;
+    Feature *generateFirstParameter () const;
+
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
 };
 
 

+ 57 - 51
features/fpfeatures/FIGradients.cpp

@@ -8,67 +8,73 @@ using namespace NICE;
 using namespace std;
 
 void FIGradients::buildEOHMap ( CachedExample *ce,
-				int subsamplex, int subsampley,
-				int numBins, bool usesigned )
+                                int subsamplex, int subsampley,
+                                int numBins, bool usesigned )
 {
-    int xsize;
-    int ysize;
-    ce->getImageSize ( xsize, ysize );
-    int xsize_s = xsize / subsamplex;
-    int ysize_s = ysize / subsampley;
+  int xsize;
+  int ysize;
+  ce->getImageSize ( xsize, ysize );
+  int xsize_s = xsize / subsamplex;
+  int ysize_s = ysize / subsampley;
 
-    NICE::MultiChannelImageT<double> & eohimg = ce->getDChannel ( CachedExample::D_EOH );
-    eohimg.reInit ( xsize_s, ysize_s, numBins, true );
-    
-    double *gradient = new double[xsize*ysize];
-    int *dir = new int[xsize*ysize];
+  NICE::MultiChannelImageT<double> & eohimg = ce->getDChannel ( CachedExample::D_EOH );
+  eohimg.reInit ( xsize_s, ysize_s, numBins);
 
-    if ( ce->colorInformationAvailable() ) {
-	NICE::MultiChannelImageT<int> & colorimg = ce->getIChannel ( CachedExample::I_COLOR );
-	const int *r = colorimg.data[0];
-	const int *g = colorimg.data[1];
-	const int *b = colorimg.data[2];
-	FastFilter::calcColorGradient ( r,g,b,xsize,ysize,
-			    gradient, dir, numBins, usesigned );
-    } else {
-	NICE::MultiChannelImageT<int> & grayvalues = ce->getIChannel ( CachedExample::I_GRAYVALUES );
-	const int *gr = grayvalues.data[0];
-	FastFilter::calcGradient ( gr, xsize, ysize, gradient, dir, numBins, usesigned );
-    }
+  double *gradient = new double[xsize*ysize];
+  int *dir = new int[xsize*ysize];
+
+  if ( ce->colorInformationAvailable() ) {
+    NICE::MultiChannelImageT<int> & colorimg = ce->getIChannel ( CachedExample::I_COLOR );
+    int **data = colorimg.getDataPointer();
+    const int *r = data[0];
+    const int *g = data[1];
+    const int *b = data[2];
+    FastFilter::calcColorGradient ( r, g, b, xsize, ysize,
+                                    gradient, dir, numBins, usesigned );
+  } else {
+    NICE::MultiChannelImageT<int> & grayvalues = ce->getIChannel ( CachedExample::I_GRAYVALUES );
+    int **data = grayvalues.getDataPointer();
+    const int *gr = data[0];
+    FastFilter::calcGradient ( gr, xsize, ysize, gradient, dir, numBins, usesigned );
+  }
 
-    eohimg.setAll ( 0 );
+  eohimg.setAll ( 0 );
+  double **data = eohimg.getDataPointer();
+  long korig = 0;
+  for ( int y = 0 ; y < ysize ; y++ )
+    for ( int x = 0 ; x < xsize ; x++, korig++ )
+    {
+      int xs = x / subsamplex;
+      int ys = y / subsampley;
 
-    long korig = 0;
-    for ( int y = 0 ; y < ysize ; y++ )
-	for ( int x = 0 ; x < xsize ; x++,korig++ )
-	{
-	    int xs = x / subsamplex;
-	    int ys = y / subsampley;
+      if ( xs >= xsize_s ) xs = xsize_s - 1;
+      if ( xs < 0 ) xs = 0;
+      if ( ys >= ysize_s ) ys = ysize_s - 1;
+      if ( ys < 0 ) ys = 0;
+      int k = xs + ys * xsize_s;
+      int val = dir[korig];
+      double strength = gradient[korig];
 
-	    if ( xs >= xsize_s ) xs = xsize_s-1;
-	    if ( xs < 0 ) xs = 0;
-	    if ( ys >= ysize_s ) ys = ysize_s-1;
-	    if ( ys < 0 ) ys = 0;
-	    int k = xs + ys*xsize_s;
-	    int val = dir[korig];
-	    double strength = gradient[korig];
+      assert ( val < eohimg.channels() );
 
-	    assert ( val < eohimg.numChannels );
-	    eohimg.data[val][k] += strength;
+      data[val][k] += strength;
 
-	    if ( !finite(eohimg.data[val][k]) ) {
-		fprintf (stderr, "EOH Image failed: %f\n", eohimg.data[val][k]);
-		exit(-1);
-	    }
-	}
+      if ( !finite ( data[val][k] ) ) {
+        fprintf ( stderr, "EOH Image failed: %f\n", data[val][k] );
+        exit ( -1 );
+      }
+    }
 
-    delete [] gradient;
-    delete [] dir;
+  delete [] gradient;
+  delete [] dir;
 
-    NICE::MultiChannelImageT<double> & eohintimg = ce->getDChannel ( CachedExample::D_INTEGRALEOH );
-    eohintimg.reInit ( xsize_s, ysize_s, numBins, true );
-    for ( uint i = 0 ; i < (uint)numBins ; i++ )
-	GenericImageTools::calcIntegralImage ( eohintimg.data[i], eohimg.data[i], xsize_s, ysize_s );
+  NICE::MultiChannelImageT<double> & eohintimg = ce->getDChannel ( CachedExample::D_INTEGRALEOH );
+  eohintimg.reInit ( xsize_s, ysize_s, numBins );
+  for ( uint i = 0 ; i < ( uint ) numBins ; i++ )
+  {
+    ImageT<double> tmp = eohimg[i];
+    GenericImageTools::calcIntegralImage ( tmp, tmp, xsize_s, ysize_s );
+  }
 
 }
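In effect, buildEOHMap accumulates, for every subsampled cell (x_s, y_s), the gradient magnitudes of all pixels falling into that cell whose quantized orientation equals bin b, and then converts each of the numBins channels into an integral image:

    \mathrm{EOH}_b(x_s, y_s) = \sum_{\lfloor x / \mathrm{subsamplex} \rfloor = x_s,\; \lfloor y / \mathrm{subsampley} \rfloor = y_s,\; \mathrm{dir}(x,y) = b} \lVert \nabla I(x,y) \rVert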
 

+ 8 - 8
features/fpfeatures/FIGradients.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file FIGradients.h
 * @brief feature images storing gradient information
 * @author Erik Rodner
@@ -15,14 +15,14 @@ namespace OBJREC {
 
 class FIGradients {
 
-    public:
+  public:
 
-    static void buildEOHMap ( 
-	    CachedExample *ce,
-	    int subsamplex, 
-	    int subsampley,
-	    int numBins, 
-	    bool usesigned );
+    static void buildEOHMap (
+      CachedExample *ce,
+      int subsamplex,
+      int subsampley,
+      int numBins,
+      bool usesigned );
 
 };
 

+ 80 - 80
features/fpfeatures/FIHistograms.cpp

@@ -9,102 +9,102 @@ using namespace OBJREC;
 using namespace NICE;
 using namespace std;
 
-void FIHistograms::buildHSVMap ( 
-	    CachedExample *ce,
-	    int subsamplex, 
-	    int subsampley,
-	    int numBinsH, 
-	    int numBinsS,
-	    int numBinsV )
+void FIHistograms::buildHSVMap (
+  CachedExample *ce,
+  int subsamplex,
+  int subsampley,
+  int numBinsH,
+  int numBinsS,
+  int numBinsV )
 {
-    // build HSV image
-    // discrete !!
-    // build integral images
-
-    int numBins = numBinsH*numBinsS*numBinsV;
-    int xsize;
-    int ysize;
-    ce->getImageSize ( xsize, ysize );
-    
-    int xsize_s = xsize / subsamplex;
-    int ysize_s = ysize / subsampley;
-
-    if ( ! ce->colorInformationAvailable() ) {
-	fprintf (stderr, "FIHistograms: No color information available !\n");
-	exit(-1);
-    }
-
-    NICE::MultiChannelImageT<int> & colorimg = ce->getIChannel ( CachedExample::I_COLOR );
-    assert ( colorimg.numChannels == 3 );
-
-    NICE::MultiChannelImageT<double> hsvimg ( xsize, ysize, colorimg.numChannels, true );
+  // build HSV image
+  // discrete !!
+  // build integral images
 
-    ColorSpace::convert ( hsvimg, colorimg, 
-			  ColorSpace::COLORSPACE_HSL, 
-			  ColorSpace::COLORSPACE_RGB,
-			  1.0, 255.0 );
+  int numBins = numBinsH * numBinsS * numBinsV;
+  int xsize;
+  int ysize;
+  ce->getImageSize ( xsize, ysize );
 
-    int *discretecolor = new int [ xsize * ysize ];
+  int xsize_s = xsize / subsamplex;
+  int ysize_s = ysize / subsampley;
 
-    long k = 0;
-    for ( int y = 0 ; y < hsvimg.ysize ; y++ )
-	for ( int x = 0 ; x < hsvimg.xsize ; x++,k++ )
-	{
-	    double h = hsvimg.data[0][k];
-	    double s = hsvimg.data[1][k];
-	    double v = hsvimg.data[2][k];
+  if ( ! ce->colorInformationAvailable() ) {
+    fprintf ( stderr, "FIHistograms: No color information available !\n" );
+    exit ( -1 );
+  }
 
-	    int hbin = (int)(numBinsH * h);
-	    if ( hbin >= numBinsH ) hbin = numBinsH - 1;
-	    int sbin = (int)(numBinsS * s);
-	    if ( sbin >= numBinsS ) sbin = numBinsS - 1;
-	    int vbin = (int)(numBinsV * v);
-	    if ( vbin >= numBinsV ) vbin = numBinsV - 1;
+  NICE::MultiChannelImageT<int> & colorimg = ce->getIChannel ( CachedExample::I_COLOR );
+  assert ( colorimg.channels() == 3 );
 
-	    int bin = ( hbin*numBinsS + sbin )*numBinsV + vbin;
+  NICE::MultiChannelImageT<double> hsvimg ( xsize, ysize, colorimg.channels() );
 
-	    discretecolor[k] = bin;
-	}
+  ColorSpace::convert ( hsvimg, colorimg,
+                        ColorSpace::COLORSPACE_HSL,
+                        ColorSpace::COLORSPACE_RGB,
+                        1.0, 255.0 );
 
-    hsvimg.freeData();
+  int *discretecolor = new int [ xsize * ysize ];
 
-    NICE::MultiChannelImageT<double> & colorhist = ce->getDChannel ( CachedExample::D_INTEGRALCOLOR );
-    colorhist.reInit ( xsize_s, ysize_s, numBins, true );
-    colorhist.setAll ( 0 );
+  long k = 0;
+  for ( int y = 0 ; y < hsvimg.height() ; y++ )
+    for ( int x = 0 ; x < hsvimg.width() ; x++, k++ )
+    {
+      double h = hsvimg.get(x,y,0);
+      double s = hsvimg.get(x,y,1);
+      double v = hsvimg.get(x,y,2);
 
-    long korig = 0;
-    for ( int y = 0 ; y < ysize ; y++ )
-	for ( int x = 0 ; x < xsize ; x++,korig++ )
-	{
-	    int xs = x / subsamplex;
-	    int ys = y / subsampley;
+      int hbin = ( int ) ( numBinsH * h );
+      if ( hbin >= numBinsH ) hbin = numBinsH - 1;
+      int sbin = ( int ) ( numBinsS * s );
+      if ( sbin >= numBinsS ) sbin = numBinsS - 1;
+      int vbin = ( int ) ( numBinsV * v );
+      if ( vbin >= numBinsV ) vbin = numBinsV - 1;
 
-	    if ( xs >= xsize_s ) xs = xsize_s-1;
-	    if ( xs < 0 ) xs = 0;
-	    if ( ys >= ysize_s ) ys = ysize_s-1;
-	    if ( ys < 0 ) ys = 0;
-	    int k = xs + ys*xsize_s;
-	    int val = discretecolor[korig];
+      int bin = ( hbin * numBinsS + sbin ) * numBinsV + vbin;
 
-	    if ( val >= colorhist.numChannels )
-	    {
-		fprintf (stderr, "v %d nc %d\n", val, colorhist.numChannels );
-	    }
-	    colorhist.data[val][k] += 1;
+      discretecolor[k] = bin;
+    }
 
-	    if ( !finite(colorhist.data[val][k]) ) {
-		fprintf (stderr, "EOH Image failed: %f\n", colorhist.data[val][k]);
-		exit(-1);
-	    }
-	}
+  hsvimg.freeData();
+
+  NICE::MultiChannelImageT<double> & colorhist = ce->getDChannel ( CachedExample::D_INTEGRALCOLOR );
+  colorhist.reInit ( xsize_s, ysize_s, numBins);
+  colorhist.setAll ( 0 );
+
+  long korig = 0;
+  for ( int y = 0 ; y < ysize ; y++ )
+    for ( int x = 0 ; x < xsize ; x++, korig++ )
+    {
+      int xs = x / subsamplex;
+      int ys = y / subsampley;
+
+      if ( xs >= xsize_s ) xs = xsize_s - 1;
+      if ( xs < 0 ) xs = 0;
+      if ( ys >= ysize_s ) ys = ysize_s - 1;
+      if ( ys < 0 ) ys = 0;
+      int k = xs + ys * xsize_s;
+      int val = discretecolor[korig];
+
+      if ( val >= colorhist.channels() )
+      {
+        fprintf ( stderr, "v %d nc %d\n", val, colorhist.channels() );
+      }
+      colorhist[val](x,y) += 1;
+
+      if ( !finite ( colorhist[val](x,y) ) ) {
+        fprintf ( stderr, "EOH Image failed: %f\n", colorhist[val](x,y) );
+        exit ( -1 );
+      }
+    }
 
-    delete [] discretecolor;
+  delete [] discretecolor;
 
-    fprintf (stderr, "Calculating Integral Images\n");
+  fprintf ( stderr, "Calculating Integral Images\n" );
 
-    for ( uint i = 0 ; i < colorhist.numChannels ; i++ )
-	colorhist.calcIntegral ( i );
+  for ( uint i = 0 ; i < colorhist.channels() ; i++ )
+    colorhist.calcIntegral ( i );
 
-    fprintf (stderr, "FIGradients: finished\n");
+  fprintf ( stderr, "FIGradients: finished\n" );
 
 }
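The discretization above packs the three channel indices into a single histogram bin,

    \mathrm{bin} = ( h_{\mathrm{bin}} \cdot \mathrm{numBinsS} + s_{\mathrm{bin}} ) \cdot \mathrm{numBinsV} + v_{\mathrm{bin}}.

As a worked example with the 4/2/2 binning that ColorHistogramFeature reads from the config, h_bin = 2, s_bin = 1, v_bin = 0 gives bin = (2*2 + 1)*2 + 0 = 10 out of 4*2*2 = 16 bins.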

+ 191 - 187
features/fpfeatures/HOGFeature.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file HOGFeature.cpp
 * @brief histogram of oriented gradients ( dalal and triggs )
 * @author Erik Rodner
@@ -19,17 +19,17 @@ using namespace NICE;
 const double epsilon = 10e-8;
 
 /** simple constructor */
-HOGFeature::HOGFeature( const Config *conf )
+HOGFeature::HOGFeature ( const Config *conf )
 {
-    window_size_x = conf->gI("HOGFeature", "window_size_x", 21 );
-    window_size_y = conf->gI("HOGFeature", "window_size_y", 21 );
-    scaleStep = conf->gD("HOGFeature", "scale_step", sqrt(2) );
-    numScales = conf->gI("HOGFeature", "num_scales", 5 );
-    flexibleGrid = conf->gB("HOGFeature", "flexible_grid", false );
-
-    numBins = conf->gI("HOGFeature", "num_bins", 9 );
-    cellcountx = conf->gI("HOGFeature", "cellcountx", 10 );
-    cellcounty = conf->gI("HOGFeature", "cellcounty", 10 );
+  window_size_x = conf->gI ( "HOGFeature", "window_size_x", 21 );
+  window_size_y = conf->gI ( "HOGFeature", "window_size_y", 21 );
+  scaleStep = conf->gD ( "HOGFeature", "scale_step", sqrt ( 2 ) );
+  numScales = conf->gI ( "HOGFeature", "num_scales", 5 );
+  flexibleGrid = conf->gB ( "HOGFeature", "flexible_grid", false );
+
+  numBins = conf->gI ( "HOGFeature", "num_bins", 9 );
+  cellcountx = conf->gI ( "HOGFeature", "cellcountx", 10 );
+  cellcounty = conf->gI ( "HOGFeature", "cellcounty", 10 );
 }
 
 /** simple destructor */
@@ -37,207 +37,211 @@ HOGFeature::~HOGFeature()
 {
 }
 
-double HOGFeature::val( const Example *example ) const
+double HOGFeature::val ( const Example *example ) const
 {
-    const NICE::MultiChannelImageT<double> & img = 
-	example->ce->getDChannel ( CachedExample::D_INTEGRALEOH );
-    int tm_xsize = img.xsize;
-    int tm_ysize = img.ysize;
-
-    int xsize;
-    int ysize;
-    example->ce->getImageSize ( xsize, ysize );
-
-    /** without overlap: normalized cell and bin **/
-
-    int wsx2, wsy2;
-    int exwidth = example->width;
-    if ( exwidth == 0 ) {
-	wsx2 = window_size_x * tm_xsize / (2*xsize);
-	wsy2 = window_size_y * tm_ysize / (2*ysize);
-    } else {
-	int exheight = example->height;
-	wsx2 = exwidth * tm_xsize / (2*xsize);
-	wsy2 = exheight * tm_ysize / (2*ysize);
-    }
-	
-    int xx, yy;
-    xx = ( example->x ) * tm_xsize / xsize;
-    yy = ( example->y ) * tm_ysize / ysize;
-
-    assert ( (wsx2 > 0) && (wsy2 > 0) );
-
-    int xtl = xx - wsx2;
-    int ytl = yy - wsy2;
-    int xrb = xx + wsx2;
-    int yrb = yy + wsy2;
+  const NICE::MultiChannelImageT<double> & img =
+    example->ce->getDChannel ( CachedExample::D_INTEGRALEOH );
+  int tm_xsize = img.width();
+  int tm_ysize = img.height();
+
+  int xsize;
+  int ysize;
+  example->ce->getImageSize ( xsize, ysize );
+
+  /** without overlap: normalized cell and bin **/
+
+  int wsx2, wsy2;
+  int exwidth = example->width;
+  if ( exwidth == 0 ) 
+  {
+    wsx2 = window_size_x * tm_xsize / ( 2 * xsize );
+    wsy2 = window_size_y * tm_ysize / ( 2 * ysize );
+  } 
+  else 
+  {
+    int exheight = example->height;
+    wsx2 = exwidth * tm_xsize / ( 2 * xsize );
+    wsy2 = exheight * tm_ysize / ( 2 * ysize );
+  }
+
+  int xx, yy;
+  xx = ( example->x ) * tm_xsize / xsize;
+  yy = ( example->y ) * tm_ysize / ysize;
+
+  assert ( ( wsx2 > 0 ) && ( wsy2 > 0 ) );
+
+  int xtl = xx - wsx2;
+  int ytl = yy - wsy2;
+  int xrb = xx + wsx2;
+  int yrb = yy + wsy2;
 
 #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
-    xtl = BOUND ( xtl, 0, tm_xsize - 1 );
-    ytl = BOUND ( ytl, 0, tm_ysize - 1 );
-    xrb = BOUND ( xrb, 0, tm_xsize - 1 );
-    yrb = BOUND ( yrb, 0, tm_ysize - 1 );
+  xtl = BOUND ( xtl, 0, tm_xsize - 1 );
+  ytl = BOUND ( ytl, 0, tm_ysize - 1 );
+  xrb = BOUND ( xrb, 0, tm_xsize - 1 );
+  yrb = BOUND ( yrb, 0, tm_ysize - 1 );
 #undef BOUND
 
-    double stepx = (xrb - xtl) / (double)( cellcountx );
-    double stepy = (yrb - ytl) / (double)( cellcounty );
-    int cxtl = (int)(xtl + stepx*cellx1);
-    int cytl = (int)(ytl + stepy*celly1);
-    int cxrb = (int)(xtl + stepx*cellx2);
-    int cyrb = (int)(ytl + stepy*celly2);
-
-    if ( cxrb <= cxtl ) cxrb = cxtl+1;
-    if ( cyrb <= cytl ) cyrb = cytl+1;
-
-    double A,B,C,D;
-
-    assert ( bin < (int)img.numChannels );
-    assert ( img.data[bin] != NULL );
-
-    if ( (cxtl < 0) || (cxtl >= tm_xsize) )
-    {
-	fprintf (stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
-	fprintf (stderr, "cxtl %d tm_xsize %d xsize %d\n", cxtl, tm_xsize, xsize );
-	fprintf (stderr, "cellx1 %d stepx %f xtl %d xrb %d\n", cellx1, stepx, xtl, xrb );
-    }
-    if ( (cxrb < 0) || (cxrb >= tm_xsize) )
-    {
-	fprintf (stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
-	fprintf (stderr, "cxrb %d tm_xsize %d xsize %d\n", cxrb, tm_xsize, xsize );
-	fprintf (stderr, "cellx1 %d stepx %f xtl %d xrb %d\n", cellx1, stepx, xtl, xrb );
-    }
-    if ( (cytl < 0) || (cytl >= tm_ysize) )
-    {
-	fprintf (stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
-	fprintf (stderr, "cytl %d tm_ysize %d ysize %d\n", cytl, tm_ysize, ysize );
-	fprintf (stderr, "celly1 %d stepy %f ytl %d yrb %d\n", celly1, stepy, ytl, yrb );
-    }
-    if ( (cyrb < 0) || (cyrb >= tm_ysize) )
-    {
-	fprintf (stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
-	fprintf (stderr, "cyrb %d tm_ysize %d ysize %d\n", cyrb, tm_ysize, ysize );
-	fprintf (stderr, "celly1 %d stepy %f ytl %d yrb %d\n", celly1, stepy, ytl, yrb );
-    }
-
-    long kA = cxtl + cytl * tm_xsize;
-    long kB = cxrb + cytl * tm_xsize;
-    long kC = cxtl + cyrb * tm_xsize;
-    long kD = cxrb + cyrb * tm_xsize;
-    A = img.data[bin][ kA ];
-    B = img.data[bin][ kB ];
-    C = img.data[bin][ kC ];
-    D = img.data[bin][ kD ];
-
-    double val1 =  (D - B - C + A);
-    double sum = val1*val1;
-	for ( int b = 0 ; b < (int)img.numChannels ; b++)
-	{
-		if ( b == bin ) continue;
-		A = img.data[b][ kA ];
-		B = img.data[b][ kB ];
-		C = img.data[b][ kC ];
-		D = img.data[b][ kD ];
-		double val = ( D - B - C + A );
-		sum += val*val;
-	}
-    // FIXME: maybe L_1 normalization is sufficient
-    sum = sqrt(sum);
-    return ( val1 + epsilon ) / ( sum + epsilon );
+  double stepx = ( xrb - xtl ) / ( double ) ( cellcountx );
+  double stepy = ( yrb - ytl ) / ( double ) ( cellcounty );
+  int cxtl = ( int ) ( xtl + stepx * cellx1 );
+  int cytl = ( int ) ( ytl + stepy * celly1 );
+  int cxrb = ( int ) ( xtl + stepx * cellx2 );
+  int cyrb = ( int ) ( ytl + stepy * celly2 );
+
+  if ( cxrb <= cxtl ) cxrb = cxtl + 1;
+  if ( cyrb <= cytl ) cyrb = cytl + 1;
+
+  double A, B, C, D;
+
+  assert ( bin < ( int ) img.channels() );
+
+  if ( ( cxtl < 0 ) || ( cxtl >= tm_xsize ) )
+  {
+    fprintf ( stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
+    fprintf ( stderr, "cxtl %d tm_xsize %d xsize %d\n", cxtl, tm_xsize, xsize );
+    fprintf ( stderr, "cellx1 %d stepx %f xtl %d xrb %d\n", cellx1, stepx, xtl, xrb );
+  }
+  if ( ( cxrb < 0 ) || ( cxrb >= tm_xsize ) )
+  {
+    fprintf ( stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
+    fprintf ( stderr, "cxrb %d tm_xsize %d xsize %d\n", cxrb, tm_xsize, xsize );
+    fprintf ( stderr, "cellx1 %d stepx %f xtl %d xrb %d\n", cellx1, stepx, xtl, xrb );
+  }
+  if ( ( cytl < 0 ) || ( cytl >= tm_ysize ) )
+  {
+    fprintf ( stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
+    fprintf ( stderr, "cytl %d tm_ysize %d ysize %d\n", cytl, tm_ysize, ysize );
+    fprintf ( stderr, "celly1 %d stepy %f ytl %d yrb %d\n", celly1, stepy, ytl, yrb );
+  }
+  if ( ( cyrb < 0 ) || ( cyrb >= tm_ysize ) )
+  {
+    fprintf ( stderr, "cellcountx %d cellcounty %d\n", cellcountx, cellcounty );
+    fprintf ( stderr, "cyrb %d tm_ysize %d ysize %d\n", cyrb, tm_ysize, ysize );
+    fprintf ( stderr, "celly1 %d stepy %f ytl %d yrb %d\n", celly1, stepy, ytl, yrb );
+  }
+
+  long kA = cxtl + cytl * tm_xsize;
+  long kB = cxrb + cytl * tm_xsize;
+  long kC = cxtl + cyrb * tm_xsize;
+  long kD = cxrb + cyrb * tm_xsize;
+
+  A = img.get ( cxtl, cytl, bin );
+  B = img.get ( cxrb, cytl, bin );
+  C = img.get ( cxtl, cyrb, bin );
+  D = img.get ( cxrb, cyrb, bin );
+
+  double val1 = ( D - B - C + A );
+  double sum = val1 * val1;
+  for ( int b = 0 ; b < ( int ) img.channels() ; b++ )
+  {
+    if ( b == bin ) 
+      continue;
+    A = img.get ( cxtl, cytl, b );
+    B = img.get ( cxrb, cytl, b );
+    C = img.get ( cxtl, cyrb, b );
+    D = img.get ( cxrb, cyrb, b );
+    double val = ( D - B - C + A );
+    sum += val * val;
+  }
+  // FIXME: maybe L_1 normalization is sufficient
+  sum = sqrt ( sum );
+  return ( val1 + epsilon ) / ( sum + epsilon );
 }
 
 void HOGFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-    int nScales = (variableWindow ? numScales : 1 );
-
-    double weight = 1.0 / ( numBins * nScales );
-
-    if ( flexibleGrid ) 
-	weight *= 4.0 / ( cellcountx * (cellcountx - 1) * (cellcounty - 1) * cellcounty );
-    else
-	weight *= 1.0 / (cellcountx * cellcounty);
-
-    for ( int i = 0 ; i < nScales ; i++ )
-    {
-	int wsy = window_size_y;
-	int wsx = window_size_x;
-	for ( int _cellx1 = 0 ; _cellx1 < cellcountx ; _cellx1++ )
-	    for ( int _celly1 = 0 ; _celly1 < cellcounty ; _celly1++ )
-		for ( int _cellx2 = _cellx1+1 ; 
-			  _cellx2 < (flexibleGrid ? cellcountx : _cellx1+2) ; 
-			  _cellx2++ )
-		    for ( int _celly2 = _celly1+1 ; 
-			      _celly2 < (flexibleGrid ? cellcounty : 
-			      _celly1+2) ; _celly2++ )
-			for ( int _bin = 0 ; _bin < numBins ; _bin++ )
-			{
-			    HOGFeature *f = new HOGFeature();
-			    f->window_size_x = wsx;
-			    f->window_size_y = wsy;
-			    f->bin = _bin;
-			    f->cellx1 = _cellx1;
-			    f->celly1 = _celly1;
-			    f->cellx2 = _cellx2;
-			    f->celly2 = _celly2;
-			    f->cellcountx = cellcountx;
-			    f->cellcounty = cellcounty;
-			    featurePool.addFeature ( f, weight ); 
-			}
-	wsx = (int) (scaleStep * wsx);
-	wsy = (int) (scaleStep * wsy);
-    }
+  int nScales = ( variableWindow ? numScales : 1 );
+
+  double weight = 1.0 / ( numBins * nScales );
+
+  if ( flexibleGrid )
+    weight *= 4.0 / ( cellcountx * ( cellcountx - 1 ) * ( cellcounty - 1 ) * cellcounty );
+  else
+    weight *= 1.0 / ( cellcountx * cellcounty );
+
+  for ( int i = 0 ; i < nScales ; i++ )
+  {
+    int wsy = window_size_y;
+    int wsx = window_size_x;
+    for ( int _cellx1 = 0 ; _cellx1 < cellcountx ; _cellx1++ )
+      for ( int _celly1 = 0 ; _celly1 < cellcounty ; _celly1++ )
+        for ( int _cellx2 = _cellx1 + 1 ;
+              _cellx2 < ( flexibleGrid ? cellcountx : _cellx1 + 2 ) ;
+              _cellx2++ )
+          for ( int _celly2 = _celly1 + 1 ;
+                _celly2 < ( flexibleGrid ? cellcounty :
+                            _celly1 + 2 ) ; _celly2++ )
+            for ( int _bin = 0 ; _bin < numBins ; _bin++ )
+            {
+              HOGFeature *f = new HOGFeature();
+              f->window_size_x = wsx;
+              f->window_size_y = wsy;
+              f->bin = _bin;
+              f->cellx1 = _cellx1;
+              f->celly1 = _celly1;
+              f->cellx2 = _cellx2;
+              f->celly2 = _celly2;
+              f->cellcountx = cellcountx;
+              f->cellcounty = cellcounty;
+              featurePool.addFeature ( f, weight );
+            }
+    wsx = ( int ) ( scaleStep * wsx );
+    wsy = ( int ) ( scaleStep * wsy );
+  }
 }
 
 Feature *HOGFeature::clone() const
 {
-    HOGFeature *f = new HOGFeature();
-    f->window_size_x = window_size_x;
-    f->window_size_y = window_size_y;
-    f->bin = bin;
-    f->cellx1 = cellx1;
-    f->celly1 = celly1;
-    f->cellx2 = cellx2;
-    f->celly2 = celly2;
-    f->cellcountx = cellcountx;
-    f->cellcounty = cellcounty;
-    f->flexibleGrid = flexibleGrid;
-
-    return f;
+  HOGFeature *f = new HOGFeature();
+  f->window_size_x = window_size_x;
+  f->window_size_y = window_size_y;
+  f->bin = bin;
+  f->cellx1 = cellx1;
+  f->celly1 = celly1;
+  f->cellx2 = cellx2;
+  f->celly2 = celly2;
+  f->cellcountx = cellcountx;
+  f->cellcounty = cellcounty;
+  f->flexibleGrid = flexibleGrid;
+
+  return f;
 }
 
 Feature *HOGFeature::generateFirstParameter () const
 {
-    return clone();
+  return clone();
 }
 
-void HOGFeature::restore (istream & is, int format)
+void HOGFeature::restore ( istream & is, int format )
 {
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> bin;
-    is >> cellx1;
-    is >> celly1;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> bin;
+  is >> cellx1;
+  is >> celly1;
 
-    is >> cellx2;
-    is >> celly2;
+  is >> cellx2;
+  is >> celly2;
 
-    is >> cellcountx;
-    is >> cellcounty;
+  is >> cellcountx;
+  is >> cellcounty;
 }
 
-void HOGFeature::store (ostream & os, int format) const
+void HOGFeature::store ( ostream & os, int format ) const
 {
-    os << "HOGFEATURE "
-       << window_size_x << " "
-       << window_size_y << " "
-       << bin << " "
-       << cellx1 << " "
-       << celly1 << " ";
-
-   os << cellx2 << " "
-      << celly2 << " ";
-
-    os << cellcountx << " "
-       << cellcounty;
+  os << "HOGFEATURE "
+  << window_size_x << " "
+  << window_size_y << " "
+  << bin << " "
+  << cellx1 << " "
+  << celly1 << " ";
+
+  os << cellx2 << " "
+  << celly2 << " ";
+
+  os << cellcountx << " "
+  << cellcounty;
 }
 
 void HOGFeature::clear ()

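Note on the weights assigned in HOGFeature::explode() above: with the flexible grid, every scale and orientation bin contributes one feature per pair of cell indices (cellx1 < cellx2, celly1 < celly2), i.e. cellcountx*(cellcountx-1)/2 times cellcounty*(cellcounty-1)/2 combinations, so the factor 4.0 / ( cellcountx * (cellcountx-1) * (cellcounty-1) * cellcounty ) makes the pool weights sum to one. A minimal standalone sketch (not part of the commit; the cell counts and bin number below are made-up example values) that mirrors the loop bounds and checks this:

// standalone check of the pool weight normalization used by explode();
// cellcountx/cellcounty/numBins/flexibleGrid are illustrative values only
#include <cstdio>

int main ()
{
  const int cellcountx = 4, cellcounty = 4, numBins = 9, nScales = 1;
  const bool flexibleGrid = true;

  double weight = 1.0 / ( numBins * nScales );
  if ( flexibleGrid )
    weight *= 4.0 / ( cellcountx * ( cellcountx - 1 ) * ( cellcounty - 1 ) * cellcounty );
  else
    weight *= 1.0 / ( cellcountx * cellcounty );

  double total = 0.0;
  for ( int cx1 = 0 ; cx1 < cellcountx ; cx1++ )
    for ( int cy1 = 0 ; cy1 < cellcounty ; cy1++ )
      for ( int cx2 = cx1 + 1 ; cx2 < ( flexibleGrid ? cellcountx : cx1 + 2 ) ; cx2++ )
        for ( int cy2 = cy1 + 1 ; cy2 < ( flexibleGrid ? cellcounty : cy1 + 2 ) ; cy2++ )
          for ( int bin = 0 ; bin < numBins ; bin++ )
            total += weight;

  printf ( "total pool weight: %f\n", total );  // 1.0 for the flexible grid
  return 0;
}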
+ 8 - 4
features/fpfeatures/HaarFeature.cpp

@@ -89,9 +89,9 @@ double HaarFeature::val ( const Example *example ) const
 {
   const NICE::MultiChannelImageT<long> & img = example->ce->getLChannel ( CachedExample::L_INTEGRALIMAGE );
 
-  const long *integralImage = img.data[0];
-  int xsize = img.xsize;
-  int ysize = img.ysize;
+  //const long *integralImage = img.data[0];
+  int xsize = img.width();
+  int ysize = img.height();
 
   int x = example->x;
   int y = example->y;
@@ -127,6 +127,7 @@ double HaarFeature::val ( const Example *example ) const
   assert ( pos1 >= 0 );
   assert ( pos2 >= 0 );
 
+#if 0
   double value;
   if ( type == HaarFeature::HAARTYPE_HORIZONTAL )
   {
@@ -193,7 +194,10 @@ double HaarFeature::val ( const Example *example ) const
   }
 
   assert ( finite ( value ) );
-
+#else
+  throw("not yet adapted for new MultiChannelImageT!");
+  double value = 0.0;
+#endif
   return value;
 }
 

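The HaarFeature hunk above follows the same pattern as the rest of this merge: direct access to the internals of NICE::MultiChannelImageT (data[channel], xsize, ysize, numChannels) is replaced by the accessor interface. A minimal illustrative helper (not part of the commit) showing the before/after access style:

#include "core/image/MultiChannelImageT.h"

// read one pixel of one channel via the new accessor interface;
// the commented line shows the raw-pointer style that is being removed
template <class T>
T getPixel ( const NICE::MultiChannelImageT<T> & img, int x, int y, int channel )
{
  // old style: return img.data[channel][ x + y * img.xsize ];
  return img.get ( x, y, channel );
  // image size and channel count: img.width(), img.height(), img.channels()
}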
+ 163 - 168
features/fpfeatures/HistFeature.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file HistFeature.cpp
 * @brief histogram integral feature
 * @author Erik Rodner
@@ -21,22 +21,22 @@ using namespace NICE;
 const double epsilon = 10e-8;
 
 /** simple constructor */
-HistFeature::HistFeature( const Config *conf, 
-			  const std::string & section, 
-			  int _histtype,
-			  int _numBins )
+HistFeature::HistFeature ( const Config *conf,
+                           const std::string & section,
+                           int _histtype,
+                           int _numBins )
 {
-    window_size_x = conf->gI(section, "window_size_x", 21 );
-    window_size_y = conf->gI(section, "window_size_y", 21 );
-    scaleStep = conf->gD(section, "scale_step", sqrt(2) );
-    numScales = conf->gI(section, "num_scales", 5 );
+  window_size_x = conf->gI ( section, "window_size_x", 21 );
+  window_size_y = conf->gI ( section, "window_size_y", 21 );
+  scaleStep = conf->gD ( section, "scale_step", sqrt ( 2 ) );
+  numScales = conf->gI ( section, "num_scales", 5 );
 
-    flexibleGrid = conf->gB(section, "flexible_grid", false );
+  flexibleGrid = conf->gB ( section, "flexible_grid", false );
 
-    cellcountx = conf->gI(section, "cellcountx", 10 );
-    cellcounty = conf->gI(section, "cellcounty", 10 );
+  cellcountx = conf->gI ( section, "cellcountx", 10 );
+  cellcounty = conf->gI ( section, "cellcounty", 10 );
 
-    histtype = _histtype;
+  histtype = _histtype;
 }
 
 /** simple destructor */
@@ -44,185 +44,180 @@ HistFeature::~HistFeature()
 {
 }
 
-double HistFeature::val( const Example *example ) const
+double HistFeature::val ( const Example *example ) const
 {
-    const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel ( histtype );
-    int tm_xsize = img.xsize;
-    int tm_ysize = img.ysize;
-
-    int xsize;
-    int ysize;
-    example->ce->getImageSize ( xsize, ysize );
-
-    /** without overlap: normalized cell and bin **/
-
-    int wsx2, wsy2;
-    int exwidth = example->width;
-    if ( exwidth == 0 ) {
-	wsx2 = window_size_x * tm_xsize / (2*xsize);
-	wsy2 = window_size_y * tm_ysize / (2*ysize);
-    } else {
-	int exheight = example->height;
-	wsx2 = exwidth * tm_xsize / (2*xsize);
-	wsy2 = exheight * tm_ysize / (2*ysize);
-    }
-	
-    int xx, yy;
-    xx = ( example->x ) * tm_xsize / xsize;
-    yy = ( example->y ) * tm_ysize / ysize;
-
-    assert ( (wsx2 > 0) && (wsy2 > 0) );
-
-    int xtl = xx - wsx2;
-    int ytl = yy - wsy2;
-    int xrb = xx + wsx2;
-    int yrb = yy + wsy2;
+  const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel ( histtype );
+  int tm_xsize = img.width();
+  int tm_ysize = img.height();
+
+  int xsize;
+  int ysize;
+  example->ce->getImageSize ( xsize, ysize );
+
+  /** without overlap: normalized cell and bin **/
+
+  int wsx2, wsy2;
+  int exwidth = example->width;
+  if ( exwidth == 0 ) {
+    wsx2 = window_size_x * tm_xsize / ( 2 * xsize );
+    wsy2 = window_size_y * tm_ysize / ( 2 * ysize );
+  } else {
+    int exheight = example->height;
+    wsx2 = exwidth * tm_xsize / ( 2 * xsize );
+    wsy2 = exheight * tm_ysize / ( 2 * ysize );
+  }
+
+  int xx, yy;
+  xx = ( example->x ) * tm_xsize / xsize;
+  yy = ( example->y ) * tm_ysize / ysize;
+
+  assert ( ( wsx2 > 0 ) && ( wsy2 > 0 ) );
+
+  int xtl = xx - wsx2;
+  int ytl = yy - wsy2;
+  int xrb = xx + wsx2;
+  int yrb = yy + wsy2;
 
 #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
-    xtl = BOUND ( xtl, 0, tm_xsize - 1 );
-    ytl = BOUND ( ytl, 0, tm_ysize - 1 );
-    xrb = BOUND ( xrb, 0, tm_xsize - 1 );
-    yrb = BOUND ( yrb, 0, tm_ysize - 1 );
+  xtl = BOUND ( xtl, 0, tm_xsize - 1 );
+  ytl = BOUND ( ytl, 0, tm_ysize - 1 );
+  xrb = BOUND ( xrb, 0, tm_xsize - 1 );
+  yrb = BOUND ( yrb, 0, tm_ysize - 1 );
 #undef BOUND
 
-    double stepx = (xrb - xtl) / (double)( cellcountx );
-    double stepy = (yrb - ytl) / (double)( cellcounty );
-    int cxtl = (int)(xtl + stepx*cellx1);
-    int cytl = (int)(ytl + stepy*celly1);
-    int cxrb = (int)(xtl + stepx*cellx2);
-    int cyrb = (int)(ytl + stepy*celly2);
-
-    if ( cxrb <= cxtl ) cxrb = cxtl+1;
-    if ( cyrb <= cytl ) cyrb = cytl+1;
-
-    double A,B,C,D;
-
-    assert ( bin < (int)img.numChannels );
-    assert ( img.data[bin] != NULL );
-
-    long kA = cxtl + cytl * tm_xsize;
-    long kB = cxrb + cytl * tm_xsize;
-    long kC = cxtl + cyrb * tm_xsize;
-    long kD = cxrb + cyrb * tm_xsize;
-    A = img.data[bin][ kA ];
-    B = img.data[bin][ kB ];
-    C = img.data[bin][ kC ];
-    D = img.data[bin][ kD ];
-
-    double val1 =  (D - B - C + A);
-    double sum = val1*val1;
-    for ( int b = 0 ; b < (int)img.numChannels ; b++)
-    {
-	if ( b == bin ) continue;
-	A = img.data[b][ kA ];
-	B = img.data[b][ kB ];
-	C = img.data[b][ kC ];
-	D = img.data[b][ kD ];
-	double val = ( D - B - C + A );
-
-	if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L2 )
-	    sum += val*val;
-	else if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L1 )
-	    sum += val;
-    }
-    if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L2 )
-	sum = sqrt(sum);
+  double stepx = ( xrb - xtl ) / ( double ) ( cellcountx );
+  double stepy = ( yrb - ytl ) / ( double ) ( cellcounty );
+  int cxtl = ( int ) ( xtl + stepx * cellx1 );
+  int cytl = ( int ) ( ytl + stepy * celly1 );
+  int cxrb = ( int ) ( xtl + stepx * cellx2 );
+  int cyrb = ( int ) ( ytl + stepy * celly2 );
+
+  if ( cxrb <= cxtl ) cxrb = cxtl + 1;
+  if ( cyrb <= cytl ) cyrb = cytl + 1;
+
+  double A, B, C, D;
+
+  assert ( bin < ( int ) img.channels() );
 
-    return ( val1 + epsilon ) / ( sum + epsilon );
+  A = img.get ( cxtl, cytl, bin );
+  B = img.get ( cxrb, cytl, bin );
+  C = img.get ( cxtl, cyrb, bin );
+  D = img.get ( cxrb, cyrb, bin );
+
+  double val1 = ( D - B - C + A );
+  double sum = val1 * val1;
+  for ( int b = 0 ; b < ( int ) img.channels() ; b++ )
+  {
+    if ( b == bin ) continue;
+    A = img.get ( cxtl, cytl, b );
+    B = img.get ( cxrb, cytl, b );
+    C = img.get ( cxtl, cyrb, b );
+    D = img.get ( cxrb, cyrb, b );
+    double val = ( D - B - C + A );
+
+    if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L2 )
+      sum += val * val;
+    else if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L1 )
+      sum += val;
+  }
+  if ( normalizationMethod == HISTFEATURE_NORMMETHOD_L2 )
+    sum = sqrt ( sum );
+
+  return ( val1 + epsilon ) / ( sum + epsilon );
 }
 
 void HistFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-    int nScales = (variableWindow ? numScales : 1 );
-
-    double weight = 1.0 / ( numBins * nScales );
-
-    if ( flexibleGrid ) 
-	weight *= 4.0 / ( cellcountx * (cellcountx - 1) * (cellcounty - 1) * cellcounty );
-    else
-	weight *= 1.0 / (cellcountx * cellcounty);
-
-    for ( int i = 0 ; i < nScales ; i++ )
-    {
-	int wsy = window_size_y;
-	int wsx = window_size_x;
-	for ( int _cellx1 = 0 ; _cellx1 < cellcountx ; _cellx1++ )
-	    for ( int _celly1 = 0 ; _celly1 < cellcounty ; _celly1++ )
-		for ( int _cellx2 = _cellx1+1 ; 
-			  _cellx2 < (flexibleGrid ? cellcountx : _cellx1+2) ; 
-			  _cellx2++ )
-		    for ( int _celly2 = _celly1+1 ; 
-			      _celly2 < (flexibleGrid ? cellcounty : 
-			      _celly1+2) ; _celly2++ )
-			for ( int _bin = 0 ; _bin < numBins ; _bin++ )
-			{
-			    HistFeature *f = new HistFeature();
-			    f->histtype = histtype;
-			    f->window_size_x = wsx;
-			    f->window_size_y = wsy;
-			    f->bin = _bin;
-			    f->cellx1 = _cellx1;
-			    f->celly1 = _celly1;
-			    f->cellx2 = _cellx2;
-			    f->celly2 = _celly2;
-			    f->cellcountx = cellcountx;
-			    f->cellcounty = cellcounty;
-			    featurePool.addFeature ( f, weight ); 
-			}
-	wsx = (int) (scaleStep * wsx);
-	wsy = (int) (scaleStep * wsy);
-    }
+  int nScales = ( variableWindow ? numScales : 1 );
+
+  double weight = 1.0 / ( numBins * nScales );
+
+  if ( flexibleGrid )
+    weight *= 4.0 / ( cellcountx * ( cellcountx - 1 ) * ( cellcounty - 1 ) * cellcounty );
+  else
+    weight *= 1.0 / ( cellcountx * cellcounty );
+
+  for ( int i = 0 ; i < nScales ; i++ )
+  {
+    int wsy = window_size_y;
+    int wsx = window_size_x;
+    for ( int _cellx1 = 0 ; _cellx1 < cellcountx ; _cellx1++ )
+      for ( int _celly1 = 0 ; _celly1 < cellcounty ; _celly1++ )
+        for ( int _cellx2 = _cellx1 + 1 ;
+              _cellx2 < ( flexibleGrid ? cellcountx : _cellx1 + 2 ) ;
+              _cellx2++ )
+          for ( int _celly2 = _celly1 + 1 ;
+                _celly2 < ( flexibleGrid ? cellcounty :
+                            _celly1 + 2 ) ; _celly2++ )
+            for ( int _bin = 0 ; _bin < numBins ; _bin++ )
+            {
+              HistFeature *f = new HistFeature();
+              f->histtype = histtype;
+              f->window_size_x = wsx;
+              f->window_size_y = wsy;
+              f->bin = _bin;
+              f->cellx1 = _cellx1;
+              f->celly1 = _celly1;
+              f->cellx2 = _cellx2;
+              f->celly2 = _celly2;
+              f->cellcountx = cellcountx;
+              f->cellcounty = cellcounty;
+              featurePool.addFeature ( f, weight );
+            }
+    wsx = ( int ) ( scaleStep * wsx );
+    wsy = ( int ) ( scaleStep * wsy );
+  }
 }
 
 Feature *HistFeature::clone() const
 {
-    HistFeature *f = new HistFeature();
-    f->histtype = histtype;
-    f->window_size_x = window_size_x;
-    f->window_size_y = window_size_y;
-    f->bin = bin;
-    f->cellx1 = cellx1;
-    f->celly1 = celly1;
-    f->cellx2 = cellx2;
-    f->celly2 = celly2;
-    f->cellcountx = cellcountx;
-    f->cellcounty = cellcounty;
-
-    return f;
+  HistFeature *f = new HistFeature();
+  f->histtype = histtype;
+  f->window_size_x = window_size_x;
+  f->window_size_y = window_size_y;
+  f->bin = bin;
+  f->cellx1 = cellx1;
+  f->celly1 = celly1;
+  f->cellx2 = cellx2;
+  f->celly2 = celly2;
+  f->cellcountx = cellcountx;
+  f->cellcounty = cellcounty;
+
+  return f;
 }
 
 Feature *HistFeature::generateFirstParameter () const
 {
-    return clone();
+  return clone();
 }
 
-void HistFeature::restore (istream & is, int format)
+void HistFeature::restore ( istream & is, int format )
 {
-    is >> histtype;
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> bin;
-    is >> cellx1;
-    is >> celly1;
-    is >> cellx2;
-    is >> celly2;
-    is >> cellcountx;
-    is >> cellcounty;
+  is >> histtype;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> bin;
+  is >> cellx1;
+  is >> celly1;
+  is >> cellx2;
+  is >> celly2;
+  is >> cellcountx;
+  is >> cellcounty;
 }
 
-void HistFeature::store (ostream & os, int format) const
+void HistFeature::store ( ostream & os, int format ) const
 {
-    os << "HistFeature "
-       << histtype << " "
-       << window_size_x << " "
-       << window_size_y << " "
-       << bin << " "
-       << cellx1 << " "
-       << celly1 << " "
-       << cellx2 << " "
-       << celly2 << " "
-       << cellcountx << " "
-       << cellcounty;
+  os << "HistFeature "
+  << histtype << " "
+  << window_size_x << " "
+  << window_size_y << " "
+  << bin << " "
+  << cellx1 << " "
+  << celly1 << " "
+  << cellx2 << " "
+  << celly2 << " "
+  << cellcountx << " "
+  << cellcounty;
 }
 
 void HistFeature::clear ()

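The value computed by HistFeature::val() above (and analogously by HOGFeature and SemanticFeature) is a rectangle sum read from an integral image: the sum over a window is recovered from its four corner values, and the selected bin is then normalized by the L1 or L2 norm over all bins, with epsilon guarding against empty windows. A small sketch of the corner lookup, assuming the channels of 'integral' already hold integral images:

#include "core/image/MultiChannelImageT.h"

// sum of the original channel over the rectangle [xtl..xrb] x [ytl..yrb],
// recovered from the four corners of its integral image
//     A B
//     C D
double rectangleSum ( const NICE::MultiChannelImageT<double> & integral,
                      int xtl, int ytl, int xrb, int yrb, int channel )
{
  double A = integral.get ( xtl, ytl, channel );
  double B = integral.get ( xrb, ytl, channel );
  double C = integral.get ( xtl, yrb, channel );
  double D = integral.get ( xrb, yrb, channel );
  return D - B - C + A;
}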
+ 53 - 58
features/fpfeatures/HistFeature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file HistFeature.h
 * @brief histogram integral feature
 * @author Erik Rodner
@@ -10,7 +10,7 @@
 
 #include "core/vector/VectorT.h"
 #include "core/vector/MatrixT.h"
- 
+
 #include "core/basics/Config.h"
 #include "vislearning/cbaselib/Feature.h"
 
@@ -21,65 +21,60 @@ namespace OBJREC {
 class HistFeature : public Feature
 {
 
-    protected:
-
-	enum {
-	    HISTFEATURE_NORMMETHOD_L1 = 0,
-	    HISTFEATURE_NORMMETHOD_L2
-	};
-
-	/** @{ feature parameter */
-	int window_size_x;
-	int window_size_y;
-
-	int histtype;
-	int bin;
-	int cellx1;
-	int celly1;
-	int cellx2;
-	int celly2;
-	int cellcountx;
-	int cellcounty;
-	int normalizationMethod;
-
-	bool flexibleGrid;
-
-	/** @} */
-
-	/** @{ parameter for feature generation */
-	int numScales;
-	int numBins;
-	double scaleStep;
-	/** @} */
-
-    public:
-  
-	/** simple constructor */
-	HistFeature( const NICE::Config *conf, 
-		     // refactor-nice.pl: check this substitution
-		     // old: const std::string & section, 
-		     const std::string & section, 
-		     int _histtype,
-		     int _numBins);
-
-	/** internally used by HistFeature::explode */
-	HistFeature () {};
-      
-	/** simple destructor */
-	virtual ~HistFeature();
-     
-	double val( const Example *example ) const;
-	void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
-	Feature *clone() const;
-	Feature *generateFirstParameter () const;
-
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
-};
+  protected:
+
+    enum {
+      HISTFEATURE_NORMMETHOD_L1 = 0,
+      HISTFEATURE_NORMMETHOD_L2
+    };
+
+    /** @{ feature parameter */
+    int window_size_x;
+    int window_size_y;
+
+    int histtype;
+    int bin;
+    int cellx1;
+    int celly1;
+    int cellx2;
+    int celly2;
+    int cellcountx;
+    int cellcounty;
+    int normalizationMethod;
+
+    bool flexibleGrid;
 
+    /** @} */
 
+    /** @{ parameter for feature generation */
+    int numScales;
+    int numBins;
+    double scaleStep;
+    /** @} */
 
+  public:
+
+    /** simple constructor */
+    HistFeature ( const NICE::Config *conf,
+                  const std::string & section,
+                  int _histtype,
+                  int _numBins );
+
+    /** internally used by HistFeature::explode */
+    HistFeature () {};
+
+    /** simple destructor */
+    virtual ~HistFeature();
+
+    double val ( const Example *example ) const;
+    void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
+    Feature *clone() const;
+    Feature *generateFirstParameter () const;
+
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
+};
 
 } // namespace
 

+ 180 - 173
features/fpfeatures/PixelPairFeature.cpp

@@ -14,219 +14,226 @@ const int lastColorchannel = 2;
 
 void PixelPairFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-	PixelPairFeature *f = new PixelPairFeature ( *this );
-	int firstchannel = ( imagetype == CachedExample::I_COLOR ) ? firstColorchannel : 0;
-	int lastchannel = ( imagetype == CachedExample::I_COLOR ) ? lastColorchannel : 0;
-
-	int wsx = window_size_x / 2;
-	int wsy = window_size_y / 2;
-	
-	int numberOfPairFeatures = (lastchannel - firstchannel + 1) * 
-		(2*wsx / step_x) * (2*wsy / step_y);
-
-	numberOfPairFeatures *= numberOfPairFeatures;
-
-	for ( int _type = PPTYPE_DIFF ; _type < PPTYPE_VALUE ; _type++ )
-	for ( int _x1 = -wsx ; _x1 < wsx ; _x1 += step_x )
-	for ( int _y1 = -wsy ; _y1 < wsy ; _y1 += step_y )
-	for ( int _b1 = firstchannel ; _b1 <= lastchannel ; _b1++ ) 
-	for ( int _x2 = -wsx ; _x2 < wsx ; _x2 += step_x )
-	for ( int _y2 = -wsy ; _y2 < wsy ; _y2 += step_y )
-	for ( int _b2 = firstchannel ; _b2 <= lastchannel ; _b2++ ) 
-	{
-		if ( (_b1 == _b2) && (_x1 == _x2) && (_y1 == _y2) ) continue;
-		f->x1 = _x1; f->y1 = _y1; f->b1 = _b1;
-		f->x2 = _x2; f->y2 = _y2; f->b2 = _b2;
-		f->type = _type;
-		featurePool.addFeature ( f->clone(), 1.0 / ((PPTYPE_VALUE - PPTYPE_DIFF) * numberOfPairFeatures) );
-	}
-
-	f->type = PPTYPE_VALUE;
-	for ( int _x1 = -wsx ; _x1 < wsx ; _x1 += step_x )
-	for ( int _y1 = -wsy ; _y1 < wsy ; _y1 += step_y )
-	for ( int _b1 = firstchannel ; _b1 <= lastchannel ; _b1++ ) 
-	{
-		f->x1 = _x1; f->y1 = _y1; f->b1 = _b1;
-		featurePool.addFeature ( f->clone(), 1.0 / numberOfPairFeatures );
-	}
-
-	delete f;
+  PixelPairFeature *f = new PixelPairFeature ( *this );
+  int firstchannel = ( imagetype == CachedExample::I_COLOR ) ? firstColorchannel : 0;
+  int lastchannel = ( imagetype == CachedExample::I_COLOR ) ? lastColorchannel : 0;
+
+  int wsx = window_size_x / 2;
+  int wsy = window_size_y / 2;
+
+  int numberOfPairFeatures = ( lastchannel - firstchannel + 1 ) *
+                             ( 2 * wsx / step_x ) * ( 2 * wsy / step_y );
+
+  numberOfPairFeatures *= numberOfPairFeatures;
+
+  for ( int _type = PPTYPE_DIFF ; _type < PPTYPE_VALUE ; _type++ )
+    for ( int _x1 = -wsx ; _x1 < wsx ; _x1 += step_x )
+      for ( int _y1 = -wsy ; _y1 < wsy ; _y1 += step_y )
+        for ( int _b1 = firstchannel ; _b1 <= lastchannel ; _b1++ )
+          for ( int _x2 = -wsx ; _x2 < wsx ; _x2 += step_x )
+            for ( int _y2 = -wsy ; _y2 < wsy ; _y2 += step_y )
+              for ( int _b2 = firstchannel ; _b2 <= lastchannel ; _b2++ )
+              {
+                if ( ( _b1 == _b2 ) && ( _x1 == _x2 ) && ( _y1 == _y2 ) ) continue;
+                f->x1 = _x1;
+                f->y1 = _y1;
+                f->b1 = _b1;
+                f->x2 = _x2;
+                f->y2 = _y2;
+                f->b2 = _b2;
+                f->type = _type;
+                featurePool.addFeature ( f->clone(), 1.0 / ( ( PPTYPE_VALUE - PPTYPE_DIFF ) * numberOfPairFeatures ) );
+              }
+
+  f->type = PPTYPE_VALUE;
+  for ( int _x1 = -wsx ; _x1 < wsx ; _x1 += step_x )
+    for ( int _y1 = -wsy ; _y1 < wsy ; _y1 += step_y )
+      for ( int _b1 = firstchannel ; _b1 <= lastchannel ; _b1++ )
+      {
+        f->x1 = _x1;
+        f->y1 = _y1;
+        f->b1 = _b1;
+        featurePool.addFeature ( f->clone(), 1.0 / numberOfPairFeatures );
+      }
+
+  delete f;
 }
 
 
 Feature *PixelPairFeature::clone() const
 {
-    PixelPairFeature *fp =  new PixelPairFeature(*this);
-    return fp;
+  PixelPairFeature *fp =  new PixelPairFeature ( *this );
+  return fp;
 }
 
 
 /************* PixelPairFeature **************/
-PixelPairFeature::PixelPairFeature( const Config *conf )
+PixelPairFeature::PixelPairFeature ( const Config *conf )
 {
-    window_size_x = conf->gI("PixelPairFeatures", "window_size_x", 24 );
-    window_size_y = conf->gI("PixelPairFeatures", "window_size_y", 24 );
-    step_x = conf->gI("PixelPairFeatures", "step_x", 1 );
-    step_y = conf->gI("PixelPairFeatures", "step_y", 1 );
-    bool use_color = conf->gB("PixelPairFeatures", "use_color", true );
-
-    if ( use_color ) {
-	imagetype = CachedExample::I_COLOR;
-    } else {
-	imagetype = CachedExample::I_GRAYVALUES;
-    }
+  window_size_x = conf->gI ( "PixelPairFeatures", "window_size_x", 24 );
+  window_size_y = conf->gI ( "PixelPairFeatures", "window_size_y", 24 );
+  step_x = conf->gI ( "PixelPairFeatures", "step_x", 1 );
+  step_y = conf->gI ( "PixelPairFeatures", "step_y", 1 );
+  bool use_color = conf->gB ( "PixelPairFeatures", "use_color", true );
+
+  if ( use_color ) {
+    imagetype = CachedExample::I_COLOR;
+  } else {
+    imagetype = CachedExample::I_GRAYVALUES;
+  }
 }
 
-PixelPairFeature::PixelPairFeature ( int _window_size_x,	
-			   int _window_size_y,
-			   int _step_x,
-			   int _step_y,
-			   int _imagetype )
+PixelPairFeature::PixelPairFeature ( int _window_size_x,
+                                     int _window_size_y,
+                                     int _step_x,
+                                     int _step_y,
+                                     int _imagetype )
 {
-    window_size_x = _window_size_x;
-    window_size_y = _window_size_y;
-    x1 = 0; y1 = 0; b1 = firstColorchannel;
-    x2 = 1; y1 = 0; b2 = firstColorchannel;
-    step_x = _step_x;
-    step_y = _step_y;
-    type = PixelPairFeature::PPTYPE_DIFF;
-    imagetype = _imagetype;
+  window_size_x = _window_size_x;
+  window_size_y = _window_size_y;
+  x1 = 0;
+  y1 = 0;
+  b1 = firstColorchannel;
+  x2 = 1;
+  y2 = 0;
+  b2 = firstColorchannel;
+  step_x = _step_x;
+  step_y = _step_y;
+  type = PixelPairFeature::PPTYPE_DIFF;
+  imagetype = _imagetype;
 }
 
 PixelPairFeature::~PixelPairFeature()
 {
 }
 
-double PixelPairFeature::val( const Example *example ) const
+double PixelPairFeature::val ( const Example *example ) const
 {
-    int xl = example->x;
-    int yl = example->y;
-    NICE::MultiChannelImageT<int> & img = example->ce->getIChannel ( imagetype );
-
-    int xx1 = x1;
-    int yy1 = y1;
-    int xx2 = x1;
-    int yy2 = x2;
-    int exwidth = example->width;
-    if ( exwidth != 0 )
-    {
-	int exheight = example->height;
-	xx1 = xx1 * exwidth / window_size_x;	
-	yy1 = yy1 * exheight / window_size_y;	
-	xx2 = xx2 * exwidth / window_size_x;	
-	yy2 = yy2 * exheight / window_size_y;	
-    }
-
-    int xsize = img.xsize;
-    int ysize = img.ysize;
-
-    const int *channel1 = img.data[b1];
-    int p1x = BOUND ( xl + xx1, 0, xsize-1 );
-    int p1y = BOUND ( yl + yy1, 0, ysize-1 );
-    long off1 = p1x + p1y*xsize;
-    int v1 = channel1[off1];
+  int xl = example->x;
+  int yl = example->y;
+  NICE::MultiChannelImageT<int> & img = example->ce->getIChannel ( imagetype );
+
+  int xx1 = x1;
+  int yy1 = y1;
+  int xx2 = x2;
+  int yy2 = y2;
+  int exwidth = example->width;
+  if ( exwidth != 0 )
+  {
+    int exheight = example->height;
+    xx1 = xx1 * exwidth / window_size_x;
+    yy1 = yy1 * exheight / window_size_y;
+    xx2 = xx2 * exwidth / window_size_x;
+    yy2 = yy2 * exheight / window_size_y;
+  }
+
+  int xsize = img.width();
+  int ysize = img.height();
+
+  int p1x = BOUND ( xl + xx1, 0, xsize - 1 );
+  int p1y = BOUND ( yl + yy1, 0, ysize - 1 );
+
+  int v1 = img.get ( p1x, p1y, b1 );
+
+  if ( type != PPTYPE_VALUE )
+  {
+    int p2x = BOUND ( xl + xx2, 0, xsize - 1 );
+    int p2y = BOUND ( yl + yy2, 0, ysize - 1 );
     
-    if ( type != PPTYPE_VALUE ) 
-    {
-	const int *channel2 = img.data[b2];
-
-	int p2x = BOUND ( xl + xx2, 0, xsize-1 );
-	int p2y = BOUND ( yl + yy2, 0, ysize-1 );
-	long off2 = p2x + p2y*xsize;
-	int v2 = channel2[off2];
-	
-	if ( type == PPTYPE_DIFF ) 
-	    return v1 - v2;
-	else if ( type == PPTYPE_ABSDIFF )
-	    return fabs(v1-v2);
-	else if ( type == PPTYPE_SUM )
-	    return v1 + v2;
-	else
-	    exit(-1);
-
-    } else {
-	return v1;
-    }
+    int v2 = img.get ( p2x, p2y, b2 );
+
+    if ( type == PPTYPE_DIFF )
+      return v1 - v2;
+    else if ( type == PPTYPE_ABSDIFF )
+      return fabs ( v1 - v2 );
+    else if ( type == PPTYPE_SUM )
+      return v1 + v2;
+    else
+      exit ( -1 );
+
+  } else {
+    return v1;
+  }
 }
 
-void PixelPairFeature::restore (istream & is, int format)
+void PixelPairFeature::restore ( istream & is, int format )
 {
-    is >> type;
-    is >> imagetype;
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> x1;
-    is >> y1;
-    is >> b1;
-    is >> x2;
-    is >> y2;
-    is >> b2;
+  is >> type;
+  is >> imagetype;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> x1;
+  is >> y1;
+  is >> b1;
+  is >> x2;
+  is >> y2;
+  is >> b2;
 }
 
-void PixelPairFeature::store (ostream & os, int format) const
+void PixelPairFeature::store ( ostream & os, int format ) const
 {
-    os << "PIXELPAIRFEATURE" << " " << type << " " 
-       << imagetype << " "
-       << window_size_x << " " << window_size_y << " "
-       << " " << x1 << " " << y1 << " " << b1
-       << " " << x2 << " " << y2 << " " << b2;
+  os << "PIXELPAIRFEATURE" << " " << type << " "
+  << imagetype << " "
+  << window_size_x << " " << window_size_y << " "
+  << " " << x1 << " " << y1 << " " << b1
+  << " " << x2 << " " << y2 << " " << b2;
 }
 
 void PixelPairFeature::clear ()
 {
-    // nothing to do in my opinion
+  // nothing to do in my opinion
 }
 
 #if 0
 void PixelPairFeature::calcFeatureValues ( const Examples & examples,
-				    vector<int> & examples_selection,
-				    FeatureValuesUnsorted & values ) const
+    vector<int> & examples_selection,
+    FeatureValuesUnsorted & values ) const
 {
-    for ( vector<int>::const_iterator si = examples_selection.begin();
-				   si != examples_selection.end();
-				   si++ )
+  for ( vector<int>::const_iterator si = examples_selection.begin();
+        si != examples_selection.end();
+        si++ )
+  {
+    int index = *si;
+    const pair<int, Example> & p = examples[index];
+    int classno = p.first;
+    const Example & example = p.second;
+    double value = 0.0;
+
+    int xsize, ysize;
+    int xl = example.x - window_size_x / 2;
+    int yl = example.y - window_size_y / 2;
+
+    const double *channel1 = example.ce->getChannel ( b1, xsize, ysize );
+    int p1x = BOUND ( xl + x1, 0, xsize - 1 );
+    int p1y = BOUND ( yl + y1, 0, ysize - 1 );
+    long off1 = p1x + p1y * xsize;
+    double v1 = channel1[off1];
+
+
+    if ( type != PPTYPE_VALUE )
     {
-	int index = *si;
-	const pair<int, Example> & p = examples[index];
-	int classno = p.first;
-	const Example & example = p.second;
-	double value = 0.0;
-
-	int xsize, ysize;
-	int xl = example.x - window_size_x/2;
-        int yl = example.y - window_size_y/2;
-
-	const double *channel1 = example.ce->getChannel ( b1, xsize, ysize );
-	int p1x = BOUND ( xl + x1, 0, xsize-1 );
-	int p1y = BOUND ( yl + y1, 0, ysize-1 );
-	long off1 = p1x + p1y*xsize;
-	double v1 = channel1[off1];
-    
+      const double *channel2 = example.ce->getChannel ( b2, xsize, ysize );
+
+      int p2x = BOUND ( xl + x2, 0, xsize - 1 );
+      int p2y = BOUND ( yl + y2, 0, ysize - 1 );
+      long off2 = p2x + p2y * xsize;
+      double v2 = channel2[off2];
 
-	if ( type != PPTYPE_VALUE ) 
-	{
-	    const double *channel2 = example.ce->getChannel ( b2, xsize, ysize );
-
-	    int p2x = BOUND ( xl + x2, 0, xsize-1 );
-	    int p2y = BOUND ( yl + y2, 0, ysize-1 );
-	    long off2 = p2x + p2y*xsize;
-	    double v2 = channel2[off2];
-	    
-	    
-	    if ( type == PPTYPE_DIFF ) 
-		value = v1 - v2;
-	    else if ( type == PPTYPE_ABSDIFF )
-		value = fabs(v1-v2);
-	    else if ( type == PPTYPE_SUM )
-		value = v1 + v2;
-	} else {
-	    value = v1;
-	}
-
-	values.push_back ( quadruplet<double, int, int, double> ( 
-	    value, classno, index, example.weight ) );
+
+      if ( type == PPTYPE_DIFF )
+        value = v1 - v2;
+      else if ( type == PPTYPE_ABSDIFF )
+        value = fabs ( v1 - v2 );
+      else if ( type == PPTYPE_SUM )
+        value = v1 + v2;
+    } else {
+      value = v1;
     }
 
+    values.push_back ( quadruplet<double, int, int, double> (
+                         value, classno, index, example.weight ) );
+  }
+
 }
 #endif
 

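For reference, the four response types enumerated in PixelPairFeature (PPTYPE_DIFF, PPTYPE_ABSDIFF, PPTYPE_SUM, PPTYPE_VALUE) reduce to a simple combination of the two sampled pixel values; a compact sketch of the dispatch performed in val():

#include <cmath>
#include <cstdlib>

// combine the two sampled pixel values v1, v2 according to the feature type;
// mirrors the branches in PixelPairFeature::val()
double pairResponse ( int type, int v1, int v2 )
{
  enum { PPTYPE_DIFF = 0, PPTYPE_ABSDIFF, PPTYPE_SUM, PPTYPE_VALUE };
  switch ( type )
  {
    case PPTYPE_DIFF:    return v1 - v2;                         // signed difference
    case PPTYPE_ABSDIFF: return fabs ( ( double ) ( v1 - v2 ) ); // absolute difference
    case PPTYPE_SUM:     return v1 + v2;                         // sum of both pixels
    case PPTYPE_VALUE:   return v1;                              // single pixel, v2 unused
    default:             exit ( -1 );                            // unknown type
  }
}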
+ 53 - 53
features/fpfeatures/PixelPairFeature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file PixelPairFeature.h
 * @brief like in Shotton paper
 * @author Erik Rodner
@@ -22,60 +22,60 @@ namespace OBJREC {
 class PixelPairFeature : public Feature
 {
 
-    protected:
-	enum {
-	    PPTYPE_DIFF = 0,
-	    PPTYPE_ABSDIFF,
-	    PPTYPE_SUM,
-	    PPTYPE_VALUE
-	};
-
-	int type;
-	int imagetype;
-
-	int x1;
-	int y1;
-	int b1;
-
-	int x2;
-	int y2;
-	int b2;
-
-	int step_x;
-	int step_y;
-
-	int window_size_x;
-	int window_size_y;
-
-    public:
-  
-	/** simple constructor */
-	PixelPairFeature( const NICE::Config *conf );
-      
-	/** without memory wasting config */
-	PixelPairFeature ( int window_size_x,	
-		      int window_size_y,
-		      int step_x,
-		      int step_y,
-		      int imagetype );
-
-	/** simple destructor */
-	virtual ~PixelPairFeature();
-     
-	double val( const Example *example ) const;
-
-	void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
-
-	Feature *clone() const;
-	
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
+  protected:
+    enum {
+      PPTYPE_DIFF = 0,
+      PPTYPE_ABSDIFF,
+      PPTYPE_SUM,
+      PPTYPE_VALUE
+    };
+
+    int type;
+    int imagetype;
+
+    int x1;
+    int y1;
+    int b1;
+
+    int x2;
+    int y2;
+    int b2;
+
+    int step_x;
+    int step_y;
+
+    int window_size_x;
+    int window_size_y;
+
+  public:
+
+    /** simple constructor */
+    PixelPairFeature ( const NICE::Config *conf );
+
+    /** without memory wasting config */
+    PixelPairFeature ( int window_size_x,
+                       int window_size_y,
+                       int step_x,
+                       int step_y,
+                       int imagetype );
+
+    /** simple destructor */
+    virtual ~PixelPairFeature();
+
+    double val ( const Example *example ) const;
+
+    void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
+
+    Feature *clone() const;
+
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
 
 #if 0
-	void calcFeatureValues ( const Examples & examples,
-				    std::vector<int> & examples_selection,
-				    FeatureValuesUnsorted & values ) const;
+    void calcFeatureValues ( const Examples & examples,
+                             std::vector<int> & examples_selection,
+                             FeatureValuesUnsorted & values ) const;
 #endif
 
 };

+ 121 - 117
features/fpfeatures/SemanticFeature.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file SemanticFeature.cpp
 * @brief texton feature similar to jamie shottons idea
 * @author Erik Rodner
@@ -18,26 +18,26 @@ using namespace NICE;
 
 
 /** simple constructor */
-SemanticFeature::SemanticFeature( const Config *conf, 
-				  const set<int> *_possibleClassNos )
+SemanticFeature::SemanticFeature ( const Config *conf,
+                                   const set<int> *_possibleClassNos )
     : possibleClassNos ( _possibleClassNos )
 {
-    window_size_x = conf->gI("SemanticFeature", "window_size_x", 21 );
-    window_size_y = conf->gI("SemanticFeature", "window_size_y", 21 );
-    scaleStep = conf->gD("SemanticFeature", "scale_step", sqrt(2) );
-    numScales = conf->gI("SemanticFeature", "num_scales", 5 );
-    end_shiftx = conf->gI("SemanticFeature", "end_shift_x", 40 );
-    end_shifty = conf->gI("SemanticFeature", "end_shift_y", 40 );
-    step_shiftx = conf->gI("SemanticFeature", "step_shift_x", 5 );
-    step_shifty = conf->gI("SemanticFeature", "step_shift_y", 5 );
-
-    shiftx = 0;
-    shifty = 0;
+  window_size_x = conf->gI ( "SemanticFeature", "window_size_x", 21 );
+  window_size_y = conf->gI ( "SemanticFeature", "window_size_y", 21 );
+  scaleStep = conf->gD ( "SemanticFeature", "scale_step", sqrt ( 2 ) );
+  numScales = conf->gI ( "SemanticFeature", "num_scales", 5 );
+  end_shiftx = conf->gI ( "SemanticFeature", "end_shift_x", 40 );
+  end_shifty = conf->gI ( "SemanticFeature", "end_shift_y", 40 );
+  step_shiftx = conf->gI ( "SemanticFeature", "step_shift_x", 5 );
+  step_shifty = conf->gI ( "SemanticFeature", "step_shift_y", 5 );
+
+  shiftx = 0;
+  shifty = 0;
 }
 
-SemanticFeature::SemanticFeature( const Config *conf )
+SemanticFeature::SemanticFeature ( const Config *conf )
 {
-    SemanticFeature ( conf, NULL );
+  SemanticFeature ( conf, NULL );
 }
 
 /** simple destructor */
@@ -45,137 +45,141 @@ SemanticFeature::~SemanticFeature()
 {
 }
 
-double SemanticFeature::val( const Example *example ) const
+double SemanticFeature::val ( const Example *example ) const
 {
-    const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel (
-	CachedExample::D_INTEGRALPRIOR );
+  const NICE::MultiChannelImageT<double> & img = example->ce->getDChannel (
+        CachedExample::D_INTEGRALPRIOR );
 
-    int xsize;
-    int ysize;
-    example->ce->getImageSize ( xsize, ysize );
-    int tm_xsize = img.xsize;
-    int tm_ysize = img.ysize;
+  int xsize;
+  int ysize;
+  example->ce->getImageSize ( xsize, ysize );
+  int tm_xsize = img.width();
+  int tm_ysize = img.height();
 
 #if 0
-    int xtl = example->x - window_size_x/2;
-    int ytl = example->y - window_size_y/2;
-    int xrb = example->x + window_size_x/2;
-    int yrb = example->y + window_size_y/2;
-
-    xtl = xtl * tm_xsize / xsize;
-    ytl = ytl * tm_ysize / ysize;
-    xrb = xrb * tm_xsize / xsize;
-    yrb = yrb * tm_ysize / ysize;
+  int xtl = example->x - window_size_x / 2;
+  int ytl = example->y - window_size_y / 2;
+  int xrb = example->x + window_size_x / 2;
+  int yrb = example->y + window_size_y / 2;
+
+  xtl = xtl * tm_xsize / xsize;
+  ytl = ytl * tm_ysize / ysize;
+  xrb = xrb * tm_xsize / xsize;
+  yrb = yrb * tm_ysize / ysize;
 #endif
 
-    int wsx2 = window_size_x * tm_xsize / (2*xsize);
-    int wsy2 = window_size_y * tm_ysize / (2*ysize);
-    int xx = ( example->x + shiftx ) * tm_xsize / xsize;
-    int yy = ( example->y + shifty ) * tm_ysize / ysize;
-    int xtl = xx - wsx2;
-    int ytl = yy - wsy2;
-    int xrb = xx + wsx2;
-    int yrb = yy + wsy2;
+  int wsx2 = window_size_x * tm_xsize / ( 2 * xsize );
+  int wsy2 = window_size_y * tm_ysize / ( 2 * ysize );
+  int xx = ( example->x + shiftx ) * tm_xsize / xsize;
+  int yy = ( example->y + shifty ) * tm_ysize / ysize;
+  int xtl = xx - wsx2;
+  int ytl = yy - wsy2;
+  int xrb = xx + wsx2;
+  int yrb = yy + wsy2;
 
 #define BOUND(x,min,max) (((x)<(min))?(min):((x)>(max)?(max):(x)))
-    xtl = BOUND ( xtl, 0, tm_xsize - 1 );
-    ytl = BOUND ( ytl, 0, tm_ysize - 1 );
-    xrb = BOUND ( xrb, 0, tm_xsize - 1 );
-    yrb = BOUND ( yrb, 0, tm_ysize - 1 );
+  xtl = BOUND ( xtl, 0, tm_xsize - 1 );
+  ytl = BOUND ( ytl, 0, tm_ysize - 1 );
+  xrb = BOUND ( xrb, 0, tm_xsize - 1 );
+  yrb = BOUND ( yrb, 0, tm_ysize - 1 );
 #undef BOUND
 
-    double A,B,C,D;
-
-    A = img.data[classno][ xtl + ytl * tm_xsize ];
-    B = img.data[classno][ xrb + ytl * tm_xsize ];
-    C = img.data[classno][ xtl + yrb * tm_xsize ];
-    D = img.data[classno][ xrb + yrb * tm_xsize ];
-
-    int area = (xrb - xtl)*(yrb - ytl);
-    
-    /*******************************
-	    BE CAREFUL
-	THIS INCORPORATES POSTION
-	INFORMATION INDIRECTLY
-    ********************************/
-    
-    if ( area == 0 ) 
-	return 0.0;
-    else        
-       /* A B 
-	  C D  */
-	return (D - B - C + A) / area;
+  double A, B, C, D;
+
+  A = img.get ( xtl, ytl, classno );
+  B = img.get ( xrb, ytl, classno );
+  C = img.get ( xtl, yrb, classno );
+  D = img.get ( xrb, yrb, classno );
+
+  int area = ( xrb - xtl ) * ( yrb - ytl );
+
+  /*******************************
+   BE CAREFUL
+  THIS INCORPORATES POSITION
+  INFORMATION INDIRECTLY
+  ********************************/
+
+  if ( area == 0 )
+  {
+    return 0.0;
+  }
+  else
+  {
+    /* A B
+    C D  */
+    return ( D - B - C + A ) / area;
+  }
 }
 
 void SemanticFeature::explode ( FeaturePool & featurePool, bool variableWindow ) const
 {
-    if ( possibleClassNos == NULL )
+  if ( possibleClassNos == NULL )
+  {
+    fprintf ( stderr, "SemanticFeature::explode: no classno set given !\n" );
+    exit ( -1 );
+  }
+  // use leaf nodes only !!
+  for ( set<int>::const_iterator k = possibleClassNos->begin();
+        k != possibleClassNos->end();
+        k++ )
+  {
+    for ( int sy = 0 ; sy <= end_shifty ; sy += step_shifty )
     {
-	fprintf (stderr, "SemanticFeature::explode: no classno set given !\n");
-	exit(-1);
-    }
-    // use leaf nodes only !!
-    for ( set<int>::const_iterator k = possibleClassNos->begin();
-				   k != possibleClassNos->end();
-				   k++ )
-    {
-	for ( int sy = 0 ; sy <= end_shifty ; sy += step_shifty )
-	{
-	    for ( int sx = 0 ; sx <= end_shiftx ; sx += step_shiftx )
-	    {
-		int wsy = window_size_y;
-		int wsx = window_size_x;
-		for ( int i = 0 ; i < numScales ; i++ )
-		{
-		    SemanticFeature *f = new SemanticFeature();
-		    f->classno = *k;
-		    f->window_size_x = wsx;
-		    f->window_size_y = wsy;
-		    f->shiftx = sx;
-		    f->shifty = sy;
-		    featurePool.addFeature ( f, step_shiftx * step_shifty / (double)( end_shiftx * end_shifty * possibleClassNos->size() ) ); 
-		    wsx = (int) (scaleStep * wsx);
-		    wsy = (int) (scaleStep * wsy);
-		}
-	    }
-	}
+      for ( int sx = 0 ; sx <= end_shiftx ; sx += step_shiftx )
+      {
+        int wsy = window_size_y;
+        int wsx = window_size_x;
+        for ( int i = 0 ; i < numScales ; i++ )
+        {
+          SemanticFeature *f = new SemanticFeature();
+          f->classno = *k;
+          f->window_size_x = wsx;
+          f->window_size_y = wsy;
+          f->shiftx = sx;
+          f->shifty = sy;
+          featurePool.addFeature ( f, step_shiftx * step_shifty / ( double ) ( end_shiftx * end_shifty * possibleClassNos->size() ) );
+          wsx = ( int ) ( scaleStep * wsx );
+          wsy = ( int ) ( scaleStep * wsy );
+        }
+      }
     }
+  }
 }
 
 Feature *SemanticFeature::clone() const
 {
-    SemanticFeature *f = new SemanticFeature();
-    f->window_size_x = window_size_x;
-    f->window_size_y = window_size_y;
-    f->classno = classno;
-    f->shiftx = shiftx;
-    f->shifty = shifty;
-
-    return f;
+  SemanticFeature *f = new SemanticFeature();
+  f->window_size_x = window_size_x;
+  f->window_size_y = window_size_y;
+  f->classno = classno;
+  f->shiftx = shiftx;
+  f->shifty = shifty;
+
+  return f;
 }
 
 Feature *SemanticFeature::generateFirstParameter () const
 {
-    return clone();
+  return clone();
 }
 
-void SemanticFeature::restore (istream & is, int format)
+void SemanticFeature::restore ( istream & is, int format )
 {
-    is >> window_size_x;
-    is >> window_size_y;
-    is >> shiftx;
-    is >> shifty;
-    is >> classno;
+  is >> window_size_x;
+  is >> window_size_y;
+  is >> shiftx;
+  is >> shifty;
+  is >> classno;
 }
 
-void SemanticFeature::store (ostream & os, int format) const
+void SemanticFeature::store ( ostream & os, int format ) const
 {
-    os << "SemanticFeature "
-       << window_size_x << " "
-       << window_size_y << " "
-       << shiftx << " "
-       << shifty << " "
-       << classno;
+  os << "SemanticFeature "
+  << window_size_x << " "
+  << window_size_y << " "
+  << shiftx << " "
+  << shifty << " "
+  << classno;
 }
 
 void SemanticFeature::clear ()

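The SemanticFeature response above is the mean of the selected class-prior channel inside the shifted and rescaled window: the rectangle sum taken from the integral prior divided by the window area, and zero when the window collapses after clipping to the image. A compact restatement:

// mean of a channel inside a window, given the four integral-image corners
//     A B
//     C D
double meanInsideWindow ( double A, double B, double C, double D,
                          int xtl, int ytl, int xrb, int yrb )
{
  int area = ( xrb - xtl ) * ( yrb - ytl );
  if ( area == 0 )
    return 0.0;                       // degenerate window after clipping
  return ( D - B - C + A ) / area;
}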
+ 47 - 47
features/fpfeatures/SemanticFeature.h

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file SemanticFeature.h
 * @brief texton feature similar to jamie shottons idea
 * @author Erik Rodner
@@ -21,52 +21,52 @@ namespace OBJREC {
 class SemanticFeature : public Feature
 {
 
-    protected:
-	/** @{ feature parameter */
-	int window_size_x;
-	int window_size_y;
-	int shiftx;
-	int shifty;
-	int classno;
-	/** @} */
-
-
-	/** @{ parameter for feature generation */
-	int numScales;
-	double scaleStep;
-	int maxdepth;
-
-	int end_shiftx;
-	int end_shifty;
-	int step_shiftx;
-	int step_shifty;
-	/** @} */
-
-	const std::set<int> *possibleClassNos;
-
-    public:
-  
-	/** simple constructor */
-	SemanticFeature( const NICE::Config *conf, 
-			 const std::set<int> *_possibleClassNos );
-
-	/** simple constructor */
-	SemanticFeature( const NICE::Config *conf );
-
-	/** internally used by SemanticFeature::explode */
-	SemanticFeature () {};
-      
-	/** simple destructor */
-	virtual ~SemanticFeature();
-     
-	double val( const Example *example ) const;
-	void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
-	Feature *clone() const;
-	Feature *generateFirstParameter () const;
-
-	void restore (std::istream & is, int format = 0);
-	void store (std::ostream & os, int format = 0) const;
-	void clear ();
+  protected:
+    /** @{ feature parameter */
+    int window_size_x;
+    int window_size_y;
+    int shiftx;
+    int shifty;
+    int classno;
+    /** @} */
+
+
+    /** @{ parameter for feature generation */
+    int numScales;
+    double scaleStep;
+    int maxdepth;
+
+    int end_shiftx;
+    int end_shifty;
+    int step_shiftx;
+    int step_shifty;
+    /** @} */
+
+    const std::set<int> *possibleClassNos;
+
+  public:
+
+    /** simple constructor */
+    SemanticFeature ( const NICE::Config *conf,
+                      const std::set<int> *_possibleClassNos );
+
+    /** simple constructor */
+    SemanticFeature ( const NICE::Config *conf );
+
+    /** internally used by SemanticFeature::explode */
+    SemanticFeature () {};
+
+    /** simple destructor */
+    virtual ~SemanticFeature();
+
+    double val ( const Example *example ) const;
+    void explode ( FeaturePool & featurePool, bool variableWindow = true ) const;
+    Feature *clone() const;
+    Feature *generateFirstParameter () const;
+
+    void restore ( std::istream & is, int format = 0 );
+    void store ( std::ostream & os, int format = 0 ) const;
+    void clear ();
 
 };
 

+ 1 - 1
features/localfeatures/LFColorWeijer.cpp

@@ -462,7 +462,7 @@ void LFColorWeijer::getFeats( const ColorImage &img, MultiChannelImageT<double>
 {
   int width = ( int )img.width();
   int height = ( int )img.height();
-  feats.reInit( width, height, hist.size(), true );
+  feats.reInit( width, height, hist.size());
 
   NICE::MultiChannelImageT<double> genimg, imglab;
 

+ 2 - 5
image/GenericImageTools.h

@@ -3,14 +3,12 @@
 * @brief simple filter stuff
 * @author Erik Rodner
 * @date 07/30/2008
-
 */
 #ifndef GENERICIMAGETOOLSINCLUDE
 #define GENERICIMAGETOOLSINCLUDE
 
 #include "core/image/MultiChannelImageT.h"
 
-
 namespace OBJREC {
 
 /** simple filter stuff */
@@ -19,15 +17,14 @@ class GenericImageTools
     public:
 
     template <class PixelValueDst, class PixelValueSrc>
-    static void calcIntegralImage ( PixelValueDst *integral, const PixelValueSrc *image, int xsize, int ysize );
+    static void calcIntegralImage ( NICE::ImageT<PixelValueDst> &integralImage, const NICE::ImageT<PixelValueSrc> &image, int xsize, int ysize );
 
     template <class PixelValueDst, class PixelValueSrc>
-    static void nonMaximumSuppression ( PixelValueDst *dst, const PixelValueSrc *src, int xsize, int ysize, bool useEightConnectivity = true );
+    static void nonMaximumSuppression ( NICE::ImageT<PixelValueDst> &dst, const NICE::ImageT<PixelValueSrc> &src, int xsize, int ysize, bool useEightConnectivity = true );
 
 };
 
 #include "GenericImageTools.tcc"
- 
 
 } // namespace
 

+ 66 - 48
image/GenericImageTools.tcc

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file GenericImageTools.cpp
 * @brief simple filter stuff
 * @author Erik Rodner
@@ -10,65 +10,83 @@
 #include "GenericImageTools.h"
 
 template <class PixelValueDst, class PixelValueSrc>
-void GenericImageTools::calcIntegralImage ( PixelValueDst *integralImage, const PixelValueSrc *image, int xsize, int ysize )
+void GenericImageTools::calcIntegralImage ( NICE::ImageT<PixelValueDst> &integralImage, const NICE::ImageT<PixelValueSrc> &image, int xsize, int ysize )
 {
-    integralImage[0] = image[0];
+  integralImage ( 0, 0 ) = ( PixelValueDst ) image ( 0, 0 );
 
-    int k = xsize;
-    for ( int y = 1 ; y < ysize; y++, k+=xsize )
-	integralImage[k] = (PixelValueDst)(integralImage[k-xsize] + image[k]);
+  for ( int y = 1 ; y < ysize; y++ )
+    integralImage ( 0, y ) = integralImage ( 0, y - 1 ) + ( PixelValueDst ) image ( 0, y );
 
-    k = 1;
-    for ( int x = 1 ; x < xsize; x++, k++ )
-	integralImage[k] = (PixelValueDst)(integralImage[k-1] + image[k]);
+  for ( int x = 1 ; x < xsize; x++ )
+    integralImage ( x, 0 ) = integralImage ( x - 1, 0 ) + ( PixelValueDst ) image ( x, 0 );
 
-    k = xsize + 1;
-    for ( int y = 1 ; y < ysize ; y++,k++ )
-	for ( int x = 1 ; x < xsize ; x++,k++ )
-	{
-	    integralImage[k] = (PixelValueDst)image[k];
-	    integralImage[k] += integralImage[k-1]; 
-	    integralImage[k] += integralImage[k - xsize];
-	    integralImage[k] -= integralImage[k - xsize - 1];
-	}
+  for ( int y = 1 ; y < ysize ; y++ )
+    for ( int x = 1 ; x < xsize ; x++ )
+    {
+      integralImage ( x, y ) = ( PixelValueDst ) image ( x, y );
+      integralImage ( x, y ) += integralImage ( x, y - 1 );
+      integralImage ( x, y ) += integralImage ( x - 1, y );
+      integralImage ( x, y ) -= integralImage ( x - 1, y - 1 );
+    }
 }
 
 template <class PixelValueDst, class PixelValueSrc>
-void GenericImageTools::nonMaximumSuppression ( PixelValueDst *dst, const PixelValueSrc *src,
-    int xsize, int ysize, bool useEightConnectivity )
+void GenericImageTools::nonMaximumSuppression ( NICE::ImageT<PixelValueDst> &dst, const NICE::ImageT<PixelValueSrc> &src, int xsize, int ysize, bool useEightConnectivity )
 {
-    long k = 0;
-    for ( int y = 0 ; y < ysize ; y++ )
-	for ( int x = 0 ; x < xsize ; x++,k++ )
-	{
-	    if ( x != 0 )
-	    {
-			if ( src[k-1] > src[k] ) { dst[k] = 0; continue; };
-			if ( useEightConnectivity ) {
-				if ( ( y != 0 ) && ( src[k-xsize-1] > src[k] ) ) { dst[k] = 0; continue; };
-				if ( ( y != ysize-1 ) && ( src[k+xsize-1] > src[k] ) ) { dst[k] = 0; continue; };
-			}
-	    }
+  for ( int y = 0 ; y < ysize ; y++ )
+    for ( int x = 0 ; x < xsize ; x++ )
+    {
+      if ( x != 0 )
+      {
+        if ( src ( x - 1, y ) > src ( x, y ) ) {
+          dst ( x, y ) = 0;
+          continue;
+        };
+        if ( useEightConnectivity ) {
+          if ( ( y != 0 ) && ( src ( x - 1, y - 1 ) > src ( x, y ) ) ) {
+            dst ( x, y ) = 0;
+            continue;
+          };
+          if ( ( y != ysize - 1 ) && ( src ( x - 1, y + 1 ) > src ( x, y ) ) ) {
+            dst ( x, y ) = 0;
+            continue;
+          };
+        }
+      }
 
-	    if ( x != xsize-1 ) 
-	    {
-			if ( src[k+1] > src[k] ) { dst[k] = 0; continue; };
-			if ( useEightConnectivity ) {
-				if ( ( y != 0 ) && ( src[k-xsize+1] > src[k] ) ) { dst[k] = 0; continue; };
-				if ( ( y != ysize-1 ) && ( src[k+xsize+1] > src[k] ) ) { dst[k] = 0; continue; };
-			}
-	    }
+      if ( x != xsize - 1 )
+      {
+        if ( src ( x + 1, y ) > src ( x, y ) ) {
+          dst ( x, y ) = 0;
+          continue;
+        };
+        if ( useEightConnectivity ) {
+          if ( ( y != 0 ) && ( src ( x + 1, y - 1 ) > src ( x, y ) ) ) {
+            dst ( x, y ) = 0;
+            continue;
+          };
+          if ( ( y != ysize - 1 ) && ( src ( x + 1, y + 1 ) > src ( x, y ) ) ) {
+            dst ( x, y ) = 0;
+            continue;
+          };
+        }
+      }
 
-	    // CHANGE THIS to dst <-> src !!
+      // CHANGE THIS to dst <-> src !!
 
-	    if ( y != 0 ) 
-			if ( src[k-xsize] > src[k] ) { dst[k] = 0; continue; };
-	    
-	    if ( y != ysize-1 ) 
-			if ( src[k+xsize] > src[k] ) { dst[k] = 0; continue; };
+      if ( y != 0 )
+        if ( src ( x, y - 1 ) > src ( x, y ) ) {
+          dst ( x, y ) = 0;
+          continue;
+        };
 
-	    dst[k] = src[k];
-	}
+      if ( y != ysize - 1 )
+        if ( src ( x, y + 1 ) > src ( x, y ) ) {
+          dst ( x, y ) = 0;
+          continue;
+        };
 
+      dst ( x, y ) = src ( x, y );
+    }
 }
 

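Hedged usage sketch for the reworked calcIntegralImage(): the recurrence is I(x,y) = image(x,y) + I(x-1,y) + I(x,y-1) - I(x-1,y-1), with the first row and column handled separately. The snippet below assumes that NICE::ImageT can be constructed from (width, height) and accessed via operator()(x,y), and that the header is reachable as vislearning/image/GenericImageTools.h; both are assumptions made for illustration only.

#include "core/image/ImageT.h"
#include "vislearning/image/GenericImageTools.h"   // assumed include path

void integralImageExample ( const NICE::ImageT<unsigned char> & img )
{
  // destination image of the same size (assumed ImageT(width, height) constructor)
  NICE::ImageT<long> integral ( img.width(), img.height() );
  OBJREC::GenericImageTools::calcIntegralImage ( integral, img, img.width(), img.height() );

  // sum over the rectangle (x0,y0)..(x1,y1); all coordinates > 0 and inside the image
  int x0 = 1, y0 = 1, x1 = 10, y1 = 10;
  long sum = integral ( x1, y1 ) - integral ( x1, y0 - 1 )
             - integral ( x0 - 1, y1 ) + integral ( x0 - 1, y0 - 1 );
  ( void ) sum;
}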
+ 694 - 763
math/kernels/KernelData.cpp

@@ -1,4 +1,4 @@
-/** 
+/**
 * @file KernelData.cpp
 * @brief caching some kernel data
 * @author Erik Rodner
@@ -26,873 +26,804 @@ using namespace OBJREC;
 
 KernelData::KernelData()
 {
-	// empty config
-	Config conf;
-	initFromConfig( &conf, "DONTCARE");
+  // empty config
+  Config conf;
+  initFromConfig ( &conf, "DONTCARE" );
 }
 
 KernelData::KernelData ( const KernelData & src )
 {
-	kernelMatrix = src.kernelMatrix;
-	inverseKernelMatrix = src.inverseKernelMatrix;
-	choleskyMatrix = src.choleskyMatrix;
-	for ( map<int, NICE::Matrix *>::const_iterator i = src.cachedMatrices.begin();
-		i != src.cachedMatrices.end(); i++ )
-	{
-		Matrix *M = new Matrix ( *(i->second) );
-		cachedMatrices.insert ( pair<int, NICE::Matrix *> ( i->first, M ) );
-	}
-	logdet = src.logdet;
-	verbose = src.verbose;
-	cr = src.cr->clone();
+  kernelMatrix = src.kernelMatrix;
+  inverseKernelMatrix = src.inverseKernelMatrix;
+  choleskyMatrix = src.choleskyMatrix;
+  for ( map<int, NICE::Matrix *>::const_iterator i = src.cachedMatrices.begin();
+        i != src.cachedMatrices.end(); i++ )
+  {
+    Matrix *M = new Matrix ( * ( i->second ) );
+    cachedMatrices.insert ( pair<int, NICE::Matrix *> ( i->first, M ) );
+  }
+  logdet = src.logdet;
+  verbose = src.verbose;
+  cr = src.cr->clone();
 }
 
-KernelData::KernelData( const Config *conf, const Matrix & kernelMatrix, const string & section )
+KernelData::KernelData ( const Config *conf, const Matrix & kernelMatrix, const string & section )
 {
-	initFromConfig ( conf, section );
-	this->kernelMatrix = kernelMatrix;
-	updateCholeskyFactorization();
+  initFromConfig ( conf, section );
+  this->kernelMatrix = kernelMatrix;
+  updateCholeskyFactorization();
 }
 
-KernelData::KernelData( const Config *conf, const string & section )
+KernelData::KernelData ( const Config *conf, const string & section )
 {
-	initFromConfig ( conf, section );
+  initFromConfig ( conf, section );
 }
 
 void KernelData::initFromConfig ( const Config *conf, const string & section )
 {
-	verbose = conf->gB(section, "verbose", false );
-	string inv_method = conf->gS(section, "robust_cholesky", "auto" );
-
-	double noiseStep = conf->gD(section, "rchol_noise_variance", 1e-7 );
-	double minimumLogDet = conf->gD(section, "rchol_minimum_logdet", - std::numeric_limits<double>::max() );
-	bool useCuda = conf->gB(section, "rchol_cuda", true );
-	if ( verbose && useCuda )
-		std::cerr << "KernelData: using the cuda implementation of cholesky decomposition (might be inaccurate)" << std::endl;
-
-	if ( inv_method == "auto" )
-	{
-		if ( verbose )
-			std::cerr << "KernelData: using the cholesky method with automatic regularization" << std::endl;
-		cr = new CholeskyRobustAuto ( verbose, noiseStep, minimumLogDet, useCuda );
-	} else {
-		if ( verbose )
-			std::cerr << "KernelData: using the cholesky method with static regularization" << std::endl;
-
-		cr = new CholeskyRobust ( verbose, noiseStep, useCuda );
-	}
+  verbose = conf->gB ( section, "verbose", false );
+  string inv_method = conf->gS ( section, "robust_cholesky", "auto" );
+
+  double noiseStep = conf->gD ( section, "rchol_noise_variance", 1e-7 );
+  double minimumLogDet = conf->gD ( section, "rchol_minimum_logdet", - std::numeric_limits<double>::max() );
+  bool useCuda = conf->gB ( section, "rchol_cuda", true );
+  if ( verbose && useCuda )
+    std::cerr << "KernelData: using the cuda implementation of cholesky decomposition (might be inaccurate)" << std::endl;
+
+  if ( inv_method == "auto" )
+  {
+    if ( verbose )
+      std::cerr << "KernelData: using the cholesky method with automatic regularization" << std::endl;
+    cr = new CholeskyRobustAuto ( verbose, noiseStep, minimumLogDet, useCuda );
+  } else {
+    if ( verbose )
+      std::cerr << "KernelData: using the cholesky method with static regularization" << std::endl;
+
+    cr = new CholeskyRobust ( verbose, noiseStep, useCuda );
+  }
 }
 
 KernelData::~KernelData()
 {
-	delete cr;
+  delete cr;
 }
 
 void KernelData::updateCholeskyFactorization ()
 {
-	if ( verbose )
-		std::cerr << "KernelData: kernel: " << kernelMatrix.rows() << " " << kernelMatrix.cols() << std::endl;
-
-	if ( (kernelMatrix.rows() <= 0) || (kernelMatrix.cols() <= 0) )
-		fthrow(Exception, "KernelData: no kernel matrix available !");
-
-	if ( kernelMatrix.containsNaN() ) 
-	{
-		if ( verbose )
-			std::cerr << "KernelData: kernel matrix contains NaNs (setting inverse to identity)" << std::endl;
-
-		logdet = numeric_limits<double>::max();
-
-		choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
-		choleskyMatrix.setIdentity();
-	} else {
-		if ( verbose )
-			std::cerr << "KernelData: calculating cholesky decomposition" << std::endl;
-
-		cr->robustChol ( kernelMatrix, choleskyMatrix );
-		logdet = cr->getLastLogDet();
-
-		if ( !finite(logdet) )
-		{
-			choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
-			choleskyMatrix.setIdentity();
-			logdet = numeric_limits<double>::max();
-		}
-	}
+  if ( verbose )
+    std::cerr << "KernelData: kernel: " << kernelMatrix.rows() << " " << kernelMatrix.cols() << std::endl;
+
+  if ( ( kernelMatrix.rows() <= 0 ) || ( kernelMatrix.cols() <= 0 ) )
+    fthrow ( Exception, "KernelData: no kernel matrix available !" );
+
+  if ( kernelMatrix.containsNaN() )
+  {
+    if ( verbose )
+      std::cerr << "KernelData: kernel matrix contains NaNs (setting inverse to identity)" << std::endl;
+
+    logdet = numeric_limits<double>::max();
+
+    choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
+    choleskyMatrix.setIdentity();
+  } else {
+    if ( verbose )
+      std::cerr << "KernelData: calculating cholesky decomposition" << std::endl;
+
+    cr->robustChol ( kernelMatrix, choleskyMatrix );
+    logdet = cr->getLastLogDet();
+
+    if ( !finite ( logdet ) )
+    {
+      choleskyMatrix.resize ( kernelMatrix.rows(), kernelMatrix.cols() );
+      choleskyMatrix.setIdentity();
+      logdet = numeric_limits<double>::max();
+    }
+  }
 }
 
 void KernelData::updateInverseKernelMatrix ()
 {
-	if ( ! hasCholeskyFactorization() )
-		updateCholeskyFactorization();
-	inverseKernelMatrix.resize ( choleskyMatrix.rows(), choleskyMatrix.cols() );
-	choleskyInvertLargeScale ( choleskyMatrix, inverseKernelMatrix );
+  if ( ! hasCholeskyFactorization() )
+    updateCholeskyFactorization();
+  inverseKernelMatrix.resize ( choleskyMatrix.rows(), choleskyMatrix.cols() );
+  choleskyInvertLargeScale ( choleskyMatrix, inverseKernelMatrix );
 }
 
-		
+
 void KernelData::computeInverseKernelMultiply ( const Vector & x, Vector & result ) const
 {
-	if ( choleskyMatrix.rows() == 0 )
-		fthrow(Exception, "Cholesky factorization was not initialized, use updateCholeskyFactorization() in advance");
-	choleskySolveLargeScale ( choleskyMatrix, x, result );
+  if ( choleskyMatrix.rows() == 0 )
+    fthrow ( Exception, "Cholesky factorization was not initialized, use updateCholeskyFactorization() in advance" );
+  choleskySolveLargeScale ( choleskyMatrix, x, result );
 }
 
-const NICE::Matrix & KernelData::getKernelMatrix() const 
+const NICE::Matrix & KernelData::getKernelMatrix() const
 {
-	return kernelMatrix; 
+  return kernelMatrix;
 }
 
-NICE::Matrix & KernelData::getKernelMatrix() 
-{ 
-	return kernelMatrix; 
+NICE::Matrix & KernelData::getKernelMatrix()
+{
+  return kernelMatrix;
 }
 
-const NICE::Matrix & KernelData::getInverseKernelMatrix() const 
-{ 
-	return inverseKernelMatrix; 
+const NICE::Matrix & KernelData::getInverseKernelMatrix() const
+{
+  return inverseKernelMatrix;
 };
 
 NICE::Matrix & KernelData::getInverseKernelMatrix()
-{ 
-	return inverseKernelMatrix; 
+{
+  return inverseKernelMatrix;
 };
 
 const NICE::Matrix & KernelData::getCholeskyMatrix() const
 {
-	return choleskyMatrix;
+  return choleskyMatrix;
 }
 
-const Matrix & KernelData::getCachedMatrix (int i) const
+const Matrix & KernelData::getCachedMatrix ( int i ) const
 {
-	map<int, NICE::Matrix *>::const_iterator it = cachedMatrices.find(i);
-	if ( it != cachedMatrices.end() )
-		return *(it->second);
-	else
-		fthrow(Exception, "Cached matrix with index " << i << " is not available.");
+  map<int, NICE::Matrix *>::const_iterator it = cachedMatrices.find ( i );
+  if ( it != cachedMatrices.end() )
+    return * ( it->second );
+  else
+    fthrow ( Exception, "Cached matrix with index " << i << " is not available." );
 }
-		
-void KernelData::setCachedMatrix (int i, Matrix *m)
+
+void KernelData::setCachedMatrix ( int i, Matrix *m )
 {
-	cachedMatrices[i] = m; 
+  cachedMatrices[i] = m;
 }
-		
-uint KernelData::getKernelMatrixSize () const { 
-	uint mysize = ( kernelMatrix.rows() == 0 ) ? (choleskyMatrix.rows()) : kernelMatrix.rows(); 
-	return mysize;
+
+uint KernelData::getKernelMatrixSize () const {
+  uint mysize = ( kernelMatrix.rows() == 0 ) ? ( choleskyMatrix.rows() ) : kernelMatrix.rows();
+  return mysize;
 };
-		
+
 void KernelData::getLooEstimates ( const Vector & y, Vector & muLoo, Vector & sigmaLoo ) const
 {
-	if ( inverseKernelMatrix.rows() != getKernelMatrixSize() )  
-		fthrow(Exception, "updateInverseKernelMatrix() has to be called in advance to use this function\n");
-	if ( y.size() != inverseKernelMatrix.rows() )
-		fthrow(Exception, "inverse kernel matrix does not fit to the size of the vector of function values y\n");
-	
-	Vector alpha;
-	computeInverseKernelMultiply ( y, alpha );
-	muLoo.resize ( y.size() );
-	sigmaLoo.resize ( y.size() );
-    for ( uint l = 0 ; l < y.size(); l++ )
-	{
-		 sigmaLoo[l] = 1.0 / inverseKernelMatrix(l,l);
-		 muLoo[l] = y[l] - alpha[l] * sigmaLoo[l];
-	}
+  if ( inverseKernelMatrix.rows() != getKernelMatrixSize() )
+    fthrow ( Exception, "updateInverseKernelMatrix() has to be called in advance to use this function\n" );
+  if ( y.size() != inverseKernelMatrix.rows() )
+    fthrow ( Exception, "inverse kernel matrix does not fit to the size of the vector of function values y\n" );
+
+  Vector alpha;
+  computeInverseKernelMultiply ( y, alpha );
+  muLoo.resize ( y.size() );
+  sigmaLoo.resize ( y.size() );
+  for ( uint l = 0 ; l < y.size(); l++ )
+  {
+    sigmaLoo[l] = 1.0 / inverseKernelMatrix ( l, l );
+    muLoo[l] = y[l] - alpha[l] * sigmaLoo[l];
+  }
 }
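
For reference, with \alpha = K^{-1} y these are the standard closed-form GP leave-one-out estimates computed above:

    \sigma^2_{loo,l} = \frac{1}{[K^{-1}]_{ll}}, \qquad \mu_{loo,l} = y_l - \alpha_l \, \sigma^2_{loo,l}
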
-	
-	
-KernelData *KernelData::clone(void) const
+
+
+KernelData *KernelData::clone ( void ) const
 {
-	return new KernelData( *this );
+  return new KernelData ( *this );
 }
 
-/** 
+/**
 * @brief Updates the GP likelihood if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3)
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 */
-void KernelData::getGPLikelihoodWithOneNewRow(  const NICE::Vector & y, const double & oldLogdetK, const int & rowIndex, const NICE::Vector & newRow, const Vector & oldAlpha , Vector & newAlpha, double & loglike)
+void KernelData::getGPLikelihoodWithOneNewRow ( const NICE::Vector & y, const double & oldLogdetK, const int & rowIndex, const NICE::Vector & newRow, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
 {
-	// oldAlpha = K^{-1} y
-
-	// K' new kernel matrix = exchange the row and column at position rowIndex
-	// with newRow
-	// try to find U and V such that K' = K + U*V with U,V having rank 2
-	
-	// rowIndex'th base vector 
-	Vector ei (y.size(), 0.0);
-	ei[rowIndex] = 1.0;
-
-	// we have to consider the diagonal entry
-	Vector a = newRow - kernelMatrix.getRow(rowIndex) ;
-	a[rowIndex] = a[rowIndex]/2;
-
-	NICE::Matrix U (y.size(),2);
-	NICE::Matrix V (2,y.size());
-	for (uint i = 0; i < y.size(); i++)
-	{
-		U(i,0) = a[i];
-		U(i,1) = ei[i];
-		V(0,i) = ei[i];
-		V(1,i) = a[i];
-	}
-
-	// Sherman Woodbury-Morrison Formula:
-	// alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
-	// K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
-	// = oldAlpha - B F^{-1} V oldAlpha
-	// with B = K^{-1} U and F = (I+VB)
-	
-	// Time complexity: 2 choleskySolve calls: O(n^2)
-
-	NICE::Vector B_1;
-	computeInverseKernelMultiply(U.getColumn(0),B_1);
-
-	NICE::Vector B_2;
-	computeInverseKernelMultiply(U.getColumn(1),B_2);
-
-	NICE::Matrix B(y.size(),2);
-
-	for (uint i = 0; i < y.size(); i++)
-	{
-		B(i,0) = B_1[i];
-		B(i,1) = B_2[i];
-	}
-	
-	NICE::Matrix F (2,2);
-	F.setIdentity();
-	Matrix V_B (2,2);
-	V_B.multiply(V,B);
-	F += V_B;
-
-	// Time complexity: 1 linear equation system with 2 variables
-	// can be computed in O(1) using a fixed implementation
-	NICE::Matrix F_inv = NICE::Matrix(2,2);
-	double denominator = F(0,0)*F(1,1)-F(0,1)*F(1,0);
-	F_inv(0,0) = F(1,1)/denominator;
-	F_inv(0,1) = -F(0,1)/denominator;
-	F_inv(1,0) = - F(1,0)/denominator;
-	F_inv(1,1) = F(0,0)/denominator;
-
-	Matrix M_oldAlpha (y.size(),1);
-	for (uint i = 0; i < y.size(); i++)
-	{
-		M_oldAlpha(i,0) = oldAlpha[i];
-	}
-	NICE::Matrix V_oldAlpha;
-	V_oldAlpha.multiply( V,M_oldAlpha);
-	NICE::Matrix F_inv_V_old_Alpha;
-	F_inv_V_old_Alpha.multiply(F_inv, V_oldAlpha);
-
-	NICE::Matrix M_newAlpha;
-	M_newAlpha.multiply(B, F_inv_V_old_Alpha);
-
-	M_newAlpha *= -1;
-	M_newAlpha += M_oldAlpha;
-
-	newAlpha = NICE::Vector(y.size());
-	for (uint i = 0; i < y.size(); i++)
-	{
-		newAlpha[i] = M_newAlpha(i,0);
-	}
-
-	// Matrix Determinant Lemma
-	// http://en.wikipedia.org/wiki/Matrix_determinant_lemma
-	// det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
-	// logdet(K + U*V) = logdet( F ) + logdet(K)
-	double logdetF = log(F(0,0) * F(1,1) - F(0,1) * F(1,0));
-
-	double newLogdetK = logdetF + oldLogdetK;
-
-	logdet = newLogdetK;
-
-	loglike = newLogdetK + newAlpha.scalarProduct(y);
+  // oldAlpha = K^{-1} y
+
+  // K' new kernel matrix = exchange the row and column at position rowIndex
+  // with newRow
+  // try to find U and V such that K' = K + U*V with U,V having rank 2
+
+  // rowIndex'th base vector
+  Vector ei ( y.size(), 0.0 );
+  ei[rowIndex] = 1.0;
+
+  // we have to consider the diagonal entry
+  Vector a = newRow - kernelMatrix.getRow ( rowIndex ) ;
+  a[rowIndex] = a[rowIndex] / 2;
+
+  NICE::Matrix U ( y.size(), 2 );
+  NICE::Matrix V ( 2, y.size() );
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    U ( i, 0 ) = a[i];
+    U ( i, 1 ) = ei[i];
+    V ( 0, i ) = ei[i];
+    V ( 1, i ) = a[i];
+  }
+
+  // Sherman-Morrison-Woodbury formula:
+  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
+  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
+  // = oldAlpha - B F^{-1} V oldAlpha
+  // with B = K^{-1} U and F = (I+VB)
+
+  // Time complexity: 2 choleskySolve calls: O(n^2)
+
+  NICE::Vector B_1;
+  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
+
+  NICE::Vector B_2;
+  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
+
+  NICE::Matrix B ( y.size(), 2 );
+
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    B ( i, 0 ) = B_1[i];
+    B ( i, 1 ) = B_2[i];
+  }
+
+  NICE::Matrix F ( 2, 2 );
+  F.setIdentity();
+  Matrix V_B ( 2, 2 );
+  V_B.multiply ( V, B );
+  F += V_B;
+
+  // Time complexity: 1 linear equation system with 2 variables
+  // can be computed in O(1) using a fixed implementation
+  NICE::Matrix F_inv = NICE::Matrix ( 2, 2 );
+  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
+  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
+  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
+  F_inv ( 1, 0 ) = - F ( 1, 0 ) / denominator;
+  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;
+
+  Matrix M_oldAlpha ( y.size(), 1 );
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    M_oldAlpha ( i, 0 ) = oldAlpha[i];
+  }
+  NICE::Matrix V_oldAlpha;
+  V_oldAlpha.multiply ( V, M_oldAlpha );
+  NICE::Matrix F_inv_V_old_Alpha;
+  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
+
+  NICE::Matrix M_newAlpha;
+  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
+
+  M_newAlpha *= -1;
+  M_newAlpha += M_oldAlpha;
+
+  newAlpha = NICE::Vector ( y.size() );
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    newAlpha[i] = M_newAlpha ( i, 0 );
+  }
+
+  // Matrix Determinant Lemma
+  // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
+  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
+  // logdet(K + U*V) = logdet( F ) + logdet(K)
+  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
+
+  double newLogdetK = logdetF + oldLogdetK;
+
+  logdet = newLogdetK;
+
+  loglike = newLogdetK + newAlpha.scalarProduct ( y );
 }
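
Written out, the two identities this update relies on are the Sherman-Morrison-Woodbury formula and the matrix determinant lemma, with B = K^{-1} U and F = I + V B:

    (K + UV)^{-1} y = K^{-1} y - B F^{-1} V K^{-1} y, \qquad \log\det(K + UV) = \log\det F + \log\det K
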
 
-/** 
+/**
 * @brief Updates the GP likelihood if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3). This is only the first part, useful for multi-class problems.
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 */
-void KernelData::getGPLikelihoodWithOneNewRow_FirstPart(const int & rowIndex, const NICE::Vector & newRow)
+void KernelData::getGPLikelihoodWithOneNewRow_FirstPart ( const int & rowIndex, const NICE::Vector & newRow )
 {
 
-	// oldAlpha = K^{-1} y
-
-	// K' new kernel matrix = exchange the row and column at position rowIndex
-	// with newRow
-	// try to find U and V such that K' = K + U*V with U,V having rank 2
-	
-	// rowIndex'th base vector 
-	Vector ei (newRow.size(), 0.0);
-	ei[rowIndex] = 1.0;
-
-	// we have to consider the diagonal entry
-	Vector a = newRow - kernelMatrix.getRow(rowIndex) ;
-	a[rowIndex] = a[rowIndex]/2;
-
-	U.resize(newRow.size(),2);
-	V.resize(2,newRow.size());
-// 	#pragma omp parallel for
-	for (uint i = 0; i < newRow.size(); i++)
-	{
-		U(i,0) = a[i];
-		U(i,1) = ei[i];
-		V(0,i) = ei[i];
-		V(1,i) = a[i];
-	}
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- U:"  << std::endl;
-		for ( uint ik = 0; ik < U.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < U.cols(); jk++)
-			{
-				std::cerr << U(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- V:"  << std::endl;
-		for ( uint ik = 0; ik < V.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < V.cols(); jk++)
-			{
-				std::cerr << V(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-
-	// Sherman Woodbury-Morrison Formula:
-	// alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
-	// K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
-	// = oldAlpha - B F^{-1} V oldAlpha
-	// with B = K^{-1} U and F = (I+VB)
-	
-	// Time complexity: 2 choleskySolve calls: O(n^2)
-
-
-	NICE::Vector B_1;
-	computeInverseKernelMultiply(U.getColumn(0),B_1);
-
-	NICE::Vector B_2;
-	computeInverseKernelMultiply(U.getColumn(1),B_2);
-
-	B.resize(newRow.size(),2);
-
-	for (uint i = 0; i < newRow.size(); i++)
-	{
-		B(i,0) = B_1[i];
-		B(i,1) = B_2[i];
-	}
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- B:"  << std::endl;
-		for ( uint ik = 0; ik < B.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < B.cols(); jk++)
-			{
-				std::cerr << B(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-
-	F.resize(2,2);
-	F.setIdentity();
-	Matrix V_B (2,2);
-	V_B.multiply(V,B);
-	F += V_B;
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F:"  << std::endl;
-		for ( uint ik = 0; ik < F.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < F.cols(); jk++)
-			{
-				std::cerr << F(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-
-	// Time complexity: 1 linear equation system with 2 variables
-	// can be computed in O(1) using a fixed implementation
-	F_inv.resize(2,2);
-	double denominator = F(0,0)*F(1,1)-F(0,1)*F(1,0);
-	F_inv(0,0) = F(1,1)/denominator;
-	F_inv(0,1) = -F(0,1)/denominator;
-	F_inv(1,0) = - F(1,0)/denominator;
-	F_inv(1,1) = F(0,0)/denominator;
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F_inv:"  << std::endl;
-		for ( uint ik = 0; ik < F_inv.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < F_inv.cols(); jk++)
-			{
-				std::cerr << F_inv(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-		
-		NICE::Matrix MultiplicationResult( F_inv.cols(), F_inv.cols(), 0.0 );
-		MultiplicationResult.multiply(F,F_inv);
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F-inversion MultiplicationResult:"  << std::endl;
-		for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++)
-			{
-				std::cerr << MultiplicationResult(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
+  // oldAlpha = K^{-1} y
+
+  // K' new kernel matrix = exchange the row and column at position rowIndex
+  // with newRow
+  // try to find U and V such that K' = K + U*V with U,V having rank 2
+
+  // rowIndex'th base vector
+  Vector ei ( newRow.size(), 0.0 );
+  ei[rowIndex] = 1.0;
+
+  // we have to consider the diagonal entry
+  Vector a = newRow - kernelMatrix.getRow ( rowIndex ) ;
+  a[rowIndex] = a[rowIndex] / 2;
+
+  U.resize ( newRow.size(), 2 );
+  V.resize ( 2, newRow.size() );
+//  #pragma omp parallel for
+  for ( uint i = 0; i < newRow.size(); i++ )
+  {
+    U ( i, 0 ) = a[i];
+    U ( i, 1 ) = ei[i];
+    V ( 0, i ) = ei[i];
+    V ( 1, i ) = a[i];
+  }
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- U:"  << std::endl;
+    for ( uint ik = 0; ik < U.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < U.cols(); jk++ )
+      {
+        std::cerr << U ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- V:"  << std::endl;
+    for ( uint ik = 0; ik < V.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < V.cols(); jk++ )
+      {
+        std::cerr << V ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+
+  // Sherman-Morrison-Woodbury formula:
+  // alpha_new = (K + UV)^{-1} y = K^{-1} y - K^{-1} U ( I + V
+  // K^{-1} U )^{-1} V K^{-1} y = oldAlpha - B ( I + V B )^{-1} V oldAlpha
+  // = oldAlpha - B F^{-1} V oldAlpha
+  // with B = K^{-1} U and F = (I+VB)
+
+  // Time complexity: 2 choleskySolve calls: O(n^2)
+
+
+  NICE::Vector B_1;
+  computeInverseKernelMultiply ( U.getColumn ( 0 ), B_1 );
+
+  NICE::Vector B_2;
+  computeInverseKernelMultiply ( U.getColumn ( 1 ), B_2 );
+
+  B.resize ( newRow.size(), 2 );
+
+  for ( uint i = 0; i < newRow.size(); i++ )
+  {
+    B ( i, 0 ) = B_1[i];
+    B ( i, 1 ) = B_2[i];
+  }
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- B:"  << std::endl;
+    for ( uint ik = 0; ik < B.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < B.cols(); jk++ )
+      {
+        std::cerr << B ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+
+  F.resize ( 2, 2 );
+  F.setIdentity();
+  Matrix V_B ( 2, 2 );
+  V_B.multiply ( V, B );
+  F += V_B;
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F:"  << std::endl;
+    for ( uint ik = 0; ik < F.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < F.cols(); jk++ )
+      {
+        std::cerr << F ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+
+  // Time complexity: 1 linear equation system with 2 variables
+  // can be computed in O(1) using a fixed implementation
+  F_inv.resize ( 2, 2 );
+  double denominator = F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 );
+  F_inv ( 0, 0 ) = F ( 1, 1 ) / denominator;
+  F_inv ( 0, 1 ) = -F ( 0, 1 ) / denominator;
+  F_inv ( 1, 0 ) = - F ( 1, 0 ) / denominator;
+  F_inv ( 1, 1 ) = F ( 0, 0 ) / denominator;
+  
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F_inv:"  << std::endl;
+    for ( uint ik = 0; ik < F_inv.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < F_inv.cols(); jk++ )
+      {
+        std::cerr << F_inv ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+
+    NICE::Matrix MultiplicationResult ( F_inv.cols(), F_inv.cols(), 0.0 );
+    MultiplicationResult.multiply ( F, F_inv );
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- F-inversion MultiplicationResult:"  << std::endl;
+    for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++ )
+      {
+        std::cerr << MultiplicationResult ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
 }
 
-/** 
+/**
 * @brief Updates the GP likelihood if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3). This is only the second part, useful for multi-class problems.
 * @author Alexander Lütz
 * @date 01/12/2010 (dd/mm/yyyy)
 */
-void KernelData::getGPLikelihoodWithOneNewRow_SecondPart(  const NICE::Vector & y, const double & oldLogdetK, const Vector & oldAlpha , Vector & newAlpha, double & loglike)
+void KernelData::getGPLikelihoodWithOneNewRow_SecondPart ( const NICE::Vector & y, const double & oldLogdetK, const Vector & oldAlpha , Vector & newAlpha, double & loglike )
 {
-	Matrix M_oldAlpha (y.size(),1);
-	for (uint i = 0; i < y.size(); i++)
-	{
-		M_oldAlpha(i,0) = oldAlpha[i];
-	}
-	NICE::Matrix V_oldAlpha;
-	V_oldAlpha.multiply( V,M_oldAlpha);
-	NICE::Matrix F_inv_V_old_Alpha;
-	F_inv_V_old_Alpha.multiply(F_inv, V_oldAlpha);
-
-	NICE::Matrix M_newAlpha;
-	M_newAlpha.multiply(B, F_inv_V_old_Alpha);
-
-	M_newAlpha *= -1;
-	M_newAlpha += M_oldAlpha;
-
-	newAlpha = NICE::Vector(y.size());
-	for (uint i = 0; i < y.size(); i++)
-	{
-		newAlpha[i] = M_newAlpha(i,0);
-	}
-
-	// Matrix Determinant Lemma
-	// http://en.wikipedia.org/wiki/Matrix_determinant_lemma
-	// det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
-	// logdet(K + U*V) = logdet( F ) + logdet(K)
-	double logdetF = log(F(0,0) * F(1,1) - F(0,1) * F(1,0));
-
-	double newLogdetK = logdetF + oldLogdetK;
-
-	logdet = newLogdetK;
-
-	loglike = newLogdetK + newAlpha.scalarProduct(y);
+  Matrix M_oldAlpha ( y.size(), 1 );
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    M_oldAlpha ( i, 0 ) = oldAlpha[i];
+  }
+  NICE::Matrix V_oldAlpha;
+  V_oldAlpha.multiply ( V, M_oldAlpha );
+  NICE::Matrix F_inv_V_old_Alpha;
+  F_inv_V_old_Alpha.multiply ( F_inv, V_oldAlpha );
+
+  NICE::Matrix M_newAlpha;
+  M_newAlpha.multiply ( B, F_inv_V_old_Alpha );
+
+  M_newAlpha *= -1;
+  M_newAlpha += M_oldAlpha;
+
+  newAlpha = NICE::Vector ( y.size() );
+  for ( uint i = 0; i < y.size(); i++ )
+  {
+    newAlpha[i] = M_newAlpha ( i, 0 );
+  }
+
+  // Matrix Determinant Lemma
+  // http://en.wikipedia.org/wiki/Matrix_determinant_lemma
+  // det(K + U*V) = det(I + V * K^{-1} * U) * det(K)
+  // logdet(K + U*V) = logdet( F ) + logdet(K)
+  double logdetF = log ( F ( 0, 0 ) * F ( 1, 1 ) - F ( 0, 1 ) * F ( 1, 0 ) );
+
+  double newLogdetK = logdetF + oldLogdetK;
+
+  logdet = newLogdetK;
+
+  loglike = newLogdetK + newAlpha.scalarProduct ( y );
 }
 
 
-/** 
+/**
 * @brief Updates the GP likelihood if only the i-th row and column have changed. Time is O(n^2) instead of O(n^3).
 * @author Alexander Lütz
 * @date 01/09/2011 (dd/mm/yyyy)
 */
-void KernelData::perform_Rank_2_Update(const int & rowIndex, const NICE::Vector & newRow)
+void KernelData::perform_Rank_2_Update ( const int & rowIndex, const NICE::Vector & newRow )
 {
-	getGPLikelihoodWithOneNewRow_FirstPart(rowIndex,newRow);
-	Matrix prod_1;
-	prod_1.multiply(V,inverseKernelMatrix);
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_1:"  << std::endl;
-		for ( uint ik = 0; ik < prod_1.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < prod_1.cols(); jk++)
-			{
-				std::cerr << prod_1(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-	
-	Matrix prod_2;
-	prod_2.multiply(F_inv,prod_1);
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_2:"  << std::endl;
-		for ( uint ik = 0; ik < prod_2.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < prod_2.cols(); jk++)
-			{
-				std::cerr << prod_2(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-	
-	Matrix prod_3;
-	prod_3.multiply(B,prod_2);
-	
-	if (verbose)
-	{
-		std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_3:"  << std::endl;
-		for ( uint ik = 0; ik < prod_3.rows(); ik++ )
-		{
-			for ( uint jk = 0; jk < prod_3.cols(); jk++)
-			{
-				std::cerr << prod_3(ik,jk) << " ";
-			}
-			std::cerr << std::endl;
-		}
-	}
-	inverseKernelMatrix = inverseKernelMatrix - prod_3;
-	
-	//correct the stored kernel matrix after our computations
-	for (uint i = 0; i < newRow.size(); i++)
-	{
-		kernelMatrix(i,rowIndex) = newRow[i];
-		kernelMatrix(rowIndex,i) = newRow[i];
-	}
+  getGPLikelihoodWithOneNewRow_FirstPart ( rowIndex, newRow );
+  Matrix prod_1;
+  prod_1.multiply ( V, inverseKernelMatrix );
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_1:"  << std::endl;
+    for ( uint ik = 0; ik < prod_1.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < prod_1.cols(); jk++ )
+      {
+        std::cerr << prod_1 ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+
+  Matrix prod_2;
+  prod_2.multiply ( F_inv, prod_1 );
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_2:"  << std::endl;
+    for ( uint ik = 0; ik < prod_2.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < prod_2.cols(); jk++ )
+      {
+        std::cerr << prod_2 ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+
+  Matrix prod_3;
+  prod_3.multiply ( B, prod_2 );
+
+  if ( verbose )
+  {
+    std::cerr << std::endl << "KernelData::perform_Rank_2_Update -- prod_3:"  << std::endl;
+    for ( uint ik = 0; ik < prod_3.rows(); ik++ )
+    {
+      for ( uint jk = 0; jk < prod_3.cols(); jk++ )
+      {
+        std::cerr << prod_3 ( ik, jk ) << " ";
+      }
+      std::cerr << std::endl;
+    }
+  }
+  inverseKernelMatrix = inverseKernelMatrix - prod_3;
+  
+//   std::cerr << "perform rank 2 update: inverseKernelMatrix: " << inverseKernelMatrix << std::endl;
+
+  //correct the stored kernel matrix after our computations
+  for ( uint i = 0; i < newRow.size(); i++ )
+  {
+    kernelMatrix ( i, rowIndex ) = newRow[i];
+    kernelMatrix ( rowIndex, i ) = newRow[i];
+  }
+//   std::cerr << "kernelMatrix: " << kernelMatrix << std::endl << " newRow: " << newRow << std::endl;
 }
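
A minimal sketch of how the O(n^2) update could be cross-checked against a full recomputation, assuming kd is an already initialized KernelData instance holding the current kernel matrix (rowIndex and newRow are placeholders):

    kd.updateCholeskyFactorization();
    kd.updateInverseKernelMatrix();
    kd.perform_Rank_2_Update ( rowIndex, newRow );        // fast incremental path, O(n^2)
    KernelData *reference = kd.clone();                   // clone holds the already corrected kernel matrix
    reference->updateCholeskyFactorization();             // full recomputation, O(n^3)
    reference->updateInverseKernelMatrix();
    NICE::Matrix diff = kd.getInverseKernelMatrix() - reference->getInverseKernelMatrix();
    std::cerr << "deviation (Frobenius norm): " << diff.frobeniusNorm() << std::endl;
    delete reference;
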
 
-/** 
+/**
 * @brief Updates the GP likelihood if only k rows and columns are changed. Time is O(k^3+n^2) instead of O(n^3). Alternatively, this could be done by iteratively changing one row at a time, which leads to O(k*n^2).
 * @author Alexander Lütz
 * @date 01/09/2011 (dd/mm/yyyy)
 */
-void KernelData::perform_Rank_2k_Update(const std::vector<int> & rowIndices, const std::vector<NICE::Vector> & newRows)
+void KernelData::perform_Rank_2k_Update ( const std::vector<int> & rowIndices, const std::vector<NICE::Vector> & newRows )
 {
-	if ( (rowIndices.size() != 0) && (rowIndices.size() == newRows.size()) )
-	{
-		std::vector<NICE::Vector> unity_vectors;
-		std::vector<NICE::Vector> diff_vectors;
-		for (uint j = 0; j < rowIndices.size(); j++)
-		{
-			NICE::Vector unity_vector(newRows[0].size(), 0.0);
-			unity_vector[rowIndices[j] ] = 1.0;
-			unity_vectors.push_back(unity_vector);
-			
-			NICE::Vector a = newRows[j] - kernelMatrix.getRow(rowIndices[j]);
-			for (uint x = 0; x < rowIndices.size(); x++)
-			{
-				a[rowIndices[x] ] /= 2.0;
-			}
-			diff_vectors.push_back(a);
-		}
-
-		U.resize(newRows[0].size(),2*rowIndices.size());
-		V.resize(2*rowIndices.size(),newRows[0].size());
-		
-		for (uint i = 0; i < newRows[0].size(); i++)
-		{
-			for (uint j = 0; j < rowIndices.size(); j++)
-			{
-				U(i,rowIndices.size()+j) = (unity_vectors[j])[i];
-				U(i,j) = (diff_vectors[j])[i];
-				
-				V(rowIndices.size()+j,i) = (diff_vectors[j])[i];
-				V(j,i) = (unity_vectors[j])[i];
-			}
-		}
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- U:"  << std::endl;
-			for ( uint ik = 0; ik < U.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < U.cols(); jk++)
-				{
-					std::cerr << U(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- V:"  << std::endl;
-			for ( uint ik = 0; ik < V.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < V.cols(); jk++)
-				{
-					std::cerr << V(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		NICE::Matrix UV(newRows[0].size(),newRows[0].size());
-		UV.multiply(U,V);
-	
-		if (verbose)
-		{
-			// we have to consider the entries which are added twice
-			for (int x = 0; x < (int)rowIndices.size(); x++)
-			{
-				for (int y = x; y < (int)rowIndices.size(); y++)
-				{
-					UV(rowIndices[x] , rowIndices[y]) /= 2.0;
-					if (x!=y)
-						UV(rowIndices[y] , rowIndices[x]) /= 2.0;
-				}
-			}
-		}
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- UV:"  << std::endl;
-			for ( uint ik = 0; ik < UV.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < UV.cols(); jk++)
-				{
-					std::cerr << UV(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		
-		B.resize(newRows[0].size(),2*rowIndices.size());
-		for (uint j = 0; j < 2*rowIndices.size(); j++)
-		{
-			NICE::Vector B_row;
-			computeInverseKernelMultiply(U.getColumn(j),B_row);
-			for (uint i = 0; i < newRows[0].size(); i++)
-			{
-				B(i,j) = B_row[i];
-			}
-		}
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- B:"  << std::endl;
-			for ( uint ik = 0; ik < B.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < B.cols(); jk++)
-				{
-					std::cerr << B(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		F.resize(2*rowIndices.size(),2*rowIndices.size());
-		F.setIdentity();
-		Matrix V_B (2*rowIndices.size(),2*rowIndices.size());
-		V_B.multiply(V,B);
-		F += V_B;
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F:"  << std::endl;
-			for ( uint ik = 0; ik < F.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < F.cols(); jk++)
-				{
-					std::cerr << F(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-
-		//invert F!
-		F_inv = invert(F);
-
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F_inv:"  << std::endl;
-			for ( uint ik = 0; ik < F_inv.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < F_inv.cols(); jk++)
-				{
-					std::cerr << F_inv(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		NICE::Matrix MultiplicationResult( F.rows(), F_inv.cols(), 0.0 );
-		MultiplicationResult.multiply(F,F_inv);
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F-inversion MultiplicationResult:"  << std::endl;
-			for ( uint ik = 0; ik < MultiplicationResult.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < MultiplicationResult.cols(); jk++)
-				{
-					std::cerr << MultiplicationResult(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		Matrix prod_1;
-		prod_1.multiply(V,inverseKernelMatrix);
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_1:"  << std::endl;
-			for ( uint ik = 0; ik < prod_1.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < prod_1.cols(); jk++)
-				{
-					std::cerr << prod_1(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		
-		Matrix prod_2;
-		prod_2.multiply(F_inv,prod_1);
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_2:"  << std::endl;
-			for ( uint ik = 0; ik < prod_2.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < prod_2.cols(); jk++)
-				{
-					std::cerr << prod_2(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-
-		Matrix prod_3;
-		prod_3.multiply(B,prod_2);
-		
-		if (verbose)
-		{
-			std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_3:"  << std::endl;
-			for ( uint ik = 0; ik < prod_3.rows(); ik++ )
-			{
-				for ( uint jk = 0; jk < prod_3.cols(); jk++)
-				{
-					std::cerr << prod_3(ik,jk) << " ";
-				}
-				std::cerr << std::endl;
-			}
-		}
-		inverseKernelMatrix = inverseKernelMatrix - prod_3;
-		
-		//remember the new kernel entries for the next time
-		for (uint i = 0; i < rowIndices.size(); i++)
-		{
-			for ( uint ik = 0; ik < kernelMatrix.rows(); ik++ )
-			{
-				kernelMatrix(ik,rowIndices[i]) = (newRows[i])[ik];
-				kernelMatrix(rowIndices[i],ik) = (newRows[i])[ik];
-			}
-		}
-	}
-	else
-	{
-		std::cerr << "Failure" << std::endl;
-	}
+  if ( ( rowIndices.size() != 0 ) && ( rowIndices.size() == newRows.size() ) )
+  {
+    std::vector<NICE::Vector> unity_vectors;
+    std::vector<NICE::Vector> diff_vectors;
+    for ( uint j = 0; j < rowIndices.size(); j++ )
+    {
+      NICE::Vector unity_vector ( newRows[0].size(), 0.0 );
+      unity_vector[rowIndices[j] ] = 1.0;
+      unity_vectors.push_back ( unity_vector );
+
+      NICE::Vector a = newRows[j] - kernelMatrix.getRow ( rowIndices[j] );
+      for ( uint x = 0; x < rowIndices.size(); x++ )
+      {
+        a[rowIndices[x] ] /= 2.0;
+      }
+      diff_vectors.push_back ( a );
+    }
+
+    U.resize ( newRows[0].size(), 2*rowIndices.size() );
+    V.resize ( 2*rowIndices.size(), newRows[0].size() );
+
+    for ( uint i = 0; i < newRows[0].size(); i++ )
+    {
+      for ( uint j = 0; j < rowIndices.size(); j++ )
+      {
+        U ( i, rowIndices.size() + j ) = ( unity_vectors[j] ) [i];
+        U ( i, j ) = ( diff_vectors[j] ) [i];
+
+        V ( rowIndices.size() + j, i ) = ( diff_vectors[j] ) [i];
+        V ( j, i ) = ( unity_vectors[j] ) [i];
+      }
+    }
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- U: " << U << std::endl;
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- V: " << V  << std::endl;
+    }
+
+    NICE::Matrix UV ( newRows[0].size(), newRows[0].size() );
+    UV.multiply ( U, V );
+
+    if ( verbose )
+    {
+      // we have to consider the entries which are added twice
+      for ( int x = 0; x < ( int ) rowIndices.size(); x++ )
+      {
+        for ( int y = x; y < ( int ) rowIndices.size(); y++ )
+        {
+          UV ( rowIndices[x] , rowIndices[y] ) /= 2.0;
+          if ( x != y )
+            UV ( rowIndices[y] , rowIndices[x] ) /= 2.0;
+        }
+      }
+    }
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- UV:"  << UV << std::endl;
+    }
+
+
+    B.resize ( newRows[0].size(), 2*rowIndices.size() );
+    for ( uint j = 0; j < 2*rowIndices.size(); j++ )
+    {
+      NICE::Vector B_row;
+      computeInverseKernelMultiply ( U.getColumn ( j ), B_row );
+      for ( uint i = 0; i < newRows[0].size(); i++ )
+      {
+        B ( i, j ) = B_row[i];
+      }
+    }
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- B:"  << B << std::endl;
+    }
+
+    F.resize ( 2*rowIndices.size(), 2*rowIndices.size() );
+    F.setIdentity();
+    Matrix V_B ( 2*rowIndices.size(), 2*rowIndices.size() );
+    V_B.multiply ( V, B );
+    F += V_B;
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F:"  << F << std::endl;
+    }
+
+    //invert F!
+    //we can't rely on methods like Cholesky decomposition, since F need not be positive definite
+    F_inv = invert ( F );
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F_inv:"  << F_inv <<std::endl;
+    }
+
+    if ( verbose )
+    {
+      NICE::Matrix MultiplicationResult ( F.rows(), F_inv.cols(), 0.0 );
+      MultiplicationResult.multiply ( F, F_inv );  
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- F-inversion MultiplicationResult:"  << MultiplicationResult << std::endl;
+    }
+
+    Matrix prod_1;  
+    prod_1.multiply ( V, inverseKernelMatrix );
+    std::cerr << "prod_1: " << prod_1.rows() << " x " << prod_1.cols() << std::endl;
+    
+    std::cerr << "v and inverse matrix multiplied" << std::endl;
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_1:"  << prod_1 << std::endl;
+    }
+
+    Matrix prod_2;   
+    prod_2.resize(F.rows(), prod_1.cols());
+          
+    prod_2.multiply ( F_inv, prod_1 );
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_2:"  << prod_2 << std::endl;
+    }
+
+    std::cerr << "B: " << B.rows() << " x " << B.cols() << std::endl;
+    Matrix prod_3;
+    prod_3.multiply ( B, prod_2 );
+    
+    std::cerr << "prod_3 created: " << prod_3.rows() << " x " << prod_3.cols() << std::endl;
+
+    if ( verbose )
+    {
+      std::cerr << std::endl << "KernelData::perform_Rank_2k_Update -- prod_3:"  << prod_3 << std::endl;
+    }
+    inverseKernelMatrix = inverseKernelMatrix - prod_3;
+
+    //remember the new kernel entries for the next time
+    for ( uint i = 0; i < rowIndices.size(); i++ )
+    {
+      for ( uint ik = 0; ik < kernelMatrix.rows(); ik++ )
+      {
+        kernelMatrix ( ik, rowIndices[i] ) = ( newRows[i] ) [ik];
+        kernelMatrix ( rowIndices[i], ik ) = ( newRows[i] ) [ik];
+      }
+    }
+  }
+  else
+  {
+    std::cerr << "Failure" << std::endl;
+  }
 }
 
-void KernelData::delete_one_row(const int & rowIndex)
+void KernelData::delete_one_row ( const int & rowIndex )
 {
-	if ( (inverseKernelMatrix.rows() != 0) && (inverseKernelMatrix.cols() != 0)) 
-	{
-		inverseKernelMatrix.deleteCol(rowIndex);
-		inverseKernelMatrix.deleteRow(rowIndex);
-	}
-
-	if ( (choleskyMatrix.rows() != 0) && (choleskyMatrix.cols() != 0)) 
-	{
-		choleskyMatrix.deleteCol(rowIndex);
-		choleskyMatrix.deleteRow(rowIndex);
-	}
-	
-	if ( (kernelMatrix.rows() != 0) && (kernelMatrix.cols() != 0)) 
-	{
-		kernelMatrix.deleteCol(rowIndex);
-		kernelMatrix.deleteRow(rowIndex);
-	}
+  if ( ( inverseKernelMatrix.rows() != 0 ) && ( inverseKernelMatrix.cols() != 0 ) )
+  {
+    inverseKernelMatrix.deleteCol ( rowIndex );
+    inverseKernelMatrix.deleteRow ( rowIndex );
+  }
+
+  if ( ( choleskyMatrix.rows() != 0 ) && ( choleskyMatrix.cols() != 0 ) )
+  {
+    choleskyMatrix.deleteCol ( rowIndex );
+    choleskyMatrix.deleteRow ( rowIndex );
+  }
+
+  if ( ( kernelMatrix.rows() != 0 ) && ( kernelMatrix.cols() != 0 ) )
+  {
+    kernelMatrix.deleteCol ( rowIndex );
+    kernelMatrix.deleteRow ( rowIndex );
+  }
 }
 
-void KernelData::delete_multiple_rows(std::vector<int> & indices)
+void KernelData::delete_multiple_rows ( std::vector<int> & indices )
 {
-	if ( (inverseKernelMatrix.rows() >= indices.size()) && (inverseKernelMatrix.cols() >= indices.size())) 
-	{
-		inverseKernelMatrix.deleteCols(indices);
-		inverseKernelMatrix.deleteRows(indices);
-	}
-
-	if ( (choleskyMatrix.rows() >= indices.size()) && (choleskyMatrix.cols() >= indices.size())) 
-	{
-		choleskyMatrix.deleteCols(indices);
-		choleskyMatrix.deleteRows(indices);
-	}
-	
-	if ( (kernelMatrix.rows() >= indices.size()) && (kernelMatrix.cols() >= indices.size())) 
-	{
-		kernelMatrix.deleteCols(indices);
-		kernelMatrix.deleteRows(indices);
-	}
+  if ( ( inverseKernelMatrix.rows() >= indices.size() ) && ( inverseKernelMatrix.cols() >= indices.size() ) )
+  {
+    inverseKernelMatrix.deleteCols ( indices );
+    inverseKernelMatrix.deleteRows ( indices );
+  }
+
+  if ( ( choleskyMatrix.rows() >= indices.size() ) && ( choleskyMatrix.cols() >= indices.size() ) )
+  {
+    choleskyMatrix.deleteCols ( indices );
+    choleskyMatrix.deleteRows ( indices );
+  }
+
+  if ( ( kernelMatrix.rows() >= indices.size() ) && ( kernelMatrix.cols() >= indices.size() ) )
+  {
+    kernelMatrix.deleteCols ( indices );
+    kernelMatrix.deleteRows ( indices );
+  }
 }
 
-void KernelData::setKernelMatrix(const NICE::Matrix & k_matrix)
+void KernelData::setKernelMatrix ( const NICE::Matrix & k_matrix )
 {
-	kernelMatrix = k_matrix;
+  kernelMatrix = k_matrix;
 }
 
 void KernelData::increase_size_by_One()
 {
-	NICE::Matrix new_Kernel(kernelMatrix.rows()+1, kernelMatrix.cols()+1);
-	new_Kernel.setBlock(0, 0, kernelMatrix);
-	for (uint i = 0; i < kernelMatrix.rows()-1; i++)
-	{
-		new_Kernel(i,kernelMatrix.cols()) = 0.0;
-		new_Kernel(kernelMatrix.rows(),i) = 0.0;
-	}
-	new_Kernel(kernelMatrix.rows(),kernelMatrix.cols()) = 1.0;
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	kernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	kernelMatrix = new_Kernel;
-	
-	new_Kernel.setBlock(0, 0, inverseKernelMatrix);
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	inverseKernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	inverseKernelMatrix = new_Kernel;
-
-	new_Kernel.setBlock(0, 0, choleskyMatrix);
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	choleskyMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	choleskyMatrix = new_Kernel;
+  NICE::Matrix new_Kernel ( kernelMatrix.rows() + 1, kernelMatrix.cols() + 1 );
+  new_Kernel.setBlock ( 0, 0, kernelMatrix );
+  for ( uint i = 0; i < kernelMatrix.rows() - 1; i++ )
+  {
+    new_Kernel ( i, kernelMatrix.cols() ) = 0.0;
+    new_Kernel ( kernelMatrix.rows(), i ) = 0.0;
+  }
+  new_Kernel ( kernelMatrix.rows(), kernelMatrix.cols() ) = 1.0;
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  kernelMatrix = new_Kernel;
+
+  new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  inverseKernelMatrix = new_Kernel;
+
+  new_Kernel.setBlock ( 0, 0, choleskyMatrix );
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  choleskyMatrix = new_Kernel;
 }
 
-void KernelData::increase_size_by_k(const uint & k)
+void KernelData::increase_size_by_k ( const uint & k )
 {
-	NICE::Matrix new_Kernel(kernelMatrix.rows()+k, kernelMatrix.cols()+k);
-	new_Kernel.setBlock(0, 0, kernelMatrix);
-	for (uint i = 0; i < kernelMatrix.rows()-1; i++)
-	{
-		for (uint j = 0; j < k; j++)
-		{
-			new_Kernel(i,kernelMatrix.cols()+j) = 0.0;
-			new_Kernel(kernelMatrix.rows()+j,i) = 0.0;
-		}
-	}
-	for (uint j = 0; j < k; j++)
-	{
-		new_Kernel(kernelMatrix.rows()+j,kernelMatrix.cols()+j) = 1.0;
-	}
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	kernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	kernelMatrix = new_Kernel;
-	
-	new_Kernel.setBlock(0, 0, inverseKernelMatrix);
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	inverseKernelMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	inverseKernelMatrix = new_Kernel;
-
-	new_Kernel.setBlock(0, 0, choleskyMatrix);
-	//NOTE Maybe it would be more efficient to work directly with pointers to the memory
-	choleskyMatrix.resize(new_Kernel.rows(), new_Kernel.cols());
-	choleskyMatrix = new_Kernel;
-}
+  NICE::Matrix new_Kernel ( kernelMatrix.rows() + k, kernelMatrix.cols() + k );
+  new_Kernel.setBlock ( 0, 0, kernelMatrix );
+  for ( uint i = 0; i < kernelMatrix.rows() - 1; i++ )
+  {
+    for ( uint j = 0; j < k; j++ )
+    {
+      new_Kernel ( i, kernelMatrix.cols() + j ) = 0.0;
+      new_Kernel ( kernelMatrix.rows() + j, i ) = 0.0;
+    }
+  }
+  for ( uint j = 0; j < k; j++ )
+  {
+    new_Kernel ( kernelMatrix.rows() + j, kernelMatrix.cols() + j ) = 1.0;
+  }
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  kernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  kernelMatrix = new_Kernel;
+
+  new_Kernel.setBlock ( 0, 0, inverseKernelMatrix );
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  inverseKernelMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  inverseKernelMatrix = new_Kernel;
+
+  new_Kernel.setBlock ( 0, 0, choleskyMatrix );
+  //NOTE Maybe it would be more efficient to work directly with pointers to the memory
+  choleskyMatrix.resize ( new_Kernel.rows(), new_Kernel.cols() );
+  choleskyMatrix = new_Kernel;
+}

+ 54 - 54
math/pdf/tests/TestPDF.cpp

@@ -25,35 +25,35 @@
 #include "core/vector/VVector.h"
 #include "vislearning/math/pdf/PDFGaussian.h"
 
-#include "vislearning/nice_nonvis.h"
+#include "objrec/nice_nonvis.h"
 
 using namespace std;
 using namespace NICE;
 using namespace OBJREC;
 
-Matrix computeCovariance(const VVector & vecs)
+Matrix computeCovariance ( const VVector & vecs )
 {
-	Vector mean(vecs[0].size(), 0.0);
+  Vector mean ( vecs[0].size(), 0.0 );
 
-    for (unsigned int i = 0; i < vecs.size(); ++i)
-		mean += vecs[i];
-    
-	mean *= 1.0 / vecs.size();
+  for ( unsigned int i = 0; i < vecs.size(); ++i )
+    mean += vecs[i];
 
-    Matrix cov(vecs[0].size(), vecs[0].size(), 0.0);
+  mean *= 1.0 / vecs.size();
 
-    for (unsigned int i = 0; i < vecs.size(); ++i)
-    {
-        Vector diff = vecs[i] - mean;
-        cov.addTensorProduct ( 1.0, diff, diff );
-    }
+  Matrix cov ( vecs[0].size(), vecs[0].size(), 0.0 );
+
+  for ( unsigned int i = 0; i < vecs.size(); ++i )
+  {
+    Vector diff = vecs[i] - mean;
+    cov.addTensorProduct ( 1.0, diff, diff );
+  }
 
-    cov *= 1.0 / ( vecs.size() );
+  cov *= 1.0 / ( vecs.size() );
 
-    return cov;
+  return cov;
 }
 
-CPPUNIT_TEST_SUITE_REGISTRATION(TestPDF);
+CPPUNIT_TEST_SUITE_REGISTRATION ( TestPDF );
 
 
 
@@ -67,42 +67,42 @@ void TestPDF::tearDown()
 
 void TestPDF::TestPDFComputation()
 {
-    uint dim = 3;
-    bool init_random = true ;
-    uint samples = dim*500;
-
-    NICE::Matrix C(dim, dim);
-    NICE::Vector mean(dim, 0.0);
-    VVector samplevectors;
-    C.set(0.0);
-	cerr << "Sample from Gaussian" << endl;
-    //init random
-    if (init_random)
-        srand48(time(NULL));
-
-    // generate random symmetric matrix
-    for (uint i = 0 ; i < dim ; i++)
-        for (uint j = i ; j < dim ; j++)
-        {
-            C(i, j) = drand48();
-            C(j, i) = C(i, j);
-        }
-	C=C*C.transpose();
-    cerr << "Ground-Truth covariance" << endl;
-    cerr << C << endl;
-    //initialize GaussPDF
-    PDFGaussian pdf_gauss(C, mean);
-    
-	//draw samples
-    pdf_gauss.sample(samplevectors, samples);
-    Matrix Cov_test = computeCovariance(samplevectors);
-    cerr << "Estimated covariance" << endl;
-    cerr<<Cov_test<<endl;
-        
-	NICE::Matrix diff = C-Cov_test;
-	double frobNorm = diff.frobeniusNorm();
-
-	cerr << "Frobenius norm: " << frobNorm << endl;
-	CPPUNIT_ASSERT_DOUBLES_EQUAL_NOT_NAN(0.0, frobNorm, 0.1);
-     
+  uint dim = 3;
+  bool init_random = true ;
+  uint samples = dim * 500;
+
+  NICE::Matrix C ( dim, dim );
+  NICE::Vector mean ( dim, 0.0 );
+  VVector samplevectors;
+  C.set ( 0.0 );
+  cerr << "Sample from Gaussian" << endl;
+  //init random
+  if ( init_random )
+    srand48 ( time ( NULL ) );
+
+  // generate random symmetric matrix
+  for ( uint i = 0 ; i < dim ; i++ )
+    for ( uint j = i ; j < dim ; j++ )
+    {
+      C ( i, j ) = drand48();
+      C ( j, i ) = C ( i, j );
+    }
+  C = C * C.transpose();
+  cerr << "Ground-Truth covariance" << endl;
+  cerr << C << endl;
+  //initialize GaussPDF
+  PDFGaussian pdf_gauss ( C, mean );
+
+  //draw samples
+  pdf_gauss.sample ( samplevectors, samples );
+  Matrix Cov_test = computeCovariance ( samplevectors );
+  cerr << "Estimated covariance" << endl;
+  cerr << Cov_test << endl;
+
+  NICE::Matrix diff = C - Cov_test;
+  double frobNorm = diff.frobeniusNorm();
+
+  cerr << "Frobenius norm: " << frobNorm << endl;
+  CPPUNIT_ASSERT_DOUBLES_EQUAL_NOT_NAN ( 0.0, frobNorm, 0.1 );
+
 }
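
The estimator implemented in computeCovariance is the (biased) maximum-likelihood sample covariance; the test accepts the result if the Frobenius norm of its deviation from the ground-truth C stays below 0.1:

    \hat{\Sigma} = \frac{1}{N} \sum_{i=1}^{N} (x_i - \bar{x}) (x_i - \bar{x})^{\top}
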

+ 27 - 0
progs/ImagenetBinary.conf

@@ -0,0 +1,27 @@
+[main]
+# whether to use eriks folder (only works on dionysos)
+imageNetLocal = false
+
+#GP variance approximation
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+#GP variance
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+#GP mean approximation
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+#GP mean
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+#Parzen
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+#SVDD
+sigmaGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarSigma.txt
+noiseGPVarApproxFile = /home/luetz/code/nice/vislearning/progs/approxVarNoise.txt
+
+indexOfFirstClass = 0
+indexOfLastClass = 999
+
+nrOfExamplesPerClass = 50

+ 9 - 0
progs/libdepend.inc

@@ -0,0 +1,9 @@
+$(call PKG_DEPEND_INT,core/basics)
+$(call PKG_DEPEND_INT,core/algebra)
+$(call PKG_DEPEND_INT,vislearning/math)
+$(call PKG_DEPEND_INT,vislearning/baselib)
+$(call PKG_DEPEND_INT,vislearning/cbaselib)
+$(call PKG_DEPEND_INT,vislearning/classifier/kernelclassifier)
+$(call PKG_DEPEND_INT,fast-hik)
+
+

+ 149 - 0
progs/testImageNetBinary.cpp

@@ -0,0 +1,149 @@
+/** 
+* @file testImageNetBinary.cpp
+* @brief perform ImageNet tests with binary tasks for OCC
+* @author Alexander Lütz
+* @date 23-05-2012 (dd-mm-yyyy)
+
+*/
+#include "core/basics/Config.h"
+
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include "vislearning/baselib/ProgressBar.h"
+
+#include "fast-hik/tools.h"
+#include "fast-hik/MatFileIO.h"
+#include "fast-hik/ImageNetData.h"
+
+#include "vislearning/classifier/kernelclassifier/KCGPOneClass.h"
+#include "vislearning/classifier/kernelclassifier/KCGPApproxOneClass.h"
+
+
+#include "vislearning/math/kernels/KernelData.h"
+#include "vislearning/math/kernels/Kernel.h"
+#include "vislearning/math/kernels/KernelRBF.h"
+#include "vislearning/math/kernels/KernelExp.h"
+
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+
+/** 
+    perform an ImageNet test with binary tasks for one-class classification (OCC)
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  int positiveClass = conf.gI("main", "positive_class");
+
+  std::cerr << "Positive class is " << positiveClass << std::endl;
+  
+  sparse_t data;
+  NICE::Vector y;
+  
+  std::cerr << "Reading ImageNet data ..." << std::endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNet ( imageNetPath + "demo/" );
+
+//   imageNet.getBatchData ( data, y, "train", "training" );
+  LabeledSetVector train;
+  imageNet.loadDataAsLabeledSetVector( train );
+  
+  //set up the kernel function
+  double rbf_sigma = conf.gD("main", "rbf_sigma", -2.0 );
+  KernelRBF kernelFunction ( rbf_sigma, 0.0 );
+    //KernelExp kernelFunction ( rbf_sigma, 0.0, 0.0 );
+
+  //set up our OC-classifier
+  string classifierName = conf.gS("main", "classifier", "KCGPApproxOneClass");
+  
+  KernelClassifier *classifier;
+  if(strcmp("KCGPApproxOneClass",classifierName.c_str())==0)
+  {
+    classifier = new KCGPApproxOneClass ( &conf, &kernelFunction );
+  }
+  else if (strcmp("KCGPOneClass",classifierName.c_str())==0) {
+    classifier = new KCGPOneClass ( &conf, &kernelFunction );
+  }
+  else{ //default
+    classifier = new KCGPApproxOneClass ( &conf, &kernelFunction );
+  }
+  //and perform the training
+  classifier->teach( train );    
+
+//   uint n = y.size();
+//   
+//   set<int> positives;
+//   set<int> negatives;
+// 
+//   map< int, set<int> > mysets;
+//   for ( uint i = 0 ; i < n; i++ )
+//     mysets[ y[i] ].insert ( i );
+// 
+//   if ( mysets[ positiveClass ].size() == 0 ) 
+//     fthrow(Exception, "Class " << positiveClass << " is not available.");
+// 
+//   // add our positive examples
+//   for ( set<int>::const_iterator i = mysets[positiveClass].begin(); i != mysets[positiveClass].end(); i++ )
+//     positives.insert ( *i );
+// 
+//   int Nneg = conf.gI("main", "nneg", 1 );
+//   for ( map<int, set<int> >::const_iterator k = mysets.begin(); k != mysets.end(); k++ )
+//   {
+//     int classno = k->first;
+//     if ( classno == positiveClass )
+//       continue;
+//     const set<int> & s = k->second;
+//     uint ind = 0;
+//     for ( set<int>::const_iterator i = s.begin(); (i != s.end() && ind < Nneg); i++,ind++  )
+//       negatives.insert ( *i );
+//   }
+//   std::cerr << "Number of positive examples: " << positives.size() << std::endl;
+//   std::cerr << "Number of negative examples: " << negatives.size() << std::endl;
+
+  // ------------------------------ TESTING ------------------------------
+ 
+  std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl;
+  imageNet.preloadData ( "val", "testing" );
+  imageNet.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );
+ 
+  ClassificationResults results;
+  std::cerr << "Classification step ... with " << imageNet.getNumPreloadedExamples() << " examples" << std::endl;
+  ProgressBar pb;
+  for ( uint i = 0 ; i < (uint)imageNet.getNumPreloadedExamples(); i++ )
+  {
+    pb.update ( imageNet.getNumPreloadedExamples() );
+
+    const SparseVector & svec = imageNet.getPreloadedExample ( i );
+    NICE::Vector vec;
+    svec.convertToVectorT( vec );
+
+    // classification step
+    ClassificationResult r = classifier->classify ( vec );
+    
+    // set ground truth label
+    r.classno_groundtruth = (((int)imageNet.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+    results.push_back ( r );
+  }
+
+  std::cerr << "Writing results to " << resultsfile << std::endl;
+  results.writeWEKA ( resultsfile, 0 );
+  double perfvalue = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+
+  std::cerr << "Performance: " << perfvalue << std::endl;
+  
+  //don't waste memory
+  delete classifier;
+  
+  return 0;
+}
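
A minimal configuration covering the keys this program reads could look as follows; the values are illustrative placeholders only:

    [main]
    positive_class = 1
    results = results.txt
    imageNetLocal = false
    rbf_sigma = -2.0
    classifier = KCGPApproxOneClass
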

+ 733 - 0
progs/testImageNetBinaryBruteForce.cpp

@@ -0,0 +1,733 @@
+/** 
+* @file testImageNetBinaryBruteForce.cpp
+* @brief perform ImageNet tests with binary tasks for OCC using GP mean and variance, sophisticated approximations of both, Parzen Density Estimation and SVDD
+* @author Alexander Lütz
+* @date 23-05-2012 (dd-mm-yyyy)
+*/
+
+#include <ctime>
+#include <time.h>
+
+#include "core/basics/Config.h"
+#include "core/basics/Timer.h"
+#include "core/algebra/CholeskyRobust.h"
+#include "core/vector/Algorithms.h"
+#include "core/vector/SparseVectorT.h"
+
+
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include "vislearning/baselib/ProgressBar.h"
+
+#include "fast-hik/tools.h"
+#include "fast-hik/MatFileIO.h"
+#include "fast-hik/ImageNetData.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+// --------------- THE KERNEL FUNCTION ( exponential kernel with euclidian distance ) ----------------------
+double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false)
+{
+  double inner_sum(0.0);
+
+  double d;      
+  
+  //new version, where we needed on average 0.001707 s for each test sample
+  NICE::SparseVector::const_iterator aIt = a.begin();
+  NICE::SparseVector::const_iterator bIt = b.begin();
+   
+  while ( (aIt != a.end()) && (bIt != b.end()) )
+  {
+    if (aIt->first == bIt->first)
+    {
+      d = ( aIt->second - bIt->second );      
+      inner_sum += d * d;
+      aIt++;
+      bIt++;
+    }
+    else if ( aIt->first < bIt->first)
+    {
+      inner_sum += aIt->second * aIt->second;
+      aIt++;      
+    }
+    else
+    {
+      inner_sum += bIt->second * bIt->second;
+      bIt++;       
+    }
+  }
+  
+  //compute remaining values, if b reached the end but not a
+  while (aIt != a.end())
+  {
+    inner_sum += aIt->second * aIt->second;
+    aIt++; 
+  }
+  //compute remaining values, if a reached the end but not b
+  while (bIt != b.end())
+  {
+    inner_sum += bIt->second * bIt->second;
+    bIt++; 
+  }  
+
+  inner_sum /= (2.0*sigma*sigma);
+  
+  return exp(-inner_sum);
+}
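
In closed form, the value returned above is the exponential (RBF) kernel of the Euclidean distance between the two sparse vectors:

    k(a, b) = \exp\left( - \frac{\lVert a - b \rVert^2}{2 \sigma^2} \right)
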
+
+void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
+{
+  parameterVector.resize(size);
+  parameterVector.set(0.0);
+  
+  ifstream is(filename.c_str());
+  if ( !is.good() )
+    fthrow(IOException, "Unable to read parameters.");  
+//
+  string tmp;
+  int cnt(0);
+  while (! is.eof())
+  {
+    is >> tmp;
+    parameterVector[cnt] = atof(tmp.c_str());
+    cnt++;
+  }
+//   
+  is.close(); 
+}
+
+//------------------- TRAINING METHODS --------------------
+
+void inline trainGPVarApprox(NICE::Vector & matrixDInv, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+{
+
+    std::cerr << "nrOfExamplesPerClass : " << nrOfExamplesPerClass << std::endl;
+  
+    Timer tTrainPreciseTimer;
+    tTrainPreciseTimer.start();     
+    
+//     time_t time;
+//     std::cerr <<
+    std::cerr << time(NULL) << std::endl;
+    
+    //tic tTrainPrecise
+    clock_t  tTrainPreciseStart = clock() * CLOCKS_PER_SEC;    
+    
+    usleep(35);
+    
+    matrixDInv.resize(nrOfExamplesPerClass);
+    matrixDInv.set(0.0);
+    //compute D 
+    //start with adding some noise, if necessary
+    if (noise != 0.0)
+      matrixDInv.set(noise);
+    else
+      matrixDInv.set(0.0);    
+    
+    for (int i = 0; i < nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < nrOfExamplesPerClass; j++)
+      {
+        matrixDInv[i] += kernelMatrix(i,j);
+        if (i != j)
+          matrixDInv[j] += kernelMatrix(i,j);
+      }
+    }
+    
+    //compute its inverse
+    for (int i = 0; i < nrOfExamplesPerClass; i++)
+    {
+      matrixDInv[i] = 1.0 / matrixDInv[i];
+    }
+    
+    tTrainPreciseTimer.stop(); 
+    std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPreciseTimer.getLast() << std::endl;    
+    //toc tTrainPrecise
+    clock_t  currentTime = clock() * CLOCKS_PER_SEC;
+    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
+    
+    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
+    std::cerr << "current time: " << currentTime << std::endl;
+    std::cerr << "Precise time used for GPVarApprox training class " << classNumber << ": " << currentTime-tTrainPreciseStart << std::endl;
+    
+    std::cerr << "final time in system clock whatever:" << std::endl;
+    std::cerr << time(NULL) << std::endl;
+}
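
Written out, matrixDInv holds the elementwise inverse of the diagonal approximation D with D_{ii} = noise + \sum_j K_{ij}; the corresponding evaluation step (evaluateGPVarApprox below) then approximates the predictive variance as

    \sigma^2_*(x) \approx k(x, x) - k_*^{\top} D^{-1} k_*
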
+
+void inline trainGPVar(NICE::Matrix & choleskyMatrix, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+{
+
+/*    Timer tTrainPrecise;
+    tTrainPrecise.start();  */   
+    
+    //tic tTrainPrecise
+    time_t  tTrainPreciseStart = clock();    
+    
+    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+    
+    choleskyMatrix.resize(nrOfExamplesPerClass, nrOfExamplesPerClass);
+    choleskyMatrix.set(0.0);      
+    cr.robustChol ( kernelMatrix, choleskyMatrix );      
+ 
+//     tTrainPrecise.stop(); 
+//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
+    //toc tTrainPrecise
+    time_t  currentTime = clock();
+    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
+    
+    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
+    std::cerr << "current time: " << currentTime << std::endl;
+    std::cerr << "Precise time used for GPVar training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+}
+
+void inline trainGPMeanApprox(NICE::Vector & GPMeanApproxRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+{
+
+/*    Timer tTrainPrecise;
+    tTrainPrecise.start();  */   
+    
+    //tic tTrainPrecise
+    time_t  tTrainPreciseStart = clock();    
+    
+    NICE::Vector matrixDInv(nrOfExamplesPerClass,0.0);
+    //compute D 
+    //start with adding some noise, if necessary
+    if (noise != 0.0)
+      matrixDInv.set(noise);
+    else
+      matrixDInv.set(0.0);    
+    
+    for (int i = 0; i < nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < nrOfExamplesPerClass; j++)
+      {
+        matrixDInv[i] += kernelMatrix(i,j);
+        if (i != j)
+          matrixDInv[j] += kernelMatrix(i,j);
+      }
+    }
+    
+    //compute its inverse (and multiply every element with the label vector, which contains only ones)
+    GPMeanApproxRightPart.resize(nrOfExamplesPerClass);    
+    for (int i = 0; i < nrOfExamplesPerClass; i++)
+    {
+      GPMeanApproxRightPart[i] = 1.0 / matrixDInv[i];
+    } 
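+    // NOTE: with the all-ones label vector of the OCC setting, this right-hand part equals D^{-1} * 1,
+    // so the approximate predictive mean computed later is  mu(x*) = sum_j k(x*,x_j) / D_jj
+    // (K^{-1} replaced by the diagonal D^{-1}, analogous to the variance approximation above).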
+    
+    
+//     tTrainPrecise.stop(); 
+//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
+    //toc tTrainPrecise
+    clock_t  currentTime = clock();
+    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
+    
+    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
+    std::cerr << "current time: " << currentTime << std::endl;
+    std::cerr << "Precise time used for GPMeanApprox training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+}
+    
+void inline trainGPMean(NICE::Vector & GPMeanRightPart, const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+{
+
+/*    Timer tTrainPrecise;
+    tTrainPrecise.start();  */   
+    
+    //tic tTrainPrecise
+    clock_t  tTrainPreciseStart = clock();    
+    
+    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+    
+    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+    cr.robustChol ( kernelMatrix, choleskyMatrix );  
+    
+    GPMeanRightPart.resize(nrOfExamplesPerClass);
+    GPMeanRightPart.set(0.0);
+    
+    NICE::Vector y(nrOfExamplesPerClass,1.0); //OCC setting :)
+    choleskySolveLargeScale ( choleskyMatrix, y, GPMeanRightPart );
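+    // NOTE: GPMeanRightPart now holds K^{-1} y (with y the all-ones OCC label vector), obtained via
+    // the robust Cholesky factorization; the predictive mean later reduces to  mu(x*) = k*^T K^{-1} y.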
+ 
+//     tTrainPrecise.stop(); 
+//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
+    //toc tTrainPrecise
+    clock_t  currentTime = clock();
+    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
+    
+    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
+    std::cerr << "current time: " << currentTime << std::endl;
+    std::cerr << "Precise time used for GPMean training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+}    
+
+void inline trainSVDD( const double & noise, const NICE::Matrix & kernelMatrix, const int & nrOfExamplesPerClass, const int & classNumber)
+{
+/*    Timer tTrainPrecise;
+    tTrainPrecise.start();  */   
+    
+    //tic tTrainPrecise
+    clock_t  tTrainPreciseStart = clock();  
+    
+//     tTrainPrecise.stop(); 
+//     std::cerr << "Precise time used for training class " << classNumber << ": " << tTrainPrecise.getLast() << std::endl;    
+    //toc tTrainPrecise
+    clock_t  currentTime = clock();
+    float tTrainPrecise = (float) (currentTime - tTrainPreciseStart);
+    
+    //TODO!!!
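+    // NOTE: the SVDD training itself is still missing here. A possible sketch (not implemented, and
+    // the concrete solver is an open choice): solve the SVDD dual QP
+    //   max_alpha  sum_i alpha_i K(i,i) - sum_{i,j} alpha_i alpha_j K(i,j)
+    //   s.t.  0 <= alpha_i <= C,  sum_i alpha_i = 1,
+    // and keep the resulting alpha vector (and the induced radius) for evaluateSVDD below.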
+    
+    
+    std::cerr << "start time: " << tTrainPreciseStart << std::endl;
+    std::cerr << "current time: " << currentTime << std::endl;
+    std::cerr << "Precise time used for SVDD training class " << classNumber << ": " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;
+}
+
+// ------------- EVALUATION METHODS ---------------------
+void inline evaluateGPVarApprox(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Vector & matrixDInv, ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+      NICE::Vector rightPart (kernelVector.size());
+      for (int j = 0; j < kernelVector.size(); j++)
+      {
+        rightPart[j] = kernelVector[j] * matrixDInv[j];
+      }
+
+      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = 1.0 - uncertainty;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateGPVar(const NICE::Vector & kernelVector, const double & kernelSelf, const NICE::Matrix & choleskyMatrix, ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+      NICE::Vector rightPart (kernelVector.size(),0.0);
+      
+      choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
+      
+      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = 1.0 - uncertainty;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateGPMeanApprox(const NICE::Vector & kernelVector, const NICE::Vector & rightPart, ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+
+      double mean = kernelVector.scalarProduct ( rightPart );
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = mean;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateGPMean(const NICE::Vector & kernelVector,  const NICE::Vector & GPMeanRightPart, ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+      
+      double mean = kernelVector.scalarProduct ( GPMeanRightPart );
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = mean;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateParzen(const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+      
+      double score( kernelVector.Sum() / (double) kernelVector.size() ); //maybe we could directly call kernelVector.Mean()      
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = score;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+void inline evaluateSVDD(const NICE::Vector & kernelVector,  ClassificationResult & r, double & timeForSingleExamples)
+{
+      Timer tTestSingle;
+      tTestSingle.start();
+      
+      double score (0.0);
+      //TODO
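+      // NOTE: evaluation is still missing. Once trainSVDD provides the dual coefficients alpha, a
+      // possible sketch would score by the (negated) kernel distance to the SVDD center, e.g.
+      //   score = 2 * sum_i alpha_i k(x*,x_i) - sum_{i,j} alpha_i alpha_j K(i,j)   (up to constants).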
+      
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();      
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = score;
+
+      r = ClassificationResult ( scores[1]<0.5 ? 0 : 1, scores );    
+}
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50);
+  nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class
+  
+  int indexOfFirstClass = conf.gI("main", "indexOfFirstClass", 0);
+  indexOfFirstClass = std::max(indexOfFirstClass, 0); //class indices do not go below 0
+  int indexOfLastClass = conf.gI("main", "indexOfLastClass", 999);
+  indexOfLastClass = std::min(indexOfLastClass, 999); //we do not have more than 1000 classes
+  
+  int nrOfClassesToConcidere =  (indexOfLastClass - indexOfFirstClass)+1;
+
+  //read the optimal parameters for the different methods
+  
+  // GP variance approximation
+  string sigmaGPVarApproxFile = conf.gS("main", "sigmaGPVarApproxFile", "approxVarSigma.txt");  
+  string noiseGPVarApproxFile = conf.gS("main", "noiseGPVarApproxFile", "approxVarNoise.txt");   
+  // GP variance
+  string sigmaGPVarFile = conf.gS("main", "sigmaGPVarFile", "approxVarSigma.txt");  
+  string noiseGPVarFile = conf.gS("main", "noiseGPVarFile", "approxVarNoise.txt");  
+  //GP mean approximation
+  string sigmaGPMeanApproxFile = conf.gS("main", "sigmaGPMeanApproxFile", "approxVarSigma.txt");  
+  string noiseGPMeanApproxFile = conf.gS("main", "noiseGPMeanApproxFile", "approxVarNoise.txt");    
+  //GP mean
+  string sigmaGPMeanFile = conf.gS("main", "sigmaGPMeanFile", "approxVarSigma.txt");  
+  string noiseGPMeanFile = conf.gS("main", "noiseGPMeanFile", "approxVarNoise.txt");      
+  //Parzen
+  string sigmaParzenFile = conf.gS("main", "sigmaParzenFile", "approxVarSigma.txt");  
+  string noiseParzenFile = conf.gS("main", "noiseParzenFile", "approxVarNoise.txt");    
+  //SVDD
+  string sigmaSVDDFile = conf.gS("main", "sigmaSVDDFile", "approxVarSigma.txt");  
+  string noiseSVDDFile = conf.gS("main", "noiseSVDDFile", "approxVarNoise.txt");      
+  
+  // GP variance approximation  
+  NICE::Vector sigmaGPVarApproxParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPVarApproxParas(nrOfClassesToConcidere,0.0);
+  // GP variance  
+  NICE::Vector sigmaGPVarParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPVarParas(nrOfClassesToConcidere,0.0);
+  //GP mean approximation  
+  NICE::Vector sigmaGPMeanApproxParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPMeanApproxParas(nrOfClassesToConcidere,0.0);
+  //GP mean  
+  NICE::Vector sigmaGPMeanParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseGPMeanParas(nrOfClassesToConcidere,0.0);
+  //Parzen  
+  NICE::Vector sigmaParzenParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseParzenParas(nrOfClassesToConcidere,0.0);
+  //SVDD  
+  NICE::Vector sigmaSVDDParas(nrOfClassesToConcidere,0.0);
+  NICE::Vector noiseSVDDParas(nrOfClassesToConcidere,0.0); 
+
+  // GP variance approximation    
+  readParameters(sigmaGPVarApproxFile,nrOfClassesToConcidere, sigmaGPVarApproxParas);
+  readParameters(noiseGPVarApproxFile,nrOfClassesToConcidere, noiseGPVarApproxParas);  
+  // GP variance    
+  readParameters(sigmaGPVarFile,nrOfClassesToConcidere, sigmaGPVarParas);
+  readParameters(noiseGPVarFile,nrOfClassesToConcidere, noiseGPVarParas);  
+  //GP mean approximation   
+  readParameters(sigmaGPMeanApproxFile,nrOfClassesToConcidere, sigmaGPMeanApproxParas);
+  readParameters(noiseGPMeanApproxFile,nrOfClassesToConcidere, noiseGPMeanApproxParas);  
+  //GP mean  
+  readParameters(sigmaGPMeanFile,nrOfClassesToConcidere, sigmaGPMeanParas);
+  readParameters(noiseGPMeanFile,nrOfClassesToConcidere, noiseGPMeanParas); 
+  //Parzen    
+  readParameters(sigmaParzenFile,nrOfClassesToConcidere, sigmaParzenParas);
+  readParameters(noiseParzenFile,nrOfClassesToConcidere, noiseParzenParas);  
+  //SVDD    
+  readParameters(sigmaSVDDFile,nrOfClassesToConcidere, sigmaSVDDParas);
+  readParameters(noiseSVDDFile,nrOfClassesToConcidere, noiseSVDDParas);   
+  
+  
+  // -------- optimal parameters read --------------  
+  
+  std::vector<SparseVector> trainingData;
+  NICE::Vector y;
+  
+  std::cerr << "Reading ImageNet data ..." << std::endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNetTrain ( imageNetPath + "demo/" );
+
+  imageNetTrain.preloadData( "train", "training" );
+  trainingData = imageNetTrain.getPreloadedData();
+  y = imageNetTrain.getPreloadedLabels();
+    
+  std::cerr << "Reading of training data finished" << std::endl;
+  std::cerr << "trainingData.size(): " << trainingData.size() << std::endl;
+  std::cerr << "y.size(): " << y.size() << std::endl;
+  
+  std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl;
+  ImageNetData imageNetTest ( imageNetPath + "demo/" );
+  imageNetTest.preloadData ( "val", "testing" );
+  imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );  
+  
+  double OverallPerformanceGPVarApprox(0.0);
+  double OverallPerformanceGPVar(0.0);
+  double OverallPerformanceGPMeanApprox(0.0);
+  double OverallPerformanceGPMean(0.0);
+  double OverallPerformanceParzen(0.0);
+  double OverallPerformanceSVDD(0.0);
+
+  
+  double kernelSigmaGPVarApprox;
+  double kernelSigmaGPVar;
+  double kernelSigmaGPMeanApprox;
+  double kernelSigmaGPMean;
+  double kernelSigmaParzen;
+  double kernelSigmaSVDD;
+  
+  for (int cl = indexOfFirstClass; cl <= indexOfLastClass; cl++)
+  {
+    std::cerr << "run for class " << cl << std::endl;
+    int positiveClass = cl+1; //labels are from 1 to 1000, but our indices from 0 to 999
+    // ------------------------------ TRAINING ------------------------------
+  
+    kernelSigmaGPVarApprox = sigmaGPVarApproxParas[cl];
+    kernelSigmaGPVar = sigmaGPVarParas[cl];
+    kernelSigmaGPMeanApprox = sigmaGPMeanApproxParas[cl];
+    kernelSigmaGPMean = sigmaGPMeanParas[cl];
+    kernelSigmaParzen = sigmaParzenParas[cl];
+    kernelSigmaSVDD = sigmaSVDDParas[cl];
+    
+    Timer tTrain;
+    tTrain.start();
+       
+    NICE::Matrix kernelMatrix(nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+    
+    //TODO in theory we would have to compute a separate kernel matrix for every method, since each method may have its own optimal parameters.
+    // We could speed this up by computing one matrix per distinct parameter value only;
+    // nonetheless, it is not as convenient as originally hoped (one shared matrix for all methods).
+    
+    //NOTE since we are only interested in runtimes here, we ignore this (further code optimization is still possible) //TODO
+    
+/*    //adding some noise, if necessary
+    if (noiseParas[cl] != 0.0)
+    {
+      kernelMatrix.addIdentity(noiseParas[cl]);
+    }
+    else
+    {
+      //zero was already set
+    } */     
+       
+    //now compute the kernel matrix entries for the training examples of this class
+    double kernelScore(0.0);
+    for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < cl*100+nrOfExamplesPerClass; j++)
+      {
+        kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigmaGPVarApprox);
+        kernelMatrix(i-cl*100,j-cl*100) = kernelScore;
+        
+        if (i != j)
+            kernelMatrix(j-cl*100,i-cl*100) = kernelScore;
+      }
+    }  
+    
+    //train GP Var Approx
+    NICE::Vector matrixDInv;
+    trainGPVarApprox(matrixDInv, noiseGPVarApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+    
+    //train GP Var
+    NICE::Matrix GPVarCholesky;
+    trainGPVar(GPVarCholesky, noiseGPVarParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);    
+    
+    //train GP Mean Approx
+    NICE::Vector GPMeanApproxRightPart;
+    trainGPMeanApprox(GPMeanApproxRightPart, noiseGPMeanApproxParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+    
+    //train GP Mean
+    NICE::Vector GPMeanRightPart;
+    trainGPMean(GPMeanRightPart, noiseGPMeanParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);    
+    
+    //train Parzen 
+    //nothing to do :)
+    
+    //train SVDD
+    //TODO what do we need here?
+    trainSVDD(noiseSVDDParas[cl], kernelMatrix, nrOfExamplesPerClass, cl);
+  
+    tTrain.stop();
+    std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;      
+       
+    std::cerr << "training done - now perform the evaluation" << std::endl;
+
+
+    // ------------------------------ TESTING ------------------------------
+   
+    std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl;
+    
+    ClassificationResults resultsGPVarApprox;
+    ClassificationResults resultsGPVar;
+    ClassificationResults resultsGPMeanApprox;
+    ClassificationResults resultsGPMean;    
+    ClassificationResults resultsParzen;
+    ClassificationResults resultsSVDD;       
+    
+    ProgressBar pb;
+    Timer tTest;
+    tTest.start();    
+    Timer tTestSingle;
+    
+    double timeForSingleExamplesGPVarApprox(0.0);    
+    double timeForSingleExamplesGPVar(0.0);
+    double timeForSingleExamplesGPMeanApprox(0.0);    
+    double timeForSingleExamplesGPMean(0.0);    
+    double timeForSingleExamplesParzen(0.0);    
+    double timeForSingleExamplesSVDD(0.0);    
+    
+    for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ )
+    {
+      pb.update ( imageNetTest.getNumPreloadedExamples() );
+
+      const SparseVector & svec = imageNetTest.getPreloadedExample ( i );
+
+      //TODO: again we should use method-specific optimal parameters. If we're only interested in the runtimes, this doesn't matter
+      double kernelSelf (measureDistance(svec,svec, kernelSigmaGPVarApprox) );
+      NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);
+      
+      for (int j = 0; j < nrOfExamplesPerClass; j++)
+      {
+        kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigmaGPVarApprox);
+      }     
+      
+      //evaluate GP Var Approx
+      ClassificationResult rGPVarApprox;      
+      evaluateGPVarApprox(kernelVector, kernelSelf, matrixDInv, rGPVarApprox, timeForSingleExamplesGPVarApprox);
+      
+      //evaluate GP Var
+      ClassificationResult rGPVar;
+      evaluateGPVar(kernelVector, kernelSelf, GPVarCholesky, rGPVar, timeForSingleExamplesGPVar);      
+      
+      //evaluate GP Mean Approx
+      ClassificationResult rGPMeanApprox;      
+      evaluateGPMeanApprox(kernelVector, matrixDInv, rGPMeanApprox, timeForSingleExamplesGPMeanApprox);
+      
+      //evaluate GP Mean
+      ClassificationResult rGPMean;
+      evaluateGPMean(kernelVector, GPMeanRightPart, rGPMean, timeForSingleExamplesGPMean);       
+      
+      //evaluate Parzen
+      ClassificationResult rParzen;
+      evaluateParzen(kernelVector, rParzen, timeForSingleExamplesParzen); 
+      
+      //evaluate SVDD
+      ClassificationResult rSVDD;
+      evaluateSVDD(kernelVector, rSVDD, timeForSingleExamplesSVDD);       
+
+      
+      // set ground truth label
+      rGPVarApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rGPVar.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rGPMeanApprox.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rGPMean.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rParzen.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      rSVDD.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      
+//       std::cerr << "scores: " << std::endl;
+//       scores >> std::cerr;
+//       std::cerr << "gt: " <<  r.classno_groundtruth << " -- " << r.classno << std::endl;
+      
+      resultsGPVarApprox.push_back ( rGPVarApprox );
+      resultsGPVar.push_back ( rGPVar );
+      resultsGPMeanApprox.push_back ( rGPMeanApprox );
+      resultsGPMean.push_back ( rGPMean );
+      resultsParzen.push_back ( rParzen );
+      resultsSVDD.push_back ( rSVDD );      
+    }
+    
+    tTest.stop();
+    std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl;       
+    
+    timeForSingleExamplesGPVarApprox/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesGPVar/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesGPMeanApprox/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesGPMean/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesParzen/= imageNetTest.getNumPreloadedExamples();
+    timeForSingleExamplesSVDD/= imageNetTest.getNumPreloadedExamples();
+    
+    std::cerr << "GPVarApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVarApprox << std::endl;    
+    std::cerr << "GPVar -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPVar << std::endl;    
+    std::cerr << "GPMeanApprox -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMeanApprox << std::endl;    
+    std::cerr << "GPMean -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesGPMean << std::endl;    
+    std::cerr << "Parzen -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesParzen << std::endl;    
+    std::cerr << "SVDD -- time used for evaluation single elements of class " << cl << " : " << timeForSingleExamplesSVDD << std::endl;    
+
+//     std::cerr << "Writing results to " << resultsfile << std::endl;
+//     results.writeWEKA ( resultsfile, 1 );
+    double perfvalueGPVarApprox = resultsGPVarApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    double perfvalueGPVar = resultsGPVar.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    double perfvalueGPMeanApprox = resultsGPMeanApprox.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    double perfvalueGPMean = resultsGPMean.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    double perfvalueParzen = resultsParzen.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+    double perfvalueSVDD = resultsSVDD.getBinaryClassPerformance( ClassificationResults::PERF_AUC );    
+
+    std::cerr << "Performance GPVarApprox: " << perfvalueGPVarApprox << std::endl;
+    std::cerr << "Performance GPVar: " << perfvalueGPVar << std::endl;
+    std::cerr << "Performance GPMeanApprox: " << perfvalueGPMeanApprox << std::endl;
+    std::cerr << "Performance GPMean: " << perfvalueGPMean << std::endl;
+    std::cerr << "Performance Parzen: " << perfvalueParzen << std::endl;
+    std::cerr << "Performance SVDD: " << perfvalueSVDD << std::endl;    
+    
+    OverallPerformanceGPVarApprox += perfvalueGPVarApprox;    
+    OverallPerformanceGPVar += perfvalueGPVar;
+    OverallPerformanceGPMeanApprox += perfvalueGPMeanApprox;
+    OverallPerformanceGPMean += perfvalueGPMean;
+    OverallPerformanceParzen += perfvalueParzen;
+    OverallPerformanceSVDD += perfvalueSVDD;    
+  }
+  
+  OverallPerformanceGPVarApprox /= nrOfClassesToConcidere;
+  OverallPerformanceGPVar /= nrOfClassesToConcidere;
+  OverallPerformanceGPMeanApprox /= nrOfClassesToConcidere;
+  OverallPerformanceGPMean /= nrOfClassesToConcidere;
+  OverallPerformanceParzen /= nrOfClassesToConcidere;
+  OverallPerformanceSVDD /= nrOfClassesToConcidere;  
+  
+  std::cerr << "overall performance GPVarApprox: " << OverallPerformanceGPVarApprox << std::endl;
+  std::cerr << "overall performance GPVar: " << OverallPerformanceGPVar << std::endl;
+  std::cerr << "overall performance GPMeanApprox: " << OverallPerformanceGPMeanApprox << std::endl;
+  std::cerr << "overall performance GPMean: " << OverallPerformanceGPMean << std::endl;
+  std::cerr << "overall performance Parzen: " << OverallPerformanceParzen << std::endl;
+  std::cerr << "overall performance SVDD: " << OverallPerformanceSVDD << std::endl;  
+  
+  return 0;
+}

+ 281 - 0
progs/testImageNetBinaryGPBaseline.cpp

@@ -0,0 +1,281 @@
+/** 
+* @file testImageNetBinaryGPBaseline.cpp
+* @brief perform ImageNet tests with binary tasks for OCC using the baseline GP
+* @author Alexander Lütz
+* @date 29-05-2012 (dd-mm-yyyy)
+
+*/
+#include "core/basics/Config.h"
+#include "core/basics/Timer.h"
+#include "core/vector/SparseVectorT.h"
+#include "core/algebra/CholeskyRobust.h"
+#include "core/vector/Algorithms.h"
+
+#include "vislearning/cbaselib/ClassificationResults.h"
+#include "vislearning/baselib/ProgressBar.h"
+
+#include "fast-hik/tools.h"
+#include "fast-hik/MatFileIO.h"
+#include "fast-hik/ImageNetData.h"
+
+using namespace std;
+using namespace NICE;
+using namespace OBJREC;
+
+double measureDistance ( const NICE::SparseVector & a, const NICE::SparseVector & b, const double & sigma = 2.0)//, const bool & verbose = false)
+{
+  double inner_sum(0.0);
+
+  double d;    
+    
+  //new version, where we needed on average 0.001707 s for each test sample
+  NICE::SparseVector::const_iterator aIt = a.begin();
+  NICE::SparseVector::const_iterator bIt = b.begin();
+   
+  while ( (aIt != a.end()) && (bIt != b.end()) )
+  {
+    if (aIt->first == bIt->first)
+    {
+      d = ( aIt->second - bIt->second );      
+      inner_sum += d * d;
+      aIt++;
+      bIt++;
+    }
+    else if ( aIt->first < bIt->first)
+    {
+      inner_sum += aIt->second * aIt->second;
+      aIt++;      
+    }
+    else
+    {
+      inner_sum += bIt->second * bIt->second;
+      bIt++;       
+    }
+  }
+  
+  //compute remaining values, if b reached the end but not a
+  while (aIt != a.end())
+  {
+    inner_sum += aIt->second * aIt->second;
+    aIt++; 
+  }
+  //compute remaining values, if a reached the end but not b
+  while (bIt != b.end())
+  {
+    inner_sum += bIt->second * bIt->second;
+    bIt++; 
+  }  
+  inner_sum /= (2.0*sigma*sigma);
+  
+  return exp(-inner_sum); //expValue;
+}
+
+void readParameters(const string & filename, const int & size, NICE::Vector & parameterVector)
+{
+  parameterVector.resize(size);
+  parameterVector.set(0.0);
+  
+  ifstream is(filename.c_str());
+  if ( !is.good() )
+    fthrow(IOException, "Unable to read parameters.");  
+//
+  string tmp;
+  int cnt(0);
+  while ( (! is.eof()) && (cnt < size) )
+  {
+    is >> tmp;
+    parameterVector[cnt] = atof(tmp.c_str());
+    cnt++;
+  }
+//   
+  is.close(); 
+}
+
+
+/** 
+    test the basic functionality of fast-hik hyperparameter optimization 
+*/
+int main (int argc, char **argv)
+{   
+  std::set_terminate(__gnu_cxx::__verbose_terminate_handler);
+
+  Config conf ( argc, argv );
+  string resultsfile = conf.gS("main", "results", "results.txt" );
+  double kernelSigma = conf.gD("main", "kernelSigma", 2.0);
+  int nrOfExamplesPerClass = conf.gI("main", "nrOfExamplesPerClass", 50);
+  nrOfExamplesPerClass = std::min(nrOfExamplesPerClass, 100); // we do not have more than 100 examples per class
+  int nrOfClassesToConcidere = conf.gI("main", "nrOfClassesToConcidere", 1000);
+  nrOfClassesToConcidere = std::min(nrOfClassesToConcidere, 1000); //we do not have more than 1000 classes
+
+  string sigmaFile = conf.gS("main", "sigmaFile", "approxVarSigma.txt");  
+  string noiseFile = conf.gS("main", "noiseFile", "approxVarNoise.txt");  
+  
+  
+  NICE::Vector sigmaParas(nrOfClassesToConcidere,kernelSigma);
+  NICE::Vector noiseParas(nrOfClassesToConcidere,0.0);
+  
+  std::cerr << "try to read optimal sigmas from " << sigmaFile << std::endl;
+  readParameters(sigmaFile,nrOfClassesToConcidere, sigmaParas);
+  //------------
+  std::cerr << "try to read optimal noises from " << noiseFile << std::endl;
+  readParameters(noiseFile,nrOfClassesToConcidere, noiseParas);
+  
+  std::vector<SparseVector> trainingData;
+  NICE::Vector y;
+  
+  std::cerr << "Reading ImageNet data ..." << std::endl;
+  bool imageNetLocal = conf.gB("main", "imageNetLocal" , false);
+  string imageNetPath;
+  if (imageNetLocal)
+    imageNetPath = "/users2/rodner/data/imagenet/devkit-1.0/";
+  else
+    imageNetPath = "/home/dbv/bilder/imagenet/devkit-1.0/";
+
+  ImageNetData imageNetTrain ( imageNetPath + "demo/" );
+
+  imageNetTrain.preloadData( "train", "training" );
+  trainingData = imageNetTrain.getPreloadedData();
+  y = imageNetTrain.getPreloadedLabels();
+    
+  std::cerr << "Reading of training data finished" << std::endl;
+  std::cerr << "trainingData.size(): " << trainingData.size() << std::endl;
+  std::cerr << "y.size(): " << y.size() << std::endl;
+  
+  std::cerr << "Reading ImageNet test data files (takes some seconds)..." << std::endl;
+  ImageNetData imageNetTest ( imageNetPath + "demo/" );
+  imageNetTest.preloadData ( "val", "testing" );
+  imageNetTest.loadExternalLabels ( imageNetPath + "data/ILSVRC2010_validation_ground_truth.txt" );  
+  
+  double OverallPerformance(0.0);  
+  
+  for (int cl = 0; cl < nrOfClassesToConcidere; cl++)
+  {
+    std::cerr << "run for class " << cl << std::endl;
+    int positiveClass = cl+1;
+    // ------------------------------ TRAINING ------------------------------
+  
+    kernelSigma = sigmaParas[cl];
+    
+    std::cerr << "using sigma: " << kernelSigma << " and noise " << noiseParas[cl] << std::endl;
+    Timer tTrain;
+    tTrain.start();
+    NICE::Matrix kernelMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);
+      
+    //now compute the kernelScores for every element
+    double kernelScore(0.0);
+    for (int i = cl*100; i < cl*100+nrOfExamplesPerClass; i++)
+    {
+      for (int j = i; j < cl*100+nrOfExamplesPerClass; j++)
+      {
+        kernelScore = measureDistance(trainingData[i],trainingData[j], kernelSigma);//optimalParameters[cl]);
+        kernelMatrix(i-cl*100,j-cl*100) = kernelScore;
+        if (i != j)
+          kernelMatrix(j-cl*100,i-cl*100) = kernelScore;
+      }
+    }  
+    
+    //adding some noise, if necessary
+    if (noiseParas[cl] != 0.0)
+    {
+      kernelMatrix.addIdentity(noiseParas[cl]);
+    }
+    else
+    {
+      //zero was already set
+    }    
+   
+    //compute its inverse
+    //noise is already added :)
+/*    Timer tTrainPrecise;
+    tTrainPrecise.start();  */   
+    
+    //tic tTrainPrecise
+    clock_t  tTrainPreciseStart = clock(); 
+    
+    
+    CholeskyRobust cr  ( false /* verbose*/, 0.0 /*noiseStep*/, false /* useCuda*/);
+    
+    NICE::Matrix choleskyMatrix (nrOfExamplesPerClass, nrOfExamplesPerClass, 0.0);      
+    cr.robustChol ( kernelMatrix, choleskyMatrix );    
+    
+//     tTrainPrecise.stop(); 
+//     std::cerr << "Precise time used for training class " << cl << ": " << tTrainPrecise.getLast() << std::endl;    
+    //toc tTrainPrecise
+    float tTrainPrecise = (float) (clock() - tTrainPreciseStart);
+    std::cerr << "Time for HIK preparation of alpha multiplications: " << tTrainPrecise/CLOCKS_PER_SEC << std::endl;       
+    
+    tTrain.stop();
+    std::cerr << "Time used for training class " << cl << ": " << tTrain.getLast() << std::endl;    
+       
+    std::cerr << "training done - now perform the evaluation" << std::endl;
+
+
+    // ------------------------------ TESTING ------------------------------
+   
+    ClassificationResults results;
+    std::cerr << "Classification step ... with " << imageNetTest.getNumPreloadedExamples() << " examples" << std::endl;
+    ProgressBar pb;
+    Timer tTest;
+    tTest.start();    
+    Timer tTestSingle;
+    double timeForSingleExamples(0.0);
+    for ( uint i = 0 ; i < (uint)imageNetTest.getNumPreloadedExamples(); i++ )
+    {
+      pb.update ( imageNetTest.getNumPreloadedExamples() );
+
+      const SparseVector & svec = imageNetTest.getPreloadedExample ( i );
+      
+      double kernelSelf (measureDistance(svec,svec, kernelSigma) );
+      NICE::Vector kernelVector (nrOfExamplesPerClass, 0.0);
+      
+      for (int j = 0; j < nrOfExamplesPerClass; j++)
+      {
+        kernelVector[j] = measureDistance(trainingData[j+cl*100],svec, kernelSigma);
+      }     
+      
+      tTestSingle.start();
+      NICE::Vector rightPart (nrOfExamplesPerClass);
+      choleskySolveLargeScale ( choleskyMatrix, kernelVector, rightPart );
+        
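+      // rightPart = K^{-1} k*, so the next line is the standard GP predictive variance
+      //   var(x*) = k(x*,x*) - k*^T K^{-1} k*
+      // (the noise term was already added to the kernel matrix before the Cholesky factorization).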
+      double uncertainty = kernelSelf - kernelVector.scalarProduct ( rightPart );
+      tTestSingle.stop();
+      timeForSingleExamples += tTestSingle.getLast();
+      
+      FullVector scores ( 2 );
+      scores[0] = 0.0;
+      scores[1] = 1.0 - uncertainty;
+
+      ClassificationResult r ( scores[1]<0.5 ? 0 : 1, scores );    
+      
+      // set ground truth label
+      r.classno_groundtruth = (((int)imageNetTest.getPreloadedLabel ( i )) == positiveClass) ? 1 : 0;
+      
+//       std::cerr << "scores: " << std::endl;
+//       scores >> std::cerr;
+//       std::cerr << "gt: " <<  r.classno_groundtruth << " -- " << r.classno << std::endl;
+      
+      results.push_back ( r );
+    }
+    
+    tTest.stop();
+    std::cerr << "Time used for evaluating class " << cl << ": " << tTest.getLast() << std::endl;       
+    
+    timeForSingleExamples/= imageNetTest.getNumPreloadedExamples();
+    std::cerr << "Time used for evaluation single elements of class " << cl << " : " << timeForSingleExamples << std::endl;
+    
+
+//     std::cerr << "Writing results to " << resultsfile << std::endl;
+//     results.writeWEKA ( resultsfile, 1 );
+    double perfvalue = results.getBinaryClassPerformance( ClassificationResults::PERF_AUC );
+
+    std::cerr << "Performance: " << perfvalue << std::endl;
+    
+    OverallPerformance += perfvalue;    
+  }
+  
+  OverallPerformance /= nrOfClassesToConcidere;
+  
+  std::cerr << "overall performance: " << OverallPerformance << std::endl;
+  
+  return 0;
+}