فهرست منبع

merged latest version of raw classifier, corrected minor issues

Alexander Freytag 9 سال پیش
والد
کامیت
90337e9f64

+ 13 - 2
FMKGPHyperparameterOptimization.cpp

@@ -674,7 +674,8 @@ void FMKGPHyperparameterOptimization::setFastMinKernel ( FastMinKernel * _fmk )
   //
   //
   if ( this->q != NULL )
   if ( this->q != NULL )
   {  
   {  
-    this->q->computeParametersFromData ( &(this->fmk->featureMatrix()) );
+    NICE::Vector _maxValuesPerDimension = this->fmk->featureMatrix().getLargestValuePerDimension();
+    this->q->computeParametersFromData ( _maxValuesPerDimension );
   }
   }
 }
 }
 
 
@@ -850,8 +851,18 @@ void FMKGPHyperparameterOptimization::computeMatricesAndLUTs ( const GPLikelihoo
         delete precomputedT[ i->first ];
         delete precomputedT[ i->first ];
       
       
       precomputedT[ i->first ] = T;
       precomputedT[ i->first ] = T;
+
+
+//      //debug
+//      double * p_t = T;
+//      for ( uint i=0; i < this->q->getNumberOfBins(); i++ , p_t++)
+//      {
+//          std::cerr << " " << *p_t;
+//      }
+//      std::cerr << std::endl;
     }
     }
   }
   }
+
   
   
   if ( this->precomputedTForVarEst != NULL )
   if ( this->precomputedTForVarEst != NULL )
   {
   {
@@ -2063,7 +2074,7 @@ void FMKGPHyperparameterOptimization::restore ( std::istream & _is,
         //TODO eig
         //TODO eig
         // currently hard coded, since EV does not offer Persistent functionalities and 
         // currently hard coded, since EV does not offer Persistent functionalities and 
         // in addition, we currently have no other choice for EV then EVArnoldi
         // in addition, we currently have no other choice for EV then EVArnoldi
-        this->eig = new EVArnoldi ( false /*eig_verbose */, 10 );        
+        this->eig = new EVArnoldi ( false /*eig_verbose */, 10 /*_maxiterations*/ );
         _is >> tmp; // end of block 
         _is >> tmp; // end of block 
         tmp = this->removeEndTag ( tmp );
         tmp = this->removeEndTag ( tmp );
       }     
       }     

+ 126 - 58
FastMinKernel.cpp

@@ -147,6 +147,9 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
                                                       NICE::VVector & _A,
                                                       NICE::VVector & _A,
                                                       NICE::VVector & _B) const
                                                       NICE::VVector & _B) const
 {
 {
+//  //debug
+//    std::cerr << "alpha: " << _alpha << std::endl;
+
   _A.resize( this->ui_d );
   _A.resize( this->ui_d );
   _B.resize( this->ui_d );
   _B.resize( this->ui_d );
 
 
@@ -193,29 +196,29 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
 
 
   for (uint dim = 0; dim < this->ui_d; dim++)
   for (uint dim = 0; dim < this->ui_d; dim++)
   {
   {
-    double alpha_sum(0.0);
-    double alpha_times_x_sum(0.0);
-
-    uint cntNonzeroFeat(0);
+    double alpha_sum         = 0.0;
+    double alpha_times_x_sum = 0.0;
 
 
-    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
+    //////////
     // loop through all elements in sorted order
     // loop through all elements in sorted order
-    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++ )
+    const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
+    uint cntNonzeroFeat = 0;
+    for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin();
+          i != nonzeroElements.end();
+          i++, cntNonzeroFeat++ )
     {
     {
       const SortedVectorSparse<double>::dataelement & de = i->second;
       const SortedVectorSparse<double>::dataelement & de = i->second;
 
 
       // index of the feature
       // index of the feature
-      int index = de.first;
-      // transformed element of the feature
-      //
-      double elem( de.second );
+      int index   = de.first;
+      // element of the feature
+      double elem = de.second;
 
 
       alpha_times_x_sum += _alpha[index] * elem;
       alpha_times_x_sum += _alpha[index] * elem;
-      _A[dim][cntNonzeroFeat] = alpha_times_x_sum;
+      alpha_sum         += _alpha[index];
 
 
-      alpha_sum += _alpha[index];
+      _A[dim][cntNonzeroFeat] = alpha_times_x_sum;
       _B[dim][cntNonzeroFeat] = alpha_sum;
       _B[dim][cntNonzeroFeat] = alpha_sum;
-      cntNonzeroFeat++;
     }
     }
   }
   }
 
 
@@ -232,10 +235,13 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
   // number of quantization bins
   // number of quantization bins
   uint hmax = _q->getNumberOfBins();
   uint hmax = _q->getNumberOfBins();
 
 
-  // store (transformed) prototypes
-  double * prototypes   = new double [ hmax * this->ui_d ];
-  double * p_prototypes = prototypes;
+  double * prototypes;
+  prototypes   = new double [ hmax * this->ui_d ];
+
+  double * p_prototypes;
+  p_prototypes = prototypes;
 
 
+  // compute all prototypes to compare against lateron
   for (uint dim = 0; dim < this->ui_d; dim++)
   for (uint dim = 0; dim < this->ui_d; dim++)
   {
   {
     for ( uint i = 0 ; i < hmax ; i++ )
     for ( uint i = 0 ; i < hmax ; i++ )
@@ -252,17 +258,26 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
     }
     }
   }
   }
 
 
-  // creating the lookup table as pure C, which might be beneficial
-  // for fast evaluation
+  // allocate memory for LUT T
   double *Tlookup = new double [ hmax * this->ui_d ];
   double *Tlookup = new double [ hmax * this->ui_d ];
 
 
-
-  // loop through all dimensions
+  // start the actual computation of  T
   for ( uint dim = 0; dim < this->ui_d; dim++ )
   for ( uint dim = 0; dim < this->ui_d; dim++ )
   {
   {
-    uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
-    if ( nrZeroIndices == this->ui_n )
-      continue;
+    // nz == nrZeroIndices
+    uint nz    = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+    // nnz == nrNonZeroIndices
+    uint nnz  = this->ui_n-nz;
+
+    if ( nz == this->ui_n )
+    {
+        double * itT = Tlookup + dim*hmax;
+        for ( uint idxProto = 0; idxProto < hmax; idxProto++, itT++ )
+        {
+            *itT = 0;
+        }
+        continue;
+    }
 
 
     const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
     const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
 
 
@@ -270,54 +285,107 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
     SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
     SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();
 
 
     // index of the element, which is always bigger than the current value fval
     // index of the element, which is always bigger than the current value fval
-    uint index = 0;
+    int indexElem = 0;
+    // element of the feature
+    double elem = i->first;
+
     // we use the quantization of the original features! the transformed feature were
     // we use the quantization of the original features! the transformed feature were
     // already used to calculate A and B, this of course assumes monotonic functions!!!
     // already used to calculate A and B, this of course assumes monotonic functions!!!
-    uint qBin = _q->quantize ( i->first, dim );
-
-    // the next loop is linear in max(hmax, n)
-    // REMARK: this could be changed to hmax*log(n), when
-    // we use binary search
-
-    for (uint j = 0; j < hmax; j++)
+    uint idxProtoElem = _q->quantize ( elem, dim );// denotes the bin number in dim i of a quantized example, previously termed qBin
+
+    uint idxProto;
+    double * itProtoVal = prototypes + dim*hmax;
+    double * itT = Tlookup + dim*hmax;
+
+    // special case 1:
+    // loop over all prototypes smaller then the smallest quantized example in this dimension
+    for ( idxProto = 0;
+          idxProto < idxProtoElem;
+          idxProto++, itProtoVal++, itT++
+        ) // idxProto previously j
     {
     {
-      double fval = prototypes[ dim*hmax + j ];
-      double t;
+      // current prototype is smaller than all known examples
+      // -> resulting value = fval * sum_l=1^n alpha_l
+      (*itT) = (*itProtoVal) * ( _B[ dim ][ nnz-1 ] );
+    }//for-loop over prototypes -- special case 1
 
 
-      if (  (index == 0) && (j < qBin) ) {
-        // current element is smaller than everything else
-        // resulting value = fval * sum_l=1^n alpha_l
-        t = fval*( _B[dim][this->ui_n-1 - nrZeroIndices] );
-      } else {
-
-         // move to next example, if necessary
-        while ( (j >= qBin) && ( index < (this->ui_n-1-nrZeroIndices)) )
+    // standard case: prototypes larger then the smallest element, but smaller then the largest one in the corrent dimension
+    for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
+    {
+        //move to next example, which is smaller then the current prototype after quantization
+        // pay attentation to not loop over the number of non-zero elements
+        while ( (idxProto >= idxProtoElem) && ( indexElem  < ( nnz - 1)  ) ) //(this->ui_n-1-nrZeroIndices)) )
         {
         {
-          index++;
+          indexElem++;
           iPredecessor = i;
           iPredecessor = i;
           i++;
           i++;
 
 
+          // only quantize if value changed
           if ( i->first !=  iPredecessor->first )
           if ( i->first !=  iPredecessor->first )
-            qBin = _q->quantize ( i->first, dim );
+          {
+            idxProtoElem = _q->quantize ( i->first, dim );
+          }
         }
         }
-        // compute current element in the lookup table and keep in mind that
-        // index is the next element and not the previous one
-        //NOTE pay attention: this is only valid if all entries are positive! -
-        // If not, ask whether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
-        if ( (j >= qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
-          // the current element (fval) is equal or bigger to the element indexed by index
-          // in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
-          t = _A[dim][index];// + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index] );
-        } else {
-          // standard case
-          t = _A[dim][index-1] + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index-1] );
+
+        // did we looped over the largest element in this dimension?
+        if ( indexElem==( nnz-1 ) )
+        {
+          break;
         }
         }
-      }
 
 
-      Tlookup[ dim*hmax + j ] = t;
-    }
-  }
+        (*itT) = _A[ dim ][ indexElem-1 ] + (*itProtoVal)*( _B[ dim ][ nnz-1 ] - _B[ dim ][ indexElem-1 ] );
+    }//for-loop over prototypes -- standard case
 
 
+    // special case 2:
+    // the current prototype is equal to or larger than the largest training example in this dimension
+    // -> the term B[ dim ][ nnz-1 ] - B[ dim ][ indexElem ] is equal to zero and vanishes, which is logical, since all elements are smaller than the remaining prototypes!
+
+    for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
+    {
+      (*itT) = _A[ dim ][ indexElem ];
+    }//for-loop over prototypes -- special case 2
+
+//    for (uint j = 0; j < hmax; j++)
+//    {
+//      double fval = prototypes[ dim*hmax + j ];
+//      double t;
+
+//      if (  (index == 0) && (j < idxProtoElem) ) {
+//        // current element is smaller than everything else
+//        // resulting value = fval * sum_l=1^n alpha_l
+//        t = fval*( _B[dim][this->ui_n-1 - nrZeroIndices] );
+//      } else {
+
+//         // move to next example, if necessary
+//        while ( (j >= idxProtoElem) && ( index < (this->ui_n-1-nrZeroIndices)) )
+//        {
+//          index++;
+//          iPredecessor = i;
+//          i++;
+
+//          if ( i->first !=  iPredecessor->first )
+//            idxProtoElem = _q->quantize ( i->first, dim );
+//        }
+//        // compute current element in the lookup table and keep in mind that
+//        // index is the next element and not the previous one
+//        //NOTE pay attention: this is only valid if all entries are positive! -
+//        // If not, ask whether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
+//        if ( (j >= idxProtoElem) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
+//          // the current element (fval) is equal or bigger to the element indexed by index
+//          // in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
+//          t = _A[dim][index];// + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index] );
+//        } else {
+//          // standard case
+//          t = _A[dim][index-1] + fval*( _B[dim][this->ui_n-1-nrZeroIndices] - _B[dim][index-1] );
+//        }
+//      }
+
+//      Tlookup[ dim*hmax + j ] = t;
+//    }
+
+  }//for-loop over dimensions
+
+  // clean-up prototypes
   delete [] prototypes;
   delete [] prototypes;
 
 
   return Tlookup;
   return Tlookup;

+ 259 - 37
GMHIKernelRaw.cpp

@@ -16,43 +16,59 @@ using namespace NICE;
 using namespace std;
 using namespace std;
 
 
 
 
-GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples, const double _d_noise )
+GMHIKernelRaw::GMHIKernelRaw( const std::vector< const NICE::SparseVector *> &_examples,
+                              const double _d_noise,
+                              NICE::Quantization * _q
+                            )
 {
 {
     this->examples_raw = NULL;
     this->examples_raw = NULL;
     this->nnz_per_dimension = NULL;
     this->nnz_per_dimension = NULL;
     this->table_A = NULL;
     this->table_A = NULL;
     this->table_B = NULL;
     this->table_B = NULL;
+    this->table_T = NULL;
     this->d_noise = _d_noise;
     this->d_noise = _d_noise;
+    this->q       = _q;
 
 
-    initData(_examples);
+    this->initData(_examples);
 }
 }
 
 
 GMHIKernelRaw::~GMHIKernelRaw()
 GMHIKernelRaw::~GMHIKernelRaw()
 {
 {
-    cleanupData();
+    this->cleanupData();
 }
 }
 
 
 void GMHIKernelRaw::cleanupData()
 void GMHIKernelRaw::cleanupData()
 {
 {
-    if ( this->examples_raw != NULL ) {
+    // data structure of examples
+    if ( this->examples_raw != NULL )
+    {
         for ( uint d = 0; d < this->num_dimension; d++ )
         for ( uint d = 0; d < this->num_dimension; d++ )
             if (examples_raw[d] != NULL)
             if (examples_raw[d] != NULL)
                 delete [] examples_raw[d];
                 delete [] examples_raw[d];
         delete [] this->examples_raw;
         delete [] this->examples_raw;
         this->examples_raw = NULL;
         this->examples_raw = NULL;
     }
     }
-    if ( this->nnz_per_dimension != NULL ) {
+
+    // counter of non-zero examples in each dimension
+    if ( this->nnz_per_dimension != NULL )
+    {
         delete [] this->nnz_per_dimension;
         delete [] this->nnz_per_dimension;
         this->nnz_per_dimension = NULL;
         this->nnz_per_dimension = NULL;
     }
     }
-    if ( this->table_A != NULL ) {
+
+    // LUT A for classification without quantization
+    if ( this->table_A != NULL )
+    {
         for ( uint d = 0; d < this->num_dimension; d++ )
         for ( uint d = 0; d < this->num_dimension; d++ )
             if (table_A[d] != NULL)
             if (table_A[d] != NULL)
                 delete [] table_A[d];
                 delete [] table_A[d];
         delete [] this->table_A;
         delete [] this->table_A;
         this->table_A = NULL;
         this->table_A = NULL;
     }
     }
-    if ( this->table_B != NULL ) {
+
+    // LUT B for classification without quantization
+    if ( this->table_B != NULL )
+    {
         for ( uint d = 0; d < this->num_dimension; d++ )
         for ( uint d = 0; d < this->num_dimension; d++ )
             if (table_B[d] != NULL)
             if (table_B[d] != NULL)
                 delete [] table_B[d];
                 delete [] table_B[d];
@@ -60,6 +76,12 @@ void GMHIKernelRaw::cleanupData()
         this->table_B = NULL;
         this->table_B = NULL;
     }
     }
 
 
+    // LUT T for classification with quantization
+    if ( this->table_T != NULL )
+    {
+        delete [] this->table_T;
+        this->table_T = NULL;
+    }
 }
 }
 
 
 void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
 void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_examples )
@@ -69,10 +91,10 @@ void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_
 
 
     cleanupData();
     cleanupData();
 
 
-    this->num_dimension = _examples[0]->getDim();
-    this->examples_raw = new sparseVectorElement *[num_dimension];
+    this->num_dimension     = _examples[0]->getDim();
+    this->examples_raw      = new sparseVectorElement *[num_dimension];
     this->nnz_per_dimension = new uint [num_dimension];
     this->nnz_per_dimension = new uint [num_dimension];
-    this->num_examples = _examples.size();
+    this->num_examples      = _examples.size();
 
 
     // waste memory and allocate a non-sparse data block
     // waste memory and allocate a non-sparse data block
     sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
     sparseVectorElement **examples_raw_increment = new sparseVectorElement *[num_dimension];
@@ -94,10 +116,11 @@ void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_
     NICE::Vector::iterator itDiagEl = this->diagonalElements.begin();
     NICE::Vector::iterator itDiagEl = this->diagonalElements.begin();
 
 
     // minor pre-allocation
     // minor pre-allocation
-    uint index;
+    uint i_dimNonZero;
     double value;
     double value;
     double l1norm;
     double l1norm;
 
 
+    // iterate over all provided training examples to process their data
     for ( std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
     for ( std::vector< const NICE::SparseVector * >::const_iterator i = _examples.begin();
           i != _examples.end();
           i != _examples.end();
           i++, example_index++, itDiagEl++
           i++, example_index++, itDiagEl++
@@ -105,15 +128,18 @@ void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_
     {
     {
         l1norm = 0.0;
         l1norm = 0.0;
         const NICE::SparseVector *x = *i;
         const NICE::SparseVector *x = *i;
+        // loop over all non-zero dimensions, copy dimension and value into our data structure, and compute the L1 norm
         for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
         for ( NICE::SparseVector::const_iterator j = x->begin(); j != x->end(); j++ )
         {
         {
-            index = j->first;
-            value = j->second;
-            examples_raw_increment[index]->value = value;
-            examples_raw_increment[index]->example_index = example_index;
-            // move to the next element
-            examples_raw_increment[index]++;
-            this->nnz_per_dimension[index]++;
+            i_dimNonZero = j->first;
+            value        = j->second;
+
+            examples_raw_increment[i_dimNonZero]->value = value;
+            examples_raw_increment[i_dimNonZero]->example_index = example_index;
+
+            // move data pointer to the next element in the current dimension
+            examples_raw_increment[i_dimNonZero]++;
+            this->nnz_per_dimension[i_dimNonZero]++;
 
 
             l1norm = l1norm + value;
             l1norm = l1norm + value;
         }
         }
@@ -131,11 +157,20 @@ void GMHIKernelRaw::initData ( const std::vector< const NICE::SparseVector *> &_
     }
     }
 
 
     // pre-allocate the A and B matrices
     // pre-allocate the A and B matrices
-    this->table_A = allocateTable();
-    this->table_B = allocateTable();
+    this->table_A = allocateTableAorB();
+    this->table_B = allocateTableAorB();
+
+    // Quantization for classification?
+    if ( this->q != NULL )
+    {
+      // (1) if yes, setup the parameters of the quantization object
+      NICE::Vector _maxValuesPerDimension = this->getLargestValuePerDimension();
+      this->q->computeParametersFromData ( _maxValuesPerDimension );
+      this->table_T = this->allocateTableT();
+    }
 }
 }
 
 
-double **GMHIKernelRaw::allocateTable() const
+double **GMHIKernelRaw::allocateTableAorB() const
 {
 {
     double **table;
     double **table;
     table = new double *[this->num_dimension];
     table = new double *[this->num_dimension];
@@ -151,53 +186,204 @@ double **GMHIKernelRaw::allocateTable() const
     return table;
     return table;
 }
 }
 
 
-void GMHIKernelRaw::copyTable(double **src, double **dst) const
+double *GMHIKernelRaw::allocateTableT() const
+{
+    double *table;
+    table = new double [this->num_dimension * this->q->getNumberOfBins()];
+    return table;
+}
+
+void GMHIKernelRaw::copyTableAorB(double **src, double **dst) const
 {
 {
     for (uint i = 0; i < this->num_dimension; i++)
     for (uint i = 0; i < this->num_dimension; i++)
     {
     {
         uint nnz = this->nnz_per_dimension[i];
         uint nnz = this->nnz_per_dimension[i];
-        if (nnz>0) {
+        if (nnz>0)
+        {
             for (uint j = 0; j < nnz; j++)
             for (uint j = 0; j < nnz; j++)
                 dst[i][j] = src[i][j];
                 dst[i][j] = src[i][j];
-        } else {
+        }
+        else
+        {
             dst[i] = NULL;
             dst[i] = NULL;
         }
         }
     }
     }
 }
 }
 
 
-void GMHIKernelRaw::updateTables ( const NICE::Vector _x ) const
+void GMHIKernelRaw::copyTableT(double *_src, double *_dst) const
 {
 {
+  double * p_src = _src;
+  double * p_dst = _dst;
+  for ( int i = 0; 
+        i < this->num_dimension * this->q->getNumberOfBins(); 
+        i++, p_src++, p_dst++ 
+      )
+  {
+    *p_dst = *p_src;
+  }
+}
+
+void GMHIKernelRaw::updateTablesAandB ( const NICE::Vector _x ) const
+{
+    // start the actual computations of A, B, and optionally T
     for (uint dim = 0; dim < this->num_dimension; dim++)
     for (uint dim = 0; dim < this->num_dimension; dim++)
     {
     {
-      double alpha_sum = 0.0;
+      double alpha_sum         = 0.0;
       double alpha_times_x_sum = 0.0;
       double alpha_times_x_sum = 0.0;
-      uint nnz = nnz_per_dimension[dim];
+      uint nnz                 = nnz_per_dimension[dim];
+      
 
 
+      //////////
       // loop through all elements in sorted order
       // loop through all elements in sorted order
       sparseVectorElement *training_values_in_dim = examples_raw[dim];
       sparseVectorElement *training_values_in_dim = examples_raw[dim];
-      for ( uint cntNonzeroFeat = 0; cntNonzeroFeat < nnz; cntNonzeroFeat++, training_values_in_dim++ )
+      for ( uint cntNonzeroFeat = 0; 
+            cntNonzeroFeat < nnz; 
+            cntNonzeroFeat++, training_values_in_dim++ 
+          )
       {
       {
         // index of the feature
         // index of the feature
-        int index = training_values_in_dim->example_index;
+        int index   = training_values_in_dim->example_index;
         // element of the feature
         // element of the feature
         double elem = training_values_in_dim->value;
         double elem = training_values_in_dim->value;
 
 
         alpha_times_x_sum += _x[index] * elem;
         alpha_times_x_sum += _x[index] * elem;
-        this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;
-
-        alpha_sum += _x[index];
+        alpha_sum         += _x[index];
+        
+        this->table_A[dim][cntNonzeroFeat] = alpha_times_x_sum;        
         this->table_B[dim][cntNonzeroFeat] = alpha_sum;
         this->table_B[dim][cntNonzeroFeat] = alpha_sum;
+      }      
+    }
+}
+
+void GMHIKernelRaw::updateTableT ( const NICE::Vector _x ) const
+{
+    // sanity check
+    if ( this->q == NULL)
+    {
+        return;
+    }
+
+
+
+    // number of quantization bins
+    uint hmax = this->q->getNumberOfBins();
+
+
+    double * prototypes;
+    prototypes   = new double [ hmax * this->num_dimension ];
+
+    double * p_prototypes;
+    p_prototypes = prototypes;
+
+    // compute all prototypes to compare against lateron
+    for (uint dim = 0; dim < this->num_dimension; dim++)
+    {
+      for ( uint i = 0 ; i < hmax ; i++ )
+      {
+        *p_prototypes = this->q->getPrototype( i, dim );
+         p_prototypes++;
       }
       }
     }
     }
 
 
+    // start the actual computation of  T
+    for (uint dim = 0; dim < this->num_dimension; dim++)
+    {
+      uint nnz = nnz_per_dimension[dim];
+
+      if ( nnz == 0 )
+      {
+          double * itT = this->table_T + dim*hmax;
+          for ( uint idxProto = 0; idxProto < hmax; idxProto++, itT++ )
+          {
+              *itT = 0;
+          }
+          continue;
+      }
+
+        uint idxProtoElem; // denotes the bin number in dim i of a quantized example, previously termed qBin
+
+        sparseVectorElement * i            = examples_raw[dim];
+        sparseVectorElement * iPredecessor = examples_raw[dim];
+
+        // index of the element, which is always bigger than the current value fval
+        int indexElem = 0;
+        // element of the feature
+        double elem = i->value;
+        
+        idxProtoElem = this->q->quantize ( elem, dim );
+
+        uint idxProto;
+        double * itProtoVal = prototypes + dim*hmax;
+        double * itT = this->table_T + dim*hmax;
+        
+        // special case 1:
+        // loop over all prototypes smaller then the smallest quantized example in this dimension
+        for ( idxProto = 0; idxProto < idxProtoElem; idxProto++, itProtoVal++, itT++) // idxProto previously j
+        {
+          // current prototype is smaller than all known examples
+          // -> resulting value = fval * sum_l=1^n alpha_l          
+          (*itT) = (*itProtoVal) * ( this->table_B[ dim ][ nnz-1 ] );          
+        }//for-loop over prototypes -- special case 1
+
+        // standard case: prototypes larger then the smallest element, but smaller then the largest one in the corrent dimension        
+        for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
+        {
+            //move to next example, which is smaller then the current prototype after quantization
+            // pay attentation to not loop over the number of non-zero elements
+            while ( (idxProto >= idxProtoElem) && ( indexElem < ( nnz - 1 ) ) ) //(this->ui_n-1-nrZeroIndices)) )
+            {
+              indexElem++;
+              iPredecessor = i;
+              i++;
+
+              // only quantize if value changed
+              if ( i->value !=  iPredecessor->value )
+              {
+                idxProtoElem = this->q->quantize ( i->value, dim );
+              }
+            }
+            
+            // did we looped over the largest element in this dimension?
+            if ( indexElem==( nnz-1 ) )
+            {
+              break;
+            }
+
+            (*itT) = table_A[ dim ][ indexElem-1 ] + (*itProtoVal)*( table_B[ dim ][ nnz-1 ] - table_B[ dim ][ indexElem-1 ] );
+        }//for-loop over prototypes -- standard case 
+            
+        // special case 2:
+        // the current prototype is equal to or larger than the largest training example in this dimension
+        // -> the term B[ dim ][ nnz-1 ] - B[ dim ][ indexElem ] is equal to zero and vanishes, which is logical, since all elements are smaller than the remaining prototypes!
+
+        for ( ; idxProto < hmax; idxProto++, itProtoVal++, itT++)
+        {
+          (*itT) = table_A[ dim ][ indexElem ];
+        }//for-loop over prototypes -- special case 2
+        
+    }//for-loop over dimensions
+
+
+    // clean-up prototypes
+    if ( this->q != NULL)
+    {
+      delete [] prototypes;
+    }
 
 
+//    //debug
+//    double * p_t = table_T;
+//    for ( uint i=0; i < hmax; i++ , p_t++)
+//    {
+//        std::cerr << " " << *p_t;
+//    }
+//    std::cerr << std::endl;
 }
 }
 
 
 /** multiply with a vector: A*x = y */
 /** multiply with a vector: A*x = y */
 void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
 void GMHIKernelRaw::multiply (NICE::Vector & _y, const NICE::Vector & _x) const
 {
 {
   // STEP 1: initialize tables A and B
   // STEP 1: initialize tables A and B
-  updateTables(_x);
+  this->updateTablesAandB(_x);
 
 
   _y.resize( this->num_examples );
   _y.resize( this->num_examples );
   _y.set(0.0);
   _y.set(0.0);
@@ -248,18 +434,25 @@ uint GMHIKernelRaw::cols () const
 
 
 double **GMHIKernelRaw::getTableA() const
 double **GMHIKernelRaw::getTableA() const
 {
 {
-    double **t = allocateTable();
-    copyTable(this->table_A, t);
+    double **t = allocateTableAorB();
+    copyTableAorB(this->table_A, t);
     return t;
     return t;
 }
 }
 
 
 double **GMHIKernelRaw::getTableB() const
 double **GMHIKernelRaw::getTableB() const
 {
 {
-    double **t = allocateTable();
-    copyTable(this->table_B, t);
+    double **t = allocateTableAorB();
+    copyTableAorB(this->table_B, t);
     return t;
     return t;
 }
 }
 
 
+double * GMHIKernelRaw::getTableT() const
+{
+    double * T = this->allocateTableT();
+    copyTableT(this->table_T, T);
+    return T;
+}
+
 uint *GMHIKernelRaw::getNNZPerDimension() const
 uint *GMHIKernelRaw::getNNZPerDimension() const
 {
 {
     uint *v = new uint[this->num_dimension];
     uint *v = new uint[this->num_dimension];
@@ -269,7 +462,36 @@ uint *GMHIKernelRaw::getNNZPerDimension() const
 }
 }
 
 
 
 
+uint NICE::GMHIKernelRaw::getNumberOfDimensions() const
+{
+    return this->num_dimension;
+}
+
 void NICE::GMHIKernelRaw::getDiagonalElements( NICE::Vector & _diagonalElements) const
 void NICE::GMHIKernelRaw::getDiagonalElements( NICE::Vector & _diagonalElements) const
 {
 {
     _diagonalElements = this->diagonalElements;
     _diagonalElements = this->diagonalElements;
 }
 }
+
+
+NICE::Vector NICE::GMHIKernelRaw::getLargestValuePerDimension ( ) const
+{
+  NICE::Vector vmax ( this->num_dimension );
+
+  NICE::Vector::iterator vmaxIt = vmax.begin();
+
+  for (uint d = 0; d < this->num_dimension; d++, vmaxIt++)
+  {
+      uint nnz = this->nnz_per_dimension[d];
+
+      if ( nnz > 0 )
+      {
+          *vmaxIt = this->examples_raw[ d ][ nnz-1 ].value;
+      }
+      else
+      {
+          *vmaxIt = 0.0;
+      }
+  }
+
+  return vmax;
+}

+ 48 - 6
GMHIKernelRaw.h

@@ -11,10 +11,12 @@
 
 
 #include <core/algebra/GenericMatrix.h>
 #include <core/algebra/GenericMatrix.h>
 
 
+#include "quantization/Quantization.h"
+
 namespace NICE {
 namespace NICE {
 
 
  /**
  /**
- * @class GMHIKernel
+ * @class GMHIKernelRaw
  * @brief Fast multiplication with histogram intersection kernel matrices
  * @brief Fast multiplication with histogram intersection kernel matrices
  * @author Erik Rodner, Alexander Freytag
  * @author Erik Rodner, Alexander Freytag
  */
  */
@@ -38,6 +40,7 @@ class GMHIKernelRaw : public GenericMatrix
     sparseVectorElement **examples_raw;
     sparseVectorElement **examples_raw;
     double **table_A;
     double **table_A;
     double **table_B;
     double **table_B;
+    double *table_T;
 
 
     NICE::Vector diagonalElements;
     NICE::Vector diagonalElements;
 
 
@@ -46,18 +49,51 @@ class GMHIKernelRaw : public GenericMatrix
     uint num_examples;
     uint num_examples;
     double d_noise;
     double d_noise;
 
 
+    /** object performing feature quantization */
+    NICE::Quantization *q;
+
+
+
+    /////////////////////////
+    /////////////////////////
+    //  PROTECTED METHODS  //
+    /////////////////////////
+    /////////////////////////
+
     void initData ( const std::vector< const NICE::SparseVector *> & examples );
     void initData ( const std::vector< const NICE::SparseVector *> & examples );
     void cleanupData ();
     void cleanupData ();
-    double **allocateTable() const;
-    void copyTable(double **src, double **dst) const;
+
+    double** allocateTableAorB() const;
+    double* allocateTableT() const;
+
+    void copyTableAorB(double **src, double **dst) const;
+    void copyTableT(double *src, double *dst) const;
+
+    void clearTablesAandB();
+    void clearTablesT();
+
+
+    double * computeTableT ( const NICE::Vector & _alpha
+                           );
+
+    /////////////////////////
+    /////////////////////////
+    //    PUBLIC METHODS   //
+    /////////////////////////
+    /////////////////////////
 
 
   public:
   public:
 
 
     /** simple constructor */
     /** simple constructor */
-    GMHIKernelRaw( const std::vector< const NICE::SparseVector *> & examples, const double d_noise = 0.1 );
+    GMHIKernelRaw( const std::vector< const NICE::SparseVector *> & _examples,
+                   const double _d_noise = 0.1,
+                   NICE::Quantization * _q = NULL
+                 );
 
 
     /** multiply with a vector: A*x = y; this is not really const anymore!! */
     /** multiply with a vector: A*x = y; this is not really const anymore!! */
-    virtual void multiply (NICE::Vector & y, const NICE::Vector & x) const;
+    virtual void multiply ( NICE::Vector & y,
+                            const NICE::Vector & x
+                          ) const;
 
 
     /** get the number of rows in A */
     /** get the number of rows in A */
     virtual uint rows () const;
     virtual uint rows () const;
@@ -67,17 +103,23 @@ class GMHIKernelRaw : public GenericMatrix
 
 
     double **getTableA() const;
     double **getTableA() const;
     double **getTableB() const;
     double **getTableB() const;
+    double *getTableT() const;
+
     uint *getNNZPerDimension() const;
     uint *getNNZPerDimension() const;
+    uint getNumberOfDimensions() const;
 
 
     /** simple destructor */
     /** simple destructor */
     virtual ~GMHIKernelRaw();
     virtual ~GMHIKernelRaw();
 
 
     sparseVectorElement **getDataMatrix() const { return examples_raw; };
     sparseVectorElement **getDataMatrix() const { return examples_raw; };
-    void updateTables ( const NICE::Vector _x ) const;
+    void updateTablesAandB ( const NICE::Vector _x ) const;
+    void updateTableT ( const NICE::Vector _x ) const;
 
 
     /** get the diagonal elements of the current matrix */
     /** get the diagonal elements of the current matrix */
     void getDiagonalElements ( NICE::Vector & _diagonalElements ) const;
     void getDiagonalElements ( NICE::Vector & _diagonalElements ) const;
 
 
+    NICE::Vector getLargestValuePerDimension ( ) const;
+
 };
 };
 
 
 }
 }

+ 465 - 109
GPHIKRawClassifier.cpp

@@ -17,8 +17,13 @@
 #include <core/algebra/EigValues.h>
 #include <core/algebra/EigValues.h>
 
 
 // gp-hik-core includes
 // gp-hik-core includes
-#include "GPHIKRawClassifier.h"
-#include "GMHIKernelRaw.h"
+#include "gp-hik-core/GPHIKRawClassifier.h"
+#include "gp-hik-core/GMHIKernelRaw.h"
+
+//
+#include "gp-hik-core/quantization/Quantization1DAequiDist0To1.h"
+#include "gp-hik-core/quantization/Quantization1DAequiDist0ToMax.h"
+#include "gp-hik-core/quantization/QuantizationNDAequiDist0ToMax.h"
 
 
 using namespace std;
 using namespace std;
 using namespace NICE;
 using namespace NICE;
@@ -30,6 +35,53 @@ using namespace NICE;
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
 
 
 
 
+void GPHIKRawClassifier::clearSetsOfTablesAandB( )
+{
+
+    // delete all LUTs A which are needed when no quantization is activated
+    for ( std::map< uint,PrecomputedType >::iterator itA = this->precomputedA.begin();
+          itA != this->precomputedA.end();
+          itA++
+        )
+    {
+        for ( uint idxDim = 0 ; idxDim < this->num_dimension; idxDim++ )
+        {
+            if ( (itA->second)[idxDim] != NULL )
+                delete [] (itA->second)[idxDim];
+        }
+        delete [] itA->second;
+    }
+    this->precomputedA.clear();
+
+
+    // delete all LUTs B which are needed when no quantization is activated
+    for ( std::map< uint,PrecomputedType >::iterator itB = this->precomputedB.begin();
+          itB != this->precomputedB.end();
+          itB++
+        )
+    {
+        for ( uint idxDim = 0 ; idxDim < this->num_dimension; idxDim++ )
+        {
+            if ( (itB->second)[idxDim] != NULL )
+                delete [] (itB->second)[idxDim];
+        }
+        delete [] itB->second;
+    }
+    this->precomputedB.clear();
+}
+
+void GPHIKRawClassifier::clearSetsOfTablesT( )
+{
+    // delete all LUTs used for quantization
+    for ( std::map< uint, double * >::iterator itT = this->precomputedT.begin();
+          itT != this->precomputedT.end();
+          itT++
+         )
+    {
+        delete [] itT->second;
+    }
+    this->precomputedT.clear();
+}
 
 
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
@@ -38,11 +90,18 @@ using namespace NICE;
 /////////////////////////////////////////////////////
 /////////////////////////////////////////////////////
 GPHIKRawClassifier::GPHIKRawClassifier( )
 GPHIKRawClassifier::GPHIKRawClassifier( )
 {
 {
-  this->b_isTrained = false;
-  this->confSection = "";
+  this->b_isTrained       = false;
+  this->confSection       = "";
+
   this->nnz_per_dimension = NULL;
   this->nnz_per_dimension = NULL;
-  this->q = NULL;
-  this->gm = NULL;
+  this->num_examples      = 0;
+  this->num_dimension     = 0;
+
+  this->solver            = NULL;    
+  this->q                 = NULL;
+  this->gm                = NULL;
+
+
 
 
   // in order to be sure about all necessary variables be setup with default values, we
   // in order to be sure about all necessary variables be setup with default values, we
   // run initFromConfig with an empty config
   // run initFromConfig with an empty config
@@ -59,11 +118,16 @@ GPHIKRawClassifier::GPHIKRawClassifier( const Config *_conf,
   // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
   // same code as in empty constructor - duplication can be avoided with C++11 allowing for constructor delegation
   ///////////
   ///////////
 
 
-  this->b_isTrained = false;
-  this->confSection = "";
+  this->b_isTrained       = false;
+  this->confSection       = "";
+
   this->nnz_per_dimension = NULL;
   this->nnz_per_dimension = NULL;
-  this->q = NULL;
-  this->gm = NULL;
+  this->num_examples      = 0;
+  this->num_dimension     = 0;
+
+  this->solver            = NULL;    
+  this->q                 = NULL;
+  this->gm                = NULL;
 
 
   ///////////
   ///////////
   // here comes the new code part different from the empty constructor
   // here comes the new code part different from the empty constructor
@@ -88,11 +152,26 @@ GPHIKRawClassifier::GPHIKRawClassifier( const Config *_conf,
 
 
 GPHIKRawClassifier::~GPHIKRawClassifier()
 GPHIKRawClassifier::~GPHIKRawClassifier()
 {
 {
-  delete this->solver;
-  this->solver = NULL;
+  if ( this->solver != NULL )
+  {
+    delete this->solver;
+    this->solver = NULL;
+  }
 
 
-  if (gm != NULL)
-    delete gm;
+  if ( this->gm != NULL)
+  {
+    delete this->gm;
+    this->gm = NULL;
+  }
+
+  this->clearSetsOfTablesAandB();
+  this->clearSetsOfTablesT();
+
+  if ( this->q != NULL )
+  {
+      delete this->q;
+      this->q = NULL;
+  }
 }
 }
 
 
 void GPHIKRawClassifier::initFromConfig(const Config *_conf,
 void GPHIKRawClassifier::initFromConfig(const Config *_conf,
@@ -137,6 +216,46 @@ void GPHIKRawClassifier::initFromConfig(const Config *_conf,
       std::cerr << "   b_eig_verbose " << b_eig_verbose << std::endl;
       std::cerr << "   b_eig_verbose " << b_eig_verbose << std::endl;
       std::cerr << "   i_eig_value_max_iterations " << i_eig_value_max_iterations << std::endl;
       std::cerr << "   i_eig_value_max_iterations " << i_eig_value_max_iterations << std::endl;
   }
   }
+
+  //quantization during classification?
+  bool useQuantization = _conf->gB ( _confSection, "use_quantization", false );
+
+  if ( this->b_verbose )
+  {
+    std::cerr << "_confSection: " << _confSection << std::endl;
+    std::cerr << "use_quantization: " << useQuantization << std::endl;
+  }
+
+  if ( _conf->gB ( _confSection, "use_quantization", false ) )
+  {
+    int numBins = _conf->gI ( _confSection, "num_bins", 100 );
+    if ( this->b_verbose )
+      std::cerr << "GPHIKRawClassifier: quantization initialized with " << numBins << " bins." << std::endl;
+
+
+    std::string s_quantType = _conf->gS( _confSection, "s_quantType", "1d-aequi-0-1" );
+
+    if ( s_quantType == "1d-aequi-0-1" )
+    {
+      this->q = new NICE::Quantization1DAequiDist0To1 ( numBins );
+    }
+    else if ( s_quantType == "1d-aequi-0-max" )
+    {
+      this->q = new NICE::Quantization1DAequiDist0ToMax ( numBins );
+    }
+    else if ( s_quantType == "nd-aequi-0-max" )
+    {
+      this->q = new NICE::QuantizationNDAequiDist0ToMax ( numBins );
+    }
+    else
+    {
+      fthrow(Exception, "Quantization type is unknown " << s_quantType);
+    }
+  }
+  else
+  {
+    this->q = NULL;
+  }
 }
 }
 
 
 ///////////////////// ///////////////////// /////////////////////
 ///////////////////// ///////////////////// /////////////////////
@@ -167,95 +286,269 @@ void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
      fthrow(Exception, "Classifier not trained yet -- aborting!" );
      fthrow(Exception, "Classifier not trained yet -- aborting!" );
   _scores.clear();
   _scores.clear();
 
 
-  GMHIKernelRaw::sparseVectorElement **dataMatrix = gm->getDataMatrix();
-
-  uint maxClassNo = 0;
-  for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
-  {
-    uint classno = i->first;
-    maxClassNo = std::max ( maxClassNo, classno );
-    double beta = 0;
-
-    if ( this->q != NULL ) {
-      std::map<uint, double *>::const_iterator j = this->precomputedT.find ( classno );
-      double *T = j->second;
-      for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++ )
-      {
-        uint dim = i->first;
-        double v = i->second;
-        uint qBin = q->quantize( v, dim );
-
-        beta += T[dim * q->getNumberOfBins() + qBin];
-      }
-    } else {
-      const PrecomputedType & A = i->second;
-      std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
-      const PrecomputedType & B = j->second;
-
-      for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++)
-      {
-        uint dim = i->first;
-        double fval = i->second;
-
-        uint nnz = this->nnz_per_dimension[dim];
-        uint nz = this->num_examples - nnz;
-
-        if ( nnz == 0 ) continue;
-        // useful
-        //if ( fval < this->f_tolerance ) continue;
 
 
-        uint position = 0;
+    // classification with quantization of test inputs
+    if ( this->q != NULL )
+    {
+        uint maxClassNo = 0;
+        for ( std::map< uint, double * >::const_iterator itT = this->precomputedT.begin() ;
+              itT != this->precomputedT.end();
+              itT++
+            )
+        {
+          uint classno = itT->first;
+          maxClassNo   = std::max ( maxClassNo, classno );
+          double beta  = 0;
+          double *T    = itT->second;
+
+          for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++ )
+          {
+            uint dim  = i->first;
+            double v  = i->second;
+            uint qBin = this->q->quantize( v, dim );
+
+            beta += T[dim * this->q->getNumberOfBins() + qBin];
+          }//for-loop over dimensions of test input
+
+          _scores[ classno ] = beta;
+
+        }//for-loop over 1-vs-all models
+    }
+    // classification with exact test inputs, i.e., no quantization involved
+    else
+    {
+        uint maxClassNo = 0;
+        for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
+        {
+          uint classno = i->first;
+          maxClassNo   = std::max ( maxClassNo, classno );
+          double beta  = 0;
+          GMHIKernelRaw::sparseVectorElement **dataMatrix = this->gm->getDataMatrix();
+
+          const PrecomputedType & A = i->second;
+          std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
+          const PrecomputedType & B = j->second;
+
+          for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++)
+          {
+            uint dim    = i->first;
+            double fval = i->second;
+
+            uint nnz = this->nnz_per_dimension[dim];
+            uint nz  = this->num_examples - nnz;
+
+            if ( nnz == 0 ) continue;
+            // useful
+            //if ( fval < this->f_tolerance ) continue;
+
+            uint position = 0;
+
+            //this->X_sorted.findFirstLargerInDimension(dim, fval, position);
+            GMHIKernelRaw::sparseVectorElement fval_element;
+            fval_element.value = fval;
+
+            //std::cerr << "value to search for " << fval << endl;
+            //std::cerr << "data matrix in dimension " << dim << endl;
+            //for (int j = 0; j < nnz; j++)
+            //    std::cerr << dataMatrix[dim][j].value << std::endl;
+
+            GMHIKernelRaw::sparseVectorElement *it = upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
+            position = distance ( dataMatrix[dim], it );
+            
+//             /*// add zero elements
+//             if ( fval_element.value > 0.0 )
+//                 position += nz;*/
+
+
+            bool posIsZero ( position == 0 );
+            
+            // special case 1:
+            // new example is smaller than all known examples
+            // -> resulting value = fval * sum_l=1^n alpha_l               
+            if ( position == 0 )
+            {
+              beta += fval * B[ dim ][ nnz - 1 ];  
+            }
+            // special case 2:
+            // new example is equal to or larger than the largest training example in this dimension
+            // -> the term B[ dim ][ nnz-1 ] - B[ dim ][ indexElem ] is equal to zero and vanishes, which is logical, since all elements are smaller than the remaining prototypes!            
+            else if ( position == nnz )
+            {
+              beta += A[ dim ][ nnz - 1 ];
+            }
+            // standard case: new example is larger then the smallest element, but smaller then the largest one in the corrent dimension        
+            else
+            {
+                beta += A[ dim ][ position - 1 ] + fval * ( B[ dim ][ nnz - 1 ] - B[ dim ][ position - 1 ] );
+            }
+            
+//             // correct upper bound to correct position, only possible if new example is not the smallest value in this dimension
+//             if ( !posIsZero )
+//                 position--;
+// 
+// 
+//             double firstPart = 0.0;
+//             if ( !posIsZero  )
+//               firstPart = ( A[ dim ][ position ] );
+// 
+//             double secondPart( B[ dim ][ this->num_examples-1-nz ]);
+//             if ( !posIsZero && (position >= nz) )
+//                 secondPart -= B[dim][ position ];
+// 
+//             // but apply using the transformed one
+//             beta += firstPart + secondPart* fval;
+          }//for-loop over dimensions of test input
+
+          _scores[ classno ] = beta;
+
+        }//for-loop over 1-vs-all models
+
+    } // if-condition wrt quantization
+  _scores.setDim ( *this->knownClasses.rbegin() + 1 );
 
 
-        //this->X_sorted.findFirstLargerInDimension(dim, fval, position);
-        GMHIKernelRaw::sparseVectorElement fval_element;
-        fval_element.value = fval;
 
 
-        //std::cerr << "value to search for " << fval << endl;
-        //std::cerr << "data matrix in dimension " << dim << endl;
-        //for (int j = 0; j < nnz; j++)
-        //    std::cerr << dataMatrix[dim][j].value << std::endl;
+  if ( this->knownClasses.size() > 2 )
+  { // multi-class classification
+    _result = _scores.maxElement();
+  }
+  else if ( this->knownClasses.size() == 2 ) // binary setting
+  {
+    uint class1 = *(this->knownClasses.begin());
+    uint class2 = *(this->knownClasses.rbegin());
 
 
-        GMHIKernelRaw::sparseVectorElement *it = upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
-        position = distance ( dataMatrix[dim], it );
-        // add zero elements
-        if ( fval_element.value > 0.0 )
-            position += nz;
+    // since we erased the binary label vector corresponding to the smaller class number,
+    // we only have scores for the larger class number
+    uint class_for_which_we_have_a_score          = class2;
+    uint class_for_which_we_dont_have_a_score     = class1;
 
 
+    _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];
 
 
-        bool posIsZero ( position == 0 );
-        if ( !posIsZero )
-            position--;
+    _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
+  }
 
 
+}
 
 
-        double firstPart = 0.0;
-        if ( !posIsZero && ((position-nz) < this->num_examples) )
-          firstPart = (A[dim][position-nz]);
 
 
-        double secondPart( B[dim][this->num_examples-1-nz]);
-        if ( !posIsZero && (position >= nz) )
-            secondPart -= B[dim][position-nz];
+void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
+                                 uint & _result,
+                                 Vector & _scores
+                               ) const
+{
+  if ( ! this->b_isTrained )
+     fthrow(Exception, "Classifier not trained yet -- aborting!" );
 
 
-        // but apply using the transformed one
-        beta += firstPart + secondPart* fval;
-      }
+    // classification with quantization of test inputs
+    if ( this->q != NULL )
+    {
+        uint maxClassNo = 0;
+        for ( std::map< uint, double * >::const_iterator itT = this->precomputedT.begin() ;
+              itT != this->precomputedT.end();
+              itT++
+            )
+        {
+          uint classno = itT->first;
+          maxClassNo   = std::max ( maxClassNo, classno );
+          double beta  = 0;
+          double *T    = itT->second;
+
+          for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++ )
+          {
+            uint dim  = i->first;
+            double v  = i->second;
+            uint qBin = this->q->quantize( v, dim );
+
+            beta += T[dim * this->q->getNumberOfBins() + qBin];
+          }//for-loop over dimensions of test input
+
+          _scores[ classno ] = beta;
+
+        }//for-loop over 1-vs-all models
     }
     }
-
-    _scores[ classno ] = beta;
-  }
-  _scores.setDim ( *this->knownClasses.rbegin() + 1 );
-
+    // classification with exact test inputs, i.e., no quantization involved
+    else
+    {
+        uint maxClassNo = 0;
+        for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
+        {
+          uint classno = i->first;
+          maxClassNo   = std::max ( maxClassNo, classno );
+          double beta  = 0;
+          GMHIKernelRaw::sparseVectorElement **dataMatrix = this->gm->getDataMatrix();
+
+          const PrecomputedType & A = i->second;
+          std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
+          const PrecomputedType & B = j->second;
+
+          for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++)
+          {
+            uint dim    = i->first;
+            double fval = i->second;
+
+            uint nnz = this->nnz_per_dimension[dim];
+            uint nz  = this->num_examples - nnz;
+
+            if ( nnz == 0 ) continue;
+            // useful
+            //if ( fval < this->f_tolerance ) continue;
+
+            uint position = 0;
+
+            //this->X_sorted.findFirstLargerInDimension(dim, fval, position);
+            GMHIKernelRaw::sparseVectorElement fval_element;
+            fval_element.value = fval;
+
+            //std::cerr << "value to search for " << fval << endl;
+            //std::cerr << "data matrix in dimension " << dim << endl;
+            //for (int j = 0; j < nnz; j++)
+            //    std::cerr << dataMatrix[dim][j].value << std::endl;
+
+            GMHIKernelRaw::sparseVectorElement *it = upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
+            position = distance ( dataMatrix[dim], it );
+
+            bool posIsZero ( position == 0 );
+
+            // special case 1:
+            // new example is smaller than all known examples
+            // -> resulting value = fval * sum_l=1^n alpha_l
+            if ( position == 0 )
+            {
+              beta += fval * B[ dim ][ nnz - 1 ];
+            }
+            // special case 2:
+            // new example is equal to or larger than the largest training example in this dimension
+            // -> the term B[ dim ][ nnz-1 ] - B[ dim ][ indexElem ] is equal to zero and vanishes, which is logical, since all elements are smaller than the remaining prototypes!
+            else if ( position == nnz )
+            {
+              beta += A[ dim ][ nnz - 1 ];
+            }
+            // standard case: new example is larger then the smallest element, but smaller then the largest one in the corrent dimension
+            else
+            {
+                beta += A[ dim ][ position - 1 ] + fval * ( B[ dim ][ nnz - 1 ] - B[ dim ][ position - 1 ] );
+            }
+
+          }//for-loop over dimensions of test input
+
+          _scores[ classno ] = beta;
+
+        }//for-loop over 1-vs-all models
+
+    } // if-condition wrt quantization
 
 
   if ( this->knownClasses.size() > 2 )
   if ( this->knownClasses.size() > 2 )
   { // multi-class classification
   { // multi-class classification
-    _result = _scores.maxElement();
+    _result = _scores.MaxIndex();
   }
   }
   else if ( this->knownClasses.size() == 2 ) // binary setting
   else if ( this->knownClasses.size() == 2 ) // binary setting
   {
   {
     uint class1 = *(this->knownClasses.begin());
     uint class1 = *(this->knownClasses.begin());
     uint class2 = *(this->knownClasses.rbegin());
     uint class2 = *(this->knownClasses.rbegin());
-    uint class_for_which_we_have_a_score = _scores.begin()->first;
-    uint class_for_which_we_dont_have_a_score = (class1 == class_for_which_we_have_a_score ? class2 : class1);
 
 
+    // since we erased the binary label vector corresponding to the smaller class number,
+    // we only have scores for the larger class number
+    uint class_for_which_we_have_a_score          = class2;
+    uint class_for_which_we_dont_have_a_score     = class1;
+    
     _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];
     _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];
 
 
     _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
     _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
@@ -263,6 +556,41 @@ void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
 
 
 }
 }
 
 
+void GPHIKRawClassifier::classify ( const std::vector< const NICE::SparseVector *> _examples,
+                                    NICE::Vector & _results,
+                                    NICE::Matrix & _scores
+                                  ) const
+{
+    _scores.resize( _examples.size(), this->knownClasses.size() );
+    _scores.set( 0.0 );
+
+    _results.resize( _examples.size() );
+    _results.set( 0.0 );
+
+
+    NICE::Vector::iterator resultsIt = _results.begin();
+    NICE::Vector scoresSingle( this->knownClasses.size(), 0.0);
+
+
+    uint exCnt ( 0 );
+    for ( std::vector< const NICE::SparseVector *>::const_iterator exIt = _examples.begin();
+          exIt != _examples.end();
+          exIt++, resultsIt++, exCnt++
+        )
+    {
+        uint resUI;
+        this->classify ( *exIt,
+                         resUI,
+                         scoresSingle
+                       );
+
+        *resultsIt = resUI;
+        _scores.setRow( exCnt, scoresSingle );
+        scoresSingle.set( 0.0 );
+    }
+}
+
+
 
 /** training process */
 void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
@@ -286,25 +614,36 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
     uint current_class = *j;
     Vector labels_binary ( _labels.size() );
     for ( uint i = 0; i < _labels.size(); i++ )
+    {
        labels_binary[i] = ( _labels[i] == current_class ) ? 1.0 : -1.0;
+    }
 
-    binLabels.insert ( pair<uint, NICE::Vector>( current_class, labels_binary) );
+    binLabels.insert ( std::pair<uint, NICE::Vector>( current_class, labels_binary) );
   }
 
   // handle special binary case
   if ( knownClasses.size() == 2 )
   {
-    std::map<uint, NICE::Vector>::iterator it = binLabels.begin();
-    it++;
-    binLabels.erase( binLabels.begin(), it );
+      // we erase the binary label vector that uses the smaller class number as positive class
+      uint clNoSmall = *(this->knownClasses.begin());
+      std::map<uint, NICE::Vector>::iterator it = binLabels.begin();
+      it++;
+      if ( binLabels.begin()->first == clNoSmall )
+      {
+        binLabels.erase( binLabels.begin(), it );
+      }
+      else
+      {
+        binLabels.erase( it, binLabels.end() );
+      }
   }
 
   this->train ( _examples, binLabels );
 }
 
 void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *> & _examples,
-                              std::map<uint, NICE::Vector> & _binLabels
-                            )
+                                 std::map<uint, NICE::Vector> & _binLabels
+                               )
 {
   // security-check: examples and labels have to be of same size
   for ( std::map< uint, NICE::Vector >::const_iterator binLabIt = _binLabels.begin();
@@ -324,17 +663,19 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
   Timer t;
   t.start();
 
-  precomputedA.clear();
-  precomputedB.clear();
-  precomputedT.clear();
+  this->clearSetsOfTablesAandB();
+  this->clearSetsOfTablesT();
+
 
   // sort examples in each dimension and "transpose" the feature matrix
   // set up the GenericMatrix interface
-  if (gm != NULL)
-    delete gm;
+  if ( this->gm != NULL )
+    delete this->gm;
+
+  this->gm = new GMHIKernelRaw ( _examples, this->d_noise, this->q );
+  this->nnz_per_dimension = this->gm->getNNZPerDimension();
+  this->num_dimension     = this->gm->getNumberOfDimensions();
 
-  gm = new GMHIKernelRaw ( _examples, this->d_noise );
-  nnz_per_dimension = gm->getNNZPerDimension();
 
   // compute largest eigenvalue of our kernel matrix
   // note: this guy is shared among all categories,
@@ -342,6 +683,7 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
   NICE::Vector eigenMax;
   NICE::Matrix eigenMaxV;
   // for reproducibility during debugging
+  //FIXME
   srand ( 0 );
   srand48 ( 0 );
 
@@ -349,14 +691,13 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
                                           this->i_eig_value_max_iterations
                                         );
 
-
   eig->getEigenvalues( *gm, eigenMax, eigenMaxV, 1 /*rank*/ );
   delete eig;
 
   // set simple jacobi pre-conditioning
   NICE::Vector diagonalElements;
-  gm->getDiagonalElements ( diagonalElements );
-  solver->setJacobiPreconditioner ( diagonalElements );
+  this->gm->getDiagonalElements ( diagonalElements );
+  this->solver->setJacobiPreconditioner ( diagonalElements );
 
   // solve linear equations for each class
   // be careful when parallelising this!
@@ -385,15 +726,33 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
     */
     alpha = (y * (1.0 / eigenMax[0]) );
 
-    solver->solveLin( *gm, y, alpha );
+    this->solver->solveLin( *gm, y, alpha );
 
-    // TODO: get lookup tables, A, B, etc. and store them
-    gm->updateTables(alpha);
-    double **A = gm->getTableA();
-    double **B = gm->getTableB();
+    // get lookup tables, A, B, etc. and store them
+    this->gm->updateTablesAandB( alpha );
+    double **A = this->gm->getTableA();
+    double **B = this->gm->getTableB();
+
+    this->precomputedA.insert ( std::pair<uint, PrecomputedType> ( classno, A ) );
+    this->precomputedB.insert ( std::pair<uint, PrecomputedType> ( classno, B ) );
+
+    // Quantization for classification?
+    if ( this->q != NULL )
+    {
+      this->gm->updateTableT( alpha );
+      double *T = this->gm->getTableT ( );
+      this->precomputedT.insert( std::pair<uint, double * > ( classno, T ) );
 
-    precomputedA.insert ( pair<uint, PrecomputedType> ( classno, A ) );
-    precomputedB.insert ( pair<uint, PrecomputedType> ( classno, B ) );
+    }
+  }
+
+  // NOTE if quantization is turned on, we do not need LUTs A and B anymore
+  if ( this->q != NULL )
+  {
+    this->clearSetsOfTablesAandB();
+  }
   }
 
 
 
@@ -401,7 +760,6 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
   if ( this->b_verbose )
     std::cerr << "Time used for setting up the fmk object: " << t.getLast() << std::endl;
 
-
   //indicate that we finished training successfully
   this->b_isTrained = true;
 
@@ -411,5 +769,3 @@ void GPHIKRawClassifier::train ( const std::vector< const NICE::SparseVector *>
 
 
 }
-
-
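
The per-dimension rule added in the classify hunks above deserves a compact restatement. What follows is a minimal standalone sketch, not library code: plain std::vector stands in for the NICE containers, and evalDim is an illustrative name. A[k] and B[k] are the cumulative alpha-weighted value sums and alpha sums over the k+1 smallest training values, exactly as the tables prepared during training.

#include <algorithm>
#include <cstddef>
#include <vector>

// A[k]: sum over the k+1 smallest training values of this dimension, each
//       weighted by its alpha;  B[k]: sum of the corresponding alphas
double evalDim ( const std::vector<double> & sortedVals, // ascending values of one dimension
                 const std::vector<double> & A,
                 const std::vector<double> & B,
                 double fval )                           // test value in this dimension
{
  const std::size_t nnz = sortedVals.size();

  // number of training values that are <= fval
  const std::size_t position =
      std::upper_bound ( sortedVals.begin(), sortedVals.end(), fval ) - sortedVals.begin();

  if ( position == 0 )    // fval below all training values: every min(x, fval) equals fval
    return fval * B[ nnz - 1 ];

  if ( position == nnz )  // fval above all training values: every min(x, fval) equals x
    return A[ nnz - 1 ];

  // mixed case: exact contribution of the smaller values plus fval times
  // the alpha mass of the larger ones
  return A[ position - 1 ] + fval * ( B[ nnz - 1 ] - B[ position - 1 ] );
}

With these tables in place, scoring a test example costs one binary search per nonzero dimension instead of a pass over all training examples.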

+ 37 - 3
GPHIKRawClassifier.h

@@ -25,12 +25,12 @@
 namespace NICE {
 
  /**
- * @class GPHIKClassifier
+ * @class GPHIKRawClassifier
  * @brief ...
- * @author Erik Rodner
+ * @author Erik Rodner, Alexander Freytag
  */
 
-class GPHIKRawClassifier //: public NICE::Persistent
+class GPHIKRawClassifier
 {
 
   protected:
@@ -90,6 +90,7 @@ class GPHIKRawClassifier //: public NICE::Persistent
 
     uint *nnz_per_dimension;
     uint num_examples;
+    uint num_dimension;
 
     double f_tolerance;
 
@@ -102,6 +103,15 @@ class GPHIKRawClassifier //: public NICE::Persistent
     /////////////////////////
     /////////////////////////
 
+    void clearSetsOfTablesAandB();
+    void clearSetsOfTablesT();
+
+
+    /////////////////////////
+    /////////////////////////
+    //    PUBLIC METHODS   //
+    /////////////////////////
+    /////////////////////////
 
   public:
 
@@ -159,6 +169,30 @@ class GPHIKRawClassifier //: public NICE::Persistent
                     NICE::SparseVector & _scores
                   ) const;
 
+    /**
+     * @brief classify a given example with the previously learned model
+     * @author Alexander Freytag, Erik Rodner
+     * @param example (SparseVector) to be classified given in a sparse representation
+     * @param result (uint) class number of most likely class
+     * @param scores (Vector) classification scores for known classes
+     */
+    void classify ( const NICE::SparseVector * _example,
+                    uint & _result,
+                    NICE::Vector & _scores
+                  ) const;
+
+    /**
+     * @brief classify a given set of examples with the previously learned model
+     * @author Alexander Freytag, Erik Rodner
+     * @param examples (std::vector< const NICE::SparseVector *>) to be classified given in a sparse representation
+     * @param results (Vector) class number of most likely class per example
+     * @param scores (NICE::Matrix) classification scores for known classes and test examples
+     */
+    void classify ( const std::vector< const NICE::SparseVector *> _examples,
+                    NICE::Vector & _results,
+                    NICE::Matrix & _scores
+                  ) const;
+
     /**
      * @brief train this classifier using a given set of examples and a given set of binary label vectors
      * @date 18-10-2012 (dd-mm-yyyy)

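The two overloads declared above pair a single-example entry point with a batch entry point. A usage sketch under the assumption of an already trained classifier; the include path is the expected one for this repository but not verified here:

#include <vector>
#include "gp-hik-core/GPHIKRawClassifier.h"

// assumes classifier.train(...) has been called elsewhere
void classifyExamples ( const NICE::GPHIKRawClassifier & classifier,
                        const NICE::SparseVector * xstar,
                        const std::vector< const NICE::SparseVector * > & examples )
{
  // single example: most likely class plus one score per known class
  uint result;
  NICE::Vector scores;
  classifier.classify ( xstar, result, scores );

  // batch: one entry in results and one row in scoreMatrix per example
  NICE::Vector results;
  NICE::Matrix scoreMatrix;
  classifier.classify ( examples, results, scoreMatrix );
}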
+ 18 - 0
matlab/ConverterMatlabToNICE.cpp

@@ -233,4 +233,22 @@ bool MatlabConversion::convertMatlabToBool( const mxArray *matlabBool )
   bool* ptr = (bool*) mxGetData( matlabBool );
   return ptr[0];
 }
+
+bool MatlabConversion::isSparseDataAMatrix( const mxArray *array_ptr )
+{
+     // dimensions of the matrix -> number of examples and feature dimension
+     mwSize i_numExamples = mxGetM( array_ptr );
+     mwSize i_numDim      = mxGetN( array_ptr );
+
+     // a single row or a single column is treated as a vector, not as a matrix
+     return ( i_numExamples > 1 ) && ( i_numDim > 1 );
+}
 #endif
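The shape rule behind the new helper is small enough to state on its own. A self-contained restatement with illustrative naming (isMatrixShape is not part of MatlabConversion):

#include <cassert>
#include <cstddef>

// an M x D input counts as a matrix only if both M and D exceed 1
bool isMatrixShape ( std::size_t M, std::size_t D )
{
  return ( M > 1 ) && ( D > 1 );
}

int main ()
{
  assert ( !isMatrixShape ( 1, 30 ) ); // a single example  -> vector
  assert ( !isMatrixShape ( 30, 1 ) ); // one-dimensional   -> vector
  assert (  isMatrixShape ( 5, 30 ) ); // several examples  -> matrix
  return 0;
}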

+ 8 - 0
matlab/ConverterMatlabToNICE.h

@@ -92,6 +92,14 @@ namespace NICE {
      **/    
     bool convertMatlabToBool( const mxArray *matlabBool );
 
+    /**
+     * @brief Checks whether a given sparse data structure is a matrix (or a vector instead)
+     *
+     * @param array_ptr Sparse MxD Matlab matrix
+     * @return bool. false if either M or D equals 1
+     **/
+    bool isSparseDataAMatrix( const mxArray *array_ptr );
+
 
 } //ns MatlabConversion
 
 }

+ 70 - 17
matlab/GPHIKRawClassifierMex.cpp

@@ -56,6 +56,7 @@ NICE::Config parseParametersGPHIKRawClassifier(const mxArray *prhs[], int nrhs)
     /////////////////////////////////////////
     if( (variable == "verbose") ||
         (variable == "debug") ||
+        (variable == "use_quantization") ||
         (variable == "ils_verbose")
       )
     {
@@ -92,7 +93,8 @@ NICE::Config parseParametersGPHIKRawClassifier(const mxArray *prhs[], int nrhs)
     /////////////////////////////////////////
     // READ STRICT POSITIVE INT VARIABLES
     /////////////////////////////////////////
-    if ( ( variable == "ils_max_iterations" )||
+    if ( (variable == "num_bins") ||
+         ( variable == "ils_max_iterations" )||
          ( variable == "eig_value_max_iterations" )
        )
     {
@@ -166,6 +168,14 @@ NICE::Config parseParametersGPHIKRawClassifier(const mxArray *prhs[], int nrhs)
         conf.sS("GPHIKRawClassifier", variable, value);
     }
 
+    if(variable == "s_quantType")
+    {
+      string value = MatlabConversion::convertMatlabToString( prhs[i+1] );
+      if( value != "1d-aequi-0-1" && value != "1d-aequi-0-max" && value != "nd-aequi-0-max" )
+      {
+        mexErrMsgIdAndTxt("mexnice:error","Unexpected parameter value for \'s_quantType\'. \'1d-aequi-0-1\', \'1d-aequi-0-max\' or \'nd-aequi-0-max\' expected.");
+      }
+      conf.sS("GPHIKRawClassifier", variable, value);
+    }
+
   }
 
 
 
 
@@ -283,44 +293,87 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
             mexErrMsgTxt("Test: Unexpected arguments.");
         }
 
-        //------------- read the data --------------
-
-        uint result;
-        NICE::SparseVector scores;
-
         if ( mxIsSparse( prhs[2] ) )
         {
+          if ( MatlabConversion::isSparseDataAMatrix( prhs[2] ) )
+          {
+            //----------------- conversion -------------
+            std::vector< const NICE::SparseVector *> examplesTest;
+            examplesTest = MatlabConversion::convertSparseMatrixToNice( prhs[2] );
+            
+            //----------------- classification -------------
+            NICE::Vector results;
+            NICE::Matrix scores;            
+            classifier->classify ( examplesTest,  results, scores );
+            
+            //----------------- clean up -------------
+            for ( std::vector< const NICE::SparseVector *>::iterator exIt = examplesTest.begin();
+                 exIt != examplesTest.end();
+                 exIt++
+            )
+            {
+              delete *exIt;
+            }
+            
+            //----------------- output -------------
+            plhs[0] = MatlabConversion::convertVectorFromNice( results );
+
+            if(nlhs >= 2)
+            {
+              plhs[1] = MatlabConversion::convertMatrixFromNice( scores );
+            }
+            return;            
+          }
+          else
+          { 
+            //----------------- conversion -------------
             NICE::SparseVector * example;
             example = new NICE::SparseVector ( MatlabConversion::convertSparseVectorToNice( prhs[2] ) );
+
+            //----------------- classification -------------
+            uint result;
+            NICE::SparseVector scores;
             classifier->classify ( example,  result, scores );
 
             //----------------- clean up -------------
             delete example;
+            
+            //----------------- output -------------
+            plhs[0] = mxCreateDoubleScalar( result );
+
+            if(nlhs >= 2)
+            {
+              plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
+            }
+            return;            
+          }
         }
         else
         {
+            //----------------- conversion -------------
             NICE::Vector * example;
             example = new NICE::Vector ( MatlabConversion::convertDoubleVectorToNice(prhs[2]) );
             NICE::SparseVector * svec  = new NICE::SparseVector( *example );
             delete example;
 
+            //----------------- classification -------------
+            uint result;
+            NICE::SparseVector scores;            
             classifier->classify ( svec,  result, scores );
 
             //----------------- clean up -------------
             delete svec;
+            
+            
+            //----------------- output -------------
+            plhs[0] = mxCreateDoubleScalar( result );
 
+            if(nlhs >= 2)
+            {
+              plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
+            }
+            return;
         }
-
-
-
-          // output
-          plhs[0] = mxCreateDoubleScalar( result );
-
-          if(nlhs >= 2)
-          {
-            plhs[1] = MatlabConversion::convertSparseVectorFromNice( scores, true  /*b_adaptIndex*/);
-          }
-          return;
     }
 
 

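The restructured test branch above dispatches on two properties of the Matlab input. Condensed outside the mex API (names are illustrative, not library code):

// sparse matrix -> batch classification, sparse vector -> single sparse
// example, anything dense -> converted to a single sparse example
enum InputKind { SPARSE_MATRIX, SPARSE_VECTOR, DENSE_VECTOR };

InputKind selectInputPath ( bool isSparse, bool isMatrix )
{
  if ( isSparse )
    return isMatrix ? SPARSE_MATRIX : SPARSE_VECTOR;
  return DENSE_VECTOR;
}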
+ 3 - 4
quantization/Quantization.h

@@ -13,9 +13,6 @@
 // 
 #include <core/vector/VectorT.h>
 
-// gp-hik-core includes
-#include "gp-hik-core/FeatureMatrixT.h"
-
 namespace NICE {
   
  /** 
@@ -97,7 +94,9 @@ class Quantization  : public NICE::Persistent
                        
                        
-  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) = 0;
+  virtual void computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension ) = 0;
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore

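After this change, a derived quantizer only ever sees the per-dimension maxima instead of the whole feature matrix. A toy sketch of what an implementation of the new pure virtual looks like (ExampleQuantization is hypothetical, omits the remaining pure virtuals of Quantization, and assumes the protected v_upperBounds member the shipped variants use):

#include "gp-hik-core/quantization/Quantization.h"

// hypothetical derived quantizer; only the data-dependent part is shown
class ExampleQuantization : public NICE::Quantization
{
  public:
    virtual void computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension )
    {
      // keep one upper bound per dimension, as the ND variant below does
      this->v_upperBounds = _maxValuesPerDimension;
    }
};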
+ 2 - 1
quantization/Quantization1DAequiDist0To1.cpp

@@ -52,10 +52,11 @@ uint Quantization1DAequiDist0To1::quantize ( double _value,
     return static_cast<uint> ( _value * (this->ui_numBins-1) + 0.5 );
 }
 
-void Quantization1DAequiDist0To1::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
+void Quantization1DAequiDist0To1::computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension )
 {
   // nothing to do here...
 }
+
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void Quantization1DAequiDist0To1::restore ( std::istream & _is, 

+ 1 - 1
quantization/Quantization1DAequiDist0To1.h

@@ -77,7 +77,7 @@ class Quantization1DAequiDist0To1  : public NICE::Quantization
                           const uint & _dim = 0
                         ) const;
                         
-  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;
+  virtual void computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension );
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore

+ 3 - 3
quantization/Quantization1DAequiDist0ToMax.cpp

@@ -58,12 +58,12 @@ uint Quantization1DAequiDist0ToMax::quantize ( double _value,
 
 
 
-void Quantization1DAequiDist0ToMax::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
+void Quantization1DAequiDist0ToMax::computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension )
 {
-      double vmax = ( _fm->getLargestValue( ) );
       this->v_upperBounds.resize ( 1 );
-      this->v_upperBounds ( 0 ) = vmax;
+      this->v_upperBounds ( 0 ) = _maxValuesPerDimension.Max();
 }
+
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void Quantization1DAequiDist0ToMax::restore ( std::istream & _is, 

+ 1 - 1
quantization/Quantization1DAequiDist0ToMax.h

@@ -78,7 +78,7 @@ class Quantization1DAequiDist0ToMax  : public NICE::Quantization
                         ) const;
                         
                         
-  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;
+  virtual void computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension );
   
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore

+ 4 - 5
quantization/QuantizationNDAequiDist0ToMax.cpp

@@ -53,13 +53,12 @@ uint QuantizationNDAequiDist0ToMax::quantize ( double _value,
 }
 
 
-void QuantizationNDAequiDist0ToMax::computeParametersFromData ( const NICE::FeatureMatrix *  _fm )
-{     
-  // 100% quantile...
-  double d_quantile ( 1.00 ); 
-  this->v_upperBounds = _fm->getLargestValuePerDimension( d_quantile );  
+void QuantizationNDAequiDist0ToMax::computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension )
+{
+  this->v_upperBounds = _maxValuesPerDimension;
 }
 
+
 // ---------------------- STORE AND RESTORE FUNCTIONS ----------------------
 
 void QuantizationNDAequiDist0ToMax::restore ( std::istream & _is, 

+ 1 - 1
quantization/QuantizationNDAequiDist0ToMax.h

@@ -78,7 +78,7 @@ class QuantizationNDAequiDist0ToMax  : public NICE::Quantization
                         ) const;
                         
                         
-  virtual void computeParametersFromData ( const NICE::FeatureMatrix *  _fm ) ;
+  virtual void computeParametersFromData ( const NICE::Vector & _maxValuesPerDimension );
                           
   ///////////////////// INTERFACE PERSISTENT /////////////////////
   // interface specific methods for store and restore
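
All three quantizer variants touched above share the same equidistant binning idea and differ only in where the upper bound comes from: a fixed 1, the global maximum, or one maximum per dimension. A simplified standalone version of that mapping, with the clamping made explicit (quantizeEquiDist is illustrative, not library code; numBins and upperBound correspond to ui_numBins and an entry of v_upperBounds):

// value in [0, upperBound] -> bin index in [0, numBins-1];
// values outside the range are clamped to the border bins
unsigned int quantizeEquiDist ( double value, double upperBound, unsigned int numBins )
{
  if ( value <= 0.0 || upperBound <= 0.0 )
    return 0u;
  if ( value >= upperBound )
    return numBins - 1u;
  return static_cast<unsigned int> ( value / upperBound * ( numBins - 1u ) + 0.5 );
}

For the 1D variants upperBound is the same for every dimension; the ND variant passes v_upperBounds[dim], which is exactly what computeParametersFromData now receives via _maxValuesPerDimension.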