@@ -56,7 +56,7 @@ FastMinKernel::FastMinKernel( const std::vector<std::vector<double> > & _X,
#ifdef NICE_USELIB_MATIO
FastMinKernel::FastMinKernel ( const sparse_t & _X,
const double _noise,
- const std::map<int, int> & _examples,
+ const std::map<uint, uint> & _examples,
const bool _debug,
const uint & _dim
) : this->X_sorted( _X, _examples, _dim )
@@ -205,7 +205,7 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
// = b_{k,n} - b_{k,j}

// we only need as many entries as we have nonZero entries in our features for the corresponding dimensions
- for (int i = 0; i < this->ui_d; i++)
+ for (uint i = 0; i < this->ui_d; i++)
{
uint numNonZero = this->X_sorted.getNumberOfNonZeroElementsPerDimension(i);
//DEBUG
@@ -221,7 +221,7 @@ void FastMinKernel::hik_prepare_alpha_multiplications(const NICE::Vector & _alph
double alpha_sum(0.0);
double alpha_times_x_sum(0.0);

- int cntNonzeroFeat(0);
+ uint cntNonzeroFeat(0);

const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
// loop through all elements in sorted order
@@ -289,17 +289,16 @@ double *FastMinKernel::hik_prepare_alpha_multiplications_fast(const NICE::VVecto
SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();

// index of the element, which is always bigger than the current value fval
- int index = 0;
+ uint index = 0;
// we use the quantization of the original features! the transformed feature were
// already used to calculate A and B, this of course assumes monotonic functions!!!
- int qBin = _q.quantize ( i->first );
+ uint qBin = _q.quantize ( i->first );

// the next loop is linear in max(hmax, n)
// REMARK: this could be changed to hmax*log(n), when
// we use binary search

- //FIXME cast to int might be dangerous...
- for (int j = 0; j < (int)hmax; j++)
+ for (uint j = 0; j < hmax; j++)
{
double fval = prototypes[j];
double t;
@@ -387,10 +386,10 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();

// index of the element, which is always bigger than the current value fval
- int index = 0;
+ uint index = 0;

// we use the quantization of the original features! Nevetheless, the resulting lookupTable is computed using the transformed ones
- int qBin = _q.quantize ( i->first );
+ uint qBin = _q.quantize ( i->first );

double alpha_sum(0.0);
double alpha_times_x_sum(0.0);
@@ -402,7 +401,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
double fval = prototypes[j];
double t;

- if ( (index == 0) && (j < (uint)qBin) ) {
+ if ( (index == 0) && (j < qBin) ) {
// current element is smaller than everything else
// resulting value = fval * sum_l=1^n alpha_l
//t = fval*( B[dim][this->n-1 - nrZeroIndices] );
@@ -410,7 +409,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
} else {

// move to next example, if necessary
- while ( (j >= (uint)qBin) && ( index < (this->ui_n-1-nrZeroIndices)) )
+ while ( (j >= qBin) && ( index < (this->ui_n-1-nrZeroIndices)) )
{
alpha_times_x_sum_prev = alpha_times_x_sum;
alpha_sum_prev = alpha_sum;
@@ -427,7 +426,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
// compute current element in the lookup table and keep in mind that
// index is the next element and not the previous one
//NOTE pay attention: this is only valid if all entries are positiv! - if not, ask wether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
- if ( (j >= (uint)qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
+ if ( (j >= qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
// the current element (fval) is equal or bigger to the element indexed by index
// in fact, the term B[dim][this->n-1-nrZeroIndices] - B[dim][index] is equal to zero and vanishes, which is logical, since all elements are smaller than j!
// double lastTermAlphaTimesXSum;
@@ -452,7 +451,7 @@ double *FastMinKernel::hikPrepareLookupTable(const NICE::Vector & _alpha,
void FastMinKernel::hikUpdateLookupTable(double * _T,
const double & _alphaNew,
const double & _alphaOld,
- const int & _idx,
+ const uint & _idx,
const Quantization & _q,
const ParameterizedFunction *_pf
) const
@@ -493,9 +492,9 @@ void FastMinKernel::hikUpdateLookupTable(double * _T,
for (uint j = 0; j < hmax; j++)
{
double fval;
- int q_bin = _q.quantize(x_i);
+ uint q_bin = _q.quantize(x_i);

- if ( q_bin > (int) j )
+ if ( q_bin > j )
fval = prototypes[j];
else
fval = x_i;
@@ -529,12 +528,12 @@ void FastMinKernel::hik_kernel_multiply(const NICE::VVector & _A,
continue;
}

- int cnt(0);
+ uint cnt(0);
for ( multimap< double, SortedVectorSparse<double>::dataelement>::const_iterator i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, cnt++)
{
const SortedVectorSparse<double>::dataelement & de = i->second;
uint feat = de.first;
- int inversePosition = cnt;
+ uint inversePosition = cnt;
double fval = de.second;

// in which position was the element sorted in? actually we only care about the nonzero elements, so we have to subtract the number of zero elements.
@@ -587,7 +586,7 @@ void FastMinKernel::hik_kernel_multiply_fast(const double *_Tlookup,
// -- efficient sparse solution
const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();

- int cnt(0);
+ uint cnt(0);
for ( multimap< double, SortedVectorSparse<double>::dataelement>::const_iterator i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, cnt++)
{
const SortedVectorSparse<double>::dataelement & de = i->second;
@@ -616,16 +615,16 @@ void FastMinKernel::hik_kernel_sum(const NICE::VVector & _A,
for (SparseVector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++)
{

- int dim = i->first;
+ uint dim = i->first;
double fval = i->second;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero and let us ignore it completely
continue;
}

- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -671,19 +670,19 @@ void FastMinKernel::hik_kernel_sum(const NICE::VVector & _A,
) const
{
_beta = 0.0;
- int dim ( 0 );
+ uint dim ( 0 );
for (NICE::Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++)
{

double fval = *i;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero and let us ignore it completely
continue;
}

- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -757,7 +756,7 @@ void FastMinKernel::hik_kernel_sum_fast(const double *_Tlookup,
// runtime is O(d) if the quantizer is O(1)
for (SparseVector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++ )
{
- int dim = i->first;
+ uint dim = i->first;
double v = i->second;
uint qBin = _q.quantize(v);

@@ -840,7 +839,7 @@ double *FastMinKernel::solveLin(const NICE::Vector & _y,
t.start();
}

- for ( int i = 0; i < sizeOfRandomSubset; i++)
+ for ( uint i = 0; i < sizeOfRandomSubset; i++)
{

pseudoResidual(perm[i]) = -_y(perm[i]) + (this->d_noise * _alpha(perm[i]));
@@ -1082,7 +1081,7 @@ void FastMinKernel::hikPrepareKVNApproximation(NICE::VVector & _A) const
{
double squared_sum(0.0);

- int cntNonzeroFeat(0);
+ uint cntNonzeroFeat(0);

const multimap< double, SortedVectorSparse<double>::dataelement> & nonzeroElements = this->X_sorted.getFeatureValues(dim).nonzeroElements();
// loop through all elements in sorted order
@@ -1139,16 +1138,17 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();

// index of the element, which is always bigger than the current value fval
- int index = 0;
+ uint index = 0;
// we use the quantization of the original features! the transformed feature were
// already used to calculate A and B, this of course assumes monotonic functions!!!
- int qBin = _q.quantize ( i->first );
+ uint qBin = _q.quantize ( i->first );

// the next loop is linear in max(hmax, n)
// REMARK: this could be changed to hmax*log(n), when
// we use binary search
+ //FIXME we should do this!

- for (int j = 0; j < (int)hmax; j++)
+ for (uint j = 0; j < hmax; j++)
{
double fval = prototypes[j];
double t;
@@ -1172,7 +1172,7 @@ double * FastMinKernel::hikPrepareKVNApproximationFast(NICE::VVector & _A,
// compute current element in the lookup table and keep in mind that
// index is the next element and not the previous one
//NOTE pay attention: this is only valid if all entries are positiv! - if not, ask wether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
- if ( (j >= (uint)qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
+ if ( (j >= qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
// the current element (fval) is equal or bigger to the element indexed by index
// the second term vanishes, which is logical, since all elements are smaller than j!
t = _A[dim][index];
@@ -1216,9 +1216,9 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
double *Tlookup = new double [ hmax * this->ui_d ];

// loop through all dimensions
- for (int dim = 0; dim < this->ui_d; dim++)
+ for (uint dim = 0; dim < this->ui_d; dim++)
{
- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n )
continue;

@@ -1228,10 +1228,10 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
SortedVectorSparse<double>::const_elementpointer iPredecessor = nonzeroElements.begin();

// index of the element, which is always bigger than the current value fval
- int index = 0;
+ uint index = 0;

// we use the quantization of the original features! Nevetheless, the resulting lookupTable is computed using the transformed ones
- int qBin = _q.quantize ( i->first );
+ uint qBin = _q.quantize ( i->first );

double sum(0.0);

@@ -1240,14 +1240,14 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
double fval = prototypes[j];
double t;

- if ( (index == 0) && (j < (uint)qBin) ) {
+ if ( (index == 0) && (j < qBin) ) {
// current element is smaller than everything else
// resulting value = fval * sum_l=1^n 1
t = pow( fval, 2 ) * (this->ui_n-nrZeroIndices-index);
} else {

// move to next example, if necessary
- while ( (j >= (uint)qBin) && ( index < (this->ui_n-nrZeroIndices)) )
+ while ( (j >= qBin) && ( index < (this->ui_n-nrZeroIndices)) )
{
sum += pow( i->second.second, 2 ); //i->dataElement.transformedFeatureValue

@@ -1261,7 +1261,7 @@ double* FastMinKernel::hikPrepareLookupTableForKVNApproximation(const Quantizati
// compute current element in the lookup table and keep in mind that
// index is the next element and not the previous one
//NOTE pay attention: this is only valid if we all entries are positiv! - if not, ask wether the current feature is greater than zero. If so, subtract the nrZeroIndices, if not do not
- if ( (j >= (uint)qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
+ if ( (j >= qBin) && ( index==(this->ui_n-1-nrZeroIndices) ) ) {
// the current element (fval) is equal or bigger to the element indexed by index
// the second term vanishes, which is logical, since all elements are smaller than j!
t = sum;
@@ -1293,16 +1293,16 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
for (SparseVector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++)
{

- int dim = i->first;
+ uint dim = i->first;
double fval = i->second;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero so let us ignore them completely
continue;
}

- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -1346,7 +1346,7 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
// runtime is O(d) if the quantizer is O(1)
for (SparseVector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++ )
{
- int dim = i->first;
+ uint dim = i->first;
double v = i->second;
// we do not need a parameterized function here, since the quantizer works on the original feature values.
// nonetheless, the lookup table was created using the parameterized function
@@ -1368,17 +1368,17 @@ void FastMinKernel::hikComputeKernelVector ( const NICE::SparseVector& _xstar,
for (SparseVector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++)
{

- int dim = i->first;
+ uint dim = i->first;
double fval = i->second;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero so let us ignore them completely
continue;
}


- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -1391,10 +1391,10 @@ void FastMinKernel::hikComputeKernelVector ( const NICE::SparseVector& _xstar,

//run over the non-zero elements and add the corresponding entries to our kernel vector

- int count(nrZeroIndices);
+ uint count(nrZeroIndices);
for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, count++ )
{
- int origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
+ uint origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
if (count <= position)
_kstar[origIndex] += i->first; //orig feature value
else
@@ -1413,19 +1413,19 @@ void FastMinKernel::hikComputeKVNApproximation(const NICE::VVector & _A,
const ParameterizedFunction *_pf )
{
_norm = 0.0;
- int dim ( 0 );
+ uint dim ( 0 );
for (Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++)
{

double fval = *i;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero so let us ignore them completely
continue;
}

- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -1467,7 +1467,7 @@ void FastMinKernel::hikComputeKVNApproximationFast(const double *_Tlookup,
{
_norm = 0.0;
// runtime is O(d) if the quantizer is O(1)
- int dim ( 0 );
+ uint dim ( 0 );
for (Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++ )
{
double v = *i;
@@ -1488,20 +1488,20 @@ void FastMinKernel::hikComputeKernelVector( const NICE::Vector & _xstar,
_kstar.set(0.0);

//let's start :)
- int dim ( 0 );
+ uint dim ( 0 );
for (NICE::Vector::const_iterator i = _xstar.begin(); i != _xstar.end(); i++, dim++)
{

double fval = *i;

- int nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
+ uint nrZeroIndices = this->X_sorted.getNumberOfZeroElementsPerDimension(dim);
if ( nrZeroIndices == this->ui_n ) {
// all features are zero so let us ignore them completely
continue;
}


- int position;
+ uint position;

//where is the example x^z_i located in
//the sorted array? -> perform binary search, runtime O(log(n))
@@ -1514,10 +1514,10 @@ void FastMinKernel::hikComputeKernelVector( const NICE::Vector & _xstar,

//run over the non-zero elements and add the corresponding entries to our kernel vector

- int count(nrZeroIndices);
+ uint count(nrZeroIndices);
for ( SortedVectorSparse<double>::const_elementpointer i = nonzeroElements.begin(); i != nonzeroElements.end(); i++, count++ )
{
- int origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
+ uint origIndex(i->second.first); //orig index (i->second.second would be the transformed feature value)
if (count <= position)
_kstar[origIndex] += i->first; //orig feature value
else
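
Note on the pattern touched by these hunks: every converted index (dim, qBin, index, position, count) feeds the quantized lookup-table evaluation, where a test feature value is quantized to a bin and the per-dimension entry Tlookup[dim*hmax + qBin] is accumulated in O(d), as in hik_kernel_sum_fast above. The following standalone sketch only illustrates that access pattern with unsigned indices; the names quantize and evaluateWithLookupTable are hypothetical and this is not the NICE implementation.

// Minimal sketch (assumption: not the library code) of the quantized
// lookup-table evaluation made unsigned by this patch.
#include <iostream>
#include <map>
#include <vector>

typedef unsigned int uint;

// Hypothetical uniform quantizer: maps a value in [0,1] to one of hmax bins.
uint quantize(double v, uint hmax)
{
  if (v <= 0.0) return 0;
  if (v >= 1.0) return hmax - 1;
  return static_cast<uint>(v * hmax);
}

// Hypothetical counterpart of hik_kernel_sum_fast: O(d) evaluation of the
// approximated kernel sum for a sparse test vector, given a flat lookup
// table of size d * hmax laid out dimension-major.
double evaluateWithLookupTable(const std::vector<double>& Tlookup,
                               uint hmax,
                               const std::map<uint, double>& xstar)
{
  double beta = 0.0;
  for (std::map<uint, double>::const_iterator i = xstar.begin(); i != xstar.end(); ++i)
  {
    uint dim   = i->first;           // unsigned dimension index, as in the patch
    double v   = i->second;
    uint qBin  = quantize(v, hmax);  // unsigned bin index, as in the patch
    beta += Tlookup[dim * hmax + qBin];
  }
  return beta;
}

int main()
{
  const uint d = 2, hmax = 4;
  // Toy table: entry (dim, bin) = dim + 0.1 * bin, purely for demonstration.
  std::vector<double> Tlookup(d * hmax);
  for (uint dim = 0; dim < d; ++dim)
    for (uint bin = 0; bin < hmax; ++bin)
      Tlookup[dim * hmax + bin] = dim + 0.1 * bin;

  std::map<uint, double> xstar;   // sparse test vector: dimension -> value
  xstar[0] = 0.3;                 // falls into bin 1
  xstar[1] = 0.9;                 // falls into bin 3

  std::cout << evaluateWithLookupTable(Tlookup, hmax, xstar) << std::endl; // prints 1.4
  return 0;
}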