
converted everything to new structure

Alexander Freytag, 12 years ago
parent commit cab1ea77bc
47 changed files with 4734 additions and 4634 deletions
  1. AdaptiveDirectionRandomSearchOptimizer.cpp (+826 -824)
  2. AdaptiveDirectionRandomSearchOptimizer.h (+273 -271)
  3. AdditionalIceUtils.cpp (+60 -54)
  4. AdditionalIceUtils.h (+6 -6)
  5. ArmijoLineSearcher.cpp (+74 -72)
  6. ArmijoLineSearcher.h (+88 -83)
  7. BFGSOptimizer.cpp (+22 -22)
  8. BFGSOptimizer.h (+70 -67)
  9. BrentLineSearcher.cpp (+2 -0)
  10. BrentLineSearcher.h (+87 -83)
  11. CombinatorialOptimizer.cpp (+281 -280)
  12. CombinatorialOptimizer.h (+168 -165)
  13. Constraints.cpp (+2 -2)
  14. Constraints.h (+3 -3)
  15. CostFunction_ndim_2ndOrder.cpp (+51 -51)
  16. CostFunction_ndim_2ndOrder.h (+92 -89)
  17. DerivativeBasedOptimizer.cpp (+44 -44)
  18. DerivativeBasedOptimizer.h (+115 -110)
  19. EmptyLog.h (+36 -34)
  20. FileLog.cpp (+1 -0)
  21. FileLog.h (+41 -38)
  22. GoldenCutLineSearcher.cpp (+96 -94)
  23. GoldenCutLineSearcher.h (+87 -83)
  24. GradientDescentOptimizer.cpp (+336 -336)
  25. GradientDescentOptimizer.h (+102 -99)
  26. LineSearcher.cpp (+158 -158)
  27. LineSearcher.h (+128 -124)
  28. MatrixIterativeOptimizer.cpp (+448 -444)
  29. MatrixIterativeOptimizer.h (+133 -129)
  30. NewtonMethodOptimizer.cpp (+15 -13)
  31. NewtonMethodOptimizer.h (+70 -68)
  32. OptTestSuite.cpp (+16 -14)
  33. OptTestSuite.h (+37 -34)
  34. Opt_Namespace.h (+3 -2)
  35. OptimizerToolBox.pdf (BIN)
  36. ParamLog.cpp (+7 -6)
  37. ParamLog.h (+46 -43)
  38. Plotter.cpp (+38 -38)
  39. Plotter.h (+103 -99)
  40. PowellBrentOptimizer.cpp (+296 -295)
  41. PowellBrentOptimizer.h (+101 -97)
  42. SimpleOptTestGrid.cpp (+46 -45)
  43. SimpleOptTestGrid.h (+83 -80)
  44. libdepend.inc (+1 -0)
  45. tests/MyCostFunction.cpp (+7 -5)
  46. tests/MyCostFunction.h (+13 -9)
  47. tests/TestGradientDescent.cpp (+22 -21)

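Most of the diff below is mechanical: indentation changes from tabs to two spaces, the old optimization namespace (from optimization/Opt_Namespace.h) is replaced by OPTIMIZATION (from core/optimization/blackbox/Definitions_core_opt.h), and matrix element access moves from chained m[i][j] indexing to m(i,j) calls. A minimal sketch of the new conventions, using a hypothetical helper (columnSum and its signature are illustrative, not part of the commit):

#include "core/optimization/blackbox/Definitions_core_opt.h"

// Hypothetical helper showing the migrated conventions: the OPTIMIZATION
// namespace replaces optimization, and operator()(row, col) replaces the
// old chained operator[] element access.
double columnSum(const OPTIMIZATION::matrix_type &m, int col)
{
  double sum = 0.0;
  for (int i = 0; i < m.rows(); ++i)
  {
    sum += m(i, col);  // old style: m[i][col]
  }
  return sum;
}
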
+ 826 - 824
AdaptiveDirectionRandomSearchOptimizer.cpp

@@ -14,427 +14,428 @@
 
 #include "numbase.h" // ice random
 #include "optimization/AdditionalIceUtils.h"
-#include "optimization/Opt_Namespace.h"
+#include "core/optimization/blackbox/Definitions_core_opt.h"
 
-using namespace optimization;
+using namespace OPTIMIZATION;
 
 AdaptiveDirectionRandomSearchOptimizer::AdaptiveDirectionRandomSearchOptimizer(
-	unsigned int numOfPoints,OptLogBase *loger)	: SuperClass(loger)
+  unsigned int numOfPoints,OptLogBase *loger)	: SuperClass(loger)
 {
-	m_numberOfParallelPoints = numOfPoints;
-
-	
-	m_b0 = 1.0;
-	m_bfac = 0.5;
-	m_bThresTimesNotDecreased = 0;
-	
-	m_bk = new double[m_numberOfParallelPoints];
-	m_pointsAccepted = new bool[m_numberOfParallelPoints];
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-	{
-		m_bk[j] = m_b0;
-		m_pointsAccepted[j]= false;
-	}
-	m_bBreak = 0.1;
-	m_backupActive = false;
-
-	
-	m_c0f = 0.75;
-	m_c0s = 0.75;
-	m_c1f = -0.75;
-	m_c1s = 1.25;
-	m_D = 3;
-
-	m_initFuncThreshActive = false;
-	m_initFuncTresh = 0.0;
-	
-	// advanced initialization is turned off per default
-	m_advancedInit= false;
+  m_numberOfParallelPoints = numOfPoints;
+
+  
+  m_b0 = 1.0;
+  m_bfac = 0.5;
+  m_bThresTimesNotDecreased = 0;
+  
+  m_bk = new double[m_numberOfParallelPoints];
+  m_pointsAccepted = new bool[m_numberOfParallelPoints];
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+  {
+    m_bk[j] = m_b0;
+    m_pointsAccepted[j]= false;
+  }
+  m_bBreak = 0.1;
+  m_backupActive = false;
+
+  
+  m_c0f = 0.75;
+  m_c0s = 0.75;
+  m_c1f = -0.75;
+  m_c1s = 1.25;
+  m_D = 3;
+
+  m_initFuncThreshActive = false;
+  m_initFuncTresh = 0.0;
+  
+  // advanced initialization is turned off per default
+  m_advancedInit= false;
 }
 
 
 AdaptiveDirectionRandomSearchOptimizer::AdaptiveDirectionRandomSearchOptimizer( const AdaptiveDirectionRandomSearchOptimizer &opt) : SuperClass(opt)
 {
 
-	m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
-	
-	m_Parameters = opt.m_Parameters;
-	m_b0		= opt.m_b0;
-	m_bfac		= opt.m_bfac;
-
-	m_bk = new double[m_numberOfParallelPoints];
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-	{
-		m_bk[j] = opt.m_bk[j];
-	}
-
-	m_pointsAccepted = new bool[m_numberOfParallelPoints];
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-	{
-		m_pointsAccepted[j] = opt.m_pointsAccepted[j];
-	}
-	
-	m_bBreak = opt.m_bBreak;
-	m_backupActive	= opt.m_backupActive;
-	m_backupPoint	= opt.m_backupPoint;
-	m_backupPointValue = opt.m_backupPointValue;
-
-	m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
-	m_c0f		= opt.m_c0f;
-	m_c0s		= opt.m_c0s;
-	m_c1f		= opt.m_c1f;
-	m_c1s		= opt.m_c1s;
-	m_D			= opt.m_D;
-	m_initFuncThreshActive = opt.m_initFuncThreshActive;
-	m_initFuncTresh = opt.m_initFuncTresh;
-	
-	// advanced init setup
-	m_advancedInit= opt.m_advancedInit;
-	m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
-	m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
+  m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
+  
+  m_Parameters = opt.m_Parameters;
+  m_b0		= opt.m_b0;
+  m_bfac		= opt.m_bfac;
+
+  m_bk = new double[m_numberOfParallelPoints];
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+  {
+    m_bk[j] = opt.m_bk[j];
+  }
+
+  m_pointsAccepted = new bool[m_numberOfParallelPoints];
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+  {
+    m_pointsAccepted[j] = opt.m_pointsAccepted[j];
+  }
+  
+  m_bBreak = opt.m_bBreak;
+  m_backupActive	= opt.m_backupActive;
+  m_backupPoint	= opt.m_backupPoint;
+  m_backupPointValue = opt.m_backupPointValue;
+
+  m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
+  m_c0f		= opt.m_c0f;
+  m_c0s		= opt.m_c0s;
+  m_c1f		= opt.m_c1f;
+  m_c1s		= opt.m_c1s;
+  m_D			= opt.m_D;
+  m_initFuncThreshActive = opt.m_initFuncThreshActive;
+  m_initFuncTresh = opt.m_initFuncTresh;
+  
+  // advanced init setup
+  m_advancedInit= opt.m_advancedInit;
+  m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
+  m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
 }
 
 /*
-	operator=
+  operator=
 */
 AdaptiveDirectionRandomSearchOptimizer & AdaptiveDirectionRandomSearchOptimizer::operator=(const AdaptiveDirectionRandomSearchOptimizer &opt)
 {
-		
-	/*
-			avoid self-copy
-	*/
-	if(this != &opt)
-	{
-		delete[] m_bk;
-		delete[] m_pointsAccepted;
-				
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(opt);
-
-			
-		/*
-			own values:
-		*/
-		m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
-		m_Parameters = opt.m_Parameters;
-		m_b0		= opt.m_b0;
-		m_bfac		= opt.m_bfac;
-
-		m_bBreak = opt.m_bBreak;
-
-		m_backupActive	= opt.m_backupActive;
-		m_backupPoint	= opt.m_backupPoint;
-		m_backupPointValue = opt.m_backupPointValue;
-
-		m_bk = new double[m_numberOfParallelPoints];
-		for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-		{
-			m_bk[j] = opt.m_bk[j];
-		}
-
-		m_pointsAccepted = new bool[m_numberOfParallelPoints];
-		for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-		{
-			m_pointsAccepted[j] = opt.m_pointsAccepted[j];
-		}
-		
-
-		m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
-		m_c0f		= opt.m_c0f;
-		m_c0s		= opt.m_c0s;
-		m_c1f		= opt.m_c1f;
-		m_c1s		= opt.m_c1s;
-		m_D			= opt.m_D;
-		m_initFuncThreshActive = opt.m_initFuncThreshActive;
-		m_initFuncTresh = opt.m_initFuncTresh;
-		
-		// advanced init setup
-		m_advancedInit= opt.m_advancedInit;
-		m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
-		m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
-		
-	}
-
-  	return *this;
+    
+  /*
+      avoid self-copy
+  */
+  if(this != &opt)
+  {
+    delete[] m_bk;
+    delete[] m_pointsAccepted;
+        
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(opt);
+
+      
+    /*
+      own values:
+    */
+    m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
+    m_Parameters = opt.m_Parameters;
+    m_b0		= opt.m_b0;
+    m_bfac		= opt.m_bfac;
+
+    m_bBreak = opt.m_bBreak;
+
+    m_backupActive	= opt.m_backupActive;
+    m_backupPoint	= opt.m_backupPoint;
+    m_backupPointValue = opt.m_backupPointValue;
+
+    m_bk = new double[m_numberOfParallelPoints];
+    for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+    {
+      m_bk[j] = opt.m_bk[j];
+    }
+
+    m_pointsAccepted = new bool[m_numberOfParallelPoints];
+    for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+    {
+      m_pointsAccepted[j] = opt.m_pointsAccepted[j];
+    }
+    
+
+    m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
+    m_c0f		= opt.m_c0f;
+    m_c0s		= opt.m_c0s;
+    m_c1f		= opt.m_c1f;
+    m_c1s		= opt.m_c1s;
+    m_D			= opt.m_D;
+    m_initFuncThreshActive = opt.m_initFuncThreshActive;
+    m_initFuncTresh = opt.m_initFuncTresh;
+    
+    // advanced init setup
+    m_advancedInit= opt.m_advancedInit;
+    m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
+    m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
+    
+  }
+
+    return *this;
 }
 
 
 AdaptiveDirectionRandomSearchOptimizer::~AdaptiveDirectionRandomSearchOptimizer()
 {
-	delete[] m_bk;
-	delete[] m_pointsAccepted;
+  delete[] m_bk;
+  delete[] m_pointsAccepted;
 }
 
 
 void AdaptiveDirectionRandomSearchOptimizer::init()
 {
-	
-	m_Parameters = matrix_type(m_numberOfParameters,m_numberOfParallelPoints);
-	
-	// if not set before, set default value
-	if(m_bThresTimesNotDecreased == 0)
-		m_bThresTimesNotDecreased = static_cast<unsigned int>(m_numberOfParameters * m_numberOfParameters*  5.0);
-	
-	SuperClass::init();
-
-	// "reset"
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-	{
-		m_bk[j] = m_b0;
-		m_pointsAccepted[j]= false;
-	}
-	m_backupActive = false;
-
-	/*
-		seed rand
-	*/
-	ice::Randomize();
-
-	/*
-		check if bounds are active! bounds are needed
-		to generate usefull random start points
-	*/
-	if(!(m_upperParameterBoundActive == true && m_lowerParameterBoundActive == true))
-	{
-		if(m_loger)
-		{
-			m_loger->logError("parameter bounds are not active! Please set	proper parameter bounds for the random start point generation. This event is has no further exception handling");
-		}
-		m_lowerParameterBound = m_parameters;
-		m_upperParameterBound = m_parameters;
-		m_lowerParameterBoundActive = true;
-		m_upperParameterBoundActive = true;
-
-	}
-
-	/*
-	generate random start points
-	*/
-	if(m_advancedInit == false)
-	{
-		for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
-		{
-			for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
-			{
-				m_Parameters[k][l] = m_parameters[k][0] + m_scales[k][0] *2.0* (ice::RandomD() - 0.5);
-				
-			}
-		}
-	}
-	else
-	{
-		// dimension check
-		assert(m_advancedinitLowerBounds.rows() == (int)m_numberOfParameters && m_advancedinitUpperBounds.rows() == (int)m_numberOfParameters);
-		
-		for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
-		{
-			for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
-			{
-				m_Parameters[k][l] = m_advancedinitLowerBounds[k][0] +ice::RandomD() * (m_advancedinitUpperBounds[k][0] - m_advancedinitLowerBounds[k][0] ) ;
-			}
-		}
-	}
-
-
-	/*
-		evaluate SET !!
-	*/
-	m_CurrentCostFunctionValues = evaluateSetCostFunction(m_Parameters);
-	/*
-		If the threshold was set, check if all points are below the threshold
-	*/
-	if(m_initFuncThreshActive)
-	{
-		
-		bool pointOk=false;
-		for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
-		{
-			/*
-				if the are not ok ... generate new points for those, who arent..
-			*/
-			if(m_CurrentCostFunctionValues[u][0] < m_initFuncTresh)
-			{
-				pointOk = true;
-			}
-			else
-			{
-				pointOk = false;
-			}
-			
-			while(pointOk == false)
-			{
-				for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
-				{
-					m_Parameters[k][u] = m_parameters[k][0] + m_scales[k][0] * 2.0*(ice::RandomD() - 0.5);
-						
-				}
-				
-				/*
-					reevaluate the value and check against threshold
-				*/
-				//double tmpNewValue = evaluateCostFunction(m_Parameters.Sub(m_numberOfParameters,1,0,u));
-				double tmpNewValue = evaluateCostFunction(m_Parameters(0,u,m_numberOfParameters-1,u));
-			
-				/*
-					if point is ok now go to next point
-				*/
-				if(tmpNewValue < m_initFuncTresh)
-				{
-					m_CurrentCostFunctionValues[u][0] = tmpNewValue;
-					pointOk = true;				
-				}
-
-			} // while point not ok
-		} // for all points
-	} // if threshold active
-
-	/*if(m_verbose)
-	{
-		std::cout << "AdaptiveDirectionRandomSearch :: Initial parameterSet: ";
-		for(int l=0;l<m_numberOfParallelPoints;l++)
-		{
-			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-			{
-				std::cout<< m_Parameters[r][l] << " ";
-			}
-			std::cout << m_CurrentCostFunctionValues[l][0]  << std::endl;
-		}
-		std::cout << std::endl;
-
-
-		std::cout << "Number of Evaluations needed for a proper initilization: " << m_costFunction->getNumberOfEvaluations() << std::endl;
-	
-		
-	}*/
-
-
-
-	/*
-		(re)set m_bk
-	*/
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
-	{
-		m_bk[j] = m_b0;
-	}
+  
+  m_Parameters = OPTIMIZATION::matrix_type(m_numberOfParameters,m_numberOfParallelPoints);
+  
+  // if not set before, set default value
+  if(m_bThresTimesNotDecreased == 0)
+    m_bThresTimesNotDecreased = static_cast<unsigned int>(m_numberOfParameters * m_numberOfParameters*  5.0);
+  
+  SuperClass::init();
+
+  // "reset"
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+  {
+    m_bk[j] = m_b0;
+    m_pointsAccepted[j]= false;
+  }
+  m_backupActive = false;
+
+  /*
+    seed rand
+  */
+  ice::Randomize();
+
+  /*
+    check if bounds are active! bounds are needed
+    to generate usefull random start points
+  */
+  if(!(m_upperParameterBoundActive == true && m_lowerParameterBoundActive == true))
+  {
+    if(m_loger)
+    {
+      m_loger->logError("parameter bounds are not active! Please set	proper parameter bounds for the random start point generation. This event is has no further exception handling");
+    }
+    m_lowerParameterBound = m_parameters;
+    m_upperParameterBound = m_parameters;
+    m_lowerParameterBoundActive = true;
+    m_upperParameterBoundActive = true;
+
+  }
+
+  /*
+  generate random start points
+  */
+  if(m_advancedInit == false)
+  {
+    for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+    {
+      for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
+      {
+        m_Parameters(k,l) = m_parameters(k,0) + m_scales(k,0) *2.0* (ice::RandomD() - 0.5);
+        
+      }
+    }
+  }
+  else
+  {
+    // dimension check
+    assert(m_advancedinitLowerBounds.rows() == (int)m_numberOfParameters && m_advancedinitUpperBounds.rows() == (int)m_numberOfParameters);
+    
+    for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+    {
+      for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
+      {
+        m_Parameters(k,l) = m_advancedinitLowerBounds(k,0) +ice::RandomD() * (m_advancedinitUpperBounds(k,0) - m_advancedinitLowerBounds(k,0) ) ;
+      }
+    }
+  }
+
+
+  /*
+    evaluate SET !!
+  */
+  m_CurrentCostFunctionValues = evaluateSetCostFunction(m_Parameters);
+  /*
+    If the threshold was set, check if all points are below the threshold
+  */
+  if(m_initFuncThreshActive)
+  {
+    
+    bool pointOk=false;
+    for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+    {
+      /*
+        if the are not ok ... generate new points for those, who arent..
+      */
+      if(m_CurrentCostFunctionValues(u,0) < m_initFuncTresh)
+      {
+        pointOk = true;
+      }
+      else
+      {
+        pointOk = false;
+      }
+      
+      while(pointOk == false)
+      {
+        for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+        {
+          m_Parameters(k,u) = m_parameters(k,0) + m_scales(k,0) * 2.0*(ice::RandomD() - 0.5);
+            
+        }
+        
+        /*
+          reevaluate the value and check against threshold
+        */
+        //double tmpNewValue = evaluateCostFunction(m_Parameters.Sub(m_numberOfParameters,1,0,u));
+        double tmpNewValue = evaluateCostFunction(m_Parameters(0,u,m_numberOfParameters-1,u));
+      
+        /*
+          if point is ok now go to next point
+        */
+        if(tmpNewValue < m_initFuncTresh)
+        {
+          m_CurrentCostFunctionValues(u,0) = tmpNewValue;
+          pointOk = true;				
+        }
+
+      } // while point not ok
+    } // for all points
+  } // if threshold active
+
+  /*if(m_verbose)
+  {
+    std::cout << "AdaptiveDirectionRandomSearch :: Initial parameterSet: ";
+    for(int l=0;l<m_numberOfParallelPoints;l++)
+    {
+      for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+      {
+        std::cout<< m_Parameters(r,l) << " ";
+      }
+      std::cout << m_CurrentCostFunctionValues(l,0)  << std::endl;
+    }
+    std::cout << std::endl;
+
+
+    std::cout << "Number of Evaluations needed for a proper initilization: " << m_costFunction->getNumberOfEvaluations() << std::endl;
+  
+    
+  }*/
+
+
+
+  /*
+    (re)set m_bk
+  */
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+  {
+    m_bk[j] = m_b0;
+  }
 
 }
 
 
 bool AdaptiveDirectionRandomSearchOptimizer::setControlSeqParams(double b0, double bfac,
-	unsigned int bThresTimesNotDecreased,double bBreak)
+  unsigned int bThresTimesNotDecreased,double bBreak)
 {
-	if(b0 <= 0 || bfac <= 0 || bfac > 1 || bThresTimesNotDecreased == 0 || bBreak <= 0)
-	{
-		return false;
-	}
-	
-	m_b0 = b0;
-	m_bfac = bfac;
-	m_bThresTimesNotDecreased = bThresTimesNotDecreased;
-	m_bBreak = bBreak;
-	
-	return true;
+  if(b0 <= 0 || bfac <= 0 || bfac > 1 || bThresTimesNotDecreased == 0 || bBreak <= 0)
+  {
+    return false;
+  }
+  
+  m_b0 = b0;
+  m_bfac = bfac;
+  m_bThresTimesNotDecreased = bThresTimesNotDecreased;
+  m_bBreak = bBreak;
+  
+  return true;
 }
 
 
-void AdaptiveDirectionRandomSearchOptimizer::activateAdvancedInit(bool enable, matrix_type& lowerBounds, matrix_type& upperBounds)
+void AdaptiveDirectionRandomSearchOptimizer::activateAdvancedInit(bool enable, OPTIMIZATION::matrix_type& lowerBounds, OPTIMIZATION::matrix_type& upperBounds)
 {
-	m_advancedInit= enable;
-	m_advancedinitLowerBounds= lowerBounds;
-	m_advancedinitUpperBounds= upperBounds;
+  m_advancedInit= enable;
+  m_advancedinitLowerBounds= lowerBounds;
+  m_advancedinitUpperBounds= upperBounds;
 }
 
 
-matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoint()
+OPTIMIZATION::matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoint()
 {
-	matrix_type newPoint(m_numberOfParameters,1);
-
-	for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
-	{
-		if(m_scales[i][0] > 0.0 )
-		{
-			newPoint[i][0] = m_scales[i][0] * 2.0*(ice::RandomD() - 0.5);
-		}
-	}
-	//double div=newPoint.Norm(0);
-	double div=newPoint.Norm(0);
-	
-	if (div > 1.0e-50)
-	{
-		newPoint = newPoint * (1.0/div);
-	}
-	else
-	{
-		newPoint=this->generatePoint();
-	}
-
-	return newPoint;
+  OPTIMIZATION::matrix_type newPoint(m_numberOfParameters,1);
+
+  for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+  {
+    if(m_scales(i,0) > 0.0 )
+    {
+      newPoint(i,0) = m_scales(i,0) * 2.0*(ice::RandomD() - 0.5);
+    }
+  }
+
+//   double div=newPoint.frobeniusNorm();
+  double div=newPoint.frobeniusNorm();
+  
+  if (div > 1.0e-50)
+  {
+    newPoint = newPoint * (1.0/div);
+  }
+  else
+  {
+    newPoint=this->generatePoint();
+  }
+
+  return newPoint;
 }	
 
 
-matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoints()
+OPTIMIZATION::matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoints()
 {
-	matrix_type newPoints(m_numberOfParameters,m_numberOfParallelPoints);
-	matrix_type newPoint(m_numberOfParameters,1);
-
-	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
-	{
-		newPoint = this->generatePoint();
-	
-		for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
-		{
-			newPoints[i][j] = newPoint[i][0];
-		}
-	}
-
-	return newPoints;
+  OPTIMIZATION::matrix_type newPoints(m_numberOfParameters,m_numberOfParallelPoints);
+  OPTIMIZATION::matrix_type newPoint(m_numberOfParameters,1);
+
+  for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+  {
+    newPoint = this->generatePoint();
+  
+    for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+    {
+      newPoints(i,j) = newPoint(i,0);
+    }
+  }
+
+  return newPoints;
 }	
 
 bool AdaptiveDirectionRandomSearchOptimizer::setRecallParams(
-	double c0s,
-	double c1s,
-	double c0f,
-	double c1f,
-	double D)
+  double c0s,
+  double c1s,
+  double c0f,
+  double c1f,
+  double D)
 {
-	if (c0s < 0			||
-		c0s >=1			||
-		c1s <0			||
-		c0s+c1s <= 1	||
-		c0f <= 0		||
-		c0f >= 1		||
-		c1f >= 0		||
-		c0f + c1f < -1.0||
-		c0f + c1f > 1.0 || 
-		D <= 0.0)
-	{
-		return false;
-	}
-
-	m_c0s = c0s;
-	m_c1s = c1s;
-	m_c0f = c0f;
-	m_c1f = c1f;
-	m_D	  = D;
-	
-	return true;
+  if (c0s < 0			||
+    c0s >=1			||
+    c1s <0			||
+    c0s+c1s <= 1	||
+    c0f <= 0		||
+    c0f >= 1		||
+    c1f >= 0		||
+    c0f + c1f < -1.0||
+    c0f + c1f > 1.0 || 
+    D <= 0.0)
+  {
+    return false;
+  }
+
+  m_c0s = c0s;
+  m_c1s = c1s;
+  m_c0f = c0f;
+  m_c1f = c1f;
+  m_D	  = D;
+  
+  return true;
 }
 
-void AdaptiveDirectionRandomSearchOptimizer::acceptPoints(matrix_type oldValues, matrix_type newValues)
+void AdaptiveDirectionRandomSearchOptimizer::acceptPoints(OPTIMIZATION::matrix_type oldValues, OPTIMIZATION::matrix_type newValues)
 {
-	for(int i = 0;i< static_cast<int>(m_numberOfParallelPoints);i++)
-	{
-		if(newValues[i][0] < oldValues[i][0])
-		{
-			m_pointsAccepted[i]=true;
-		}
-		else
-		{
-			m_pointsAccepted[i]=false;
-		}
-	}
+  for(int i = 0;i< static_cast<int>(m_numberOfParallelPoints);i++)
+  {
+    if(newValues(i,0) < oldValues(i,0))
+    {
+      m_pointsAccepted[i]=true;
+    }
+    else
+    {
+      m_pointsAccepted[i]=false;
+    }
+  }
 }
 
 
@@ -442,461 +443,462 @@ int AdaptiveDirectionRandomSearchOptimizer::optimize()
 {
 
 
-	init();
-
-
-	if(m_loger)
-		m_loger->logTrace("ADRS: starting optimization ... \n");
-
-	/*
-		start time criteria
-	*/
-	m_startTime = clock();
-
-
-	bool abort = false;
-
-	/*
-		declare and initialize algorithm specific local variables
-	*/
-	matrix_type		newPoints;
-	matrix_type		newValues;
-	//matrix_type		oldValues;
-
-	matrix_type		deltaX(m_numberOfParameters,m_numberOfParallelPoints);
-	matrix_type		deltaXold(m_numberOfParameters,m_numberOfParallelPoints);
-	
-	matrix_type		dk(m_numberOfParameters,m_numberOfParallelPoints);
-	matrix_type		dkold(m_numberOfParameters,m_numberOfParallelPoints);
-	
-
-
-	int i = 0;
-	int j = 0;
-
-	/*
-		begin with the first step outside the loop
-	*/
-	m_numIter++;
-	unsigned int *timesNotDecreased = new unsigned int[m_numberOfParallelPoints];
-	
-	for(int k = 0; k< static_cast<int>(m_numberOfParallelPoints);k++)
-	{
-		timesNotDecreased[k] = 0; 
-	}
-
-
-	/*
-		generate a new delta X (potential step)
-	*/
-	matrix_type tmp = generatePoints();
-	for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
-	{
-		for(i = 0;i < static_cast<int>(m_numberOfParameters);i++)
-		{
-			deltaX[i][j] = tmp[i][j] * m_bk[j] ;
-		}
-	}
-
-	/*
-		check costfunction at potential new point
-	*/
-	newPoints = m_Parameters + deltaX;
-	newValues = evaluateSetCostFunction(newPoints);
-
-	/*
-		are the new points better?
-	*/
-	acceptPoints(m_CurrentCostFunctionValues,newValues);
-	
-	for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
-	{
-		if(m_pointsAccepted[j] == true)
-		{
-			for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
-			{
-				/*
-					set the new point
-				*/
-				m_Parameters[i][j] = newPoints[i][j];
-
-				/*
-					update the recall factor
-				*/
-				dk[i][j] = dkold[i][j] * m_c0s + deltaXold[i][j] * m_c1s;
-			
-			}
-			m_CurrentCostFunctionValues[j][0] = newValues[j][0];
-
-			/*
-				reset the counter for failed attempts
-			*/
-			timesNotDecreased[j] = 0;
-		}
-		else
-		{
-			for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
-			{
-				/*
-					update the recall factor
-				*/
-				dk[i][j] = dkold[i][j] * m_c0f + deltaXold[i][j] * m_c1f;
-			
-			}
-			
-			/*
-				raise the counter for failed attempts
-			*/
-			timesNotDecreased[j] ++;
-		}
-	}
-
-	/* 
-		do the optimization in the main loop
-	*/
-	while(abort == false)
-	{
-		/*
-			increase iteration counter
-		*/
-		m_numIter++;
-		
-		/*
-			Check iteration limit
-		*/
-		if(m_maxNumIterActive)
-		{
-			if(m_numIter >= m_maxNumIter)
-			{
-				/* set according return status and return */
-				m_returnReason = SUCCESS_MAXITER;
-				abort = true;
-				continue;
-			}
-		}
-		
-		/*
-			save the old deltaX
-		*/
-		deltaXold = deltaX;
-
-		/*
-			generate a new delta X (potential step)
-		*/
-		matrix_type tmp = generatePoints();
-		for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
-		{
-			for(i = 0; i < static_cast<int>(m_numberOfParameters);i++)
-			{
-				deltaX[i][j] = dk[i][j] + tmp[i][j] * m_bk[j] ;
-			}
-		}
-		
-		/*
-			check costfunction at potential new point
-		*/
-		newPoints = m_Parameters + deltaX;
-		newValues = evaluateSetCostFunction(newPoints);
-
-		/*
-			save the old dk
-		*/
-		dkold = dk;
-
-
-		/*
-			are the new points better?
-		*/
-		acceptPoints(m_CurrentCostFunctionValues,newValues);
-		
-		for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
-		{
-			if(m_pointsAccepted[j] == true)
-			{
-				for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
-				{
-					/*
-						set the new point
-					*/
-					m_Parameters[i][j] = newPoints[i][j];
-					
-					/*
-						update the recall factor
-					*/
-					dk[i][j] = dkold[i][j] * m_c0s + deltaXold[i][j] * m_c1s;
-					
-				}
-				m_CurrentCostFunctionValues[j][0] = newValues[j][0];
-				
-				/*
-					reset the counter for failed attempts
-				*/
-				timesNotDecreased[j] = 0;
-			}
-			else
-			{
-				for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
-				{
-					/*
-						update the recall factor
-					*/
-					dk[i][j] = dkold[i][j] * m_c0f + deltaXold[i][j] * m_c1f;
-					
-				}
-				
-				/*
-				raise the counter for failed attempts
-				*/
-				timesNotDecreased[j] ++;
-			}
-
-			/*
-				scaledown m_bk if there was no improvement for a certain time
-			*/
-			if(timesNotDecreased[j] >= m_bThresTimesNotDecreased)
-			{
-				m_bk[j] = m_bk[j] * m_bfac;
-				timesNotDecreased[j] = 0;
-			}
-
-			/*
-				if m_bk < m_bBreak .. 
-			*/
-			if( m_bk[j]  < m_bBreak )
-			{
-				/*  */
-				if(m_backupActive)
-				{
-					if(m_CurrentCostFunctionValues[j][0] < m_backupPointValue)
-					{
-						//m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
-						m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
-						m_backupPointValue = m_CurrentCostFunctionValues[j][0];
-					}
-				}
-				else
-				{
-					//m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
-					m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
-					m_backupPointValue = m_CurrentCostFunctionValues[j][0];
-					m_backupActive = true;
-				}
-				
-				/*
-					reset counters
-				*/
-				m_bk[j] = m_b0;
-				timesNotDecreased[j] = 0;
-
-				matrix_type resProb = m_CurrentCostFunctionValues;
-				double maxVal=m_CurrentCostFunctionValues.MaxVal();
-				for(int i=0; i < (int)m_numberOfParallelPoints; ++i)
-				{
-					resProb[i][0] -= maxVal;
-				}
-				
-				double denom = MatrixSum(resProb);
-				/*
-				ensure numerical stability
-				*/
-				if( fabs(denom) < 1.0e-50)
-				{
-					denom = denom < 0.0 ? -1.0e-50 : 1.0e-50;
-				}
-
-				resProb = resProb * (1.0/denom);
-					
-				
-				double sum = 0.0;
-				for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
-				{
-					sum += resProb[u][0];
-					resProb[u][0] = sum;
-				}
-
-				/*
-					generate random number [0,1]
-				*/
-				double choice = ice::RandomD();
-				int u_chosen = 0;
-
-				for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
-				{
-					u_chosen = u;
-					
-					if( choice < resProb[u][0] )
-					{
-						break;
-					}
-				}
-
-				/*
-					set m_parameters 
-				*/
-				for(int v = 0; v < static_cast<int>(m_numberOfParameters); v++)
-				{
-					m_Parameters[v][j]=m_Parameters[v][u_chosen];
-
-				}
-				
-				m_CurrentCostFunctionValues[j][0] = m_CurrentCostFunctionValues[u_chosen][0];
-								
-			}
-
-
-			/*
-				dk has to be <= D * m_bk
-			*/
-			//double norm= dk(0,j,m_numberOfParameters-1,j).Norm(0);
-			double norm= dk(0,j,m_numberOfParameters-1,j).Norm(0);
-			if( norm >= m_D * m_bk[j])
-			{
-				if(norm < 1.0e-50)
-				{
-					//m_loger->logWarning("ADRS Optimizer: Warning Computation gets unstable");
-					norm = 1.0e-50;
-				}
-				
-				for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
-				{
-					/*
-						update the recall factor
-					*/
-					dk[i][j] = dk[i][j] * 1.0/norm;
-					
-				}
-			}
-		}
-		
-		if(m_verbose)
-		{
-			std::cout << "# AdaptiveDirectionRandomSearch :: parameterSet: ";
-			for(int l=0;l < static_cast<int>(m_numberOfParallelPoints);l++)
-			{
-				for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-				{
-					std::cout<< m_Parameters[r][l] << " ";
-				}
-				std::cout << m_bk[l] << " "<< 
-					m_CurrentCostFunctionValues[l][0]  << std::endl;
-			}
-			std::cout <<"# "<< std::endl;
-		}
-		
-		
-		//	fixme wacker for plotting
-		/*
-		for(int l=0;l<m_numberOfParallelPoints;l++)
-		{
-				for(int r = 0; r < 2; r++)
-				{
-					std::cout<< m_Parameters[r][l] << " ";
-				}
-				std::cout << m_CurrentCostFunctionValues[l][0]  << std::endl;
-		}
+  init();
+
+
+  if(m_loger)
+    m_loger->logTrace("ADRS: starting optimization ... \n");
+
+  /*
+    start time criteria
+  */
+  m_startTime = clock();
+
+
+  bool abort = false;
+
+  /*
+    declare and initialize algorithm specific local variables
+  */
+  OPTIMIZATION::matrix_type		newPoints;
+  OPTIMIZATION::matrix_type		newValues;
+  //OPTIMIZATION::matrix_type		oldValues;
+
+  OPTIMIZATION::matrix_type		deltaX(m_numberOfParameters,m_numberOfParallelPoints);
+  OPTIMIZATION::matrix_type		deltaXold(m_numberOfParameters,m_numberOfParallelPoints);
+  
+  OPTIMIZATION::matrix_type		dk(m_numberOfParameters,m_numberOfParallelPoints);
+  OPTIMIZATION::matrix_type		dkold(m_numberOfParameters,m_numberOfParallelPoints);
+  
+
+
+  int i = 0;
+  int j = 0;
+
+  /*
+    begin with the first step outside the loop
+  */
+  m_numIter++;
+  unsigned int *timesNotDecreased = new unsigned int[m_numberOfParallelPoints];
+  
+  for(int k = 0; k< static_cast<int>(m_numberOfParallelPoints);k++)
+  {
+    timesNotDecreased[k] = 0; 
+  }
+
+
+  /*
+    generate a new delta X (potential step)
+  */
+  OPTIMIZATION::matrix_type tmp = generatePoints();
+  for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
+  {
+    for(i = 0;i < static_cast<int>(m_numberOfParameters);i++)
+    {
+      deltaX(i,j) = tmp(i,j) * m_bk[j] ;
+    }
+  }
+
+  /*
+    check costfunction at potential new point
+  */
+  newPoints = m_Parameters + deltaX;
+  newValues = evaluateSetCostFunction(newPoints);
+
+  /*
+    are the new points better?
+  */
+  acceptPoints(m_CurrentCostFunctionValues,newValues);
+  
+  for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+  {
+    if(m_pointsAccepted[j] == true)
+    {
+      for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+      {
+        /*
+          set the new point
+        */
+        m_Parameters(i,j) = newPoints(i,j);
+
+        /*
+          update the recall factor
+        */
+        dk(i,j) = dkold(i,j) * m_c0s + deltaXold(i,j) * m_c1s;
+      
+      }
+      m_CurrentCostFunctionValues(j,0) = newValues(j,0);
+
+      /*
+        reset the counter for failed attempts
+      */
+      timesNotDecreased[j] = 0;
+    }
+    else
+    {
+      for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
+      {
+        /*
+          update the recall factor
+        */
+        dk(i,j) = dkold(i,j) * m_c0f + deltaXold(i,j) * m_c1f;
+      
+      }
+      
+      /*
+        raise the counter for failed attempts
+      */
+      timesNotDecreased[j] ++;
+    }
+  }
+
+  /* 
+    do the optimization in the main loop
+  */
+  while(abort == false)
+  {
+    /*
+      increase iteration counter
+    */
+    m_numIter++;
+    
+    /*
+      Check iteration limit
+    */
+    if(m_maxNumIterActive)
+    {
+      if(m_numIter >= m_maxNumIter)
+      {
+        /* set according return status and return */
+        m_returnReason = SUCCESS_MAXITER;
+        abort = true;
+        continue;
+      }
+    }
+    
+    /*
+      save the old deltaX
+    */
+    deltaXold = deltaX;
+
+    /*
+      generate a new delta X (potential step)
+    */
+    OPTIMIZATION::matrix_type tmp = generatePoints();
+    for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
+    {
+      for(i = 0; i < static_cast<int>(m_numberOfParameters);i++)
+      {
+        deltaX(i,j) = dk(i,j) + tmp(i,j) * m_bk[j] ;
+      }
+    }
+    
+    /*
+      check costfunction at potential new point
+    */
+    newPoints = m_Parameters + deltaX;
+    newValues = evaluateSetCostFunction(newPoints);
+
+    /*
+      save the old dk
+    */
+    dkold = dk;
+
+
+    /*
+      are the new points better?
+    */
+    acceptPoints(m_CurrentCostFunctionValues,newValues);
+    
+    for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+    {
+      if(m_pointsAccepted[j] == true)
+      {
+        for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
+        {
+          /*
+            set the new point
+          */
+          m_Parameters(i,j) = newPoints(i,j);
+          
+          /*
+            update the recall factor
+          */
+          dk(i,j) = dkold(i,j) * m_c0s + deltaXold(i,j) * m_c1s;
+          
+        }
+        m_CurrentCostFunctionValues(j,0) = newValues(j,0);
+        
+        /*
+          reset the counter for failed attempts
+        */
+        timesNotDecreased[j] = 0;
+      }
+      else
+      {
+        for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+        {
+          /*
+            update the recall factor
+          */
+          dk(i,j) = dkold(i,j) * m_c0f + deltaXold(i,j) * m_c1f;
+          
+        }
+        
+        /*
+        raise the counter for failed attempts
+        */
+        timesNotDecreased[j] ++;
+      }
+
+      /*
+        scaledown m_bk if there was no improvement for a certain time
+      */
+      if(timesNotDecreased[j] >= m_bThresTimesNotDecreased)
+      {
+        m_bk[j] = m_bk[j] * m_bfac;
+        timesNotDecreased[j] = 0;
+      }
+
+      /*
+        if m_bk < m_bBreak .. 
+      */
+      if( m_bk[j]  < m_bBreak )
+      {
+        /*  */
+        if(m_backupActive)
+        {
+          if(m_CurrentCostFunctionValues(j,0) < m_backupPointValue)
+          {
+            //m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
+            m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
+            m_backupPointValue = m_CurrentCostFunctionValues(j,0);
+          }
+        }
+        else
+        {
+          //m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
+          m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
+          m_backupPointValue = m_CurrentCostFunctionValues(j,0);
+          m_backupActive = true;
+        }
+        
+        /*
+          reset counters
+        */
+        m_bk[j] = m_b0;
+        timesNotDecreased[j] = 0;
+
+        OPTIMIZATION::matrix_type resProb = m_CurrentCostFunctionValues;
+        double maxVal=m_CurrentCostFunctionValues.Max();
+        for(int i=0; i < (int)m_numberOfParallelPoints; ++i)
+        {
+          resProb(i,0) -= maxVal;
+        }
+        
+        double denom = MatrixSum(resProb);
+        /*
+        ensure numerical stability
+        */
+        if( fabs(denom) < 1.0e-50)
+        {
+          denom = denom < 0.0 ? -1.0e-50 : 1.0e-50;
+        }
+
+        resProb = resProb * (1.0/denom);
+          
+        
+        double sum = 0.0;
+        for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+        {
+          sum += resProb(u,0);
+          resProb(u,0) = sum;
+        }
+
+        /*
+          generate random number [0,1]
+        */
+        double choice = ice::RandomD();
+        int u_chosen = 0;
+
+        for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+        {
+          u_chosen = u;
+          
+          if( choice < resProb(u,0) )
+          {
+            break;
+          }
+        }
+
+        /*
+          set m_parameters 
+        */
+        for(int v = 0; v < static_cast<int>(m_numberOfParameters); v++)
+        {
+          m_Parameters(v,j)=m_Parameters(v,u_chosen);
+
+        }
+        
+        m_CurrentCostFunctionValues(j,0) = m_CurrentCostFunctionValues(u_chosen,0);
+                
+      }
+
+
+      /*
+        dk has to be <= D * m_bk
+      */
+//       double norm= dk(0,j,m_numberOfParameters-1,j).frobeniusNorm();
+      double norm= dk(0,j,m_numberOfParameters-1,j).frobeniusNorm();
+      
+      if( norm >= m_D * m_bk[j])
+      {
+        if(norm < 1.0e-50)
+        {
+          //m_loger->logWarning("ADRS Optimizer: Warning Computation gets unstable");
+          norm = 1.0e-50;
+        }
+        
+        for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+        {
+          /*
+            update the recall factor
+          */
+          dk(i,j) = dk(i,j) * 1.0/norm;
+          
+        }
+      }
+    }
+    
+    if(m_verbose)
+    {
+      std::cout << "# AdaptiveDirectionRandomSearch :: parameterSet: ";
+      for(int l=0;l < static_cast<int>(m_numberOfParallelPoints);l++)
+      {
+        for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+        {
+          std::cout<< m_Parameters(r,l) << " ";
+        }
+        std::cout << m_bk[l] << " "<< 
+          m_CurrentCostFunctionValues(l,0)  << std::endl;
+      }
+      std::cout <<"# "<< std::endl;
+    }
+    
+    
+    //	fixme wacker for plotting
+    /*
+    for(int l=0;l<m_numberOfParallelPoints;l++)
+    {
+        for(int r = 0; r < 2; r++)
+        {
+          std::cout<< m_Parameters(r,l) << " ";
+        }
+        std::cout << m_CurrentCostFunctionValues(l,0)  << std::endl;
+    }
 
 */
-		
-		/*
-			Check if it is in bounds, maxSeconds
-		*/
-		/*
-		if(!checkParameters(m_parameters))
-		{
-			// set according return status and the last parameters and return 
-			m_returnReason = ERROR_XOUTOFBOUNDS;
-			abort = true;
-		}*/
-
-		/*
-			check kind of paramtol
-		*/
-		if(m_paramTolActive)
-		{
-			if(m_numberOfParallelPoints > 1 )
-			{
-				/*
-					check if distance from one point to all others is below a the threshold
-				*/
-				matrix_type paramSet = m_Parameters;
-				bool abortNow = true;
-				
-				for(int e = 0; e < static_cast<int>(m_numberOfParallelPoints);e++)
-				{
-					if(	(paramSet(0,e,m_numberOfParameters-1,e) 	- paramSet(0,0,m_numberOfParameters-1,0)).Norm(0) > m_paramTol)
-					{
-						abortNow = false;
-					}
-				}
-				if(abortNow)
-				{
-					abort = true;
-					m_returnReason = SUCCESS_PARAMTOL;
-				}
-			}
-
-		}
-
-
-		/*
-			Check Optimization Timelimit, if active
-		*/
-		if(m_maxSecondsActive)
-		{
-			m_currentTime = clock();
-
-			/* time limit exceeded ? */
-			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
-			{
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_TIMELIMIT;
-				abort = true;
-				continue;
-			}
-		}
-
-	}
-
-	
-	/*
-		find the best value..
-	*/
-	unsigned int u_best = 0;
-	for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
-	{
-		if( m_CurrentCostFunctionValues[u][0] < m_CurrentCostFunctionValues[u_best][0] )
-		{
-			u_best = u;
-		}
-		
-	}
-
-
-	/*
-		regular points include the best one 
-	*/
-	//m_parameters = m_Parameters.Sub(m_numberOfParameters,1,0,u_best);
-	m_parameters = m_Parameters(0,u_best,m_numberOfParameters-1,u_best);
-	m_currentCostFunctionValue = m_CurrentCostFunctionValues[u_best][0];
-	
-	if (m_backupActive)
-	{
-		/*
-			compare with backup point
-		*/
-		if( m_backupPointValue < m_CurrentCostFunctionValues[u_best][0] )
-		{
-			/*
-				backup point is best
-			*/
-			m_parameters = m_backupPoint;
-			m_currentCostFunctionValue = m_backupPointValue;
-		}
-	
-	}
-	
-	delete[] timesNotDecreased;
-
-	return m_returnReason;
+    
+    /*
+      Check if it is in bounds, maxSeconds
+    */
+    /*
+    if(!checkParameters(m_parameters))
+    {
+      // set according return status and the last parameters and return 
+      m_returnReason = ERROR_XOUTOFBOUNDS;
+      abort = true;
+    }*/
+
+    /*
+      check kind of paramtol
+    */
+    if(m_paramTolActive)
+    {
+      if(m_numberOfParallelPoints > 1 )
+      {
+        /*
+          check if distance from one point to all others is below a the threshold
+        */
+        OPTIMIZATION::matrix_type paramSet = m_Parameters;
+        bool abortNow = true;
+        
+        for(int e = 0; e < static_cast<int>(m_numberOfParallelPoints);e++)
+        {
+          if(	(paramSet(0,e,m_numberOfParameters-1,e) 	- paramSet(0,0,m_numberOfParameters-1,0)).frobeniusNorm() > m_paramTol)
+          {
+            abortNow = false;
+          }
+        }
+        if(abortNow)
+        {
+          abort = true;
+          m_returnReason = SUCCESS_PARAMTOL;
+        }
+      }
+
+    }
+
+
+    /*
+      Check Optimization Timelimit, if active
+    */
+    if(m_maxSecondsActive)
+    {
+      m_currentTime = clock();
+
+      /* time limit exceeded ? */
+      if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+      {
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_TIMELIMIT;
+        abort = true;
+        continue;
+      }
+    }
+
+  }
+
+  
+  /*
+    find the best value..
+  */
+  unsigned int u_best = 0;
+  for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+  {
+    if( m_CurrentCostFunctionValues(u,0) < m_CurrentCostFunctionValues(u_best,0) )
+    {
+      u_best = u;
+    }
+    
+  }
+
+
+  /*
+    regular points include the best one 
+  */
+  //m_parameters = m_Parameters.Sub(m_numberOfParameters,1,0,u_best);
+  m_parameters = m_Parameters(0,u_best,m_numberOfParameters-1,u_best);
+  m_currentCostFunctionValue = m_CurrentCostFunctionValues(u_best,0);
+  
+  if (m_backupActive)
+  {
+    /*
+      compare with backup point
+    */
+    if( m_backupPointValue < m_CurrentCostFunctionValues(u_best,0) )
+    {
+      /*
+        backup point is best
+      */
+      m_parameters = m_backupPoint;
+      m_currentCostFunctionValue = m_backupPointValue;
+    }
+  
+  }
+  
+  delete[] timesNotDecreased;
+
+  return m_returnReason;
 
 }

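Beyond the namespace and accessor changes, this file renames two matrix calls: Norm(0) becomes frobeniusNorm() and MaxVal() becomes Max(). For an n-by-1 direction vector the Frobenius norm equals the Euclidean length, so the normalization in generatePoint() keeps its meaning. A condensed sketch of that step (the random fill loop is elided; n stands for m_numberOfParameters):

OPTIMIZATION::matrix_type dir(n, 1);
// ... fill dir with scale-weighted random entries ...
double len = dir.frobeniusNorm();  // was: dir.Norm(0)
if (len > 1.0e-50)                 // guard against a near-zero direction
{
  dir = dir * (1.0 / len);         // rescale to unit length
}
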
+ 273 - 271
AdaptiveDirectionRandomSearchOptimizer.h

@@ -8,277 +8,279 @@
 #define _ADAPTIVE_DIRECTION_RANDOM_SEARCH_OPTIMIZER_
 
 #include "core/optimization/blackbox/SimpleOptimizer.h"
-#include "optimization/Opt_Namespace.h"
-namespace opt=optimization;
-
-///
-///	Class AdaptiveDirectionRandomSearchOptimizer
-///
-///	HowToUse:
-///	
-///	  * specify in the constructor call the num of points that are optimized in a "simultanious" way
-///	  * to use others than the default parameters use the setControlSeqParams call which changes the
-///		parameters that are responsible for a "scale down" of b_k by b_fac if there was no function
-///		decrease for b_times times
-///	  *	to use others than the default parameters use the setRecallParams call wich changes the
-///		parameters that are responsible for the balance between a new random direction an the old
-///		successfull iteration directions
-///	  * use the setLowerParameterBounds and the setUpperParameterBounds to specify the area to generate
-///		random start points in (THIS IS NESSECARY!)
-///	  *	to use a function value threshold for the random start points' values to be below - use the
-///		setInitFunctionValueThresh call
-///	  *	call init()
-///	  * call optimize()
-///
-///	Implemented Abort criteria:
-///		
-///	  * maximum number of iterations
-///	  * parameter tolerance
-///	  *	time limit
-///
-///
-class AdaptiveDirectionRandomSearchOptimizer : public SimpleOptimizer
+#include "core/optimization/blackbox/Definitions_core_opt.h"
+
+namespace OPTIMIZATION
 {
-	public:
-
-		typedef  SimpleOptimizer SuperClass;
-		typedef  opt::matrix_type matrix_type;
-		///	
-		///		Constructor.
-		///		@param numOfPoints: Number of Points to optimize
-		///		@param loger : OptLogBase * to existing log class
-		///
-		AdaptiveDirectionRandomSearchOptimizer(unsigned int numOfPoints, OptLogBase *loger=NULL);
-		
-		///
-		///	Copy constructor
-		///	@param opt .. optimizer to copy
-		///
-		AdaptiveDirectionRandomSearchOptimizer( const AdaptiveDirectionRandomSearchOptimizer &opt);
-
-		///
-		///	operator =
-		///
-		AdaptiveDirectionRandomSearchOptimizer & operator=(const AdaptiveDirectionRandomSearchOptimizer &opt);
-
-	        ///
-		///
-		///		Destructor.
-	        ///
-	        ~AdaptiveDirectionRandomSearchOptimizer();
-
-		///
-		///	enumeration for the return reasons of an optimizer,
-		///	has all elements of the SuperClass optimizer
-		///
-
-	
-		///
-		///	@brief Do internal initializations
-		///
-		void init();
-		
-		///
-		///	@brief start the optmization
-		///
-		int optimize();
-
-		///
-		///	@brief set the parameters for the control sequence
-		///			
-		///	@param b0 > 0
-		///	@param bfac: 0 < bfac < 1
-		///	@param bThresTimesNotDecreased
-		///
-		///	\return true in case of success
-		///
-		bool setControlSeqParams(double b0, double bfac, unsigned int bThresTimesNotDecreased, double bBreak);
-		
-		
-		///
-		/// @brief Enables advanced initialization of random start points. If activated, start points will be randomly distributed according to the upper and lower bound vectors.
-		///
-		/// @param enable if true, advanced initialization will be enabled
-		/// @param lowerBound matrix containing lower bounds for random initialization (must be nx1 with n= number of Parameters)
-		/// @param upperBound matrix containing upper bounds for random initialization (must be nx1 with n= number of Parameters)
-		///
-		void activateAdvancedInit(bool enable, matrix_type& lowerBounds, matrix_type& upperBounds);
-
-		
-		///
-		///	@brief set recall parameters that control the incluence of prior iterations on the actual one. This is the key idea of the adaptive direction approach
-		///	
-		///	The update formula for the iteration scheme is
-		///
-		///	$$
-		///		d_k = c_0 * d_{k-1} + c_1 \triangle x_{k-1}
-		///	$$
-		///	with
-		///		if( $\delta_{k-1} == 1$)
-		///		{
-		///			$ c_0 = c0s,\enskip c_1 = c_1s $
-		///			and
-		///			$0 < c0s < 1;\enskip c1s  >0 ;\enskip c0s + c1s > 1 $
-		///		}
-		///		else
-		///		{
-		///					$ c_0 = c0f,\enskip c_1 = c_1f $
-		///			and
-		///			$0 < c0f < 1;\enskip c1f  < 0 ;\enskip | c0s + c1s |  < 1 $
-		///					}
-		///				and 
-		///					\|d_k\| < D * b_k 
-		///			
-		///	@param c0s
-		///	@param c1s
-		///	@param c0f
-		///	@param c1f
-		///	@param D
-		///
-		///	\return true in case of success
-		///
-		bool setRecallParams(double c0s, double c1s, double c0f, double c1f, double D);
-
-		///
-		///	@brief set Number of Points
-		///	@param num number of points
-		///
-		inline void setNumberOfPoints(unsigned int num){m_numberOfParallelPoints = num;};
-
-		///
-		///	@brief set a threshold for the initial points
-		///	@param active is the threshold active?
-		///	@param threshold .. the threshold to be below (above in case of maximization)
-		///
-		inline void setInitFunctionValueThresh(bool active, double threshold){m_initFuncThreshActive = active;m_initFuncTresh = threshold;};
-
-
-
-	private:
-
-		///
-		///	number of parallel point optimizations
-		///
-		unsigned int m_numberOfParallelPoints;
-
-		///
-		///	matrix of the whole parameter set
-		///
-		matrix_type m_Parameters;
-
-		///
-		///	matrix of costfunction Values
-		///
-		matrix_type m_CurrentCostFunctionValues;
-
-		///
-		///	generate new points
-		///
-		matrix_type generatePoints();
-
-		///
-		///	generate new points
-		///
-		matrix_type generatePoint();
-
-		///
-		///	@brief accept a new point or reject it
-		///	@param newValue : new objective function value
-		///	@param oldValue : old objective function value
-		///	
-		///
-		void acceptPoints(matrix_type oldValues, matrix_type newValues);
-
-		bool *m_pointsAccepted;
-
-		///
-		///	control sequence parameter b0
-		///
-		double m_b0;
-
-		///
-		///	control sequence parameter bfac
-		///
-		double m_bfac;
-
-		///
-		///	control sequence parameter bk
-		///
-		double *m_bk;
-
-		///
-		///	control sequence multiplikation trigger threshold
-		///
-		unsigned int m_bThresTimesNotDecreased;
-
-		///
-		///	control sequence parameter bBreak
-		///
-		double m_bBreak;
-
-		///
-		///	backup point 
-		///
-		matrix_type m_backupPoint;
-
-		///
-		///	backup point Value
-		///
-		double m_backupPointValue;
-
-		///
-		///	backup point in use?
-		///
-		bool m_backupActive;
-
-		///
-		///	direction parameter c0s
-		///
-		double m_c0s;
-
-		///
-		///	direction parameter c1s
-		///
-		double m_c1s;
-
-		///
-		///	direction parameter c0f
-		///
-		double m_c0f;
-
-		///
-		///	direction parameter c1f
-		///
-		double m_c1f;
-
-		///
-		///	recall limit
-		///
-		double m_D;
-
-		///
-		///	is the initial function value threshold	active		
-		///
-		bool m_initFuncThreshActive;
-
-		///
-		///	the initial function value threshold active		
-		///
-		double m_initFuncTresh;
-		
-		///
-		/// enables advanced initialization
-		bool m_advancedInit;
-		
-		///
-		/// lower bounds for advanced initialization
-		///
-		matrix_type m_advancedinitLowerBounds;
-		
-		///
-		/// upper bounds for advanced initialization
-		matrix_type m_advancedinitUpperBounds;
-
-};
+  ///
+  ///	Class AdaptiveDirectionRandomSearchOptimizer
+  ///
+  ///	HowToUse:
+  ///	
+  ///	  * specify in the constructor call the num of points that are optimized in a "simultanious" way
+  ///	  * to use others than the default parameters use the setControlSeqParams call which changes the
+  ///		parameters that are responsible for a "scale down" of b_k by b_fac if there was no function
+  ///		decrease for b_times times
+  ///	  *	to use others than the default parameters use the setRecallParams call wich changes the
+  ///		parameters that are responsible for the balance between a new random direction an the old
+  ///		successfull iteration directions
+  ///	  * use the setLowerParameterBounds and the setUpperParameterBounds to specify the area to generate
+  ///		random start points in (THIS IS NESSECARY!)
+  ///	  *	to use a function value threshold for the random start points' values to be below - use the
+  ///		setInitFunctionValueThresh call
+  ///	  *	call init()
+  ///	  * call optimize()
+  ///
+  ///	Implemented Abort criteria:
+  ///		
+  ///	  * maximum number of iterations
+  ///	  * parameter tolerance
+  ///	  *	time limit
+  ///
+  ///
+  class AdaptiveDirectionRandomSearchOptimizer : public SimpleOptimizer
+  {
+    public:
+
+      typedef  SimpleOptimizer SuperClass;
+      typedef  OPTIMIZATION::matrix_type matrix_type;
+      ///	
+      ///		Constructor.
+      ///		@param numOfPoints: Number of Points to optimize
+      ///		@param loger : OptLogBase * to existing log class
+      ///
+      AdaptiveDirectionRandomSearchOptimizer(unsigned int numOfPoints, OptLogBase *loger=NULL);
+      
+      ///
+      ///	Copy constructor
+      ///	@param opt .. optimizer to copy
+      ///
+      AdaptiveDirectionRandomSearchOptimizer( const AdaptiveDirectionRandomSearchOptimizer &opt);
+
+      ///
+      ///	operator =
+      ///
+      AdaptiveDirectionRandomSearchOptimizer & operator=(const AdaptiveDirectionRandomSearchOptimizer &opt);
+
+            ///
+      ///
+      ///		Destructor.
+            ///
+            ~AdaptiveDirectionRandomSearchOptimizer();
+
+      ///
+      ///	enumeration for the return reasons of an optimizer,
+      ///	has all elements of the SuperClass optimizer
+      ///
+
+    
+      ///
+      ///	@brief Do internal initializations
+      ///
+      void init();
+      
+      ///
+      ///	@brief start the optmization
+      ///
+      int optimize();
+
+      ///
+      ///	@brief set the parameters for the control sequence
+      ///			
+      ///	@param b0 > 0
+      ///	@param bfac: 0 < bfac < 1
+      ///	@param bThresTimesNotDecreased
+      ///
+      ///	\return true in case of success
+      ///
+      bool setControlSeqParams(double b0, double bfac, unsigned int bThresTimesNotDecreased, double bBreak);
+      
+      
+      ///
+      /// @brief Enables advanced initialization of random start points. If activated, start points will be randomly distributed according to the upper and lower bound vectors.
+      ///
+      /// @param enable if true, advanced initialization will be enabled
+      /// @param lowerBound matrix containing lower bounds for random initialization (must be nx1 with n= number of Parameters)
+      /// @param upperBound matrix containing upper bounds for random initialization (must be nx1 with n= number of Parameters)
+      ///
+      void activateAdvancedInit(bool enable, matrix_type& lowerBounds, matrix_type& upperBounds);
+
+      
+      ///
+      ///	@brief set recall parameters that control the incluence of prior iterations on the actual one. This is the key idea of the adaptive direction approach
+      ///	
+      ///	The update formula for the iteration scheme is
+      ///
+      ///	$$
+      ///		d_k = c_0 * d_{k-1} + c_1 \triangle x_{k-1}
+      ///	$$
+      ///	with
+      ///		if( $\delta_{k-1} == 1$)
+      ///		{
+      ///			$ c_0 = c0s,\enskip c_1 = c_1s $
+      ///			and
+      ///			$0 < c0s < 1;\enskip c1s  >0 ;\enskip c0s + c1s > 1 $
+      ///		}
+      ///		else
+      ///		{
+      ///					$ c_0 = c0f,\enskip c_1 = c_1f $
+      ///			and
+      ///			$0 < c0f < 1;\enskip c1f  < 0 ;\enskip | c0s + c1s |  < 1 $
+      ///					}
+      ///				and 
+      ///					\|d_k\| < D * b_k 
+      ///			
+      ///	@param c0s
+      ///	@param c1s
+      ///	@param c0f
+      ///	@param c1f
+      ///	@param D
+      ///
+      ///	\return true in case of success
+      ///
+      bool setRecallParams(double c0s, double c1s, double c0f, double c1f, double D);
+
+      ///
+      ///	@brief set Number of Points
+      ///	@param num number of points
+      ///
+      inline void setNumberOfPoints(unsigned int num){m_numberOfParallelPoints = num;};
+
+      ///
+      ///	@brief set a threshold for the initial points
+      ///	@param active is the threshold active?
+      ///	@param threshold .. the threshold to be below (above in case of maximization)
+      ///
+      inline void setInitFunctionValueThresh(bool active, double threshold){m_initFuncThreshActive = active;m_initFuncTresh = threshold;};
+
+
+
+    private:
+
+      ///
+      ///	number of parallel point optimizations
+      ///
+      unsigned int m_numberOfParallelPoints;
+
+      ///
+      ///	matrix of the whole parameter set
+      ///
+      matrix_type m_Parameters;
+
+      ///
+      ///	matrix of costfunction Values
+      ///
+      matrix_type m_CurrentCostFunctionValues;
+
+      ///
+      ///	generate new points
+      ///
+      matrix_type generatePoints();
+
+      ///
+      ///	generate new points
+      ///
+      matrix_type generatePoint();
+
+      ///
+      ///	@brief accept a new point or reject it
+      ///	@param newValue : new objective function value
+      ///	@param oldValue : old objective function value
+      ///	
+      ///
+      void acceptPoints(matrix_type oldValues, matrix_type newValues);
+
+      bool *m_pointsAccepted;
+
+      ///
+      ///	control sequence parameter b0
+      ///
+      double m_b0;
+
+      ///
+      ///	control sequence parameter bfac
+      ///
+      double m_bfac;
+
+      ///
+      ///	control sequence parameter bk
+      ///
+      double *m_bk;
+
+      ///
+      ///	control sequence multiplikation trigger threshold
+      ///
+      unsigned int m_bThresTimesNotDecreased;
+
+      ///
+      ///	control sequence parameter bBreak
+      ///
+      double m_bBreak;
+
+      ///
+      ///	backup point 
+      ///
+      matrix_type m_backupPoint;
+
+      ///
+      ///	backup point Value
+      ///
+      double m_backupPointValue;
+
+      ///
+      ///	backup point in use?
+      ///
+      bool m_backupActive;
+
+      ///
+      ///	direction parameter c0s
+      ///
+      double m_c0s;
+
+      ///
+      ///	direction parameter c1s
+      ///
+      double m_c1s;
+
+      ///
+      ///	direction parameter c0f
+      ///
+      double m_c0f;
+
+      ///
+      ///	direction parameter c1f
+      ///
+      double m_c1f;
+
+      ///
+      ///	recall limit
+      ///
+      double m_D;
+
+      ///
+      ///	is the initial function value threshold active?
+      ///
+      bool m_initFuncThreshActive;
+
+      ///
+      ///	the initial function value threshold
+      ///
+      double m_initFuncTresh;
+      
+      ///
+      /// enables advanced initialization
+      bool m_advancedInit;
+      
+      ///
+      /// lower bounds for advanced initialization
+      ///
+      matrix_type m_advancedinitLowerBounds;
+      
+      ///
+      /// upper bounds for advanced initialization
+      matrix_type m_advancedinitUpperBounds;
+
+  };//class
+} //namespace
 
 #endif
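
A minimal sketch (not part of this commit) of the recall update documented in
setRecallParams above, assuming matrix_type supports operator+ and scalar
multiplication as used elsewhere in this diff; the name updateDirection and its
signature are hypothetical:

    // d_k = c_0 * d_{k-1} + c_1 * deltaX, with (c_0, c_1) chosen by whether
    // the previous step was accepted (delta_{k-1} == 1) or rejected.
    OPTIMIZATION::matrix_type updateDirection(const OPTIMIZATION::matrix_type &dPrev,
                                              const OPTIMIZATION::matrix_type &deltaX,
                                              bool accepted,
                                              double c0s, double c1s,
                                              double c0f, double c1f)
    {
      const double c0 = accepted ? c0s : c0f; // contract towards the old direction
      const double c1 = accepted ? c1s : c1f; // reinforce or revert the last step
      return dPrev * c0 + deltaX * c1;        // caller keeps ||d_k|| below D * b_k
    }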

+ 60 - 54
AdditionalIceUtils.cpp

@@ -1,82 +1,88 @@
 #include "optimization/AdditionalIceUtils.h"
 #include "vectort.h" // ICE
-#include "ludecomp.h" //ICE
-#include "mateigen.h" //ICE
+// #include "ludecomp.h" //ICE
+// #include "mateigen.h" //ICE
 #include <cassert>
+
+#include <core/algebra/LUDecomposition.h>
+#include <core/algebra/EigValues.h>
 using namespace opt;
 using namespace ice;
 
 matrix_type & MatrixAddVal ( matrix_type & mat, double val )
 {
-	for(int i=0; i< mat.rows(); ++i)
-	{
-		for(int j =0; j<mat.cols(); ++j)
-		{
-			mat[i][j] += val;
-		}
-	}
+  for(int i=0; i< mat.rows(); ++i)
+  {
+    for(int j =0; j<mat.cols(); ++j)
+    {
+      mat(i,j) += val;
+    }
+  }
 
-	return mat;
+  return mat;
 }
 
 
 double MatrixSum(const matrix_type &mat)
 {
-	double sum=0.0;
-	for(int i=0; i< mat.rows(); ++i)
-	{
-		for(int j =0; j<mat.cols(); ++j)
-		{
-			sum += mat[i][j];
-		}
-	}
-	return sum;
+  double sum=0.0;
+  for(int i=0; i< mat.rows(); ++i)
+  {
+    for(int j =0; j<mat.cols(); ++j)
+    {
+      sum += mat(i,j);
+    }
+  }
+  return sum;
 }
 
 
 
-void linSolve(const opt::matrix_type &A, opt::matrix_type &x, const opt::matrix_type b)
+void linSolve(const OPTIMIZATION::matrix_type &A, OPTIMIZATION::matrix_type &x, const OPTIMIZATION::matrix_type b)
 {
-	int n=A.cols();
+  int n=A.cols();
 
-	assert(x.rows() == n && b.rows() == n );
+  assert(x.rows() == n && b.rows() == n );
 
-	matrix_type LU;
-	IVector indx;
-	Vector xv(n);
-	Vector iv(n);
+  matrix_type LU;
+  NICE::VectorT<int> indx;
+  NICE::Vector xv(n);
+  NICE::Vector iv(n);
 
-	for(int i=0; i < n;++i)
-	{
-		iv[i] = b[i][0];
-	}
+  for(int i=0; i < n;++i)
+  {
+    iv[i] = b(i,0);
+  }
 
-	// LU decomposition with pivoting
-	LUDecompositionPacked(A,LU,indx,true);
-	
-	// solve M*x1 = i1
-	xv = LUSolve(LU,indx,iv);
-	
-	x=matrix_type(n,1);
-	for(int i=0; i < n;++i)
-	{
-		x[i][0] = xv[i];
-	}
-	
+  // LU decomposition with pivoting
+  NICE::LUDecomposition luDecomp;
+  luDecomp.decomposePacked(A, LU, indx, true); 
+  
+  // solve M*x1 = i1
+  xv = luDecomp.solve(LU,indx,iv);
+  
+  x=matrix_type(n,1);
+  for(int i=0; i < n;++i)
+  {
+    x(i,0) = xv[i];
+  }
+  
 }
 
-void getEig(const opt::matrix_type &A, opt::matrix_type &L, opt::matrix_type &V)
+void getEig(const OPTIMIZATION::matrix_type &A, OPTIMIZATION::matrix_type &L, OPTIMIZATION::matrix_type &V)
 {
-	int n= A.cols();
-	L = matrix_type(n,1);
-	V = matrix_type(n,n);
-
-	Vector l(n);
-
-	Eigenvalue(A,l,V);
-	for(int i=0; i < n;++i)
-	{
-		L[i][0] = l[i];
-	}
+  std::cerr << "This feature is not supported currently" << std::endl;
+//   int n= A.cols();
+//   L = matrix_type(n,1);
+//   V = matrix_type(n,n);
+// 
+//   NICE::Vector l(n);
+// 
+//   NICE::EVArnoldi eig;
+//   eig.getEigenvalues (A, l, V, n);
+//   for(int i=0; i < n;++i)
+//   {
+//     L(i,0) = l[i];
+//   }
 
 }
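
A quick usage sketch for the converted linSolve wrapper above; the numeric
values are hypothetical, and only calls already shown in this diff are used:

    // Solve A*x = b for a small 2x2 system.
    OPTIMIZATION::matrix_type A(2,2), b(2,1), x(2,1); // x must be n x 1 (asserted above)
    A(0,0) = 4.0; A(0,1) = 1.0;
    A(1,0) = 1.0; A(1,1) = 3.0;
    b(0,0) = 1.0; b(1,0) = 2.0;
    linSolve(A, x, b); // x(0,0) and x(1,0) now hold the solution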

+ 6 - 6
AdditionalIceUtils.h

@@ -7,9 +7,9 @@
 #ifndef ADDITIONALICEUTILS_H
 #define ADDITIONALICEUTILS_H
 
-#include "optimization/Opt_Namespace.h"
+#include "core/optimization/blackbox/Definitions_core_opt.h"
 
-namespace opt=optimization;
+namespace opt=OPTIMIZATION;
 
 /*!
 	adds val to every element of mat
@@ -17,14 +17,14 @@ namespace opt=optimization;
 	@param val the value to add
 	@return mat
 */
-opt::matrix_type& MatrixAddVal ( opt::matrix_type & mat, double val );
+OPTIMIZATION::matrix_type& MatrixAddVal ( OPTIMIZATION::matrix_type & mat, double val );
 
 /*!
 	sums up all elements of the matrix
 	@param mat matrix
 	@return $\sum_{i,j} mat(i,j)$
 */
-double MatrixSum(const opt::matrix_type & mat);
+double MatrixSum(const OPTIMIZATION::matrix_type & mat);
 
 /*!
 	Wrapper to solve linear equation A*x=b
@@ -33,7 +33,7 @@ double MatrixSum(const opt::matrix_type & mat);
 	@param b nx1 matrix (inhomogenity)
 
 */
-void linSolve(const opt::matrix_type &A, opt::matrix_type &x, const opt::matrix_type b);
+void linSolve(const OPTIMIZATION::matrix_type &A, OPTIMIZATION::matrix_type &x, const OPTIMIZATION::matrix_type b);
 
 /*!
 	Wrapper to get eigen values of a matrix
@@ -41,6 +41,6 @@ void linSolve(const opt::matrix_type &A, opt::matrix_type &x, const opt::matrix_
 	@param L lambda vector with eigenvalues
 	@param V marix, whose columns are the corresponding eigen vector
 */
-void getEig(const opt::matrix_type &A, opt::matrix_type &L, opt::matrix_type &V);
+void getEig(const OPTIMIZATION::matrix_type &A, OPTIMIZATION::matrix_type &L, OPTIMIZATION::matrix_type &V);
 
 #endif //ADDITIONALICEUTILS_H

+ 74 - 72
ArmijoLineSearcher.cpp

@@ -11,6 +11,8 @@
 #include "optimization/ArmijoLineSearcher.h"
 #include <iostream>
 
+using namespace OPTIMIZATION;
+
 ArmijoLineSearcher::ArmijoLineSearcher() : m_dGamma(0.7), m_dL(1.0),m_dSigma(0.33)
 {
 }
@@ -71,81 +73,81 @@ void ArmijoLineSearcher::setXkGkDk(const matrix_type xk, const matrix_type gk, c
 double ArmijoLineSearcher::optimize()
 {
 	double x=0.0;
-	double ndk2=m_dk.Norm(0);ndk2*=ndk2;
-
-	double fk= evaluateSub(0.0);
-double tmp= (!m_gk * m_dk)[0][0];
-	double sk= - tmp / (m_dL * ndk2 );
-	x=sk;
-	int maxIter=30;
-	int iter=0;
-	// own rule 
-	double fk_old=fk;
-	double x_old=0.0;
-	double fk_new;
-	bool improved = false;
-	
-	while(  iter < maxIter)
-	{
-		fk_new=evaluateSub(x);
-
-		if ( fk_new > fk_old)
-		{
-			x=x_old;
-			//cout << "loop 1: end " << endl;
-			break;
-		}
-		
-		//cout << "loop 1: iter "<< iter << " " << x << " " << fk_new<<endl;
-		fk_old = fk_new;
-		x_old = x;
-		x/=m_dGamma;
-		++iter;
-	}
-	if( iter <= 1 )
-	{
-		iter=0;
-		fk_old=fk;
-		x_old=0.0;
-		x=sk;
-		while( iter < maxIter)
-		{
-			fk_new=evaluateSub(x);
-	
-			// fk_new is worse than old, and we are lower than fk
-			if( fk_new > fk_old && improved==true)
-			{
-				// abort with fk_old which was generated by x_old
-				x = x_old;
-				break;
-			}
-		
-					
-			//cout << "loop 2: iter "<< iter << " " << x << " " << fk_new<<endl;
-			fk_old=fk_new;
-			x_old=x;
-			x*=m_dGamma;
-			++iter;
-			if( fk_old < fk)
-			{
-				improved = true;
-			}
-		}
-	
-	}
-	// own rule till here
-
-	// original implementation 
-	//iter=0.0;
-	//while( fk - evaluateSub(x) < - m_dSigma * x *tmp  && iter < maxIter)
-	//{
-	//	x*=m_dGamma;
-	//	iter++;
-	//}
+  double ndk2 = m_dk.frobeniusNorm(); ndk2 *= ndk2;
+
+  double fk= evaluateSub(0.0);
+  double tmp = (!m_gk * m_dk)(0,0);
+  double sk= - tmp / (m_dL * ndk2 );
+  x=sk;
+  int maxIter=30;
+  int iter=0;
+  // own rule 
+  double fk_old=fk;
+  double x_old=0.0;
+  double fk_new;
+  bool improved = false;
+  
+  while(  iter < maxIter)
+  {
+    fk_new=evaluateSub(x);
+
+    if ( fk_new > fk_old)
+    {
+      x=x_old;
+      //cout << "loop 1: end " << endl;
+      break;
+    }
+    
+    //cout << "loop 1: iter "<< iter << " " << x << " " << fk_new<<endl;
+    fk_old = fk_new;
+    x_old = x;
+    x/=m_dGamma;
+    ++iter;
+  }
+  if( iter <= 1 )
+  {
+    iter=0;
+    fk_old=fk;
+    x_old=0.0;
+    x=sk;
+    while( iter < maxIter)
+    {
+      fk_new=evaluateSub(x);
+  
+      // fk_new is worse than old, and we are lower than fk
+      if( fk_new > fk_old && improved==true)
+      {
+        // abort with fk_old which was generated by x_old
+        x = x_old;
+        break;
+      }
+    
+          
+      //cout << "loop 2: iter "<< iter << " " << x << " " << fk_new<<endl;
+      fk_old=fk_new;
+      x_old=x;
+      x*=m_dGamma;
+      ++iter;
+      if( fk_old < fk)
+      {
+        improved = true;
+      }
+    }
+  
+  }
+  // own rule till here
+
+  // original implementation 
+  //iter=0.0;
+  //while( fk - evaluateSub(x) < - m_dSigma * x *tmp  && iter < maxIter)
+  //{
+  //	x*=m_dGamma;
+  //	iter++;
+  //}
 
 
 //	if(m_pLoger)
 //		m_pLoger->logWarning("Brent took too many iterations.. Tol threshold was to low for the number of iterations..\n");
 
-	return x;
+  return x;
 }
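
For reference, the commented-out "original implementation" above is the textbook
Armijo rule: reading tmp = g_k^T d_k off the code, the loop shrinks the step x
by gamma until

    $$ f(x_k + x d_k) \le f(x_k) + \sigma x g_k^\top d_k,
       \qquad x \in \{ s_k, \gamma s_k, \gamma^2 s_k, \dots \},
       \qquad s_k = -\frac{g_k^\top d_k}{L \|d_k\|^2} $$

The "own rule" actually used above starts from the same s_k but searches in both
directions (first growing x by 1/gamma, then shrinking by gamma) for the last
step that still decreases the objective.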

+ 88 - 83
ArmijoLineSearcher.h

@@ -11,89 +11,94 @@
 
 #include <cmath>
 #include "LineSearcher.h"
-/*!
-	class for the brent line search
-
-   HowToUse:
-	
-	  * use setX0() to set the offset
-	  * use setH0()	to set the search direction
-	  * use setGk() to set the gradient in x0
-	  * call init()
-	  * call optimize() (returns lambda)
- */
-class  ArmijoLineSearcher : public LineSearcher
+
+namespace OPTIMIZATION
 {
-public:
-
-	typedef LineSearcher	SuperClass;
-	typedef SuperClass::matrix_type matrix_type;
-	
-	/*!
-		default constructor
-	*/
-	ArmijoLineSearcher();
-
-	/*!
-		constructor
-	*/
-	ArmijoLineSearcher(CostFunction *costFunc, OptLogBase *loger);
-
-	/*!
-		Copy constructor
-		\param opt .. optimizer to copy
-	*/
-	ArmijoLineSearcher( const ArmijoLineSearcher &lin);
-	
-	/*!
-		operator =
-	*/
-	ArmijoLineSearcher & operator=(const ArmijoLineSearcher &lin);
-
-	/*!
-		Destructor.
-	*/
-	virtual ~ArmijoLineSearcher();
-
-	/*!
-		set Parameters
-		@param gamma reduction Factor of step width test
-		@param L divisor for computation of sk ( > 0 )
-		@param sigma multiplicative parameter for test (0, 0.5)
-	*/
-	void setGammaLSigma(double gamma, double L, double sigma);
-
-
-	/*!
-		local search setup
-		@param xk start point
-		@param gk gradient at xk
-		@param dk descent direction from xk
-	*/
-	void setXkGkDk(const matrix_type xk, const matrix_type gk, const matrix_type dk);
-		
-	/*!
-		optimize
-		returns a double..
-	*/
-	double optimize();
-
-protected:
-
-	//! an internal parameter
-	double m_dGamma;
-
-	//! an internal paramter
-	double m_dL;
-
-	//! an internal parameter
-	double m_dSigma;
-
-	//! gradient in xk 
-	matrix_type m_gk;
-
-	//! iteration direction from xk
-	matrix_type m_dk;
-};
+
+  /*!
+    class for the Armijo line search
+
+    HowToUse:
+    
+      * use setGammaLSigma() to change the Armijo parameters (optional)
+      * use setXkGkDk() to set the start point, the gradient there, and the descent direction
+      * call optimize() (returns the step length)
+  */
+  class  ArmijoLineSearcher : public LineSearcher
+  {
+  public:
+
+    typedef LineSearcher	SuperClass;
+    typedef SuperClass::matrix_type matrix_type;
+    
+    /*!
+      default constructor
+    */
+    ArmijoLineSearcher();
+
+    /*!
+      constructor
+    */
+    ArmijoLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+    /*!
+      Copy constructor
+      \param opt .. optimizer to copy
+    */
+    ArmijoLineSearcher( const ArmijoLineSearcher &lin);
+    
+    /*!
+      operator =
+    */
+    ArmijoLineSearcher & operator=(const ArmijoLineSearcher &lin);
+
+    /*!
+      Destructor.
+    */
+    virtual ~ArmijoLineSearcher();
+
+    /*!
+      set Parameters
+      @param gamma reduction Factor of step width test
+      @param L divisor for computation of sk ( > 0 )
+      @param sigma multiplicative parameter for test (0, 0.5)
+    */
+    void setGammaLSigma(double gamma, double L, double sigma);
+
+
+    /*!
+      local search setup
+      @param xk start point
+      @param gk gradient at xk
+      @param dk descent direction from xk
+    */
+    void setXkGkDk(const matrix_type xk, const matrix_type gk, const matrix_type dk);
+      
+    /*!
+      optimize
+      returns a double..
+    */
+    double optimize();
+
+  protected:
+
+    //! an internal parameter
+    double m_dGamma;
+
+    //! an internal parameter
+    double m_dL;
+
+    //! an internal parameter
+    double m_dSigma;
+
+    //! gradient in xk 
+    matrix_type m_gk;
+
+    //! iteration direction from xk
+    matrix_type m_dk;
+  };//class
+}//namespace
 
 #endif
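
A minimal usage sketch for the class above; myCostFunction stands for any
CostFunction instance (hypothetical here), and the parameter values are the
constructor defaults shown in ArmijoLineSearcher.cpp:

    OPTIMIZATION::matrix_type xk(2,1), gk(2,1), dk(2,1);
    // ... fill xk with the current point, gk with the gradient there,
    //     and dk with a descent direction (e.g. dk = gk * (-1.0)) ...
    OPTIMIZATION::ArmijoLineSearcher searcher(&myCostFunction, NULL);
    searcher.setGammaLSigma(0.7, 1.0, 0.33); // gamma, L, sigma
    searcher.setXkGkDk(xk, gk, dk);
    double lambda = searcher.optimize();     // step length along dk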

+ 22 - 22
BFGSOptimizer.cpp

@@ -9,7 +9,7 @@
 
 #include "optimization/BFGSOptimizer.h"
 
-
+using namespace OPTIMIZATION;
 
 BFGSOptimizer::BFGSOptimizer(OptLogBase *loger) : SuperClass(loger)
 {
@@ -29,7 +29,7 @@ BFGSOptimizer::~BFGSOptimizer()
 
 void BFGSOptimizer::init()
 {
-	SuperClass::init();
+  SuperClass::init();
 
 }
 
@@ -37,33 +37,33 @@ void BFGSOptimizer::init()
 BFGSOptimizer & BFGSOptimizer::operator=(const BFGSOptimizer &opt)
 {
 
-	/*
-			avoid self-copy
-	*/
-	if(this != &opt)
-	{
-		
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(opt);
+  /*
+      avoid self-copy
+  */
+  if(this != &opt)
+  {
+    
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(opt);
 
-	}
+  }
 
-  	return *this;
+    return *this;
 }
 
 
 
 void BFGSOptimizer::updateIterationMatrix()
 {
-	matrix_type pk = m_parameters - m_oldParams;
-	matrix_type rk = m_gradient - m_oldGradient;
-	matrix_type rkt=!rk;
-	matrix_type pkt=!pk;
-
-	m_IterationMatrix = m_IterationMatrix 
-				+ (rk * rkt) * (1.0/ (rkt * pk)[0][0]) 
-				- (m_IterationMatrix * pk * pkt * m_IterationMatrix) * (1.0/ (pkt * m_IterationMatrix * pk)[0][0]);
+  matrix_type pk = m_parameters - m_oldParams;
+  matrix_type rk = m_gradient - m_oldGradient;
+  matrix_type rkt= rk.transpose();
+  matrix_type pkt= pk.transpose();
+
+  m_IterationMatrix = m_IterationMatrix 
+        + (rk * rkt) * (1.0/ (rkt * pk)(0,0)) 
+        - (m_IterationMatrix * pk * pkt * m_IterationMatrix) * (1.0/ (pkt * m_IterationMatrix * pk)(0,0));
 
 }
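
In formula form, updateIterationMatrix above is the standard BFGS update of the
Hessian approximation B_k, with p_k the parameter step and r_k the gradient
change:

    $$ B_{k+1} = B_k + \frac{r_k r_k^\top}{r_k^\top p_k}
                     - \frac{B_k p_k p_k^\top B_k}{p_k^\top B_k p_k},
       \qquad p_k = x_k - x_{k-1}, \quad r_k = g_k - g_{k-1} $$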

+ 70 - 67
BFGSOptimizer.h

@@ -12,73 +12,76 @@
 
 #include "optimization/MatrixIterativeOptimizer.h"
 
-/*!
-	class for the BFGS
-
-   HowToUse:
-	
-	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
-	  * use setParameters() to set the start point
-	  * call init()
-	  * call optimize()
-
-
- 	Implemented Abort criteria:
-		
-	  * maximum number of iterations
-	  *	time limit
-	  * parameter bounds
-	  * function value tolerance
-	  * parameter tolerance
-
-	Additional return reason:
-	
-
- */
-class BFGSOptimizer : public MatrixIterativeOptimizer
+namespace OPTIMIZATION
 {
-public:
-		typedef  MatrixIterativeOptimizer	SuperClass;
-		typedef SuperClass::matrix_type	matrix_type;
-
-	    /*!
-			Constructor.
-			\param loger : OptLogBase * to existing log class
-		*/
-		BFGSOptimizer(OptLogBase *loger=NULL);
-
-		/*!
-			Copy constructor
-			\param opt .. optimizer to copy
-		*/
-		BFGSOptimizer( const BFGSOptimizer &opt);
-
-
-		/*!
-			operator =
-		*/
-		BFGSOptimizer & operator=(const BFGSOptimizer &opt);
-
-
-		/*!
-			Destructor.
-		*/
-		virtual ~BFGSOptimizer();
-
-
-		/*!
-			internal initoializations 
-		*/
-		void init();
-
-protected:
-		
-
-		/*!
-			\brief update the Iteration Matrix
-		*/
-		void updateIterationMatrix() ;
-	
-};
+  /*!
+    class for the BFGS
+
+    HowToUse:
+    
+      * use setStepSize to specify the initial stepsize to compute the numerical gradient
+      * use setParameters() to set the start point
+      * call init()
+      * call optimize()
+
+
+    Implemented Abort criteria:
+      
+      * maximum number of iterations
+      *	time limit
+      * parameter bounds
+      * function value tolerance
+      * parameter tolerance
+
+    Additional return reason:
+    
+
+  */
+  class BFGSOptimizer : public MatrixIterativeOptimizer
+  {
+  public:
+      typedef  MatrixIterativeOptimizer	SuperClass;
+      typedef SuperClass::matrix_type	matrix_type;
+
+        /*!
+        Constructor.
+        \param loger : OptLogBase * to existing log class
+      */
+      BFGSOptimizer(OptLogBase *loger=NULL);
+
+      /*!
+        Copy constructor
+        \param opt .. optimizer to copy
+      */
+      BFGSOptimizer( const BFGSOptimizer &opt);
+
+
+      /*!
+        operator =
+      */
+      BFGSOptimizer & operator=(const BFGSOptimizer &opt);
+
+
+      /*!
+        Destructor.
+      */
+      virtual ~BFGSOptimizer();
+
+
+      /*!
+        internal initializations 
+      */
+      void init();
+
+  protected:
+      
+
+      /*!
+        \brief update the Iteration Matrix
+      */
+      void updateIterationMatrix() ;
+    
+  }; //class
+}//namespace
 
 #endif

+ 2 - 0
BrentLineSearcher.cpp

@@ -14,6 +14,8 @@
 #define BrentSHFT(a,b,c,d)	(a)=(b);(b)=(c);(c)=(d);
 #define BrentSIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
 
+using namespace OPTIMIZATION;
+
 BrentLineSearcher::BrentLineSearcher() : m_CGOLD(0.38196601125010515179541316563436), m_ZEPS(1.0e-10)
 {
 	m_a = 0.0;

+ 87 - 83
BrentLineSearcher.h

@@ -11,91 +11,95 @@
 
 #include <cmath>
 #include "LineSearcher.h"
-/*!
-	class for the brent line search
-
-   HowToUse:
-	
-	  * use setX0() to set the offset
-	  * use setH0()	to set the search direction
-	  * use setEps() to set the abort criterion
-	  * use setBounds() to set a different search bound for lambda than [0,10]
-	  * call init()
-	  * call optimize() (returns lambda)
- */
-class  BrentLineSearcher : public LineSearcher
+
+namespace OPTIMIZATION
 {
-public:
-
-	typedef  LineSearcher	SuperClass;
-	typedef SuperClass::matrix_type matrix_type;
-	
-
-	/*!
-		default constructor
-	*/
-	BrentLineSearcher();
-
-	/*!
-		constructor
-	*/
-	BrentLineSearcher(CostFunction *costFunc, OptLogBase *loger);
-
-	/*!
-		Copy constructor
-		\param opt .. optimizer to copy
-	*/
-	BrentLineSearcher( const BrentLineSearcher &lin);
-	
-	/*!
-		operator =
-	*/
-	BrentLineSearcher & operator=(const BrentLineSearcher &lin);
+  /*!
+    class for the brent line search
+
+    HowToUse:
+    
+      * use setX0() to set the offset
+      * use setH0()	to set the search direction
+      * use setEps() to set the abort criterion
+      * use setBounds() to set a different search bound for lambda than [0,10]
+      * call init()
+      * call optimize() (returns lambda)
+  */
+  class  BrentLineSearcher : public LineSearcher
+  {
+  public:
+
+    typedef  LineSearcher	SuperClass;
+    typedef SuperClass::matrix_type matrix_type;
+    
+
+    /*!
+      default constructor
+    */
+    BrentLineSearcher();
+
+    /*!
+      constructor
+    */
+    BrentLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+    /*!
+      Copy constructor
+      \param opt .. optimizer to copy
+    */
+    BrentLineSearcher( const BrentLineSearcher &lin);
+    
+    /*!
+      operator =
+    */
+    BrentLineSearcher & operator=(const BrentLineSearcher &lin);
+
+      /*!
+      Destructor.
+      */
+      virtual ~BrentLineSearcher();
+
+    /*!
+      set the bound parameters
+    */
+    bool setBounds(double a, double c);
+
+    /*!
+      set the abort threshold
+    */
+    bool setEps(double eps);
+
+    /*!
+      optimize
+      returns a double..
+    */
+    double optimize();
+
+  protected:
+
+    /*!
+      approximation of the golden cut ratio
+    */
+    const double m_CGOLD; // = 0.38196601125010515179541316563436;
 
     /*!
-		Destructor.
-     */
-    virtual ~BrentLineSearcher();
-
-	/*!
-		set the bound parameters
-	*/
-	bool setBounds(double a, double c);
-
-	/*!
-		set the abort threshold
-	*/
-	bool setEps(double eps);
-
-	/*!
-		optimize
-		returns a double..
-	*/
-	double optimize();
-
-protected:
-
-	/*!
-		approximation of the golden cut ratio
-	*/
-	const double m_CGOLD; // = 0.38196601125010515179541316563436;
-
-	/*!
-		small number trying to achieve fractional accuracy for a minimum at 0
-	*/
-	const double m_ZEPS;	
-
-	/*!
-		the bounds
-	*/
-	double m_a, m_b, m_c;
-
-	/*!
-		the abort criteria
-	*/
-	double m_eps;
-
-
-};
+      small number trying to achieve fractional accuracy for a minimum at 0
+    */
+    const double m_ZEPS;	
+
+    /*!
+      the bounds
+    */
+    double m_a, m_b, m_c;
+
+    /*!
+      the abort criteria
+    */
+    double m_eps;
+
+
+  }; //class
+}//namespace
 
 #endif

+ 281 - 280
CombinatorialOptimizer.cpp

@@ -13,61 +13,62 @@
 
 #include <iostream>
 
-using namespace opt;
+using namespace OPTIMIZATION;
+
 CombinatorialOptimizer::CombinatorialOptimizer(OptLogBase *loger): SuperClass(loger)
 {
-	m_mode = 1;
-	m_T0 = 1;
-	m_Tk = m_T0;
-	m_alpha = 0.95;
-	//m_stepLength = 1.0;
-	m_fast = false;
+  m_mode = 1;
+  m_T0 = 1;
+  m_Tk = m_T0;
+  m_alpha = 0.95;
+  //m_stepLength = 1.0;
+  m_fast = false;
 }
 
 
 CombinatorialOptimizer::CombinatorialOptimizer( const CombinatorialOptimizer &opt) : SuperClass(opt)
 {
-	m_mode			= opt.m_mode;
-	m_T0			= opt.m_T0;
-	m_Tk			= opt.m_Tk;
-	m_alpha			= opt.m_alpha;
-	//m_stepLength		= opt.m_stepLength;
-	m_fast			= opt.m_fast;
-	
+  m_mode			= opt.m_mode;
+  m_T0			= opt.m_T0;
+  m_Tk			= opt.m_Tk;
+  m_alpha			= opt.m_alpha;
+  //m_stepLength		= opt.m_stepLength;
+  m_fast			= opt.m_fast;
+  
 }
 
 /*
-	operator=
+  operator=
 */
 CombinatorialOptimizer & CombinatorialOptimizer::operator=(const CombinatorialOptimizer &opt)
 {
-		
-	/*
-			avoid self-copy
-	*/
-	if(this != &opt)
-	{
-		
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(opt);
-
-			
-		/*
-			own values:
-		*/
-		m_mode			= opt.m_mode;
-		m_T0			= opt.m_T0;
-		m_Tk			= opt.m_Tk;
-		m_alpha			= opt.m_alpha;
-		//m_stepLength	= opt.m_stepLength;
-		m_fast			= opt.m_fast;
-		
-		
-	}
-
-  	return *this;
+    
+  /*
+      avoid self-copy
+  */
+  if(this != &opt)
+  {
+    
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(opt);
+
+      
+    /*
+      own values:
+    */
+    m_mode			= opt.m_mode;
+    m_T0			= opt.m_T0;
+    m_Tk			= opt.m_Tk;
+    m_alpha			= opt.m_alpha;
+    //m_stepLength	= opt.m_stepLength;
+    m_fast			= opt.m_fast;
+    
+    
+  }
+
+    return *this;
 }
 
 
@@ -78,19 +79,19 @@ CombinatorialOptimizer::~CombinatorialOptimizer()
 
 void CombinatorialOptimizer::init()
 {
-	SuperClass::init();
+  SuperClass::init();
 
-	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+  m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
 
-	/*
-		seed rand
-	*/
-	ice::Randomize();
+  /*
+    seed rand
+  */
+  ice::Randomize();
 
-	/*
-		(re)set m_Tk
-	*/
-	m_Tk = m_T0;
+  /*
+    (re)set m_Tk
+  */
+  m_Tk = m_T0;
 
 
 
@@ -98,158 +99,158 @@ void CombinatorialOptimizer::init()
 
 bool CombinatorialOptimizer::setMode(int mode)
 {
-	switch(mode)
-	{
-	case 1:
-	case 2:
-	case 3:
-	case 4:
-		m_mode = mode;
-		return true;
-	default:
-		return false;
-	}
+  switch(mode)
+  {
+  case 1:
+  case 2:
+  case 3:
+  case 4:
+    m_mode = mode;
+    return true;
+  default:
+    return false;
+  }
 }
 
 bool CombinatorialOptimizer::setControlSeqParam(double T0,double alpha)
 {
-	if(T0 < 0)
-		return false;
+  if(T0 < 0)
+    return false;
 
-	m_T0 = T0;
+  m_T0 = T0;
 
-	if(alpha < 0 || alpha > 1)
-		return false;
+  if(alpha < 0 || alpha > 1)
+    return false;
 
-	return true;
+  m_alpha = alpha;
+
+  return true;
 
 }
 
 bool CombinatorialOptimizer::accepptPoint(double oldValue, double newValue)
 {
-	bool accept = false;
-	//cout<<"old val: "<<oldValue<<endl;
+  bool accept = false;
+  //cout<<"old val: "<<oldValue<<endl;
     //    cout<<"new val: "<<newValue<<endl;
     //    cout<<endl;
-	switch(m_mode)
-	{
-	case 1:
-	{
-		/*
-			simulated annealing
-		*****************************/
-		
-		if(newValue <= oldValue)
-		{
-			accept = true;
-			break;
-		}
-				
-		/*
-			generate uniform distrubuted random number
-		*/
-		double lambda = ice::RandomD();
-		
-		if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
-		{
-			accept =true;
-		}
-		break;
-	}
-	case 2:
-		/*
-			threshold acceptance
-		*****************************/
-		if((newValue - oldValue) < m_Tk)
-		{
-			accept = true;
-		}
-		break;
-	case 3:
-		/*
-			great deluge algorithm
-		*****************************/
-		if(newValue <= m_Tk)
-		{
-			accept = true;
-		}
-
-		break;
-	case 4:
-		/*
-			stochastic relaxation
-		*****************************/
-		if(newValue <= oldValue)
-		{
-			accept = true;
-		}
-	case 5:
-		{
-		/*
-			annealing & theshold
-		*****************************/
-		
-		/*
-			accept better values
-		*/
-		if(newValue <= oldValue)
-		{
-			accept = true;
-			break;
-		}
-		
-		/*
-			dont accept values bigger than m_threshold
-		*/
-		if(newValue > m_threshold )
-		{
-			accept = false;
-			break;
-		}
-
-		/*
-			generate uniform distrubuted random number
-		*/
-		double lambda = ice::RandomD();
-		
-		if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
-		{
-			accept =true;
-		}
-		else
-			accept = false;
-		break;
-		}
-	default:
-		accept = false;
-	}
-
-	return accept;
+  switch(m_mode)
+  {
+  case 1:
+  {
+    /*
+      simulated annealing
+    *****************************/
+    
+    if(newValue <= oldValue)
+    {
+      accept = true;
+      break;
+    }
+        
+    /*
+      generate uniformly distributed random number
+    */
+    double lambda = ice::RandomD();
+    
+    if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
+    {
+      accept =true;
+    }
+    break;
+  }
+  case 2:
+    /*
+      threshold acceptance
+    *****************************/
+    if((newValue - oldValue) < m_Tk)
+    {
+      accept = true;
+    }
+    break;
+  case 3:
+    /*
+      great deluge algorithm
+    *****************************/
+    if(newValue <= m_Tk)
+    {
+      accept = true;
+    }
+
+    break;
+  case 4:
+    /*
+      stochastic relaxation
+    *****************************/
+    if(newValue <= oldValue)
+    {
+      accept = true;
+    }
+    break;
+  case 5:
+    {
+    /*
+      annealing & threshold
+    *****************************/
+    
+    /*
+      accept better values
+    */
+    if(newValue <= oldValue)
+    {
+      accept = true;
+      break;
+    }
+    
+    /*
+      don't accept values larger than m_threshold
+    */
+    if(newValue > m_threshold )
+    {
+      accept = false;
+      break;
+    }
+
+    /*
+      generate uniformly distributed random number
+    */
+    double lambda = ice::RandomD();
+    
+    if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
+    {
+      accept =true;
+    }
+    else
+      accept = false;
+    break;
+    }
+  default:
+    accept = false;
+  }
+
+  return accept;
 }
 
 matrix_type CombinatorialOptimizer::generatePoint()
 {
     matrix_type newPoint(m_parameters);
 
-	for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
-	{
-		if(m_scales[i][0] > 0.0 )
-		{
-			if(m_fast == true)
-			{
-				//newPoint[i][0] = m_parameters[i][0] + m_stepLength * m_Tk * ice::GaussRandom(1.0) ;
-				newPoint[i][0] = m_parameters[i][0] + m_scales[i][0] * m_Tk * ice::GaussRandom(1.0) ;
-			}
-			else
-			{
-				newPoint[i][0] = m_parameters[i][0] + m_scales[i][0] * ice::GaussRandom(1.0) ;
-			}
-		}
-	}
+  for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+  {
+    if(m_scales(i,0) > 0.0 )
+    {
+      if(m_fast == true)
+      {
+        //newPoint(i,0) = m_parameters(i,0) + m_stepLength * m_Tk * ice::GaussRandom(1.0) ;
+        newPoint(i,0) = m_parameters(i,0) + m_scales(i,0) * m_Tk * ice::GaussRandom(1.0) ;
+      }
+      else
+      {
+        newPoint(i,0) = m_parameters(i,0) + m_scales(i,0) * ice::GaussRandom(1.0) ;
+      }
+    }
+  }
 
         //cout<<"new Point: "<<newPoint<<endl;
         
-	return newPoint;
+  return newPoint;
 }	
 
 //bool CombinatorialOptimizer::setSteplength(double steplength)
@@ -265,113 +266,113 @@ matrix_type CombinatorialOptimizer::generatePoint()
 int CombinatorialOptimizer::optimize()
 {
 
-	// init
-	init();
-
-
-	/*
-		start time criteria
-	*/
-	m_startTime = clock();
-
-
-
-	bool abort = false;
-
-	matrix_type newPoint;
-	double newValue;
-	
-	/* 
-		do the optimization
-	*/
-	while(abort == false)
-	{
-		/*
-			increase iteration counter
-		*/
-		m_numIter++;
-		
-		/*
-			Check iteration limit
-		*/
-		if(m_maxNumIterActive)
-		{
-			if(m_numIter >= m_maxNumIter)
-			{
-				/* set according return status and return */
-				m_returnReason = SUCCESS_MAXITER;
-				abort = true;
-				continue;
-			}
-		}
-
-		/*
-			update control seq.
-		*/
-		m_Tk *= m_alpha;
-
-
-		/*
-			generate new point
-		*/
-		newPoint = generatePoint();
-				
-
-		/*
-			evaluate cost function
-		*/
-		newValue = evaluateCostFunction(newPoint);
-
-		
-		/*
-			accept new point
-		*/
-		if( accepptPoint(m_currentCostFunctionValue,newValue) )
-		{
-        		m_parameters = newPoint;
-			m_currentCostFunctionValue = newValue;
-			
-		}
+  // init
+  init();
+
+
+  /*
+    start time criteria
+  */
+  m_startTime = clock();
+
+
+
+  bool abort = false;
+
+  matrix_type newPoint;
+  double newValue;
+  
+  /* 
+    do the optimization
+  */
+  while(abort == false)
+  {
+    /*
+      increase iteration counter
+    */
+    m_numIter++;
+    
+    /*
+      Check iteration limit
+    */
+    if(m_maxNumIterActive)
+    {
+      if(m_numIter >= m_maxNumIter)
+      {
+        /* set according return status and return */
+        m_returnReason = SUCCESS_MAXITER;
+        abort = true;
+        continue;
+      }
+    }
+
+    /*
+      update control seq.
+    */
+    m_Tk *= m_alpha;
+
+
+    /*
+      generate new point
+    */
+    newPoint = generatePoint();
+        
+
+    /*
+      evaluate cost function
+    */
+    newValue = evaluateCostFunction(newPoint);
+
+    
+    /*
+      accept new point
+    */
+    if( accepptPoint(m_currentCostFunctionValue,newValue) )
+    {
+      m_parameters = newPoint;
+      m_currentCostFunctionValue = newValue;
+      
+    }
                 
                 if(m_verbose)
                 {
                     std::cout << "CombinatorialOptimizer :: parameter: ";
                     for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
                     {
-                        std::cout<< m_parameters[r][0] << " ";
+                        std::cout<< m_parameters(r,0) << " ";
                     }
                     std::cout << m_currentCostFunctionValue  << std::endl;
                 }
-		
-		/*
-			Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
-		*/
-		if(!checkParameters(m_parameters))
-		{
-			/* set according return status and the last parameters and return */
-			m_returnReason = ERROR_XOUTOFBOUNDS;
-			abort = true;
-		}
-
-		/*
-			Check Optimization Timelimit, if active
-		*/
-		if(m_maxSecondsActive)
-		{
-			m_currentTime = clock();
-
-			/* time limit exceeded ? */
-			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
-			{
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_TIMELIMIT;
-				abort = true;
-				continue;
-			}
-		}
-
-	}
-
-	return m_returnReason;
+    
+    /*
+      Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
+    */
+    if(!checkParameters(m_parameters))
+    {
+      /* set according return status and the last parameters and return */
+      m_returnReason = ERROR_XOUTOFBOUNDS;
+      abort = true;
+    }
+
+    /*
+      Check Optimization Timelimit, if active
+    */
+    if(m_maxSecondsActive)
+    {
+      m_currentTime = clock();
+
+      /* time limit exceeded ? */
+      if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+      {
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_TIMELIMIT;
+        abort = true;
+        continue;
+      }
+    }
+
+  }
+
+  return m_returnReason;
 
 }
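
The simulated-annealing branch (mode 1) of accepptPoint above accepts a worse
point with the usual Metropolis probability, under the geometric cooling
schedule applied at the top of the optimize() loop:

    $$ P(\text{accept}) = \begin{cases}
         1 & f_{new} \le f_{old} \\
         \exp\!\left( -\frac{f_{new} - f_{old}}{T_k} \right) & \text{otherwise}
       \end{cases}
       \qquad T_k = T_0 \, \alpha^k $$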

+ 168 - 165
CombinatorialOptimizer.h

@@ -9,171 +9,174 @@
 
 
 #include "core/optimization/blackbox/SimpleOptimizer.h"
-#include "optimization/Opt_Namespace.h"
-namespace opt=optimization;
-
-///
-///	class for the Combinatorial optimizers
-///
-/// HowToUse:
-/// 
-///	  * use setMode to specify which algorithm to use (simulated annealing, etc.)
-///	  * use setStepLength to set the variance for the random number generation 
-///	  * use setParameters() to set the start point
-///	  * use setFastMode() to let the random number generation contract with the control sequence
-///	  * use setControlSeq() to set the parameters for the control sequence
-///	  * call init()
-///	  * call optimize()
-///
-/// 
-///	Implemented Abort criteria:
-///		
-///	  * maximum number of iterations
-///	  *	time limit
-///	  * parameter bounds
-///	  
-///	Additional return reason:
-///	
-///
-///
-///
-///
-class CombinatorialOptimizer : public SimpleOptimizer
+#include "core/optimization/blackbox/Definitions_core_opt.h"
+
+namespace OPTIMIZATION
 {
-	public:
-
-		typedef  SimpleOptimizer SuperClass;
-		typedef  opt::matrix_type matrix_type;
-
-		///
-		///	Constructor.
-		///	@param loger : OptLogBase * to existing log class
-		///
-		CombinatorialOptimizer(OptLogBase *loger=NULL);
-
-		///
-		///	Copy constructor
-		///	@param opt .. optimizer to copy
-		///
-		CombinatorialOptimizer( const CombinatorialOptimizer &opt);
-
-		///
-		///	operator =
-		///
-		CombinatorialOptimizer & operator=(const CombinatorialOptimizer &opt);
-
-      		///
-		///	Destructor.
-         	///
-        	~CombinatorialOptimizer();
-
-		/*!
-			enumeration for the return reasons of an optimizer,
-			has all elements of the SuperClass optimizer
-		*/
-	
-		///
-		///	Do internal initializations
-		///
-		void init();
-		
-		///
-		///	start the optmization
-		///
-		int optimize();
-
-		///
-		///	set mode:
-		///	
-		///	  1 = simulated annealing
-		///	  2 = threshold acceptance
-		///	  3 = great deluge
-		///	  4 = stochastic relaxation
-		///	  5 = mix of simualted annealing and "constant" great deluge
-		///			(that means sa is used but there is a hard acceptance threshold.
-		///			 function values have to be below that threshold to have the 
-		///			 chance to be accepted. This can be used to do some kind of constraint
-		///			 handling when they are penalized enough..)
-		///		
-		///
-		bool setMode(int mode);
-		
-
-		///
-		///	set the parameters for the control sequence
-		///	where t_k = t_0 * alpha^k
-		///	
-		///	@param T0 >= 0
-		///	@param alpha > 0
-		///
-		///	@return true in case of success
-		///
-		bool setControlSeqParam(double T0, double alpha);
-
-		///*!
-		//	@param steplength double value that is used as variance to generate new points
-		//*/
-		//bool setSteplength(double steplength);
-
-		///
-		///	set threshold used in mode 5 as constant deluge control sequence
-		///	@param thres 
-		///
-		inline void setThreshold(double thres){m_threshold = thres;};
-
-		///
-		///	use fast contraction mode: the control sequence is used 
-		///	for the variance to generate new points
-		///
-		inline void setFastMode(bool fastmode){m_fast = fastmode;};
-
-	private:
-
-		///
-		///	generate new point
-		///
-		matrix_type generatePoint();
-
-		///
-		///	accept point
-		///
-		bool accepptPoint(double oldValue, double newValue);
-
-		///
-		///	algorithm mode
-		///
-		int m_mode;
-
-		///
-		///	fast mode point generation?
-		///
-		bool m_fast;
-
-		///
-		///	control sequence Tk= T_0 * m_alpha^m_numIter
-		///
-		double m_T0;
-
-		///
-		///	control sequence Tk= T_0 * m_alpha^m_numIter
-		///
-		double m_Tk;
-
-		///
-		///	control sequence Tk= T_0 * m_alpha^m_numIter
-		///
-		double m_alpha;
-
-		///
-		///	Threshold for mode nr. 5
-		///
-		double m_threshold;
-
-		///
-		///	steplength used as variance to generate new points
-		///
-		double m_stepLength;
-		
-};
+
+  ///
+  ///	class for the Combinatorial optimizers
+  ///
+  /// HowToUse:
+  /// 
+  ///	  * use setMode to specify which algorithm to use (simulated annealing, etc.)
+  ///	  * use setStepLength to set the variance for the random number generation 
+  ///	  * use setParameters() to set the start point
+  ///	  * use setFastMode() to let the random number generation contract with the control sequence
+  ///	  * use setControlSeqParam() to set the parameters for the control sequence
+  ///	  * call init()
+  ///	  * call optimize()
+  ///
+  /// 
+  ///	Implemented Abort criteria:
+  ///		
+  ///	  * maximum number of iterations
+  ///	  *	time limit
+  ///	  * parameter bounds
+  ///	  
+  ///	Additional return reason:
+  ///	
+  ///
+  ///
+  ///
+  ///
+  class CombinatorialOptimizer : public SimpleOptimizer
+  {
+    public:
+
+      typedef  SimpleOptimizer SuperClass;
+      typedef  OPTIMIZATION::matrix_type matrix_type;
+
+      ///
+      ///	Constructor.
+      ///	@param loger : OptLogBase * to existing log class
+      ///
+      CombinatorialOptimizer(OptLogBase *loger=NULL);
+
+      ///
+      ///	Copy constructor
+      ///	@param opt .. optimizer to copy
+      ///
+      CombinatorialOptimizer( const CombinatorialOptimizer &opt);
+
+      ///
+      ///	operator =
+      ///
+      CombinatorialOptimizer & operator=(const CombinatorialOptimizer &opt);
+
+            ///
+      ///	Destructor.
+            ///
+            ~CombinatorialOptimizer();
+
+      /*!
+        enumeration for the return reasons of an optimizer,
+        has all elements of the SuperClass optimizer
+      */
+    
+      ///
+      ///	Do internal initializations
+      ///
+      void init();
+      
+      ///
+      ///	start the optmization
+      ///	start the optimization
+      int optimize();
+
+      ///
+      ///	set mode:
+      ///	
+      ///	  1 = simulated annealing
+      ///	  2 = threshold acceptance
+      ///	  3 = great deluge
+      ///	  4 = stochastic relaxation
+      ///	  5 = mix of simulated annealing and "constant" great deluge
+      ///			(i.e. simulated annealing is used, but with a hard acceptance threshold:
+      ///			 function values have to be below that threshold to have a
+      ///			 chance of being accepted. This can be used for a simple kind of
+      ///			 constraint handling when violations are penalized strongly enough.)
+      ///		
+      ///
+      bool setMode(int mode);
+      
+
+      ///
+      ///	set the parameters for the control sequence
+      ///	where t_k = t_0 * alpha^k
+      ///	
+      ///	@param T0 >= 0
+      ///	@param alpha in [0,1]
+      ///
+      ///	@return true in case of success
+      ///
+      bool setControlSeqParam(double T0, double alpha);
+
+      ///*!
+      //	@param steplength double value that is used as variance to generate new points
+      //*/
+      //bool setSteplength(double steplength);
+
+      ///
+      ///	set threshold used in mode 5 as constant deluge control sequence
+      ///	@param thres 
+      ///
+      inline void setThreshold(double thres){m_threshold = thres;};
+
+      ///
+      ///	use fast contraction mode: the control sequence is used 
+      ///	for the variance to generate new points
+      ///
+      inline void setFastMode(bool fastmode){m_fast = fastmode;};
+
+    private:
+
+      ///
+      ///	generate new point
+      ///
+      matrix_type generatePoint();
+
+      ///
+      ///	accept point
+      ///
+      bool accepptPoint(double oldValue, double newValue);
+
+      ///
+      ///	algorithm mode
+      ///
+      int m_mode;
+
+      ///
+      ///	fast mode point generation?
+      ///
+      bool m_fast;
+
+      ///
+      ///	control sequence Tk= T_0 * m_alpha^m_numIter
+      ///
+      double m_T0;
+
+      ///
+      ///	control sequence Tk= T_0 * m_alpha^m_numIter
+      ///
+      double m_Tk;
+
+      ///
+      ///	control sequence Tk= T_0 * m_alpha^m_numIter
+      ///
+      double m_alpha;
+
+      ///
+      ///	Threshold for mode nr. 5
+      ///
+      double m_threshold;
+
+      ///
+      ///	steplength used as variance to generate new points
+      ///
+      double m_stepLength;
+      
+  };//class
+}//namespace
 
 #endif
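
A minimal driver sketch following the HowToUse notes above; startPoint and
myCostFunction are hypothetical, and setCostFunction()/setMaxNumIter() are
assumed from the Optimizer base classes used throughout this repository:

    OPTIMIZATION::CombinatorialOptimizer opt;
    opt.setMode(1);                        // 1 = simulated annealing
    opt.setControlSeqParam(1.0, 0.95);     // T0 and alpha (constructor defaults)
    opt.setMaxNumIter(true, 1000);         // assumed base-class abort criterion
    opt.setParameters(startPoint);         // startPoint: an n x 1 matrix_type
    opt.setCostFunction(&myCostFunction);
    int returnReason = opt.optimize();     // optimize() calls init() internally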

+ 2 - 2
Constraints.cpp

@@ -29,7 +29,7 @@ Constraints::Constraints(unsigned int numOfParameters, unsigned int numOfConstra
 	m_weights = matrix_type(numOfConstraints,1);
 	for(int i=0; i < static_cast<int>(numOfConstraints); ++i)
 	{
-		m_weights[i][0] = 1;
+		m_weights(i,0) = 1;
 	}
 
 }
@@ -91,7 +91,7 @@ bool Constraints::setWeightVector(const matrix_type weights)
 	*/
 	for(unsigned int i= 0; i < m_numOfConstraints; i++)
 	{
-		if(weights[i][0] < 0)
+		if(weights(i,0) < 0)
 		{
 			return false;
 		}

+ 3 - 3
Constraints.h

@@ -10,8 +10,8 @@
 #define _CONSTRAINTS_H_
 
 
-#include "optimization/Opt_Namespace.h"
-namespace opt=optimization;
+#include "core/optimization/blackbox/Definitions_core_opt.h"
+namespace opt=OPTIMIZATION;
 
 /*!
     class Abstract base class for all constraints. 
@@ -20,7 +20,7 @@ namespace opt=optimization;
 class Constraints  
 {
 	public:
-		typedef opt::matrix_type matrix_type;
+		typedef OPTIMIZATION::matrix_type matrix_type;
 
 
 		/*!

+ 51 - 51
CostFunction_ndim_2ndOrder.cpp

@@ -8,9 +8,9 @@
 //////////////////////////////////////////////////////////////////////
 
 
-#include "core/optimization/blackbox/CostFunction_ndim_2ndOrder.h"
+#include "CostFunction_ndim_2ndOrder.h"
 
-using namespace opt;
+using namespace OPTIMIZATION;
 
 CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder() : SuperClass()
 {
@@ -19,44 +19,44 @@ CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder() : SuperClass()
 
 CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder(unsigned int dim)  : SuperClass(dim)
 {
-	m_hasAnalyticGradient = true;
-	m_hasAnalyticHessian = false; // not implemented
+  m_hasAnalyticGradient = true;
+  m_hasAnalyticHessian = false; // not implemented
 
-	m_A  = matrix_type(dim,dim);
-	m_bt = matrix_type(1,dim);
-	m_c	 = 0.0;
+  m_A  = matrix_type(dim,dim);
+  m_bt = matrix_type(1,dim);
+  m_c	 = 0.0;
 
 }
 
 CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder(const CostFunction_ndim_2ndOrder &func):SuperClass(func)
 {
-	m_A = func.m_A;
-	m_bt = func.m_bt;
-	m_c = func.m_c;
+  m_A = func.m_A;
+  m_bt = func.m_bt;
+  m_c = func.m_c;
 }
 
 CostFunction_ndim_2ndOrder & CostFunction_ndim_2ndOrder::operator=(const CostFunction_ndim_2ndOrder & func)
 {
-	/*
-			avoid self-copy
-	*/
-	if(this != &func)
-	{
-		
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(func);
-
-		/*
-			own values:
-		*/
-		m_A		= func.m_A;
-		m_bt	= func.m_bt;
-		m_c		= func.m_c;
-	}
-
-  	return *this;
+  /*
+      avoid self-copy
+  */
+  if(this != &func)
+  {
+    
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(func);
+
+    /*
+      own values:
+    */
+    m_A		= func.m_A;
+    m_bt	= func.m_bt;
+    m_c		= func.m_c;
+  }
+
+    return *this;
 }
 
 CostFunction_ndim_2ndOrder::~CostFunction_ndim_2ndOrder()
@@ -70,36 +70,36 @@ void CostFunction_ndim_2ndOrder::init()
 
 int CostFunction_ndim_2ndOrder::setAbc(const matrix_type &A,const matrix_type &b, double c)
 {
-	if(A.rows() == static_cast<int>(m_numOfParameters) && A.cols() == static_cast<int>(m_numOfParameters) &&
-	   b.rows() == static_cast<int>(m_numOfParameters) && b.cols() == 1)
-	{
-
-		m_A = A;
-		m_bt = !b;
-		m_c = c;
-
-		return 0;
-	}
-	else
-	{
-		return -1;
-	}
+  if(A.rows() == static_cast<int>(m_numOfParameters) && A.cols() == static_cast<int>(m_numOfParameters) &&
+    b.rows() == static_cast<int>(m_numOfParameters) && b.cols() == 1)
+  {
+
+    m_A = A;
+    m_bt = b.transpose();
+    m_c = c;
+
+    return 0;
+  }
+  else
+  {
+    return -1;
+  }
 }
 
 
 double CostFunction_ndim_2ndOrder::evaluate(const matrix_type &parameter)
 {
-	
-	//return ( !parameter * 0.5 * m_A * parameter + m_bt * parameter + m_c)(0,0);
-	return ( !parameter * 0.5 * m_A * parameter + m_bt * parameter)[0][0] + m_c;
-	
+  
+  //return ( !parameter * 0.5 * m_A * parameter + m_bt * parameter + m_c)(0,0);
+  return ( parameter.transpose() * 0.5 * m_A * parameter + m_bt * parameter)(0,0) + m_c;
+  
 }
 
 const matrix_type CostFunction_ndim_2ndOrder::getAnalyticGradient(const matrix_type &parameter)
 {
-	matrix_type tmp = parameter * 0;
+  matrix_type tmp = parameter * 0;
 
-	tmp = m_A * parameter + !m_bt;
+  tmp = m_A * parameter + m_bt.transpose();
 
-	return tmp;
+  return tmp;
 }
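
In formula form, evaluate() and getAnalyticGradient() above compute

    $$ f(x) = \tfrac{1}{2} x^\top A x + b^\top x + c,
       \qquad \nabla f(x) = A x + b $$

Note that the gradient expression assumes a symmetric A; for a general A the
gradient of the quadratic term would be \tfrac{1}{2}(A + A^\top) x.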

+ 92 - 89
CostFunction_ndim_2ndOrder.h

@@ -13,94 +13,97 @@
 
 #include "core/optimization/blackbox/CostFunction.h"
 
-class CostFunction_ndim_2ndOrder : public CostFunction
+namespace OPTIMIZATION
 {
-	public:
-
-		typedef CostFunction	SuperClass;
-		typedef SuperClass::matrix_type matrix_type;
-		
-		/*!
-			default constructor
-		*/
-		CostFunction_ndim_2ndOrder();
-
-        /*!
-            Constructor.
-			\param dim of parameter space
-         */
-		CostFunction_ndim_2ndOrder(unsigned int dim);
-        
-		/*!
-			Copy constructor
-			\param func the function to copy
-		*/
-		CostFunction_ndim_2ndOrder(const CostFunction_ndim_2ndOrder &func);
-
-		/*!
-			= operator
-		*/
-		CostFunction_ndim_2ndOrder &operator=(const CostFunction_ndim_2ndOrder & func);
-
-        /*!
-            Destructor.
-         */
-        ~CostFunction_ndim_2ndOrder();
-
-		/*!
-			initializations
-		*/
-		void init();
-
-
-		/*!
-			set A, b, and c
-			\param A
-			\param b
-			\param c
-
-			\return 0 in case of success.
-		*/
-		int setAbc(const matrix_type & A,const matrix_type &b, double c);
-
-		/*!
-			evaluate the costfunction
-			\param parameter to evaluate at 
-		*/
-		double evaluate(const matrix_type & parameter);
-
-
-		/*!
-			get the analytic Gradient 				
-		*/
-		const matrix_type getAnalyticGradient(const matrix_type &parameter);
-				
-
-	protected:
-
-		/*!
-			the matrix A of
-
-			f(x) = 0.5 * x^T * A * x + b^T * x + c 
-		
-		*/
-		matrix_type m_A;
-
-		/*!
-			the vector b^T of
-
-			f(x) = 0.5 * x^T * A * x + b^T * x + c 
-		*/
-		matrix_type m_bt;
-
-
-		/*
-			the value c of
-
-			f(x) = 0.5 * x^T * A * x + b^T * x + c 
-		*/
-		double m_c;
-
-};
-		
+
+  class CostFunction_ndim_2ndOrder : public CostFunction
+  {
+    public:
+
+      typedef CostFunction	SuperClass;
+      typedef SuperClass::matrix_type matrix_type;
+      
+      /*!
+        default constructor
+      */
+      CostFunction_ndim_2ndOrder();
+
+          /*!
+              Constructor.
+        \param dim of parameter space
+          */
+      CostFunction_ndim_2ndOrder(unsigned int dim);
+          
+      /*!
+        Copy constructor
+        \param func the function to copy
+      */
+      CostFunction_ndim_2ndOrder(const CostFunction_ndim_2ndOrder &func);
+
+      /*!
+        = operator
+      */
+      CostFunction_ndim_2ndOrder &operator=(const CostFunction_ndim_2ndOrder & func);
+
+          /*!
+              Destructor.
+          */
+          ~CostFunction_ndim_2ndOrder();
+
+      /*!
+        initializations
+      */
+      void init();
+
+
+      /*!
+        set A, b, and c
+        \param A
+        \param b
+        \param c
+
+        \return 0 in case of success.
+      */
+      int setAbc(const matrix_type & A,const matrix_type &b, double c);
+
+      /*!
+        evaluate the costfunction
+        \param parameter to evaluate at 
+      */
+      double evaluate(const matrix_type & parameter);
+
+
+      /*!
+        get the analytic Gradient 				
+      */
+      const matrix_type getAnalyticGradient(const matrix_type &parameter);
+          
+
+    protected:
+
+      /*!
+        the matrix A of
+
+        f(x) = 0.5 * x^T * A * x + b^T * x + c 
+      
+      */
+      matrix_type m_A;
+
+      /*!
+        the vector b^T of
+
+        f(x) = 0.5 * x^T * A * x + b^T * x + c 
+      */
+      matrix_type m_bt;
+
+
+      /*
+        the value c of
+
+        f(x) = 0.5 * x^T * A * x + b^T * x + c 
+      */
+      double m_c;
+
+  }; //class
+}//namespace
 #endif

+ 44 - 44
DerivativeBasedOptimizer.cpp

@@ -9,23 +9,23 @@
 
 #include "optimization/DerivativeBasedOptimizer.h"
 
-using namespace optimization;
+using namespace OPTIMIZATION;
 
 DerivativeBasedOptimizer::DerivativeBasedOptimizer( OptLogBase *loger): SuperClass(loger)
 {
-	m_analyticalGradients = true;
-	m_gradientTolActive = false;
-	m_gradientTol = 1.0e-3;
+  m_analyticalGradients = true;
+  m_gradientTolActive = false;
+  m_gradientTol = 1.0e-3;
 }
 
 
 DerivativeBasedOptimizer::DerivativeBasedOptimizer( const DerivativeBasedOptimizer &opt) : SuperClass(opt)
 {
-	m_analyticalGradients = opt.m_analyticalGradients;
-	m_gradientTolActive = opt.m_gradientTolActive;
-	m_gradientTol = opt.m_gradientTol;
+  m_analyticalGradients = opt.m_analyticalGradients;
+  m_gradientTolActive = opt.m_gradientTolActive;
+  m_gradientTol = opt.m_gradientTol;
 
-	m_gradient = opt.m_gradient;
+  m_gradient = opt.m_gradient;
 }
 
 DerivativeBasedOptimizer::~DerivativeBasedOptimizer()
@@ -34,68 +34,68 @@ DerivativeBasedOptimizer::~DerivativeBasedOptimizer()
 
 void DerivativeBasedOptimizer::setGradientTol(bool active, double norm)
 {
-	m_gradientTol = norm;
-	m_gradientTolActive = active;
+  m_gradientTol = norm;
+  m_gradientTolActive = active;
 }
 
 inline double DerivativeBasedOptimizer::getGradientTol()
 {
-	return m_gradientTol;
+  return m_gradientTol;
 }
 
 void DerivativeBasedOptimizer::init()
 {
-	SuperClass::init();
-	
-	m_gradient = matrix_type(m_numberOfParameters,1);
+  SuperClass::init();
+  
+  m_gradient = matrix_type(m_numberOfParameters,1);
 }
 
 void DerivativeBasedOptimizer::useAnalyticalGradients(bool useAnalyticalGradients)
 {
-	m_analyticalGradients = useAnalyticalGradients;
+  m_analyticalGradients = useAnalyticalGradients;
 }
 
 
 const matrix_type  DerivativeBasedOptimizer::getNumericalGradient(const matrix_type & x , const matrix_type & maskWidth)
 {
-	matrix_type grad(m_numberOfParameters,1);
-	matrix_type grid(m_numberOfParameters, 2 * m_numberOfParameters);
-
-	for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
-	{
-	    for(int j = 0 ; j< 2 * static_cast<int>(m_numberOfParameters);j++)
-	    {
-            grid[i][j] = x[i][0] + (( j == i*2   )? +maskWidth[i][0] : 0.0)
-                                 + (( j == i*2+1 )? -maskWidth[i][0] : 0.0);
-	    }
-	}
-	
-	matrix_type values = evaluateSetCostFunction(grid);
-		
-	for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
-	{
-        if( fabs(m_scales[i][0]) < 1e-5 || fabs(maskWidth[i][0]) < 1e-5 )
-	    {
-            grad[i][0] = 0;
+  matrix_type grad(m_numberOfParameters,1);
+  matrix_type grid(m_numberOfParameters, 2 * m_numberOfParameters);
+
+  for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
+  {
+      for(int j = 0 ; j< 2 * static_cast<int>(m_numberOfParameters);j++)
+      {
+            grid(i,j) = x(i,0) + (( j == i*2   )? +maskWidth(i,0) : 0.0)
+                                + (( j == i*2+1 )? -maskWidth(i,0) : 0.0);
+      }
+  }
+  
+  matrix_type values = evaluateSetCostFunction(grid);
+    
+  for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
+  {
+        if( fabs(m_scales(i,0)) < 1e-5 || fabs(maskWidth(i,0)) < 1e-5 )
+      {
+            grad(i,0) = 0;
             continue;
-	    }
+      }
         
-	    grad[i][0] = ( values[2*i][0] - values[2*i+1][0] )/( 2 * maskWidth[i][0]);
-		
-	}
-	
-	return grad;
+      grad(i,0) = ( values(2*i,0) - values(2*i+1,0) )/( 2 * maskWidth(i,0));
+    
+  }
+  
+  return grad;
 }
 
 const matrix_type  DerivativeBasedOptimizer::getAnalyticalGradient(const matrix_type & x)
 {
-	return (m_maximize == true) ? (m_costFunction->getAnalyticGradient(x) * (-1.0))
-						  :   (m_costFunction->getAnalyticGradient(x)) ;	
+  return (m_maximize == true) ? (m_costFunction->getAnalyticGradient(x) * (-1.0))
+              :   (m_costFunction->getAnalyticGradient(x)) ;	
 }
 
 
 const matrix_type DerivativeBasedOptimizer::getAnalyticalHessian(const matrix_type & x)
 {
-	return (m_maximize == true) ? (m_costFunction->getAnalyticHessian(x) * (-1.0))
-						  :   (m_costFunction->getAnalyticHessian(x)) ;	
+  return (m_maximize == true) ? (m_costFunction->getAnalyticHessian(x) * (-1.0))
+              :   (m_costFunction->getAnalyticHessian(x)) ;	
 }
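
getNumericalGradient above builds one forward and one backward probe per
parameter and evaluates them in a single batch; componentwise this is the
central difference

    $$ \left( \nabla f(x) \right)_i \approx
       \frac{ f(x + h_i e_i) - f(x - h_i e_i) }{ 2 h_i },
       \qquad h_i = \text{maskWidth}(i,0) $$

with e_i the i-th unit vector; components whose scale or mask width is below
1e-5 are set to zero, exactly as in the code.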

+ 115 - 110
DerivativeBasedOptimizer.h

@@ -11,117 +11,122 @@
 
 
 #include "core/optimization/blackbox/SimpleOptimizer.h"
+#include "AdaptiveDirectionRandomSearchOptimizer.h"
 
-/*!
-	class Abstract base class of all derivative based optimizers.
- */
-class DerivativeBasedOptimizer : public SimpleOptimizer
+namespace OPTIMIZATION
 {
-public:
-		typedef  SimpleOptimizer	SuperClass;	
-
-		///
-		///	Constructor.
-		///	
-		///	\param loger : OptLogBase * to existing log class
-		///
-		DerivativeBasedOptimizer(OptLogBase *loger=NULL);
-
-		///
-		///	Copy constructor
-		///	\param opt .. optimizer to copy
-		///
-		DerivativeBasedOptimizer( const DerivativeBasedOptimizer &opt);
-
-	        ///
-		///	Destructor.
-	        ///
-		virtual ~DerivativeBasedOptimizer();
-
-		///
-		///	enumeration for the return reasons of an optimizer,
-		///	has all elements of the SuperClass optimizer
-		///
-		enum {	SUCCESS_GRADIENTTOL = _to_continue_,
-				_to_continue_
-		};
-
-
-		///
-		///	\brief Set gradient tolerance abort criteria
-		///	
-		///	Set parameter tolerance abort criteria. While iterating, if the gradientnorm gets
-		///	below the given threshold. The optimization stops and returns SUCCESS if 
-		///	active is 'true'
-		///
-		///	\param active : bool to activate the criteria (true == active..)
-		///	\param norm : representing the threshold
-		///
-		void setGradientTol(bool active, double norm);
-		
-		///
-		///	Get the gradient tolerance abort criteria
-		///	\return double representing the threshold
-		///
-		inline double getGradientTol();
-
-		///
-		///	Get numerical Gradient on position x; use central difference with a maskwitdh of maskWidth.
-		///	
-		///	  grad(f(x))_i \approx 
-		///	  {
-		///						[  f( x + (0, ... ,0,maskWidth(i,0),0, ... ,0 ) ) 
-		///						 - f( x - (0, ... ,0,maskWidth(i,0),0, ... ,0 ) )]
-		///						 / (2 * maskWidth(i,0))
-		///	  }
-		///
-		///	  \forall i \in [1, ... ,m_numberOfParameters]
-		///				
-		const optimization::matrix_type  getNumericalGradient(const optimization::matrix_type & x , const optimization::matrix_type & maskWidth);
-
-		///
-		///	Get the anylytical Gradient of the costfunction (if available) sign already inverted for maximization.
-		///
-		const optimization::matrix_type  getAnalyticalGradient(const optimization::matrix_type & x);
-		
-		///
-		///	UseAnalyticalGradients, if possible
-		///
-		void useAnalyticalGradients(bool useAnalyticalGradients);
-
-		///
-		///	Get the analytical Hessian of the costfunction (if available )
-		///	sign already inverted for maximization
-		///
-		const optimization::matrix_type getAnalyticalHessian(const optimization::matrix_type & x);
-
-		
-protected:
-		///
-		///	initialize
-		///
-		void init();
-
-		///
-		///	the gradient
-		///
-		optimization::matrix_type m_gradient;
-		
-		///
-		///	gradient tolerance threshold
-		///
-		double m_gradientTol;
-
-		///
-		///	gradient tolerance active
-		///
-		bool m_gradientTolActive;
-
-		///
-		///	use numerical or analytical Gradient Computation
-		///
-		bool m_analyticalGradients;
-	
-};
+
+  /*!
+    class Abstract base class of all derivative based optimizers.
+  */
+  class DerivativeBasedOptimizer : public SimpleOptimizer
+  {
+  public:
+      typedef  SimpleOptimizer	SuperClass;	
+
+      ///
+      ///	Constructor.
+      ///	
+      ///	\param loger : OptLogBase * to existing log class
+      ///
+      DerivativeBasedOptimizer(OptLogBase *loger=NULL);
+
+      ///
+      ///	Copy constructor
+      ///	\param opt .. optimizer to copy
+      ///
+      DerivativeBasedOptimizer( const DerivativeBasedOptimizer &opt);
+
+            ///
+      ///	Destructor.
+            ///
+      virtual ~DerivativeBasedOptimizer();
+
+      ///
+      ///	enumeration for the return reasons of an optimizer,
+      ///	has all elements of the SuperClass optimizer
+      ///
+      enum {	SUCCESS_GRADIENTTOL = _to_continue_,
+          _to_continue_
+      };
+
+
+      ///
+      ///	\brief Set gradient tolerance abort criteria
+      ///	
+      ///	Set parameter tolerance abort criteria. While iterating, if the gradientnorm gets
+      ///	below the given threshold. The optimization stops and returns SUCCESS if 
+      ///	active is 'true'
+      ///
+      ///	\param active : bool to activate the criteria (true == active..)
+      ///	\param norm : representing the threshold
+      ///
+      void setGradientTol(bool active, double norm);
+      
+      ///
+      ///	Get the gradient tolerance abort criteria
+      ///	\return double representing the threshold
+      ///
+      inline double getGradientTol();
+
+      ///
+      ///	Get the numerical gradient at position x; use central differences with a mask width of maskWidth.
+      ///	
+      ///	  grad(f(x))_i \approx 
+      ///	  {
+      ///						[  f( x + (0, ... ,0,maskWidth(i,0),0, ... ,0 ) ) 
+      ///						 - f( x - (0, ... ,0,maskWidth(i,0),0, ... ,0 ) )]
+      ///						 / (2 * maskWidth(i,0))
+      ///	  }
+      ///
+      ///	  \forall i \in [1, ... ,m_numberOfParameters]
+      ///				
+      const OPTIMIZATION::matrix_type  getNumericalGradient(const OPTIMIZATION::matrix_type & x , const OPTIMIZATION::matrix_type & maskWidth);
+
+      ///
+      ///	Get the analytical gradient of the cost function (if available); sign already inverted for maximization.
+      ///
+      const OPTIMIZATION::matrix_type  getAnalyticalGradient(const OPTIMIZATION::matrix_type & x);
+      
+      ///
+      ///	UseAnalyticalGradients, if possible
+      ///
+      void useAnalyticalGradients(bool useAnalyticalGradients);
+
+      ///
+      ///	Get the analytical Hessian of the costfunction (if available )
+      ///	sign already inverted for maximization
+      ///
+      const OPTIMIZATION::matrix_type getAnalyticalHessian(const OPTIMIZATION::matrix_type & x);
+
+      
+  protected:
+      ///
+      ///	initialize
+      ///
+      void init();
+
+      ///
+      ///	the gradient
+      ///
+      OPTIMIZATION::matrix_type m_gradient;
+      
+      ///
+      ///	gradient tolerance threshold
+      ///
+      double m_gradientTol;
+
+      ///
+      ///	gradient tolerance active
+      ///
+      bool m_gradientTolActive;
+
+      ///
+      ///	use numerical or analytical Gradient Computation
+      ///
+      bool m_analyticalGradients;
+    
+  };//class
+}//namespace
 
 #endif
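
The central-difference rule documented for getNumericalGradient() above translates into a short sketch like the following: a minimal standalone version using std::vector in place of OPTIMIZATION::matrix_type. The function and parameter names are illustrative, not part of this library.

#include <vector>
#include <functional>

// grad(f(x))_i  ~=  [ f(x + h_i e_i) - f(x - h_i e_i) ] / (2 h_i)
std::vector<double> centralDifferenceGradient(
    const std::function<double(const std::vector<double>&)> &f,
    std::vector<double> x,
    const std::vector<double> &maskWidth)
{
  std::vector<double> grad(x.size(), 0.0);
  for (std::size_t i = 0; i < x.size(); ++i)
  {
    const double xi = x[i];
    x[i] = xi + maskWidth[i];          // f( x + maskWidth(i,0) * e_i )
    const double fPlus = f(x);
    x[i] = xi - maskWidth[i];          // f( x - maskWidth(i,0) * e_i )
    const double fMinus = f(x);
    x[i] = xi;                         // restore the i-th coordinate
    grad[i] = (fPlus - fMinus) / (2.0 * maskWidth[i]);
  }
  return grad;
}

Note that this costs two cost-function evaluations per parameter, which is why the analytical gradient is used whenever hasAnalyticGradient() is true.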

+ 36 - 34
EmptyLog.h

@@ -13,41 +13,43 @@
 #include <sstream>
 #include "core/optimization/blackbox/OptLogBase.h"
 
-
-/*!
-    base class for all log classes
-*/
-class EmptyLog : public OptLogBase
+namespace OPTIMIZATION
 {
-	public:
-		
-        /*!
-            Constructor.
-		*/
-		EmptyLog(){};
-        
-        /*!
-            Destructor.
-         */
-        virtual ~EmptyLog(){};
-		
-		
-
-	protected:
-
-		/**! Write error message to an output device (file, stdio, etc.)
-		 */
-		virtual void writeLogError(const char* szMessage){};
-
-		/**! Write warning message to an output device (file, stdio, etc.)
-		 */
-		virtual void writeLogWarning(const char* szMessage){};
-
-		/**! Write trace message to an output device (file, stdio, etc.)
-		 */
-		virtual void writeLogTrace(const char* szMessage) {};
-	
-};
+  /*!
+      log class that simply discards all messages (null logger)
+  */
+  class EmptyLog : public OptLogBase
+  {
+    public:
+      
+      /*!
+          Constructor.
+      */
+      EmptyLog(){};
+      
+      /*!
+          Destructor.
+      */
+      virtual ~EmptyLog(){};
+      
+      
+
+    protected:
+
+      /**! Write error message to an output device (file, stdio, etc.)
+      */
+      virtual void writeLogError(const char* szMessage){};
+
+      /**! Write warning message to an output device (file, stdio, etc.)
+      */
+      virtual void writeLogWarning(const char* szMessage){};
+
+      /**! Write trace message to an output device (file, stdio, etc.)
+      */
+      virtual void writeLogTrace(const char* szMessage) {};
+    
+  }; //class
+}//namespace
 
 
 

+ 1 - 0
FileLog.cpp

@@ -10,6 +10,7 @@
 #include <iostream>
 
 using namespace std;
+using namespace OPTIMIZATION;
 
 FileLog::FileLog( std::string file)
 {

+ 41 - 38
FileLog.h

@@ -10,46 +10,49 @@
 #include <fstream>
 #include "core/optimization/blackbox/OptLogBase.h"
 
-class FileLog : public OptLogBase
+namespace OPTIMIZATION
 {
 
-	public:
-	
-	/*!
-		default
-	*/
-	FileLog(){};
-
-	/*!
-		@param file string with the filename to log to
-	*/
-	//FileLog( string file);
-	FileLog( std::string file);
-
-	/*!
-		destructor
-	*/
-	virtual ~FileLog();
-
-	/**! Write error message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogError(const char* szMessage);
-
-	/**! Write warning message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogWarning(const char* szMessage);
-
-	/**! Write trace message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogTrace(const char* szMessage);
-
-	private:
-	
-	std::ofstream m_stream;
-
-	void toFile(const char* szMessage);
-};
-
+  class FileLog : public OptLogBase
+  {
+
+    public:
+    
+    /*!
+      default
+    */
+    FileLog(){};
+
+    /*!
+      @param file string with the filename to log to
+    */
+    //FileLog( string file);
+    FileLog( std::string file);
+
+    /*!
+      destructor
+    */
+    virtual ~FileLog();
+
+    /**! Write error message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogError(const char* szMessage);
+
+    /**! Write warning message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogWarning(const char* szMessage);
+
+    /**! Write trace message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogTrace(const char* szMessage);
+
+    private:
+    
+    std::ofstream m_stream;
+
+    void toFile(const char* szMessage);
+  };//class
+}//namespace
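
For orientation, a minimal usage sketch of the file logger; the include path and log file name below are assumptions, not taken from this commit:

#include "optimization/FileLog.h"   // include path assumed

int main()
{
  OPTIMIZATION::FileLog log("optimization.log");  // opens m_stream on the given file
  log.logTrace("starting optimization ...\n");    // logTrace() comes from OptLogBase
  return 0;
}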
 
 
 

+ 96 - 94
GoldenCutLineSearcher.cpp

@@ -8,46 +8,48 @@
 
 #include "optimization/GoldenCutLineSearcher.h"
 
+using namespace OPTIMIZATION;
+
 GoldenCutLineSearcher::GoldenCutLineSearcher() : m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
 {
-	m_c = 0.0;
-	m_d = 10.0;
-	m_eps = 0.1;
+  m_c = 0.0;
+  m_d = 10.0;
+  m_eps = 0.1;
 }
 
 GoldenCutLineSearcher::GoldenCutLineSearcher(CostFunction *costFunc,OptLogBase* loger) : SuperClass(costFunc,loger),m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
 {
-	m_c = 0.0;
-	m_d = 10.0;
-	m_eps = 0.1;
+  m_c = 0.0;
+  m_d = 10.0;
+  m_eps = 0.1;
 }
 
 GoldenCutLineSearcher::GoldenCutLineSearcher(const GoldenCutLineSearcher &lin) : SuperClass(lin),m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
 {
-	m_c = lin.m_c;
-	m_d = lin.m_d;
-	m_eps = lin.m_eps;
+  m_c = lin.m_c;
+  m_d = lin.m_d;
+  m_eps = lin.m_eps;
 }
 
 GoldenCutLineSearcher & GoldenCutLineSearcher::operator =(const GoldenCutLineSearcher &lin)
 {
-	/*
-			avoid self-copy
-	*/
-	if(this != &lin)
-	{
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(lin);
-
-		m_c = lin.m_c;
-		m_d = lin.m_d;
-		m_eps = lin.m_eps;
-	
-	}
-
-	return *this;
+  /*
+      avoid self-copy
+  */
+  if(this != &lin)
+  {
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(lin);
+
+    m_c = lin.m_c;
+    m_d = lin.m_d;
+    m_eps = lin.m_eps;
+  
+  }
+
+  return *this;
 
 }
 
@@ -57,82 +59,82 @@ GoldenCutLineSearcher::~GoldenCutLineSearcher()
 
 bool GoldenCutLineSearcher::setBounds(double c, double d)
 {
-	if (c >= d)
-	{
-		return false;
-	}
+  if (c >= d)
+  {
+    return false;
+  }
 
-	m_c = c;
-	m_d = d;
+  m_c = c;
+  m_d = d;
 
-	return true;
+  return true;
 }
 
 bool GoldenCutLineSearcher::setEps(double eps)
 {
-	if(eps < 0)
-	{
-		return false;
-	}
-	
-	m_eps = eps;
-	return true;
+  if(eps < 0)
+  {
+    return false;
+  }
+  
+  m_eps = eps;
+  return true;
 }
 
 double GoldenCutLineSearcher::optimize()
 {
-	double dk = m_d;
-	double ck = m_c;
-	double vk=0.0;
-	double wk=0.0;
-
-	bool vk_known = false;
-	bool wk_known = false;
-
-	double phivk=0.0;
-	double phiwk=0.0;
-
-	bool abort = false;
-
-	while(abort == false)
-	{
-		if((dk - ck) <= m_eps)
-		{
-			abort = true;
-			continue;
-		}
-
-		if(vk_known == false)
-		{
-			vk = ck + m_fac_a * (dk - ck);
-			phivk = evaluateSub(vk);
-		}
-		vk_known = false;
-
-		if(wk_known == false)
-		{
-			wk = ck + m_fac_b * (dk - ck);
-			phiwk = evaluateSub(wk);
-		}
-		wk_known = false;
-		
-		if(phivk < phiwk)
-		{
-			dk = wk;
-			wk = vk;
-			phiwk = phivk;
-			wk_known = true;
-		}
-		else
-		{
-			ck = vk;
-			vk = wk;
-			phivk = phiwk;
-			vk_known = true;
-		}
-
-		
-	}
-
-	return (ck + dk)/2;;
+  double dk = m_d;
+  double ck = m_c;
+  double vk=0.0;
+  double wk=0.0;
+
+  bool vk_known = false;
+  bool wk_known = false;
+
+  double phivk=0.0;
+  double phiwk=0.0;
+
+  bool abort = false;
+
+  while(abort == false)
+  {
+    if((dk - ck) <= m_eps)
+    {
+      abort = true;
+      continue;
+    }
+
+    if(vk_known == false)
+    {
+      vk = ck + m_fac_a * (dk - ck);
+      phivk = evaluateSub(vk);
+    }
+    vk_known = false;
+
+    if(wk_known == false)
+    {
+      wk = ck + m_fac_b * (dk - ck);
+      phiwk = evaluateSub(wk);
+    }
+    wk_known = false;
+    
+    if(phivk < phiwk)
+    {
+      dk = wk;
+      wk = vk;
+      phiwk = phivk;
+      wk_known = true;
+    }
+    else
+    {
+      ck = vk;
+      vk = wk;
+      phivk = phiwk;
+      vk_known = true;
+    }
+
+    
+  }
+
+  return (ck + dk)/2;
 }
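
A side note on the loop above: each pass keeps the better of the two probes vk and wk and shrinks the bracket [ck, dk] by the golden factor, so the number of iterations until (dk - ck) <= m_eps can be estimated in closed form (a rough estimate, assuming exact arithmetic):

    d_k - c_k = \rho^k \, (d_0 - c_0), \qquad \rho \approx 0.618
    \quad\Rightarrow\quad
    k \approx \left\lceil \frac{\ln\bigl(\varepsilon / (d_0 - c_0)\bigr)}{\ln \rho} \right\rceil

Thanks to the vk_known/wk_known bookkeeping, only one new cost-function evaluation is needed per iteration.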

+ 87 - 83
GoldenCutLineSearcher.h

@@ -10,92 +10,96 @@
 #define _GOLDEN_CUT_LINE_SEARCHER_H_
 
 #include "optimization/LineSearcher.h"
-/*!
-	class for the golden cut line search
-
-   HowToUse:
-	
-	  * use setX0() to set the offset
-	  * use setH0()	to set the search direction
-	  * use setEps() to set the abort criterion
-	  * use setBounds() to set a different search bound for lambda than [0,10]
-	  * call init()
-	  * call optimize() (returns lambda)
- */
-class  GoldenCutLineSearcher : public LineSearcher
+
+namespace OPTIMIZATION
 {
-public:
-
-	typedef  LineSearcher	SuperClass;
-	typedef  SuperClass::matrix_type matrix_type;
-	
-
-	/*!
-		default constructor
-	*/
-	GoldenCutLineSearcher();
-
-	/*!
-		constructor
-	*/
-	GoldenCutLineSearcher(CostFunction *costFunc, OptLogBase *loger);
-
-	/*!
-		Copy constructor
-		\param opt .. optimizer to copy
-	*/
-	GoldenCutLineSearcher( const GoldenCutLineSearcher &lin);
-	
-	/*!
-		operator =
-	*/
-	GoldenCutLineSearcher & operator=(const GoldenCutLineSearcher &lin);
+  /*!
+    class for the golden cut line search
+
+    HowToUse:
+    
+      * use setX0() to set the offset
+      * use setH0()	to set the search direction
+      * use setEps() to set the abort criterion
+      * use setBounds() to set a different search bound for lambda than [0,10]
+      * call init()
+      * call optimize() (returns lambda)
+  */
+  class  GoldenCutLineSearcher : public LineSearcher
+  {
+  public:
+
+    typedef  LineSearcher	SuperClass;
+    typedef  SuperClass::matrix_type matrix_type;
+    
+
+    /*!
+      default constructor
+    */
+    GoldenCutLineSearcher();
+
+    /*!
+      constructor
+    */
+    GoldenCutLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+    /*!
+      Copy constructor
+      \param opt .. optimizer to copy
+    */
+    GoldenCutLineSearcher( const GoldenCutLineSearcher &lin);
+    
+    /*!
+      operator =
+    */
+    GoldenCutLineSearcher & operator=(const GoldenCutLineSearcher &lin);
+
+      /*!
+      Destructor.
+      */
+      virtual ~GoldenCutLineSearcher();
 
     /*!
-		Destructor.
-     */
-    virtual ~GoldenCutLineSearcher();
-
-	/*!
-		set the bound parameters
-	*/
-	bool setBounds(double c, double d);
-
-	/*!
-		set the abort threshold
-	*/
-	bool setEps(double eps);
-
-	/*!
-		optimize
-		returns a double..
-	*/
-	double optimize();
-
-protected:
-
-	/*!
-		approximation of the golden cut ratio
-	*/
-	const double m_fac_a; // = 0.38196601125010515179541316563436;
-	
-	/*!
-		approximation of the golden cut ratio 
-	*/
-	const double m_fac_b; // = 0,61803398874989484820458683436564;
-
-	/*!
-		the bounds
-	*/
-	double m_c, m_d;
-
-	/*!
-		the abort criteria
-	*/
-	double m_eps;
-
-
-};
+      set the bound parameters
+    */
+    bool setBounds(double c, double d);
+
+    /*!
+      set the abort threshold
+    */
+    bool setEps(double eps);
+
+    /*!
+      optimize
+      returns a double..
+    */
+    double optimize();
+
+  protected:
+
+    /*!
+      approximation of the golden cut ratio
+    */
+    const double m_fac_a; // = 0.38196601125010515179541316563436;
+    
+    /*!
+      approximation of the golden cut ratio 
+    */
+    const double m_fac_b; // = 0.61803398874989484820458683436564;
+
+    /*!
+      the bounds
+    */
+    double m_c, m_d;
+
+    /*!
+      the abort criteria
+    */
+    double m_eps;
+
+
+  }; //class
+}//namespace
 
 #endif
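
A sketch of the HowToUse list above; the cost function pointer is assumed to be created elsewhere, and the init() step from the list is omitted here since no such method is declared in the headers shown in this commit:

#include "optimization/GoldenCutLineSearcher.h"

double lineSearch(OPTIMIZATION::CostFunction *costFunc,
                  const OPTIMIZATION::matrix_type &x0,
                  const OPTIMIZATION::matrix_type &h0)
{
  OPTIMIZATION::GoldenCutLineSearcher searcher(costFunc, NULL);
  searcher.setX0(x0);             // offset
  searcher.setH0(h0);             // search direction
  searcher.setBounds(0.0, 10.0);  // bracket for lambda (matches the defaults)
  searcher.setEps(1e-3);          // abort once the bracket is this small
  return searcher.optimize();     // returns the step length lambda
}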
 

+ 336 - 336
GradientDescentOptimizer.cpp

@@ -9,20 +9,20 @@
 
 #include "GradientDescentOptimizer.h"
 
-using namespace optimization;
+using namespace OPTIMIZATION;
 
 GradientDescentOptimizer::GradientDescentOptimizer(OptLogBase *loger)
-	: SuperClass(loger)
+  : SuperClass(loger)
 {
-	m_stepLength = -1;
+  m_stepLength = -1;
     m_MinimalGradientMagnitude = 1e-7;
 }
 
 
 GradientDescentOptimizer::GradientDescentOptimizer( const GradientDescentOptimizer &opt) : SuperClass(opt)
 {
-	m_stepSize = opt.m_stepSize;
-	m_stepLength = opt.m_stepLength;
+  m_stepSize = opt.m_stepSize;
+  m_stepLength = opt.m_stepLength;
     m_MinimalGradientMagnitude = opt.m_MinimalGradientMagnitude;
 }
 
@@ -32,217 +32,217 @@ GradientDescentOptimizer::~GradientDescentOptimizer()
 
 void GradientDescentOptimizer::setStepSize(const matrix_type & stepSize)
 {
-	m_stepSize = stepSize;
-	m_stepLength = -m_stepSize.MaxVal();
+  m_stepSize = stepSize;
+  m_stepLength = -m_stepSize.Max();
 }
 
 void GradientDescentOptimizer::init()
 {
-	SuperClass::init();
+  SuperClass::init();
 
-	if (m_stepSize.rows() != static_cast<int>(m_numberOfParameters))
-	{
-		m_stepSize = m_scales; 
+  if (m_stepSize.rows() != static_cast<int>(m_numberOfParameters))
+  {
+    m_stepSize = m_scales; 
 
         std::cout << "GradientDescentOptimizer::init(): warning: using optimizer scales as steps, since no steps were specified! Consider, if this is desired behavoir!" << std::endl;
 
-	}
-	else
-	{
-		// check for nonzero stepsize
-		bool tmp=false;
-		for(int i = 0; i < static_cast<int>(m_numberOfParameters); ++i)
-		{
-			if(m_stepSize[i][0] > 0 )
-			{
-				tmp=true;
-			}
-		}
-		if(tmp==false)
-		{
-			std::cerr <<  "GradientDescentOptimizer::init  all stepsizes zero - check your code!"<< std::endl;
-			exit(1);
-		}
-	}
-
-	
-	m_numIter = 0;
-
-	m_analyticalGradients = m_costFunction->hasAnalyticGradient();
-	
-	/*
-		FIXME: WACKER: compute first value and first gradient!
-	*/
-	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
-
-	m_gradient = (m_analyticalGradients == true && 
-				(m_costFunction->hasAnalyticGradient() == true) ) ?
-							getAnalyticalGradient(m_parameters) : 
-							getNumericalGradient(m_parameters, m_stepSize);
+  }
+  else
+  {
+    // check for nonzero stepsize
+    bool tmp=false;
+    for(int i = 0; i < static_cast<int>(m_numberOfParameters); ++i)
+    {
+      if(m_stepSize(i,0) > 0 )
+      {
+        tmp=true;
+      }
+    }
+    if(tmp==false)
+    {
+      std::cerr <<  "GradientDescentOptimizer::init  all stepsizes zero - check your code!"<< std::endl;
+      exit(1);
+    }
+  }
+
+  
+  m_numIter = 0;
+
+  m_analyticalGradients = m_costFunction->hasAnalyticGradient();
+  
+  /*
+    FIXME: WACKER: compute first value and first gradient!
+  */
+  m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+  m_gradient = (m_analyticalGradients == true && 
+        (m_costFunction->hasAnalyticGradient() == true) ) ?
+              getAnalyticalGradient(m_parameters) : 
+              getNumericalGradient(m_parameters, m_stepSize);
 
 
 }
 
 int GradientDescentOptimizer::optimize()
 {
- 	/*
-		call init
-	*/
-	init();
+  /*
+    call init
+  */
+  init();
 
 
-	if(m_loger)
-		m_loger->logTrace("GradientDescentOptimizer: starting optimization ... \n");
+  if(m_loger)
+    m_loger->logTrace("GradientDescentOptimizer: starting optimization ... \n");
 
 
 
-	/*
-		start time criteria
-	*/
-	m_startTime = clock();
+  /*
+    start time criteria
+  */
+  m_startTime = clock();
 
 
-	bool abort = false;
+  bool abort = false;
 //	double stepLength = -1.0;
-	double cosAngle = 0.0;
-	double downScaleFactor = 0.5;
-
-	matrix_type stepSize = m_stepSize;
-	double stepLength = m_stepLength;
-	
-	/*
-		check abort criteria for gradient
-	*/	
-	if(m_gradientTolActive)
-	{
-		if(m_gradient.Norm(0) < m_gradientTol){
-			m_returnReason = SUCCESS_GRADIENTTOL;
-			abort = true;
-
-			if(m_verbose == true)
-			{
-				std::cout << "# Gradient Descenct :: aborting because of GradientTol" << std::endl;
-			}
-		}
-	}	
-
-
-	/* do the optimization */
-	while(abort == false)
-	{
-		/*
-			increase iteration counter
-		*/
-		m_numIter++;
-		
-		if(m_verbose == true)
-		{
-			std::cout << "# Gradient Descenct :: Iteration: " << m_numIter << " : current parameters :\n ";
-			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-			{
-				std::cout<< m_parameters[r][0] << " ";
-			}
-			std::cout << m_currentCostFunctionValue << std::endl;
-
-			std::cout << " current gradient :\n ";
-			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+  double cosAngle = 0.0;
+  double downScaleFactor = 0.5;
+
+  matrix_type stepSize = m_stepSize;
+  double stepLength = m_stepLength;
+  
+  /*
+    check abort criteria for gradient
+  */	
+  if(m_gradientTolActive)
+  {
+    if(m_gradient.frobeniusNorm() < m_gradientTol){
+      m_returnReason = SUCCESS_GRADIENTTOL;
+      abort = true;
+
+      if(m_verbose == true)
+      {
+        std::cout << "# Gradient Descenct :: aborting because of GradientTol" << std::endl;
+      }
+    }
+  }	
+
+
+  /* do the optimization */
+  while(abort == false)
+  {
+    /*
+      increase iteration counter
+    */
+    m_numIter++;
+    
+    if(m_verbose == true)
+    {
+      std::cout << "# Gradient Descenct :: Iteration: " << m_numIter << " : current parameters :\n ";
+      for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+      {
+        std::cout<< m_parameters(r,0) << " ";
+      }
+      std::cout << m_currentCostFunctionValue << std::endl;
+
+      std::cout << " current gradient :\n ";
+      for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
             {
-                std::cout<< m_gradient[r][0] << " ";
+                std::cout<< m_gradient(r,0) << " ";
             }
             std::cout << std::endl;
 
             std::cout << " current stepsize :\n ";
             for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
             {
-                std::cout<< stepSize[r][0] << " ";
+                std::cout<< stepSize(r,0) << " ";
             }
             std::cout << std::endl;
 
-		}
-
-		/*
-			Check iteration limit
-		*/
-		if(m_maxNumIterActive)
-		{
-			if(m_numIter >= m_maxNumIter)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "# Gradient Descenct :: aborting because of numIter" << std::endl;
-				}
-
-
-				/* set according return status and return */
-				m_returnReason = SUCCESS_MAXITER;
-				abort = true;
-				continue;
-			}
-		}
-
-
-		/*
-			store last results for comparison
-		*/
-		matrix_type oldParams = m_parameters;
-		matrix_type oldGradient = m_gradient;
-		double oldFuncValue = m_currentCostFunctionValue;
-		
-
-		/*
-			get gradient
-		*/
-		m_gradient = (m_analyticalGradients == true && 
-				(m_costFunction->hasAnalyticGradient() == true) ) ?
-							getAnalyticalGradient(m_parameters) : 
-							getNumericalGradient(m_parameters, stepSize);
-
-		/*
-			weight with scalings
-		*/
-		//m_gradient = m_gradient.ElMul(m_scales);
-		for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
-		{
-			m_gradient[k][0] *= m_scales[k][0];
-		}
-
-		/*
-			check gradient tol
-		*/
-		if(m_gradientTolActive)
-		{
-			if(m_gradient.Norm(0) < m_gradientTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "# Gradient Descenct :: aborting because of GradientTol" << std::endl;
-				}
-
-				m_returnReason = SUCCESS_GRADIENTTOL;
-				abort = true;
-				continue;
-			}
-		}
-
-		/*
-			set gradient length to 1, if length of gradient is too small .. 
-			return ERROR_COMPUTATION_UNSTABLE
-			(this can happen if gradienTol is not active..)
-			FIXME: WACKER think about a "usefull" limit
+    }
+
+    /*
+      Check iteration limit
+    */
+    if(m_maxNumIterActive)
+    {
+      if(m_numIter >= m_maxNumIter)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "# Gradient Descenct :: aborting because of numIter" << std::endl;
+        }
+
+
+        /* set according return status and return */
+        m_returnReason = SUCCESS_MAXITER;
+        abort = true;
+        continue;
+      }
+    }
+
+
+    /*
+      store last results for comparison
+    */
+    matrix_type oldParams = m_parameters;
+    matrix_type oldGradient = m_gradient;
+    double oldFuncValue = m_currentCostFunctionValue;
+    
+
+    /*
+      get gradient
+    */
+    m_gradient = (m_analyticalGradients == true && 
+        (m_costFunction->hasAnalyticGradient() == true) ) ?
+              getAnalyticalGradient(m_parameters) : 
+              getNumericalGradient(m_parameters, stepSize);
+
+    /*
+      weight with scalings
+    */
+    //m_gradient = m_gradient.ElMul(m_scales);
+    for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+    {
+      m_gradient(k,0) *= m_scales(k,0);
+    }
+
+    /*
+      check gradient tol
+    */
+    if(m_gradientTolActive)
+    {
+      if(m_gradient.frobeniusNorm() < m_gradientTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "# Gradient Descenct :: aborting because of GradientTol" << std::endl;
+        }
+
+        m_returnReason = SUCCESS_GRADIENTTOL;
+        abort = true;
+        continue;
+      }
+    }
+
+    /*
+      set gradient length to 1, if length of gradient is too small .. 
+      return ERROR_COMPUTATION_UNSTABLE
+      (this can happen if gradienTol is not active..)
+      FIXME: WACKER think about a "useful" limit
                 ruehle: now adjustable via variable m_MinimalGradientMagnitude
             It considers a small gradient as having reached the local/global optimum, hello convex function...
-		*/
-        double fGradientLength = m_gradient.Norm(0);
+    */
+        double fGradientLength = m_gradient.frobeniusNorm();
         if (fGradientLength > m_MinimalGradientMagnitude)
-		{
-			for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
-			{
-                m_gradient[k][0] /= fGradientLength;
+    {
+      for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+      {
+                m_gradient(k,0) /= fGradientLength;
 
-			}
-		}
-		else
-		{
+      }
+    }
+    else
+    {
 
             if(m_verbose == true)
             {
@@ -255,175 +255,175 @@ int GradientDescentOptimizer::optimize()
             /* set according return status and the last parameters and return */
             m_returnReason = SUCCESS_PARAMTOL;
 
-			abort =true;
-			continue;
-		}
-		
-		/*
-			check direction change to scale down for convergence!
-		*/
-		if(( !m_gradient * oldGradient)[0][0] < cosAngle)
-		{
-			/*
-				direction change is more than acos(cosAngle) deg.
-				scaledown by downScaleFactor
-			*/
-
-			for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
-                stepSize[k][0] *= downScaleFactor;
-
-			stepLength *= downScaleFactor;
-			/*FIXME: WACKER: only as long
-			as there is no steplength computation!*/
+      abort =true;
+      continue;
+    }
+    
+    /*
+      check direction change to scale down for convergence!
+    */
+    if(( !m_gradient * oldGradient)(0,0) < cosAngle)
+    {
+      /*
+        direction change is more than acos(cosAngle) deg.
+        scaledown by downScaleFactor
+      */
+
+      for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+                stepSize(k,0) *= downScaleFactor;
+
+      stepLength *= downScaleFactor;
+      /*FIXME: WACKER: only as long
+      as there is no steplength computation!*/
 
             if(m_verbose == true)
             {
                 std::cout << "# Gradient Descenct :: direction change detected ->perfoming scaledown" << std::endl;
             }
-		}
-		
-				
-		/*
-			set next iteration step
-		*/
+    }
+    
+        
+    /*
+      set next iteration step
+    */
         //weight the stepSize for the next grid search by the gradient;
         //FIXME: using this thought destroys convergence...somehow..
         //     for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
-        //         stepSize[k][0] = stepSize[k][0] * m_gradient[k][0];
+        //         stepSize(k,0) = stepSize(k,0) * m_gradient(k,0);
 
         //old but silly version:
         // m_parameters = m_parameters + m_gradient * stepLength ;
         //new version where each gradient is weighted by the dimensions individual step size (not one fits them all, as before)
         for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
-            m_parameters[k][0] = m_parameters[k][0] - stepSize[k][0] * m_gradient[k][0];
-
-		/*
-			Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
-		*/
-		if(m_lowerParameterBoundActive || m_upperParameterBoundActive)
-		{
-			for(int i=0; i <static_cast<int>(m_numberOfParameters); i++)
-			{
-				if( m_upperParameterBoundActive)
-				{
-					if(m_parameters[i][0] >= m_upperParameterBound[i][0])
-					{
-						if(m_verbose == true)
-						{
-							std::cout << "# Gradient Descenct :: aborting because of parameter Bounds" << std::endl;
-						}
-
-						/* set according return status and the last parameters and return */
-						m_returnReason = ERROR_XOUTOFBOUNDS;
-						m_parameters = oldParams;
-						abort = true;
-						continue;
-					}
-				}
-				if( m_lowerParameterBoundActive)
-				{
-
-					if(m_parameters[i][0] <= m_lowerParameterBound[i][0])
-					{
-						if(m_verbose == true)
-						{
-							std::cout << "# Gradient Descenct :: aborting because of parameter Bounds" << std::endl;
-						}
-
-						/* set according return status and return the last parameter*/
-						m_returnReason = ERROR_XOUTOFBOUNDS;
-						m_parameters = oldParams;
-						abort = true;
-						continue;
-					}
-				}
-			}
-		}
-
-		/*
-			Get new Costfunction Value
-		*/
-		m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
-
-		/*
-			Check ParamTol
-		*/
-		if(m_paramTolActive)
-		{
-			if( (m_parameters - oldParams).Norm(0) < m_paramTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "Gradient Descenct :: aborting because of param Tol" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_PARAMTOL;
-				/*m_parameters = oldParams;*/
-				abort = true;
-				continue;
-			}
-
-		}
-	
-		if(m_funcTolActive)
-		{
-			if( fabs((m_currentCostFunctionValue - oldFuncValue)) < m_funcTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "# Gradient Descenct :: aborting because of Func Tol" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_FUNCTOL;
-				/* m_parameters = oldParams; */
-				abort = true;
-				continue;
-			}
-		}
-
-		/*
-			Check Optimization Timilimit, if active
-		*/
-		if(m_maxSecondsActive)
-		{
-			m_currentTime = clock();
-		
-			//std::cout << (_startTime - _currentTime) << std::endl;
-			//std::cout << CLOCKS_PER_SEC << std::endl;
-			/* time limit exceeded ? */
-			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
-			{
-				/*
-					
-				*/
-				if(m_verbose == true)
-				{
-					std::cout << "# Gradient Descenct :: aborting because of Time Limit" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_TIMELIMIT;
-				m_parameters = oldParams;
-				abort = true;
-				continue;
-			}
-		}
-
-	}
-
-	if(m_verbose)
-	{
-		std::cout << "# Gradient Descenct :: RESULT: ";
-		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-		{
-			std::cout<< m_parameters[r][0] << " ";
-		}
-		std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
-	}
-
-	return m_returnReason;
+            m_parameters(k,0) = m_parameters(k,0) - stepSize(k,0) * m_gradient(k,0);
+
+    /*
+      Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
+    */
+    if(m_lowerParameterBoundActive || m_upperParameterBoundActive)
+    {
+      for(int i=0; i <static_cast<int>(m_numberOfParameters); i++)
+      {
+        if( m_upperParameterBoundActive)
+        {
+          if(m_parameters(i,0) >= m_upperParameterBound(i,0))
+          {
+            if(m_verbose == true)
+            {
+              std::cout << "# Gradient Descenct :: aborting because of parameter Bounds" << std::endl;
+            }
+
+            /* set according return status and the last parameters and return */
+            m_returnReason = ERROR_XOUTOFBOUNDS;
+            m_parameters = oldParams;
+            abort = true;
+            continue;
+          }
+        }
+        if( m_lowerParameterBoundActive)
+        {
+
+          if(m_parameters(i,0) <= m_lowerParameterBound(i,0))
+          {
+            if(m_verbose == true)
+            {
+              std::cout << "# Gradient Descent :: aborting because of parameter Bounds" << std::endl;
+            }
+
+            /* set according return status and return the last parameter*/
+            m_returnReason = ERROR_XOUTOFBOUNDS;
+            m_parameters = oldParams;
+            abort = true;
+            continue;
+          }
+        }
+      }
+    }
+
+    /*
+      Get new Costfunction Value
+    */
+    m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+    /*
+      Check ParamTol
+    */
+    if(m_paramTolActive)
+    {
+      if( (m_parameters - oldParams).frobeniusNorm() < m_paramTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "Gradient Descenct :: aborting because of param Tol" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_PARAMTOL;
+        /*m_parameters = oldParams;*/
+        abort = true;
+        continue;
+      }
+
+    }
+  
+    if(m_funcTolActive)
+    {
+      if( fabs((m_currentCostFunctionValue - oldFuncValue)) < m_funcTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "# Gradient Descenct :: aborting because of Func Tol" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_FUNCTOL;
+        /* m_parameters = oldParams; */
+        abort = true;
+        continue;
+      }
+    }
+
+    /*
+      Check the optimization time limit, if active
+    */
+    if(m_maxSecondsActive)
+    {
+      m_currentTime = clock();
+    
+      //std::cout << (_startTime - _currentTime) << std::endl;
+      //std::cout << CLOCKS_PER_SEC << std::endl;
+      /* time limit exceeded ? */
+      if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+      {
+        /*
+          
+        */
+        if(m_verbose == true)
+        {
+          std::cout << "# Gradient Descenct :: aborting because of Time Limit" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_TIMELIMIT;
+        m_parameters = oldParams;
+        abort = true;
+        continue;
+      }
+    }
+
+  }
+
+  if(m_verbose)
+  {
+    std::cout << "# Gradient Descenct :: RESULT: ";
+    for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+    {
+      std::cout<< m_parameters(r,0) << " ";
+    }
+    std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
+  }
+
+  return m_returnReason;
 
 }
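
In formula form, one pass of the loop above performs (s: per-dimension step sizes, \odot: element-wise product):

    \hat g_k = \frac{g_k \odot \mathrm{scales}}{\lVert g_k \odot \mathrm{scales} \rVert}, \qquad
    x_{k+1} = x_k - s \odot \hat g_k

where every component of s is halved (downScaleFactor = 0.5) whenever the direction change between consecutive normalized gradients exceeds 90 degrees, i.e. their scalar product drops below cosAngle = 0.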
 

+ 102 - 99
GradientDescentOptimizer.h

@@ -12,105 +12,108 @@
 #include <cmath>
 #include "optimization/DerivativeBasedOptimizer.h"
 
-///
-///	Class GradientDescentOptimizer
-///
-///  HowToUse:
-///	
-///	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
-///	  * use setParameters() to set the start point
-///	  * call init()
-///	  * call optimize()
-///	  
-///	   
-///	  
-///  Implemented Abort criteria:
-///	  		
-///	* maximum number of iterations
-///	* time limit
-///	* parameter bounds
-///	* function value tolerance
-/// 	* parameter tolerance
-///	* gradient tolerance
-///  
-///	Additional return reason:
-///
-///	* ERROR_COMPUTATION_UNSTABLE
-///
-/// GradientDescent supports the 'scales' feature
-class GradientDescentOptimizer : public DerivativeBasedOptimizer
+namespace OPTIMIZATION
 {
-	public:
-
-		typedef  DerivativeBasedOptimizer SuperClass;
-
-		///
-		///	Constructor.
-		///	\param loger : OptLogBase * to existing log class
-		///
-		GradientDescentOptimizer(OptLogBase *loger=NULL);
-
-		///
-		///	Copy constructor
-		///	\param opt .. optimizer to copy
-		///
-		GradientDescentOptimizer( const GradientDescentOptimizer &opt);
-
-	        ///
-		///	Destructor.
-		///
-		~GradientDescentOptimizer();
-
-
-		///
-		///	\brief Set the initial step size
-		///	The initial stepsize is used to give the optimizer an initial value for the order of 
-		///	magnitude to start with.
-		///	(e.g. step 100000 in x direction or 0.01 ?)
-		///	\param stepSize with the step size for the i-th dimension in the i-th position.
-		///
-		void setStepSize(const optimization::matrix_type & stepSize);
-
-		///
-		///	Get the actual step size
-		///	\return vector<double> with the actual step sizes
-		///
-		inline const optimization::matrix_type & getStepSize(){return m_stepSize;};
-
-
-		///
-		///	do internal initializations		
-		///
-		void init();
-
-
-		///
-		///	start the optimization
-		///	\return the return status that can be found in the corresponding enumeration
-		///
-		int optimize();
-
-		inline void setStepLength(double stepLength){m_stepLength=stepLength;}
-
-        inline void setMinimalGradientMagnitude(double minGradientMag){m_MinimalGradientMagnitude=minGradientMag;}
-	private:
-
-		///
-		///	step size vector
-		///
-		optimization::matrix_type m_stepSize;
-		
-		///
-		/// .. steplength
-		///
-		double m_stepLength;
-
-        ///
-        ///	Minimal threshold for the L2-Norm of the gradient, so that the gradient descent
-        /// is aborted.
-        ///
-        double m_MinimalGradientMagnitude;
-		
-};
+  ///
+  ///	Class GradientDescentOptimizer
+  ///
+  ///  HowToUse:
+  ///	
+  ///	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
+  ///	  * use setParameters() to set the start point
+  ///	  * call init()
+  ///	  * call optimize()
+  ///	  
+  ///	   
+  ///	  
+  ///  Implemented Abort criteria:
+  ///	  		
+  ///	* maximum number of iterations
+  ///	* time limit
+  ///	* parameter bounds
+  ///	* function value tolerance
+  /// 	* parameter tolerance
+  ///	* gradient tolerance
+  ///  
+  ///	Additional return reason:
+  ///
+  ///	* ERROR_COMPUTATION_UNSTABLE
+  ///
+  /// GradientDescent supports the 'scales' feature
+  class GradientDescentOptimizer : public DerivativeBasedOptimizer
+  {
+    public:
+
+      typedef  DerivativeBasedOptimizer SuperClass;
+
+      ///
+      ///	Constructor.
+      ///	\param loger : OptLogBase * to existing log class
+      ///
+      GradientDescentOptimizer(OptLogBase *loger=NULL);
+
+      ///
+      ///	Copy constructor
+      ///	\param opt .. optimizer to copy
+      ///
+      GradientDescentOptimizer( const GradientDescentOptimizer &opt);
+
+      ///
+      ///	Destructor.
+      ///
+      ~GradientDescentOptimizer();
+
+
+      ///
+      ///	\brief Set the initial step size
+      ///	The initial stepsize is used to give the optimizer an initial value for the order of 
+      ///	magnitude to start with.
+      ///	(e.g. step 100000 in x direction or 0.01 ?)
+      ///	\param stepSize with the step size for the i-th dimension in the i-th position.
+      ///
+      void setStepSize(const OPTIMIZATION::matrix_type & stepSize);
+
+      ///
+      ///	Get the actual step size
+      ///	\return matrix_type with the actual step sizes
+      ///
+      inline const OPTIMIZATION::matrix_type & getStepSize(){return m_stepSize;};
+
+
+      ///
+      ///	do internal initializations		
+      ///
+      void init();
+
+
+      ///
+      ///	start the optimization
+      ///	\return the return status that can be found in the corresponding enumeration
+      ///
+      int optimize();
+
+      inline void setStepLength(double stepLength){m_stepLength=stepLength;}
+
+      inline void setMinimalGradientMagnitude(double minGradientMag){m_MinimalGradientMagnitude=minGradientMag;}
+    private:
+
+      ///
+      ///	step size vector
+      ///
+      OPTIMIZATION::matrix_type m_stepSize;
+      
+      ///
+      /// .. steplength
+      ///
+      double m_stepLength;
+
+      ///
+      ///	Minimal threshold for the L2-norm of the gradient; below this, the
+      ///	gradient descent is aborted.
+      ///
+      double m_MinimalGradientMagnitude;
+      
+  };//class
+}//namespace
 
 #endif
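
Sketch of the HowToUse list above. How the cost function is attached is not visible in this hunk, so the setCostFunction() call below is an assumption about the Optimizer base class:

#include "optimization/GradientDescentOptimizer.h"

void runDescent(OPTIMIZATION::CostFunction *costFunc,
                const OPTIMIZATION::matrix_type &start,
                const OPTIMIZATION::matrix_type &steps)
{
  OPTIMIZATION::GradientDescentOptimizer opt;
  opt.setCostFunction(costFunc);  // assumed base-class setter
  opt.setStepSize(steps);         // initial step size per dimension
  opt.setParameters(start);       // start point
  opt.init();
  int reason = opt.optimize();    // returns a SUCCESS_* / ERROR_* code
  (void) reason;
}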

+ 158 - 158
LineSearcher.cpp

@@ -9,68 +9,68 @@
 #include "optimization/LineSearcher.h"
 #include <iostream>
 
-using namespace optimization;
+using namespace OPTIMIZATION;
 
 LineSearcher::LineSearcher()
 {
-	m_maximize = false;
-	m_pEq = NULL;
-	m_numEq = 0;
-	m_pIneq = NULL;
-	m_numIneq = 0;
-	m_PP = 1000.0;
-	m_verbose= false;
+  m_maximize = false;
+  m_pEq = NULL;
+  m_numEq = 0;
+  m_pIneq = NULL;
+  m_numIneq = 0;
+  m_PP = 1000.0;
+  m_verbose= false;
 }
 
 LineSearcher::LineSearcher(CostFunction *costFunc,OptLogBase* loger) : m_pFunc(costFunc), m_pLoger(loger)
 {
-	m_maximize = false;
-	m_pEq = NULL;
-	m_numEq = 0;
-	m_pIneq = NULL;
-	m_numIneq = 0;
-	m_PP = 10.0;
-	m_verbose= false;
+  m_maximize = false;
+  m_pEq = NULL;
+  m_numEq = 0;
+  m_pIneq = NULL;
+  m_numIneq = 0;
+  m_PP = 10.0;
+  m_verbose= false;
 }
 
 LineSearcher::LineSearcher(const LineSearcher &lin)
 {
-	m_pFunc		= lin.m_pFunc;
-	m_pLoger	= lin.m_pLoger;
-	m_maximize	= lin.m_maximize;
-	m_pEq		= lin.m_pEq;
-	m_numEq		= lin.m_numEq;
-	m_pIneq		= lin.m_pIneq;
-	m_numIneq	= lin.m_numIneq;
-	m_PP		= lin.m_PP;
-	m_verbose	= lin.m_verbose;
+  m_pFunc		= lin.m_pFunc;
+  m_pLoger	= lin.m_pLoger;
+  m_maximize	= lin.m_maximize;
+  m_pEq		= lin.m_pEq;
+  m_numEq		= lin.m_numEq;
+  m_pIneq		= lin.m_pIneq;
+  m_numIneq	= lin.m_numIneq;
+  m_PP		= lin.m_PP;
+  m_verbose	= lin.m_verbose;
 }
 
 LineSearcher & LineSearcher::operator =(const LineSearcher &lin)
 {
-	/*
-			avoid self-copy
-	*/
-	if(this != &lin)
-	{
-		/*
-			=operator of SuperClass
-		*/
-		//SuperClass::operator=(opt);
-		
-		m_pFunc = lin.m_pFunc;
-		m_pLoger = lin.m_pLoger;
-		m_maximize = lin.m_maximize;
-		m_pEq		= lin.m_pEq;
-		m_numEq		= lin.m_numEq;
-		m_pIneq		= lin.m_pIneq;
-		m_numIneq	= lin.m_numIneq;
-		m_PP		= lin.m_PP;
-		m_verbose	= lin.m_verbose;
-
-	}
-
-	return *this;
+  /*
+      avoid self-copy
+  */
+  if(this != &lin)
+  {
+    /*
+      =operator of SuperClass
+    */
+    //SuperClass::operator=(opt);
+    
+    m_pFunc = lin.m_pFunc;
+    m_pLoger = lin.m_pLoger;
+    m_maximize = lin.m_maximize;
+    m_pEq		= lin.m_pEq;
+    m_numEq		= lin.m_numEq;
+    m_pIneq		= lin.m_pIneq;
+    m_numIneq	= lin.m_numIneq;
+    m_PP		= lin.m_PP;
+    m_verbose	= lin.m_verbose;
+
+  }
+
+  return *this;
 
 }
 
@@ -81,134 +81,134 @@ LineSearcher::~LineSearcher()
 
 bool LineSearcher::setH0(const matrix_type &H0)
 {
-	bool tmp = true;
-	if(m_pFunc)
-	{
-		if(m_pFunc->setH0(H0) == false)
-		{
-			tmp = false;
-		}
-	}
-	if(m_pEq)
-	{
-		if(m_pEq->setH0(H0) == false)
-		{
-			tmp = false;
-		}
-	}
-	if(m_pIneq)
-	{
-		if(m_pIneq->setH0(H0) == false)
-		{
-			tmp = false;
-		}
-	}
-
-	return tmp;
+  bool tmp = true;
+  if(m_pFunc)
+  {
+    if(m_pFunc->setH0(H0) == false)
+    {
+      tmp = false;
+    }
+  }
+  if(m_pEq)
+  {
+    if(m_pEq->setH0(H0) == false)
+    {
+      tmp = false;
+    }
+  }
+  if(m_pIneq)
+  {
+    if(m_pIneq->setH0(H0) == false)
+    {
+      tmp = false;
+    }
+  }
+
+  return tmp;
 
 
 }
 
 bool LineSearcher::setX0(const matrix_type &x0)
 {
-	bool tmp = true;
-	if(m_pFunc)
-	{
-		if(m_pFunc->setX0(x0) == false)
-		{
-			tmp = false;
-		}
-	}
-	if(m_pEq)
-	{
-		if(m_pEq->setX0(x0) == false)
-		{
-			tmp = false;
-		}
-	}
-	if(m_pIneq)
-	{
-		if(m_pIneq->setX0(x0) == false)
-		{
-			tmp = false;
-		}
-	}
-
-	return tmp;
-	
+  bool tmp = true;
+  if(m_pFunc)
+  {
+    if(m_pFunc->setX0(x0) == false)
+    {
+      tmp = false;
+    }
+  }
+  if(m_pEq)
+  {
+    if(m_pEq->setX0(x0) == false)
+    {
+      tmp = false;
+    }
+  }
+  if(m_pIneq)
+  {
+    if(m_pIneq->setX0(x0) == false)
+    {
+      tmp = false;
+    }
+  }
+
+  return tmp;
+  
 }
 
 bool LineSearcher::setConstraints(InequalityConstraints *ineq,EqualityConstraints *eq)
 {
-	
-	if(ineq)
-	{
-		if(ineq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
-		{
-			return false;
-		}
-		m_numIneq = ineq->getNumOfConstraints();
-		m_pIneq = ineq;
-	}
-	else
-	{
-		m_numIneq = 0;
-	}
-
-	if(eq)
-	{
-		if(eq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
-		{
-			return false;
-		}
-		m_numEq = eq->getNumOfConstraints();
-		m_pEq = eq;
-	}
-	else
-	{
-		m_numEq = 0;
-	}
-
-
-	return true;
+  
+  if(ineq)
+  {
+    if(ineq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
+    {
+      return false;
+    }
+    m_numIneq = ineq->getNumOfConstraints();
+    m_pIneq = ineq;
+  }
+  else
+  {
+    m_numIneq = 0;
+  }
+
+  if(eq)
+  {
+    if(eq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
+    {
+      return false;
+    }
+    m_numEq = eq->getNumOfConstraints();
+    m_pEq = eq;
+  }
+  else
+  {
+    m_numEq = 0;
+  }
+
+
+  return true;
 
 }
 
 bool LineSearcher::setPP(double pp)
 { 
-	if( pp> 0.0)
-	{
-		m_PP = pp;
-		return true;
-	}
-	else
-	{
-		return false;
-	}
+  if( pp> 0.0)
+  {
+    m_PP = pp;
+    return true;
+  }
+  else
+  {
+    return false;
+  }
 }
 
 double LineSearcher::evaluateSub(double lambda)
 {
-	double val = 0;
-
-	val = (m_maximize == true ? (-1.0 * m_pFunc->evaluateSub(lambda)): (m_pFunc->evaluateSub(lambda)));
-	if(m_numEq)
-	{
-		matrix_type tmpval = m_pEq->evaluateSub(lambda);
-		for(int j = 0; j<static_cast<int>(m_numEq);j++)
-		{
-			val += m_PP * (m_maximize == true ? (-1.0 * (1+(tmpval[j][0]* tmpval[j][0]))) : (1+ (tmpval[j][0]* tmpval[j][0])));
-		}
-	}
-	if(m_numIneq)
-	{
-		matrix_type tmpval = m_pIneq->evaluateSub(lambda);
-		for(int j = 0; j<static_cast<int>(m_numEq);j++)
-		{
-			val += m_PP * (m_maximize == true ? (-1.0 * (tmpval[j][0] > 0.0 ?(1 + tmpval[j][0] * tmpval[j][0]) : 0.0))
-											  :         (tmpval[j][0]) > 0.0 ?(1+ tmpval[j][0] * tmpval[j][0]) : 0.0);
-		}
-	}
-
-	return val;
+  double val = 0;
+
+  val = (m_maximize == true ? (-1.0 * m_pFunc->evaluateSub(lambda)): (m_pFunc->evaluateSub(lambda)));
+  if(m_numEq)
+  {
+    matrix_type tmpval = m_pEq->evaluateSub(lambda);
+    for(int j = 0; j<static_cast<int>(m_numEq);j++)
+    {
+      val += m_PP * (m_maximize == true ? (-1.0 * (1+(tmpval(j,0)* tmpval(j,0)))) : (1+ (tmpval(j,0)* tmpval(j,0))));
+    }
+  }
+  if(m_numIneq)
+  {
+    matrix_type tmpval = m_pIneq->evaluateSub(lambda);
+    for(int j = 0; j<static_cast<int>(m_numIneq);j++)
+    {
+      val += m_PP * (m_maximize == true ? (-1.0 * (tmpval(j,0) > 0.0 ?(1 + tmpval(j,0) * tmpval(j,0)) : 0.0))
+                        :         (tmpval(j,0)) > 0.0 ?(1+ tmpval(j,0) * tmpval(j,0)) : 0.0);
+    }
+  }
+
+  return val;
 }
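
evaluateSub() above boils down to the following penalized 1-D function, with \sigma = -1 when maximizing and +1 otherwise, e_j the equality residuals, g_j the inequality values, and P = m_PP:

    \phi(\lambda) = \sigma f(x_0 + \lambda h_0)
      + P \sum_{j=1}^{n_{eq}} \sigma \bigl(1 + e_j(\lambda)^2\bigr)
      + P \sum_{j:\; g_j(\lambda) > 0} \sigma \bigl(1 + g_j(\lambda)^2\bigr)

Note the constant offset of 1: every equality constraint contributes at least P, and an inequality constraint contributes P only once it is violated (g_j > 0), so the penalty jumps discontinuously at the feasibility boundary.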

+ 128 - 124
LineSearcher.h

@@ -14,134 +14,138 @@
 
 #include "core/optimization/blackbox/CostFunction.h"
 #include "core/optimization/blackbox/OptLogBase.h"
-#include "optimization/Opt_Namespace.h"
+#include "core/optimization/blackbox/Definitions_core_opt.h"
 #include "optimization/InequalityConstraints.h"
 #include "optimization/EqualityConstraints.h"
 
-/*!
-	Abstract base class of all line search optimizer.
-*/
-class LineSearcher
+namespace OPTIMIZATION
 {
-public:
-	typedef optimization::matrix_type matrix_type;
-
-	/*!
-		default constructor
-	*/
-	LineSearcher();
-
-	/*!
-		constructor
-	*/
-	LineSearcher(CostFunction *costFunc, OptLogBase *loger);
-
-	/*!
-		Copy constructor
-		\param opt .. optimizer to copy
-	*/
-	LineSearcher( const LineSearcher &lin);
-	
-	/*!
-		operator =
-	*/
-	LineSearcher & operator=(const LineSearcher &lin);
-
-    /*!
-		Destructor.
-     */
-    virtual ~LineSearcher();
-
-	/*!
-		search maximum?
-	*/
-	inline void setMaximize(bool maximize){m_maximize = maximize;};
-
-	/*!
-		set x0
-	*/
-	bool setX0(const matrix_type & x0);
-
-	/*!
-		set a direction
-	*/
-	bool setH0(const matrix_type & H0);
-
-	/*!
-		optimize
-		returns a double..
-	*/
-	virtual double optimize() = 0;
-	
-	/*!
-		set Constraints for penalty terms
-		\returns true in case of success
-	*/
-	bool setConstraints(InequalityConstraints *ineq,EqualityConstraints *eq);
-
-
-	/*!
-		evaluate penalized 1d function
-	*/
-	double evaluateSub(double lambda);
-
-	/*!
-		penalty parameter	
-	*/
-	bool setPP(double pp);
-
-	/*!
-		set vebose to true to get more information
-	*/
-	inline void setVerbose(bool verb){m_verbose = verb;};	
-
-protected:
-
-	/*!
-		maximization?
-	*/
-	bool m_maximize;
-	
-	/*!
-		the costfunction
-	*/
-	CostFunction *m_pFunc;
-
-	/*!
-		number of Inequality Constraints 
-	*/
-	unsigned int		   m_numIneq;
-
-	/*!
-		Inequality Constraints 
-	*/
-	InequalityConstraints *m_pIneq;
-
-	/*!
-		number of Equality Constraints 
-	*/
-	unsigned int		 m_numEq;
-
-	/*!
-		Equality Constraints 
-	*/
-	EqualityConstraints *m_pEq;
-
-	/*
-		penalty parameter
-	*/
-	double m_PP;
-	
-	/*!
-		Point to a loger
-	*/
-	OptLogBase *m_pLoger;
-
-	/*!
-		verbose?
-	*/
-	bool m_verbose;
-};
+
+  /*!
+    Abstract base class of all line search optimizer.
+  */
+  class LineSearcher
+  {
+  public:
+    typedef OPTIMIZATION::matrix_type matrix_type;
+
+    /*!
+      default constructor
+    */
+    LineSearcher();
+
+    /*!
+      constructor
+    */
+    LineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+    /*!
+      Copy constructor
+      \param opt .. optimizer to copy
+    */
+    LineSearcher( const LineSearcher &lin);
+    
+    /*!
+      operator =
+    */
+    LineSearcher & operator=(const LineSearcher &lin);
+
+      /*!
+      Destructor.
+      */
+      virtual ~LineSearcher();
+
+    /*!
+      search maximum?
+    */
+    inline void setMaximize(bool maximize){m_maximize = maximize;};
+
+    /*!
+      set x0
+    */
+    bool setX0(const matrix_type & x0);
+
+    /*!
+      set a direction
+    */
+    bool setH0(const matrix_type & H0);
+
+    /*!
+      optimize
+      returns a double..
+    */
+    virtual double optimize() = 0;
+    
+    /*!
+      set Constraints for penalty terms
+      \returns true in case of success
+    */
+    bool setConstraints(InequalityConstraints *ineq,EqualityConstraints *eq);
+
+
+    /*!
+      evaluate penalized 1d function
+    */
+    double evaluateSub(double lambda);
+
+    /*!
+      penalty parameter	
+    */
+    bool setPP(double pp);
+
+    /*!
+      set verbose to true to get more information
+    */
+    inline void setVerbose(bool verb){m_verbose = verb;};	
+
+  protected:
+
+    /*!
+      maximization?
+    */
+    bool m_maximize;
+    
+    /*!
+      the costfunction
+    */
+    CostFunction *m_pFunc;
+
+    /*!
+      number of Inequality Constraints 
+    */
+    unsigned int		   m_numIneq;
+
+    /*!
+      Inequality Constraints 
+    */
+    InequalityConstraints *m_pIneq;
+
+    /*!
+      number of Equality Constraints 
+    */
+    unsigned int		 m_numEq;
+
+    /*!
+      Equality Constraints 
+    */
+    EqualityConstraints *m_pEq;
+
+    /*
+      penalty parameter
+    */
+    double m_PP;
+    
+    /*!
+      Pointer to a logger
+    */
+    OptLogBase *m_pLoger;
+
+    /*!
+      verbose?
+    */
+    bool m_verbose;
+  };//class
+}//namespace
 
 #endif
 

+ 448 - 444
MatrixIterativeOptimizer.cpp

@@ -12,16 +12,17 @@
 #include "optimization/AdditionalIceUtils.h" // for linsolve
 #include "core/optimization/blackbox/DownhillSimplexOptimizer.h" //FIXME WACKER: INCLUDE DOWNHILL SIMPLEX FOR THE GET RANK FUNCTION -> EXPORT IT 
 
+using namespace OPTIMIZATION;
 
 MatrixIterativeOptimizer::MatrixIterativeOptimizer(OptLogBase *loger) : SuperClass(loger)
 {
-	m_IterationMatrix	= matrix_type(m_numberOfParameters,m_numberOfParameters);
-	m_hk				= matrix_type(m_numberOfParameters,1);
-	m_stepSize			= matrix_type(m_numberOfParameters,1);
-	m_oldGradient		= matrix_type(m_numberOfParameters,1);
-	m_oldParams			= matrix_type(m_numberOfParameters,1);
+  m_IterationMatrix	= matrix_type(m_numberOfParameters,m_numberOfParameters);
+  m_hk				= matrix_type(m_numberOfParameters,1);
+  m_stepSize			= matrix_type(m_numberOfParameters,1);
+  m_oldGradient		= matrix_type(m_numberOfParameters,1);
+  m_oldParams			= matrix_type(m_numberOfParameters,1);
 
-	m_lambdak			= 1.0;
+  m_lambdak			= 1.0;
 
 }
 
@@ -29,13 +30,13 @@ MatrixIterativeOptimizer::MatrixIterativeOptimizer(OptLogBase *loger) : SuperCla
 MatrixIterativeOptimizer::MatrixIterativeOptimizer( const MatrixIterativeOptimizer &opt) : SuperClass(opt)
 {
 
-	m_IterationMatrix	= opt.m_IterationMatrix;
-	m_hk				= opt.m_hk;
-	m_lin				= opt.m_lin;
-	m_stepSize			= opt.m_stepSize;
-	m_lambdak			= opt.m_lambdak;
-	m_oldParams			= opt.m_oldParams;
-	m_oldGradient		= opt.m_oldGradient;
+  m_IterationMatrix	= opt.m_IterationMatrix;
+  m_hk				= opt.m_hk;
+  m_lin				= opt.m_lin;
+  m_stepSize			= opt.m_stepSize;
+  m_lambdak			= opt.m_lambdak;
+  m_oldParams			= opt.m_oldParams;
+  m_oldGradient		= opt.m_oldGradient;
 
 }
 
@@ -48,458 +49,461 @@ MatrixIterativeOptimizer::~MatrixIterativeOptimizer()
 
 void MatrixIterativeOptimizer::init()
 {
-	if(m_loger)
-		m_loger->logTrace("entering MatrixIterativeOptimizer:: init ...  \n");
-
-	SuperClass::init();
-
-	m_lin= ArmijoLineSearcher(m_costFunction,m_loger);
-
-	//if(m_verbose==true) //FIXME commented out as output is not human readable
-	//	m_lin.setVerbose(true);
-	
-	if(m_maximize == true)
-	{
-		m_lin.setMaximize(true);
-	}
-
-	m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
-	for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
-	{
-		m_IterationMatrix[i][i] = 1.0;
-	}
-
-	
-	if(m_loger)
-		m_loger->logTrace("leaving MatrixIterativeOptimizer:: init ...\n");
+  if(m_loger)
+    m_loger->logTrace("entering MatrixIterativeOptimizer:: init ...  \n");
+
+  SuperClass::init();
+
+  m_lin= ArmijoLineSearcher(m_costFunction,m_loger);
+
+  //if(m_verbose==true) //FIXME commented out as output is not human readable
+  //	m_lin.setVerbose(true);
+  
+  if(m_maximize == true)
+  {
+    m_lin.setMaximize(true);
+  }
+
+  m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
+  for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
+  {
+    m_IterationMatrix(i,i) = 1.0;
+  }
+
+  
+  if(m_loger)
+    m_loger->logTrace("leaving MatrixIterativeOptimizer:: init ...\n");
 }
 
 
 MatrixIterativeOptimizer & MatrixIterativeOptimizer::operator=(const MatrixIterativeOptimizer &opt)
 {
 
-	/*
-			avoid self-copy
-	*/
-	if(this != &opt)
-	{
-		
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(opt);
-
-		/*
-			own values:
-		*/
-		m_IterationMatrix	= opt.m_IterationMatrix;
-		m_hk				= opt.m_hk;
-		m_lin				= opt.m_lin;
-		m_stepSize			= opt.m_stepSize;
-		m_lambdak			= opt.m_lambdak;
-		m_oldParams			= opt.m_oldParams;
-		m_oldGradient		= opt.m_oldGradient;
-	}
-
-  	return *this;
+  /*
+      avoid self-copy
+  */
+  if(this != &opt)
+  {
+    
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(opt);
+
+    /*
+      own values:
+    */
+    m_IterationMatrix	= opt.m_IterationMatrix;
+    m_hk				= opt.m_hk;
+    m_lin				= opt.m_lin;
+    m_stepSize			= opt.m_stepSize;
+    m_lambdak			= opt.m_lambdak;
+    m_oldParams			= opt.m_oldParams;
+    m_oldGradient		= opt.m_oldGradient;
+  }
+
+    return *this;
 }
 
 void MatrixIterativeOptimizer::computeDescentDirection()
 {
-	if(m_loger)
-		m_loger->logTrace("entering MatrixIterativeOptimizer::computeDescentDirection ... \n");
-	
-	if(m_numberOfParameters == 1)
-	{
-		if(fabs(m_IterationMatrix[0][0]) >= 1.0e-20 )
-		{
-			m_hk[0][0] = -m_gradient[0][0] / m_IterationMatrix[0][0];
-		}
-		else
-		{
-			m_hk = -m_gradient;
-		}
-	}
-	else
-	{
-		
-		//if(getRank(m_IterationMatrix, 1.0e-5 ) == m_numberOfParameters)
-		if(true)
-		{
-			/*
-				solve system equation for descent direction
-			*/
-			matrix_type tmp=(m_gradient * -1.0 );
-			linSolve(m_IterationMatrix, m_hk, tmp );
-
-			/*
-				direction orthogonal to gradient? -> use gradient
-			*/
-			if(fabs((!m_hk * m_gradient)[0][0]) <= 1.0e-3)
-			{
-				m_hk = -m_gradient;
-			}
-		}
-		else
-		{
-			m_hk = -m_gradient;
-		}
-	}
-
-	if(m_loger)
-		m_loger->logTrace("leaving MatrixIterativeOptimizer::computeDescentDirection ... \n");
+  if(m_loger)
+    m_loger->logTrace("entering MatrixIterativeOptimizer::computeDescentDirection ... \n");
+  
+  if(m_numberOfParameters == 1)
+  {
+    if(fabs(m_IterationMatrix(0,0)) >= 1.0e-20 )
+    {
+      m_hk(0,0) = -m_gradient(0,0) / m_IterationMatrix(0,0);
+    }
+    else
+    {
+      m_hk = m_gradient;
+      m_hk *= -1.0;
+    }
+  }
+  else
+  {
+    
+    //if(getRank(m_IterationMatrix, 1.0e-5 ) == m_numberOfParameters)
+    if(true)
+    {
+      /*
+        solve system equation for descent direction
+      */
+      matrix_type tmp=(m_gradient * -1.0 );
+      linSolve(m_IterationMatrix, m_hk, tmp );
+
+      /*
+        direction orthogonal to gradient? -> use gradient
+      */
+      if(fabs((!m_hk * m_gradient)(0,0)) <= 1.0e-3)
+      {
+        m_hk = m_gradient;
+        m_hk *= -1.0;
+      }
+    }
+    else
+    {
+      m_hk = m_gradient;
+      m_hk *= -1.0;
+    }
+  }
+
+  if(m_loger)
+    m_loger->logTrace("leaving MatrixIterativeOptimizer::computeDescentDirection ... \n");
 }
 
 void MatrixIterativeOptimizer::resetMatrix()
 {
-	m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
-	for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
-	{
-		m_IterationMatrix[i][i] = 1.0;
-	}
+  m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
+  for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
+  {
+    m_IterationMatrix(i,i) = 1.0;
+  }
 }
 
 void MatrixIterativeOptimizer::setStepSize(const matrix_type & stepSize)
 {
-	m_stepSize = stepSize;
+  m_stepSize = stepSize;
 }
 
 int MatrixIterativeOptimizer::optimize()
 {
-	if(m_loger)
-		m_loger->logTrace("entering MatrixIterativeOptimizer:optimize ... \n");
-
-	init();
-
-	bool abort = false;
-	
-	
-	/******************************
-		start time criteria
-	******************************/
-	m_startTime = clock();
-
-	/******************************
-		compute start value 
-		and first gradient
-	******************************/
-	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
-
-	m_gradient = (m_analyticalGradients == true && 
-				 (m_costFunction->hasAnalyticGradient() == true) ) ?
-							getAnalyticalGradient(m_parameters) : 
-							getNumericalGradient(m_parameters, m_stepSize);
-
-
-	m_hk = m_gradient * -1.0;
-
-	matrix_type hk_norm;
-	double factor = m_hk.Norm(0);
-	if(fabs(factor) > 1.0e-12)
-	{
-		hk_norm = m_hk * (1.0/ factor); 
-	}
-	matrix_type grad_norm;
-	double factor2 = m_gradient.Norm(0);
-	if(fabs(factor2) > 1.0e-12)
-	{
-		grad_norm = m_gradient * (1.0/ factor2); 
-	}
-
-	/*******************************
-		optimize step length
-	*******************************/
-	/// ARMIJO
-	//m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
-	m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
-	m_lambdak = m_lin.optimize();
-	if(m_verbose == true)
-	{
-		std::cout << "stepsize lambda: " << m_lambdak << std::endl;
-		std::cout << "in direction: " << std::endl;
-		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-		{
-			std::cout<< hk_norm[r][0] << " ";
-		}
-		std::cout << std::endl;
-			
-	}
-
-	
-	/*******************************
-		update the parameters
-	*******************************/
-	m_oldParams = m_parameters;
-	m_parameters = m_parameters + hk_norm * m_lambdak;
-
-
-	/******************************
-		check abort criteria 
-		   for gradient
-	******************************/	
-	if(m_gradientTolActive)
-	{
-		if(m_gradient.Norm(0) < m_gradientTol){
-			m_returnReason = SUCCESS_GRADIENTTOL;
-			abort = true;
-		}
-	}	
-
-
-	/* do the optimization loop */
-	while(abort == false)
-	{
-		/******************************
-			increase iteration 
-			      counter
-		******************************/
-		m_numIter++;
-		
-
-		/******************************
-			cout actual iteration
-		******************************/
-
-		if(m_verbose == true)
-		{
-			std::cout << "MatrixIterativeOptimizer :: Iteration: " << m_numIter << " : current parameters : ";
-			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-			{
-				std::cout<< m_parameters[r][0] << "\t ";
-			}
-			std::cout << "    value:   " << m_currentCostFunctionValue << std::endl;
-		}
-
-
-		/******************************
-			Check iteration limit
-		******************************/
-		if(m_maxNumIterActive)
-		{
-			if(m_numIter >= m_maxNumIter)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "Gradient Descenct :: aborting because of numIter" << std::endl;
-				}
-
-
-				/* set according return status and return */
-				m_returnReason = SUCCESS_MAXITER;
-				abort = true;
-				continue;
-			}
-		}
-
-		/******************************
-			get the new Gradient
-		******************************/
-		m_oldGradient	= m_gradient;
-		m_gradient =	(m_analyticalGradients == true && 
-						(m_costFunction->hasAnalyticGradient() == true) ) ?
-							getAnalyticalGradient(m_parameters) : 
-							getNumericalGradient(m_parameters, m_stepSize);
-
-
-		/******************************
-			check gradient tol
-		******************************/
-		if(m_gradientTolActive)
-		{
-			if(m_gradient.Norm(0) < m_gradientTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "MatrixIterativeOptimizer :: aborting because of GradientTol" << std::endl;
-				}
-
-				m_returnReason = SUCCESS_GRADIENTTOL;
-				abort = true;
-				continue;
-			}
-		}
-
-		/******************************
-		      Update the Iteration 
-			       Matrix
-		******************************/
-		//matrix_type oldMatrix	= m_IterationMatrix;
-		updateIterationMatrix();
-
-		/******************************
-			compute a descent 
-			    direction
-		******************************/
-		computeDescentDirection();
-
-
-		// check if descent direction is really a descent direction
-		// if not: reset the iteration matrix and take
-		// the gradient as iteration direction
-		if( (!m_hk *m_gradient)[0][0] > 0.0)
-		{
-			std::cout << " resetting matrix" << std::endl;
-			resetMatrix();
-			m_hk=m_gradient * -1.0;
-		}
-	
-
-
-		double factor = m_hk.Norm(0);
-		if(fabs(factor) > 1.0e-12)
-		{
-			hk_norm = m_hk * (1.0/ factor); 
-		}
-		matrix_type grad_norm;
-		double factor2 = m_gradient.Norm(0);
-		if(fabs(factor2) > 1.0e-12)
-		{
-			grad_norm = m_gradient * (1.0/ factor2); 
-		}
-
-		/*******************************
-			optimize step length
-		*******************************/
-		//m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
-		m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
-		m_lambdak = m_lin.optimize();
-		
-
-		
-		/*******************************
-			update the parameters
-		*******************************/
-		m_oldParams = m_parameters;
-		m_parameters = m_parameters + hk_norm * m_lambdak;
-		if(m_verbose == true)
-		{
-			//matrix_type hk_normu=S2U(hk_norm);
-			std::cout << "MatrixIterativeOptimizer :: Iterating with lambda = " << m_lambdak << std::endl;
-
-			std::cout << "MatrixIterativeOptimizer :: in direction : ";
-			for(int mu = 0; mu < static_cast<int>(m_numberOfParameters); mu++)
-			{
-				//std::cout<< hk_normu[mu][0] << "\t ";
-				std::cout<< hk_norm[mu][0] << "\t ";
-			}
-			std::cout << std::endl;
-
-			//matrix_type m_gradientu=S2U(m_gradient);
-			std::cout << "MatrixIterativeOptimizer :: Gradient was ";
-			for(int nu = 0; nu < static_cast<int>(m_numberOfParameters); nu++)
-			{
-				//std::cout<< m_gradientu[nu][0] << "\t ";
-				std::cout<< m_gradient[nu][0] << "\t ";
-			}
-			std::cout << std::endl;
-			
-		}
-
-			
-				
-		/*******************************
-			Check new parameters 
-				are it is in bounds, 
-				paramTol,
-				funcTol,
-				maxSeconds
-		*******************************/
-		if(!checkParameters(m_parameters))
-		{
-			if(m_verbose == true)
-			{
-				std::cout << "Gradient Descenct :: aborting because of parameter Bounds" << std::endl;
-			}
-
-			/* set according return status and the last parameters and return */
-			m_returnReason = ERROR_XOUTOFBOUNDS;
-			m_parameters = m_oldParams;
-			abort = true;
-			continue;
-		}
-
-		/*
-			Get new Costfunction Value
-		*/
-		double oldFuncValue		= m_currentCostFunctionValue;
-		m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
-
-
-		/*
-			Check ParamTol
-		*/
-		if(m_paramTolActive)
-		{
-			if( (m_parameters - m_oldParams).Norm(0) < m_paramTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "Gradient Descenct :: aborting because of param Tol" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_PARAMTOL;
-				/*m_parameters = oldParams;*/
-				abort = true;
-				continue;
-			}
-
-		}
-	
-		if(m_funcTolActive)
-		{
-			if( abs((m_currentCostFunctionValue - oldFuncValue)) < m_funcTol)
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "MatrixIterativeOptimizer :: aborting because of Func Tol" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_FUNCTOL;
-				abort = true;
-				continue;
-			}
-		}
-
-		/*
-			Check Optimization Timilimit, if active
-		*/
-		if(m_maxSecondsActive)
-		{
-			m_currentTime = clock();
-		
-			/* time limit exceeded ? */
-			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "MatrixIterativeOptimizer :: aborting because of Time Limit" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_TIMELIMIT;
-				m_parameters = m_oldParams;
-				abort = true;
-				continue;
-			}
-		}
-
-	}
-
-	if(m_verbose)
-	{
-		std::cout << "MatrixIterativeOptimizer :: RESULT: ";
-		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
-		{
-			std::cout<< m_parameters[r][0] << " ";
-		}
-		std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
-	}
-
-	if(m_loger)
-		m_loger->logTrace("leaving MatrixIterativeOptimizer:optimize ... \n");
-	
-	return m_returnReason;
+  if(m_loger)
+    m_loger->logTrace("entering MatrixIterativeOptimizer:optimize ... \n");
+
+  init();
+
+  bool abort = false;
+  
+  
+  /******************************
+    start time criteria
+  ******************************/
+  m_startTime = clock();
+
+  /******************************
+    compute start value 
+    and first gradient
+  ******************************/
+  m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+  m_gradient = (m_analyticalGradients == true && 
+        (m_costFunction->hasAnalyticGradient() == true) ) ?
+              getAnalyticalGradient(m_parameters) : 
+              getNumericalGradient(m_parameters, m_stepSize);
+
+
+  m_hk = m_gradient * -1.0;
+
+  matrix_type hk_norm;
+  double factor = m_hk.frobeniusNorm();
+  if(fabs(factor) > 1.0e-12)
+  {
+    hk_norm = m_hk * (1.0/ factor); 
+  }
+  matrix_type grad_norm;
+  double factor2 = m_gradient.frobeniusNorm();
+  if(fabs(factor2) > 1.0e-12)
+  {
+    grad_norm = m_gradient * (1.0/ factor2); 
+  }
+
+  /*******************************
+    optimize step length
+  *******************************/
+  /// ARMIJO
+  //m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
+  m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
+  m_lambdak = m_lin.optimize();
+  if(m_verbose == true)
+  {
+    std::cout << "stepsize lambda: " << m_lambdak << std::endl;
+    std::cout << "in direction: " << std::endl;
+    for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+    {
+      std::cout<< hk_norm(r,0) << " ";
+    }
+    std::cout << std::endl;
+      
+  }
+
+  
+  /*******************************
+    update the parameters
+  *******************************/
+  m_oldParams = m_parameters;
+  m_parameters = m_parameters + hk_norm * m_lambdak;
+
+
+  /******************************
+    check abort criteria 
+      for gradient
+  ******************************/	
+  if(m_gradientTolActive)
+  {
+    if(m_gradient.frobeniusNorm() < m_gradientTol){
+      m_returnReason = SUCCESS_GRADIENTTOL;
+      abort = true;
+    }
+  }	
+
+
+  /* do the optimization loop */
+  while(abort == false)
+  {
+    /******************************
+      increase iteration 
+            counter
+    ******************************/
+    m_numIter++;
+    
+
+    /******************************
+      cout actual iteration
+    ******************************/
+
+    if(m_verbose == true)
+    {
+      std::cout << "MatrixIterativeOptimizer :: Iteration: " << m_numIter << " : current parameters : ";
+      for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+      {
+        std::cout<< m_parameters(r,0) << "\t ";
+      }
+      std::cout << "    value:   " << m_currentCostFunctionValue << std::endl;
+    }
+
+
+    /******************************
+      Check iteration limit
+    ******************************/
+    if(m_maxNumIterActive)
+    {
+      if(m_numIter >= m_maxNumIter)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "Gradient Descenct :: aborting because of numIter" << std::endl;
+        }
+
+
+        /* set according return status and return */
+        m_returnReason = SUCCESS_MAXITER;
+        abort = true;
+        continue;
+      }
+    }
+
+    /******************************
+      get the new Gradient
+    ******************************/
+    m_oldGradient	= m_gradient;
+    m_gradient =	(m_analyticalGradients == true && 
+            (m_costFunction->hasAnalyticGradient() == true) ) ?
+              getAnalyticalGradient(m_parameters) : 
+              getNumericalGradient(m_parameters, m_stepSize);
+
+
+    /******************************
+      check gradient tol
+    ******************************/
+    if(m_gradientTolActive)
+    {
+      if(m_gradient.frobeniusNorm() < m_gradientTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "MatrixIterativeOptimizer :: aborting because of GradientTol" << std::endl;
+        }
+
+        m_returnReason = SUCCESS_GRADIENTTOL;
+        abort = true;
+        continue;
+      }
+    }
+
+    /******************************
+          Update the Iteration 
+            Matrix
+    ******************************/
+    //matrix_type oldMatrix	= m_IterationMatrix;
+    updateIterationMatrix();
+
+    /******************************
+      compute a descent 
+          direction
+    ******************************/
+    computeDescentDirection();
+
+
+    // check if descent direction is really a descent direction
+    // if not: reset the iteration matrix and take
+    // the gradient as iteration direction
+    if( (!m_hk *m_gradient)(0,0) > 0.0)
+    {
+      std::cout << " resetting matrix" << std::endl;
+      resetMatrix();
+      m_hk=m_gradient * -1.0;
+    }
+  
+
+
+    double factor = m_hk.frobeniusNorm();
+    if(fabs(factor) > 1.0e-12)
+    {
+      hk_norm = m_hk * (1.0/ factor); 
+    }
+    matrix_type grad_norm;
+    double factor2 = m_gradient.frobeniusNorm();
+    if(fabs(factor2) > 1.0e-12)
+    {
+      grad_norm = m_gradient * (1.0/ factor2); 
+    }
+
+    /*******************************
+      optimize step length
+    *******************************/
+    //m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
+    m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
+    m_lambdak = m_lin.optimize();
+    
+
+    
+    /*******************************
+      update the parameters
+    *******************************/
+    m_oldParams = m_parameters;
+    m_parameters = m_parameters + hk_norm * m_lambdak;
+    if(m_verbose == true)
+    {
+      //matrix_type hk_normu=S2U(hk_norm);
+      std::cout << "MatrixIterativeOptimizer :: Iterating with lambda = " << m_lambdak << std::endl;
+
+      std::cout << "MatrixIterativeOptimizer :: in direction : ";
+      for(int mu = 0; mu < static_cast<int>(m_numberOfParameters); mu++)
+      {
+        //std::cout<< hk_normu(mu,0) << "\t ";
+        std::cout<< hk_norm(mu,0) << "\t ";
+      }
+      std::cout << std::endl;
+
+      //matrix_type m_gradientu=S2U(m_gradient);
+      std::cout << "MatrixIterativeOptimizer :: Gradient was ";
+      for(int nu = 0; nu < static_cast<int>(m_numberOfParameters); nu++)
+      {
+        //std::cout<< m_gradientu(nu,0) << "\t ";
+        std::cout<< m_gradient(nu,0) << "\t ";
+      }
+      std::cout << std::endl;
+      
+    }
+
+      
+        
+    /*******************************
+      Check new parameters:
+        whether they are in bounds,
+        paramTol,
+        funcTol,
+        maxSeconds
+    *******************************/
+    if(!checkParameters(m_parameters))
+    {
+      if(m_verbose == true)
+      {
+        std::cout << "Gradient Descenct :: aborting because of parameter Bounds" << std::endl;
+      }
+
+      /* set according return status and the last parameters and return */
+      m_returnReason = ERROR_XOUTOFBOUNDS;
+      m_parameters = m_oldParams;
+      abort = true;
+      continue;
+    }
+
+    /*
+      Get new Costfunction Value
+    */
+    double oldFuncValue		= m_currentCostFunctionValue;
+    m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+
+    /*
+      Check ParamTol
+    */
+    if(m_paramTolActive)
+    {
+      if( (m_parameters - m_oldParams).frobeniusNorm() < m_paramTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "Gradient Descenct :: aborting because of param Tol" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_PARAMTOL;
+        /*m_parameters = oldParams;*/
+        abort = true;
+        continue;
+      }
+
+    }
+  
+    if(m_funcTolActive)
+    {
+      if( fabs(m_currentCostFunctionValue - oldFuncValue) < m_funcTol)
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "MatrixIterativeOptimizer :: aborting because of Func Tol" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_FUNCTOL;
+        abort = true;
+        continue;
+      }
+    }
+
+    /*
+      Check optimization time limit, if active
+    */
+    if(m_maxSecondsActive)
+    {
+      m_currentTime = clock();
+    
+      /* time limit exceeded ? */
+      if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "MatrixIterativeOptimizer :: aborting because of Time Limit" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_TIMELIMIT;
+        m_parameters = m_oldParams;
+        abort = true;
+        continue;
+      }
+    }
+
+  }
+
+  if(m_verbose)
+  {
+    std::cout << "MatrixIterativeOptimizer :: RESULT: ";
+    for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+    {
+      std::cout<< m_parameters(r,0) << " ";
+    }
+    std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
+  }
+
+  if(m_loger)
+    m_loger->logTrace("leaving MatrixIterativeOptimizer:optimize ... \n");
+  
+  return m_returnReason;
 
 }
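
The integer returned by optimize() is one of the status codes set above. A caller-side sketch; only the codes visible in this diff are listed, and depending on where the enum is declared they may need an extra scope prefix:

    int reason = opt.optimize();        // 'opt': any concrete MatrixIterativeOptimizer
    switch (reason)
    {
      case SUCCESS_GRADIENTTOL: break;  // gradient norm fell below m_gradientTol
      case SUCCESS_FUNCTOL:     break;  // function change fell below m_funcTol
      case SUCCESS_PARAMTOL:    break;  // parameter change fell below m_paramTol
      case SUCCESS_MAXITER:     break;  // iteration limit reached
      case SUCCESS_TIMELIMIT:   break;  // wall-clock limit reached
      case ERROR_XOUTOFBOUNDS:  break;  // last step left the feasible region
    }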

+ 133 - 129
MatrixIterativeOptimizer.h

@@ -15,136 +15,140 @@
 //#include "optimization/BrentLineSearcher.h"
 #include "optimization/ArmijoLineSearcher.h"
 
-/*!
-	abstract base class of all matrix iterative based optimizers.
 
-	Methods like the newton iterative method or BFGS inherit from this class
- */
-class MatrixIterativeOptimizer : public DerivativeBasedOptimizer
+namespace OPTIMIZATION
 {
-public:
-		typedef  DerivativeBasedOptimizer	SuperClass;	
-		typedef  SuperClass::matrix_type	matrix_type;	
-
-	    /*!
-			Constructor.
-			\param numOfParameters : Number of parameters to optimize
-			\param objectiveFunction : CostFunction*
-			\param loger : OptLogBase * to existing log class
-		*/
-		MatrixIterativeOptimizer(OptLogBase *loger);
-
-		/*!
-			Copy constructor
-			\param opt .. optimizer to copy
-		*/
-		MatrixIterativeOptimizer( const MatrixIterativeOptimizer &opt);
-
-
-		/*!
-			operator =
-		*/
-		MatrixIterativeOptimizer & operator=(const MatrixIterativeOptimizer &opt);
-
-
-		/*!
-				Destructor.
-		 */
-		virtual ~MatrixIterativeOptimizer();
-
-		/*!
-			\brief Set the initial step size
-			The initial stepsize is used to give the optimizer an initial value for the order of 
-			magnitude to start with.
-			(e.g. step 100000 in x direction or 0.01 ?)
-			\param stepSize with the step size for the i-th dimension in the (i,0)-th position.
-		*/
-		void setStepSize(const matrix_type & stepSize);
-		
-
-		/*!
-			\brief to set options for the internal line searcher, get the access
-		
-		*/
-		ArmijoLineSearcher * getLineSearcher(){return &m_lin;};
-
-		/*!
-			initialize
-		*/
-		void init();
-
-		/*!
-			optimize
-		*/
-		int optimize();
-
-protected:
-		
-
-		/*!
-			\brief update the Iteration Matrix
-		*/
-		virtual void updateIterationMatrix() = 0;
-
-		/*!
-			the iteration matrix
-		*/
-		matrix_type m_IterationMatrix;
-
-		/*!
-			\brief compute the descent direction
-		*/
-		void computeDescentDirection();
-		
-		/*!
-			the descent direction
-		*/
-		matrix_type m_hk;
-
-		/*!
-			\brief compute the steplength
-		*/
-		void ComputeStepLength();
-
-
-		/*!
-			\brief reset the matrix
-		*/
-		void resetMatrix();
-
-		/*!
-			the steplength
-		*/
-		double m_lambdak;
-
-		/*!
-			1-dim search to optimize the steplength
-		*/
-		//GoldenCutLineSearcher m_lin;
-		//BrentLineSearcher m_lin;
-		ArmijoLineSearcher m_lin;
-		
-		/*!
-			step size vector
-		*/
-		matrix_type m_stepSize;
-
-		/*
-			needed for updates
-		*/
-		matrix_type m_oldParams;
-
-		/*
-			needed for updates
-		*/
-		matrix_type m_oldGradient;
-
-		//double m_lin_Lower;
-		//double m_lin_Upper;
-		//double m_lin_Eps;
-
-
-		
-};
+  /*!
+    abstract base class of all matrix iterative based optimizers.
+
+    Methods like the newton iterative method or BFGS inherit from this class
+  */
+  class MatrixIterativeOptimizer : public DerivativeBasedOptimizer
+  {
+  public:
+      typedef  DerivativeBasedOptimizer	SuperClass;	
+      typedef  SuperClass::matrix_type	matrix_type;	
+
+        /*!
+        Constructor.
+        \param loger : OptLogBase * to existing log class
+      */
+      MatrixIterativeOptimizer(OptLogBase *loger);
+
+      /*!
+        Copy constructor
+        \param opt .. optimizer to copy
+      */
+      MatrixIterativeOptimizer( const MatrixIterativeOptimizer &opt);
+
+
+      /*!
+        operator =
+      */
+      MatrixIterativeOptimizer & operator=(const MatrixIterativeOptimizer &opt);
+
+
+      /*!
+          Destructor.
+      */
+      virtual ~MatrixIterativeOptimizer();
+
+      /*!
+        \brief Set the initial step size
+        The initial stepsize is used to give the optimizer an initial value for the order of 
+        magnitude to start with.
+        (e.g. step 100000 in x direction or 0.01 ?)
+        \param stepSize with the step size for the i-th dimension in the (i,0)-th position.
+      */
+      void setStepSize(const matrix_type & stepSize);
+      
+
+      /*!
+        \brief get access to the internal line searcher, e.g. in order to set its options
+      
+      */
+      ArmijoLineSearcher * getLineSearcher(){return &m_lin;};
+
+      /*!
+        initialize
+      */
+      void init();
+
+      /*!
+        optimize
+      */
+      int optimize();
+
+  protected:
+      
+
+      /*!
+        \brief update the Iteration Matrix
+      */
+      virtual void updateIterationMatrix() = 0;
+
+      /*!
+        the iteration matrix
+      */
+      matrix_type m_IterationMatrix;
+
+      /*!
+        \brief compute the descent direction
+      */
+      void computeDescentDirection();
+      
+      /*!
+        the descent direction
+      */
+      matrix_type m_hk;
+
+      /*!
+        \brief compute the steplength
+      */
+      void ComputeStepLength();
+
+
+      /*!
+        \brief reset the matrix
+      */
+      void resetMatrix();
+
+      /*!
+        the steplength
+      */
+      double m_lambdak;
+
+      /*!
+        1-dim search to optimize the steplength
+      */
+      //GoldenCutLineSearcher m_lin;
+      //BrentLineSearcher m_lin;
+      ArmijoLineSearcher m_lin;
+      
+      /*!
+        step size vector
+      */
+      matrix_type m_stepSize;
+
+      /*
+        needed for updates
+      */
+      matrix_type m_oldParams;
+
+      /*
+        needed for updates
+      */
+      matrix_type m_oldGradient;
+
+      //double m_lin_Lower;
+      //double m_lin_Upper;
+      //double m_lin_Eps;
+
+
+      
+  };//class
+}//namespace
 
 #endif
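
Since updateIterationMatrix() is the only pure virtual member, a concrete optimizer reduces to a few lines. A sketch (the class name is made up; getAnalyticalHessian() is the inherited helper that NewtonMethodOptimizer below relies on):

    namespace OPTIMIZATION
    {
      class MyNewtonLikeOptimizer : public MatrixIterativeOptimizer
      {
      public:
        MyNewtonLikeOptimizer(OptLogBase *loger) : MatrixIterativeOptimizer(loger) {}

      protected:
        // refresh m_IterationMatrix once per iteration; here a plain Newton step
        void updateIterationMatrix()
        {
          m_IterationMatrix = getAnalyticalHessian(m_parameters);
        }
      };
    }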

+ 15 - 13
NewtonMethodOptimizer.cpp

@@ -9,6 +9,8 @@
 
 #include "optimization/NewtonMethodOptimizer.h"
 
+using namespace OPTIMIZATION;
+
 NewtonMethodOptimizer::NewtonMethodOptimizer(OptLogBase *loger) : SuperClass(loger)
 {
 }
@@ -26,7 +28,7 @@ NewtonMethodOptimizer::~NewtonMethodOptimizer()
 
 void NewtonMethodOptimizer::init()
 {
-	SuperClass::init();
+  SuperClass::init();
 }
 
 
@@ -35,16 +37,16 @@ void NewtonMethodOptimizer::init()
 
 void NewtonMethodOptimizer::updateIterationMatrix()
 {
-	if(m_costFunction->hasAnalyticHessian() )
-	{
-		m_IterationMatrix = getAnalyticalHessian(m_parameters);
-
-	}
-	else
-	{
-		std::cerr << " NewtonMethodOptimizer::updateIterationMatrix() you cannot use this optimizer with the given costfunction" << std::endl;
-		if(m_loger)
-			m_loger->logError("NewtonMethodOptimizer::updateIterationMatrix  you cannot use this optimizer with the given costfunction");
-		assert(false);
-	}
+  if(m_costFunction->hasAnalyticHessian() )
+  {
+    m_IterationMatrix = getAnalyticalHessian(m_parameters);
+
+  }
+  else
+  {
+    std::cerr << " NewtonMethodOptimizer::updateIterationMatrix() you cannot use this optimizer with the given costfunction" << std::endl;
+    if(m_loger)
+      m_loger->logError("NewtonMethodOptimizer::updateIterationMatrix  you cannot use this optimizer with the given costfunction");
+    assert(false);
+  }
 }
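
updateIterationMatrix() asserts when the cost function provides no analytic Hessian, so the capability is worth checking before this optimizer is chosen. A sketch (hasAnalyticHessian() is the query used above; BFGSOptimizer is the Hessian-free sibling from this library):

    // guard before constructing a NewtonMethodOptimizer
    if (!costFunction->hasAnalyticHessian())
    {
      std::cerr << "no analytic Hessian available, consider BFGSOptimizer" << std::endl;
    }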

+ 70 - 68
NewtonMethodOptimizer.h

@@ -12,74 +12,76 @@
 
 #include "optimization/MatrixIterativeOptimizer.h"
 
-
-/*!
-	class for the newton method
-
-   Note: your objective function has to have analytical Hessian computation implemented!
-
-   HowToUse:
-	
-	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
-	  * use setParameters() to set the start point
-	  * call init()
-	  * call optimize()
-
-
- 	Implemented Abort criteria:
-		
-	  * maximum number of iterations
-	  *	time limit
-	  * parameter bounds
-	  * function value tolerance
-	  * parameter tolerance
-
-	Additional return reason:
-	
- */
-class NewtonMethodOptimizer : public MatrixIterativeOptimizer
+namespace OPTIMIZATION
 {
-public:
-		typedef MatrixIterativeOptimizer	SuperClass;	
-		typedef	SuperClass::matrix_type	matrix_type;
-
-	   	///
-		///	Constructor.
-		///	\param loger : OptLogBase * to existing log class
-		///
-		NewtonMethodOptimizer(OptLogBase *loger);
-
-		///
-		///	Copy constructor
-		///	\param opt .. optimizer to copy
-		///
-		NewtonMethodOptimizer( const NewtonMethodOptimizer &opt);
-
-
-		///
-		///	operator =
-		///
-		NewtonMethodOptimizer & operator=(const NewtonMethodOptimizer &opt);
-
-
-	        ///
-		///		Destructor.
-	        ///
-	        virtual ~NewtonMethodOptimizer();
-
-		///
-		///	internal initializations
-		///
-		void init();
-
-protected:
-		
-
-		/*!
-			\brief update the Iteration Matrix
-		*/
-		void updateIterationMatrix() ;
-	
-};
+  /*!
+    class for the newton method
+
+    Note: your objective function has to have analytical Hessian computation implemented!
+
+    HowToUse:
+    
+      * use setStepSize to specify the initial stepsize to compute the numerical gradient
+      * use setParameters() to set the start point
+      * call init()
+      * call optimize()
+
+
+    Implemented Abort criteria:
+      
+      * maximum number of iterations
+      *	time limit
+      * parameter bounds
+      * function value tolerance
+      * parameter tolerance
+
+    Additional return reason:
+    
+  */
+  class NewtonMethodOptimizer : public MatrixIterativeOptimizer
+  {
+  public:
+      typedef MatrixIterativeOptimizer	SuperClass;	
+      typedef	SuperClass::matrix_type	matrix_type;
+
+      ///
+      ///	Constructor.
+      ///	\param loger : OptLogBase * to existing log class
+      ///
+      NewtonMethodOptimizer(OptLogBase *loger);
+
+      ///
+      ///	Copy constructor
+      ///	\param opt .. optimizer to copy
+      ///
+      NewtonMethodOptimizer( const NewtonMethodOptimizer &opt);
+
+
+      ///
+      ///	operator =
+      ///
+      NewtonMethodOptimizer & operator=(const NewtonMethodOptimizer &opt);
+
+
+      ///
+      ///		Destructor.
+      ///
+      virtual ~NewtonMethodOptimizer();
+
+      ///
+      ///	internal initializations
+      ///
+      void init();
+
+  protected:
+      
+
+      /*!
+        \brief update the Iteration Matrix
+      */
+      void updateIterationMatrix() ;
+    
+  }; //class
+}//namespace
 
 #endif
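
The HowToUse steps in the header above, spelled out for a one-dimensional problem. A sketch: myCostFunction stands for any cost function with an analytic Hessian, setCostFunction() is a hypothetical name for however the objective is attached, and a NULL loger is assumed to be tolerated since every m_loger use above is guarded:

    OPTIMIZATION::NewtonMethodOptimizer newton(NULL);

    OPTIMIZATION::matrix_type start(1,1);  start(0,0) = -15.789; // start point
    OPTIMIZATION::matrix_type steps(1,1);  steps(0,0) = 0.01;    // gradient step size

    newton.setCostFunction(myCostFunction); // hypothetical setter name
    newton.setStepSize(steps);
    newton.setParameters(start);            // start point, as named in the comment above
    newton.init();
    int reason = newton.optimize();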

+ 16 - 14
OptTestSuite.cpp

@@ -4,7 +4,7 @@
 //
 //
 
-#include "DownhillSimplexOptimizer.h"
+#include "core/optimization/blackbox/DownhillSimplexOptimizer.h"
 #include "GradientDescentOptimizer.h"
 #include "BFGSOptimizer.h"
 #include "AdaptiveDirectionRandomSearchOptimizer.h"
@@ -12,7 +12,8 @@
 #include "AdditionalIceUtils.h"
 #include "CostFunction_ndim_2ndOrder.h"
 
-using namespace optimization;
+using namespace OPTIMIZATION;
+
 #include <cstdlib> // rand
 //#include <cmath> 
 #include <time.h> // time for srand
@@ -137,14 +138,14 @@ void OptTestSuite::insertOptProbsInGrid(SimpleOptTestGrid &testGrid, std::vector
 	CostFunction_ndim_2ndOrder *secOrder1 = new CostFunction_ndim_2ndOrder(dim);
 
 	cfns.push_back(secOrder1);
-	matrix_type A(dim,dim);	A[0][0]= 3;
-	matrix_type b(dim,1); b[0][0]=1;
+	matrix_type A(dim,dim);	A(0,0)= 3;
+	matrix_type b(dim,1); b(0,0)=1;
 	double c=14.378; secOrder1->setAbc(A,b,c);
 	
-	matrix_type secOrder1_solution(dim,1); secOrder1_solution[0][0]=-2.0/6.0; double secOrder1_succdist=1.0e-3;
+	matrix_type secOrder1_solution(dim,1); secOrder1_solution(0,0)=-2.0/6.0; double secOrder1_succdist=1.0e-3;
 
-	matrix_type secOrder1_inits(dim,1); secOrder1_inits[0][0]=-15.789;
-	matrix_type secOrder1_scales(dim,1); secOrder1_scales[0][0]=1.0;
+	matrix_type secOrder1_inits(dim,1); secOrder1_inits(0,0)=-15.789;
+	matrix_type secOrder1_scales(dim,1); secOrder1_scales(0,0)=1.0;
 	SimpleOptProblem secOrderProb1(secOrder1,secOrder1_inits,secOrder1_scales);
 	testGrid.addSimpleOptProblem( secOrderProb1, secOrder1_solution, secOrder1_succdist);
 
@@ -163,23 +164,24 @@ void OptTestSuite::insertOptProbsInGrid(SimpleOptTestGrid &testGrid, std::vector
 		{
 			if(i == j)
 			{
-				A2[i][j]=dim+getRand()*200;
+				A2(i,j)=dim+getRand()*200;
 			}
 			else
 			{
-				A2[i][j]=getRand();
-				A2[j][i]=A2[i][j];
+				A2(i,j)=getRand();
+				A2(j,i)=A2(i,j);
 			}
 		}
-		b2[i][0]=(getRand()-0.5) *dim ;
-		secOrder2_inits[i][0]=(getRand()-0.5) *dim;
-		secOrder2_scales[i][0]=1.0;
+		b2(i,0)=(getRand()-0.5) *dim ;
+		secOrder2_inits(i,0)=(getRand()-0.5) *dim;
+		secOrder2_scales(i,0)=1.0;
 	}
 	//A2 = A2 * (!A2);
 	double c2=1.23546; secOrder2->setAbc(A2,b2,c2);
 
 	matrix_type secOrder2_solution(dim,1);
-	matrix_type mb2=-b2;
+	matrix_type mb2=b2;
+	mb2*=-1.0;
 	linSolve(A2, secOrder2_solution, mb2);
 	double secOrder2_succdist=1.0e-3;
 	// test if matrix is spd
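
Because the converted matrix_type apparently lacks a unary minus (hence the copy-and-scale two lines above), a short residual check documents what linSolve() is expected to deliver here. A sketch using only operations that appear elsewhere in this diff:

    // sanity check: secOrder2_solution should satisfy A2 * x = -b2
    matrix_type residual = A2 * secOrder2_solution + b2;
    if (residual.frobeniusNorm() > 1.0e-8)
      std::cerr << "linSolve residual unexpectedly large" << std::endl;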

+ 37 - 34
OptTestSuite.h

@@ -11,40 +11,43 @@
 
 #include "SimpleOptTestGrid.h"
 
-///
-///	class that implements a test suite for the optimization lib
-///
-class OptTestSuite 
+namespace OPTIMIZATION
 {
-public:
-
-	///
-	/// Default constructor
-	///
-	OptTestSuite(){};
-
-	///
-	/// Default destructor
-	///
-	~OptTestSuite(){};
-
-	///
-	/// runs all implemented tests, including output 
-	///
-	///
-	void runAllTests();
-
-private:
-
-	/// run simple opt test grid
-	void runSimpleOptTestGrid();
-
-	/// instert optimizers
-	void insertOptsInGrid(SimpleOptTestGrid &testGrid, std::vector<Optimizer*> &opts);
-
-	/// insert opt probs
-	void insertOptProbsInGrid(SimpleOptTestGrid &testGrid, std::vector<CostFunction*> &cfns);
-
-};
+  ///
+  ///	class that implements a test suite for the optimization lib
+  ///
+  class OptTestSuite 
+  {
+  public:
+
+    ///
+    /// Default constructor
+    ///
+    OptTestSuite(){};
+
+    ///
+    /// Default destructor
+    ///
+    ~OptTestSuite(){};
+
+    ///
+    /// runs all implemented tests, including output 
+    ///
+    ///
+    void runAllTests();
+
+  private:
+
+    /// run simple opt test grid
+    void runSimpleOptTestGrid();
+
+    /// insert optimizers
+    void insertOptsInGrid(SimpleOptTestGrid &testGrid, std::vector<Optimizer*> &opts);
+
+    /// insert opt probs
+    void insertOptProbsInGrid(SimpleOptTestGrid &testGrid, std::vector<CostFunction*> &cfns);
+
+  }; //class
+}//namespace
 
 #endif // _OPTTESTSUITE_H_
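
Running the suite needs no configuration; runAllTests() builds the optimizer/problem grid internally and prints the results:

    OPTIMIZATION::OptTestSuite suite;
    suite.runAllTests();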

+ 3 - 2
Opt_Namespace.h

@@ -13,10 +13,11 @@
 #include <MatrixO.h>
 //using namespace ice;
 
-namespace optimization
+namespace OPTIMIZATION
 {
 
-	typedef ice::Matrix matrix_type; // fixme
+  // already defined in core/optimization/blackbox/Definitions_core_opt.h
+// 	typedef ice::Matrix matrix_type; // fixme
 };
 
 

BIN
OptimizerToolBox.pdf


+ 7 - 6
ParamLog.cpp

@@ -8,17 +8,18 @@
 #include <iostream>
 
 using namespace std;
+using namespace OPTIMIZATION;
 
 ParamLog::ParamLog( std::string file)
 {
-	// open file
-	m_stream.open(file.c_str(), ios_base::out);
+  // open file
+  m_stream.open(file.c_str(), ios_base::out);
 }
 
 ParamLog::~ParamLog()
 {
-	// close file
-	m_stream.close();
+  // close file
+  m_stream.close();
 }
 
 void ParamLog::writeLogError(const char* szMessage)
@@ -34,14 +35,14 @@ void ParamLog::writeLogTrace(const char* szMessage)
 }
 
 
-void ParamLog::writeParamsToFile(optimization::matrix_type& parammatrix)
+void ParamLog::writeParamsToFile(OPTIMIZATION::matrix_type& parammatrix)
 {
     int numParams= parammatrix.rows();
     if(m_stream.good())
     {
         for(int i= 0; i< numParams; ++i)
         {
-            m_stream <<parammatrix[i][0]<<" ";
+            m_stream <<parammatrix(i,0)<<" ";
         }
         m_stream<<endl;
         m_stream.flush();

+ 46 - 43
ParamLog.h

@@ -10,49 +10,52 @@
 #include <fstream>
 #include "core/optimization/blackbox/OptLogBase.h"
 
-
-class ParamLog : public OptLogBase
+namespace OPTIMIZATION
 {
-
-	public:
-	
-	/*!
-		default
-	*/
-	ParamLog(){};
-
-	/*!
-		@param file string with the filename to log to
-	*/
-	//FileLog( string file);
-	ParamLog( std::string file);
-
-	/*!
-		destructor
-	*/
-	virtual ~ParamLog();
-
-	/**! Write error message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogError(const char* szMessage);
-
-	/**! Write warning message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogWarning(const char* szMessage);
-
-	/**! Write trace message to an output device (file, stdio, etc.)
-	 */
-	virtual void writeLogTrace(const char* szMessage);
-
-	/**! Write parameter vector to output device (file, stdio, etc.)
-	 *
-	 * @param parammatrix parameter matrix
-	 */
-	virtual void writeParamsToFile(optimization::matrix_type& parammatrix);
-
-	private:
-	
-	std::ofstream m_stream;
-};
+    
+  class ParamLog : public OptLogBase
+  {
+
+    public:
+    
+    /*!
+      default
+    */
+    ParamLog(){};
+
+    /*!
+      @param file string with the filename to log to
+    */
+    //FileLog( string file);
+    ParamLog( std::string file);
+
+    /*!
+      destructor
+    */
+    virtual ~ParamLog();
+
+    /**! Write error message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogError(const char* szMessage);
+
+    /**! Write warning message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogWarning(const char* szMessage);
+
+    /**! Write trace message to an output device (file, stdio, etc.)
+    */
+    virtual void writeLogTrace(const char* szMessage);
+
+    /**! Write parameter vector to output device (file, stdio, etc.)
+    *
+    * @param parammatrix parameter matrix
+    */
+    virtual void writeParamsToFile(OPTIMIZATION::matrix_type& parammatrix);
+
+    private:
+    
+    std::ofstream m_stream;
+  };//class
+}//namespace
 
 #endif /* _PARAMLOG_H_ */
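
ParamLog appends one whitespace-separated parameter vector per line and flushes after every call, so the file can be plotted while an optimization is still running. Whether an optimizer forwards its iterates to writeParamsToFile() automatically is not visible in this diff, so the call is shown explicitly in this sketch:

    OPTIMIZATION::ParamLog paramlog("params.txt");

    OPTIMIZATION::matrix_type params(2,1);
    params(0,0) = 1.0;
    params(1,0) = 2.5;
    paramlog.writeParamsToFile(params);   // appends "1 2.5 " and flushes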

+ 38 - 38
Plotter.cpp

@@ -19,12 +19,12 @@ Copyright (C) 2009, Esther-Sabrina Platzer <esther.platzer@uni-jena.de>
 */
 
 #include "optimization/Plotter.h"
-#include "optimization/Opt_Namespace.h"
+#include "core/optimization/blackbox/Definitions_core_opt.h"
 
 #include <iostream>
 #include <fstream>
 
-using namespace optimization;
+using namespace OPTIMIZATION;
 
 Plotter::Plotter() : m_absoluteBounds(true),
 						  m_pCostfunc(NULL)
@@ -88,20 +88,20 @@ void Plotter::plot1D(int paramnum, const float from, const float to, const float
 	// else compute relative bounds
 	else
 	{
-		start= tmpParams[paramnum][0] + from;
-		end= tmpParams[paramnum][0] + to;
+		start= tmpParams(paramnum,0) + from;
+		end= tmpParams(paramnum,0) + to;
 	}
 	
 	// setting relevant parameter to start value
-	tmpParams[paramnum][0]= start;
+	tmpParams(paramnum,0)= start;
 	
-	while(tmpParams[paramnum][0] <= end)
+	while(tmpParams(paramnum,0) <= end)
 	{
 		// evaluate costfunction and write to plotfile
-		plotfile << tmpParams[paramnum][0] << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
+		plotfile << tmpParams(paramnum,0) << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
 		
 		// increment evaluation value
-		tmpParams[paramnum][0]+= stepwidth;
+		tmpParams(paramnum,0)+= stepwidth;
 	}
 	
 	plotfile.close();
@@ -134,32 +134,32 @@ void Plotter::plot2D(int paramnum1, const float from1, const float to1, const fl
 	// else compute relative bounds
 	else
 	{
-		start1= tmpParams[paramnum1][0] + from1;
-		end1= tmpParams[paramnum1][0] + to1;
-		start2= tmpParams[paramnum2][0] + from2;
-		end2= tmpParams[paramnum2][0] + to2;
+		start1= tmpParams(paramnum1,0) + from1;
+		end1= tmpParams(paramnum1,0) + to1;
+		start2= tmpParams(paramnum2,0) + from2;
+		end2= tmpParams(paramnum2,0) + to2;
 	}
 	
 	// setting relevant parameter to start value
-	tmpParams[paramnum1][0]= start1;
-	tmpParams[paramnum2][0]= start2;
+	tmpParams(paramnum1,0)= start1;
+	tmpParams(paramnum2,0)= start2;
 	
-	while(tmpParams[paramnum1][0] <= end1)
+	while(tmpParams(paramnum1,0) <= end1)
 	{
-		while(tmpParams[paramnum2][0] <= end2)
+		while(tmpParams(paramnum2,0) <= end2)
 		{
 			// evaluate costfunction and write to plotfile
-			plotfile << tmpParams[paramnum1][0] << " " << tmpParams[paramnum2][0] << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
+			plotfile << tmpParams(paramnum1,0) << " " << tmpParams(paramnum2,0) << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
 		
 			// increment evaluation value of parameter 2
-			tmpParams[paramnum2][0]+= stepwidth2;
+			tmpParams(paramnum2,0)+= stepwidth2;
 		}
 		plotfile<<std::endl;
 		// reset inner loop
-		tmpParams[paramnum2][0]= start2;
+		tmpParams(paramnum2,0)= start2;
 		
 		// increment evaluation value of parameter 1
-		tmpParams[paramnum1][0]+= stepwidth1;
+		tmpParams(paramnum1,0)+= stepwidth1;
 	}
 	
 	plotfile.close();
@@ -186,12 +186,12 @@ void Plotter::plot3D(int paramnum1, const float from1, const float to1, const fl
 	// else compute relative bounds
 	else
 	{
-		start1= tmpParams[paramnum1][0] + from1;
-		end1= tmpParams[paramnum1][0] + to1;
+		start1= tmpParams(paramnum1,0) + from1;
+		end1= tmpParams(paramnum1,0) + to1;
 	}
 	
 	// iterating over the first parameter
-	tmpParams[paramnum1][0]= start1;
+	tmpParams(paramnum1,0)= start1;
 	std::string filename(const_cast<char*>(path));
 	
 	// cutting suffix ".txt", if there
@@ -200,11 +200,11 @@ void Plotter::plot3D(int paramnum1, const float from1, const float to1, const fl
 		filename.erase(filename.find(".txt"),4);
 	}
 	
-	while(tmpParams[paramnum1][0] <= end1)
+	while(tmpParams(paramnum1,0) <= end1)
 	{
 		// generate name of plotfile
 		char number[5];
-		sprintf(number,"%f",tmpParams[paramnum1][0]);
+		sprintf(number,"%f",tmpParams(paramnum1,0));
 		std::string numfilename= filename + "_" + number + ".txt";
 		
 		// open plot file for writing
@@ -224,38 +224,38 @@ void Plotter::plot3D(int paramnum1, const float from1, const float to1, const fl
 		// else compute relative bounds
 		else
 		{
-			start2= tmpParams[paramnum2][0] + from2;
-			end2= tmpParams[paramnum2][0] + to2;
-			start3= tmpParams[paramnum3][0] + from3;
-			end3= tmpParams[paramnum3][0] + to3;
+			start2= tmpParams(paramnum2,0) + from2;
+			end2= tmpParams(paramnum2,0) + to2;
+			start3= tmpParams(paramnum3,0) + from3;
+			end3= tmpParams(paramnum3,0) + to3;
 		}
 	
 		// setting relevant parameter to start value
-		tmpParams[paramnum2][0]= start2;
-		tmpParams[paramnum3][0]= start3;
+		tmpParams(paramnum2,0)= start2;
+		tmpParams(paramnum3,0)= start3;
 	
-		while(tmpParams[paramnum2][0] <= end2)
+		while(tmpParams(paramnum2,0) <= end2)
 		{
-			while(tmpParams[paramnum3][0] <= end3)
+			while(tmpParams(paramnum3,0) <= end3)
 			{
 				// evaluate costfunction and write to plotfile
-				plotfile << tmpParams[paramnum2][0] << " " << tmpParams[paramnum3][0] << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
+				plotfile << tmpParams(paramnum2,0) << " " << tmpParams(paramnum3,0) << " " << m_pCostfunc->evaluate(tmpParams) << std::endl;
 			
 				// increment evaluation value of parameter 3
-				tmpParams[paramnum3][0]+= stepwidth3;
+				tmpParams(paramnum3,0)+= stepwidth3;
 			}
 			plotfile<<std::endl;
 			// reset inner loop
-			tmpParams[paramnum3][0]= start3;
+			tmpParams(paramnum3,0)= start3;
 			
 			// increment evaluation value of parameter 2
-			tmpParams[paramnum2][0]+= stepwidth2;
+			tmpParams(paramnum2,0)+= stepwidth2;
 		}
 		// close plotfile
 		plotfile.close();
 		
 		// increment parameter 1
-		tmpParams[paramnum1][0]+= stepwidth1;
+		tmpParams(paramnum1,0)+= stepwidth1;
 	}
 }
 

+ 103 - 99
Plotter.h

@@ -28,106 +28,110 @@ Matthias Wacker <matthias.wacker@mti.uni-jena.de>
 #define _PLOTTER_H_
 
 #include "core/optimization/blackbox/SimpleOptProblem.h"
+#include "AdaptiveDirectionRandomSearchOptimizer.h"
 
-
-class Plotter
+namespace OPTIMIZATION
 {
-	public:
-		///
-		/// defaultConstructor
-		///
-		Plotter();
-				
-		///
-		///	Copy constructor
-		///	@param plotter plotter to copy
-		///
-		Plotter(const Plotter &plotter);
-
-        ///
-		/// Assignment operator
-		/// @param pkitter plotter to assign
-		Plotter& operator=(const Plotter &plotter);
-
-
-		///
-		///	Destructor.
-        ///	
-		~Plotter();
-		
-	 
-		///
-		/// Set optimization problem
-		///
-		void setOptProblem(const SimpleOptProblem& optprob);
-
-		
-		///
-		/// Performs a 1d evaluation of the costfunction defined in the optimization problem.
-		/// @param paramnum parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from lower border of parameter value
-		/// @param to upper border of parameter value
-		/// @param stepwidth sampling rate for costfunction evaluation
-		/// @param path path to the result file
-		///
-		void plot1D(int paramnum, const float from, const float to, const float stepwidth, const char* path);
-		
-		
-		///
-		/// Performs a 2d evaluation of the costfunction defined in the optimization problem.
-		/// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from1 lower border of parameter value for parameter 1
-		/// @param to1 upper border of parameter value for parameter 1
-		/// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
-		/// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from2 lower border of parameter value for parameter 2
-		/// @param to2 upper border of parameter value for parameter2
-		/// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
-		/// @param path path to the result file
-		///
-		void plot2D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, const char* path);
-		
-		
-		///
-		/// Performs a 3d evaluation of the costfunction defined in the optimization problem. Note, that the first to parameters will vary while the third one stays fixed for the generation of one evaluation cycle. Afterwards a new plot file will be generated with a new initialization of parameter 3 and again parameter 1 and 2 will be varied within their ranges. This function will result in a fixed number of plot files giving the behavior of the first two parameters over time for a varied third one. The files will be named given the path and a evaluation number corresponding to the parameter value of parameter 1 will be added to the path name. 
-		/// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from1 lower border of parameter value for parameter 1
-		/// @param to1 upper border of parameter value for parameter 1
-		/// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
-		/// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from2 lower border of parameter value for parameter 2
-		/// @param to2 upper border of parameter value for parameter 2
-		/// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
-		/// @param paramnum3 third parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
-		/// @param from3 lower border of parameter value for parameter 3
-		/// @param to3 upper border of parameter value for parameter 3
-		/// @param stepwidth3 sampling rate for costfunction evaluation of parameter 3
-		/// @param path path to the result file
-		///
-		void plot3D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, int paramnum3, const float from3, const float to3, const float stepwidth3, const char* path);
-
-		///
-		/// Setting the bound interpretation status: if absolute= true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from from till to; if absolute= false, the bounds will be interpreted relative to the current parameter values, meaning the the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
-		/// @param absolute bound interpretation status
-		///
-		void setBoundInterpretationStatus(bool absolute);
-
-		/// Get the bound interpretation status
-		/// @retval true, if bounds are interpreted as absolute bounds (start= from, end=to)
-		/// @retval false, if bounds are interpreted relative to the actual parameter value (start= actualval + from, end= actualval + to)	
-		///
-		bool getBoundInterpretationStatus();
-
-	protected:
-
-		//! Flag giving the status of the bound interpretation: if true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from from till to; if false, the bounds will be interpreted relative to the current parameter values, meaning the the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
-		bool m_absoluteBounds;
-
-		//! Costfunction
-		CostFunction* m_pCostfunc;
-		
-		//! Optproblem pointer
-		const SimpleOptProblem* m_pOptprob;
-};
+  
+  class Plotter
+  {
+    public:
+      ///
+      /// defaultConstructor
+      ///
+      Plotter();
+          
+      ///
+      ///	Copy constructor
+      ///	@param plotter plotter to copy
+      ///
+      Plotter(const Plotter &plotter);
+
+          ///
+      ///
+      /// Assignment operator
+      /// @param plotter plotter to assign
+
+
+      ///
+      ///	Destructor.
+      ///
+      ~Plotter();
+      
+    
+      ///
+      /// Set optimization problem
+      ///
+      void setOptProblem(const SimpleOptProblem& optprob);
+
+      
+      ///
+      /// Performs a 1d evaluation of the costfunction defined in the optimization problem.
+      /// @param paramnum parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from lower border of parameter value
+      /// @param to upper border of parameter value
+      /// @param stepwidth sampling rate for costfunction evaluation
+      /// @param path path to the result file
+      ///
+      void plot1D(int paramnum, const float from, const float to, const float stepwidth, const char* path);
+      
+      
+      ///
+      /// Performs a 2d evaluation of the costfunction defined in the optimization problem.
+      /// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from1 lower border of parameter value for parameter 1
+      /// @param to1 upper border of parameter value for parameter 1
+      /// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
+      /// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from2 lower border of parameter value for parameter 2
+      /// @param to2 upper border of parameter value for parameter2
+      /// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
+      /// @param path path to the result file
+      ///
+      void plot2D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, const char* path);
+      
+      
+      ///
+      /// Performs a 3d evaluation of the costfunction defined in the optimization problem. Note that parameters 2 and 3 vary while parameter 1 stays fixed during one evaluation cycle (cf. the loop structure in Plotter.cpp above). Afterwards a new plot file is generated for the next value of parameter 1, and parameters 2 and 3 are varied again within their ranges. This results in a series of plot files giving the behavior of the two varied parameters for each value of the fixed one. The files are named after the given path, with the corresponding value of parameter 1 appended to the path name. 
+      /// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from1 lower border of parameter value for parameter 1
+      /// @param to1 upper border of parameter value for parameter 1
+      /// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
+      /// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from2 lower border of parameter value for parameter 2
+      /// @param to2 upper border of parameter value for parameter 2
+      /// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
+      /// @param paramnum3 third parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+      /// @param from3 lower border of parameter value for parameter 3
+      /// @param to3 upper border of parameter value for parameter 3
+      /// @param stepwidth3 sampling rate for costfunction evaluation of parameter 3
+      /// @param path path to the result file
+      ///
+      void plot3D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, int paramnum3, const float from3, const float to3, const float stepwidth3, const char* path);
+
+      ///
+      /// Setting the bound interpretation status: if absolute= true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from 'from' till 'to'; if absolute= false, the bounds will be interpreted relative to the current parameter values, meaning the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
+      /// @param absolute bound interpretation status
+      ///
+      void setBoundInterpretationStatus(bool absolute);
+
+      /// Get the bound interpretation status
+      /// @retval true, if bounds are interpreted as absolute bounds (start= from, end=to)
+      /// @retval false, if bounds are interpreted relative to the actual parameter value (start= actualval + from, end= actualval + to)	
+      ///
+      bool getBoundInterpretationStatus();
+
+    protected:
+
+      //! Flag giving the status of the bound interpretation: if true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from 'from' till 'to'; if false, the bounds will be interpreted relative to the current parameter values, meaning the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
+      bool m_absoluteBounds;
+
+      //! Costfunction
+      CostFunction* m_pCostfunc;
+      
+      //! Optproblem pointer
+      const SimpleOptProblem* m_pOptprob;
+  }; //class
+} //namespace
 
 #endif
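
Typical use, matching the declarations above; myProblem stands for a fully configured SimpleOptProblem, and the output files are the plain whitespace-separated tables written by Plotter.cpp:

    OPTIMIZATION::Plotter plotter;
    plotter.setOptProblem(myProblem);             // hypothetical, pre-built problem
    plotter.setBoundInterpretationStatus(false);  // bounds relative to current values
    plotter.plot1D(0, -1.0f, 1.0f, 0.01f, "param0.txt");
    plotter.plot2D(0, -1.0f, 1.0f, 0.05f,
                   1, -1.0f, 1.0f, 0.05f, "params01.txt");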

+ 296 - 295
PowellBrentOptimizer.cpp

@@ -10,60 +10,61 @@
 //////////////////////////////////////////////////////////////////////
 
 #include "optimization/PowellBrentOptimizer.h"
-#include "optimization/Opt_Namespace.h"
+#include "core/optimization/blackbox/Definitions_core_opt.h"
 //#include <iostream>
 
+using namespace OPTIMIZATION;
 
 PowellBrentOptimizer::PowellBrentOptimizer(OptLogBase *loger) : SuperClass(loger)
 {
-	//xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
-	//m_lin = BrentLineSearcher(m_costFunction,loger);
-	m_lin_Lower		= -5.0;
-	m_lin_Upper		= 5.0;
-	m_lin_Eps		= 1.0e-3;//1.0e-2;
+  //xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
+  //m_lin = BrentLineSearcher(m_costFunction,loger);
+  m_lin_Lower		= -5.0;
+  m_lin_Upper		= 5.0;
+  m_lin_Eps		= 1.0e-3;//1.0e-2;
 }
 
 
 PowellBrentOptimizer::PowellBrentOptimizer( const PowellBrentOptimizer &opt) : SuperClass(opt)
 {
-	m_lin= opt.m_lin;
-	xi= opt.xi;
-	m_lin_Lower= opt.m_lin_Lower;
-	m_lin_Upper= opt.m_lin_Upper;
-	m_lin_Eps= opt.m_lin_Eps;
-	
+  m_lin= opt.m_lin;
+  xi= opt.xi;
+  m_lin_Lower= opt.m_lin_Lower;
+  m_lin_Upper= opt.m_lin_Upper;
+  m_lin_Eps= opt.m_lin_Eps;
+  
 }
 
 /*
-	operator=
+  operator=
 */
 PowellBrentOptimizer & PowellBrentOptimizer::operator=(const PowellBrentOptimizer &opt)
 {
-		
-	/*
-			avoid self-copy
-	*/
-	if(this != &opt)
-	{
-		
-				
-		/*
-			=operator of SuperClass
-		*/
-		SuperClass::operator=(opt);
-
-			
-		/*
-			own values:
-		*/
-		m_lin= opt.m_lin;
-		xi= opt.xi;
-		m_lin_Lower= opt.m_lin_Lower;
-		m_lin_Upper= opt.m_lin_Upper;
-		m_lin_Eps= opt.m_lin_Eps;
-		
-	}
-  	return *this;
+    
+  /*
+      avoid self-copy
+  */
+  if(this != &opt)
+  {
+    
+        
+    /*
+      =operator of SuperClass
+    */
+    SuperClass::operator=(opt);
+
+      
+    /*
+      own values:
+    */
+    m_lin= opt.m_lin;
+    xi= opt.xi;
+    m_lin_Lower= opt.m_lin_Lower;
+    m_lin_Upper= opt.m_lin_Upper;
+    m_lin_Eps= opt.m_lin_Eps;
+    
+  }
+    return *this;
 }
 
 
@@ -74,277 +75,277 @@ PowellBrentOptimizer::~PowellBrentOptimizer()
 
 void PowellBrentOptimizer::init()
 {
-	SuperClass::init();
+  SuperClass::init();
 
-	//xi =0.0;
-	xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
-	m_lin = BrentLineSearcher(m_costFunction,m_loger);
+  //xi =0.0;
+  xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
+  m_lin = BrentLineSearcher(m_costFunction,m_loger);
 
 
-	for(uint i = 0; i < m_numberOfParameters;i++)
-	{
-		for(uint j = 0; j < m_numberOfParameters; j++)
-		{
-			if(i == j)
-			{
-				//xi(i,j) = 1.0;
-				xi[i][j] = 1.0;
-			}
-		}
-	}
+  for(uint i = 0; i < m_numberOfParameters;i++)
+  {
+    for(uint j = 0; j < m_numberOfParameters; j++)
+    {
+      if(i == j)
+      {
+        //xi(i,j) = 1.0;
+        xi(i,j) = 1.0;
+      }
+    }
+  }
 
-	if(m_funcTolActive)
-		m_lin.setEps(m_funcTol);
+  if(m_funcTolActive)
+    m_lin.setEps(m_funcTol);
 
-	if(m_maximize == true)
-		m_lin.setMaximize(true);
+  if(m_maximize == true)
+    m_lin.setMaximize(true);
 
-	//if(m_verbose)
-	//	m_lin.setVerbose(true);
+  //if(m_verbose)
+  //	m_lin.setVerbose(true);
 }
 
 
 void PowellBrentOptimizer::setLineSearchOptions(double lower, double upper, double eps)
 {
-	m_lin_Lower= lower;
-	m_lin_Upper= upper;
-	m_lin_Eps= eps;
-	m_lin.setBounds(lower, upper);
-	m_lin.setEps(eps);
+  m_lin_Lower= lower;
+  m_lin_Upper= upper;
+  m_lin_Eps= eps;
+  m_lin.setBounds(lower, upper);
+  m_lin.setEps(eps);
 }
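
The bracket set here is in units of the (scaled) search direction, so the constructor defaults above (lower = -5, upper = 5, eps = 1.0e-3) often need problem-specific tuning. A sketch (a NULL loger is assumed to be tolerated, as elsewhere in the library):

    OPTIMIZATION::PowellBrentOptimizer powell(NULL);
    powell.setLineSearchOptions(-1.0, 1.0, 1.0e-4);  // tighter bracket, finer eps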
 
 
 int PowellBrentOptimizer::optimize()
 {
-	this->init();
-	/*
-		start time criteria
-	*/
-	m_startTime = clock();
-
-	int n = m_numberOfParameters;
-	
-	double ftol = m_funcTol;
-	double TINY = 1.0e-25;
-	
-	bool abort = false;
-	
-	int i,ibig, j;
-	double del, fp, fptt, t;
-	
-	matrix_type pt(m_numberOfParameters,1);
-	matrix_type ptt(m_numberOfParameters,1);
-	matrix_type xit(m_numberOfParameters,1);
-	
-	/*
-		eval at parameters
-	*/
-	double fret = evaluateCostFunction(m_parameters);
-	
-	/*
-		save the initial point
-	*/
-	pt = m_parameters;
-
-	if(m_verbose)
-	{
-		for(uint r = 0; r < m_numberOfParameters; r++)
-		{
-			std::cout<< m_parameters[r][0] << " ";
-		}
-		
-		std::cout<< fret << std::endl;
-	}
-	
-	while(abort == false)
-	{
-		/*
-			Check iteration limit
-		*/
-		if(m_maxNumIterActive)
-		{
-			if(m_numIter >= m_maxNumIter)
-			{
-				/* set according return status and return */
-				m_returnReason = SUCCESS_MAXITER;
-				abort = true;
-				continue;
-			}
-		}
-		 /*
-		  increase iteration counter
-		 */
-		++m_numIter;
-		
-		fp = fret;
-		ibig = 0;
-		del = 0.0;
-		
-		for(i = 0; i < n;i++)
-		{
-			for(j = 0; j<n;j++)
-			{
-				//xit(j,0) = xi(j,i) * m_scales(j,0);
-				xit[j][0] = xi[j][i] * m_scales[j][0];
-			}
-			fptt=fret;
-			
-			/* do a one dimensional line search */
-			m_lin.setBounds(m_lin_Lower ,m_lin_Upper);
-			m_lin.setEps(m_lin_Eps);
-			m_lin.setX0(m_parameters);
-			m_lin.setH0(xit);
-			double lambda = m_lin.optimize();
-			
-			fret = m_lin.evaluateSub(lambda);
-			xit = xit * lambda;
-			
-			// if value improved, accept new point
-			if(fret < fptt )
-			{
-				m_parameters= m_parameters + xit;
-			}
-			
-			if(m_verbose)
-			{
-				for(uint r = 0; r < m_numberOfParameters; r++)
-				{
-					std::cout<< m_parameters[r][0] << " ";
-				}
-				
-				std::cout <<fret<< std::endl;
-			}
-			
-			
-			if(fptt - fret > del)
-			{
-				del = fptt - fret;
-				ibig = i;
-			}
-		}
-	
-		// check for improvement -> if there is none, abort iteration!
-		if(fret >= fp)
-		{
-			m_returnReason=SUCCESS_NO_BETTER_POINT; 
-			abort=true;
-			continue;
-		}
-
-
-
-		if(m_funcTolActive)
-		{
-			/*
-				a recommended abort criteria
-			*/
-			if(2.0 * (fp - fret ) <= ftol *(fabs(fp) + fabs(fret) ) + TINY)
-			{
-				m_returnReason = SUCCESS_FUNCTOL;
-				abort = true;
-				continue;
-			}
-		}
-
-
-		/*
-			construct extrapolated point and the average direction moved
-			save old starting point
-		*/
-		for (j=0; j<n;j++)
-		{
-			//ptt(j,0)= 2.0*m_parameters(j,0)-pt(j,0);
-			//xit(j,0)= m_parameters(j,0)-pt(j,0);
-			//pt(j,0) = m_parameters(j,0);
-			ptt[j][0]= 2.0*m_parameters[j][0]-pt[j][0];
-			xit[j][0]= m_parameters[j][0]-pt[j][0];
-			pt[j][0] = m_parameters[j][0];
-		}
-
-		fptt = evaluateCostFunction(ptt);
-
-		if(fptt < fp)
-		{
-			t = 2.0*(fp-2.0*fret+fptt)*sqrt(fp-fret-del) - del *sqrt(fp-fptt);
-			if(t < 0.0)
-			{
-				/* 
-					move to minimum of new 
-					direction and save the new direction
-				*/
-				m_lin.setBounds(m_lin_Lower,m_lin_Upper);
-				m_lin.setX0(m_parameters);
-				m_lin.setH0(xit);
-
-				double lambda = m_lin.optimize();
-			
-				fret = m_lin.evaluateSub(lambda);
-				xit= xit*lambda;
-				m_parameters= m_parameters+ xit;
-				
-				// normalize direction
-				xit = xit * (1.0 / xit.Norm(0));  
-				for(j=0; j < n ;j++)
-				{
-					xi[j][ibig] = xi[j][n-1];
-					xi[j][n-1]= xit[j][0];
-				}
-			}
-		}
-
-
-		
-		/*
-			Check Optimization Timilimit, if active
-		*/
-		if(m_maxSecondsActive)
-		{
-			m_currentTime = clock();
-		
-			/* time limit exceeded ? */
-			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
-			{
-				if(m_verbose == true)
-				{
-					std::cout << "# aborting because of Time Limit" << std::endl;
-				}
-
-				/* set according return status and the last parameters and return */
-				m_returnReason = SUCCESS_TIMELIMIT;
-				abort = true;
-				continue;
-			}
-		}
-
-		/*
-			Check Bounds if Active
-		*/
-		if(!checkParameters(m_parameters))
-		{
-			if(m_verbose == true)
-			{
-				std::cout << "PowellBrent :: aborting because of parameter Bounds" << std::endl;
-			}
-
-			/* set according return status and the last parameters and return */
-			m_returnReason = ERROR_XOUTOFBOUNDS;
-			abort = true;
-			continue;
-		}
-
-		/*if(m_verbose)
-		{
-			for(int r = 0; r < m_numberOfParameters; r++)
-			{
-				std::cout<< m_parameters(r,0) << " ";
-			}
-			
-			std::cout << std::endl;
-		}*/
-		
-	}
-	
-	
-	return m_returnReason;
-	
+  this->init();
+  /*
+    start time criteria
+  */
+  m_startTime = clock();
+
+  int n = m_numberOfParameters;
+  
+  double ftol = m_funcTol;
+  double TINY = 1.0e-25;
+  
+  bool abort = false;
+  
+  int i,ibig, j;
+  double del, fp, fptt, t;
+  
+  matrix_type pt(m_numberOfParameters,1);
+  matrix_type ptt(m_numberOfParameters,1);
+  matrix_type xit(m_numberOfParameters,1);
+  
+  /*
+    eval at parameters
+  */
+  double fret = evaluateCostFunction(m_parameters);
+  
+  /*
+    save the initial point
+  */
+  pt = m_parameters;
+
+  if(m_verbose)
+  {
+    for(uint r = 0; r < m_numberOfParameters; r++)
+    {
+      std::cout<< m_parameters(r,0) << " ";
+    }
+    
+    std::cout<< fret << std::endl;
+  }
+  
+  while(abort == false)
+  {
+    /*
+      Check iteration limit
+    */
+    if(m_maxNumIterActive)
+    {
+      if(m_numIter >= m_maxNumIter)
+      {
+        /* set according return status and return */
+        m_returnReason = SUCCESS_MAXITER;
+        abort = true;
+        continue;
+      }
+    }
+    /*
+      increase iteration counter
+    */
+    ++m_numIter;
+    
+    fp = fret;
+    ibig = 0;
+    del = 0.0;
+    
+    for(i = 0; i < n;i++)
+    {
+      for(j = 0; j<n;j++)
+      {
+        xit(j,0) = xi(j,i) * m_scales(j,0);
+      }
+      fptt=fret;
+      
+      /* do a one dimensional line search */
+      m_lin.setBounds(m_lin_Lower ,m_lin_Upper);
+      m_lin.setEps(m_lin_Eps);
+      m_lin.setX0(m_parameters);
+      m_lin.setH0(xit);
+      double lambda = m_lin.optimize();
+      
+      fret = m_lin.evaluateSub(lambda);
+      xit = xit * lambda;
+      
+      // if value improved, accept new point
+      if(fret < fptt )
+      {
+        m_parameters= m_parameters + xit;
+      }
+      
+      if(m_verbose)
+      {
+        for(uint r = 0; r < m_numberOfParameters; r++)
+        {
+          std::cout<< m_parameters(r,0) << " ";
+        }
+        
+        std::cout <<fret<< std::endl;
+      }
+      
+      
+      if(fptt - fret > del)
+      {
+        del = fptt - fret;
+        ibig = i;
+      }
+    }
+  
+    // check for improvement -> if there is none, abort iteration!
+    if(fret >= fp)
+    {
+      m_returnReason=SUCCESS_NO_BETTER_POINT; 
+      abort=true;
+      continue;
+    }
+
+
+
+    if(m_funcTolActive)
+    {
+      /*
+        a recommended abort criterion
+      */
+      if(2.0 * (fp - fret ) <= ftol *(fabs(fp) + fabs(fret) ) + TINY)
+      {
+        m_returnReason = SUCCESS_FUNCTOL;
+        abort = true;
+        continue;
+      }
+    }
+
+
+    /*
+      construct extrapolated point and the average direction moved
+      save old starting point
+    */
+    for (j=0; j<n;j++)
+    {
+      ptt(j,0)= 2.0*m_parameters(j,0)-pt(j,0);
+      xit(j,0)= m_parameters(j,0)-pt(j,0);
+      pt(j,0) = m_parameters(j,0);
+    }
+
+    fptt = evaluateCostFunction(ptt);
+
+    if(fptt < fp)
+    {
+      // Numerical Recipes' powell() squares these terms; sqrt weakens the test
+      t = 2.0*(fp-2.0*fret+fptt)*(fp-fret-del)*(fp-fret-del) - del*(fp-fptt)*(fp-fptt);
+      if(t < 0.0)
+      {
+        /* 
+          move to minimum of new 
+          direction and save the new direction
+        */
+        m_lin.setBounds(m_lin_Lower,m_lin_Upper);
+        m_lin.setX0(m_parameters);
+        m_lin.setH0(xit);
+
+        double lambda = m_lin.optimize();
+      
+        fret = m_lin.evaluateSub(lambda);
+        xit= xit*lambda;
+        m_parameters= m_parameters+ xit;
+        
+        // normalize direction
+        xit = xit * (1.0 / xit.frobeniusNorm());  
+        for(j=0; j < n ;j++)
+        {
+          xi(j,ibig) = xi(j,n-1);
+          xi(j,n-1)= xit(j,0);
+        }
+      }
+    }
+
+
+    
+    /*
+      Check optimization time limit, if active
+    */
+    if(m_maxSecondsActive)
+    {
+      m_currentTime = clock();
+    
+      /* time limit exceeded ? */
+      if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+      {
+        if(m_verbose == true)
+        {
+          std::cout << "# aborting because of Time Limit" << std::endl;
+        }
+
+        /* set according return status and the last parameters and return */
+        m_returnReason = SUCCESS_TIMELIMIT;
+        abort = true;
+        continue;
+      }
+    }
+
+    /*
+      Check Bounds if Active
+    */
+    if(!checkParameters(m_parameters))
+    {
+      if(m_verbose == true)
+      {
+        std::cout << "PowellBrent :: aborting because of parameter Bounds" << std::endl;
+      }
+
+      /* set according return status and the last parameters and return */
+      m_returnReason = ERROR_XOUTOFBOUNDS;
+      abort = true;
+      continue;
+    }
+
+    /*if(m_verbose)
+    {
+      for(int r = 0; r < m_numberOfParameters; r++)
+      {
+        std::cout<< m_parameters(r,0) << " ";
+      }
+      
+      std::cout << std::endl;
+    }*/
+    
+  }
+  
+  
+  return m_returnReason;
+  
 }

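Review note on the hunk above: the direction-replacement test follows powell() from Numerical Recipes, which squares the bracketed terms; the sqrt calls carried over from the old code weaken the test, hence the fix above. A self-contained sketch of the criterion (the function name is illustrative; fp, fret, fptt and del match the variables in optimize()):

// Sketch of Powell's direction-replacement test (cf. Numerical Recipes, powell()).
//   fp   - cost at the start of the iteration
//   fret - cost after line searches along all n directions
//   fptt - cost at the extrapolated point 2*p - p0
//   del  - largest single-direction decrease seen during the sweep
bool replaceBiggestDecreaseDirection(double fp, double fret, double fptt, double del)
{
  if (fptt >= fp)
    return false; // extrapolation did not help; keep the current direction set

  const double a = fp - fret - del;
  const double b = fp - fptt;
  const double t = 2.0 * (fp - 2.0 * fret + fptt) * a * a - del * b * b;

  // t < 0: the averaged direction is promising, so drop the direction of the
  // largest decrease (ibig) and store the new direction in its place
  return t < 0.0;
}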
+ 101 - 97
PowellBrentOptimizer.h

@@ -16,103 +16,107 @@
 #include "core/optimization/blackbox/SimpleOptimizer.h"
 #include "optimization/BrentLineSearcher.h"
 
-/*!
-	class for the PowellBrentOptimizer
-
-   HowToUse:
-
-	  * use setParameters() to set the start point
-	  * you may use m_scales to regulate the inner search interval 
-		(set it to a vector with 1/x as elements to divide it by x);
-	  * call init()
-	  * call optimize()
-
-
- 	Implemented Abort criteria:
-		
-	  * maximum number of iterations
-	  *	time limit
-	  * parameter bounds
-	  * function value tolerance
-		  
-	Additional return reason:
-
- */
-class PowellBrentOptimizer : public SimpleOptimizer
+namespace OPTIMIZATION
 {
-	public:
-
-		typedef  SimpleOptimizer SuperClass;
-		typedef SuperClass::matrix_type matrix_type;
-
-		/*!
-			Constructor.
-			\param loger : OptLogBase * to existing log class
-		*/
-		PowellBrentOptimizer(OptLogBase *loger=NULL);
-
-		/*!
-			Copy constructor
-			\param opt .. optimizer to copy
-		*/
-		PowellBrentOptimizer( const PowellBrentOptimizer &opt);
-
-		/*!
-			operator =
-		*/
-		PowellBrentOptimizer & operator=(const PowellBrentOptimizer &opt);
-
-	        /*!
-				Destructor.
-	         */
-	        ~PowellBrentOptimizer();
-
-		///
-		///	enumeration for the return reasons of an optimizer,
-		///	has all elements of the SuperClass optimizer
-		///
-		enum {	SUCCESS_NO_BETTER_POINT = _to_continue_,
-				_to_continue_
-		};
-
-	
-		/*!
-			\brief Do internal initializations
-		*/
-		void init();
-		
-		/*!
-			\brief start the optmization
-		*/
-		int optimize();
-
-
-		/*!
-			\brief set options for the internal line searcher
-		
-		*/
-		void setLineSearchOptions(double lower, double upper, double eps);
-		
-
-	private:
-
-		/*!
-			the direction set
-		*/
-		matrix_type xi;
-
-		/*!
-			the brent line searcher
-		*/
-		BrentLineSearcher m_lin;
-
-		/*!
-			parameter brent linesseracher	
-		*/
-		double m_lin_Lower;
-		double m_lin_Upper;
-		double m_lin_Eps;
-	
-};
+
+  /*!
+    class for the PowellBrentOptimizer
+
+    HowToUse:
+
+      * use setParameters() to set the start point
+      * you may use m_scales to regulate the inner search interval 
+        (set it to a vector with 1/x as elements to divide it by x)
+      * call init()
+      * call optimize()
+
+
+    Implemented abort criteria:
+
+      * maximum number of iterations
+      * time limit
+      * parameter bounds
+      * function value tolerance
+
+    Additional return reason:
+
+      * SUCCESS_NO_BETTER_POINT (no direction in the sweep improved the cost)
+  */
+  class PowellBrentOptimizer : public SimpleOptimizer
+  {
+    public:
+
+      typedef  SimpleOptimizer SuperClass;
+      typedef SuperClass::matrix_type matrix_type;
+
+      /*!
+        Constructor.
+        \param loger : OptLogBase * to existing log class
+      */
+      PowellBrentOptimizer(OptLogBase *loger=NULL);
+
+      /*!
+        Copy constructor
+        \param opt .. optimizer to copy
+      */
+      PowellBrentOptimizer( const PowellBrentOptimizer &opt);
+
+      /*!
+        operator =
+      */
+      PowellBrentOptimizer & operator=(const PowellBrentOptimizer &opt);
+
+      /*!
+        Destructor.
+      */
+      ~PowellBrentOptimizer();
+
+      ///
+      ///	enumeration for the return reasons of an optimizer,
+      ///	has all elements of the SuperClass optimizer
+      ///
+      enum {	SUCCESS_NO_BETTER_POINT = _to_continue_,
+          _to_continue_
+      };
+
+    
+      /*!
+        \brief Do internal initializations
+      */
+      void init();
+      
+      /*!
+        \brief start the optimization
+      */
+      int optimize();
+
+
+      /*!
+        \brief set options for the internal line searcher
+      
+      */
+      void setLineSearchOptions(double lower, double upper, double eps);
+      
+
+    private:
+
+      /*!
+        the direction set
+      */
+      matrix_type xi;
+
+      /*!
+        the Brent line searcher
+      */
+      BrentLineSearcher m_lin;
+
+      /*!
+        parameters for the Brent line searcher
+      */
+      double m_lin_Lower;
+      double m_lin_Upper;
+      double m_lin_Eps;
+    
+  }; //class
+}//namespace
 
 #endif

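For reviewers, a minimal usage sketch of the relocated class, following the HowToUse notes above. It assumes any CostFunction subclass (e.g. tests/MyCostFunction below); setMaxNumIter() is an inherited SimpleOptimizer setter whose signature is assumed here, not part of this diff:

#include "optimization/PowellBrentOptimizer.h"
#include "core/optimization/blackbox/SimpleOptProblem.h"
#include "core/optimization/blackbox/CostFunction.h"

using namespace OPTIMIZATION;

// func points to a CostFunction over dim parameters
void runPowellBrent(CostFunction *func, int dim)
{
  matrix_type initialParams(dim, 1);
  initialParams.set(2.0);                          // start point

  matrix_type scales(dim, 1);
  scales.set(1.0);                                 // regulates the inner search interval

  SimpleOptProblem optProblem(func, initialParams, scales);

  PowellBrentOptimizer optimizer;
  optimizer.setLineSearchOptions(-5.0, 5.0, 1e-4); // Brent bracket [lower, upper] and eps
  optimizer.setMaxNumIter(true, 100);              // assumed inherited abort criterion
  optimizer.optimizeProb(optProblem);

  matrix_type result = optProblem.getAllCurrentParams();
}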
+ 46 - 45
SimpleOptTestGrid.cpp

@@ -5,11 +5,12 @@
 #include "SimpleOptTestGrid.h"
 
 using namespace std;
-using namespace opt;
+using namespace OPTIMIZATION;
+
 SimpleOptTestGrid::SimpleOptTestGrid()
 {
-	m_iNumberOfOptimizers = 0;
-	m_iNumberOfProblems = 0;
+  m_iNumberOfOptimizers = 0;
+  m_iNumberOfProblems = 0;
 }
 
 
@@ -18,67 +19,67 @@ SimpleOptTestGrid::~SimpleOptTestGrid()
 }
 
 
-void SimpleOptTestGrid::addSimpleOptProblem(const SimpleOptProblem & optProb, const opt::matrix_type &solution, double successDist)
+void SimpleOptTestGrid::addSimpleOptProblem(const SimpleOptProblem & optProb, const OPTIMIZATION::matrix_type &solution, double successDist)
 {
-	m_vSimpleOptProblem.push_back(optProb);
-	m_vSolutions.push_back(solution);
-	m_vSuccessDistances.push_back(successDist);
+  m_vSimpleOptProblem.push_back(optProb);
+  m_vSolutions.push_back(solution);
+  m_vSuccessDistances.push_back(successDist);
 
-	m_iNumberOfProblems = m_vSimpleOptProblem.size();
+  m_iNumberOfProblems = m_vSimpleOptProblem.size();
 }
 
 
 void SimpleOptTestGrid::addSimpleOptimizer(SimpleOptimizer *opt)
 {
-	m_vSimpleOptimzers.push_back(opt);
-	m_iNumberOfOptimizers = m_vSimpleOptimzers.size();
+  m_vSimpleOptimzers.push_back(opt);
+  m_iNumberOfOptimizers = m_vSimpleOptimzers.size();
 }
 
 
 bool SimpleOptTestGrid::test()
 {
-	bool success = true;
-
-	// for every optimizer
-	for(int optIdx=0; optIdx < m_iNumberOfOptimizers; ++optIdx)
-	{
-		// run every test
-		for(int probIdx=0; probIdx < m_iNumberOfProblems; ++probIdx)
-		{
-			bool tmpResult = test(optIdx,probIdx);
-			cout << "SOTG: single test with opt: " << optIdx << " and prob: " << probIdx;
-			if(tmpResult == true)
-			{
-				cout << " SUCCEEDED"<<endl; 
-			}
-			else
-			{
-				cout << " FAILED"<<endl;
-			}
-
-		}
-	}
-
-	return success;
+  bool success = true;
+
+  // for every optimizer
+  for(int optIdx=0; optIdx < m_iNumberOfOptimizers; ++optIdx)
+  {
+    // run every test
+    for(int probIdx=0; probIdx < m_iNumberOfProblems; ++probIdx)
+    {
+      bool tmpResult = test(optIdx,probIdx);
+      cout << "SOTG: single test with opt: " << optIdx << " and prob: " << probIdx;
+      if(tmpResult == true)
+      {
+        cout << " SUCCEEDED"<<endl; 
+      }
+      else
+      {
+        cout << " FAILED"<<endl;
+        success = false; // record the failure so test() reports it overall
+      }
+
+    }
+  }
+
+  return success;
 }
 
 
 
 bool SimpleOptTestGrid::test(int optIdx, int probIdx)
 {
-	// get a copy of the prob
-	SimpleOptProblem optProb = m_vSimpleOptProblem[probIdx];
-	matrix_type init = optProb.getAllCurrentParams();
-	
-	// start Optimization
-	m_vSimpleOptimzers[optIdx]->optimizeProb(optProb);
+  // get a copy of the prob
+  SimpleOptProblem optProb = m_vSimpleOptProblem[probIdx];
+  matrix_type init = optProb.getAllCurrentParams();
+  
+  // start Optimization
+  m_vSimpleOptimzers[optIdx]->optimizeProb(optProb);
 
-	// check for success
-	matrix_type result = optProb.getAllCurrentParams();
-	double distres = ( result - m_vSolutions[probIdx] ).Norm(0);
-	double distinit = ( init - m_vSolutions[probIdx] ).Norm(0);
+  // check for success
+  matrix_type result = optProb.getAllCurrentParams();
+  double distres = ( result - m_vSolutions[probIdx] ).frobeniusNorm();
+  double distinit = ( init - m_vSolutions[probIdx] ).frobeniusNorm();
 
-	cout << "resDist: "<< distres << " from initial: "<< distinit << endl;
+  cout << "resDist: "<< distres << " from initial: "<< distinit << endl;
 
-	return distres < m_vSuccessDistances[probIdx];
+  return distres < m_vSuccessDistances[probIdx];
 }

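The success check above boils down to a Frobenius-norm distance against the configured threshold; restated as a standalone predicate (the function name is illustrative):

// success predicate used by SimpleOptTestGrid::test(int, int):
// the optimized parameters must land closer to the known solution
// than the success distance registered with the problem
bool isSuccess(const OPTIMIZATION::matrix_type &result,
               const OPTIMIZATION::matrix_type &solution,
               double successDist)
{
  return (result - solution).frobeniusNorm() < successDist;
}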
+ 83 - 80
SimpleOptTestGrid.h

@@ -10,88 +10,91 @@
 #define _SIMPLEOPTTESTGRID_H_
 
 #include "Opt_Namespace.h"
-#include "SimpleOptimizer.h"
-#include "SimpleOptProblem.h"
+#include "core/optimization/blackbox/SimpleOptimizer.h"
+#include "core/optimization/blackbox/SimpleOptProblem.h"
 
 #include <vector>
 
-namespace opt=optimization;
-
-///
-/// @class SimpleOptTestGrid defines a class to test simpleOptimizer(s) with SimpleOptProblem(s)
-///
-/// gets a number of ploblems and a number of optimizers and tries to solve every problem with each optimizer
-///
-class SimpleOptTestGrid
+namespace OPTIMIZATION
 {
-public:
-	///
-	/// Default Constructor
-	///
-	SimpleOptTestGrid();
-
-	///
-	/// Default Destructor
-	///
-	~SimpleOptTestGrid();
-
-	///
-	/// adds a SimpleOptProb to the optimizer
-	///
-	///	@param optProb the optProb to test (is copied in this routine -> only works with deep copy implemented copy constructors!)
-	///	@param solution the solution to the problem
-	///	@param successDist the maximum distance to the solution that is considered as success
-	///
-	void addSimpleOptProblem( const SimpleOptProblem & optProb, const opt::matrix_type &solution, double successDist);
-
-	///
-	/// adds a SimpleOptimizer
-	///
-	///	@param opt the optimizer (already configured!!)
-	///
-	void addSimpleOptimizer(SimpleOptimizer *opt);
-
-
-	///
-	/// runs the tests on the test grid
-	///
-	/// 	@return true in case of a full success, false in case of at least 1 failed test case
-	///
-	bool test();
-
-	///
-	/// runs a specific test 
-	///	
-	///	@param optIdx index of the optimizer (starting with 0)
-	///	@param probIdx index of the problem (starting with 0)
-	///
-	bool test(int optIdx, int probIdx);
-
-
-	///
-	///	get optimizers
-	///
-	std::vector<SimpleOptimizer*> getOptimizers() const {return m_vSimpleOptimzers;};
-	
-private:
-	/// the optimizers
-	std::vector<SimpleOptimizer*> m_vSimpleOptimzers;
-	
-	/// number of optimizers
-	int m_iNumberOfOptimizers;
-
-	/// the problems
-	std::vector<SimpleOptProblem> m_vSimpleOptProblem;
-
-	/// the solutions
-	std::vector<opt::matrix_type> m_vSolutions;
-
-	/// the success distances
-	std::vector<double> m_vSuccessDistances;
-
-	/// number of Problems
-	int m_iNumberOfProblems;
-
-
-};
+
+
+  ///
+  /// @class SimpleOptTestGrid defines a class to test SimpleOptimizer(s) with SimpleOptProblem(s)
+  ///
+  /// gets a number of problems and a number of optimizers and tries to solve every problem with each optimizer
+  ///
+  class SimpleOptTestGrid
+  {
+  public:
+    ///
+    /// Default Constructor
+    ///
+    SimpleOptTestGrid();
+
+    ///
+    /// Default Destructor
+    ///
+    ~SimpleOptTestGrid();
+
+    ///
+    /// adds a SimpleOptProb to the optimizer
+    ///
+    ///	@param optProb the problem to test (copied inside this routine, so the copy constructor must implement a deep copy!)
+    ///	@param solution the solution to the problem
+    ///	@param successDist the maximum distance to the solution that is considered as success
+    ///
+    void addSimpleOptProblem( const SimpleOptProblem & optProb, const OPTIMIZATION::matrix_type &solution, double successDist);
+
+    ///
+    /// adds a SimpleOptimizer
+    ///
+    ///	@param opt the optimizer (already configured!!)
+    ///
+    void addSimpleOptimizer(SimpleOptimizer *opt);
+
+
+    ///
+    /// runs the tests on the test grid
+    ///
+    /// 	@return true in case of a full success, false in case of at least 1 failed test case
+    ///
+    bool test();
+
+    ///
+    /// runs a specific test 
+    ///	
+    ///	@param optIdx index of the optimizer (starting with 0)
+    ///	@param probIdx index of the problem (starting with 0)
+    ///
+    bool test(int optIdx, int probIdx);
+
+
+    ///
+    ///	get optimizers
+    ///
+    std::vector<SimpleOptimizer*> getOptimizers() const {return m_vSimpleOptimzers;};
+    
+  private:
+    /// the optimizers
+    std::vector<SimpleOptimizer*> m_vSimpleOptimzers;
+    
+    /// number of optimizers
+    int m_iNumberOfOptimizers;
+
+    /// the problems
+    std::vector<SimpleOptProblem> m_vSimpleOptProblem;
+
+    /// the solutions
+    std::vector<OPTIMIZATION::matrix_type> m_vSolutions;
+
+    /// the success distances
+    std::vector<double> m_vSuccessDistances;
+
+    /// number of Problems
+    int m_iNumberOfProblems;
+
+
+  }; //class
+}//namespace
 #endif // _SIMPLEOPTTESTGRID_H_

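A hedged wiring sketch for the grid, using the 1-D parabola from tests/MyCostFunction below (minimum at 4.2); include paths are illustrative and follow the new structure:

#include "SimpleOptTestGrid.h"
#include "optimization/PowellBrentOptimizer.h"
#include "MyCostFunction.h" // from tests/, path illustrative

using namespace OPTIMIZATION;

int main()
{
  SimpleOptTestGrid grid;

  // problem: f(x) = (x - 4.2)^2 with known solution x* = 4.2
  CostFunction *func = new MyCostFunction(1 /*dim*/, false /*verbose*/);
  matrix_type init(1, 1);     init.set(2.0);
  matrix_type scales(1, 1);   scales.set(1.0);
  SimpleOptProblem prob(func, init, scales);

  matrix_type solution(1, 1); solution.set(4.2);
  grid.addSimpleOptProblem(prob, solution, 1e-3); // success iff distance < 1e-3

  PowellBrentOptimizer opt; // any configured SimpleOptimizer works
  grid.addSimpleOptimizer(&opt);

  bool allPassed = grid.test(); // every optimizer runs on every problem
  delete func;
  return allPassed ? 0 : 1;
}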
+ 1 - 0
libdepend.inc

@@ -1,3 +1,4 @@
+$(call PKG_DEPEND_INT,core/optimization)
 $(call PKG_DESCRIPTION,Optimization toolbox)
 $(call PKG_VERSION,0.0.1)
 $(call PKG_DEPEND_EXT_ESSENTIAL,ICE)

+ 7 - 5
tests/MyCostFunction.cpp

@@ -1,6 +1,8 @@
 #include "MyCostFunction.h"
 
-double MyCostFunction::evaluate(const optimization::matrix_type & x)
+using namespace OPTIMIZATION;
+
+double MyCostFunction::evaluate(const OPTIMIZATION::matrix_type & x)
 {
     double f;
 
@@ -9,10 +11,10 @@ double MyCostFunction::evaluate(const optimization::matrix_type & x)
     if ( x.rows() == 1 )
     {
         if (m_bVerbose)
-            std::cerr << "current position: " << x[0][0] << std::endl;
+            std::cerr << "current position: " << x(0,0) << std::endl;
 
        //our cost function is f(x) = (x-4.2)^2
-        f = pow(x[0][0] - 4.2, 2.0);
+        f = pow(x(0,0) - 4.2, 2.0);
 
         if (m_bVerbose)
             std::cerr << "function value: " << f << std::endl;
@@ -21,10 +23,10 @@ double MyCostFunction::evaluate(const optimization::matrix_type & x)
     //two-dimensional data
     else {
         if (m_bVerbose)
-            std::cerr << "current position: " << x[0][0] << " " << x[1][0] << std::endl;
+            std::cerr << "current position: " << x(0,0) << " " << x(1,0) << std::endl;
 
         //our cost function is f(x,y) = (x-4.7)^2 + (y-1.1)^2
-        f = pow(x[0][0] - 4.7, 2.0) + pow( x[1][0] - 1.1, 2.0 );
+        f = pow(x(0,0) - 4.7, 2.0) + pow( x(1,0) - 1.1, 2.0 );
 
         if (m_bVerbose)
             std::cerr << "function value: " << f << std::endl;

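Side note: since these parabolas also drive the gradient-descent tests below, their analytic gradients are handy for sanity-checking numeric derivatives. A sketch (the free function is illustrative, not part of the class):

#include "core/optimization/blackbox/Definitions_core_opt.h"

// analytic gradient of the 2-D test cost f(x,y) = (x-4.7)^2 + (y-1.1)^2;
// it vanishes exactly at (4.7, 1.1), the goal values asserted in
// TestGradientDescent below
void myCostFunctionGradient(const OPTIMIZATION::matrix_type &x,
                            OPTIMIZATION::matrix_type &grad)
{
  grad(0,0) = 2.0 * (x(0,0) - 4.7);
  grad(1,0) = 2.0 * (x(1,0) - 1.1);
}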
+ 13 - 9
tests/MyCostFunction.h

@@ -4,19 +4,23 @@
 //#include <cppunit/extensions/HelperMacros.h>
 #include "core/optimization/blackbox/CostFunction.h"
 
-//define a simple cost function for one-dimensional or two-dimensional data
-class MyCostFunction : public CostFunction
+namespace OPTIMIZATION
 {
-public:
 
-    MyCostFunction(const int & dim, bool verbose) : CostFunction(dim), m_bVerbose(verbose)
-    {}
+  //define a simple cost function for one-dimensional or two-dimensional data
+  class MyCostFunction : public CostFunction
+  {
+  public:
 
-    virtual double evaluate(const optimization::matrix_type & x);
+      MyCostFunction(const int & dim, bool verbose) : CostFunction(dim), m_bVerbose(verbose)
+      {}
 
-private:
-    bool m_bVerbose;
+      virtual double evaluate(const OPTIMIZATION::matrix_type & x);
 
-}; 
+  private:
+      bool m_bVerbose;
+
+  }; //class
+}//namespace
 
 #endif

+ 22 - 21
tests/TestGradientDescent.cpp

@@ -9,6 +9,7 @@
 #include "MyCostFunction.h"
 
 using namespace std;
+using namespace OPTIMIZATION;
 
 const bool verboseStartEnd = true;
 const bool verbose = true;
@@ -35,12 +36,12 @@ void TestGradientDescent::testGD_1Dim ()
   CostFunction *func = new MyCostFunction(dim, verbose);
    
   //initial guess: 2.0
-  optimization::matrix_type initialParams (dim, 1);
-  initialParams.Set(2.0);
+  OPTIMIZATION::matrix_type initialParams (dim, 1);
+  initialParams.set(2.0);
 
   //we use a dimension scale of 1.0
-  optimization::matrix_type scales (dim, 1);
-  scales.Set(1.0);
+  OPTIMIZATION::matrix_type scales (dim, 1);
+  scales.set(1.0);
 
   //setup the optimization problem
   SimpleOptProblem optProblem ( func, initialParams, scales );
@@ -48,8 +49,8 @@ void TestGradientDescent::testGD_1Dim ()
 
   GradientDescentOptimizer optimizer;
   //we search with step-width of 1.0
-  optimization::matrix_type searchSteps (dim, 1);
-  searchSteps[0][0] = 1.0f;
+  OPTIMIZATION::matrix_type searchSteps (dim, 1);
+  searchSteps(0,0) = 1.0f;
 
   optimizer.setVerbose(true);
   optimizer.setStepSize( searchSteps );
@@ -57,14 +58,14 @@ void TestGradientDescent::testGD_1Dim ()
 //  optimizer.setFuncTol(true, 1e-8);
   optimizer.optimizeProb ( optProblem );  
   
-  optimization::matrix_type optimizedParams (optProblem.getAllCurrentParams());
+  OPTIMIZATION::matrix_type optimizedParams (optProblem.getAllCurrentParams());
   
   double goal(4.2);  
   
   if (verbose)
-    std::cerr << "1d optimization -- result " << optimizedParams[0][0] << " -- goal: " << goal << std::endl;
+    std::cerr << "1d optimization -- result " << optimizedParams(0,0) << " -- goal: " << goal << std::endl;
 
-  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[0][0], goal, 1e-5 /* tolerance */);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams(0,0), goal, 1e-5 /* tolerance */);
 
   if (verboseStartEnd)
     std::cerr << "================== TestGradientDescent::testGD_1Dim done ===================== " << std::endl;
@@ -82,12 +83,12 @@ void TestGradientDescent::testGD_2Dim()
   CostFunction *func = new MyCostFunction(dim, verbose);
    
   //initial guess: 2.0
-  optimization::matrix_type initialParams (dim, 1);
-  initialParams.Set(2.0);
+  OPTIMIZATION::matrix_type initialParams (dim, 1);
+  initialParams.set(2.0);
 
   //we use a dimension scale of 1.0
-  optimization::matrix_type scales (dim, 1);
-  scales.Set(1.0);
+  OPTIMIZATION::matrix_type scales (dim, 1);
+  scales.set(1.0);
 
   //setup the optimization problem
   SimpleOptProblem optProblem ( func, initialParams, scales );
@@ -95,9 +96,9 @@ void TestGradientDescent::testGD_2Dim()
 
   GradientDescentOptimizer optimizer;
   //we search with step-width of 1.0
-  optimization::matrix_type searchSteps (dim, 1);
-  searchSteps[0][0] = 1.0f;
-  searchSteps[1][0] = 1.0f;
+  OPTIMIZATION::matrix_type searchSteps (dim, 1);
+  searchSteps(0,0) = 1.0f;
+  searchSteps(1,0) = 1.0f;
 
   optimizer.setVerbose(true);
   optimizer.setStepSize( searchSteps );
@@ -106,19 +107,19 @@ void TestGradientDescent::testGD_2Dim()
 //  optimizer.setFuncTol(true, 1e-8);
   optimizer.optimizeProb ( optProblem );
   
-  optimization::matrix_type optimizedParams (optProblem.getAllCurrentParams());
+  OPTIMIZATION::matrix_type optimizedParams (optProblem.getAllCurrentParams());
 
   double goalFirstDim(4.7);
   double goalSecondDim(1.1);
   
   if (verbose)
   {
-    std::cerr << "2d optimization  1st dim-- result " << optimizedParams[0][0] << " -- goal: " << goalFirstDim << std::endl;
-    std::cerr << "2d optimization  1st dim-- result " << optimizedParams[1][0] << " -- goal: " << goalSecondDim << std::endl;
+    std::cerr << "2d optimization  1st dim-- result " << optimizedParams(0,0) << " -- goal: " << goalFirstDim << std::endl;
+    std::cerr << "2d optimization  1st dim-- result " << optimizedParams(1,0) << " -- goal: " << goalSecondDim << std::endl;
   }
 
-  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[0][0], goalFirstDim, 1e-5 /* tolerance */);
-  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams[1][0], goalSecondDim, 1e-5 /* tolerance */);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams(0,0), goalFirstDim, 1e-5 /* tolerance */);
+  CPPUNIT_ASSERT_DOUBLES_EQUAL( optimizedParams(1,0), goalSecondDim, 1e-5 /* tolerance */);
   
   
   if (verboseStartEnd)