Bjoern Froehlich 13 years ago
commit
6429a05320
67 changed files with 10355 additions and 0 deletions
  1. AdaptiveDirectionRandomSearchOptimizer.cpp (+902, -0)
  2. AdaptiveDirectionRandomSearchOptimizer.h (+284, -0)
  3. AdditionalIceUtils.cpp (+82, -0)
  4. AdditionalIceUtils.h (+46, -0)
  5. ArmijoLineSearcher.cpp (+151, -0)
  6. ArmijoLineSearcher.h (+99, -0)
  7. BFGSOptimizer.cpp (+69, -0)
  8. BFGSOptimizer.h (+84, -0)
  9. BrentLineSearcher.cpp (+246, -0)
  10. BrentLineSearcher.h (+101, -0)
  11. CombinatorialOptimizer.cpp (+377, -0)
  12. CombinatorialOptimizer.h (+179, -0)
  13. Constraints.cpp (+135, -0)
  14. Constraints.h (+209, -0)
  15. CostFunction.cpp (+119, -0)
  16. CostFunction.h (+142, -0)
  17. CostFunction_ndim_2ndOrder.cpp (+105, -0)
  18. CostFunction_ndim_2ndOrder.h (+106, -0)
  19. DerivativeBasedOptimizer.cpp (+100, -0)
  20. DerivativeBasedOptimizer.h (+127, -0)
  21. DimWrapperCostFunction.cpp (+155, -0)
  22. DimWrapperCostFunction.h (+132, -0)
  23. DownhillSimplexOptimizer.cpp (+544, -0)
  24. DownhillSimplexOptimizer.h (+196, -0)
  25. EmptyLog.cpp (+1, -0)
  26. EmptyLog.h (+54, -0)
  27. EqualityConstraints.h (+66, -0)
  28. FileLog.cpp (+52, -0)
  29. FileLog.h (+56, -0)
  30. GoldenCutLineSearcher.cpp (+138, -0)
  31. GoldenCutLineSearcher.h (+101, -0)
  32. GradientDescentOptimizer.cpp (+409, -0)
  33. GradientDescentOptimizer.h (+112, -0)
  34. InequalityConstraints.h (+71, -0)
  35. LineSearcher.cpp (+214, -0)
  36. LineSearcher.h (+147, -0)
  37. Makefile (+8, -0)
  38. Makefile.inc (+103, -0)
  39. MatrixIterativeOptimizer.cpp (+505, -0)
  40. MatrixIterativeOptimizer.h (+150, -0)
  41. NewtonMethodOptimizer.cpp (+50, -0)
  42. NewtonMethodOptimizer.h (+85, -0)
  43. OptLogBase.cpp (+65, -0)
  44. OptLogBase.h (+85, -0)
  45. OptTestSuite.cpp (+202, -0)
  46. OptTestSuite.h (+50, -0)
  47. Opt_Namespace.h (+23, -0)
  48. Optimizable.cpp (+61, -0)
  49. Optimizable.h (+81, -0)
  50. OptimizationVersions.txt (+3, -0)
  51. Optimizer.cpp (+260, -0)
  52. Optimizer.h (+389, -0)
  53. OptimizerToolBox.pdf (BIN)
  54. ParamLog.cpp (+53, -0)
  55. ParamLog.h (+58, -0)
  56. Plotter.cpp (+274, -0)
  57. Plotter.h (+133, -0)
  58. PowellBrentOptimizer.cpp (+350, -0)
  59. PowellBrentOptimizer.h (+118, -0)
  60. SimpleOptProblem.cpp (+412, -0)
  61. SimpleOptProblem.h (+335, -0)
  62. SimpleOptTestGrid.cpp (+84, -0)
  63. SimpleOptTestGrid.h (+97, -0)
  64. SimpleOptimizer.cpp (+95, -0)
  65. SimpleOptimizer.h (+85, -0)
  66. libdepend.inc (+3, -0)
  67. liboptimization.h (+27, -0)

+ 902 - 0
AdaptiveDirectionRandomSearchOptimizer.cpp

@@ -0,0 +1,902 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	AdaptiveDirectionRandomSearchOptimizer.cpp: Implementation of the 
+//		ADRS Optimizer
+//
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "AdaptiveDirectionRandomSearchOptimizer.h"
+
+#include <stdlib.h>
+#include <time.h>
+#include <iostream>
+
+#include "numbase.h" // ice random
+#include "optimization/AdditionalIceUtils.h"
+#include "optimization/Opt_Namespace.h"
+
+using namespace optimization;
+
+AdaptiveDirectionRandomSearchOptimizer::AdaptiveDirectionRandomSearchOptimizer(
+	unsigned int numOfPoints,OptLogBase *loger)	: SuperClass(loger)
+{
+	m_numberOfParallelPoints = numOfPoints;
+
+	
+	m_b0 = 1.0;
+	m_bfac = 0.5;
+	m_bThresTimesNotDecreased = 0;
+	
+	m_bk = new double[m_numberOfParallelPoints];
+	m_pointsAccepted = new bool[m_numberOfParallelPoints];
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+	{
+		m_bk[j] = m_b0;
+		m_pointsAccepted[j]= false;
+	}
+	m_bBreak = 0.1;
+	m_backupActive = false;
+
+	
+	m_c0f = 0.75;
+	m_c0s = 0.75;
+	m_c1f = -0.75;
+	m_c1s = 1.25;
+	m_D = 3;
+
+	m_initFuncThreshActive = false;
+	m_initFuncTresh = 0.0;
+	
+	// advanced initialization is turned off per default
+	m_advancedInit= false;
+}
+
+
+AdaptiveDirectionRandomSearchOptimizer::AdaptiveDirectionRandomSearchOptimizer( const AdaptiveDirectionRandomSearchOptimizer &opt) : SuperClass(opt)
+{
+
+	m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
+	
+	m_Parameters = opt.m_Parameters;
+	m_b0		= opt.m_b0;
+	m_bfac		= opt.m_bfac;
+
+	m_bk = new double[m_numberOfParallelPoints];
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+	{
+		m_bk[j] = opt.m_bk[j];
+	}
+
+	m_pointsAccepted = new bool[m_numberOfParallelPoints];
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+	{
+		m_pointsAccepted[j] = opt.m_pointsAccepted[j];
+	}
+	
+	m_bBreak = opt.m_bBreak;
+	m_backupActive	= opt.m_backupActive;
+	m_backupPoint	= opt.m_backupPoint;
+	m_backupPointValue = opt.m_backupPointValue;
+
+	m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
+	m_c0f		= opt.m_c0f;
+	m_c0s		= opt.m_c0s;
+	m_c1f		= opt.m_c1f;
+	m_c1s		= opt.m_c1s;
+	m_D			= opt.m_D;
+	m_initFuncThreshActive = opt.m_initFuncThreshActive;
+	m_initFuncTresh = opt.m_initFuncTresh;
+	
+	// advanced init setup
+	m_advancedInit= opt.m_advancedInit;
+	m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
+	m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
+}
+
+/*
+	operator=
+*/
+AdaptiveDirectionRandomSearchOptimizer & AdaptiveDirectionRandomSearchOptimizer::operator=(const AdaptiveDirectionRandomSearchOptimizer &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		delete[] m_bk;
+		delete[] m_pointsAccepted;
+				
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(opt);
+
+			
+		/*
+			own values:
+		*/
+		m_numberOfParallelPoints = opt.m_numberOfParallelPoints;
+		m_Parameters = opt.m_Parameters;
+		m_b0		= opt.m_b0;
+		m_bfac		= opt.m_bfac;
+
+		m_bBreak = opt.m_bBreak;
+
+		m_backupActive	= opt.m_backupActive;
+		m_backupPoint	= opt.m_backupPoint;
+		m_backupPointValue = opt.m_backupPointValue;
+
+		m_bk = new double[m_numberOfParallelPoints];
+		for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+		{
+			m_bk[j] = opt.m_bk[j];
+		}
+
+		m_pointsAccepted = new bool[m_numberOfParallelPoints];
+		for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+		{
+			m_pointsAccepted[j] = opt.m_pointsAccepted[j];
+		}
+		
+
+		m_bThresTimesNotDecreased = opt.m_bThresTimesNotDecreased;
+		m_c0f		= opt.m_c0f;
+		m_c0s		= opt.m_c0s;
+		m_c1f		= opt.m_c1f;
+		m_c1s		= opt.m_c1s;
+		m_D			= opt.m_D;
+		m_initFuncThreshActive = opt.m_initFuncThreshActive;
+		m_initFuncTresh = opt.m_initFuncTresh;
+		
+		// advanced init setup
+		m_advancedInit= opt.m_advancedInit;
+		m_advancedinitLowerBounds= opt.m_advancedinitLowerBounds;
+		m_advancedinitUpperBounds= opt.m_advancedinitUpperBounds;
+		
+	}
+
+  	return *this;
+}
+
+
+AdaptiveDirectionRandomSearchOptimizer::~AdaptiveDirectionRandomSearchOptimizer()
+{
+	delete[] m_bk;
+	delete[] m_pointsAccepted;
+}
+
+
+void AdaptiveDirectionRandomSearchOptimizer::init()
+{
+	
+	m_Parameters = matrix_type(m_numberOfParameters,m_numberOfParallelPoints);
+	
+	// if not set before, set default value
+	if(m_bThresTimesNotDecreased == 0)
+		m_bThresTimesNotDecreased = static_cast<unsigned int>(m_numberOfParameters * m_numberOfParameters*  5.0);
+	
+	SuperClass::init();
+
+	// "reset"
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+	{
+		m_bk[j] = m_b0;
+		m_pointsAccepted[j]= false;
+	}
+	m_backupActive = false;
+
+	/*
+		seed rand
+	*/
+	ice::Randomize();
+
+	/*
+		check if bounds are active! Bounds are needed
+		to generate useful random start points.
+	*/
+	if(!(m_upperParameterBoundActive == true && m_lowerParameterBoundActive == true))
+	{
+		if(m_loger)
+		{
+			m_loger->logError("parameter bounds are not active! Please set proper parameter bounds for the random start point generation. This event has no further exception handling.");
+		}
+		m_lowerParameterBound = m_parameters;
+		m_upperParameterBound = m_parameters;
+		m_lowerParameterBoundActive = true;
+		m_upperParameterBoundActive = true;
+
+	}
+
+	/*
+	generate random start points
+	*/
+	if(m_advancedInit == false)
+	{
+		for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+		{
+			for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
+			{
+				m_Parameters[k][l] = m_parameters[k][0] + m_scales[k][0] *2.0* (ice::RandomD() - 0.5);
+				
+			}
+		}
+	}
+	else
+	{
+		// dimension check
+		assert(m_advancedinitLowerBounds.rows() == (int)m_numberOfParameters && m_advancedinitUpperBounds.rows() == (int)m_numberOfParameters);
+		
+		for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+		{
+			for(int l = 0; l < static_cast<int>(m_numberOfParallelPoints);l++)
+			{
+				m_Parameters[k][l] = m_advancedinitLowerBounds[k][0] +ice::RandomD() * (m_advancedinitUpperBounds[k][0] - m_advancedinitLowerBounds[k][0] ) ;
+			}
+		}
+	}
+
+
+	/*
+		evaluate SET !!
+	*/
+	m_CurrentCostFunctionValues = evaluateSetCostFunction(m_Parameters);
+	/*
+		If the threshold was set, check if all points are below the threshold
+	*/
+	if(m_initFuncThreshActive)
+	{
+		
+		bool pointOk=false;
+		for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+		{
+			/*
+				if they are not ok, generate new points for those that aren't
+			*/
+			if(m_CurrentCostFunctionValues[u][0] < m_initFuncTresh)
+			{
+				pointOk = true;
+			}
+			else
+			{
+				pointOk = false;
+			}
+			
+			while(pointOk == false)
+			{
+				for(int k = 0; k < static_cast<int>(m_numberOfParameters);k++)
+				{
+					m_Parameters[k][u] = m_parameters[k][0] + m_scales[k][0] * 2.0*(ice::RandomD() - 0.5);
+						
+				}
+				
+				/*
+					reevaluate the value and check against threshold
+				*/
+				//double tmpNewValue = evaluateCostFunction(m_Parameters.Sub(m_numberOfParameters,1,0,u));
+				double tmpNewValue = evaluateCostFunction(m_Parameters(0,u,m_numberOfParameters-1,u));
+			
+				/*
+					if point is ok now go to next point
+				*/
+				if(tmpNewValue < m_initFuncTresh)
+				{
+					m_CurrentCostFunctionValues[u][0] = tmpNewValue;
+					pointOk = true;				
+				}
+
+			} // while point not ok
+		} // for all points
+	} // if threshold active
+
+	/*if(m_verbose)
+	{
+		std::cout << "AdaptiveDirectionRandomSearch :: Initial parameterSet: ";
+		for(int l=0;l<m_numberOfParallelPoints;l++)
+		{
+			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+			{
+				std::cout<< m_Parameters[r][l] << " ";
+			}
+			std::cout << m_CurrentCostFunctionValues[l][0]  << std::endl;
+		}
+		std::cout << std::endl;
+
+
+		std::cout << "Number of Evaluations needed for a proper initilization: " << m_costFunction->getNumberOfEvaluations() << std::endl;
+	
+		
+	}*/
+
+
+
+	/*
+		(re)set m_bk
+	*/
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints); j++)
+	{
+		m_bk[j] = m_b0;
+	}
+
+}
+
+
+bool AdaptiveDirectionRandomSearchOptimizer::setControlSeqParams(double b0, double bfac,
+	unsigned int bThresTimesNotDecreased,double bBreak)
+{
+	if(b0 <= 0 || bfac <= 0 || bfac > 1 || bThresTimesNotDecreased == 0 || bBreak <= 0)
+	{
+		return false;
+	}
+	
+	m_b0 = b0;
+	m_bfac = bfac;
+	m_bThresTimesNotDecreased = bThresTimesNotDecreased;
+	m_bBreak = bBreak;
+	
+	return true;
+}
+
+
+void AdaptiveDirectionRandomSearchOptimizer::activateAdvancedInit(bool enable, matrix_type& lowerBounds, matrix_type& upperBounds)
+{
+	m_advancedInit= enable;
+	m_advancedinitLowerBounds= lowerBounds;
+	m_advancedinitUpperBounds= upperBounds;
+}
+
+
+matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoint()
+{
+	matrix_type newPoint(m_numberOfParameters,1);
+
+	for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+	{
+		if(m_scales[i][0] > 0.0 )
+		{
+			newPoint[i][0] = m_scales[i][0] * 2.0*(ice::RandomD() - 0.5);
+		}
+	}
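+	// normalize the random vector to unit length so that it is a pure
+	// direction; if it is numerically zero, a fresh direction is drawn
+	// recursively below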
+	double div=newPoint.Norm(0);
+	
+	if (div > 1.0e-50)
+	{
+		newPoint = newPoint * (1.0/div);
+	}
+	else
+	{
+		newPoint=this->generatePoint();
+	}
+
+	return newPoint;
+}	
+
+
+matrix_type AdaptiveDirectionRandomSearchOptimizer::generatePoints()
+{
+	matrix_type newPoints(m_numberOfParameters,m_numberOfParallelPoints);
+	matrix_type newPoint(m_numberOfParameters,1);
+
+	for(int j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+	{
+		newPoint = this->generatePoint();
+	
+		for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+		{
+			newPoints[i][j] = newPoint[i][0];
+		}
+	}
+
+	return newPoints;
+}	
+
+bool AdaptiveDirectionRandomSearchOptimizer::setRecallParams(
+	double c0s,
+	double c1s,
+	double c0f,
+	double c1f,
+	double D)
+{
+	if (c0s < 0			||
+		c0s >=1			||
+		c1s <0			||
+		c0s+c1s <= 1	||
+		c0f <= 0		||
+		c0f >= 1		||
+		c1f >= 0		||
+		c0f + c1f < -1.0||
+		c0f + c1f > 1.0 || 
+		D <= 0.0)
+	{
+		return false;
+	}
+
+	m_c0s = c0s;
+	m_c1s = c1s;
+	m_c0f = c0f;
+	m_c1f = c1f;
+	m_D	  = D;
+	
+	return true;
+}
+
+void AdaptiveDirectionRandomSearchOptimizer::acceptPoints(matrix_type oldValues, matrix_type newValues)
+{
+	for(int i = 0;i< static_cast<int>(m_numberOfParallelPoints);i++)
+	{
+		if(newValues[i][0] < oldValues[i][0])
+		{
+			m_pointsAccepted[i]=true;
+		}
+		else
+		{
+			m_pointsAccepted[i]=false;
+		}
+	}
+}
+
+
+int AdaptiveDirectionRandomSearchOptimizer::optimize()
+{
+
+
+	init();
+
+
+	if(m_loger)
+		m_loger->logTrace("ADRS: starting optimization ... \n");
+
+	/*
+		start time criteria
+	*/
+	m_startTime = clock();
+
+
+	bool abort = false;
+
+	/*
+		declare and initialize algorithm specific local variables
+	*/
+	matrix_type		newPoints;
+	matrix_type		newValues;
+	//matrix_type		oldValues;
+
+	matrix_type		deltaX(m_numberOfParameters,m_numberOfParallelPoints);
+	matrix_type		deltaXold(m_numberOfParameters,m_numberOfParallelPoints);
+	
+	matrix_type		dk(m_numberOfParameters,m_numberOfParallelPoints);
+	matrix_type		dkold(m_numberOfParameters,m_numberOfParallelPoints);
+	
+
+
+	int i = 0;
+	int j = 0;
+
+	/*
+		begin with the first step outside the loop
+	*/
+	m_numIter++;
+	unsigned int *timesNotDecreased = new unsigned int[m_numberOfParallelPoints];
+	
+	for(int k = 0; k< static_cast<int>(m_numberOfParallelPoints);k++)
+	{
+		timesNotDecreased[k] = 0; 
+	}
+
+
+	/*
+		generate a new delta X (potential step)
+	*/
+	matrix_type tmp = generatePoints();
+	for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
+	{
+		for(i = 0;i < static_cast<int>(m_numberOfParameters);i++)
+		{
+			deltaX[i][j] = tmp[i][j] * m_bk[j] ;
+		}
+	}
+
+	/*
+		check costfunction at potential new point
+	*/
+	newPoints = m_Parameters + deltaX;
+	newValues = evaluateSetCostFunction(newPoints);
+
+	/*
+		are the new points better?
+	*/
+	acceptPoints(m_CurrentCostFunctionValues,newValues);
+	
+	for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+	{
+		if(m_pointsAccepted[j] == true)
+		{
+			for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+			{
+				/*
+					set the new point
+				*/
+				m_Parameters[i][j] = newPoints[i][j];
+
+				/*
+					update the recall factor
+				*/
+				dk[i][j] = dkold[i][j] * m_c0s + deltaXold[i][j] * m_c1s;
+			
+			}
+			m_CurrentCostFunctionValues[j][0] = newValues[j][0];
+
+			/*
+				reset the counter for failed attempts
+			*/
+			timesNotDecreased[j] = 0;
+		}
+		else
+		{
+			for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
+			{
+				/*
+					update the recall factor
+				*/
+				dk[i][j] = dkold[i][j] * m_c0f + deltaXold[i][j] * m_c1f;
+			
+			}
+			
+			/*
+				raise the counter for failed attempts
+			*/
+			timesNotDecreased[j] ++;
+		}
+	}
+
+	/* 
+		do the optimization in the main loop
+	*/
+	while(abort == false)
+	{
+		/*
+			increase iteration counter
+		*/
+		m_numIter++;
+		
+		/*
+			Check iteration limit
+		*/
+		if(m_maxNumIterActive)
+		{
+			if(m_numIter >= m_maxNumIter)
+			{
+				/* set according return status and return */
+				m_returnReason = SUCCESS_MAXITER;
+				abort = true;
+				continue;
+			}
+		}
+		
+		/*
+			save the old deltaX
+		*/
+		deltaXold = deltaX;
+
+		/*
+			generate a new delta X (potential step)
+		*/
+		matrix_type tmp = generatePoints();
+		for(j = 0; j< static_cast<int>(m_numberOfParallelPoints);j++)
+		{
+			for(i = 0; i < static_cast<int>(m_numberOfParameters);i++)
+			{
+				deltaX[i][j] = dk[i][j] + tmp[i][j] * m_bk[j] ;
+			}
+		}
+		
+		/*
+			check costfunction at potential new point
+		*/
+		newPoints = m_Parameters + deltaX;
+		newValues = evaluateSetCostFunction(newPoints);
+
+		/*
+			save the old dk
+		*/
+		dkold = dk;
+
+
+		/*
+			are the new points better?
+		*/
+		acceptPoints(m_CurrentCostFunctionValues,newValues);
+		
+		for(j = 0; j < static_cast<int>(m_numberOfParallelPoints);j++)
+		{
+			if(m_pointsAccepted[j] == true)
+			{
+				for(i =0; i < static_cast<int>(m_numberOfParameters);i++)
+				{
+					/*
+						set the new point
+					*/
+					m_Parameters[i][j] = newPoints[i][j];
+					
+					/*
+						update the recall factor
+					*/
+					dk[i][j] = dkold[i][j] * m_c0s + deltaXold[i][j] * m_c1s;
+					
+				}
+				m_CurrentCostFunctionValues[j][0] = newValues[j][0];
+				
+				/*
+					reset the counter for failed attempts
+				*/
+				timesNotDecreased[j] = 0;
+			}
+			else
+			{
+				for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+				{
+					/*
+						update the recall factor
+					*/
+					dk[i][j] = dkold[i][j] * m_c0f + deltaXold[i][j] * m_c1f;
+					
+				}
+				
+				/*
+				raise the counter for failed attempts
+				*/
+				timesNotDecreased[j] ++;
+			}
+
+			/*
+				scaledown m_bk if there was no improvement for a certain time
+			*/
+			if(timesNotDecreased[j] >= m_bThresTimesNotDecreased)
+			{
+				m_bk[j] = m_bk[j] * m_bfac;
+				timesNotDecreased[j] = 0;
+			}
+
+			/*
+				if m_bk < m_bBreak .. 
+			*/
+			if( m_bk[j]  < m_bBreak )
+			{
+				/*  */
+				if(m_backupActive)
+				{
+					if(m_CurrentCostFunctionValues[j][0] < m_backupPointValue)
+					{
+						//m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
+						m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
+						m_backupPointValue = m_CurrentCostFunctionValues[j][0];
+					}
+				}
+				else
+				{
+					//m_backupPoint = m_Parameters.Sub(m_numberOfParameters,1,0,j);
+					m_backupPoint = m_Parameters(0,j,m_numberOfParameters-1,j);
+					m_backupPointValue = m_CurrentCostFunctionValues[j][0];
+					m_backupActive = true;
+				}
+				
+				/*
+					reset counters
+				*/
+				m_bk[j] = m_b0;
+				timesNotDecreased[j] = 0;
+
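+				/*
+					restart this point: choose one of the current points
+					at random, with probability proportional to
+					(maxVal - value), i.e. points with smaller cost are
+					more likely to be chosen, and continue from there
+				*/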
+				matrix_type resProb = m_CurrentCostFunctionValues;
+				double maxVal=m_CurrentCostFunctionValues.MaxVal();
+				for(int i=0; i < (int)m_numberOfParallelPoints; ++i)
+				{
+					resProb[i][0] -= maxVal;
+				}
+				
+				double denom = MatrixSum(resProb);
+				/*
+				ensure numerical stability
+				*/
+				if( fabs(denom) < 1.0e-50)
+				{
+					denom = denom < 0.0 ? -1.0e-50 : 1.0e-50;
+				}
+
+				resProb = resProb * (1.0/denom);
+					
+				
+				double sum = 0.0;
+				for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+				{
+					sum += resProb[u][0];
+					resProb[u][0] = sum;
+				}
+
+				/*
+					generate random number [0,1]
+				*/
+				double choice = ice::RandomD();
+				int u_chosen = 0;
+
+				for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+				{
+					u_chosen = u;
+					
+					if( choice < resProb[u][0] )
+					{
+						break;
+					}
+				}
+
+				/*
+					set m_parameters 
+				*/
+				for(int v = 0; v < static_cast<int>(m_numberOfParameters); v++)
+				{
+					m_Parameters[v][j]=m_Parameters[v][u_chosen];
+
+				}
+				
+				m_CurrentCostFunctionValues[j][0] = m_CurrentCostFunctionValues[u_chosen][0];
+								
+			}
+
+
+			/*
+				dk has to be <= D * m_bk
+			*/
+			double norm= dk(0,j,m_numberOfParameters-1,j).Norm(0);
+			if( norm >= m_D * m_bk[j])
+			{
+				if(norm < 1.0e-50)
+				{
+					//m_loger->logWarning("ADRS Optimizer: Warning Computation gets unstable");
+					norm = 1.0e-50;
+				}
+				
+				for(i =0;i < static_cast<int>(m_numberOfParameters);i++)
+				{
+					/*
+						update the recall factor
+					*/
+					dk[i][j] = dk[i][j] * 1.0/norm;
+					
+				}
+			}
+		}
+		
+		if(m_verbose)
+		{
+			std::cout << "# AdaptiveDirectionRandomSearch :: parameterSet: ";
+			for(int l=0;l < static_cast<int>(m_numberOfParallelPoints);l++)
+			{
+				for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+				{
+					std::cout<< m_Parameters[r][l] << " ";
+				}
+				std::cout << m_bk[l] << " "<< 
+					m_CurrentCostFunctionValues[l][0]  << std::endl;
+			}
+			std::cout <<"# "<< std::endl;
+		}
+		
+		
+		//	fixme wacker for plotting
+		/*
+		for(int l=0;l<m_numberOfParallelPoints;l++)
+		{
+				for(int r = 0; r < 2; r++)
+				{
+					std::cout<< m_Parameters[r][l] << " ";
+				}
+				std::cout << m_CurrentCostFunctionValues[l][0]  << std::endl;
+		}
+
+*/
+		
+		/*
+			Check if it is in bounds, maxSeconds
+		*/
+		/*
+		if(!checkParameters(m_parameters))
+		{
+			// set according return status and the last parameters and return 
+			m_returnReason = ERROR_XOUTOFBOUNDS;
+			abort = true;
+		}*/
+
+		/*
+			check the parameter tolerance criterion
+		*/
+		if(m_paramTolActive)
+		{
+			if(m_numberOfParallelPoints > 1 )
+			{
+				/*
+					check if the distance from every point to the first point is below the threshold
+				*/
+				matrix_type paramSet = m_Parameters;
+				bool abortNow = true;
+				
+				for(int e = 0; e < static_cast<int>(m_numberOfParallelPoints);e++)
+				{
+					if(	(paramSet(0,e,m_numberOfParameters-1,e) 	- paramSet(0,0,m_numberOfParameters-1,0)).Norm(0) > m_paramTol)
+					{
+						abortNow = false;
+					}
+				}
+				if(abortNow)
+				{
+					abort = true;
+					m_returnReason = SUCCESS_PARAMTOL;
+				}
+			}
+
+		}
+
+
+		/*
+			Check Optimization Timelimit, if active
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				abort = true;
+				continue;
+			}
+		}
+
+	}
+
+	
+	/*
+		find the best value..
+	*/
+	unsigned int u_best = 0;
+	for(int u = 0; u < static_cast<int>(m_numberOfParallelPoints); u++)
+	{
+		if( m_CurrentCostFunctionValues[u][0] < m_CurrentCostFunctionValues[u_best][0] )
+		{
+			u_best = u;
+		}
+		
+	}
+
+
+	/*
+		regular points include the best one 
+	*/
+	//m_parameters = m_Parameters.Sub(m_numberOfParameters,1,0,u_best);
+	m_parameters = m_Parameters(0,u_best,m_numberOfParameters-1,u_best);
+	m_currentCostFunctionValue = m_CurrentCostFunctionValues[u_best][0];
+	
+	if (m_backupActive)
+	{
+		/*
+			compare with backup point
+		*/
+		if( m_backupPointValue < m_CurrentCostFunctionValues[u_best][0] )
+		{
+			/*
+				backup point is best
+			*/
+			m_parameters = m_backupPoint;
+			m_currentCostFunctionValue = m_backupPointValue;
+		}
+	
+	}
+	
+	delete[] timesNotDecreased;
+
+	return m_returnReason;
+
+}

+ 284 - 0
AdaptiveDirectionRandomSearchOptimizer.h

@@ -0,0 +1,284 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	AdaptiveDirectionRandomSearchOptimizer.h: interface of the ADRS optimizer.
+//
+/////////////////////////////////////////////////////////////////////////
+
+#ifndef _ADAPTIVE_DIRECTION_RANDOM_SEARCH_OPTIMIZER_
+#define _ADAPTIVE_DIRECTION_RANDOM_SEARCH_OPTIMIZER_
+
+#include "optimization/SimpleOptimizer.h"
+#include "optimization/Opt_Namespace.h"
+namespace opt=optimization;
+
+///
+///	Class AdaptiveDirectionRandomSearchOptimizer
+///
+///	HowToUse (see the usage sketch below this comment):
+///	
+///	  * specify in the constructor call the number of points that are optimized in a "simultaneous" way
+///	  * to use other than the default parameters, use the setControlSeqParams call, which changes the
+///		parameters that are responsible for a "scale down" of b_k by b_fac if there was no function
+///		decrease for b_times times
+///	  *	to use other than the default parameters, use the setRecallParams call, which changes the
+///		parameters that are responsible for the balance between a new random direction and the old
+///		successful iteration directions
+///	  * use setLowerParameterBounds and setUpperParameterBounds to specify the area in which to generate
+///		random start points (THIS IS NECESSARY!)
+///	  *	to require the random start points' function values to be below a threshold, use the
+///		setInitFunctionValueThresh call
+///	  *	call init()
+///	  * call optimize()
+///
+///	Implemented Abort criteria:
+///		
+///	  * maximum number of iterations
+///	  * parameter tolerance
+///	  *	time limit
+///
+///
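+///	A minimal usage sketch (`myCostFunc`, `lowerBounds` and `upperBounds`
+///	are assumed to be prepared by the caller; the cost-function setter and
+///	the exact bound-setter signatures belong to the SuperClass and are
+///	assumptions here):
+///
+///	\code
+///	AdaptiveDirectionRandomSearchOptimizer adrs(10); // 10 parallel points
+///	// adrs.setCostFunction(&myCostFunc);            // SuperClass setter (name assumed)
+///	// adrs.setLowerParameterBounds(...);            // NECESSARY, see above
+///	// adrs.setUpperParameterBounds(...);
+///	adrs.setControlSeqParams(1.0, 0.5, 50, 0.1);     // optional: b0, bfac, bThres, bBreak
+///	adrs.init();
+///	adrs.optimize();
+///	\endcode
+///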
+class AdaptiveDirectionRandomSearchOptimizer : public SimpleOptimizer
+{
+	public:
+
+		typedef  SimpleOptimizer SuperClass;
+		typedef  opt::matrix_type matrix_type;
+		///	
+		///		Constructor.
+		///		@param numOfPoints: Number of Points to optimize
+		///		@param loger : OptLogBase * to existing log class
+		///
+		AdaptiveDirectionRandomSearchOptimizer(unsigned int numOfPoints, OptLogBase *loger=NULL);
+		
+		///
+		///	Copy constructor
+		///	@param opt .. optimizer to copy
+		///
+		AdaptiveDirectionRandomSearchOptimizer( const AdaptiveDirectionRandomSearchOptimizer &opt);
+
+		///
+		///	operator =
+		///
+		AdaptiveDirectionRandomSearchOptimizer & operator=(const AdaptiveDirectionRandomSearchOptimizer &opt);
+
+		///
+		///	Destructor.
+		///
+		~AdaptiveDirectionRandomSearchOptimizer();
+
+		///
+		///	enumeration for the return reasons of an optimizer,
+		///	has all elements of the SuperClass optimizer
+		///
+
+	
+		///
+		///	@brief Do internal initializations
+		///
+		void init();
+		
+		///
+		///	@brief start the optmization
+		///
+		int optimize();
+
+		///
+		///	@brief set the parameters for the control sequence
+		///			
+		///	@param b0 > 0: initial value of the control sequence b_k
+		///	@param bfac: 0 < bfac < 1: factor by which b_k is scaled down
+		///	@param bThresTimesNotDecreased > 0: number of failed iterations that triggers the scale down
+		///	@param bBreak > 0: lower threshold for b_k; falling below it restarts the point
+		///
+		///	\return true in case of success
+		///
+		bool setControlSeqParams(double b0, double bfac, unsigned int bThresTimesNotDecreased, double bBreak);
+		
+		
+		///
+		/// @brief Enables advanced initialization of random start points. If activated, start points will be randomly distributed according to the upper and lower bound vectors.
+		///
+		/// @param enable if true, advanced initialization will be enabled
+		/// @param lowerBound matrix containing lower bounds for random initialization (must be nx1 with n= number of Parameters)
+		/// @param upperBound matrix containing upper bounds for random initialization (must be nx1 with n= number of Parameters)
+		///
+		void activateAdvancedInit(bool enable, matrix_type& lowerBounds, matrix_type& upperBounds);
+
+		
+		///
+		///	@brief set recall parameters that control the influence of prior iterations on the current one. This is the key idea of the adaptive direction approach
+		///	
+		///	The update formula for the iteration scheme is
+		///
+		///	$$
+		///		d_k = c_0 * d_{k-1} + c_1 \Delta x_{k-1}
+		///	$$
+		///	with, if the last step was accepted ($\delta_{k-1} == 1$),
+		///	$$
+		///		c_0 = c0s,\enskip c_1 = c1s
+		///		\quad\mbox{where}\quad
+		///		0 \le c0s < 1;\enskip c1s \ge 0;\enskip c0s + c1s > 1
+		///	$$
+		///	and otherwise
+		///	$$
+		///		c_0 = c0f,\enskip c_1 = c1f
+		///		\quad\mbox{where}\quad
+		///		0 < c0f < 1;\enskip c1f < 0;\enskip |c0f + c1f| \le 1
+		///	$$
+		///	and in all cases
+		///	$$
+		///		\|d_k\| < D * b_k
+		///	$$
+		///	@param c0s
+		///	@param c1s
+		///	@param c0f
+		///	@param c1f
+		///	@param D
+		///
+		///	\return true in case of success
+		///
+		bool setRecallParams(double c0s, double c1s, double c0f, double c1f, double D);
+
+		///
+		///	@brief set Number of Points
+		///	@param num number of points
+		///
+		inline void setNumberOfPoints(unsigned int num){m_numberOfParallelPoints = num;};
+
+		///
+		///	@brief set a threshold for the initial points
+		///	@param active is the threshold active?
+		///	@param threshold .. the threshold to be below (above in case of maximization)
+		///
+		inline void setInitFunctionValueThresh(bool active, double threshold){m_initFuncThreshActive = active;m_initFuncTresh = threshold;};
+
+
+
+	private:
+
+		///
+		///	number of parallel point optimizations
+		///
+		unsigned int m_numberOfParallelPoints;
+
+		///
+		///	matrix of the whole parameter set
+		///
+		matrix_type m_Parameters;
+
+		///
+		///	matrix of costfunction Values
+		///
+		matrix_type m_CurrentCostFunctionValues;
+
+		///
+		///	generate new points
+		///
+		matrix_type generatePoints();
+
+		///
+		///	generate new points
+		///
+		matrix_type generatePoint();
+
+		///
+		///	@brief accept new points or reject them (decided per point)
+		///	@param oldValues : old objective function values, one row per point
+		///	@param newValues : new objective function values, one row per point
+		///	
+		///
+		void acceptPoints(matrix_type oldValues, matrix_type newValues);
+
+		bool *m_pointsAccepted;
+
+		///
+		///	control sequence parameter b0
+		///
+		double m_b0;
+
+		///
+		///	control sequence parameter bfac
+		///
+		double m_bfac;
+
+		///
+		///	control sequence parameter bk
+		///
+		double *m_bk;
+
+		///
+		///	control sequence multiplication trigger threshold
+		///
+		unsigned int m_bThresTimesNotDecreased;
+
+		///
+		///	control sequence parameter bBreak
+		///
+		double m_bBreak;
+
+		///
+		///	backup point 
+		///
+		matrix_type m_backupPoint;
+
+		///
+		///	backup point Value
+		///
+		double m_backupPointValue;
+
+		///
+		///	backup point in use?
+		///
+		bool m_backupActive;
+
+		///
+		///	direction parameter c0s
+		///
+		double m_c0s;
+
+		///
+		///	direction parameter c1s
+		///
+		double m_c1s;
+
+		///
+		///	direction parameter c0f
+		///
+		double m_c0f;
+
+		///
+		///	direction parameter c1f
+		///
+		double m_c1f;
+
+		///
+		///	recall limit
+		///
+		double m_D;
+
+		///
+		///	is the initial function value threshold	active		
+		///
+		bool m_initFuncThreshActive;
+
+		///
+		///	the initial function value threshold
+		///
+		double m_initFuncTresh;
+		
+		///
+		/// enables advanced initialization
+		bool m_advancedInit;
+		
+		///
+		/// lower bounds for advanced initialization
+		///
+		matrix_type m_advancedinitLowerBounds;
+		
+		///
+		/// upper bounds for advanced initialization
+		matrix_type m_advancedinitUpperBounds;
+
+};
+
+#endif

+ 82 - 0
AdditionalIceUtils.cpp

@@ -0,0 +1,82 @@
+#include "optimization/AdditionalIceUtils.h"
+#include "vectort.h" // ICE
+#include "ludecomp.h" //ICE
+#include "mateigen.h" //ICE
+#include <cassert>
+using namespace opt;
+using namespace ice;
+
+matrix_type & MatrixAddVal ( matrix_type & mat, double val )
+{
+	for(int i=0; i< mat.rows(); ++i)
+	{
+		for(int j =0; j<mat.cols(); ++j)
+		{
+			mat[i][j] += val;
+		}
+	}
+
+	return mat;
+}
+
+
+double MatrixSum(const matrix_type &mat)
+{
+	double sum=0.0;
+	for(int i=0; i< mat.rows(); ++i)
+	{
+		for(int j =0; j<mat.cols(); ++j)
+		{
+			sum += mat[i][j];
+		}
+	}
+	return sum;
+}
+
+
+
+void linSolve(const opt::matrix_type &A, opt::matrix_type &x, const opt::matrix_type b)
+{
+	int n=A.cols();
+
+	assert(x.rows() == n && b.rows() == n );
+
+	matrix_type LU;
+	IVector indx;
+	Vector xv(n);
+	Vector iv(n);
+
+	for(int i=0; i < n;++i)
+	{
+		iv[i] = b[i][0];
+	}
+
+	// LU decomposition with pivoting
+	LUDecompositionPacked(A,LU,indx,true);
+	
+	// solve the system using the LU factors
+	xv = LUSolve(LU,indx,iv);
+	
+	x=matrix_type(n,1);
+	for(int i=0; i < n;++i)
+	{
+		x[i][0] = xv[i];
+	}
+	
+}
+
+void getEig(const opt::matrix_type &A, opt::matrix_type &L, opt::matrix_type &V)
+{
+	int n= A.cols();
+	L = matrix_type(n,1);
+	V = matrix_type(n,n);
+
+	Vector l(n);
+
+	Eigenvalue(A,l,V);
+	for(int i=0; i < n;++i)
+	{
+		L[i][0] = l[i];
+	}
+
+}

+ 46 - 0
AdditionalIceUtils.h

@@ -0,0 +1,46 @@
+/*!
+	@file:	AdditionalIceUtils.h
+
+	adds some utility functions on top of ICE
+*/
+
+#ifndef ADDITIONALICEUTILS_H
+#define ADDITIONALICEUTILS_H
+
+#include "optimization/Opt_Namespace.h"
+
+namespace opt=optimization;
+
+/*!
+	adds val to every element of mat
+	@param mat matrix
+	@param val the value to add
+	@return mat
+*/
+opt::matrix_type& MatrixAddVal ( opt::matrix_type & mat, double val );
+
+/*!
+	sums up all elements of the matrix
+	@param mat matrix
+	@return $\sum_{i,j} mat(i,j)$
+*/
+double MatrixSum(const opt::matrix_type & mat);
+
+/*!
+	Wrapper to solve linear equation A*x=b
+	@param A matrix 
+	@param x solution
+	@param b nx1 matrix (inhomogeneity)
+
+*/
+void linSolve(const opt::matrix_type &A, opt::matrix_type &x, const opt::matrix_type b);
+
+/*!
+	Wrapper to get eigen values of a matrix
+	@param A matrix
+	@param L lambda vector with eigenvalues
+	@param V matrix whose columns are the corresponding eigenvectors
+*/
+void getEig(const opt::matrix_type &A, opt::matrix_type &L, opt::matrix_type &V);
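+
+/*!
+	A minimal usage sketch (values chosen only for illustration):
+
+	\code
+	opt::matrix_type A(2,2);
+	A[0][0]=4; A[0][1]=1;
+	A[1][0]=1; A[1][1]=3;
+
+	opt::matrix_type b(2,1);
+	b[0][0]=1; b[1][0]=2;
+
+	opt::matrix_type x(2,1); // must be preallocated nx1 (see assert in linSolve)
+	linSolve(A, x, b);       // x now holds the solution of A*x=b
+
+	opt::matrix_type L, V;
+	getEig(A, L, V);         // L: 2x1 eigenvalues, V: 2x2 eigenvectors
+	\endcode
+*/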
+
+#endif //ADDITIONALICEUTILS_H

+ 151 - 0
ArmijoLineSearcher.cpp

@@ -0,0 +1,151 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	ArmijoLineSearcher.cpp: Implementation of the ArmijoLineSearcher class.
+//
+//	Written by Matthias Wacker
+//
+//	Big parts of code are from numerical recipes in C
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/ArmijoLineSearcher.h"
+#include <iostream>
+
+ArmijoLineSearcher::ArmijoLineSearcher() : m_dGamma(0.7), m_dL(1.0),m_dSigma(0.33)
+{
+}
+
+ArmijoLineSearcher::ArmijoLineSearcher(CostFunction *costFunc,OptLogBase* loger) : SuperClass(costFunc,loger),m_dGamma(0.5), m_dL(1.0),m_dSigma(0.33)
+{
+}
+
+ArmijoLineSearcher::ArmijoLineSearcher(const ArmijoLineSearcher &lin) : SuperClass(lin)
+{
+	m_dGamma = lin.m_dGamma;
+	m_dL = lin.m_dL;
+	m_dSigma = lin.m_dSigma;
+}
+
+ArmijoLineSearcher & ArmijoLineSearcher::operator =(const ArmijoLineSearcher &lin)
+{
+	/*
+			avoid self-copy
+	*/
+	if(this != &lin)
+	{
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(lin);
+
+		m_dGamma = lin.m_dGamma;
+		m_dL = lin.m_dL;
+		m_dSigma = lin.m_dSigma;
+	
+	}
+
+	return *this;
+
+}
+
+ArmijoLineSearcher::~ArmijoLineSearcher()
+{
+}
+
+void ArmijoLineSearcher::setGammaLSigma(double gamma, double L, double sigma)
+{
+	m_dGamma=gamma;
+	m_dL=L;
+	m_dSigma=sigma;
+}
+
+void ArmijoLineSearcher::setXkGkDk(const matrix_type xk, const matrix_type gk, const matrix_type dk)
+{
+	this->setX0(xk);
+	this->setH0(dk);
+
+	m_gk=gk;
+	m_dk=dk;
+}
+
+double ArmijoLineSearcher::optimize()
+{
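+	// initial trial step length taken from the Armijo rule:
+	//   s_k = -(g_k^T d_k) / (L * ||d_k||^2)
+	// where g_k is the gradient and d_k the descent direction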
+	double x=0.0;
+	double ndk2=m_dk.Norm(0);
+	ndk2*=ndk2;
+
+	double fk= evaluateSub(0.0);
+	double tmp= (!m_gk * m_dk)[0][0];
+	double sk= - tmp / (m_dL * ndk2 );
+	x=sk;
+	int maxIter=30;
+	int iter=0;
+	// own rule 
+	double fk_old=fk;
+	double x_old=0.0;
+	double fk_new;
+	bool improved = false;
+	
+	while(  iter < maxIter)
+	{
+		fk_new=evaluateSub(x);
+
+		if ( fk_new > fk_old)
+		{
+			x=x_old;
+			//cout << "loop 1: end " << endl;
+			break;
+		}
+		
+		//cout << "loop 1: iter "<< iter << " " << x << " " << fk_new<<endl;
+		fk_old = fk_new;
+		x_old = x;
+		x/=m_dGamma;
+		++iter;
+	}
+	if( iter <= 1 )
+	{
+		iter=0;
+		fk_old=fk;
+		x_old=0.0;
+		x=sk;
+		while( iter < maxIter)
+		{
+			fk_new=evaluateSub(x);
+	
+			// fk_new is worse than old, and we are lower than fk
+			if( fk_new > fk_old && improved==true)
+			{
+				// abort with fk_old which was generated by x_old
+				x = x_old;
+				break;
+			}
+		
+					
+			//cout << "loop 2: iter "<< iter << " " << x << " " << fk_new<<endl;
+			fk_old=fk_new;
+			x_old=x;
+			x*=m_dGamma;
+			++iter;
+			if( fk_old < fk)
+			{
+				improved = true;
+			}
+		}
+	
+	}
+	// own rule till here
+
+	// original implementation 
+	//iter=0.0;
+	//while( fk - evaluateSub(x) < - m_dSigma * x *tmp  && iter < maxIter)
+	//{
+	//	x*=m_dGamma;
+	//	iter++;
+	//}
+
+
+//	if(m_pLoger)
+//		m_pLoger->logWarning("Brent took too many iterations.. Tol threshold was to low for the number of iterations..\n");
+
+	return x;
+}

+ 99 - 0
ArmijoLineSearcher.h

@@ -0,0 +1,99 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	ArmijoLineSearcher.h: interface of the ArmijoLineSearcher class.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _ARMIJO_LINE_SEARCHER_H_
+#define _ARMIJO_LINE_SEARCHER_H_
+
+#include <cmath>
+#include "LineSearcher.h"
+/*!
+	class for the Armijo line search (a usage sketch follows below)
+
+   HowToUse:
+	
+	  * use setXkGkDk() to set the start point, the gradient at that point
+		and the search direction (it wraps setX0() and setH0())
+	  * call init()
+	  * call optimize() (returns lambda)
+ */
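+
+/*!
+	A minimal usage sketch (`myCostFunc`, `xk`, `gk` and `dk` are assumed
+	to be prepared by the caller):
+
+	\code
+	ArmijoLineSearcher armijo(&myCostFunc, NULL);
+	armijo.setXkGkDk(xk, gk, dk); // point, gradient, descent direction
+	double lambda = armijo.optimize();
+	// new iterate: xk + dk * lambda
+	\endcode
+*/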
+class  ArmijoLineSearcher : public LineSearcher
+{
+public:
+
+	typedef LineSearcher	SuperClass;
+	typedef SuperClass::matrix_type matrix_type;
+	
+	/*!
+		default constructor
+	*/
+	ArmijoLineSearcher();
+
+	/*!
+		constructor
+	*/
+	ArmijoLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+	/*!
+		Copy constructor
+		\param opt .. optimizer to copy
+	*/
+	ArmijoLineSearcher( const ArmijoLineSearcher &lin);
+	
+	/*!
+		operator =
+	*/
+	ArmijoLineSearcher & operator=(const ArmijoLineSearcher &lin);
+
+	/*!
+		Destructor.
+	*/
+	virtual ~ArmijoLineSearcher();
+
+	/*!
+		set Parameters
+		@param gamma reduction Factor of step width test
+		@param L divisor for computation of sk ( > 0 )
+		@param sigma multiplicative parameter for test (0, 0.5)
+	*/
+	void setGammaLSigma(double gamma, double L, double sigma);
+
+
+	/*!
+		local search setup
+		@param xk start point
+		@param gk gradient at xk
+		@param dk descent direction from xk
+	*/
+	void setXkGkDk(const matrix_type xk, const matrix_type gk, const matrix_type dk);
+		
+	/*!
+		optimize
+		returns a double..
+	*/
+	double optimize();
+
+protected:
+
+	//! reduction factor gamma of the step width test
+	double m_dGamma;
+
+	//! divisor L used to compute the initial step sk
+	double m_dL;
+
+	//! multiplicative test parameter sigma
+	double m_dSigma;
+
+	//! gradient in xk 
+	matrix_type m_gk;
+
+	//! iteration direction from xk
+	matrix_type m_dk;
+};
+
+#endif

+ 69 - 0
BFGSOptimizer.cpp

@@ -0,0 +1,69 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	BFGSOptimizer.cpp: Implementation of the BFGSOptimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/BFGSOptimizer.h"
+
+
+
+BFGSOptimizer::BFGSOptimizer(OptLogBase *loger) : SuperClass(loger)
+{
+}
+
+
+BFGSOptimizer::BFGSOptimizer( const BFGSOptimizer &opt) : SuperClass(opt)
+{	
+}
+
+
+BFGSOptimizer::~BFGSOptimizer()
+{
+}
+
+
+
+void BFGSOptimizer::init()
+{
+	SuperClass::init();
+
+}
+
+
+BFGSOptimizer & BFGSOptimizer::operator=(const BFGSOptimizer &opt)
+{
+
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(opt);
+
+	}
+
+  	return *this;
+}
+
+
+
+void BFGSOptimizer::updateIterationMatrix()
+{
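+	// BFGS update of the iteration matrix B (approximation of the Hessian):
+	//   B_{k+1} = B_k + (r_k r_k^T)/(r_k^T p_k)
+	//                 - (B_k p_k p_k^T B_k)/(p_k^T B_k p_k)
+	// with p_k = x_{k+1} - x_k and r_k = gradient_{k+1} - gradient_k;
+	// operator! denotes the transpose in this matrix library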
+	matrix_type pk = m_parameters - m_oldParams;
+	matrix_type rk = m_gradient - m_oldGradient;
+	matrix_type rkt=!rk;
+	matrix_type pkt=!pk;
+
+	m_IterationMatrix = m_IterationMatrix 
+				+ (rk * rkt) * (1.0/ (rkt * pk)[0][0]) 
+				- (m_IterationMatrix * pk * pkt * m_IterationMatrix) * (1.0/ (pkt * m_IterationMatrix * pk)[0][0]);
+
+}

+ 84 - 0
BFGSOptimizer.h

@@ -0,0 +1,84 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	BFGSOptimizer.h: interface of the BFGSOptimizer class.
+//
+//	Written by: Matthias Wacker
+//
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _BFGS_OPTIMIZER_H_
+#define _BFGS_OPTIMIZER_H_
+
+#include "optimization/MatrixIterativeOptimizer.h"
+
+/*!
+	class for the BFGS
+
+   HowToUse:
+	
+	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
+	  * use setParameters() to set the start point
+	  * call init()
+	  * call optimize()
+
+
+ 	Implemented Abort criteria:
+		
+	  * maximum number of iterations
+	  *	time limit
+	  * parameter bounds
+	  * function value tolerance
+	  * parameter tolerance
+
+	Additional return reason:
+	
+
+ */
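+
+/*!
+	A minimal usage sketch (`myCostFunc` and `startPoint` are assumed to be
+	prepared by the caller; the cost-function setter belongs to the
+	SuperClass and its exact name is an assumption here):
+
+	\code
+	BFGSOptimizer bfgs;
+	// bfgs.setCostFunction(&myCostFunc); // SuperClass setter (name assumed)
+	bfgs.setParameters(startPoint);
+	bfgs.init();
+	bfgs.optimize();
+	\endcode
+*/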
+class BFGSOptimizer : public MatrixIterativeOptimizer
+{
+public:
+		typedef  MatrixIterativeOptimizer	SuperClass;
+		typedef SuperClass::matrix_type	matrix_type;
+
+	    /*!
+			Constructor.
+			\param loger : OptLogBase * to existing log class
+		*/
+		BFGSOptimizer(OptLogBase *loger=NULL);
+
+		/*!
+			Copy constructor
+			\param opt .. optimizer to copy
+		*/
+		BFGSOptimizer( const BFGSOptimizer &opt);
+
+
+		/*!
+			operator =
+		*/
+		BFGSOptimizer & operator=(const BFGSOptimizer &opt);
+
+
+		/*!
+			Destructor.
+		*/
+		virtual ~BFGSOptimizer();
+
+
+		/*!
+			internal initializations
+		*/
+		void init();
+
+protected:
+		
+
+		/*!
+			\brief update the Iteration Matrix
+		*/
+		void updateIterationMatrix() ;
+	
+};
+
+#endif

+ 246 - 0
BrentLineSearcher.cpp

@@ -0,0 +1,246 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	BrentLineSearcher.cpp: Implementation of the Brent LineSearcher class.
+//
+//	Written by Matthias Wacker
+//
+//	Big parts of code are from numerical recipes in C
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/BrentLineSearcher.h"
+#include <iostream>
+
+#define BrentSHFT(a,b,c,d)	(a)=(b);(b)=(c);(c)=(d);
+#define BrentSIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
+
+BrentLineSearcher::BrentLineSearcher() : m_CGOLD(0.38196601125010515179541316563436), m_ZEPS(1.0e-10)
+{
+	m_a = 0.0;
+	m_b = 3.8196601125010515179541316563436;
+	m_c = 10.0;
+	m_eps = 0.1;
+}
+
+BrentLineSearcher::BrentLineSearcher(CostFunction *costFunc,OptLogBase* loger) : SuperClass(costFunc,loger),m_CGOLD(0.38196601125010515179541316563436), m_ZEPS(1.0e-10)
+{
+	m_a = 0.0;
+	m_b = 3.8196601125010515179541316563436;
+	m_c = 10.0;
+	m_eps = 0.1;
+}
+
+BrentLineSearcher::BrentLineSearcher(const BrentLineSearcher &lin) : SuperClass(lin),m_CGOLD(0.38196601125010515179541316563436), m_ZEPS(1.0e-10)
+{
+	m_a = lin.m_a;
+	m_b = lin.m_b;
+	m_c = lin.m_c;
+	m_eps = lin.m_eps;
+}
+
+BrentLineSearcher & BrentLineSearcher::operator =(const BrentLineSearcher &lin)
+{
+	/*
+			avoid self-copy
+	*/
+	if(this != &lin)
+	{
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(lin);
+
+		m_a = lin.m_a;
+		m_b = lin.m_b;
+		m_c = lin.m_c;
+
+		m_eps = lin.m_eps;
+	
+	}
+
+	return *this;
+
+}
+
+BrentLineSearcher::~BrentLineSearcher()
+{
+}
+
+bool BrentLineSearcher::setBounds(double a, double c)
+{
+	if (a >= c)
+	{
+		return false;
+	}
+
+	m_a = a;
+	m_b = a + m_CGOLD * (c - a);
+	m_c = c;
+
+	return true;
+}
+
+bool BrentLineSearcher::setEps(double eps)
+{
+	if(eps < 0)
+	{
+		return false;
+	}
+	
+	m_eps = eps;
+	return true;
+}
+
+double BrentLineSearcher::optimize()
+{
+	int numIter=0;
+	double a=0.0;
+	double b=0.0;
+	double d=0.0;
+	double etemp=0.0;
+	double fu=0.0;
+	double fv=0.0;
+	double fw=0.0;
+	double fx=0.0;
+	double p=0.0;
+	double q=0.0;
+	double r=0.0;
+	double tol1=0.0;
+	double tol2=0.0;
+	double u=0.0;
+	double v=0.0;
+	double w=0.0;
+	double x=0.0;
+	double xm=0.0;
+	double e = 0.0;
+	double xmin=0.0;
+	int maxNumIter = 100;
+
+	x=w=v= m_b;
+	fw=fv=fx= evaluateSub(x);
+
+	a = m_a;
+	b = m_c;
+
+	/*
+		main loop
+	*/
+	for (numIter = 1;numIter <= maxNumIter; numIter++)
+	{
+		xm = 0.5* (a+b);
+		tol2 = 2.0 * (tol1=m_eps*fabs(x)+m_ZEPS);
+		
+		if(fabs(x-xm) <= (tol2-0.5*(b-a)))
+		{
+			xmin = x;
+			return xmin;
+		}
+
+		/*
+			construct trial parabolic fit
+		*/
+		if(fabs(e) > tol1)
+		{
+			r=(x-w)*(fx-fv);
+			q=(x-v)*(fx-fw);  // FIXME WACKER CHECK IF NR IS WRONG HERE!?
+			p=(x-v)*q-(x-w)*r;
+			q=2.0*(q-r);
+			if(q > 0.0) p = -p;
+			q = fabs(q);
+			etemp = e;
+			e=d;
+			if(fabs(p) >= fabs(0.5*q*etemp) || p <= q *(a-x) || p >= q*(b-x))
+			{
+				d=m_CGOLD * (e=(x >= xm ? a-x : b-x ));
+			}
+			else
+			{
+				d=p/q; // take the parabolic step
+				u = x+d;
+				if(u-a < tol2 || b-u < tol2)
+				{
+					d=BrentSIGN(tol1, xm - x);
+				}
+			}
+		}
+		else
+		{
+			d=m_CGOLD * (e=(x >= xm ? a-x : b-x ));
+		}
+
+		u = (fabs(d) >= tol1 ? x+d : x + BrentSIGN(tol1,d));
+		
+		/*
+			this is the one function evaluation per step
+		*/
+		fu= evaluateSub(u);
+
+		if (fu <= fx)
+		{
+			if(u >= x )
+			{
+				a=x;
+			}
+			else
+			{
+				b=x;
+			}
+
+			BrentSHFT(v,w,x,u);
+			BrentSHFT(fv,fw,fx,fu);
+		}
+		else
+		{
+			if (u < x)
+			{
+				a=u;				
+			}
+			else
+			{
+				b=u;
+			}
+			if(fu <= fw || w == x)
+			{
+				v=w;
+				w=u;
+				fv=fw;
+				fw=fu;
+			}
+			else if (fu <= fv || v== x || v == w)
+			{
+				v=u;
+				fv=fu;
+			}
+
+		}
+
+		if(m_verbose)
+		{
+			std::cout //<< "a,b,d,etemp,fu,fv,fw,fx,p,q,r,u,v,w,x,xm;" << std::endl
+				<< a << " "
+				<< b << " "
+				<< d << " "
+				<< etemp << " "
+				<< fu << " "
+				<< fv << " "
+				<< fw << " "
+				<< fx << " "
+				<< p << " "
+				<< q << " "
+				<< r << " "
+				<< u << " "
+				<< v << " "
+				<< w << " "
+				<< x << " "
+				<< xm << " "
+				<< std::endl;
+
+		}
+
+	} // main loop
+	
+	if(m_pLoger)
+		m_pLoger->logWarning("Brent took too many iterations. The tolerance threshold was too low for the allowed number of iterations.\n");
+
+	return x;
+}

+ 101 - 0
BrentLineSearcher.h

@@ -0,0 +1,101 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	BrentLineSearcher.h: interface of the BrentLineSearcher class.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _BRENT_LINE_SEARCHER_H
+#define _BRENT_LINE_SEARCHER_H
+
+#include <cmath>
+#include "LineSearcher.h"
+/*!
+	class for the Brent line search (a usage sketch follows below)
+
+   HowToUse:
+	
+	  * use setX0() to set the offset
+	  * use setH0()	to set the search direction
+	  * use setEps() to set the abort criterion
+	  * use setBounds() to set a different search bound for lambda than [0,10]
+	  * call init()
+	  * call optimize() (returns lambda)
+ */
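+
+/*!
+	A minimal usage sketch (`myCostFunc`, `x0` and `h0` are assumed to be
+	prepared by the caller):
+
+	\code
+	BrentLineSearcher brent(&myCostFunc, NULL);
+	brent.setX0(x0);            // offset
+	brent.setH0(h0);            // search direction
+	brent.setEps(0.01);         // abort threshold
+	brent.setBounds(0.0, 10.0); // search interval for lambda
+	double lambda = brent.optimize();
+	\endcode
+*/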
+class  BrentLineSearcher : public LineSearcher
+{
+public:
+
+	typedef  LineSearcher	SuperClass;
+	typedef SuperClass::matrix_type matrix_type;
+	
+
+	/*!
+		default constructor
+	*/
+	BrentLineSearcher();
+
+	/*!
+		constructor
+	*/
+	BrentLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+	/*!
+		Copy constructor
+		\param opt .. optimizer to copy
+	*/
+	BrentLineSearcher( const BrentLineSearcher &lin);
+	
+	/*!
+		operator =
+	*/
+	BrentLineSearcher & operator=(const BrentLineSearcher &lin);
+
+    /*!
+		Destructor.
+     */
+    virtual ~BrentLineSearcher();
+
+	/*!
+		set the bound parameters
+	*/
+	bool setBounds(double a, double c);
+
+	/*!
+		set the abort threshold
+	*/
+	bool setEps(double eps);
+
+	/*!
+		optimize
+		returns a double..
+	*/
+	double optimize();
+
+protected:
+
+	/*!
+		approximation of the golden cut ratio
+	*/
+	const double m_CGOLD; // = 0.38196601125010515179541316563436;
+
+	/*!
+		small number trying to achieve fractional accuracy for a minimum at 0
+	*/
+	const double m_ZEPS;	
+
+	/*!
+		the bounds
+	*/
+	double m_a, m_b, m_c;
+
+	/*!
+		the abort criteria
+	*/
+	double m_eps;
+
+
+};
+
+#endif

+ 377 - 0
CombinatorialOptimizer.cpp

@@ -0,0 +1,377 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CombinatorialOptimizer.cpp: Implementation of the 
+//		CombinatorialOptimizer class.
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/CombinatorialOptimizer.h"
+#include <stdlib.h>
+#include <time.h>
+
+#include "numbase.h" // ice random
+
+#include <iostream>
+
+using namespace opt;
+CombinatorialOptimizer::CombinatorialOptimizer(OptLogBase *loger): SuperClass(loger)
+{
+	m_mode = 1;
+	m_T0 = 1;
+	m_Tk = m_T0;
+	m_alpha = 0.95;
+	//m_stepLength = 1.0;
+	m_fast = false;
+}
+
+
+CombinatorialOptimizer::CombinatorialOptimizer( const CombinatorialOptimizer &opt) : SuperClass(opt)
+{
+	m_mode			= opt.m_mode;
+	m_T0			= opt.m_T0;
+	m_Tk			= opt.m_Tk;
+	m_alpha			= opt.m_alpha;
+	//m_stepLength		= opt.m_stepLength;
+	m_fast			= opt.m_fast;
+	
+}
+
+/*
+	operator=
+*/
+CombinatorialOptimizer & CombinatorialOptimizer::operator=(const CombinatorialOptimizer &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(opt);
+
+			
+		/*
+			own values:
+		*/
+		m_mode			= opt.m_mode;
+		m_T0			= opt.m_T0;
+		m_Tk			= opt.m_Tk;
+		m_alpha			= opt.m_alpha;
+		//m_stepLength	= opt.m_stepLength;
+		m_fast			= opt.m_fast;
+		
+		
+	}
+
+  	return *this;
+}
+
+
+CombinatorialOptimizer::~CombinatorialOptimizer()
+{
+}
+
+
+void CombinatorialOptimizer::init()
+{
+	SuperClass::init();
+
+	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+	/*
+		seed rand
+	*/
+	ice::Randomize();
+
+	/*
+		(re)set m_Tk
+	*/
+	m_Tk = m_T0;
+
+
+
+}
+
+bool CombinatorialOptimizer::setMode(int mode)
+{
+	switch(mode)
+	{
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+		m_mode = mode;
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool CombinatorialOptimizer::setControlSeqParam(double T0,double alpha)
+{
+	if(T0 < 0)
+		return false;
+
+	m_T0 = T0;
+
+	if(alpha < 0 || alpha > 1)
+		return false;
+
+	m_alpha = alpha;
+
+	return true;
+
+}
+
+bool CombinatorialOptimizer::accepptPoint(double oldValue, double newValue)
+{
+	bool accept = false;
+	//cout<<"old val: "<<oldValue<<endl;
+    //    cout<<"new val: "<<newValue<<endl;
+    //    cout<<endl;
+	switch(m_mode)
+	{
+	case 1:
+	{
+		/*
+			simulated annealing
+		*****************************/
+		
+		if(newValue <= oldValue)
+		{
+			accept = true;
+			break;
+		}
+				
+		/*
+			generate uniformly distributed random number
+		*/
+		double lambda = ice::RandomD();
+		
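+		/*
+			Metropolis criterion: a worse point is accepted with
+			probability exp( -(newValue - oldValue) / T_k )
+		*/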
+		if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
+		{
+			accept =true;
+		}
+		break;
+	}
+	case 2:
+		/*
+			threshold acceptance
+		*****************************/
+		if((newValue - oldValue) < m_Tk)
+		{
+			accept = true;
+		}
+		break;
+	case 3:
+		/*
+			great deluge algorithm
+		*****************************/
+		if(newValue <= m_Tk)
+		{
+			accept = true;
+		}
+
+		break;
+	case 4:
+		/*
+			stochastic relaxation
+		*****************************/
+		if(newValue <= oldValue)
+		{
+			accept = true;
+		}
+		break;
+	case 5:
+		{
+		/*
+			annealing & threshold
+		*****************************/
+		
+		/*
+			accept better values
+		*/
+		if(newValue <= oldValue)
+		{
+			accept = true;
+			break;
+		}
+		
+		/*
+			don't accept values bigger than m_threshold
+		*/
+		if(newValue > m_threshold )
+		{
+			accept = false;
+			break;
+		}
+
+		/*
+			generate uniformly distributed random number
+		*/
+		double lambda = ice::RandomD();
+		
+		if(lambda <= exp(-(newValue - oldValue)/((m_Tk < 1.0e-20)? 1.0e-20 :m_Tk)))
+		{
+			accept =true;
+		}
+		else
+			accept = false;
+		break;
+		}
+	default:
+		accept = false;
+	}
+
+	return accept;
+}
+
+matrix_type CombinatorialOptimizer::generatePoint()
+{
+    matrix_type newPoint(m_parameters);
+
+	for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+	{
+		if(m_scales[i][0] > 0.0 )
+		{
+			if(m_fast == true)
+			{
+				//newPoint[i][0] = m_parameters[i][0] + m_stepLength * m_Tk * ice::GaussRandom(1.0) ;
+				newPoint[i][0] = m_parameters[i][0] + m_scales[i][0] * m_Tk * ice::GaussRandom(1.0) ;
+			}
+			else
+			{
+				newPoint[i][0] = m_parameters[i][0] + m_scales[i][0] * ice::GaussRandom(1.0) ;
+			}
+		}
+	}
+
+        //cout<<"new Point: "<<newPoint<<endl;
+        
+	return newPoint;
+}	
+
+//bool CombinatorialOptimizer::setSteplength(double steplength)
+//{
+//	if(steplength <= 0)
+//		return false;
+//
+//	m_stepLength = steplength;
+//
+//	return true;
+//}
+
+int CombinatorialOptimizer::optimize()
+{
+
+	// init
+	init();
+
+
+	/*
+		start time criteria
+	*/
+	m_startTime = clock();
+
+
+
+	bool abort = false;
+
+	matrix_type newPoint;
+	double newValue;
+	
+	/* 
+		do the optimization
+	*/
+	while(abort == false)
+	{
+		/*
+			increase iteration counter
+		*/
+		m_numIter++;
+		
+		/*
+			Check iteration limit
+		*/
+		if(m_maxNumIterActive)
+		{
+			if(m_numIter >= m_maxNumIter)
+			{
+				/* set according return status and return */
+				m_returnReason = SUCCESS_MAXITER;
+				abort = true;
+				continue;
+			}
+		}
+
+		/*
+			update control seq.
+		*/
+		m_Tk *= m_alpha;
+
+
+		/*
+			generate new point
+		*/
+		newPoint = generatePoint();
+				
+
+		/*
+			evaluate cost function
+		*/
+		newValue = evaluateCostFunction(newPoint);
+
+		
+		/*
+			accept new point
+		*/
+		if( accepptPoint(m_currentCostFunctionValue,newValue) )
+		{
+        		m_parameters = newPoint;
+			m_currentCostFunctionValue = newValue;
+			
+		}
+                
+                if(m_verbose)
+                {
+                    std::cout << "CombinatorialOptimizer :: parameter: ";
+                    for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+                    {
+                        std::cout<< m_parameters[r][0] << " ";
+                    }
+                    std::cout << m_currentCostFunctionValue  << std::endl;
+                }
+		
+		/*
+			Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
+		*/
+		if(!checkParameters(m_parameters))
+		{
+			/* set according return status and the last parameters and return */
+			m_returnReason = ERROR_XOUTOFBOUNDS;
+			abort = true;
+		}
+
+		/*
+			Check Optimization Timelimit, if active
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				abort = true;
+				continue;
+			}
+		}
+
+	}
+
+	return m_returnReason;
+
+}

+ 179 - 0
CombinatorialOptimizer.h

@@ -0,0 +1,179 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CombinatorialOptimizer.h: interface of combinatorial optimizers.
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _COMBINATORIAL_OPTIMIZER_
+#define _COMBINATORIAL_OPTIMIZER_
+
+
+#include "optimization/SimpleOptimizer.h"
+#include "optimization/Opt_Namespace.h"
+namespace opt=optimization;
+
+///
+///	class for the Combinatorial optimizers
+///
+/// HowToUse (see the usage sketch below this comment):
+/// 
+///	  * use setMode() to specify which algorithm to use (simulated annealing, etc.)
+///	  * use setParameters() to set the start point (the scales set in the
+///		SuperClass act as the variance for the random number generation)
+///	  * use setFastMode() to let the random number generation contract with the control sequence
+///	  * use setControlSeqParam() to set the parameters for the control sequence
+///	  * call init()
+///	  * call optimize()
+///
+/// 
+///	Implemented Abort criteria:
+///		
+///	  * maximum number of iterations
+///	  *	time limit
+///	  * parameter bounds
+///	  
+///	Additional return reason:
+///	
+///
+///
+///
+///
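+///	A minimal usage sketch (`myCostFunc` and `startPoint` are assumed to be
+///	prepared by the caller; the cost-function setter belongs to the
+///	SuperClass and its exact name is an assumption here):
+///
+///	\code
+///	CombinatorialOptimizer co;
+///	// co.setCostFunction(&myCostFunc);  // SuperClass setter (name assumed)
+///	co.setParameters(startPoint);
+///	co.setMode(1);                    // 1 = simulated annealing
+///	co.setControlSeqParam(1.0, 0.95); // T_k = T0 * alpha^k
+///	co.optimize();                    // optimize() calls init() itself
+///	\endcode
+///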
+class CombinatorialOptimizer : public SimpleOptimizer
+{
+	public:
+
+		typedef  SimpleOptimizer SuperClass;
+		typedef  opt::matrix_type matrix_type;
+
+		///
+		///	Constructor.
+		///	@param loger : OptLogBase * to existing log class
+		///
+		CombinatorialOptimizer(OptLogBase *loger=NULL);
+
+		///
+		///	Copy constructor
+		///	@param opt .. optimizer to copy
+		///
+		CombinatorialOptimizer( const CombinatorialOptimizer &opt);
+
+		///
+		///	operator =
+		///
+		CombinatorialOptimizer & operator=(const CombinatorialOptimizer &opt);
+
+		///
+		///	Destructor.
+		///
+		~CombinatorialOptimizer();
+
+		/*!
+			enumeration for the return reasons of an optimizer,
+			has all elements of the SuperClass optimizer
+		*/
+	
+		///
+		///	Do internal initializations
+		///
+		void init();
+		
+		///
+		///	start the optimization
+		///
+		int optimize();
+
+		///
+		///	set mode:
+		///	
+		///	  1 = simulated annealing
+		///	  2 = threshold acceptance
+		///	  3 = great deluge
+		///	  4 = stochastic relaxation
+		///	  5 = mix of simulated annealing and "constant" great deluge
+		///			(that means SA is used, but there is a hard acceptance threshold:
+		///			 function values have to be below that threshold to have a
+		///			 chance of being accepted. This can be used as a simple kind of
+		///			 constraint handling when violations are penalized enough.)
+		///		
+		///
+		bool setMode(int mode);
+		
+
+		///
+		///	set the parameters for the control sequence
+		///	where t_k = t_0 * alpha^k
+		///	
+		///	@param T0 >= 0
+		///	@param alpha: 0 <= alpha <= 1
+		///
+		///	@return true in case of success
+		///
+		bool setControlSeqParam(double T0, double alpha);
+
+		///*!
+		//	@param steplength double value that is used as variance to generate new points
+		//*/
+		//bool setSteplength(double steplength);
+
+		///
+		///	set threshold used in mode 5 as constant deluge control sequence
+		///	@param thres 
+		///
+		inline void setThreshold(double thres){m_threshold = thres;};
+
+		///
+		///	use fast contraction mode: the control sequence is used 
+		///	for the variance to generate new points
+		///
+		inline void setFastMode(bool fastmode){m_fast = fastmode;};
+
+	private:
+
+		///
+		///	generate new point
+		///
+		matrix_type generatePoint();
+
+		///
+		///	accept point
+		///
+		bool accepptPoint(double oldValue, double newValue);
+
+		///
+		///	algorithm mode
+		///
+		int m_mode;
+
+		///
+		///	fast mode point generation?
+		///
+		bool m_fast;
+
+		///
+		///	control sequence Tk= T_0 * m_alpha^m_numIter
+		///
+		double m_T0;
+
+		///
+		///	control sequence Tk= T_0 * m_alpha^m_numIter
+		///
+		double m_Tk;
+
+		///
+		///	control sequence Tk= T_0 * m_alpha^m_numIter
+		///
+		double m_alpha;
+
+		///
+		///	Threshold for mode nr. 5
+		///
+		double m_threshold;
+
+		///
+		///	steplength used as variance to generate new points
+		///
+		double m_stepLength;
+		
+};
+
+#endif

+ 135 - 0
Constraints.cpp

@@ -0,0 +1,135 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	Constraints.cpp: implementation of the Constraints class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/Constraints.h"
+
+using namespace opt;
+Constraints::Constraints():		m_numOfParameters(0),
+								m_numOfConstraints(0),
+								m_hasAnalyticGradient(false),
+								m_hasAnalyticHessian(false)
+{
+
+}
+
+/*
+	constructor
+*/
+Constraints::Constraints(unsigned int numOfParameters, unsigned int numOfConstraints) : 
+								m_numOfParameters(numOfParameters),
+								m_numOfConstraints(numOfConstraints),
+								m_hasAnalyticGradient(false),
+								m_hasAnalyticHessian(false)
+{
+	m_weights = matrix_type(numOfConstraints,1);
+	for(int i=0; i < static_cast<int>(numOfConstraints); ++i)
+	{
+		m_weights[i][0] = 1;
+	}
+
+}
+
+/*
+	copy constructor							
+*/
+Constraints::Constraints(const Constraints &con)
+{
+	m_numOfParameters = con.m_numOfParameters;
+	m_numOfConstraints = con.m_numOfConstraints;
+	m_hasAnalyticGradient = con.m_hasAnalyticGradient;
+	m_hasAnalyticHessian = con.m_hasAnalyticHessian;
+	m_weights = con.m_weights;
+}
+
+
+/*
+	destructor
+*/
+Constraints::~Constraints()
+{
+}
+
+
+
+/*
+	return the AnalyticGradient
+*/
+const matrix_type Constraints::getAnalyticGradient(const matrix_type &x)
+{
+	matrix_type tmp(m_numOfParameters, m_numOfConstraints);
+	
+	return tmp;
+}
+
+/*
+	return the AnalyticHessian
+*/
+const matrix_type Constraints::getAnalyticHessian(const matrix_type &x)
+{
+	matrix_type tmp(m_numOfParameters, m_numOfConstraints * m_numOfParameters);
+	
+	return tmp;
+}
+
+bool Constraints::setWeightVector(const matrix_type weights)
+{
+	/*
+		dimensions have to match:
+	*/
+	if(static_cast<unsigned int >(weights.rows()) != m_numOfConstraints || weights.cols() != 1)
+	{
+		return false;
+	}
+
+	/*
+		weights have to be nonnegative
+	*/
+	for(unsigned int i= 0; i < m_numOfConstraints; i++)
+	{
+		if(weights[i][0] < 0)
+		{
+			return false;
+		}
+	}
+	
+	/*
+		everything is fine so far, copy weights and return true
+	*/
+	m_weights = weights;
+
+	return true;
+
+}
+
+matrix_type Constraints::evaluateSub(double lambda)
+{
+	return evaluate(m_x_0 + m_h_0 * lambda);
+}
+
+
+bool Constraints::setX0(const matrix_type &x0)
+{
+	if(x0.rows() == static_cast<int>(m_numOfParameters) && x0.cols() == 1)
+	{
+		m_x_0 = x0;
+		return true;
+	}
+	else
+		return false;
+}
+
+bool Constraints::setH0(const matrix_type &H0)
+{
+	if(H0.rows() == static_cast<int>(m_numOfParameters) && H0.cols() == 1)
+	{
+		m_h_0 = H0;
+		return true;
+	}
+	else
+		return false;
+}

+ 209 - 0
Constraints.h

@@ -0,0 +1,209 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	Constraints.h: interface of the Constraints class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _CONSTRAINTS_H_
+#define _CONSTRAINTS_H_
+
+
+#include "optimization/Opt_Namespace.h"
+namespace opt=optimization;
+
+/*!
+    Abstract base class for all constraints. 
+*/
+
+class Constraints  
+{
+	public:
+		typedef opt::matrix_type matrix_type;
+
+
+		/*!
+			default constructor
+		*/
+		Constraints();
+
+	     /*!
+            Constructor.
+			\param numOfParameters
+			\param numOfConstraints
+         */
+		Constraints(unsigned int numOfParameters, unsigned int numOfConstraints);
+        
+		/*
+			Copy constructor
+			\param con the constraint to copy
+		*/
+		Constraints(const Constraints &con);
+
+        /*!
+            Destructor.
+         */
+        virtual ~Constraints();
+
+		/*!
+			get Number of Parameters
+			
+			  \return the number of parameters
+		*/
+		inline unsigned int getNumOfParameters(){return m_numOfParameters;};
+
+		/*!
+			get Number of Constraints
+
+			\return the number of Constraints
+		*/
+		inline unsigned int getNumOfConstraints(){return m_numOfConstraints;};
+		
+
+		/*!
+			do the constraints provide analytic gradient computation?
+			
+			\return true if they do
+		*/
+		inline bool hasAnalyticGradient(){return m_hasAnalyticGradient;};
+
+		/*!
+			get the analytic gradient
+			  \param x =^ position
+			  \return matrix_type =[gradConstraint(i,j)]_{i,j=(0,0)}^{(m_numOfParameters,m_numOfConstraints)}
+			
+							where
+								(i,j) indicates the i-th component
+								of the j-th Constraint
+		*/
+		virtual const matrix_type getAnalyticGradient(const matrix_type &x);
+		
+		/*!
+			do the constraints provide analytic hessian computation?
+			
+			  \return true if they do
+		*/		
+		inline bool hasAnalyticHessian(){return m_hasAnalyticHessian;};
+
+		/*!
+			get the analytic hessian(s)
+			
+			  \param x =^ position
+			  \return matrix_type =[hessianConstraint(i,j)]_{i,j=(0,0)}^{(m_numOfParameters, 
+																		 m_numOfParameters * m_numOfConstraints)}
+			
+							where
+								(i, j mod m_numOfParameters) indicates the component
+								of the Hessian of the floor(j/m_numOfParameters)-th Constraint
+								(where floor means truncating the fractional part)
+
+			example:
+
+			2 dimensional parameters
+			3 constraints
+			
+			  return= [H_1(i,j),			H_2(i,j),			H_3(i,j)], 
+			  
+					where H_v are matrices of 2x2:
+					
+					  [h_1_1,1	h_1_1,2		h_2_1,1	h_2_1,2		h_3_1,1 h_3_1,2 
+					   h_1_2,1	h_1_2,2		h_2_2,1	h_2_2,2		h_3_2,1 h_3_2,2 ]
+
+
+
+
+		*/
+		virtual const matrix_type getAnalyticHessian(const matrix_type & x);
+
+		/*!
+			weight handling: setWeightVector
+			Set a weight vector for normalization of the constraint ranges. The value of the i-th constraint
+			gets multiplied with the i-th value of the weight vector.
+			The weights have to be non-negative, but can be 0 to disable the i-th constraint.
+
+			The default is [1,1, ... , 1], which weights the constraints equally.
+			
+			  \param weights
+			  \returns true, if everything went fine 
+		*/
+		bool setWeightVector(const matrix_type weights);
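+
+		/*!
+			A usage sketch (illustration only, for an object with three constraints):
+			disable the second constraint and double the weight of the third:
+
+				matrix_type w(3,1);
+				w[0][0] = 1.0;
+				w[1][0] = 0.0;	// constraint with index 1 disabled
+				w[2][0] = 2.0;	// constraint with index 2 counts double
+				constraints.setWeightVector(w);
+		*/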
+
+		/*!
+			get the weight vector
+		*/
+		inline matrix_type getWeightVector(){return m_weights;};
+
+		/*!
+			Initialization for the constraints
+		 */
+		virtual void init() = 0;
+
+		/*!
+			evaluate the constraints
+			\param x =^ position
+			\return matrix_type containing the i-th constraint result in the i-th component
+		*/
+		virtual matrix_type evaluate(const matrix_type & x) = 0;
+
+
+		/*!
+			set an x0 for 1dim line search
+			\param x0 affine part of affine linear parameter space
+		*/
+		bool setX0(const matrix_type &x0);
+
+		/*!
+			set an h0 for 1dim line search
+			\param h0 linear part of affine linear parameter space
+		*/
+		bool setH0(const matrix_type &H0);
+
+		/*!
+			evaluate 1dimension sub function fsub(lambda) = f(x0 + lambda * h0) 
+			\param lambda
+		*/
+		matrix_type evaluateSub(double lambda);
+
+
+	protected:
+
+		/*!
+			number of parameters		
+		*/
+		unsigned int m_numOfParameters;
+
+		/*!
+			number of constraints
+		*/
+		unsigned int m_numOfConstraints;
+
+		/*!
+			has analytic gradient ?
+		*/
+		bool m_hasAnalyticGradient;
+	
+		/*!
+			has analytic hessian ?
+		*/
+		bool m_hasAnalyticHessian;
+
+		/*!
+			internal weight vector 
+		*/
+		matrix_type m_weights;
+
+		/*!
+			x_0		
+		*/
+		matrix_type m_x_0;
+
+		/*!
+			direction h_0
+		*/
+		matrix_type m_h_0;
+
+
+};
+
+#endif

+ 119 - 0
CostFunction.cpp

@@ -0,0 +1,119 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CostFunction.cpp: implementation of the CostFunction class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/CostFunction.h"
+
+using namespace opt;
+
+CostFunction::CostFunction() : SuperClass()
+{
+	m_hasAnalyticGradient = false;
+	m_hasAnalyticHessian = false;
+	m_numEval = 0;
+
+}
+
+CostFunction::CostFunction(unsigned int numOfParameters) : SuperClass(numOfParameters)
+{
+	m_hasAnalyticGradient = false;
+	m_hasAnalyticHessian = false;
+	m_numEval = 0;
+}
+
+
+CostFunction::CostFunction(const CostFunction &func) : SuperClass(func)
+{
+	m_hasAnalyticGradient = func.m_hasAnalyticGradient;
+	m_hasAnalyticHessian = func.m_hasAnalyticHessian;
+	m_numEval = func.m_numEval;
+}
+
+CostFunction::~CostFunction()
+{
+}
+
+CostFunction &CostFunction::operator=(const CostFunction &func)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &func)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(func);
+
+		/*
+			own values:
+		*/
+		m_hasAnalyticGradient = func.m_hasAnalyticGradient;
+		m_hasAnalyticHessian = func.m_hasAnalyticHessian;
+		m_numEval = func.m_numEval;
+	
+	}
+
+  	return *this;
+}
+
+
+const matrix_type CostFunction::getAnalyticGradient(const matrix_type &x)
+{
+	/*
+		FIXME: WACKER: Throw exception as default behaviour ?
+	*/
+	
+	
+	return x * 0;
+}
+
+void CostFunction::init()
+{
+}
+
+const matrix_type CostFunction::getAnalyticHessian(const matrix_type &x)
+{
+	/*
+		FIXME: WACKER: Throw exception as default behaviour
+	*/
+	
+	
+	return x * !x *0.0;
+}
+
+bool CostFunction::setX0(const matrix_type &x0)
+{
+	if(x0.rows() == static_cast<int>(m_numOfParameters) && x0.cols() == 1)
+	{
+		m_x_0 = x0;
+		return true;
+	}
+	else
+		return false;
+}
+
+
+bool CostFunction::setH0(const matrix_type &H0)
+{
+	if(H0.rows() == static_cast<int>(m_numOfParameters) && H0.cols() == 1)
+	{
+		m_h_0 = H0;
+		return true;
+	}
+	else
+		return false;
+}
+
+double CostFunction::evaluateSub(double lambda)
+{
+	return evaluate(m_x_0 + m_h_0 * lambda);
+}
+
+

+ 142 - 0
CostFunction.h

@@ -0,0 +1,142 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CostFunction.h: interface of the CostFunction class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _COST_FUNCTION_H_
+#define _COST_FUNCTION_H_
+
+#include "optimization/Optimizable.h"
+#include "optimization/Opt_Namespace.h"
+
+namespace opt=optimization;
+
+/*!
+    Abstract base class for all cost functions.
+ */
+
+class CostFunction : public Optimizable
+{
+	public:
+
+		typedef		Optimizable		SuperClass;
+		typedef	opt::matrix_type	matrix_type;
+		
+		/*!
+			DefaultConstructor
+		*/
+		CostFunction();
+
+		/*!
+			Constructor.
+			\param numOfParameters
+         	*/
+		CostFunction(unsigned int numOfParameters);
+        
+		/*
+			Copy constructor
+		*/
+		CostFunction(const CostFunction &func);
+
+		
+		/*!
+			Destructor.
+		*/
+	    virtual ~CostFunction();
+
+		/*!
+			=operator
+		*/
+		CostFunction &operator=(const CostFunction &costFunc);
+
+
+		inline bool hasAnalyticGradient(){return m_hasAnalyticGradient;}
+
+		/*!
+			get the analytic gradient
+		*/
+		virtual const opt::matrix_type getAnalyticGradient(const opt::matrix_type &x);
+		
+		
+		inline bool hasAnalyticHessian(){return m_hasAnalyticHessian;}
+
+		/*!
+			get the analytic hessian
+		*/
+		virtual const opt::matrix_type getAnalyticHessian(const opt::matrix_type & x);
+
+		/*!
+			get number of Evaluations
+		*/
+		inline unsigned int getNumberOfEvaluations(){return m_numEval;};
+
+		/*!
+			reset the evaluation counter
+		*/
+		inline void resetNumberOfEvaluations(){m_numEval = 0;};
+
+		/*!
+			Initialization for the cost function
+		 */
+		virtual void init();
+
+		/*!
+			set an x0 for 1dim line search
+		*/
+		bool setX0(const opt::matrix_type &x0);
+
+		/*!
+			set an h0 for 1dim line search
+		*/
+		bool setH0(const opt::matrix_type &H0);
+
+		/*!
+			evaluate 1dimension sub function fsub(lambda) = f(x0 + lambda * h0) 
+		*/
+		double evaluateSub(double lambda);
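+
+		/*!
+			A line-search usage sketch (illustration only; myFunc stands for any
+			concrete CostFunction subclass, x0 and h0 are (numOfParameters x 1)
+			matrices):
+
+				myFunc.setX0(x0);	// offset of the search line
+				myFunc.setH0(h0);	// direction of the search line
+				double y = myFunc.evaluateSub(0.5);	// == evaluate(x0 + 0.5 * h0)
+		*/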
+
+		/*!
+		 * This function returns the full parameter vector. If the costfunction performs dimension reduction
+		 * this method will return the current parameter vector with full dimension. Otherwise the costfunction works with
+		 * the full parameter vector in any case and it is returned unchanged.
+		 * @param x current parameter vector (from optimizer)
+		 * @return full parameter vector
+		 */
+		virtual opt::matrix_type getFullParamsFromSubParams(const opt::matrix_type &x)
+		{
+		  // no dimension reduction happens here, so x already is the full parameter vector
+		  return x;
+		}
+
+	protected:
+
+		/*!
+			has analytic gradient ?
+		*/
+		bool m_hasAnalyticGradient;
+	
+		/*!
+			has analytic hessian ?
+		*/
+		bool m_hasAnalyticHessian;
+
+		/*!
+			x_0		
+		*/
+		opt::matrix_type m_x_0;
+
+		/*!
+			direction h_0
+		*/
+		opt::matrix_type m_h_0;
+
+		/*!
+			number of evaluations
+		*/
+		unsigned int m_numEval;
+};
+
+#endif

+ 105 - 0
CostFunction_ndim_2ndOrder.cpp

@@ -0,0 +1,105 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CostFunction_ndim_2ndOrder.cpp: implementation of a test
+//	CostFunction class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+
+#include "optimization/CostFunction_ndim_2ndOrder.h"
+
+using namespace opt;
+
+CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder() : SuperClass()
+{
+}
+
+
+CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder(unsigned int dim)  : SuperClass(dim)
+{
+	m_hasAnalyticGradient = true;
+	m_hasAnalyticHessian = false; // not implemented
+
+	m_A  = matrix_type(dim,dim);
+	m_bt = matrix_type(1,dim);
+	m_c	 = 0.0;
+
+}
+
+CostFunction_ndim_2ndOrder::CostFunction_ndim_2ndOrder(const CostFunction_ndim_2ndOrder &func):SuperClass(func)
+{
+	m_A = func.m_A;
+	m_bt = func.m_bt;
+	m_c = func.m_c;
+}
+
+CostFunction_ndim_2ndOrder & CostFunction_ndim_2ndOrder::operator=(const CostFunction_ndim_2ndOrder & func)
+{
+	/*
+			avoid self-copy
+	*/
+	if(this != &func)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(func);
+
+		/*
+			own values:
+		*/
+		m_A		= func.m_A;
+		m_bt	= func.m_bt;
+		m_c		= func.m_c;
+	}
+
+  	return *this;
+}
+
+CostFunction_ndim_2ndOrder::~CostFunction_ndim_2ndOrder()
+{
+}
+
+
+void CostFunction_ndim_2ndOrder::init()
+{
+}
+
+int CostFunction_ndim_2ndOrder::setAbc(const matrix_type &A,const matrix_type &b, double c)
+{
+	if(A.rows() == static_cast<int>(m_numOfParameters) && A.cols() == static_cast<int>(m_numOfParameters) &&
+	   b.rows() == static_cast<int>(m_numOfParameters) && b.cols() == 1)
+	{
+
+		m_A = A;
+		m_bt = !b;
+		m_c = c;
+
+		return 0;
+	}
+	else
+	{
+		return -1;
+	}
+}
+
+
+double CostFunction_ndim_2ndOrder::evaluate(const matrix_type &parameter)
+{
+	
+	//return ( !parameter * 0.5 * m_A * parameter + m_bt * parameter + m_c)(0,0);
+	return ( !parameter * 0.5 * m_A * parameter + m_bt * parameter)[0][0] + m_c;
+	
+}
+
+const matrix_type CostFunction_ndim_2ndOrder::getAnalyticGradient(const matrix_type &parameter)
+{
+	matrix_type tmp = parameter * 0;
+
+	tmp = m_A * parameter + !m_bt;
+
+	return tmp;
+}

+ 106 - 0
CostFunction_ndim_2ndOrder.h

@@ -0,0 +1,106 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	CostFunction_ndim_2ndOrder.h: interface of a test CostFunction class.
+//
+//	Written By: Matthias Wacker
+//
+//	f(x) = 0.5 * x^T * A * x + b^T * x + c 
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _COSTFUNCTION_NDIM_2NDORDER_H_
+#define _COSTFUNCTION_NDIM_2NDORDER_H_
+
+#include "optimization/CostFunction.h"
+
+class CostFunction_ndim_2ndOrder : public CostFunction
+{
+	public:
+
+		typedef CostFunction	SuperClass;
+		typedef SuperClass::matrix_type matrix_type;
+		
+		/*!
+			default constructor
+		*/
+		CostFunction_ndim_2ndOrder();
+
+        /*!
+            Constructor.
+			\param dim of parameter space
+         */
+		CostFunction_ndim_2ndOrder(unsigned int dim);
+        
+		/*!
+			Copy constructor
+			\param func the function to copy
+		*/
+		CostFunction_ndim_2ndOrder(const CostFunction_ndim_2ndOrder &func);
+
+		/*!
+			= operator
+		*/
+		CostFunction_ndim_2ndOrder &operator=(const CostFunction_ndim_2ndOrder & func);
+
+        /*!
+            Destructor.
+         */
+        ~CostFunction_ndim_2ndOrder();
+
+		/*!
+			initializations
+		*/
+		void init();
+
+
+		/*!
+			set A, b, and c
+			\param A
+			\param b
+			\param c
+
+			\return 0 in case of success.
+		*/
+		int setAbc(const matrix_type & A,const matrix_type &b, double c);
+
+		/*!
+			evaluate the costfunction
+			\param parameter to evaluate at 
+		*/
+		double evaluate(const matrix_type & parameter);
+
+
+		/*!
+			get the analytic Gradient 				
+		*/
+		const matrix_type getAnalyticGradient(const matrix_type &parameter);
+				
+
+	protected:
+
+		/*!
+			the matrix A of
+
+			f(x) = 0.5 * x^T * A * x + b^T * x + c 
+		
+		*/
+		matrix_type m_A;
+
+		/*!
+			the vector b^T of
+
+			f(x) = 0.5 * x^T * A * x + b^T * x + c 
+		*/
+		matrix_type m_bt;
+
+
+		/*
+			the value c of
+
+			f(x) = 0.5 * x^T * A * x + b^T * x + c 
+		*/
+		double m_c;
+
+};
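+
+/*!
+	A worked 2-dim sketch (illustration only): with A = [[2,0],[0,2]], b = (0,0)^T
+	and c = 1, the function is f(x) = x_1^2 + x_2^2 + 1 and the analytic gradient
+	is A*x + b = 2*x:
+
+		CostFunction_ndim_2ndOrder f(2);
+		matrix_type A(2,2,0), b(2,1,0);		// assuming the 3rd ctor arg is a fill value
+		A[0][0] = 2.0; A[1][1] = 2.0;
+		f.setAbc(A, b, 1.0);
+		matrix_type x(2,1,0);
+		x[0][0] = 1.0; x[1][0] = 2.0;
+		double y = f.evaluate(x);			// 0.5*(2*1 + 2*4) + 0 + 1 = 6
+*/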
+		
+#endif

+ 100 - 0
DerivativeBasedOptimizer.cpp

@@ -0,0 +1,100 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	DerivativeBasedOptimizer.cpp: Implementation of the DerivativeBased
+//				  Optimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/DerivativeBasedOptimizer.h"
+
+using namespace optimization;
+
+DerivativeBasedOptimizer::DerivativeBasedOptimizer( OptLogBase *loger): SuperClass(loger)
+{
+	m_analyticalGradients = true;
+	m_gradientTolActive = false;
+	m_gradientTol = 1.0e-3;
+}
+
+
+DerivativeBasedOptimizer::DerivativeBasedOptimizer( const DerivativeBasedOptimizer &opt) : SuperClass(opt)
+{
+	m_analyticalGradients = opt.m_analyticalGradients;
+	m_gradientTolActive = opt.m_gradientTolActive;
+	m_gradientTol = opt.m_gradientTol;
+
+	m_gradient = opt.m_gradient;
+}
+
+DerivativeBasedOptimizer::~DerivativeBasedOptimizer()
+{
+}
+
+void DerivativeBasedOptimizer::setGradientTol(bool active, double norm)
+{
+	m_gradientTol = norm;
+	m_gradientTolActive = active;
+}
+
+void DerivativeBasedOptimizer::init()
+{
+	SuperClass::init();
+	
+	m_gradient = matrix_type(m_numberOfParameters,1);
+}
+
+void DerivativeBasedOptimizer::useAnalyticalGradients(bool useAnalyticalGradients)
+{
+	m_analyticalGradients = useAnalyticalGradients;
+}
+
+
+const matrix_type  DerivativeBasedOptimizer::getNumericalGradient(const matrix_type & x , const matrix_type & maskWidth)
+{
+	matrix_type grad(m_numberOfParameters,1);
+	matrix_type grid(m_numberOfParameters, 2 * m_numberOfParameters);
+
+	/*
+		build the evaluation grid: column 2*i holds x + maskWidth(i)*e_i,
+		column 2*i+1 holds x - maskWidth(i)*e_i
+	*/
+	for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
+	{
+		for(int j = 0 ; j< 2 * static_cast<int>(m_numberOfParameters);j++)
+		{
+			grid[i][j] = x[i][0] + ((j == 2*i)? maskWidth[i][0] : 0.0)
+				+ ((j == 2*i+1)? maskWidth[i][0] *(-1.0): 0.0);
+		}
+	}
+	
+	matrix_type values = evaluateSetCostFunction(grid);
+		
+	for(int i=0; i < static_cast<int>(m_numberOfParameters);i++)
+	{
+		/* skip directions with zero scale */
+		if(m_scales[i][0] == 0 )
+		{
+			grad[i][0] = 0;
+			continue;
+		}
+
+		/* central difference quotient */
+		grad[i][0] = ( values[2*i][0] - values[2*i+1][0] )/( 2 * maskWidth[i][0]);
+	}
+	
+	return grad;
+}
+
+const matrix_type  DerivativeBasedOptimizer::getAnalyticalGradient(const matrix_type & x)
+{
+	return (m_maximize == true) ? (m_costFunction->getAnalyticGradient(x) * (-1.0))
+						  :   (m_costFunction->getAnalyticGradient(x)) ;	
+}
+
+
+const matrix_type DerivativeBasedOptimizer::getAnalyticalHessian(const matrix_type & x)
+{
+	return (m_maximize == true) ? (m_costFunction->getAnalyticHessian(x) * (-1.0))
+						  :   (m_costFunction->getAnalyticHessian(x)) ;	
+}

+ 127 - 0
DerivativeBasedOptimizer.h

@@ -0,0 +1,127 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	DerivativeBasedOptimizer.h: interface of DerivativeBasedOptimizer class.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _DERIVATIVE_BASED_OPTIMIZER_H_
+#define _DERIVATIVE_BASED_OPTIMIZER_H_
+
+
+#include "optimization/SimpleOptimizer.h"
+
+/*!
+	Abstract base class of all derivative-based optimizers.
+ */
+class DerivativeBasedOptimizer : public SimpleOptimizer
+{
+public:
+		typedef  SimpleOptimizer	SuperClass;	
+
+		///
+		///	Constructor.
+		///	
+		///	\param loger : OptLogBase * to existing log class
+		///
+		DerivativeBasedOptimizer(OptLogBase *loger=NULL);
+
+		///
+		///	Copy constructor
+		///	\param opt .. optimizer to copy
+		///
+		DerivativeBasedOptimizer( const DerivativeBasedOptimizer &opt);
+
+	        ///
+		///	Destructor.
+	        ///
+		virtual ~DerivativeBasedOptimizer();
+
+		///
+		///	enumeration for the return reasons of an optimizer,
+		///	has all elements of the SuperClass optimizer
+		///
+		enum {	SUCCESS_GRADIENTTOL = _to_continue_,
+				_to_continue_
+		};
+
+
+		///
+		///	\brief Set gradient tolerance abort criteria
+		///	
+		///	Set the gradient tolerance abort criterion. While iterating, if the gradient norm falls
+		///	below the given threshold, the optimization stops and returns SUCCESS_GRADIENTTOL,
+		///	provided 'active' is true.
+		///
+		///	\param active : bool to activate the criteria (true == active..)
+		///	\param norm : representing the threshold
+		///
+		void setGradientTol(bool active, double norm);
+		
+		///
+		///	Get the gradient tolerance abort criteria
+		///	\return double representing the threshold
+		///
+		inline double getGradientTol(){return m_gradientTol;};
+
+		///
+		///	Get the numerical gradient at position x; uses central differences with a mask width of maskWidth.
+		///	
+		///	  grad(f(x))_i \approx 
+		///	  {
+		///						[  f( x + (0, ... ,0,maskWidth(i,0),0, ... ,0 ) ) 
+		///						 - f( x - (0, ... ,0,maskWidth(i,0),0, ... ,0 ) )]
+		///						 / (2 * maskWidth(i,0))
+		///	  }
+		///
+		///	  \forall i \in [1, ... ,m_numberOfParameters]
+		///				
+		const optimization::matrix_type  getNumericalGradient(const optimization::matrix_type & x , const optimization::matrix_type & maskWidth);
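+
+		///	A quick sanity check (illustration only): for f(x) = x^2 at x = 3 with
+		///	maskWidth = 0.1, the central difference gives ((3.1)^2 - (2.9)^2) / 0.2 = 6,
+		///	which matches f'(3) exactly, since the third-order error term vanishes
+		///	for quadratics.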
+
+		///
+		///	Get the analytical gradient of the costfunction (if available), sign already inverted for maximization.
+		///
+		const optimization::matrix_type  getAnalyticalGradient(const optimization::matrix_type & x);
+		
+		///
+		///	UseAnalyticalGradients, if possible
+		///
+		void useAnalyticalGradients(bool useAnalyticalGradients);
+
+		///
+		///	Get the analytical Hessian of the costfunction (if available )
+		///	sign already inverted for maximization
+		///
+		const optimization::matrix_type getAnalyticalHessian(const optimization::matrix_type & x);
+
+		
+protected:
+		///
+		///	initialize
+		///
+		void init();
+
+		///
+		///	the gradient
+		///
+		optimization::matrix_type m_gradient;
+		
+		///
+		///	gradient tolerance threshold
+		///
+		double m_gradientTol;
+
+		///
+		///	gradient tolerance active
+		///
+		bool m_gradientTolActive;
+
+		///
+		///	use numerical or analytical Gradient Computation
+		///
+		bool m_analyticalGradients;
+	
+};
+
+#endif

+ 155 - 0
DimWrapperCostFunction.cpp

@@ -0,0 +1,155 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	DimWrapperCostFunction.cpp: implementation of the DimWrapperCostFunction class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include <iostream>
+#include "DimWrapperCostFunction.h"
+
+DimWrapperCostFunction::DimWrapperCostFunction() : SuperClass()
+{
+	m_pOrigCostFunc=NULL;
+	m_numOfParameters=0;
+}
+
+
+DimWrapperCostFunction::DimWrapperCostFunction(const DimWrapperCostFunction &func) : SuperClass(func)
+{
+	m_pOrigCostFunc= func.m_pOrigCostFunc;
+	m_selMatTransposed = func.m_selMatTransposed;
+	m_fixedValues = func.m_fixedValues;
+}
+
+DimWrapperCostFunction::~DimWrapperCostFunction()
+{
+	// nothing dynamically allocated..
+}
+
+DimWrapperCostFunction::DimWrapperCostFunction(CostFunction *orig, const matrix_type &selectionVector,
+						const matrix_type &fixedValues)
+{
+	if(orig == NULL)
+	{
+		std::cerr << "DimWrapperCostFunction::DimWrapperCostFunction NULL pointer error" << std::endl;
+		exit(1);
+	}
+	m_pOrigCostFunc = orig;
+
+	changeSelection(selectionVector,fixedValues);
+
+}
+
+
+void DimWrapperCostFunction::changeSelection(const matrix_type & selectionVector, const matrix_type &fixedValues)
+{
+	// BUILD MATRIX
+	//count nonzero values:
+	m_fixedValues=fixedValues;
+	int count=0;
+	int dim = selectionVector.rows();
+	std::vector<int> indices;
+	for(int i=0; i < dim ; ++i)
+	{
+		if (selectionVector[i][0] != 0.0 )
+		{
+			count++;
+			indices.push_back(i);
+			m_fixedValues[i][0]= 0.0; // resets fixed values to zero for active params
+		}
+	}
+	m_numOfParameters = count;	
+	m_selMatTransposed = matrix_type(dim,count);
+	for(int i =0; i < m_selMatTransposed.rows(); ++i)
+	{
+		for(int j =0; j < m_selMatTransposed.cols(); ++j)
+		{
+			if( indices[j]==i )
+			{
+				m_selMatTransposed[i][j]=1.0;
+			}
+			else
+			{
+				m_selMatTransposed[i][j]=0.0;
+			}
+		}
+	}
+}
+
+DimWrapperCostFunction &DimWrapperCostFunction::operator=(const DimWrapperCostFunction &func)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &func)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(func);
+
+		/*
+			own values:
+		*/
+		m_pOrigCostFunc= func.m_pOrigCostFunc;
+		m_selMatTransposed = func.m_selMatTransposed;
+		m_fixedValues = func.m_fixedValues;
+	
+	}
+
+  	return *this;
+}
+
+
+void DimWrapperCostFunction::init()
+{
+	m_pOrigCostFunc->init();
+}
+
+double DimWrapperCostFunction::evaluate(const matrix_type &x)
+{
+	return m_pOrigCostFunc->evaluate(m_selMatTransposed * x + m_fixedValues);
+}
+
+opt::matrix_type DimWrapperCostFunction::getFullParamsFromSubParams(const opt::matrix_type &x)
+{
+    return m_selMatTransposed * x + m_fixedValues;
+}
+
+/*bool DimWrapperCostFunction::setX0(const opt::matrix_type &x0)
+{
+	cout<<"dimWrapperCF - setX0"<<endl;
+	if(x0.rows() == static_cast<int>(m_numOfParameters) && x0.cols() == 1)
+	{
+		cout<<"set m_x_0"<<endl;
+		m_x_0 = x0;
+		return true;
+	}
+	else
+		return false;
+}
+		
+
+bool DimWrapperCostFunction::setH0(const opt::matrix_type &H0)
+{
+	cout<<"dimWrapperCF - setH0"<<endl;
+	if(H0.rows() == static_cast<int>(m_numOfParameters) && H0.cols() == 1)
+	{
+		m_h_0= H0;
+		return true;
+	}
+	else
+		return false;
+}
+		
+
+double DimWrapperCostFunction::evaluateSub(double lambda)
+{
+	cout<<"dimWrapperCF - evalSub"<<endl;
+	double eval= evaluate(m_x_0 + m_h_0 * lambda);
+	return eval;
+}*/

+ 132 - 0
DimWrapperCostFunction.h

@@ -0,0 +1,132 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	DimWrapperCostFunction.h: 
+//  Defines a wrapper around a cost function. Given a parameter matrix (parameter initializations) and a selection matrix, a flexible optimization of different, individual parameters becomes possible. 
+//	Written By: Matthias Wacker, Esther Platzer
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _DIMWRAP_COST_FUNCTION_H_
+#define _DIMWRAP_COST_FUNCTION_H_
+
+//#include "Optimizable.h"
+#include "optimization/CostFunction.h"
+#include "optimization/Opt_Namespace.h"
+
+namespace opt=optimization;
+
+class DimWrapperCostFunction : public CostFunction 
+{
+	public:
+
+		typedef	 CostFunction SuperClass;
+		
+		/*!
+			DefaultConstructor
+		*/
+		DimWrapperCostFunction();
+
+		/*!
+		 *	Useful constructor
+		 *
+		 *	@param orig pointer to the original cost function with paramDim dim_orig
+		 *	@param selectionVector (dim_orig x 1) selection vector with nonzero values 
+		 *		for the parameters that should be used in the new cost function
+		 *	@param fixedValues vector to specify fixed values for the unused parameters
+		 *
+		 *	the parameters of this cost function will be mapped to the original ones; the
+		 *	fixed values act as an offset on the parameter vector. evaluate() will return
+		 *	the result of the original function at
+		 *
+		 *	x_orig = m_selMatTransposed * x_this + fixedValues
+		 * */
+		DimWrapperCostFunction(CostFunction *orig, const opt::matrix_type &selectionVector, const opt::matrix_type &fixedValues);
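+
+		/*!
+		 *	A usage sketch (illustration only; origFunc stands for an existing
+		 *	3-parameter cost function): optimize parameters 0 and 2 only, keeping
+		 *	parameter 1 fixed at 5.0:
+		 *
+		 *		opt::matrix_type sel(3,1,0), fixed(3,1,0);
+		 *		sel[0][0] = 1.0; sel[2][0] = 1.0;	// parameters 0 and 2 stay active
+		 *		fixed[1][0] = 5.0;					// parameter 1 frozen at 5.0
+		 *		DimWrapperCostFunction wrapped(&origFunc, sel, fixed);
+		 *		// wrapped has 2 parameters; wrapped.evaluate(x) evaluates
+		 *		// origFunc at (x[0][0], 5.0, x[1][0])^T
+		 * */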
+
+		/*!
+		 *	copy constructor
+		 *
+		 * */
+		DimWrapperCostFunction(const DimWrapperCostFunction & costFunc);
+        
+	   /*!
+            Destructor.
+	   */
+	   virtual ~DimWrapperCostFunction();
+
+		/*!
+			=operator
+		*/
+		DimWrapperCostFunction &operator=(const DimWrapperCostFunction &costFunc);
+
+		/*!
+		 *	
+		 *	@param selectionVector (dim_orig x 1) selection vector with nonzero values 
+		 *		for the parameters that should be used in the new cost function
+		 *	@param fixedValues vector to specify fixed values for the unused parameters
+		 * */
+		void changeSelection(const opt::matrix_type & selectionVector, const opt::matrix_type &fixedValues);
+
+
+		/*!
+			Initialization for the cost function
+		 */
+		virtual void init();
+       		
+		/*!
+		 *
+		 * */
+		virtual double evaluate(const opt::matrix_type &parameter);
+		
+		/**
+		 * Returns the current full parameter vector (no dimension reduction for inactive params)
+		 * @param x the current parameter vector containing just active parameters
+		 * @return the full parameter vector with current active params
+		 */
+		virtual opt::matrix_type getFullParamsFromSubParams(const opt::matrix_type &x);
+
+	
+		/*!
+		set an x0 for 1dim line search
+		*/
+		//virtual bool setX0(const opt::matrix_type &x0);
+		
+		/*!
+		set an h0 for 1dim line search
+		*/
+		//virtual bool setH0(const opt::matrix_type &H0);
+		
+		/*!
+		evaluate 1dimension sub function fsub(lambda) = f(x0 + lambda * h0) 
+		*/
+		//virtual double evaluateSub(double lambda);
+
+	private:
+	
+		/*!
+		 *	the original cost function
+		 *
+		 *
+		 * */
+		CostFunction *m_pOrigCostFunc;
+
+
+		/*!
+		 *	this is the mapping matrix
+		 *
+		 *	x_orig = m_selMatTransposed * x + offset
+		 *
+		 * */
+		opt::matrix_type	m_selMatTransposed;
+
+		/*!
+		 *	fixed values
+		 *
+		 * */
+		opt::matrix_type	m_fixedValues;
+		
+		// //! x_0
+		//opt::matrix_type m_x_0;
+		// //! direction h_0
+		//opt::matrix_type m_h_0;
+};
+
+#endif

+ 544 - 0
DownhillSimplexOptimizer.cpp

@@ -0,0 +1,544 @@
+/**
+*
+*	@file:		DownhillSimplexOptimizer.cpp: implementation of the Downhill Simplex Optimizer
+*
+*	@author:	Matthias Wacker, Esther Platzer
+*
+*/
+
+#include "optimization/DownhillSimplexOptimizer.h"
+#include "optimization/Opt_Namespace.h"
+#include "mateigen.h" // for SVD
+
+#include <iostream>
+
+using namespace optimization;
+
+DownhillSimplexOptimizer::DownhillSimplexOptimizer(OptLogBase *loger): SuperClass(loger)
+{
+	m_abort			= false;
+	m_simplexInitialized	= false;
+	m_alpha			= 1.0;
+	m_beta			= 0.5;
+	m_gamma			= 1.0;
+	m_rankdeficiencythresh= 0.01;
+	m_rankcheckenabled= false;
+}
+
+DownhillSimplexOptimizer::DownhillSimplexOptimizer(const DownhillSimplexOptimizer &opt) : SuperClass(opt)
+{
+	m_abort					= opt.m_abort;
+	m_simplexInitialized	= opt.m_simplexInitialized;
+	m_vertices				= opt.m_vertices;
+	m_y						= opt.m_y;
+	m_alpha					= opt.m_alpha;
+	m_beta					= opt.m_beta;
+	m_gamma					= opt.m_gamma;
+	m_rankdeficiencythresh	= opt.m_rankdeficiencythresh;
+	m_rankcheckenabled		= opt.m_rankcheckenabled;
+}
+
+DownhillSimplexOptimizer::~DownhillSimplexOptimizer()
+{
+}
+
+bool DownhillSimplexOptimizer::setWholeSimplex(const matrix_type &simplex)
+{
+	if(simplex.rows() == static_cast<int>(m_numberOfParameters) && simplex.cols() == static_cast<int>(m_numberOfParameters + 1))
+	{
+		/*
+		*	Check if simplex has rank(simplex)=numberOfParameters
+		*	otherwise return false because of bad posed simplex 
+		*/
+//#ifdef OPTIMIZATION_DOWNHILLSIMPLEX_ENABLE_RANK_CHECK
+        if(m_rankcheckenabled)
+        {
+            int rank = getRank(simplex, m_rankdeficiencythresh);
+            
+            if(rank < static_cast<int>(m_numberOfParameters))
+            {
+                m_simplexInitialized = false;
+                return false;
+            }
+        }
+//#endif
+
+		m_vertices = simplex;
+		m_simplexInitialized = true;
+		
+        for (int k = 0; k < static_cast<int>(m_numberOfParameters +1); k++)
+        {
+            m_y[k][0] = evaluateCostFunction(m_vertices(0,k,m_numberOfParameters-1,k));
+        }
+        
+		return true;
+	}
+	else
+	{
+		m_simplexInitialized = false;
+		return false;
+	}
+}
+
+void DownhillSimplexOptimizer::init()
+{
+	SuperClass::init();
+	
+	// allocate matrices
+	m_vertices		= matrix_type(m_numberOfParameters, m_numberOfParameters + 1);
+	m_y			= matrix_type(m_numberOfParameters + 1,1);
+
+
+	m_abort=false;
+	m_simplexInitialized = false; // force a random initialization
+	
+	/*
+		compute the function values corresponding to the simplex
+	*/
+	/* do a random initialization */
+	if (m_simplexInitialized == false)
+	{
+		bool abort = false;
+		while(abort == false)
+		{
+			matrix_type simplex(m_numberOfParameters,m_numberOfParameters+1);
+			
+			//	random initialization of the simplex
+			for(int i = 0; i < static_cast<int>(m_numberOfParameters); i++)
+			{
+				for(int j = 0; j < static_cast<int>(m_numberOfParameters+1); j++)
+				{
+					simplex[i][j] = m_parameters[i][0];
+					
+					if( j == i+1 )
+					{
+						double tmpRand = m_scales[i][0];// * ((double)rand()) / RAND_MAX;
+						simplex[i][j] += tmpRand;		
+					}
+				}
+			}
+			if(this->setWholeSimplex(simplex) == true)
+			{
+				/* m_simplexInitialized only indicates a manual initialization .. */
+				m_simplexInitialized = false;
+				abort =true;
+			}
+		}
+	}
+
+	for (int k = 0; k < static_cast<int>(m_numberOfParameters +1); k++)
+	{
+		m_y[k][0] = evaluateCostFunction(m_vertices(0,k,m_numberOfParameters-1,k));
+	}
+}
+
+
+int DownhillSimplexOptimizer::optimize()
+{
+    //if(m_simplexInitialized == false)
+    //{
+       this->init();
+    //}
+  
+   	if(m_loger)
+		m_loger->logTrace("Starting Downhill Simplex Optimization\n");
+
+	int tmp=amoeba();
+	m_parameters = m_vertices(0,tmp,m_numberOfParameters-1,tmp);
+	
+	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+	return m_returnReason;
+}
+
+
+/*
+*	Numerical Recipies in C
+*/
+int DownhillSimplexOptimizer::amoeba()
+{
+	
+	double tol =m_funcTol;
+
+    const int ndim=m_numberOfParameters;     	// dimension
+    //int max_not_improve=10*ndim;				// maximum number of not 
+                                                // successful steps 
+    //int count_not_improve=0;
+    //double last_best_value=1e99;	
+ 
+	const int spts=ndim+1;						// number of vertices
+    int i,j, max_val;
+    
+    int ilo; 							    	// index of lowest (best) vertex-point
+    int ihi;									// index of highest (worst)
+                                                // vertex-point
+    int inhi; 									// index of next-highest 
+                                                // vertex-point
+    double rtol,ytry;
+    
+	double ysave;								// save function value 
+    matrix_type psum(1,ndim);
+
+	
+	/*
+		start time criteria
+	*/
+	m_startTime = clock();
+
+    
+    for (j=0;j<ndim;j++)
+	{ 											// get sum of vertex-coordinates
+		double sum=0.0;
+		
+		for (i=0;i<spts;i++)
+			sum += m_vertices[j][i];
+		psum[0][j]=sum;
+    }
+    
+    for (;;)
+	{											// loop until terminating
+		if(m_verbose)
+		{
+			for(int u = 0; u < ndim+1; u++)
+			{
+				for(int v = 0; v < ndim ; v++)
+				{
+					std::cout << m_vertices[v][u] << " ";
+				}
+				std::cout<< " " << m_y[u][0] << std::endl;
+			}
+			std::cout << std::endl;
+		}
+
+
+		ilo = 0;    								
+		// first we must determine 
+		// which point is highest, 
+		// next-highest
+		// and lowest
+		// by looping over the simplex
+
+		/*
+			this works only, if m_numberOfParameters=ndim >= 2
+		*/
+		if(ndim >= 2)
+		{
+			ihi = m_y[1][0]>m_y[2][0] ? (inhi=1,2) : (inhi=2,1); 	 
+			for (i=0;i<spts;i++)
+			{					 	               		
+				if (m_y[i][0] <= m_y[ilo][0]) ilo=i;
+				if (m_y[i][0] > m_y[ihi][0]) {
+					inhi=ihi;
+					ihi=i;
+				}
+				else
+					if (m_y[i][0] > m_y[inhi][0])
+						if (i != ihi) 
+							inhi=i;
+			}
+		}
+		else
+		{
+			if(m_y[0][0]>m_y[1][0])
+			{
+				ilo  = 1;
+				inhi = 1;
+				ihi  = 0;
+			}
+			else
+			{
+				ilo  = 0;
+				inhi = 0;
+				ihi  = 1;
+			}
+		}
+		
+		// log parameters if parameter logger is given (does nothing for other loggers!)
+        if(m_loger)
+        {
+          // extract optimal parameters from simplex
+          matrix_type optparams(m_vertices.rows(),1,0);
+
+          for(int i= 0; i< m_vertices.rows(); ++i)
+          {
+              optparams[i][0]= m_vertices[i][ilo];
+          }
+          matrix_type fullparams= m_costFunction->getFullParamsFromSubParams(optparams);
+          m_loger->writeParamsToFile(fullparams);
+        }
+
+//#define DEBUGESTHER
+//#ifdef DEBUGESTHER
+//           cout<<"Iteration: "<<m_numIter<<" ";
+//            for(unsigned int paramnum= 0; paramnum< m_numberOfParameters; paramnum++)
+//            {
+//                cout<<m_vertices[paramnum][ilo]<<" ";
+//            }
+//            cout<<endl;
+//			//std::cout<<"---------- ihi inhi ilo ---------- " <<ihi << " "<<inhi << " "<<ilo << " " << std::endl;
+//#endif
+		
+		/*
+			Check m_abort .. outofbounds may have occurred in amotry() last iteration
+		*/
+		if(m_abort == true)
+		{
+			break;
+		}
+
+		/*
+			Check time criterion
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				/* set according return status and end optimization */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				break;
+			}
+		}
+
+		
+		/*
+			check functol criterion
+		*/
+		if(m_funcTolActive == true)
+		{
+			rtol=2.0*fabs(m_y[ihi][0]-m_y[ilo][0])/(fabs(m_y[ihi][0])+fabs(m_y[ilo][0])+1e-10);
+												// compute the fractional 
+			// range from highest to lowest
+			
+			#ifdef OPT_DEBUG        
+				std::cout<<"rtol"<<"  "<<rtol<< std::endl;
+			#endif    
+			
+			if (rtol<tol)  
+			{  										// if satisfactory (terminating
+				//  criterion)
+				max_val=(int)m_y[ilo][0];				// save lowest value 
+				m_returnReason = SUCCESS_FUNCTOL;
+				break;          							// return
+			}
+		}
+		
+		/*
+			check param tol criterion
+		*/
+		if (m_paramTolActive == true)
+		{
+			// get norm of
+			//matrix_type tmp = m_vertices(0,ihi,m_numberOfParameters-1,ihi) - m_vertices(0,ilo,m_numberOfParameters,ilo);
+			//double norm)
+
+			if (   (m_vertices(0,ihi,m_numberOfParameters-1,ihi) - 
+				m_vertices(0,ilo,m_numberOfParameters-1,ilo)).Norm(0) < m_paramTol)
+			{
+				/*
+					set according return reason and end optimization
+				*/
+				m_returnReason = SUCCESS_PARAMTOL;
+				break;
+			}
+		}
+		
+		
+		
+		m_numIter++;
+
+		
+		/*
+			check max num iter criterion
+		*/
+		if(m_maxNumIterActive == true)
+		{
+			if (m_numIter >= m_maxNumIter)
+			{										
+				//max_val=(int)m_y[ilo][0];
+				m_returnReason = SUCCESS_MAXITER;
+				break;
+			}
+		}
+        
+		
+													// Begin a new iteration. 
+													// First extrapolate by the 
+													// factor alpha through the 
+													// face of the simplex across 
+													// from the high point, i.e. 
+													// reflect the simplex from the
+													// high point:
+		ytry=amotry(psum,ihi,-m_alpha);
+		if (ytry < m_y[ilo][0])
+		{
+			
+							 						// result is better than best
+													// point, try additional 
+													// extrapolation
+			ytry=amotry(psum,ihi,m_gamma);
+			
+			#ifdef OPT_DEBUG        
+				std::cout<<"Case one .. reflected highest through simplex" << std::endl;
+			#endif   
+
+		}
+		else
+		{
+		
+			if (ytry >= m_y[inhi][0]) 
+			{ 			
+				
+														// The reflected point is worse
+														// than the second
+				ysave=m_y[ihi][0];        					// highest, so look for 
+														// intermediate lower point.
+				
+				ytry=amotry(psum,ihi,m_beta);
+				#ifdef OPT_DEBUG        
+					std::cout<<"Case two .. looking for intermediate point" << std::endl;
+				#endif   
+				if (ytry >= ysave)
+				{					 					// Can't seem to get rid 
+														// of that high point. Better
+					#ifdef OPT_DEBUG        
+						std::cout<<"Case three .. contract around lowest point" << std::endl;
+					#endif 
+
+
+					for (i=0;i<spts;i++) 
+					{						 			//  contract around lowest 
+						// (best) point 
+						if (i!=ilo) 
+						{
+							for (j=0;j<ndim;j++) 
+							{
+								psum[0][j]=0.5*(m_vertices[j][i]+m_vertices[j][ilo]); 
+								#ifdef OPT_DEBUG                    
+								printf("psum(%d)=%f\n",j,psum[0][j]);
+								#endif                  
+								m_vertices[j][i]=psum[0][j];
+							}
+							if (checkParameters(!psum)) 
+							{
+								m_y[i][0]=	evaluateCostFunction(!psum); 
+								//eval_count++; 				// count function evaluations
+							}
+							else
+							{    
+								m_returnReason = ERROR_XOUTOFBOUNDS; // out of domain !!!
+								break;
+							}
+						}
+						for (j=0;j<ndim;j++)
+						{							 				// get sum of vertex-coordinates
+							double sum=0.0;
+							for (int ii=0;ii<spts;ii++)
+								sum += m_vertices[j][ii];
+							psum[0][j]=sum;
+						}//for
+					}//for								
+				}//if (ytry >= ysave)
+			}//if (ytry >= m_y(inhi))
+		}//else
+						
+    }											// next iteration
+    
+    return ilo; 								// return index of best point
+}
+
+
+double DownhillSimplexOptimizer::amotry(matrix_type & psum, int ihi, double fac)
+{
+    // extrapolates by a factor fac through the face of the simplex across
+    // from the high (worst) point, tries it, and replaces the high point if
+    // the new point is better
+    
+    const double maxreal= (m_maximize == true)?	-1.0e+300 : 1.0e+300;
+    double fac1,fac2,ytry;
+    int ndim=m_numberOfParameters;
+    matrix_type ptry(1,ndim);
+    fac1=(1.0-fac)/ndim;
+    fac2=fac1-fac;
+    
+    for (int j=0;j<ndim;j++) 
+    ptry[0][j]=psum[0][j]*fac1-m_vertices[j][ihi]*fac2;
+    
+    if (checkParameters(!ptry)) 
+	{ 
+		ytry=evaluateCostFunction(!ptry);	// evaluate function at the 
+														// trial point
+//		eval_count++;	    							// count function evaluations 
+		if (ytry<m_y[ihi][0])  {	    					// if trial point is better
+														// than the highest (worst) point,
+			m_y[ihi][0]=ytry;	    						// then replace the highest
+			for (int j=0; j<ndim;j++) {
+				psum[0][j] = psum[0][j] + ptry[0][j]-m_vertices[j][ihi]; 	// update psum
+				m_vertices[j][ihi]=ptry[0][j];		  	// replace highest point
+			}
+		}
+    }
+    else 
+	{
+		ytry=maxreal;  								// out of domain
+		m_abort = true;
+		m_returnReason = ERROR_XOUTOFBOUNDS;
+    
+	}
+    return ytry;
+}
+
+
+void DownhillSimplexOptimizer::setDownhillParams(double alpha, double beta, double gamma)
+{
+
+	m_alpha = alpha;
+	m_beta = beta;
+	m_gamma = gamma;
+}
+
+
+void DownhillSimplexOptimizer::setRankDeficiencyThresh(float rankdeficiencythresh)
+{
+    m_rankdeficiencythresh= rankdeficiencythresh;
+}
+
+
+void DownhillSimplexOptimizer::setRankCheckStatus(bool status)
+{
+    m_rankcheckenabled= status;
+}
+
+
+bool DownhillSimplexOptimizer::getRankCheckStatus()
+{
+    return m_rankcheckenabled;
+}
+
+
+unsigned int getRank(const matrix_type &A,double numZero)
+{
+	unsigned int tmpCount = 0;
+	matrix_type U,s,Vt;
+
+	//std::cout << "A.rows " << A.rows() << "A.cols() " << A.cols() << std::endl;
+	if(A.rows() < A.cols())
+	{
+		SingularValueDcmp(!A, U, s, Vt);			// call of the singular value decomp.
+	}
+	else
+	{
+		SingularValueDcmp(A, U, s, Vt);			// call of the singular value decomp.
+	}
+	
+	for(int i= 0; i < s.rows();i++)		// count singular values > numZero
+	{
+		if( s[i][i] > numZero )
+		{
+			tmpCount++;
+		}
+	}
+	
+	return tmpCount;
+}

+ 196 - 0
DownhillSimplexOptimizer.h

@@ -0,0 +1,196 @@
+///
+///
+///	@file:		DownhillSimplexOptimizer.h: interface of the Downhill Simplex Optimizer
+///	@author:	Matthias Wacker, Esther Platzer
+///
+///
+
+#ifndef _DOWNHILL_SIMPLEX_OPTIMIZER_H_
+#define _DOWNHILL_SIMPLEX_OPTIMIZER_H_
+
+#include <cmath>
+#include "optimization/SimpleOptimizer.h"
+
+/// To enable the rank check, define
+/// #define OPTIMIZATION_DOWNHILLSIMPLEX_ENABLE_RANK_CHECK
+/// (the check can also be toggled at runtime via setRankCheckStatus())
+
+
+///
+///	@class DownhillSimplexOptimizer
+///
+///  HowToUse:
+///	
+///	  * use setWholeSimplex to initialize the whole simplex OR
+///	  * use setParameters() to specify one point of the simplex;
+///		the init call will then generate random perturbations to 
+///		generate a full-rank simplex
+///	  * use setDownhillParams() to use values other than the defaults to "move"
+///		the simplex
+///	  * call init()
+///	  * call optimize()
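+///
+///	A minimal usage sketch (illustration only; attaching the cost function and
+///	start point is assumed to happen through the SimpleOptimizer/Optimizer
+///	base interface):
+///
+///		DownhillSimplexOptimizer opt;
+///		opt.setCostFunction(&costFunc);			// assumed base-class call
+///		opt.setParameters(startPoint);			// one simplex point; init() perturbs the rest
+///		opt.setDownhillParams(1.0, 0.5, 1.0);	// the constructor defaults for alpha, beta, gamma
+///		opt.init();
+///		int returnReason = opt.optimize();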
+///
+///
+///	Implemented Abort criteria:
+///	 		
+///		  * maximum number of iterations
+///		  *	time limit
+///		  * parameter bounds
+///		  * function value tolerance
+///		  * parameter tolerance
+///		  
+///		Additional return reason:
+///		
+///
+class DownhillSimplexOptimizer : public SimpleOptimizer
+{
+	public:
+
+		typedef SimpleOptimizer	SuperClass;
+		typedef SuperClass::matrix_type matrix_type;
+		///
+		///	Constructor
+		///	@param loger : OptLogBase * to existing log class or NULL
+		///
+		DownhillSimplexOptimizer(OptLogBase *loger=NULL);
+
+		///
+		///	CopyConstructor
+		///	@param opt : DownhillSimplexOptimizer to copy
+		///
+		DownhillSimplexOptimizer(const DownhillSimplexOptimizer &opt);
+
+		///
+		///
+		///
+		~DownhillSimplexOptimizer();
+		
+
+
+		///
+		///	set downhill specific parameters
+		///	@param alpha
+		///	@param beta
+		///	@param gamma
+		///	
+		///
+		void setDownhillParams(double alpha, double beta, double gamma);
+        
+        ///
+        /// Sets the rank deficiency threshold
+        /// @param rankdeficiencythresh rank deficiency threshold (DEFAULT= 0.01)
+        ///
+        void setRankDeficiencyThresh(float rankdeficiencythresh);
+        
+        ///
+        /// Enable or disable the rank check
+        /// @param status give the status for a rank check
+        ///
+        void setRankCheckStatus(bool status);
+        
+        ///
+        /// Returns the status of rankcheck
+        /// @retval true, if rankcheck is enabled
+        /// @retval false, if rankcheck is disabled
+        ///
+        bool getRankCheckStatus();
+
+		///
+		///	Do internal initializations
+		///
+		void init();
+
+	protected:
+		///
+		///	start optimization
+		///
+		virtual int optimize();
+
+		
+	private:
+		///
+		///	Initialize the whole simplex; for that, numOfParameters+1 points are needed.
+		///	(You might want to determine these points by a random search beforehand)
+		///	@param simplex : Matrix containing numOfParameters+1 points 
+		///					 that are numOfParameters-dimensional
+		///	@return bool : indicating if the attempt was successful 
+		///				   (might not, if dimensions don't match)
+		///
+		bool setWholeSimplex(const matrix_type &simplex);
+		
+		///
+		///	internal bool to store if simplex is initialized
+		///
+		bool m_simplexInitialized;
+		
+		
+		///
+		///	internal bool to offer a break in all internal functions
+		///
+		bool m_abort;
+
+		///
+		///	internal parameter to control the transformations of the simplex
+		///
+		double m_alpha;
+		
+		///
+		///	internal parameter to control the transformations of the simplex
+		///
+		double m_gamma;
+		
+		///
+		///	internal parameter to control the transformations of the simplex
+		///
+		double m_beta;
+
+
+
+		///
+		///	This method does the optimization itself.
+		///	It is called by optimize() and returns the index into the "final"
+		///	vertex matrix of the best point found.
+		///
+        int amoeba();
+
+		///
+		///	This method does the extrapolation of the highest vertex through the
+		///	simplex by the factor fac. It is called by amoeba()
+        ///
+        double amotry(matrix_type& psum,int ihi, double fac);
+
+		///
+		///	Matrix containing the vertices of the simplex
+		///
+		matrix_type m_vertices;
+		
+		///
+		///	Matrix(1dim) containing the function values corresponding to 
+		///	vertices
+		///
+		matrix_type m_y;
+
+		///
+        /// Rank deficiency threshold
+        ///
+        float m_rankdeficiencythresh;
+        
+        ///
+        /// Rank check status: if false, a rank check is disabled (default)
+        ///
+        bool m_rankcheckenabled;
+
+};
+
+		///
+		///	Check Rank of a matrix_type where singular values lower than numZero
+		///	are treated as zero. The computation is done by singular value
+		///	decomposition. Returns the numerical rank 
+		///	of the matrix
+		///
+		///	@param A matrix to compute rank of..
+		///	@param numZero threshold to decide for numerical zero
+		///	@return (numerical) rank of A
+		///
+		unsigned int getRank(const optimization::matrix_type& A, double numZero);
+
+#endif

+ 1 - 0
EmptyLog.cpp

@@ -0,0 +1 @@
+#include "optimization/EmptyLog.h" 

+ 54 - 0
EmptyLog.h

@@ -0,0 +1,54 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	EmptyLog.h: interface of the EmptyLog class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _OPT_LOG_EMPTY_H_
+#define _OPT_LOG_EMPTY_H_
+
+#include <string>
+#include <sstream>
+#include "optimization/OptLogBase.h"
+
+
+/*!
+    log class that discards all messages
+*/
+class EmptyLog : public OptLogBase
+{
+	public:
+		
+        /*!
+            Constructor.
+		*/
+		EmptyLog(){};
+        
+        /*!
+            Destructor.
+         */
+        virtual ~EmptyLog(){};
+		
+		
+
+	protected:
+
+		/**! Write error message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogError(const char* szMessage){};
+
+		/**! Write warning message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogWarning(const char* szMessage){};
+
+		/**! Write trace message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogTrace(const char* szMessage) {};
+	
+};
+
+
+
+#endif

+ 66 - 0
EqualityConstraints.h

@@ -0,0 +1,66 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	EqualityConstraints.h: interface of the EqualityConstraints class.
+//
+//	Written By: Matthias Wacker
+//
+//	Constraints of the form: H(x) = 0 
+//					where H is the vector of all equality constraints
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _EQUALITYCONSTRAINTS_H_
+#define _EQUALITYCONSTRAINTS_H_
+
+
+#include "optimization/Constraints.h"
+
+
+/*!
+    Abstract base class for all equality constraints. 
+*/
+
+class EqualityConstraints  : public Constraints
+{
+	public:
+		
+		typedef Constraints		SuperClass;
+		typedef SuperClass::matrix_type	matrix_type;
+
+		EqualityConstraints() : SuperClass() {};
+
+	     /*!
+            Constructor.
+			\param numOfParameters
+			\param numOfConstraints
+         */
+		EqualityConstraints(unsigned int numOfParameters, unsigned int numOfConstraints): SuperClass(numOfParameters,numOfConstraints){};
+        
+		/*
+			Copy constructor
+		*/
+		EqualityConstraints(const Constraints &con):SuperClass(con){};
+
+        /*!
+            Destructor.
+         */
+        ~EqualityConstraints(){};
+		
+		/*!
+			Initialization for the constraints
+		 */
+		virtual void init() = 0;
+
+		/*!
+			evaluate the constraints
+			\param x =^ position
+			\return matrix_type containing the i-th constraint result in the i-th component
+		*/
+		virtual matrix_type evaluate(const matrix_type & x) = 0;
+
+
+	protected:
+
+};
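+
+/*!
+	A derived-class sketch (illustration only): a single equality constraint
+	h(x) = x_0 + x_1 - 1 = 0 on a 2-dimensional parameter space:
+
+		class SumToOneConstraint : public EqualityConstraints
+		{
+			public:
+				SumToOneConstraint() : EqualityConstraints(2,1) {}
+				void init() {}
+				matrix_type evaluate(const matrix_type &x)
+				{
+					matrix_type h(1,1);
+					h[0][0] = x[0][0] + x[1][0] - 1.0;
+					return h;
+				}
+		};
+*/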
+
+#endif

+ 52 - 0
FileLog.cpp

@@ -0,0 +1,52 @@
+/*!
+	@file: optimization/FileLog.cpp
+	@author: Matthias Wacker
+
+
+*/
+
+
+#include "optimization/FileLog.h"
+#include <iostream>
+
+using namespace std;
+
+FileLog::FileLog( std::string file)
+{
+	// open file
+	m_stream.open(file.c_str(), ios_base::out);
+}
+
+FileLog::~FileLog()
+{
+	// close file
+	m_stream.close();
+}
+
+void FileLog::writeLogError(const char* szMessage)
+{
+	toFile(szMessage);
+}
+
+void FileLog::writeLogWarning(const char* szMessage)
+{
+	toFile(szMessage);
+}
+
+void FileLog::writeLogTrace(const char* szMessage)
+{
+	toFile(szMessage);
+}
+
+void FileLog::toFile(const char* szMessage)
+{
+	if(m_stream.good())
+	{
+		m_stream << szMessage << endl;
+		m_stream.flush();
+	}
+	else
+	{
+		std::cerr << " the log file stream is bad! " <<endl;
+	}
+}

+ 56 - 0
FileLog.h

@@ -0,0 +1,56 @@
+/*!
+	@file: optimization/FileLog.h
+	@author: Matthias Wacker
+*/
+
+#ifndef _FILELOG_H_
+#define _FILELOG_H_
+
+#include <string>
+#include <fstream>
+#include "optimization/OptLogBase.h"
+
+class FileLog : public OptLogBase
+{
+
+	public:
+	
+	/*!
+		default
+	*/
+	FileLog(){};
+
+	/*!
+		@param file string with the filename to log to
+	*/
+	//FileLog( string file);
+	FileLog( std::string file);
+
+	/*!
+		destructor
+	*/
+	virtual ~FileLog();
+
+	/**! Write error message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogError(const char* szMessage);
+
+	/**! Write warning message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogWarning(const char* szMessage);
+
+	/**! Write trace message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogTrace(const char* szMessage);
+
+	private:
+	
+	std::ofstream m_stream;
+
+	void toFile(const char* szMessage);
+};
+
+
+
+
+#endif

+ 138 - 0
GoldenCutLineSearcher.cpp

@@ -0,0 +1,138 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	GoldenCutLineSearcher.cpp: Implementation of the GoldenCutLineSearcher class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/GoldenCutLineSearcher.h"
+
+GoldenCutLineSearcher::GoldenCutLineSearcher() : m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
+{
+	m_c = 0.0;
+	m_d = 10.0;
+	m_eps = 0.1;
+}
+
+GoldenCutLineSearcher::GoldenCutLineSearcher(CostFunction *costFunc,OptLogBase* loger) : SuperClass(costFunc,loger),m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
+{
+	m_c = 0.0;
+	m_d = 10.0;
+	m_eps = 0.1;
+}
+
+GoldenCutLineSearcher::GoldenCutLineSearcher(const GoldenCutLineSearcher &lin) : SuperClass(lin),m_fac_a(0.38196601125010515179541316563436),m_fac_b(0.61803398874989484820458683436564)
+{
+	m_c = lin.m_c;
+	m_d = lin.m_d;
+	m_eps = lin.m_eps;
+}
+
+GoldenCutLineSearcher & GoldenCutLineSearcher::operator =(const GoldenCutLineSearcher &lin)
+{
+	/*
+			avoid self-copy
+	*/
+	if(this != &lin)
+	{
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(lin);
+
+		m_c = lin.m_c;
+		m_d = lin.m_d;
+		m_eps = lin.m_eps;
+	
+	}
+
+	return *this;
+
+}
+
+GoldenCutLineSearcher::~GoldenCutLineSearcher()
+{
+}
+
+bool GoldenCutLineSearcher::setBounds(double c, double d)
+{
+	if (c >= d)
+	{
+		return false;
+	}
+
+	m_c = c;
+	m_d = d;
+
+	return true;
+}
+
+bool GoldenCutLineSearcher::setEps(double eps)
+{
+	if(eps < 0)
+	{
+		return false;
+	}
+	
+	m_eps = eps;
+	return true;
+}
+
+double GoldenCutLineSearcher::optimize()
+{
+	double dk = m_d;
+	double ck = m_c;
+	double vk=0.0;
+	double wk=0.0;
+
+	bool vk_known = false;
+	bool wk_known = false;
+
+	double phivk=0.0;
+	double phiwk=0.0;
+
+	bool abort = false;
+
+	while(abort == false)
+	{
+		if((dk - ck) <= m_eps)
+		{
+			abort = true;
+			continue;
+		}
+
+		if(vk_known == false)
+		{
+			vk = ck + m_fac_a * (dk - ck);
+			phivk = evaluateSub(vk);
+		}
+		vk_known = false;
+
+		if(wk_known == false)
+		{
+			wk = ck + m_fac_b * (dk - ck);
+			phiwk = evaluateSub(wk);
+		}
+		wk_known = false;
+		
+		if(phivk < phiwk)
+		{
+			dk = wk;
+			wk = vk;
+			phiwk = phivk;
+			wk_known = true;
+		}
+		else
+		{
+			ck = vk;
+			vk = wk;
+			phivk = phiwk;
+			vk_known = true;
+		}
+
+		
+	}
+
+	return (ck + dk)/2.0;
+}

+ 101 - 0
GoldenCutLineSearcher.h

@@ -0,0 +1,101 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	GoldenCutLineSearcher.h: interface of the GoldenCutLineSearcher class.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _GOLDEN_CUT_LINE_SEARCHER_H_
+#define _GOLDEN_CUT_LINE_SEARCHER_H_
+
+#include "optimization/LineSearcher.h"
+/*!
+	class for the golden cut line search
+
+   HowToUse:
+	
+	  * use setX0() to set the offset
+	  * use setH0()	to set the search direction
+	  * use setEps() to set the abort criterion
+	  * use setBounds() to set a different search bound for lambda than [0,10]
+	  * call optimize() (returns lambda)
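+
+	  Example (a minimal sketch; Parabola1D stands for any user-defined
+	  CostFunction subclass, and x0, h0 for prepared matrix_type column
+	  vectors -- these names are placeholders, not part of the library):
+
+	  \code
+	  Parabola1D cf;                          // hypothetical cost function
+	  GoldenCutLineSearcher ls(&cf, NULL);    // no logging
+	  ls.setX0(x0);                           // offset
+	  ls.setH0(h0);                           // search direction
+	  ls.setBounds(0.0, 2.0);                 // search lambda in [0,2] instead of [0,10]
+	  ls.setEps(1.0e-4);                      // abort once the bracket is smaller than eps
+	  double lambda = ls.optimize();          // approx. minimizer of phi(lambda) = f(x0 + lambda*h0)
+	  \endcode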
+ */
+class  GoldenCutLineSearcher : public LineSearcher
+{
+public:
+
+	typedef  LineSearcher	SuperClass;
+	typedef  SuperClass::matrix_type matrix_type;
+	
+
+	/*!
+		default constructor
+	*/
+	GoldenCutLineSearcher();
+
+	/*!
+		constructor
+	*/
+	GoldenCutLineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+	/*!
+		Copy constructor
+		\param opt .. optimizer to copy
+	*/
+	GoldenCutLineSearcher( const GoldenCutLineSearcher &lin);
+	
+	/*!
+		operator =
+	*/
+	GoldenCutLineSearcher & operator=(const GoldenCutLineSearcher &lin);
+
+    /*!
+		Destructor.
+     */
+    virtual ~GoldenCutLineSearcher();
+
+	/*!
+		set the bound parameters
+	*/
+	bool setBounds(double c, double d);
+
+	/*!
+		set the abort threshold
+	*/
+	bool setEps(double eps);
+
+	/*!
+		optimize
+		\return the optimal step length lambda
+	*/
+	double optimize();
+
+protected:
+
+	/*!
+		approximation of the golden cut ratio
+	*/
+	const double m_fac_a; // = 0.38196601125010515179541316563436;
+	
+	/*!
+		approximation of the golden cut ratio 
+	*/
+	const double m_fac_b; // = 0.61803398874989484820458683436564;
+
+	/*!
+		the bounds
+	*/
+	double m_c, m_d;
+
+	/*!
+		the abort criteria
+	*/
+	double m_eps;
+
+
+};
+
+#endif
+

+ 409 - 0
GradientDescentOptimizer.cpp

@@ -0,0 +1,409 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	GradientDescentOptimizer.cpp: Implementation of the
+//								  GradientDescentOptimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/GradientDescentOptimizer.h"
+
+using namespace optimization;
+
+//#include <iostream>
+
+
+GradientDescentOptimizer::GradientDescentOptimizer(OptLogBase *loger)
+	: SuperClass(loger)
+{
+	m_stepLength = -1;
+}
+
+
+GradientDescentOptimizer::GradientDescentOptimizer( const GradientDescentOptimizer &opt) : SuperClass(opt)
+{
+	m_stepSize = opt.m_stepSize;
+	m_stepLength = opt.m_stepLength;
+}
+
+GradientDescentOptimizer::~GradientDescentOptimizer()
+{
+}
+
+void GradientDescentOptimizer::setStepSize(const matrix_type & stepSize)
+{
+	m_stepSize = stepSize;
+	m_stepLength = -m_stepSize.MaxVal();
+}
+
+void GradientDescentOptimizer::init()
+{
+	SuperClass::init();
+
+	if (m_stepSize.rows() != static_cast<int>(m_numberOfParameters))
+	{
+		m_stepSize = m_scales; 
+	}
+	else
+	{
+		// check for nonzero stepsize
+		bool tmp=false;
+		for(int i = 0; i < static_cast<int>(m_numberOfParameters); ++i)
+		{
+			if(m_stepSize[i][0] > 0 )
+			{
+				tmp=true;
+			}
+		}
+		if(tmp==false)
+		{
+			std::cerr << "GradientDescentOptimizer::init: all step sizes are zero - check your code!" << std::endl;
+			exit(1);
+		}
+	}
+
+	
+	m_numIter = 0;
+
+	m_analyticalGradients = m_costFunction->hasAnalyticGradient();
+	
+	/*
+		FIXME: WACKER: compute first value and first gradient!
+	*/
+	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+	m_gradient = (m_analyticalGradients == true && 
+				(m_costFunction->hasAnalyticGradient() == true) ) ?
+							getAnalyticalGradient(m_parameters) : 
+							getNumericalGradient(m_parameters, m_stepSize);
+
+
+}
+
+int GradientDescentOptimizer::optimize()
+{
+ 	/*
+		call init
+	*/
+	init();
+
+
+	if(m_loger)
+		m_loger->logTrace("GradientDescentOptimizer: starting optimization ... \n");
+
+
+
+	/*
+		start time criteria
+	*/
+	m_startTime = clock();
+
+
+	bool abort = false;
+//	double stepLength = -1.0;
+	double cosAngle = 0.0;
+	double downScaleFactor = 0.5;
+
+	matrix_type stepSize = m_stepSize;
+	double stepLength = m_stepLength;
+	
+
+	/*
+		compute start value and first gradient!
+	*/
+	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+	m_gradient = (m_analyticalGradients == true && 
+				(m_costFunction->hasAnalyticGradient() == true) ) ?
+							getAnalyticalGradient(m_parameters) : 
+							getNumericalGradient(m_parameters, m_stepSize);
+
+
+
+	/*
+		check abort criteria for gradient
+	*/	
+	if(m_gradientTolActive)
+	{
+		if(m_gradient.Norm(0) < m_gradientTol){
+			m_returnReason = SUCCESS_GRADIENTTOL;
+			abort = true;
+
+			if(m_verbose == true)
+			{
+				std::cout << "# Gradient Descent :: aborting because of GradientTol" << std::endl;
+			}
+		}
+	}	
+
+
+	/* do the optimization */
+	while(abort == false)
+	{
+		/*
+			increase iteration counter
+		*/
+		m_numIter++;
+		
+		if(m_verbose == true)
+		{
+			std::cout << "# Gradient Descent :: Iteration: " << m_numIter << " : current parameters :\n ";
+			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+			{
+				std::cout<< m_parameters[r][0] << " ";
+			}
+			std::cout << m_currentCostFunctionValue << std::endl;
+
+			std::cout << " current gradient :\n ";
+			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+            {
+                std::cout<< m_gradient[r][0] << " ";
+            }
+            std::cout << std::endl;
+
+		}
+
+		/*
+			Check iteration limit
+		*/
+		if(m_maxNumIterActive)
+		{
+			if(m_numIter >= m_maxNumIter)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# Gradient Descent :: aborting because of numIter" << std::endl;
+				}
+
+
+				/* set according return status and return */
+				m_returnReason = SUCCESS_MAXITER;
+				abort = true;
+				continue;
+			}
+		}
+
+
+		/*
+			store last results for comparison
+		*/
+		matrix_type oldParams = m_parameters;
+		matrix_type oldGradient = m_gradient;
+		double oldFuncValue = m_currentCostFunctionValue;
+		
+
+		/*
+			get gradient
+		*/
+		//m_gradient = (m_analyticalGradients == true) ? getAnalyticalGradient(m_parameters) : getNumericalGradient(m_parameters, stepSize);
+		m_gradient = (m_analyticalGradients == true && 
+				(m_costFunction->hasAnalyticGradient() == true) ) ?
+							getAnalyticalGradient(m_parameters) : 
+							getNumericalGradient(m_parameters, stepSize);
+
+		/*
+			weight with scalings
+		*/
+		//m_gradient = m_gradient.ElMul(m_scales);
+		for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+		{
+			m_gradient[k][0] *= m_scales[k][0];
+		}
+
+		/*
+			check gradient tol
+		*/
+		if(m_gradientTolActive)
+		{
+			if(m_gradient.Norm(0) < m_gradientTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# Gradient Descent :: aborting because of GradientTol" << std::endl;
+				}
+
+				m_returnReason = SUCCESS_GRADIENTTOL;
+				abort = true;
+				continue;
+			}
+		}
+
+		/*
+			set gradient length to 1, if length of gradient is too small .. 
+			return ERROR_COMPUTATION_UNSTABLE
+			(this can happen if gradienTol is not active..)
+			FIXME: WACKER think about a "usefull" limit
+		*/
+		if (m_gradient.Norm(0) > 1.0e-50)
+		{
+			for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+			{
+				m_gradient[k][0] /= m_gradient.Norm(0);
+			}
+		}
+		else
+		{
+			m_returnReason = ERROR_COMPUTATION_UNSTABLE;
+			if(m_verbose == true)
+			{
+				std::cout << "# Gradient Descent :: aborting because of ERROR_COMPUTATION_UNSTABLE" << std::endl;
+			}
+
+			abort =true;
+			continue;
+		}
+		
+		/*
+			check direction change to scale down for convergence!
+		*/
+		if(( !m_gradient * oldGradient)[0][0] < cosAngle)
+		{
+			/*
+				direction change is more than acos(cosAngle) deg.
+				scaledown by downScaleFactor
+			*/
+
+			for(int k=0; k < static_cast<int>(m_numberOfParameters); ++k)
+				stepSize[k][0] *= downScaleFactor;
+
+			stepLength *= downScaleFactor;
+			/*FIXME: WACKER: only as long
+			as there is no steplength computation!*/
+		}
+		
+				
+		/*
+			set next iteration step
+		*/
+		m_parameters = m_parameters + m_gradient * stepLength ; 
+
+		/*
+			Check if it is in bounds, paramTol, funcTol, NumIter, gradienttol, maxSeconds
+		*/
+		if(m_lowerParameterBoundActive || m_upperParameterBoundActive)
+		{
+			for(int i=0; i <static_cast<int>(m_numberOfParameters); i++)
+			{
+				if( m_upperParameterBoundActive)
+				{
+					if(m_parameters[i][0] >= m_upperParameterBound[i][0])
+					{
+						if(m_verbose == true)
+						{
+							std::cout << "# Gradient Descent :: aborting because of parameter Bounds" << std::endl;
+						}
+
+						/* set according return status and the last parameters and return */
+						m_returnReason = ERROR_XOUTOFBOUNDS;
+						m_parameters = oldParams;
+						abort = true;
+						continue;
+					}
+				}
+				if( m_lowerParameterBoundActive)
+				{
+
+					if(m_parameters[i][0] <= m_lowerParameterBound[i][0])
+					{
+						if(m_verbose == true)
+						{
+							std::cout << "# Gradient Descent :: aborting because of parameter Bounds" << std::endl;
+						}
+
+						/* set according return status and return the last parameter*/
+						m_returnReason = ERROR_XOUTOFBOUNDS;
+						m_parameters = oldParams;
+						abort = true;
+						continue;
+					}
+				}
+			}
+		}
+
+		/*
+			Get new Costfunction Value
+		*/
+		m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+		/*
+			Check ParamTol
+		*/
+		if(m_paramTolActive)
+		{
+			if( (m_parameters - oldParams).Norm(0) < m_paramTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# Gradient Descent :: aborting because of param Tol" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_PARAMTOL;
+				/*m_parameters = oldParams;*/
+				abort = true;
+				continue;
+			}
+
+		}
+	
+		if(m_funcTolActive)
+		{
+			if( fabs((m_currentCostFunctionValue - oldFuncValue)) < m_funcTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# Gradient Descent :: aborting because of Func Tol" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_FUNCTOL;
+				/* m_parameters = oldParams; */
+				abort = true;
+				continue;
+			}
+		}
+
+		/*
+			Check Optimization Timilimit, if active
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+		
+			//std::cout << (_startTime - _currentTime) << std::endl;
+			//std::cout << CLOCKS_PER_SEC << std::endl;
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# Gradient Descent :: aborting because of Time Limit" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				m_parameters = oldParams;
+				abort = true;
+				continue;
+			}
+		}
+
+	}
+
+	if(m_verbose)
+	{
+		std::cout << "# Gradient Descent :: RESULT: ";
+		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+		{
+			std::cout<< m_parameters[r][0] << " ";
+		}
+		std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
+	}
+
+	return m_returnReason;
+
+}

+ 112 - 0
GradientDescentOptimizer.h

@@ -0,0 +1,112 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	GradientDescentOptimizer.h: interface of the optimizer GradientDescent.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _GRADIENT_DESCENT_OPTIMIZER_
+#define _GRADIENT_DESCENT_OPTIMIZER_
+
+#include <cmath>
+#include "optimization/DerivativeBasedOptimizer.h"
+
+
+///
+///	Class GradientDescentOptimizer
+///
+///  HowToUse:
+///	
+///	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
+///	  * use setParameters() to set the start point
+///	  * call init()
+///	  * call optimize()
+///	  
+///	   
+///	  
+///  Implemented Abort criteria:
+///	  		
+///	* maximum number of iterations
+///	* time limit
+///	* parameter bounds
+///	* function value tolerance
+/// 	* parameter tolerance
+///	* gradient tolerance
+///  
+///	Additional return reason:
+///
+///	* ERROR_COMPUTATION_UNSTABLE
+///
+/// GradientDescent supports the 'scales' feature
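+///
+///  Example (a minimal sketch; MyCostFunction stands for any user-defined
+///  CostFunction subclass, and setCostFunction() is assumed to be provided by
+///  the Optimizer base class -- both names are assumptions, not shown in this file):
+///
+///  \code
+///  MyCostFunction cf;                         // hypothetical cost function
+///  GradientDescentOptimizer opt;
+///  opt.setCostFunction(&cf);                  // assumed base-class setter
+///  optimization::matrix_type x0(cf.getNumOfParameters(), 1);  // start point, fill with initial values
+///  opt.setParameters(x0);
+///  opt.setMaxNumIter(true, 1000);
+///  int reason = opt.optimize();               // returns a status code, e.g. SUCCESS_PARAMTOL
+///  \endcode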
+class GradientDescentOptimizer : public DerivativeBasedOptimizer
+{
+	public:
+
+		typedef  DerivativeBasedOptimizer SuperClass;
+
+		///
+		///	Constructor.
+		///	\param loger : OptLogBase * to existing log class
+		///
+		GradientDescentOptimizer(OptLogBase *loger=NULL);
+
+		///
+		///	Copy constructor
+		///	\param opt .. optimizer to copy
+		///
+		GradientDescentOptimizer( const GradientDescentOptimizer &opt);
+
+	        ///
+		///	Destructor.
+		///
+		~GradientDescentOptimizer();
+
+
+		///
+		///	\brief Set the initial step size
+		///	The initial stepsize is used to give the optimizer an initial value for the order of 
+		///	magnitude to start with.
+		///	(e.g. step 100000 in x direction or 0.01 ?)
+		///	\param stepSize with the step size for the i-th dimension in the i-th position.
+		///
+		void setStepSize(const optimization::matrix_type & stepSize);
+
+		///
+		///	Get the current step size
+		///	\return matrix_type with the current step sizes
+		///
+		inline const optimization::matrix_type & getStepSize(){return m_stepSize;};
+
+
+		///
+		///	do internal initializations		
+		///
+		void init();
+
+
+		///
+		///	start the optimization
+		///	\return the return status that can be found in the corresponding enumeration
+		///
+		int optimize();
+
+		inline void setStepLength(double stepLength){m_stepLength=stepLength;}
+
+
+	private:
+
+		///
+		///	step size vector
+		///
+		optimization::matrix_type m_stepSize;
+		
+		///
+		/// .. steplength
+		///
+		double m_stepLength;
+
+		
+};
+
+#endif

+ 71 - 0
InequalityConstraints.h

@@ -0,0 +1,71 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	InequalityConstraints.h: interface of the InequalityConstraints class.
+//
+//	Written By: Matthias Wacker
+//
+//
+//	Constraints of the form: G(x) \le 0   (less or equal)
+//								where G is the vector of all
+//								inequality constraints
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _INEQUALITYCONSTRAINTS_H_
+#define _INEQUALITYCONSTRAINTS_H_
+
+
+#include "optimization/Constraints.h"
+
+
+/*!
+    Abstract base class for all inequality constraints.
+*/
+
+class InequalityConstraints  : public Constraints
+{
+	public:
+		
+		typedef Constraints		SuperClass;
+		typedef SuperClass::matrix_type matrix_type;
+		
+		/*!
+			default constructor
+		*/
+		InequalityConstraints() : SuperClass(){};
+
+	     /*!
+            Constructor.
+			\param numOfParameters
+			\param numOfConstraints
+         */
+		InequalityConstraints(unsigned int numOfParameters, unsigned int numOfConstraints): SuperClass(numOfParameters,numOfConstraints){};
+        
+		/*!
+			Copy constructor
+		*/
+		InequalityConstraints(const Constraints &con):SuperClass(con){};
+
+        /*!
+            Destructor.
+         */
+        ~InequalityConstraints(){};
+		
+		/*!
+			Initialization for the cost function
+		 */
+		virtual void init() = 0;
+
+		/*!
+			evaluate the constraints
+			\param x =^ position
+			\return matrix_type containing the i-th constraint result in the i-th component
+		*/
+		virtual matrix_type evaluate(const matrix_type & x) = 0;
+
+
+	protected:
+
+};
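+
+/*
+	Example (a minimal sketch; it assumes Constraints declares no further pure
+	virtual members beyond init() and evaluate()): a single constraint
+	x_0 - 5 <= 0, i.e. x_0 <= 5.
+
+	class BoxConstraint : public InequalityConstraints
+	{
+		public:
+			BoxConstraint() : InequalityConstraints(1, 1) {};
+			void init() {};
+			matrix_type evaluate(const matrix_type & x)
+			{
+				matrix_type g(1, 1);
+				g[0][0] = x[0][0] - 5.0;	// violated iff positive
+				return g;
+			}
+	};
+*/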
+
+#endif

+ 214 - 0
LineSearcher.cpp

@@ -0,0 +1,214 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	LineSearcher.cpp: Implementation of the LineSearcher class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/LineSearcher.h"
+#include <iostream>
+
+using namespace optimization;
+
+LineSearcher::LineSearcher()
+{
+	m_maximize = false;
+	m_pEq = NULL;
+	m_numEq = 0;
+	m_pIneq = NULL;
+	m_numIneq = 0;
+	m_PP = 1000.0;
+	m_verbose= false;
+}
+
+LineSearcher::LineSearcher(CostFunction *costFunc,OptLogBase* loger) : m_pFunc(costFunc), m_pLoger(loger)
+{
+	m_maximize = false;
+	m_pEq = NULL;
+	m_numEq = 0;
+	m_pIneq = NULL;
+	m_numIneq = 0;
+	m_PP = 10.0;
+	m_verbose= false;
+}
+
+LineSearcher::LineSearcher(const LineSearcher &lin)
+{
+	m_pFunc		= lin.m_pFunc;
+	m_pLoger	= lin.m_pLoger;
+	m_maximize	= lin.m_maximize;
+	m_pEq		= lin.m_pEq;
+	m_numEq		= lin.m_numEq;
+	m_pIneq		= lin.m_pIneq;
+	m_numIneq	= lin.m_numIneq;
+	m_PP		= lin.m_PP;
+	m_verbose	= lin.m_verbose;
+}
+
+LineSearcher & LineSearcher::operator =(const LineSearcher &lin)
+{
+	/*
+			avoid self-copy
+	*/
+	if(this != &lin)
+	{
+		/*
+			=operator of SuperClass
+		*/
+		//SuperClass::operator=(opt);
+		
+		m_pFunc = lin.m_pFunc;
+		m_pLoger = lin.m_pLoger;
+		m_maximize = lin.m_maximize;
+		m_pEq		= lin.m_pEq;
+		m_numEq		= lin.m_numEq;
+		m_pIneq		= lin.m_pIneq;
+		m_numIneq	= lin.m_numIneq;
+		m_PP		= lin.m_PP;
+		m_verbose	= lin.m_verbose;
+
+	}
+
+	return *this;
+
+}
+
+LineSearcher::~LineSearcher()
+{
+}
+
+
+bool LineSearcher::setH0(const matrix_type &H0)
+{
+	bool tmp = true;
+	if(m_pFunc)
+	{
+		if(m_pFunc->setH0(H0) == false)
+		{
+			tmp = false;
+		}
+	}
+	if(m_pEq)
+	{
+		if(m_pEq->setH0(H0) == false)
+		{
+			tmp = false;
+		}
+	}
+	if(m_pIneq)
+	{
+		if(m_pIneq->setH0(H0) == false)
+		{
+			tmp = false;
+		}
+	}
+
+	return tmp;
+
+
+}
+
+bool LineSearcher::setX0(const matrix_type &x0)
+{
+	bool tmp = true;
+	if(m_pFunc)
+	{
+		if(m_pFunc->setX0(x0) == false)
+		{
+			tmp = false;
+		}
+	}
+	if(m_pEq)
+	{
+		if(m_pEq->setX0(x0) == false)
+		{
+			tmp = false;
+		}
+	}
+	if(m_pIneq)
+	{
+		if(m_pIneq->setX0(x0) == false)
+		{
+			tmp = false;
+		}
+	}
+
+	return tmp;
+	
+}
+
+bool LineSearcher::setConstraints(InequalityConstraints *ineq,EqualityConstraints *eq)
+{
+	
+	if(ineq)
+	{
+		if(ineq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
+		{
+			return false;
+		}
+		m_numIneq = ineq->getNumOfConstraints();
+		m_pIneq = ineq;
+	}
+	else
+	{
+		m_numIneq = 0;
+	}
+
+	if(eq)
+	{
+		if(eq->getNumOfParameters() != m_pFunc->getNumOfParameters() )
+		{
+			return false;
+		}
+		m_numEq = eq->getNumOfConstraints();
+		m_pEq = eq;
+	}
+	else
+	{
+		m_numEq = 0;
+	}
+
+
+	return true;
+
+}
+
+bool LineSearcher::setPP(double pp)
+{ 
+	if( pp> 0.0)
+	{
+		m_PP = pp;
+		return true;
+	}
+	else
+	{
+		return false;
+	}
+}
+
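+/*
+	Penalized 1-d objective along the current direction (sign flipped when
+	maximizing):
+
+		phi(lambda) = f(x0 + lambda*h0)
+		            + m_PP * sum_i (1 + g_i(lambda)^2)                    (all equality constraints)
+		            + m_PP * sum_j (1 + h_j(lambda)^2) for all h_j > 0    (violated inequality constraints)
+*/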
+double LineSearcher::evaluateSub(double lambda)
+{
+	double val = 0;
+
+	val = (m_maximize == true ? (-1.0 * m_pFunc->evaluateSub(lambda)): (m_pFunc->evaluateSub(lambda)));
+	if(m_numEq)
+	{
+		matrix_type tmpval = m_pEq->evaluateSub(lambda);
+		for(int j = 0; j<static_cast<int>(m_numEq);j++)
+		{
+			val += m_PP * (m_maximize == true ? (-1.0 * (1+(tmpval[j][0]* tmpval[j][0]))) : (1+ (tmpval[j][0]* tmpval[j][0])));
+		}
+	}
+	if(m_numIneq)
+	{
+		matrix_type tmpval = m_pIneq->evaluateSub(lambda);
+		for(int j = 0; j<static_cast<int>(m_numIneq);j++)
+		{
+			val += m_PP * (m_maximize == true ? (-1.0 * (tmpval[j][0] > 0.0 ? (1 + tmpval[j][0] * tmpval[j][0]) : 0.0))
+											  :         (tmpval[j][0] > 0.0 ? (1 + tmpval[j][0] * tmpval[j][0]) : 0.0));
+		}
+	}
+
+	return val;
+}

+ 147 - 0
LineSearcher.h

@@ -0,0 +1,147 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	LineSearcher.h: interface of the LineSearcher class.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef _LINE_SEARCHER_H_
+#define _LINE_SEARCHER_H_
+
+
+#include "optimization/CostFunction.h"
+#include "optimization/OptLogBase.h"
+#include "optimization/Opt_Namespace.h"
+#include "optimization/InequalityConstraints.h"
+#include "optimization/EqualityConstraints.h"
+
+/*!
+	Abstract base class of all line search optimizers.
+*/
+class LineSearcher
+{
+public:
+	typedef optimization::matrix_type matrix_type;
+
+	/*!
+		default constructor
+	*/
+	LineSearcher();
+
+	/*!
+		constructor
+	*/
+	LineSearcher(CostFunction *costFunc, OptLogBase *loger);
+
+	/*!
+		Copy constructor
+		\param opt .. optimizer to copy
+	*/
+	LineSearcher( const LineSearcher &lin);
+	
+	/*!
+		operator =
+	*/
+	LineSearcher & operator=(const LineSearcher &lin);
+
+    /*!
+		Destructor.
+     */
+    virtual ~LineSearcher();
+
+	/*!
+		search maximum?
+	*/
+	inline void setMaximize(bool maximize){m_maximize = maximize;};
+
+	/*!
+		set x0
+	*/
+	bool setX0(const matrix_type & x0);
+
+	/*!
+		set a direction
+	*/
+	bool setH0(const matrix_type & H0);
+
+	/*!
+		optimize
+		\return the optimal step length lambda
+	*/
+	virtual double optimize() = 0;
+	
+	/*!
+		set Constraints for penalty terms
+		\returns true in case of success
+	*/
+	bool setConstraints(InequalityConstraints *ineq,EqualityConstraints *eq);
+
+
+	/*!
+		evaluate penalized 1d function
+	*/
+	double evaluateSub(double lambda);
+
+	/*!
+		penalty parameter	
+	*/
+	bool setPP(double pp);
+
+	/*!
+		set verbose to true to get more information
+	*/
+	inline void setVerbose(bool verb){m_verbose = verb;};	
+
+protected:
+
+	/*!
+		maximization?
+	*/
+	bool m_maximize;
+	
+	/*!
+		the costfunction
+	*/
+	CostFunction *m_pFunc;
+
+	/*!
+		number of Inequality Constraints 
+	*/
+	unsigned int		   m_numIneq;
+
+	/*!
+		Inequality Constraints 
+	*/
+	InequalityConstraints *m_pIneq;
+
+	/*!
+		number of Equality Constraints 
+	*/
+	unsigned int		 m_numEq;
+
+	/*!
+		Equality Constraints 
+	*/
+	EqualityConstraints *m_pEq;
+
+	/*
+		penalty parameter
+	*/
+	double m_PP;
+	
+	/*!
+		Pointer to the logger
+	*/
+	OptLogBase *m_pLoger;
+
+	/*!
+		verbose?
+	*/
+	bool m_verbose;
+};
+
+#endif
+

+ 8 - 0
Makefile

@@ -0,0 +1,8 @@
+#TARGETS_FROM:=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM)
+#$(info recursivly going up: $(TARGETS_FROM) ($(shell pwd)))
+
+all:
+
+%:
+	$(MAKE) TARGETS_FROM=$(notdir $(patsubst %/,%,$(shell pwd)))/$(TARGETS_FROM) -C .. $@
+

+ 103 - 0
Makefile.inc

@@ -0,0 +1,103 @@
+# LIBRARY-DIRECTORY-MAKEFILE
+# conventions:
+# - all subdirectories containing a "Makefile.inc" are considered sublibraries
+#   exception: "progs/" and "tests/" subdirectories!
+# - all ".C", ".cpp" and ".c" files in the current directory are linked to a
+#   library
+# - the library depends on all sublibraries 
+# - the library name is created with $(LIBNAME), i.e. it will be somehow
+#   related to the directory name and with the extension .a
+#   (e.g. lib1/sublib -> lib1_sublib.a)
+# - the library will be added to the default build list ALL_LIBRARIES
+
+# --------------------------------
+# - remember the last subdirectory
+#
+# set the variable $(SUBDIR) correctly to the current subdirectory. this
+# variable can be used throughout the current makefile.inc. The
+# SUBDIR_before and SUBDIR_add variables are only required so that we can
+# recover the previous content of SUBDIR before exiting the makefile.inc
+
+SUBDIR_add:=$(dir $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+SUBDIR_before:=$(SUBDIR)
+SUBDIR:=$(strip $(SUBDIR_add))
+SUBDIR_before_$(SUBDIR):=$(SUBDIR_before)
+ifeq "$(SUBDIR)" "./"
+SUBDIR:=
+endif
+
+# ------------------------
+# - include subdirectories
+#
+# note the variables $(SUBDIRS_OF_$(SUBDIR)) are required later on to recover
+# the dependencies automatically. if you handle dependencies on your own, you
+# can also dump the $(SUBDIRS_OF_$(SUBDIR)) variable, and include the
+# makefile.inc of the subdirectories on your own...
+
+SUBDIRS_OF_$(SUBDIR):=$(patsubst %/Makefile.inc,%,$(wildcard $(SUBDIR)*/Makefile.inc))
+include $(SUBDIRS_OF_$(SUBDIR):%=%/Makefile.inc)
+
+# ----------------------------
+# - include local dependencies
+#
+# you can specify libraries needed by the individual objects or by the whole
+# directory. the object specific additional libraries are only considered
+# when compiling the specific object files
+# TODO: update documentation...
+
+-include $(SUBDIR)libdepend.inc
+
+$(foreach d,$(filter-out %progs %tests,$(SUBDIRS_OF_$(SUBDIR))),$(eval $(call PKG_DEPEND_INT,$(d))))
+
+# ---------------------------
+# - objects in this directory
+#
+# the use of the variable $(OBJS) is not mandatory. it is mandatory however
+# to update $(ALL_OBJS) in a way that it contains the path and name of
+# all objects. otherwise we can not include the appropriate .d files.
+
+OBJS:=$(patsubst %.cpp,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.cpp))) \
+      $(patsubst %.C,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.C))) \
+	  $(shell grep -ls Q_OBJECT $(SUBDIR)*.h | sed -e's@^@/@;s@.*/@$(OBJDIR)moc_@;s@\.h$$@.o@') \
+      $(patsubst %.c,$(OBJDIR)%.o,$(notdir $(wildcard $(SUBDIR)*.c)))
+ALL_OBJS += $(OBJS)
+
+# ----------------------------
+# - binaries in this directory
+#
+# output of binaries in this directory. none of the variables has to be used.
+# but everything you add to $(ALL_LIBRARIES) and $(ALL_BINARIES) will be
+# compiled with `make all`. be sure again to add the files with full path.
+
+LIBRARY_BASENAME:=$(call LIBNAME,$(SUBDIR))
+ifneq "$(SUBDIR)" ""
+ALL_LIBRARIES+=$(LIBDIR)$(LIBRARY_BASENAME).$(LINK_FILE_EXTENSION)
+endif
+
+# ---------------------
+# - binary dependencies
+#
+# there is no way of determining the binary dependencies automatically, so we
+# follow conventions. the current library depends on all sublibraries.
+# all other dependencies have to be added manually by specifying, that the
+# current .pc file depends on some other .pc file. binaries depending on
+# libraries should exclusivelly use the .pc files as well.
+
+ifeq "$(SKIP_BUILD_$(OBJDIR))" "1"
+$(LIBDIR)$(LIBRARY_BASENAME).a:
+else
+$(LIBDIR)$(LIBRARY_BASENAME).a:$(OBJS) \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).a,.$(LINK_FILE_EXTENSION))
+endif
+
+$(PKGDIR)$(LIBRARY_BASENAME).pc: \
+	$(call PRINT_INTLIB_DEPS,$(PKGDIR)$(LIBRARY_BASENAME).pc,.pc)
+
+# -------------------
+# - subdir management
+#
+# as the last step, always add this line to correctly recover the subdirectory
+# of the makefile including this one!
+
+SUBDIR:=$(SUBDIR_before_$(SUBDIR))
+

+ 505 - 0
MatrixIterativeOptimizer.cpp

@@ -0,0 +1,505 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	MatrixIterativeOptimizer.cpp: Implementation of the
+//								  MatrixIterativeOptimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/MatrixIterativeOptimizer.h"
+#include <iostream>
+#include "optimization/AdditionalIceUtils.h" // for linsolve
+#include "optimization/DownhillSimplexOptimizer.h" //FIXME WACKER: INCLUDE DOWNHILL SIMPLEX FOR THE GET RANK FUNCTION -> EXPORT IT 
+
+
+MatrixIterativeOptimizer::MatrixIterativeOptimizer(OptLogBase *loger) : SuperClass(loger)
+{
+	m_IterationMatrix	= matrix_type(m_numberOfParameters,m_numberOfParameters);
+	m_hk				= matrix_type(m_numberOfParameters,1);
+	m_stepSize			= matrix_type(m_numberOfParameters,1);
+	m_oldGradient		= matrix_type(m_numberOfParameters,1);
+	m_oldParams			= matrix_type(m_numberOfParameters,1);
+
+	m_lambdak			= 1.0;
+
+}
+
+
+MatrixIterativeOptimizer::MatrixIterativeOptimizer( const MatrixIterativeOptimizer &opt) : SuperClass(opt)
+{
+
+	m_IterationMatrix	= opt.m_IterationMatrix;
+	m_hk				= opt.m_hk;
+	m_lin				= opt.m_lin;
+	m_stepSize			= opt.m_stepSize;
+	m_lambdak			= opt.m_lambdak;
+	m_oldParams			= opt.m_oldParams;
+	m_oldGradient		= opt.m_oldGradient;
+
+}
+
+
+MatrixIterativeOptimizer::~MatrixIterativeOptimizer()
+{
+}
+
+
+
+void MatrixIterativeOptimizer::init()
+{
+	if(m_loger)
+		m_loger->logTrace("entering MatrixIterativeOptimizer:: init ...  \n");
+
+	SuperClass::init();
+
+	m_lin= ArmijoLineSearcher(m_costFunction,m_loger);
+
+	//if(m_verbose==true) //FIXME commented out as output is not human readable
+	//	m_lin.setVerbose(true);
+	
+	if(m_maximize == true)
+	{
+		m_lin.setMaximize(true);
+	}
+
+	m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
+	for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
+	{
+		m_IterationMatrix[i][i] = 1.0;
+	}
+
+	
+	if(m_loger)
+		m_loger->logTrace("leaving MatrixIterativeOptimizer:: init ...\n");
+}
+
+
+MatrixIterativeOptimizer & MatrixIterativeOptimizer::operator=(const MatrixIterativeOptimizer &opt)
+{
+
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(opt);
+
+		/*
+			own values:
+		*/
+		m_IterationMatrix	= opt.m_IterationMatrix;
+		m_hk				= opt.m_hk;
+		m_lin				= opt.m_lin;
+		m_stepSize			= opt.m_stepSize;
+		m_lambdak			= opt.m_lambdak;
+		m_oldParams			= opt.m_oldParams;
+		m_oldGradient		= opt.m_oldGradient;
+	}
+
+  	return *this;
+}
+
+void MatrixIterativeOptimizer::computeDescentDirection()
+{
+	if(m_loger)
+		m_loger->logTrace("entering MatrixIterativeOptimizer::computeDescentDirection ... \n");
+	
+	if(m_numberOfParameters == 1)
+	{
+		if(fabs(m_IterationMatrix[0][0]) >= 1.0e-20 )
+		{
+			m_hk[0][0] = -m_gradient[0][0] / m_IterationMatrix[0][0];
+		}
+		else
+		{
+			m_hk = -m_gradient;
+		}
+	}
+	else
+	{
+		
+		//if(getRank(m_IterationMatrix, 1.0e-5 ) == m_numberOfParameters)
+		if(true)
+		{
+			/*
+				solve system equation for descent direction
+			*/
+			matrix_type tmp=(m_gradient * -1.0 );
+			linSolve(m_IterationMatrix, m_hk, tmp );
+
+			/*
+				direction orthogonal to gradient? -> use gradient
+			*/
+			if(fabs((!m_hk * m_gradient)[0][0]) <= 1.0e-3)
+			{
+				m_hk = -m_gradient;
+			}
+		}
+		else
+		{
+			m_hk = -m_gradient;
+		}
+	}
+
+	if(m_loger)
+		m_loger->logTrace("leaving MatrixIterativeOptimizer::computeDescentDirection ... \n");
+}
+
+void MatrixIterativeOptimizer::resetMatrix()
+{
+	m_IterationMatrix=matrix_type(m_numberOfParameters,m_numberOfParameters);
+	for(int i =0;i < static_cast<int>(m_numberOfParameters); i++)
+	{
+		m_IterationMatrix[i][i] = 1.0;
+	}
+}
+
+void MatrixIterativeOptimizer::setStepSize(const matrix_type & stepSize)
+{
+	m_stepSize = stepSize;
+}
+
+int MatrixIterativeOptimizer::optimize()
+{
+	if(m_loger)
+		m_loger->logTrace("entering MatrixIterativeOptimizer:optimize ... \n");
+
+	init();
+
+	bool abort = false;
+	
+	
+	/******************************
+		start time criteria
+	******************************/
+	m_startTime = clock();
+
+	/******************************
+		compute start value 
+		and first gradient
+	******************************/
+	m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+	m_gradient = (m_analyticalGradients == true && 
+				 (m_costFunction->hasAnalyticGradient() == true) ) ?
+							getAnalyticalGradient(m_parameters) : 
+							getNumericalGradient(m_parameters, m_stepSize);
+
+
+	m_hk = m_gradient * -1.0;
+
+	matrix_type hk_norm;
+	double factor = m_hk.Norm(0);
+	if(fabs(factor) > 1.0e-12)
+	{
+		hk_norm = m_hk * (1.0/ factor); 
+	}
+	matrix_type grad_norm;
+	double factor2 = m_gradient.Norm(0);
+	if(fabs(factor2) > 1.0e-12)
+	{
+		grad_norm = m_gradient * (1.0/ factor2); 
+	}
+
+	/*******************************
+		optimize step length
+	*******************************/
+	/// ARMIJO
+	//m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
+	m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
+	m_lambdak = m_lin.optimize();
+	if(m_verbose == true)
+	{
+		std::cout << "stepsize lambda: " << m_lambdak << std::endl;
+		std::cout << "in direction: " << std::endl;
+		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+		{
+			std::cout<< hk_norm[r][0] << " ";
+		}
+		std::cout << std::endl;
+			
+	}
+
+	
+	/*******************************
+		update the parameters
+	*******************************/
+	m_oldParams = m_parameters;
+	m_parameters = m_parameters + hk_norm * m_lambdak;
+
+
+	/******************************
+		check abort criteria 
+		   for gradient
+	******************************/	
+	if(m_gradientTolActive)
+	{
+		if(m_gradient.Norm(0) < m_gradientTol){
+			m_returnReason = SUCCESS_GRADIENTTOL;
+			abort = true;
+		}
+	}	
+
+
+	/* do the optimization loop */
+	while(abort == false)
+	{
+		/******************************
+			increase iteration 
+			      counter
+		******************************/
+		m_numIter++;
+		
+
+		/******************************
+			print the current iteration
+		******************************/
+
+		if(m_verbose == true)
+		{
+			std::cout << "MatrixIterativeOptimizer :: Iteration: " << m_numIter << " : current parameters : ";
+			for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+			{
+				std::cout<< m_parameters[r][0] << "\t ";
+			}
+			std::cout << "    value:   " << m_currentCostFunctionValue << std::endl;
+		}
+
+
+		/******************************
+			Check iteration limit
+		******************************/
+		if(m_maxNumIterActive)
+		{
+			if(m_numIter >= m_maxNumIter)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "MatrixIterativeOptimizer :: aborting because of numIter" << std::endl;
+				}
+
+
+				/* set according return status and return */
+				m_returnReason = SUCCESS_MAXITER;
+				abort = true;
+				continue;
+			}
+		}
+
+		/******************************
+			get the new Gradient
+		******************************/
+		m_oldGradient	= m_gradient;
+		m_gradient =	(m_analyticalGradients == true && 
+						(m_costFunction->hasAnalyticGradient() == true) ) ?
+							getAnalyticalGradient(m_parameters) : 
+							getNumericalGradient(m_parameters, m_stepSize);
+
+
+		/******************************
+			check gradient tol
+		******************************/
+		if(m_gradientTolActive)
+		{
+			if(m_gradient.Norm(0) < m_gradientTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "MatrixIterativeOptimizer :: aborting because of GradientTol" << std::endl;
+				}
+
+				m_returnReason = SUCCESS_GRADIENTTOL;
+				abort = true;
+				continue;
+			}
+		}
+
+		/******************************
+		      Update the Iteration 
+			       Matrix
+		******************************/
+		//matrix_type oldMatrix	= m_IterationMatrix;
+		updateIterationMatrix();
+
+		/******************************
+			compute a descent 
+			    direction
+		******************************/
+		computeDescentDirection();
+
+
+		// check if descent direction is really a descent direction
+		// if not: reset the iteration matrix and take
+		// the gradient as iteration direction
+		if( (!m_hk *m_gradient)[0][0] > 0.0)
+		{
+			std::cout << "MatrixIterativeOptimizer :: resetting the iteration matrix" << std::endl;
+			resetMatrix();
+			m_hk=m_gradient * -1.0;
+		}
+	
+
+
+		double factor = m_hk.Norm(0);
+		if(fabs(factor) > 1.0e-12)
+		{
+			hk_norm = m_hk * (1.0/ factor); 
+		}
+		matrix_type grad_norm;
+		double factor2 = m_gradient.Norm(0);
+		if(fabs(factor2) > 1.0e-12)
+		{
+			grad_norm = m_gradient * (1.0/ factor2); 
+		}
+
+		/*******************************
+			optimize step length
+		*******************************/
+		//m_lin.setXkGkDk(m_parameters, m_gradient, hk_norm);
+		m_lin.setXkGkDk(m_parameters, grad_norm, hk_norm);
+		m_lambdak = m_lin.optimize();
+		
+
+		
+		/*******************************
+			update the parameters
+		*******************************/
+		m_oldParams = m_parameters;
+		m_parameters = m_parameters + hk_norm * m_lambdak;
+		if(m_verbose == true)
+		{
+			//matrix_type hk_normu=S2U(hk_norm);
+			std::cout << "MatrixIterativeOptimizer :: Iterating with lambda = " << m_lambdak << std::endl;
+
+			std::cout << "MatrixIterativeOptimizer :: in direction : ";
+			for(int mu = 0; mu < static_cast<int>(m_numberOfParameters); mu++)
+			{
+				//std::cout<< hk_normu[mu][0] << "\t ";
+				std::cout<< hk_norm[mu][0] << "\t ";
+			}
+			std::cout << std::endl;
+
+			//matrix_type m_gradientu=S2U(m_gradient);
+			std::cout << "MatrixIterativeOptimizer :: Gradient was ";
+			for(int nu = 0; nu < static_cast<int>(m_numberOfParameters); nu++)
+			{
+				//std::cout<< m_gradientu[nu][0] << "\t ";
+				std::cout<< m_gradient[nu][0] << "\t ";
+			}
+			std::cout << std::endl;
+			
+		}
+
+			
+				
+		/*******************************
+			Check whether the new
+				parameters are in bounds;
+				also check paramTol,
+				funcTol,
+				maxSeconds
+		*******************************/
+		if(!checkParameters(m_parameters))
+		{
+			if(m_verbose == true)
+			{
+				std::cout << "MatrixIterativeOptimizer :: aborting because of parameter Bounds" << std::endl;
+			}
+
+			/* set according return status and the last parameters and return */
+			m_returnReason = ERROR_XOUTOFBOUNDS;
+			m_parameters = m_oldParams;
+			abort = true;
+			continue;
+		}
+
+		/*
+			Get new Costfunction Value
+		*/
+		double oldFuncValue		= m_currentCostFunctionValue;
+		m_currentCostFunctionValue = evaluateCostFunction(m_parameters);
+
+
+		/*
+			Check ParamTol
+		*/
+		if(m_paramTolActive)
+		{
+			if( (m_parameters - m_oldParams).Norm(0) < m_paramTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "MatrixIterativeOptimizer :: aborting because of param Tol" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_PARAMTOL;
+				/*m_parameters = oldParams;*/
+				abort = true;
+				continue;
+			}
+
+		}
+	
+		if(m_funcTolActive)
+		{
+			if( fabs(m_currentCostFunctionValue - oldFuncValue) < m_funcTol)
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "MatrixIterativeOptimizer :: aborting because of Func Tol" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_FUNCTOL;
+				abort = true;
+				continue;
+			}
+		}
+
+		/*
+			Check Optimization Timilimit, if active
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+		
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "MatrixIterativeOptimizer :: aborting because of Time Limit" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				m_parameters = m_oldParams;
+				abort = true;
+				continue;
+			}
+		}
+
+	}
+
+	if(m_verbose)
+	{
+		std::cout << "MatrixIterativeOptimizer :: RESULT: ";
+		for(int r = 0; r < static_cast<int>(m_numberOfParameters); r++)
+		{
+			std::cout<< m_parameters[r][0] << " ";
+		}
+		std::cout << "    value:   " << m_currentCostFunctionValue  << std::endl;
+	}
+
+	if(m_loger)
+		m_loger->logTrace("leaving MatrixIterativeOptimizer:optimize ... \n");
+	
+	return m_returnReason;
+
+}

+ 150 - 0
MatrixIterativeOptimizer.h

@@ -0,0 +1,150 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	MatrixIterativeOptimizer.h: interface of the MatrixIterativeOptimizer class
+//
+//	Written by: Matthias Wacker
+//
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _MATRIX_ITERATIVE_OPTIMIZER_H_
+#define _MATRIX_ITERATIVE_OPTIMIZER_H_
+
+#include "optimization/DerivativeBasedOptimizer.h"
+#include "optimization/GoldenCutLineSearcher.h"
+//#include "optimization/BrentLineSearcher.h"
+#include "optimization/ArmijoLineSearcher.h"
+
+/*!
+	Abstract base class of all matrix-iteration based optimizers.
+
+	Methods like the Newton method or BFGS inherit from this class.
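+
+	Iteration scheme (see optimize()): solve  B_k * h_k = -grad f(x_k)  for
+	the descent direction h_k, normalize h_k, choose the step length lambda_k
+	by an Armijo line search, and set  x_(k+1) = x_k + lambda_k * h_k.
+	Subclasses define the matrix B_k via updateIterationMatrix().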
+ */
+class MatrixIterativeOptimizer : public DerivativeBasedOptimizer
+{
+public:
+		typedef  DerivativeBasedOptimizer	SuperClass;	
+		typedef  SuperClass::matrix_type	matrix_type;	
+
+	    /*!
+			Constructor.
+			\param loger : OptLogBase * to existing log class
+		*/
+		MatrixIterativeOptimizer(OptLogBase *loger);
+
+		/*!
+			Copy constructor
+			\param opt .. optimizer to copy
+		*/
+		MatrixIterativeOptimizer( const MatrixIterativeOptimizer &opt);
+
+
+		/*!
+			operator =
+		*/
+		MatrixIterativeOptimizer & operator=(const MatrixIterativeOptimizer &opt);
+
+
+		/*!
+				Destructor.
+		 */
+		virtual ~MatrixIterativeOptimizer();
+
+		/*!
+			\brief Set the initial step size
+			The initial stepsize is used to give the optimizer an initial value for the order of 
+			magnitude to start with.
+			(e.g. step 100000 in x direction or 0.01 ?)
+			\param stepSize with the step size for the i-th dimension in the (i,0)-th position.
+		*/
+		void setStepSize(const matrix_type & stepSize);
+		
+
+		/*!
+			\brief access to the internal line searcher, e.g. to set its options
+		
+		*/
+		ArmijoLineSearcher * getLineSearcher(){return &m_lin;};
+
+		/*!
+			initialize
+		*/
+		void init();
+
+		/*!
+			optimize
+		*/
+		int optimize();
+
+protected:
+		
+
+		/*!
+			\brief update the Iteration Matrix
+		*/
+		virtual void updateIterationMatrix() = 0;
+
+		/*!
+			the iteration matrix
+		*/
+		matrix_type m_IterationMatrix;
+
+		/*!
+			\brief compute the descent direction
+		*/
+		void computeDescentDirection();
+		
+		/*!
+			the descent direction
+		*/
+		matrix_type m_hk;
+
+		/*!
+			\brief compute the steplength
+		*/
+		void ComputeStepLength();
+
+
+		/*!
+			\brief reset the matrix
+		*/
+		void resetMatrix();
+
+		/*!
+			the steplength
+		*/
+		double m_lambdak;
+
+		/*!
+			1-dim search to optimize the steplength
+		*/
+		//GoldenCutLineSearcher m_lin;
+		//BrentLineSearcher m_lin;
+		ArmijoLineSearcher m_lin;
+		
+		/*!
+			step size vector
+		*/
+		matrix_type m_stepSize;
+
+		/*
+			needed for updates
+		*/
+		matrix_type m_oldParams;
+
+		/*
+			needed for updates
+		*/
+		matrix_type m_oldGradient;
+
+		//double m_lin_Lower;
+		//double m_lin_Upper;
+		//double m_lin_Eps;
+
+
+		
+};
+
+#endif

+ 50 - 0
NewtonMethodOptimizer.cpp

@@ -0,0 +1,50 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	NewtonMethodOptimizer.cpp: Implementation of the
+//								  NewtonMethodOptimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/NewtonMethodOptimizer.h"
+
+NewtonMethodOptimizer::NewtonMethodOptimizer(OptLogBase *loger) : SuperClass(loger)
+{
+}
+
+NewtonMethodOptimizer::NewtonMethodOptimizer( const NewtonMethodOptimizer &opt) : SuperClass(opt)
+{	
+}
+
+
+NewtonMethodOptimizer::~NewtonMethodOptimizer()
+{
+}
+
+
+
+void NewtonMethodOptimizer::init()
+{
+	SuperClass::init();
+}
+
+
+
+
+
+void NewtonMethodOptimizer::updateIterationMatrix()
+{
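+	// Newton step: with the Hessian as iteration matrix, computeDescentDirection()
+	// solves  H(x_k) * h_k = -grad f(x_k)  for the search direction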
+	if(m_costFunction->hasAnalyticHessian() )
+	{
+		m_IterationMatrix = getAnalyticalHessian(m_parameters);
+
+	}
+	else
+	{
+		std::cerr << "NewtonMethodOptimizer::updateIterationMatrix(): the given cost function has no analytic Hessian - you cannot use this optimizer with it" << std::endl;
+		if(m_loger)
+			m_loger->logError("NewtonMethodOptimizer::updateIterationMatrix: the given cost function has no analytic Hessian - you cannot use this optimizer with it");
+		assert(false);
+	}
+}

+ 85 - 0
NewtonMethodOptimizer.h

@@ -0,0 +1,85 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	NewtonMethodOptimizer.h: interface of the NewtonMethodOptimizer class
+//
+//	Written by: Matthias Wacker
+//
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _NEWTON_METHOD_OPTIMIZER_H_
+#define _NEWTON_METHOD_OPTIMIZER_H_
+
+#include "optimization/MatrixIterativeOptimizer.h"
+
+
+/*!
+	class for the Newton method
+
+   Note: your objective function has to have analytical Hessian computation implemented!
+
+   HowToUse:
+	
+	  * use setStepSize to specify the initial stepsize to compute the numerical gradient
+	  * use setParameters() to set the start point
+	  * call init()
+	  * call optimize()
+
+
+ 	Implemented Abort criteria:
+		
+	  * maximum number of iterations
+	  *	time limit
+	  * parameter bounds
+	  * function value tolerance
+	  * parameter tolerance
+
+	Additional return reason:
+	
+ */
+class NewtonMethodOptimizer : public MatrixIterativeOptimizer
+{
+public:
+		typedef MatrixIterativeOptimizer	SuperClass;	
+		typedef	SuperClass::matrix_type	matrix_type;
+
+	   	///
+		///	Constructor.
+		///	\param loger : OptLogBase * to existing log class
+		///
+		NewtonMethodOptimizer(OptLogBase *loger);
+
+		///
+		///	Copy constructor
+		///	\param opt .. optimizer to copy
+		///
+		NewtonMethodOptimizer( const NewtonMethodOptimizer &opt);
+
+
+		///
+		///	operator =
+		///
+		NewtonMethodOptimizer & operator=(const NewtonMethodOptimizer &opt);
+
+
+	        ///
+		///		Destructor.
+	        ///
+	        virtual ~NewtonMethodOptimizer();
+
+		///
+		///	internal initializations
+		///
+		void init();
+
+protected:
+		
+
+		/*!
+			\brief update the Iteration Matrix
+		*/
+		void updateIterationMatrix() ;
+	
+};
+
+#endif

+ 65 - 0
OptLogBase.cpp

@@ -0,0 +1,65 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	OptLogBase.cpp: Implementation of the Log class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/OptLogBase.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+
+OptLogBase::OptLogBase()
+{
+	m_nMessageBufferSize = 8 * 1024;
+	m_pMessageBuffer = new char[m_nMessageBufferSize];
+}
+
+OptLogBase::~OptLogBase()
+{
+	delete[] m_pMessageBuffer;
+}
+
+void OptLogBase::logError(const char* format,...)
+{
+	va_list arguments;
+	va_start(arguments,format);
+	//_vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	va_end(arguments);
+
+	m_pMessageBuffer[m_nMessageBufferSize - 1] = '\0';
+	writeLogError(m_pMessageBuffer);
+}
+
+void OptLogBase::logWarning(const char* format,...)
+{
+	va_list arguments;
+	va_start(arguments,format);
+	//_vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	va_end(arguments);
+
+	m_pMessageBuffer[m_nMessageBufferSize - 1] = '\0';
+	writeLogWarning(m_pMessageBuffer);
+}
+
+void OptLogBase::logTrace(const char* format,...)
+{
+	va_list arguments;
+	va_start(arguments,format);
+	//_vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	vsnprintf(m_pMessageBuffer, m_nMessageBufferSize, format, arguments);
+	va_end(arguments);
+
+	m_pMessageBuffer[m_nMessageBufferSize - 1] = '\0';
+	writeLogTrace(m_pMessageBuffer);
+}
+
+
+void OptLogBase::init()
+{
+}

+ 85 - 0
OptLogBase.h

@@ -0,0 +1,85 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	OptLogBase.h: interface of the Log class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _OPT_LOG_BASE_H_
+#define _OPT_LOG_BASE_H_
+
+#include <string>
+#include <sstream>
+#include "optimization/Opt_Namespace.h"
+
+
+/*!
+    base class for all log classes
+*/
+
+class OptLogBase
+{
+	public:
+		
+        /*!
+            Constructor.
+		*/
+		OptLogBase();
+        
+        /*!
+            Destructor.
+         */
+        virtual ~OptLogBase();
+		
+		/*!
+			Logs an error
+		*/	
+		void logError(const char* format,...);
+
+		/*!
+			Logs a warning
+		*/	
+		void logWarning(const char* format,...);
+
+		/*!
+			Logs a trace message
+		*/	
+		void logTrace(const char* format,...);
+
+		/**! Write parameter vector to output device (file, stdio, etc.)
+        *
+        * @param parammatrix parameter matrix
+        */
+        // empty for all loggers except the parameter logger
+        virtual void writeParamsToFile(optimization::matrix_type& parammatrix)
+        {
+        }
+
+		void init();
+
+	protected:
+
+		/**! Write error message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogError(const char* szMessage) = 0;
+
+		/**! Write warning message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogWarning(const char* szMessage) = 0;
+
+		/**! Write trace message to an output device (file, stdio, etc.)
+		 */
+		virtual void writeLogTrace(const char* szMessage) = 0;
+
+	private:
+
+		//! Character buffer used to format messages to be logged
+		char* m_pMessageBuffer;
+
+		//! Size of the message format buffer
+		int m_nMessageBufferSize;
+		
+};
+
+#endif

+ 202 - 0
OptTestSuite.cpp

@@ -0,0 +1,202 @@
+#include "OptTestSuite.h"
+#include "Opt_Namespace.h"
+//
+//
+//
+
+#include "DownhillSimplexOptimizer.h"
+#include "GradientDescentOptimizer.h"
+#include "BFGSOptimizer.h"
+#include "AdaptiveDirectionRandomSearchOptimizer.h"
+#include "CombinatorialOptimizer.h"
+#include "AdditionalIceUtils.h"
+#include "CostFunction_ndim_2ndOrder.h"
+
+using namespace optimization;
+#include <cstdlib> // rand
+//#include <cmath> 
+#include <time.h> // time for srand
+double getRand()
+{
+	return static_cast<double>(rand())/static_cast<double>(RAND_MAX);
+}
+
+void OptTestSuite::runAllTests()
+{
+	// seed for test
+	srand ( time(NULL) );
+
+	cout << "beginning runAllTests()"<< endl;
+
+	// run simple opt test grid
+	runSimpleOptTestGrid();	
+
+	// any other test: to be continued ...
+
+}
+
+
+void OptTestSuite::runSimpleOptTestGrid()
+{
+	cout << "beginning runSimpleOptTestGrid" << endl;
+	vector<CostFunction*> cfns;
+	vector<Optimizer*> opts;
+
+	SimpleOptTestGrid testGrid;
+	
+	// insert optimizers
+	insertOptsInGrid(testGrid,opts);
+
+	// insert optprobs
+	insertOptProbsInGrid(testGrid,cfns);
+	
+	// run
+	cout << "running tests"<< endl;
+	testGrid.test();
+	cout << "finished tests" << endl;
+
+	// delete the optimizers and costfunctions!
+	for (int i =0; i < static_cast<int>(opts.size());++i)
+	{
+		//cout << "deleting" << i << endl;
+		delete opts[i];
+	}
+	opts.clear();
+	for (int i =0; i < static_cast<int>(cfns.size());++i)
+	{
+		//cout << "deleting" << i << endl;
+		delete cfns[i];
+	}
+	cfns.clear();
+
+	cout << "finished runSimpleOptTestGrid" << endl;
+}
+
+
+
+
+void OptTestSuite::insertOptsInGrid(SimpleOptTestGrid &testGrid, vector<Optimizer*> &opts )
+{
+	cout << "insert opts" << endl;
+
+	// 0
+	// DownhillSimplexOptimizer
+	DownhillSimplexOptimizer *dho= new DownhillSimplexOptimizer();
+	dho->setMaxNumIter(true, 5000); // 0-order info
+	//dho->setParamTol(true, 1.0e-8);
+	testGrid.addSimpleOptimizer(dho);
+	opts.push_back(dho);
+
+	// 1
+	// GradientDescentOptimizer
+	GradientDescentOptimizer *gdo= new GradientDescentOptimizer();
+	gdo->setMaxNumIter(true, 5000); // 1st-order info
+	gdo->setParamTol(true, 1.0e-8);
+	//gdo->setVerbose(true);
+	testGrid.addSimpleOptimizer(gdo);
+	opts.push_back(gdo);
+
+	// 2 
+	// BFGSOptimizer
+	BFGSOptimizer *bfgso = new BFGSOptimizer();
+	bfgso->setMaxNumIter(true,1000); // 1+ order: approximating 2nd order info
+	bfgso->setParamTol(true, 1.0e-8);
+	//bfgso->setLineSearchOptions(0.0,1.0,1.0e-8);
+	//bfgso->setVerbose(true);
+	testGrid.addSimpleOptimizer(bfgso);
+	opts.push_back(bfgso);
+
+
+	// 3
+	//AdaptiveDirectionRandomSearchOptimizer
+	AdaptiveDirectionRandomSearchOptimizer *adrso = new AdaptiveDirectionRandomSearchOptimizer(5); // 0-order random info
+	adrso->setMaxNumIter(true,50000);
+	testGrid.addSimpleOptimizer(adrso);
+	opts.push_back(adrso);
+
+	// 4
+	// CombinatorialOptimizer
+	CombinatorialOptimizer *co = new CombinatorialOptimizer();
+	co->setMaxNumIter(true,250000);
+	testGrid.addSimpleOptimizer(co);
+	opts.push_back(co);
+
+
+
+}
+
+
+void OptTestSuite::insertOptProbsInGrid(SimpleOptTestGrid &testGrid, vector<CostFunction*> &cfns)
+{
+	cout << "insert opt probs" << endl;
+
+	int dim=0;
+
+	// 0
+	dim=1;
+	CostFunction_ndim_2ndOrder *secOrder1 = new CostFunction_ndim_2ndOrder(dim);
+
+	cfns.push_back(secOrder1);
+	matrix_type A(dim,dim);	A[0][0]= 3;
+	matrix_type b(dim,1); b[0][0]=1;
+	double c=14.378; secOrder1->setAbc(A,b,c);
+	
+	matrix_type secOrder1_solution(dim,1); secOrder1_solution[0][0]=-2.0/6.0; double secOrder1_succdist=1.0e-3;
+
+	matrix_type secOrder1_inits(dim,1); secOrder1_inits[0][0]=-15.789;
+	matrix_type secOrder1_scales(dim,1); secOrder1_scales[0][0]=1.0;
+	SimpleOptProblem secOrderProb1(secOrder1,secOrder1_inits,secOrder1_scales);
+	testGrid.addSimpleOptProblem( secOrderProb1, secOrder1_solution, secOrder1_succdist);
+
+
+	// 1
+	dim=15;
+	CostFunction_ndim_2ndOrder *secOrder2 = new CostFunction_ndim_2ndOrder(dim);
+	cfns.push_back(secOrder2);
+	matrix_type A2(dim,dim);
+	matrix_type b2(dim,1);
+	matrix_type secOrder2_inits(dim,1);
+	matrix_type secOrder2_scales(dim,1); 
+	for(int i=0; i < dim;++i)
+	{
+		for(int j=i;j< dim;++j)
+		{
+			if(i == j)
+			{
+				A2[i][j]=dim+getRand()*200;
+			}
+			else
+			{
+				A2[i][j]=getRand();
+				A2[j][i]=A2[i][j];
+			}
+		}
+		b2[i][0]=(getRand()-0.5) *dim ;
+		secOrder2_inits[i][0]=(getRand()-0.5) *dim;
+		secOrder2_scales[i][0]=1.0;
+	}
+	//A2 = A2 * (!A2);
+	double c2=1.23546; secOrder2->setAbc(A2,b2,c2);
+
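+	// the analytic minimizer: assuming CostFunction_ndim_2ndOrder implements
+	// f(x) = 0.5*x'Ax + b'x + c (consistent with the 1-d solution -b/A above),
+	// the gradient A2*x + b2 vanishes where A2*x = -b2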
+	matrix_type secOrder2_solution(dim,1);
+	matrix_type mb2=-b2;
+	linSolve(A2, secOrder2_solution, mb2);
+	double secOrder2_succdist=1.0e-3;
+	// test if matrix is spd
+	matrix_type L;
+	matrix_type V;
+	getEig(A2, L, V);
+
+	//cout << "Eigenvals: " << L << endl;
+	//cout << "A2 " << endl << A2 << endl;
+	//cout << "b2 " << endl << b2 << endl;
+	//cout << "init " << endl << secOrder2_inits << endl;
+	//cout << "sol " << endl << secOrder2_solution << endl;
+
+	SimpleOptProblem secOrderProb2(secOrder2,secOrder2_inits,secOrder2_scales);
+	testGrid.addSimpleOptProblem( secOrderProb2, secOrder2_solution, secOrder2_succdist);
+
+}
+
+
+

+ 50 - 0
OptTestSuite.h

@@ -0,0 +1,50 @@
+/// @file: 	optimization/OptTestSuite.h
+/// @author:	Matthias Wacker
+/// @date:	2009-10-05
+///
+///	
+///
+///	
+
+#ifndef _OPTTESTSUITE_H_
+#define _OPTTESTSUITE_H_
+
+#include "SimpleOptTestGrid.h"
+
+///
+///	class that implements a test suite for the optimization lib
+///
+class OptTestSuite 
+{
+public:
+
+	///
+	/// Default constructor
+	///
+	OptTestSuite(){};
+
+	///
+	/// Default destructor
+	///
+	~OptTestSuite(){};
+
+	///
+	/// runs all implemented tests, including output 
+	///
+	///
+	void runAllTests();
+
+private:
+
+	/// run simple opt test grid
+	void runSimpleOptTestGrid();
+
+	/// insert optimizers
+	void insertOptsInGrid(SimpleOptTestGrid &testGrid, vector<Optimizer*> &opts);
+
+	/// insert opt probs
+	void insertOptProbsInGrid(SimpleOptTestGrid &testGrid, vector<CostFunction*> &cfns);
+
+};
+
+#endif // _OPTTESTSUITE_H_

+ 23 - 0
Opt_Namespace.h

@@ -0,0 +1,23 @@
+/*!
+	Namespace definition
+
+*/
+
+#ifndef _OPT_NAMESPACE_
+#define _OPT_NAMESPACE_
+
+// FIXME include ICE  Matrix
+
+//#include <matrix.h>
+//#include <image.h>
+#include <MatrixO.h>
+//using namespace ice;
+
+namespace optimization
+{
+
+	typedef ice::Matrix matrix_type; // fixme
+};
+
+
+#endif

+ 61 - 0
Optimizable.cpp

@@ -0,0 +1,61 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	Optimizable.cpp: implementation of the Optimizable class.
+//
+//	Written By: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/Optimizable.h"
+
+using namespace optimization;
+
+Optimizable::Optimizable() : m_numOfParameters(0)
+{
+}
+
+Optimizable::Optimizable(unsigned int numOfParameters) : m_numOfParameters(numOfParameters)
+{
+}
+
+Optimizable::Optimizable(const Optimizable &optimizable)
+{
+	m_numOfParameters = optimizable.m_numOfParameters;
+}
+
+Optimizable::~Optimizable()
+{
+}
+
+Optimizable &Optimizable::operator=(const Optimizable &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		/*
+			own values:
+		*/
+		m_numOfParameters = opt.m_numOfParameters;
+	
+	}
+
+  	return *this;
+}
+
+matrix_type Optimizable::evaluateSet(const matrix_type &parameterSet)
+{
+	unsigned int iMax = parameterSet.cols();
+	matrix_type result(parameterSet.cols(),1);
+
+	for(unsigned int i = 0; i < iMax; i++)
+	{
+		//result(i) = this->evaluate(parameterSet.Sub(m_numOfParameters,1,0,i));
+		result[i][0] = this->evaluate(parameterSet(0,i,m_numOfParameters-1,i));
+	}
+
+	return result;
+}

+ 81 - 0
Optimizable.h

@@ -0,0 +1,81 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	Optimizable.h: interface of the Optimizable class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _OPTIMIZABLE_H_
+#define _OPTIMIZABLE_H_
+
+#include "optimization/Opt_Namespace.h"
+
+namespace opt=optimization;
+
+/*!
+    \class Optimizable
+    Abstract base class for objective functions: stores the number of
+    parameters and declares the evaluation interface.
+ */
+class Optimizable
+{
+	public:
+		
+	/*!
+		default Constructor
+	*/
+	Optimizable();
+
+        /*!
+            Constructor.
+         */
+        Optimizable(unsigned int numOfParameters);
+
+	/*!
+			Copy Constructor
+	*/
+        Optimizable(const Optimizable &optimizable);
+
+        /*!
+            Destructor.
+         */
+        virtual ~Optimizable();
+
+	/*!
+		operator=
+	*/
+	Optimizable & operator=(const Optimizable &opt);
+		
+	/*!
+		get Number of Parameters
+	*/
+	inline unsigned int getNumOfParameters(){return m_numOfParameters;};
+
+	/*!
+	    Evaluation of objective function.
+	    \param parameter double matrix (numOfParameters X 1) ^= column vector
+	    \return objective function value
+	 */
+       virtual double evaluate(const opt::matrix_type &parameter) = 0;
+
+	/*!
+		Evaluation of objective function 
+		\param parameterset [x_1, x_2, x_3, .... ,x_n]
+		\return doubleMatrix containing 
+				[evaluate(x_1), .... evaluate(x_n)];
+		
+		 can be overloaded for parallel computation
+
+	*/
+	opt::matrix_type evaluateSet(const opt::matrix_type &parameterSet);
+
+	protected:
+		
+
+		/*!
+			the number of parameters
+		*/
+		unsigned int m_numOfParameters;
+
+};
+
+#endif

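For illustration, a minimal concrete subclass of this interface; the class below is hypothetical and only assumes the matrix indexing conventions used in Optimizable.cpp above:

	#include "optimization/Optimizable.h"

	/// f(x) = sum_i (x_i - 1)^2, minimum at x = (1,...,1)
	class SquaredDistanceObjective : public Optimizable
	{
	public:
		SquaredDistanceObjective(unsigned int dim) : Optimizable(dim) {}

		double evaluate(const opt::matrix_type &parameter)
		{
			double sum = 0.0;
			for(unsigned int i = 0; i < m_numOfParameters; i++)
			{
				double d = parameter[i][0] - 1.0;	// parameter is a (n x 1) column vector
				sum += d * d;
			}
			return sum;
		}
	};

With this, evaluateSet() works out of the box: each column of the parameter set is passed through evaluate() and the results come back as a column vector.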
+ 3 - 0
OptimizationVersions.txt

@@ -0,0 +1,3 @@
+Tag name	Date		Comment
+
+V1_0		09.03.2010	initial version

+ 260 - 0
Optimizer.cpp

@@ -0,0 +1,260 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	Optimizer.cpp: Implementation of the Optimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/Optimizer.h"
+#include <iostream>
+#include <cassert>
+
+//Optimizer::Optimizer(unsigned int numOfParameters, CostFunction *objectiveFunction, OptLogBase *loger)
+Optimizer::Optimizer(OptLogBase *loger)
+{
+	m_costFunction			= NULL;
+	// may be NULL
+	m_loger				= loger;
+	
+	//  SET defaults
+	m_numberOfParameters		= 0;
+	m_maximize			= false;
+	m_lowerParameterBoundActive 	= false;
+	m_upperParameterBoundActive 	= false;
+	m_funcTolActive			= false;
+	m_paramTolActive		= false;
+	m_maxNumIterActive		= false;
+	m_maxSecondsActive		= false;
+	m_verbose 			= false;
+	m_paramTol	= 0.0001;
+	m_funcTol	= 0.000001;
+}
+
+
+Optimizer::Optimizer( const Optimizer &opt)
+{
+	
+	//  no deep copies!  
+	m_costFunction = opt.m_costFunction;
+	m_loger = opt.m_loger;
+	
+	// 
+	m_numberOfParameters = opt.m_numberOfParameters;
+	m_parameters = opt.m_parameters;
+	m_scales = opt.m_scales;
+	m_upperParameterBound = opt.m_upperParameterBound;
+	m_lowerParameterBound = opt.m_lowerParameterBound;
+	
+	m_funcTol = opt.m_funcTol;
+	m_paramTol = opt.m_paramTol;
+	m_maxNumIter = opt.m_maxNumIter;
+	m_maxSeconds = opt.m_maxSeconds;
+		
+	m_maximize		= opt.m_maximize;
+	m_funcTolActive		= opt.m_funcTolActive;
+	m_paramTolActive	= opt.m_paramTolActive;
+	m_maxNumIterActive	= opt.m_maxNumIterActive;
+	m_maxSecondsActive	= opt.m_maxSecondsActive;
+	m_lowerParameterBoundActive = opt.m_lowerParameterBoundActive;
+	m_upperParameterBoundActive = opt.m_upperParameterBoundActive;
+	m_verbose = opt.m_verbose;
+
+}
+
+Optimizer  & Optimizer::operator=(const Optimizer &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		//
+		//	=operator of SuperClass
+		//
+		//SuperClass::operator=(opt);
+
+		//
+		//	own values:
+		//
+	
+		//  no deep copies!  
+		m_costFunction = opt.m_costFunction;
+		m_loger = opt.m_loger;
+		
+		// 
+		m_numberOfParameters = opt.m_numberOfParameters;
+		m_parameters = opt.m_parameters;
+		m_scales = opt.m_scales;
+		m_upperParameterBound = opt.m_upperParameterBound;
+		m_lowerParameterBound = opt.m_lowerParameterBound;
+		
+		m_funcTol = opt.m_funcTol;
+		m_paramTol = opt.m_paramTol;
+		m_maxNumIter = opt.m_maxNumIter;
+		m_maxSeconds = opt.m_maxSeconds;
+			
+		m_maximize			= opt.m_maximize;
+		m_funcTolActive		= opt.m_funcTolActive;
+		m_paramTolActive		= opt.m_paramTolActive;
+		m_maxNumIterActive	= opt.m_maxNumIterActive;
+		m_maxSecondsActive	= opt.m_maxSecondsActive;
+		m_lowerParameterBoundActive = opt.m_lowerParameterBoundActive;
+		m_upperParameterBoundActive = opt.m_upperParameterBoundActive;
+		m_verbose = opt.m_verbose;
+
+	}
+
+  	return *this;
+}
+
+Optimizer::~Optimizer()
+{
+}
+
+void Optimizer::setMaximize(bool maximize)
+{
+	m_maximize = maximize;
+}
+
+void Optimizer::setParameters(const matrix_type & startParameters)
+{
+	if(checkParameters(startParameters) == true)
+	{
+		m_parameters = startParameters;
+	}
+	else
+	{
+		if(m_loger)
+			m_loger->logError("the parameters that were tried to set are not within the Bounds. Startparameters are set to the lower Bound");
+
+		assert(false); // :)
+		m_parameters = m_lowerParameterBound;
+	}
+}
+
+
+bool Optimizer::checkParameters(const matrix_type & parameters)
+{
+	assert( (parameters.rows() == static_cast<int>(m_numberOfParameters)) && (parameters.cols() == 1 ) );
+	
+	if(m_lowerParameterBoundActive || m_upperParameterBoundActive)
+	{
+		for(int i=0; i < static_cast<int>(m_numberOfParameters); i++)
+		{
+			if( m_upperParameterBoundActive)
+			{
+				if(parameters[i][0] >= m_upperParameterBound[i][0])
+				{
+					return false;
+				}
+			}
+			if( m_lowerParameterBoundActive)
+			{
+				if(parameters[i][0] <= m_lowerParameterBound[i][0])
+				{
+					return false;
+				}
+			}
+		}
+	}
+	return true;
+}
+
+
+void Optimizer::setScales(const matrix_type & scales)
+{
+	assert (scales.rows() == static_cast<int>(m_numberOfParameters) && (scales.cols() == 1 ) );
+
+	m_scales=scales;
+}
+
+void Optimizer::setUpperParameterBound(bool active, const matrix_type & upperBound)
+{
+	m_upperParameterBoundActive = active;
+	m_upperParameterBound = upperBound;
+}
+
+void Optimizer::setLowerParameterBound(bool active, const matrix_type & lowerBound)
+{
+	m_lowerParameterBoundActive = active;
+	m_lowerParameterBound = lowerBound;
+}
+
+void Optimizer::setFuncTol(bool active, double difference)
+{
+	m_funcTolActive = active;
+	m_funcTol = difference;
+}
+
+void Optimizer::setParamTol(bool active, double difference)
+{
+	m_paramTolActive = active;
+	m_paramTol = difference;
+}
+
+void Optimizer::setMaxNumIter(bool active, unsigned int num)
+{
+	m_maxNumIterActive = active;
+	m_maxNumIter = num;
+}
+
+void Optimizer::setTimeLimit(bool active, double seconds)
+{
+	m_maxSecondsActive = active;
+	m_maxSeconds = seconds;
+}
+
+
+void Optimizer::setLoger(OptLogBase *loger)
+{
+	m_loger = loger;
+
+	if(loger)
+	{
+		loger->init();
+	}
+}
+
+void Optimizer::changeCostFunction(CostFunction* costFunction)
+{
+	m_costFunction = costFunction;
+
+	if (m_costFunction)
+	{	
+		m_costFunction->init();
+	}
+}
+
+double Optimizer::evaluateCostFunction(const matrix_type & x)
+{
+	return ( m_maximize == true ? (-1.0 * m_costFunction->evaluate(x)):(m_costFunction->evaluate(x)));
+}
+
+void Optimizer::init()
+{
+	/* check that cost function and loger are set */
+	if( !m_costFunction)
+	{
+		if(m_loger)
+			m_loger->logError("Optimizer init() failed because of missing costfunction");
+		assert(false); // :) 
+	}
+	else
+	{
+		m_costFunction->init();
+	}
+
+	m_numIter = 0;
+
+}
+
+/*!
+	function that calls costfunction->evaluate for every column in x and inverts the sign if necessary
+*/
+Optimizer::matrix_type Optimizer::evaluateSetCostFunction(const matrix_type & xSet)
+{
+	return ( m_maximize == true ? (m_costFunction->evaluateSet(xSet) * -1.0):(m_costFunction->evaluateSet(xSet)));
+}

+ 389 - 0
Optimizer.h

@@ -0,0 +1,389 @@
+///
+///	@file	Optimizer.h: interface of the Optimizer class.
+///	@author	Matthias Wacker, Esther Platzer
+///
+
+#ifndef _OPTIMIZER_H_
+#define _OPTIMIZER_H_
+
+#include "optimization/CostFunction.h"
+#include <time.h>
+#include "optimization/OptLogBase.h"
+#include "optimization/Opt_Namespace.h"
+
+
+/*!
+	Abstract base class of all optimizers.
+ */
+class Optimizer
+{
+	public:
+		typedef optimization::matrix_type matrix_type;
+
+		///
+		///	Constructor (also default constructor)
+		///	@param loger OptLogBase * to existing log class or NULL
+		///
+		Optimizer(OptLogBase *loger=NULL);
+
+		///
+		///	Copy constructor
+		///	@param opt .. optimizer to copy
+		///
+		Optimizer( const Optimizer &opt);
+
+		///
+		///	assignment operator
+		///	@param opt optimizer to "copy"
+		///	@return self-reference
+		///
+		Optimizer  & operator=(const Optimizer &opt);
+
+		///
+		///		Destructor.
+	        ///
+	        virtual ~Optimizer();
+		
+		///
+		///	enumeration for the return reasons of an optimizer
+		///
+		enum {
+			SUCCESS_FUNCTOL,
+			SUCCESS_PARAMTOL,
+			SUCCESS_MAXITER,
+			SUCCESS_TIMELIMIT,
+			ERROR_XOUTOFBOUNDS,
+			ERROR_COMPUTATION_UNSTABLE,
+			_to_continue_
+		};
+
+			
+		///
+		///	setScales means the parameters are handled internally in scaled form to compensate
+		///	for ill-posed parameter relations.
+		///	The scales are used multiplicatively: higher scales mean bigger steps
+		///	(for example, rotation has much more effect than translation,
+		///	so its steps should be much smaller, e.g. use small scales for the
+		///	rotation parameters and higher scales for the translation parameters)
+		///
+		///	@brief Set the scaling vector for the parameters.
+		///	@param scales vector for the scales (i-th scale for i-th parameter dimension)
+		///
+		void setScales(const optimization::matrix_type & scales);
+
+		///	
+		///	@brief Get the scales
+		///	
+		///	@return matrix with the scales
+		///
+		inline const optimization::matrix_type & getScales(){return m_scales;};
+		
+		///
+		///	@brief Set bounds for x
+		///	
+		///	Sets an upper bound for the parameter space. Optimization aborts if it iterates beyond this bound.
+		///
+		void setUpperParameterBound(bool active, const optimization::matrix_type & upperBound);
+		
+		///
+		///	@brief Set bounds for x.
+		///	
+		///	Sets a lower bound for the parameter space. Optimization aborts if it iterates beyond this bound.
+		///
+		void setLowerParameterBound(bool active, const optimization::matrix_type & lowerBound);
+
+		///
+		///	@brief get the upper bound
+		///	@return matrix containing the upper bound
+		///
+		inline const optimization::matrix_type & getUpperParameterBound(){	return m_upperParameterBound;};
+
+
+		///
+		///	@brief get the lower parameter bound
+		///	@return matrix containing the lower bound
+		///
+		inline const optimization::matrix_type & getLowerParameterBound(){return m_lowerParameterBound;};
+
+		///
+		///	@brief Set function tolerance abort criterion
+		///
+		///	While iterating, if the change of the objective function between two iterations
+		///	is below the given difference threshold, the optimization stops and returns
+		///	SUCCESS_FUNCTOL (if active is 'true').
+		///
+		///	@param active bool to activate the criterion (true == active)
+		///	@param difference double to set the threshold
+		///
+		void setFuncTol(bool active, double difference);
+		
+		///
+		///	@brief Get the function tolerance abort criteria
+		///	@return double representing the threshold
+		///
+		inline double getFuncTol(){return m_funcTol;};
+
+		///
+		///	@brief Set parameter tolerance abort criterion
+		///
+		///	While iterating, if the change of the parameters between two iterations
+		///	is below the given difference threshold, the optimization stops and returns
+		///	SUCCESS_PARAMTOL (if active is 'true').
+		///
+		///	@param active : bool to activate the criterion (true == active)
+		///	@param difference : double representing the threshold
+		///
+		void setParamTol(bool active, double difference);
+		
+		///
+		///	@brief Get the parameter tolerance abort criteria
+		///	@return double representing the threshold
+		///
+		inline double getParamTol(){return m_paramTol;};
+	
+		///
+		///	@brief Set maximum number of Iterations
+		///	@param active : bool to enable the criteria
+		///	@param num : unsigned int -> max number of iterations
+		///
+		void setMaxNumIter(bool active, unsigned int num);
+
+		///
+		///	@brief get maximum number of Iterations
+		///	@return unsigned int representing the max number of iterations
+		///
+		inline unsigned int getMaxNumIter(){return m_maxNumIter;};
+
+		///
+		///	@brief get current iteration count
+		///	@return unsigned int representing the current iteration count
+		///
+		inline unsigned int getNumIter(){return m_numIter;};
+
+		///
+		///	@brief set time limit in sec
+		///	@param active: bool to set the timelimit active or not
+		///	@param seconds: double representing the timelimit in seconds
+		///
+		void setTimeLimit(bool active, double seconds);
+
+		///
+		///	@brief get time limit in sec
+		///	@return time limit
+		///
+		inline double getTimeLimit(){return m_maxSeconds;};
+
+		///
+		///	@brief get return reason
+		///	@return returns an int representing the return reason. Compare with enumeration
+		///
+		inline int getReturnReason(){return m_returnReason;};
+
+		///
+		///	@brief setMaximize: 'true' changes optimization to maximization; the default 'false' means minimization.
+		///	@param maximize maximize? 
+		///
+		void setMaximize(bool maximize);
+	
+		///
+		///	@brief set loger
+		///	@param loger OptLogBase * to an instance of a loger
+		///
+		void setLoger(OptLogBase *loger);
+
+		///
+		///	@brief setVerbose = true to enable a lot of outputs 
+		///
+		inline void setVerbose(bool verbose){m_verbose = verbose;};
+	
+	protected:
+		//	pure virtual definitions
+		
+		///
+		///	@brief virtual function that will start the optimization
+		///	@return integer representing abortion status from enum
+        	///
+		virtual int optimize() = 0;
+
+		///
+		///	@brief Initializations
+		///
+		void init();
+
+		//	end of pure virtual definitions
+
+				
+		///
+		///	@brief Checks if parameters are valid
+		///	@param parameters : parameters to check.
+		///
+		bool checkParameters(const optimization::matrix_type & parameters);
+		
+		///
+		///	@brief Get the last parameters
+		///	@return optimization::matrix_type containing the last parameters used in Optimization
+		///
+		inline optimization::matrix_type  getLastParameters(){return m_parameters;}
+
+		///
+		///	 @brief get number of parameters
+		///
+		inline unsigned int getNumberOfParameters(){return m_numberOfParameters;}
+
+			
+		///
+		///	@brief Sets parameters used as initial point for Optimization
+		///	@param startParameters :  used as initial point
+		///
+		void setParameters(const optimization::matrix_type & startParameters);
+	
+	
+		///
+		///	@brief get loger
+		///	@return pointer to the loger
+		///
+		inline OptLogBase * getLoger(){return m_loger;};
+
+		///
+		///	@brief Change cost function.
+		///
+		void changeCostFunction(CostFunction* costFunction);
+
+		//////////////////////////////////////////
+		//					//
+		//	 member definitions		//
+   		//					//
+   		//////////////////////////////////////////
+
+	        ///
+		///	pointer to cost function
+		///
+		CostFunction* m_costFunction;
+
+		///
+		///	number of parameters to be optimized
+		///
+		unsigned int m_numberOfParameters;
+		
+		///
+		///	parameter vector
+		///
+		optimization::matrix_type   m_parameters;
+
+		///
+		///	function value corresponding to m_parameters
+		///
+		double m_currentCostFunctionValue;
+
+		///
+		///	scales vector
+		///
+		optimization::matrix_type   m_scales;
+
+		///
+		///	parameters tolerance threshold
+		///
+		double m_paramTol;
+
+		///
+		///	parameters tolerance active
+		///
+		bool m_paramTolActive;
+
+		///
+		///	function tolerance threshold
+		///
+		double m_funcTol;
+
+		///
+		///	function tolerance active
+		///
+		bool m_funcTolActive;
+				
+		///
+		///	Maximum number of Iterations
+		///
+		unsigned int m_maxNumIter;
+
+		///
+		///	Iteration limit active
+		///
+		bool m_maxNumIterActive;
+
+		///
+		///	Iteration counter
+		///
+		unsigned int m_numIter;
+
+		///
+		///	Time limit in seconds
+		///
+		double m_maxSeconds;
+
+		///
+		///	Time limit active
+		///
+		bool m_maxSecondsActive;
+
+		///
+		///	start time struct
+		///
+		clock_t m_startTime;
+
+		///
+		///	current time struct
+		///
+		clock_t m_currentTime;
+
+		///
+		///	container to store the return reason until it is returned from optimize()
+		///
+		int m_returnReason ;
+		
+		///
+		///	upper parameter bound
+		///
+		optimization::matrix_type  m_upperParameterBound;
+		///
+		///	upper parameter bound active
+		///
+		bool m_upperParameterBoundActive;
+	
+		///
+		///	lower parameter bound
+		///
+		optimization::matrix_type  m_lowerParameterBound;
+		///
+		///	lower parameter bound active
+		///
+		bool m_lowerParameterBoundActive;
+
+		///
+		///	maximize
+		///
+		bool m_maximize;
+
+		///
+		///	log class OptLogBase
+		///
+		OptLogBase *m_loger;
+		
+		///
+		///	bool to enable a bunch of outputs 
+		///
+		bool m_verbose;
+
+		///
+		///	function that calls costfunction->evaluate but inverts the sign if necessary (min or max)
+		///
+		double evaluateCostFunction(const optimization::matrix_type & x);
+
+		///
+		///	function that calls costfunction->evaluate for every column in x and inverts the sign if necessary
+		///
+		optimization::matrix_type evaluateSetCostFunction(const optimization::matrix_type & xSet);
+
+};
+
+#endif

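A configuration sketch for the public criteria interface above; `opt` stands for any concrete Optimizer subclass instance and the values are arbitrary:

	const int n = 2;	// dimension of the hypothetical problem
	optimization::matrix_type lower(n,1), upper(n,1);
	lower[0][0]= -10.0; lower[1][0]= -10.0;
	upper[0][0]=  10.0; upper[1][0]=  10.0;

	opt.setLowerParameterBound(true, lower);
	opt.setUpperParameterBound(true, upper);
	opt.setFuncTol(true, 1.0e-6);	// stop when f changes by less than 1e-6
	opt.setMaxNumIter(true, 200);	// at most 200 iterations
	opt.setTimeLimit(true, 30.0);	// or 30 seconds, whichever is hit first
	opt.setMaximize(false);		// minimize (the default)

After a run, getReturnReason() tells which of these criteria fired, using the enumeration above.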
BIN
OptimizerToolBox.pdf


+ 53 - 0
ParamLog.cpp

@@ -0,0 +1,53 @@
+/*!
+	@file: optimization/ParamLog.cpp
+	@author: Esther Wacker
+*/
+
+
+#include "optimization/ParamLog.h"
+#include <iostream>
+
+using namespace std;
+
+ParamLog::ParamLog( std::string file)
+{
+	// open file
+	m_stream.open(file.c_str(), ios_base::out);
+}
+
+ParamLog::~ParamLog()
+{
+	// close file
+	m_stream.close();
+}
+
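+// note: the three message channels below are left empty on purpose here,
+// since ParamLog only records parameter vectors (see writeParamsToFile)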
+void ParamLog::writeLogError(const char* szMessage)
+{
+}
+
+void ParamLog::writeLogWarning(const char* szMessage)
+{
+}
+
+void ParamLog::writeLogTrace(const char* szMessage)
+{
+}
+
+
+void ParamLog::writeParamsToFile(optimization::matrix_type& parammatrix)
+{
+    int numParams= parammatrix.rows();
+    if(m_stream.good())
+    {
+        for(int i= 0; i< numParams; ++i)
+        {
+            m_stream <<parammatrix[i][0]<<" ";
+        }
+        m_stream<<endl;
+        m_stream.flush();
+    }
+    else
+    {
+        std::cerr << " the log file stream is bad! " <<endl;
+    }
+}

+ 58 - 0
ParamLog.h

@@ -0,0 +1,58 @@
+/*!
+	@file: optimization/ParamLog.h
+	@author: Matthias Wacker
+*/
+
+#ifndef _PARAMLOG_H_
+#define _PARAMLOG_H_
+
+#include <string>
+#include <fstream>
+#include "optimization/OptLogBase.h"
+
+
+class ParamLog : public OptLogBase
+{
+
+	public:
+	
+	/*!
+		default
+	*/
+	ParamLog(){};
+
+	/*!
+		@param file string with the filename to log to
+	*/
+	//FileLog( string file);
+	ParamLog( std::string file);
+
+	/*!
+		destructor
+	*/
+	virtual ~ParamLog();
+
+	/**! Write error message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogError(const char* szMessage);
+
+	/**! Write warning message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogWarning(const char* szMessage);
+
+	/**! Write trace message to an output device (file, stdio, etc.)
+	 */
+	virtual void writeLogTrace(const char* szMessage);
+
+	/**! Write parameter vector to output device (file, stdio, etc.)
+	 *
+	 * @param parammatrix parameter matrix
+	 */
+	virtual void writeParamsToFile(optimization::matrix_type& parammatrix);
+
+	private:
+	
+	std::ofstream m_stream;
+};
+
+#endif /* _PARAMLOG_H_ */

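Wiring sketch (hypothetical; `opt` is any Optimizer subclass instance, the file name is arbitrary):

	#include "optimization/ParamLog.h"

	ParamLog paramlog("params.log");	// writes one parameter vector per line
	opt.setLoger(&paramlog);		// the optimizer does not take ownership

The resulting file can then be plotted to follow the parameter trajectory of a run.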
+ 274 - 0
Plotter.cpp

@@ -0,0 +1,274 @@
+/*
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with this library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
+
+---
+Copyright (C) 2009, Esther-Sabrina Platzer <esther.platzer@uni-jena.de>
+					Matthias Wacker <Matthias.Wacker@mti.uni-jena.de>
+*/
+
+#include "optimization/Plotter.h"
+#include "optimization/Opt_Namespace.h"
+
+#include <iostream>
+#include <fstream>
+#include <cstdio>	// snprintf for the plot3D file names
+
+using namespace optimization;
+using namespace std;	// endl and string are used unqualified below
+
+Plotter::Plotter() : m_absoluteBounds(true),
+						  m_pCostfunc(NULL),
+						  m_pOptprob(NULL)
+{
+}
+
+
+Plotter::Plotter(const Plotter& plotter)
+{
+	m_pCostfunc= plotter.m_pCostfunc;
+	m_pOptprob= plotter.m_pOptprob;
+	m_absoluteBounds= plotter.m_absoluteBounds;
+}
+
+
+Plotter::~Plotter()
+{
+}
+
+
+Plotter& Plotter::operator=(const Plotter & plotter)
+{
+	//avoid self-copy
+	if(this != &plotter)
+	{
+		m_pCostfunc= plotter.m_pCostfunc;
+		m_pOptprob= plotter.m_pOptprob;
+		m_absoluteBounds= plotter.m_absoluteBounds;
+	}
+  	return *this;
+}
+
+
+void Plotter::setOptProblem(const SimpleOptProblem& optprob)
+{
+	m_pOptprob= &optprob;
+	// Note: not nice, but a CostFunction cannot be const, as evaluate() is not const and cannot be made const
+	m_pCostfunc= const_cast<SimpleOptProblem&>(optprob).getOriginalCostFunction();
+}
+
+
+void Plotter::plot1D(int paramnum, const float from, const float to, const float stepwidth, const char* path)
+{
+	// if the parameter number is < 0 or exceeds the dimension of the optimization problem, throw an assertion
+	assert(paramnum >= 0 && paramnum < m_pOptprob->getNumParams());
+	
+	// open plot file for writing
+	std::ofstream plotfile;
+	plotfile.open(path);
+	
+	// take actual parameter values from the optimization problem
+	matrix_type tmpParams= m_pOptprob->getAllCurrentParams();
+	
+	// start and end values for costfunction evaluation
+	float start,end;
+	
+	// if absolute bounds are used
+	if(m_absoluteBounds)
+	{
+		start= from;
+		end= to;
+	}
+	// else compute relative bounds
+	else
+	{
+		start= tmpParams[paramnum][0] + from;
+		end= tmpParams[paramnum][0] + to;
+	}
+	
+	// setting relevant parameter to start value
+	tmpParams[paramnum][0]= start;
+	
+	while(tmpParams[paramnum][0] <= end)
+	{
+		// evaluate costfunction and write to plotfile
+		plotfile << tmpParams[paramnum][0] << " " << m_pCostfunc->evaluate(tmpParams) << endl;
+		
+		// increment evaluation value
+		tmpParams[paramnum][0]+= stepwidth;
+	}
+	
+	plotfile.close();
+}
+
+
+void Plotter::plot2D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, const char* path)
+{
+	// if one of the parameter numbers is < 0 or exceeds the dimension of the optimization problem, throw an assertion
+	assert(paramnum1 >= 0 && paramnum1 < m_pOptprob->getNumParams() && paramnum2 >= 0 && paramnum2 < m_pOptprob->getNumParams() );
+	
+	// open plot file for writing
+	std::ofstream plotfile;
+	plotfile.open(path);
+	
+	// take actual parameter values from the optimization problem
+	matrix_type tmpParams= m_pOptprob->getAllCurrentParams();
+	
+	// start and end values for costfunction evaluation
+	float start1,end1,start2,end2;
+	
+	// if absolute bounds are used
+	if(m_absoluteBounds)
+	{
+		start1= from1;
+		end1= to1;
+		start2= from2;
+		end2= to2;
+	}
+	// else compute relative bounds
+	else
+	{
+		start1= tmpParams[paramnum1][0] + from1;
+		end1= tmpParams[paramnum1][0] + to1;
+		start2= tmpParams[paramnum2][0] + from2;
+		end2= tmpParams[paramnum2][0] + to2;
+	}
+	
+	// setting relevant parameter to start value
+	tmpParams[paramnum1][0]= start1;
+	tmpParams[paramnum2][0]= start2;
+	
+	while(tmpParams[paramnum1][0] <= end1)
+	{
+		while(tmpParams[paramnum2][0] <= end2)
+		{
+			// evaluate costfunction and write to plotfile
+			plotfile << tmpParams[paramnum1][0] << " " << tmpParams[paramnum2][0] << " " << m_pCostfunc->evaluate(tmpParams) << endl;
+		
+			// increment evaluation value of parameter 2
+			tmpParams[paramnum2][0]+= stepwidth2;
+		}
+		plotfile<<endl;
+		// reset inner loop
+		tmpParams[paramnum2][0]= start2;
+		
+		// increment evaluation value of parameter 1
+		tmpParams[paramnum1][0]+= stepwidth1;
+	}
+	
+	plotfile.close();
+}
+
+
+void Plotter::plot3D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, int paramnum3, const float from3, const float to3, const float stepwidth3, const char* path)
+{
+	// if one of the parameter numbers is < 0 or exceeds the dimension of the optimization problem, throw an assertion
+	assert(paramnum1 >= 0 && paramnum1 < m_pOptprob->getNumParams() && paramnum2 >= 0 && paramnum2 < m_pOptprob->getNumParams() && paramnum3 >= 0 && paramnum3 < m_pOptprob->getNumParams());
+	
+	// take actual parameter values from the optimization problem
+	matrix_type tmpParams= m_pOptprob->getAllCurrentParams();
+	
+	// start and end values for costfunction evaluation
+	float start1,end1;
+	
+	// if absolute bounds are used
+	if(m_absoluteBounds)
+	{
+		start1= from1;
+		end1= to1;
+	}
+	// else compute relative bounds
+	else
+	{
+		start1= tmpParams[paramnum1][0] + from1;
+		end1= tmpParams[paramnum1][0] + to1;
+	}
+	
+	// iterating over the first parameter
+	tmpParams[paramnum1][0]= start1;
+	string filename(path);	// std::string copies from const char* directly, no const_cast needed
+	
+	// cutting suffix ".txt", if there
+	if(filename.find(".txt") != string::npos)
+	{
+		filename.erase(filename.find(".txt"),4);
+	}
+	
+	while(tmpParams[paramnum1][0] <= end1)
+	{
+		// generate name of plotfile; the buffer must hold a formatted double
+		// (char[5] would overflow), so use snprintf with a 32-byte buffer
+		char number[32];
+		snprintf(number,sizeof(number),"%f",tmpParams[paramnum1][0]);
+		string numfilename= filename + "_" + number + ".txt";
+		
+		// open plot file for writing
+		std::ofstream plotfile;
+		plotfile.open(numfilename.c_str());
+		
+		// start and end values for costfunction evaluation
+		float start2,end2,start3,end3;
+		
+		if(m_absoluteBounds)
+		{
+			start2= from2;
+			end2= to2;
+			start3= from3;
+			end3= to3;
+		}
+		// else compute relative bounds
+		else
+		{
+			start2= tmpParams[paramnum2][0] + from2;
+			end2= tmpParams[paramnum2][0] + to2;
+			start3= tmpParams[paramnum3][0] + from3;
+			end3= tmpParams[paramnum3][0] + to3;
+		}
+	
+		// setting relevant parameter to start value
+		tmpParams[paramnum2][0]= start2;
+		tmpParams[paramnum3][0]= start3;
+	
+		while(tmpParams[paramnum2][0] <= end2)
+		{
+			while(tmpParams[paramnum3][0] <= end3)
+			{
+				// evaluate costfunction and write to plotfile
+				plotfile << tmpParams[paramnum2][0] << " " << tmpParams[paramnum3][0] << " " << m_pCostfunc->evaluate(tmpParams) << endl;
+			
+				// increment evaluation value of parameter 3
+				tmpParams[paramnum3][0]+= stepwidth3;
+			}
+			plotfile<<endl;
+			// reset inner loop
+			tmpParams[paramnum3][0]= start3;
+			
+			// increment evaluation value of parameter 2
+			tmpParams[paramnum2][0]+= stepwidth2;
+		}
+		// close plotfile
+		plotfile.close();
+		
+		// increment parameter 1
+		tmpParams[paramnum1][0]+= stepwidth1;
+	}
+}
+
+
+void Plotter::setBoundInterpretationStatus(bool absolute)
+{
+	m_absoluteBounds= absolute;
+}
+
+
+bool Plotter::getBoundInterpretationStatus()
+{
+	return m_absoluteBounds;
+}
+
+

+ 133 - 0
Plotter.h

@@ -0,0 +1,133 @@
+/*
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public
+License version 2 as published by the Free Software Foundation.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with this library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.
+
+---
+Copyright (C) 2009, Esther-Sabrina Platzer <esther.platzer@uni-jena.de>
+Matthias Wacker <matthias.wacker@mti.uni-jena.de>
+*/
+
+	/// @class Plotter
+	/// Class interface for cost function plotting.
+	///
+	/// @author Esther-Sabrina Platzer, Matthias Wacker
+	/// @date 2009-07-07
+
+#ifndef _PLOTTER_H_
+#define _PLOTTER_H_
+
+#include "optimization/SimpleOptProblem.h"
+
+
+class Plotter
+{
+	public:
+		///
+		/// defaultConstructor
+		///
+		Plotter();
+				
+		///
+		///	Copy constructor
+		///	@param plotter plotter to copy
+		///
+		Plotter(const Plotter &plotter);
+
+        ///
+		/// Assignment operator
+		/// @param plotter plotter to assign
+		///
+		Plotter& operator=(const Plotter &plotter);
+
+
+		///
+		///	Destructor.
+        ///	
+		~Plotter();
+		
+	 
+		///
+		/// Set optimization problem
+		///
+		void setOptProblem(const SimpleOptProblem& optprob);
+
+		
+		///
+		/// Performs a 1d evaluation of the costfunction defined in the optimization problem.
+		/// @param paramnum parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from lower border of parameter value
+		/// @param to upper border of parameter value
+		/// @param stepwidth sampling rate for costfunction evaluation
+		/// @param path path to the result file
+		///
+		void plot1D(int paramnum, const float from, const float to, const float stepwidth, const char* path);
+		
+		
+		///
+		/// Performs a 2d evaluation of the costfunction defined in the optimization problem.
+		/// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from1 lower border of parameter value for parameter 1
+		/// @param to1 upper border of parameter value for parameter 1
+		/// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
+		/// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from2 lower border of parameter value for parameter 2
+		/// @param to2 upper border of parameter value for parameter2
+		/// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
+		/// @param path path to the result file
+		///
+		void plot2D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, const char* path);
+		
+		
+		///
+		/// Performs a 3d evaluation of the costfunction defined in the optimization problem. Note that parameters 2 and 3 vary within each evaluation cycle while parameter 1 stays fixed. For every sampled value of parameter 1 a new plot file is generated, in which parameters 2 and 3 are varied within their ranges. This results in a series of plot files showing the behavior of parameters 2 and 3 for a varied parameter 1. Each file name is derived from the given path, with the current value of parameter 1 appended.
+		/// @param paramnum1 first parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from1 lower border of parameter value for parameter 1
+		/// @param to1 upper border of parameter value for parameter 1
+		/// @param stepwidth1 sampling rate for costfunction evaluation of parameter 1
+		/// @param paramnum2 second parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from2 lower border of parameter value for parameter 2
+		/// @param to2 upper border of parameter value for parameter 2
+		/// @param stepwidth2 sampling rate for costfunction evaluation of parameter 2
+		/// @param paramnum3 third parameter to evaluate (will be checked to lie in the range of the dimension of the opt. problem)
+		/// @param from3 lower border of parameter value for parameter 3
+		/// @param to3 upper border of parameter value for parameter 3
+		/// @param stepwidth3 sampling rate for costfunction evaluation of parameter 3
+		/// @param path path to the result file
+		///
+		void plot3D(int paramnum1, const float from1, const float to1, const float stepwidth1, int paramnum2, const float from2, const float to2, const float stepwidth2, int paramnum3, const float from3, const float to3, const float stepwidth3, const char* path);
+
+		///
+		/// Setting the bound interpretation status: if absolute= true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from from till to; if absolute= false, the bounds will be interpreted relative to the current parameter values, meaning that the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
+		/// @param absolute bound interpretation status
+		///
+		void setBoundInterpretationStatus(bool absolute);
+
+		/// Get the bound interpretation status
+		/// @retval true, if bounds are interpreted as absolute bounds (start= from, end=to)
+		/// @retval false, if bounds are interpreted relative to the actual parameter value (start= actualval + from, end= actualval + to)	
+		///
+		bool getBoundInterpretationStatus();
+
+	protected:
+
+		//! Flag giving the status of the bound interpretation: if true, the bounds (from,to) will be interpreted as absolute values and the evaluation goes from from till to; if false, the bounds will be interpreted relative to the current parameter values, meaning that the evaluation goes from start= actualval + from till end= actualval + to. Default is true.
+		bool m_absoluteBounds;
+
+		//! Costfunction
+		CostFunction* m_pCostfunc;
+		
+		//! Optproblem pointer
+		const SimpleOptProblem* m_pOptprob;
+};
+
+#endif

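Usage sketch; `prob` is assumed to be an existing SimpleOptProblem whose cost surface should be probed around its current parameters:

	#include "optimization/Plotter.h"

	Plotter plotter;
	plotter.setOptProblem(prob);
	plotter.setBoundInterpretationStatus(false);		// bounds relative to current values
	plotter.plot1D(0, -1.0f, 1.0f, 0.01f, "param0.txt");	// parameter 0, +/- 1 around its value
	plotter.plot2D(0, -1.0f, 1.0f, 0.05f,
	               1, -1.0f, 1.0f, 0.05f, "param01.txt");

plot2D writes one "x y f(x,y)" triple per line with a blank line after each scan row, so the output can be fed directly to gnuplot's splot.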
+ 350 - 0
PowellBrentOptimizer.cpp

@@ -0,0 +1,350 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	PowellBrentOptimizer.cpp: Implementation of the 
+//												PowellBrent Optimizer
+//
+//	Written by Matthias Wacker
+//
+// 
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "optimization/PowellBrentOptimizer.h"
+#include "optimization/Opt_Namespace.h"
+//#include <iostream>
+
+
+PowellBrentOptimizer::PowellBrentOptimizer(OptLogBase *loger) : SuperClass(loger)
+{
+	//xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
+	//m_lin = BrentLineSearcher(m_costFunction,loger);
+	m_lin_Lower		= -5.0;
+	m_lin_Upper		= 5.0;
+	m_lin_Eps		= 1.0e-3;//1.0e-2;
+}
+
+
+PowellBrentOptimizer::PowellBrentOptimizer( const PowellBrentOptimizer &opt) : SuperClass(opt)
+{
+	m_lin= opt.m_lin;
+	xi= opt.xi;
+	m_lin_Lower= opt.m_lin_Lower;
+	m_lin_Upper= opt.m_lin_Upper;
+	m_lin_Eps= opt.m_lin_Eps;
+	
+}
+
+/*
+	operator=
+*/
+PowellBrentOptimizer & PowellBrentOptimizer::operator=(const PowellBrentOptimizer &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+				
+		/*
+			=operator of SuperClass
+		*/
+		SuperClass::operator=(opt);
+
+			
+		/*
+			own values:
+		*/
+		m_lin= opt.m_lin;
+		xi= opt.xi;
+		m_lin_Lower= opt.m_lin_Lower;
+		m_lin_Upper= opt.m_lin_Upper;
+		m_lin_Eps= opt.m_lin_Eps;
+		
+	}
+  	return *this;
+}
+
+
+PowellBrentOptimizer::~PowellBrentOptimizer()
+{
+}
+
+
+void PowellBrentOptimizer::init()
+{
+	SuperClass::init();
+
+	//xi =0.0;
+	xi = matrix_type(m_numberOfParameters, m_numberOfParameters);
+	m_lin = BrentLineSearcher(m_costFunction,m_loger);
+
+
+	for(uint i = 0; i < m_numberOfParameters;i++)
+	{
+		for(uint j = 0; j < m_numberOfParameters; j++)
+		{
+			if(i == j)
+			{
+				//xi(i,j) = 1.0;
+				xi[i][j] = 1.0;
+			}
+		}
+	}
+
+	if(m_funcTolActive)
+		m_lin.setEps(m_funcTol);
+
+	if(m_maximize == true)
+		m_lin.setMaximize(true);
+
+	//if(m_verbose)
+	//	m_lin.setVerbose(true);
+}
+
+
+void PowellBrentOptimizer::setLineSearchOptions(double lower, double upper, double eps)
+{
+	m_lin_Lower= lower;
+	m_lin_Upper= upper;
+	m_lin_Eps= eps;
+	m_lin.setBounds(lower, upper);
+	m_lin.setEps(eps);
+}
+
+
+int PowellBrentOptimizer::optimize()
+{
+	this->init();
+	/*
+		start time criteria
+	*/
+	m_startTime = clock();
+
+	int n = m_numberOfParameters;
+	
+	double ftol = m_funcTol;
+	double TINY = 1.0e-25;
+	
+	bool abort = false;
+	
+	int i,ibig, j;
+	double del, fp, fptt, t;
+	
+	matrix_type pt(m_numberOfParameters,1);
+	matrix_type ptt(m_numberOfParameters,1);
+	matrix_type xit(m_numberOfParameters,1);
+	
+	/*
+		eval at parameters
+	*/
+	double fret = evaluateCostFunction(m_parameters);
+	
+	/*
+		save the initial point
+	*/
+	pt = m_parameters;
+
+	if(m_verbose)
+	{
+		for(uint r = 0; r < m_numberOfParameters; r++)
+		{
+			std::cout<< m_parameters[r][0] << " ";
+		}
+		
+		std::cout<< fret << std::endl;
+	}
+	
+	while(abort == false)
+	{
+		/*
+			Check iteration limit
+		*/
+		if(m_maxNumIterActive)
+		{
+			if(m_numIter >= m_maxNumIter)
+			{
+				/* set according return status and return */
+				m_returnReason = SUCCESS_MAXITER;
+				abort = true;
+				continue;
+			}
+		}
+		 /*
+		  increase iteration counter
+		 */
+		++m_numIter;
+		
+		fp = fret;
+		ibig = 0;
+		del = 0.0;
+		
+		for(i = 0; i < n;i++)
+		{
+			for(j = 0; j<n;j++)
+			{
+				//xit(j,0) = xi(j,i) * m_scales(j,0);
+				xit[j][0] = xi[j][i] * m_scales[j][0];
+			}
+			fptt=fret;
+			
+			/* do a one dimensional line search */
+			m_lin.setBounds(m_lin_Lower ,m_lin_Upper);
+			m_lin.setEps(m_lin_Eps);
+			m_lin.setX0(m_parameters);
+			m_lin.setH0(xit);
+			double lambda = m_lin.optimize();
+			
+			fret = m_lin.evaluateSub(lambda);
+			xit = xit * lambda;
+			
+			// if value improved, accept new point
+			if(fret < fptt )
+			{
+				m_parameters= m_parameters + xit;
+			}
+			
+			if(m_verbose)
+			{
+				for(uint r = 0; r < m_numberOfParameters; r++)
+				{
+					std::cout<< m_parameters[r][0] << " ";
+				}
+				
+				std::cout <<fret<< std::endl;
+			}
+			
+			
+			if(fptt - fret > del)
+			{
+				del = fptt - fret;
+				ibig = i;
+			}
+		}
+	
+		// check for improvement -> if there is none, abort iteration!
+		if(fret >= fp)
+		{
+			m_returnReason=SUCCESS_NO_BETTER_POINT; 
+			abort=true;
+			continue;
+		}
+
+
+
+		if(m_funcTolActive)
+		{
+			/*
+				a recommended abort criteria
+			*/
+			if(2.0 * (fp - fret ) <= ftol *(fabs(fp) + fabs(fret) ) + TINY)
+			{
+				m_returnReason = SUCCESS_FUNCTOL;
+				abort = true;
+				continue;
+			}
+		}
+
+
+		/*
+			construct extrapolated point and the average direction moved
+			save old starting point
+		*/
+		for (j=0; j<n;j++)
+		{
+			//ptt(j,0)= 2.0*m_parameters(j,0)-pt(j,0);
+			//xit(j,0)= m_parameters(j,0)-pt(j,0);
+			//pt(j,0) = m_parameters(j,0);
+			ptt[j][0]= 2.0*m_parameters[j][0]-pt[j][0];
+			xit[j][0]= m_parameters[j][0]-pt[j][0];
+			pt[j][0] = m_parameters[j][0];
+		}
+
+		fptt = evaluateCostFunction(ptt);
+
+		if(fptt < fp)
+		{
+			// Powell's acceptance test for the new direction uses squared terms
+			// (cf. Numerical Recipes powell()), not square roots
+			t = 2.0*(fp-2.0*fret+fptt)*(fp-fret-del)*(fp-fret-del) - del*(fp-fptt)*(fp-fptt);
+			if(t < 0.0)
+			{
+				/* 
+					move to minimum of new 
+					direction and save the new direction
+				*/
+				m_lin.setBounds(m_lin_Lower,m_lin_Upper);
+				m_lin.setX0(m_parameters);
+				m_lin.setH0(xit);
+
+				double lambda = m_lin.optimize();
+			
+				fret = m_lin.evaluateSub(lambda);
+				xit= xit*lambda;
+				m_parameters= m_parameters+ xit;
+				
+				// normalize direction
+				xit = xit * (1.0 / xit.Norm(0));  
+				for(j=0; j < n ;j++)
+				{
+					xi[j][ibig] = xi[j][n-1];
+					xi[j][n-1]= xit[j][0];
+				}
+			}
+		}
+
+
+		
+		/*
+			Check Optimization Timilimit, if active
+		*/
+		if(m_maxSecondsActive)
+		{
+			m_currentTime = clock();
+		
+			/* time limit exceeded ? */
+			if(((float)(m_currentTime - m_startTime )/CLOCKS_PER_SEC) >= m_maxSeconds )
+			{
+				if(m_verbose == true)
+				{
+					std::cout << "# aborting because of Time Limit" << std::endl;
+				}
+
+				/* set according return status and the last parameters and return */
+				m_returnReason = SUCCESS_TIMELIMIT;
+				abort = true;
+				continue;
+			}
+		}
+
+		/*
+			Check Bounds if Active
+		*/
+		if(!checkParameters(m_parameters))
+		{
+			if(m_verbose == true)
+			{
+				std::cout << "PowellBrent :: aborting because of parameter Bounds" << std::endl;
+			}
+
+			/* set according return status and the last parameters and return */
+			m_returnReason = ERROR_XOUTOFBOUNDS;
+			abort = true;
+			continue;
+		}
+
+		/*if(m_verbose)
+		{
+			for(int r = 0; r < m_numberOfParameters; r++)
+			{
+				std::cout<< m_parameters(r,0) << " ";
+			}
+			
+			std::cout << std::endl;
+		}*/
+		
+	}
+	
+	
+	return m_returnReason;
+	
+}

+ 118 - 0
PowellBrentOptimizer.h

@@ -0,0 +1,118 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	PowellBrentOptimizer.h: interface of the PowellBrentOptimizer
+//
+//	Powell's method - modified so that it does not insist on 100% conjugate
+//	directions, but keeps one direction along the "valley" direction.
+//
+//	Written by: Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#ifndef _POWELL_BRENT_OPTIMIZER_H_
+#define _POWELL_BRENT_OPTIMIZER_H_
+
+#include <cmath>
+#include "optimization/SimpleOptimizer.h"
+#include "optimization/BrentLineSearcher.h"
+
+/*!
+	class for the PowellBrentOptimizer
+
+   HowToUse:
+
+	  * use setParameters() to set the start point
+	  * you may use m_scales to regulate the inner search interval 
+		(set it to a vector with 1/x as elements to divide it by x);
+	  * call init()
+	  * call optimize()
+
+
+ 	Implemented Abort criteria:
+		
+	  * maximum number of iterations
+	  *	time limit
+	  * parameter bounds
+	  * function value tolerance
+		  
+	Additional return reason:
+
+	  * SUCCESS_NO_BETTER_POINT (no direction yielded an improvement)
+
+ */
+class PowellBrentOptimizer : public SimpleOptimizer
+{
+	public:
+
+		typedef  SimpleOptimizer SuperClass;
+		typedef SuperClass::matrix_type matrix_type;
+
+		/*!
+			Constructor.
+			\param loger : OptLogBase * to existing log class
+		*/
+		PowellBrentOptimizer(OptLogBase *loger=NULL);
+
+		/*!
+			Copy constructor
+			\param opt .. optimizer to copy
+		*/
+		PowellBrentOptimizer( const PowellBrentOptimizer &opt);
+
+		/*!
+			operator =
+		*/
+		PowellBrentOptimizer & operator=(const PowellBrentOptimizer &opt);
+
+	        /*!
+				Destructor.
+	         */
+	        ~PowellBrentOptimizer();
+
+		///
+		///	enumeration for the return reasons of an optimizer;
+		///	it extends the SuperClass enumeration: SUCCESS_NO_BETTER_POINT reuses the
+		///	old _to_continue_ value and a new _to_continue_ is appended after it
+		///
+		enum {	SUCCESS_NO_BETTER_POINT = _to_continue_,
+				_to_continue_
+		};
+
+	
+		/*!
+			\brief Do internal initializations
+		*/
+		void init();
+		
+		/*!
+			\brief start the optimization
+		*/
+		int optimize();
+
+
+		/*!
+			\brief set options for the internal line searcher
+		
+		*/
+		void setLineSearchOptions(double lower, double upper, double eps);
+		
+
+	private:
+
+		/*!
+			the direction set
+		*/
+		matrix_type xi;
+
+		/*!
+			the brent line searcher
+		*/
+		BrentLineSearcher m_lin;
+
+		/*!
+			parameters of the Brent line searcher
+		*/
+		double m_lin_Lower;
+		double m_lin_Upper;
+		double m_lin_Eps;
+	
+};
+
+#endif

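Usage sketch following the HowToUse notes above; the cost function and start point wiring goes through the SimpleOptimizer interface as those notes state, and only the public calls shown here are assumed:

	#include "optimization/PowellBrentOptimizer.h"

	PowellBrentOptimizer opt;			// no loger
	opt.setLineSearchOptions(-2.0, 2.0, 1.0e-4);	// inner Brent interval and tolerance
	opt.setFuncTol(true, 1.0e-6);
	opt.setMaxNumIter(true, 100);
	// ... set the start point via setParameters() as described above ...
	int reason = opt.optimize();		// optimize() calls init() itself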
+ 412 - 0
SimpleOptProblem.cpp

@@ -0,0 +1,412 @@
+/*
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License version 2 as published by the Free Software Foundation.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public License
+    along with this library; see the file COPYING.LIB.  If not, write to
+    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+    Boston, MA 02110-1301, USA.
+
+    ---
+    Copyright (C) 2009, Esther-Sabrina Platzer <esther.platzer@uni-jena.de>
+                        Matthias Wacker <Matthias.Wacker@mti.uni-jena.de>
+*/
+
+#include "optimization/SimpleOptProblem.h"
+
+using namespace optimization;
+
+// note: all matrices are of size 0x0 meaning that they are not initialised!
+SimpleOptProblem::SimpleOptProblem() : m_costfunc(NULL),
+                           m_numParams(0),
+                           m_numActiveParams(0),
+                           m_maximize(false),
+                           m_lowerBoundsActive(false),
+                           m_upperBoundsActive(false),
+                           m_dimwrapper(NULL)
+{
+}
+
+
+SimpleOptProblem::SimpleOptProblem(CostFunction* costfunc, optimization::matrix_type& initialParams, optimization::matrix_type& scales, bool allactive)
+{
+    // if dimension of initial parameter matrix and/or scale matrix do not fit the given dimension, stop
+    assert(initialParams.rows() == (int)costfunc->getNumOfParameters() && scales.rows() == (int)costfunc->getNumOfParameters());
+    
+    // pointer to costfunction
+    m_costfunc= costfunc;
+    
+    // full dimension of optimization problem
+    m_numParams= m_costfunc->getNumOfParameters();
+    
+    matrix_type tmp(m_numParams,1);
+    m_selection= tmp;
+    m_upperBounds= tmp;
+    m_lowerBounds= tmp;
+    
+    // number of active parameters (all or none?)
+    if(allactive)
+    {
+        m_numActiveParams= m_numParams;
+        for(int i= 0; i< m_numParams; ++i)
+            m_selection[i][0]= 1;
+    }
+    else
+    {
+        m_numActiveParams= 0;
+        for(int i= 0; i< m_numParams; ++i)
+            m_selection[i][0]= 0;
+    }
+    
+    // init parameters and scales
+    m_currentparams= initialParams;
+    m_scales= scales;
+    
+    // init upper and lower bounds with the largest representable values (+/- double max)
+    for(int i= 0; i< m_numParams; ++i)
+    {
+		m_lowerBounds[i][0]= -1.0*numeric_limits<double>::max( );
+		// -1.0*numeric_limits<double>::infinity( );//-1.0*numeric_limits<float>::max( );
+		m_upperBounds[i][0]= numeric_limits<double>::max( );
+		//numeric_limits<double>::infinity( );//numeric_limits<float>::max( );
+    }
+    
+    // by default minimization will be performed and no bounds are active
+    m_maximize= false;
+    m_lowerBoundsActive= false;
+    m_upperBoundsActive= false;
+    
+    m_dimwrapper= NULL;
+}
+
+
+SimpleOptProblem::SimpleOptProblem(const SimpleOptProblem& opt)
+{
+    m_costfunc= opt.getOriginalCostFunction();
+    m_numParams= opt.getNumParams();
+    m_numActiveParams= opt.getNumActiveParams();
+    m_currentparams= opt.getAllCurrentParams();
+    m_scales= opt.getAllScales();
+    m_selection=opt.m_selection; // direct member access
+    m_maximize= opt.getMaximize();
+    m_lowerBoundsActive= opt.lowerBoundsActive();
+    m_upperBoundsActive= opt.upperBoundsActive();
+    m_lowerBounds= opt.getAllLowerBounds();
+    m_upperBounds= opt.getAllUpperBounds();
+        
+    m_dimwrapper= NULL;
+}
+
+
+SimpleOptProblem::~SimpleOptProblem()
+{
+    if(m_dimwrapper != NULL)
+        delete m_dimwrapper;
+}
+
+
+SimpleOptProblem& SimpleOptProblem::operator=(const SimpleOptProblem& opt)
+{
+    m_costfunc= opt.m_costfunc;
+    m_selection=opt.m_selection; // direct member access
+    m_numParams= opt.getNumParams();
+    m_numActiveParams= opt.getNumActiveParams();
+    m_currentparams= opt.getAllCurrentParams();
+    m_scales= opt.getAllScales();
+    m_maximize= opt.getMaximize();
+    m_lowerBoundsActive= opt.lowerBoundsActive();
+    m_upperBoundsActive= opt.upperBoundsActive();
+    m_lowerBounds= opt.getAllLowerBounds();
+    m_upperBounds= opt.getAllUpperBounds();
+       
+    m_dimwrapper= NULL;
+    
+    return *this;
+}
+
+
+void SimpleOptProblem::activateAllParams(bool status)
+{
+    for(int i= 0; i< this->getNumParams(); ++i)
+    {
+        m_selection[i][0]= (int)(status);
+    }
+
+	if(status == true)
+	{
+		m_numActiveParams=m_numParams;
+	}
+	else
+	{
+		m_numActiveParams=0;
+	}
+}
+
+
+void SimpleOptProblem::activateParam(int paramnum, bool status)
+{
+
+    assert(paramnum < m_numParams && paramnum >= 0);
+    
+    m_selection[paramnum][0]= (int)(status);
+
+	int count = 0;
+	for(int i =0; i < m_numParams; ++i)
+	{
+		if(m_selection[i][0]==1)
+		{
+			count++;
+		}
+	}
+	m_numActiveParams=count;
+}
+
+
+bool SimpleOptProblem::isActive(int paramnum) const
+{
+    assert(paramnum < m_numParams && paramnum >= 0);
+    
+    if(m_selection[paramnum][0] == 1)
+        return true;
+    else
+        return false;
+}
+
+
+bool SimpleOptProblem::allActive() const
+{
+    if(m_numActiveParams == m_numParams)
+        return true;
+    else
+        return false;
+}
+
+
+int SimpleOptProblem::getNumParams() const
+{
+    return m_numParams;
+}
+
+
+int SimpleOptProblem::getNumActiveParams() const
+{
+    return m_numActiveParams;
+}
+
+
+matrix_type SimpleOptProblem::getAllCurrentParams() const
+{
+    return m_currentparams;
+}
+
+
+matrix_type SimpleOptProblem::getActiveCurrentParams() const
+{
+    //compute selection matrix (m_numActiveParams x m_numParams)
+    matrix_type selmatrix= this->computeSelectionMatrix();
+    return (selmatrix*m_currentparams);
+}
+
+
+matrix_type SimpleOptProblem::getAllScales() const
+{
+    return m_scales;
+}
+
+
+matrix_type SimpleOptProblem::getActiveScales() const
+{
+    //compute selection matrix (m_numActiveParams x m_numParams)
+    matrix_type selmatrix= this->computeSelectionMatrix();
+    return(selmatrix* m_scales);
+}
+
+
+matrix_type SimpleOptProblem::getAllUpperBounds() const
+{
+    return m_upperBounds;
+}
+
+
+matrix_type SimpleOptProblem::getActiveUpperBounds() const
+{
+    //compute selection matrix (m_numActiveParams x m_numParams)
+    matrix_type selmatrix= this->computeSelectionMatrix();
+    return (selmatrix*m_upperBounds);
+}
+
+
+matrix_type SimpleOptProblem::getAllLowerBounds() const
+{
+    return m_lowerBounds;
+}
+
+
+matrix_type SimpleOptProblem::getActiveLowerBounds() const
+{
+    //compute selection matrix (m_numActiveParams x m_numParams)
+    matrix_type selmatrix= this->computeSelectionMatrix();
+    return (selmatrix*m_lowerBounds);
+}
+
+
+CostFunction* SimpleOptProblem::getCostFunction() 
+{
+    // if number of active params is less then m_numParams we need a Wrapper function
+    if(m_numActiveParams < m_numParams)
+    {
+        // if an old dimwrapper function still exists
+        /// @todo little problem: if the selection of the opt problem has not changed and a dimension-reduced version already exists, it is still deleted and newly allocated anyway; that is not so nice :(
+        if(m_dimwrapper != NULL)
+            delete m_dimwrapper;
+        
+        // new dimension reduction, wrapper function
+        m_dimwrapper= new DimWrapperCostFunction(m_costfunc, m_selection, m_currentparams);
+        return m_dimwrapper;
+    }
+    
+    // else we return just the normal costfunction
+    return m_costfunc;
+}
+
+
+CostFunction* SimpleOptProblem::getOriginalCostFunction() const
+{
+    return m_costfunc;
+}
+
+
+bool SimpleOptProblem::getMaximize() const
+{
+    return m_maximize;
+}
+
+
+bool SimpleOptProblem::lowerBoundsActive() const
+{
+    return m_lowerBoundsActive;
+}
+
+
+bool SimpleOptProblem::upperBoundsActive() const
+{
+    return m_upperBoundsActive;
+}
+
+
+void SimpleOptProblem::setAllCurrentParameters(matrix_type& params)
+{
+    assert(params.rows() == m_numParams && params.cols() == 1);
+    m_currentparams= params;
+}
+
+
+void SimpleOptProblem::setActiveCurrentParameters(matrix_type& params)
+{
+
+    assert(params.rows() == m_numActiveParams && params.cols() == 1);
+
+    int paramcopied= 0;
+    for(int i= 0; i< m_numParams; ++i)
+    {
+	if(m_selection[i][0] == 1)
+	{
+	    m_currentparams[i][0]= params[paramcopied][0];
+	    paramcopied++;
+	}
+    }
+}
+
+
+void SimpleOptProblem::setAllScales(matrix_type& scales)
+{
+    assert(scales.rows() == m_numParams && scales.cols() == 1);
+    m_scales= scales;
+}
+
+
+void SimpleOptProblem::setActiveScales(matrix_type& scales)
+{
+    assert(scales.rows() == m_numActiveParams && scales.cols() == 1);
+
+    int scalecopied= 0;
+    for(int i= 0; i< m_numParams; ++i)
+    {
+	if(m_selection[i][0] == 1)
+	{
+	    m_scales[i][0]= scales[scalecopied][0];
+	    scalecopied++;
+	}
+    }
+}
+
+
+void SimpleOptProblem::setLowerBound(int paramnumber, float lowerbound)
+{
+    assert(paramnumber >= 0 && paramnumber < m_numParams);
+    m_lowerBoundsActive= true;
+    m_lowerBounds[paramnumber][0]= lowerbound;
+}
+
+
+void SimpleOptProblem::setUpperBound(int paramnumber, float upperbound)
+{
+    assert(paramnumber >= 0 && paramnumber < m_numParams);
+    m_upperBoundsActive= true;
+    m_upperBounds[paramnumber][0]= upperbound;
+}
+
+
+void SimpleOptProblem::resetLowerBounds()
+{
+    m_lowerBoundsActive= false;
+    for(int i= 0; i< m_numParams; ++i)
+    {
+        m_lowerBounds[i][0]= -1.0*numeric_limits<double>::infinity( );
+    }
+}
+
+
+void SimpleOptProblem::resetUpperBounds()
+{
+    m_upperBoundsActive= false;
+    for(int i= 0; i< m_numParams; ++i)
+    {
+        m_upperBounds[i][0]= numeric_limits<double>::infinity( );
+    }
+}
+
+
+void SimpleOptProblem::changeCostFunc(CostFunction* costfunc)
+{
+    m_costfunc= costfunc;
+}
+
+
+void SimpleOptProblem::setMaximize(bool maximize)
+{
+    m_maximize=maximize;
+}
+
+
+matrix_type SimpleOptProblem::computeSelectionMatrix() const
+{
+    matrix_type selectionmatrix(m_numActiveParams,m_numParams,0);
+    int index= 0;
+    for(int i= 0; i < m_numParams; ++i)
+    {
+	if(m_selection[i][0] == 1)
+	{
+	    selectionmatrix[index][i]= 1.0;
+	    index++;
+	}
+    }
+    return selectionmatrix;
+}
+

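Construction sketch for a reduced problem; `cf` is a hypothetical CostFunction* with three parameters:

	#include "optimization/SimpleOptProblem.h"

	optimization::matrix_type init(3,1), scales(3,1);
	init[0][0]= 0.0;   init[1][0]= 1.5;   init[2][0]= -2.0;
	scales[0][0]= 1.0; scales[1][0]= 1.0; scales[2][0]= 1.0;

	SimpleOptProblem prob(cf, init, scales, false);	// all parameters inactive
	prob.activateParam(0, true);			// optimize parameter 0 only
	CostFunction* reduced = prob.getCostFunction();	// 1-dimensional wrapper

getCostFunction() returns a DimWrapperCostFunction whenever not all parameters are active, so an optimizer only ever sees the active subset.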
+ 335 - 0
SimpleOptProblem.h

@@ -0,0 +1,335 @@
+/*
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License version 2 as published by the Free Software Foundation.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public License
+    along with this library; see the file COPYING.LIB.  If not, write to
+    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+    Boston, MA 02110-1301, USA.
+
+    ---
+    Copyright (C) 2009, Esther-Sabrina Platzer <esther.platzer@uni-jena.de>
+                        Matthias Wacker <matthias.wacker@mti.uni-jena.de>
+*/
+
+    /// @class SimpleOptProblem
+    /// This class encapsulates simple optimization problems. Parameter values, scales and upper and lower bounds can be managed. Furthermore, individual parameters can be deactivated,
+    /// which allows an optimization of just a parameter subset. Activation and deactivation can also be done through the class interface.
+    ///
+    /// @author Esther-Sabrina Platzer, Matthias Wacker
+    /// @date 2009-07-02
+
+#ifndef _SimpleOptProblem_h_
+#define _SimpleOptProblem_h_
+
+#include <assert.h>
+
+#include "optimization/Opt_Namespace.h"
+#include "optimization/CostFunction.h"
+#include "optimization/DimWrapperCostFunction.h"
+
+
+class SimpleOptProblem 
+{
+  public:
+    ///
+    /// Standard Constructor
+    ///
+    SimpleOptProblem( );
+    
+    /// 
+    /// The parametrized constructor allows specifying the initial parameter values, the parameter scales and the costfunction to use. Furthermore it is possible to optimize just a subset of the parameter set. For this the wanted parameters have to be activated and the unwanted ones deactivated. With the allactive flag all parameters can be activated/deactivated in advance. By default all parameters are activated.
+    ///
+    /// @param costfunc pointer to costfunction
+    /// @param initialParams matrix containing the initial parameter values
+    /// @param scales matrix containing the scales for every parameter
+    /// @param allactive boolean flag for activating (true) or deactivating (false) all parameters
+    ///
+    SimpleOptProblem(CostFunction* costfunc, optimization::matrix_type& initialParams, optimization::matrix_type& scales, bool allactive=true);
+
+    /// 
+    /// Copy Constructor
+    ///
+    /// @param opt the optimization problem to copy
+    ///
+    SimpleOptProblem(const SimpleOptProblem& opt);
+    
+    ///
+    /// Destructor
+    ///
+    ~SimpleOptProblem();
+
+    ///
+    /// Assignment Operator
+    /// @param opt the optimization problem to assign
+    /// @return SimpleOptProblem
+    ///
+    SimpleOptProblem& operator=(const SimpleOptProblem& opt);
+    
+    ///
+    /// Depending on the value of status, all parameters (the whole parameter set) are activated (status=true) or deactivated (status=false)
+    ///
+    /// @param status boolean activation flag
+    ///
+    void activateAllParams(bool status);
+    
+    ///
+    /// Given the number of a certain parameter, its status (activated/deactivated) can be set individually
+    ///
+    /// @param paramnum number of the parameter in the overall parameter set
+    /// @param status activation status (true= activated, false= deactivated)
+    ///
+    void activateParam(int paramnum, bool status);
+    
+    /// 
+    /// Returns the status of individual parameters
+    ///
+    /// @param paramnum parameter number in the overall parameter set
+    /// @retval true, if the given parameter is active
+    /// @retval false, if the given parameter is inactive
+    ///
+    bool isActive(int paramnum) const;
+    
+    ///
+    /// Returns whether all params are active or not
+    /// @retval true, if all parameters are active (m_numParams == m_numActiveParams)
+    /// @retval false, if none or not all parameters are active (m_numParams != m_numActiveParams)
+    bool allActive() const;
+    
+    
+    ///
+    /// @name Getters
+    ///
+    
+    ///
+    /// Returns the size of the overall parameter set
+    ///
+    int getNumParams() const;
+    
+    ///
+    /// Returns the number of active parameters
+    ///
+    int getNumActiveParams() const;
+    
+    /// 
+    /// Returns the current parameter set
+    ///
+    /// @return current parameter matrix
+    ///
+    optimization::matrix_type getAllCurrentParams() const;
+
+    /// 
+    /// Returns the current values of the active parameters
+    ///
+    /// @return current parameter matrix of the active parameters
+    ///
+    optimization::matrix_type getActiveCurrentParams() const;
+    
+    ///
+    /// Returns the current parameter scales
+    ///
+    /// @return current scale matrix
+    ///
+    optimization::matrix_type getAllScales() const;
+
+    ///
+    /// Returns the current scales of the active parameters
+    ///
+    /// @return current scale matrix of the active parameters
+    ///
+    optimization::matrix_type getActiveScales() const;
+    
+    ///
+    /// Returns the upper bounds (if no upper bounds are active, the matrix just contains infinity)
+    ///
+    /// @return upper bound matrix
+    ///
+    optimization::matrix_type getAllUpperBounds() const;
+
+    ///
+    /// Returns the upper bounds of the active parameters (if no upper bounds are active, the matrix just contains infinity)
+    ///
+    /// @return upper bound matrix of the active parameters
+    ///
+    optimization::matrix_type getActiveUpperBounds() const;
+    
+    ///
+    /// Returns the lower bounds (if no lower bounds are active, the matrix just contains -infinity)
+    ///
+    /// @return lower bound matrix
+    ///
+    optimization::matrix_type getAllLowerBounds() const;
+
+    ///
+    /// Returns the lower bounds of the active parameters (if no lower bounds are active, the matrix just contains -infinity)
+    ///
+    /// @return lower bound matrix of the active parameters
+    ///
+    optimization::matrix_type getActiveLowerBounds() const;
+    
+    /// 
+    /// Returns a pointer to the CURRENT CostFunction with regard to the current parameter selection
+    ///
+    /// @return CostFunction pointer
+    ///
+    CostFunction* getCostFunction();
+    
+    ///
+    /// Returns a pointer to the ORIGINAL CostFunction ignoring the current parameter selection
+    ///
+    /// @return CostFunction pointer
+    ///
+    CostFunction* getOriginalCostFunction() const;
+    
+    /// 
+    /// Current description of the optimization strategy
+    /// 
+    /// @retval true, if a maximization of the CostFunction is performed
+    /// @retval false, if a minimization of the CostFunction is performed
+    ///
+    bool getMaximize() const;
+    
+    /// 
+    /// Returns the current status of lower bounds
+    /// 
+    /// @retval true, if at least one lower bound (different from -infinity) is set
+    /// @retval false, if no lower bounds are set (lower bound matrix just contains -infinity)
+    ///
+    bool lowerBoundsActive() const;
+    
+    /// 
+    /// Returns the current status of upper bounds
+    /// 
+    /// @retval true, if at least one upper bound (different from infinity) is set
+    /// @retval false, if no upper bounds are set (upper bound matrix just contains infinity)
+    ///
+    bool upperBoundsActive() const;
+    
+    
+    ///
+    /// @name Setters
+    ///
+    
+    ///
+    /// (Re)initialization of the current parameter values
+    ///
+    /// @param params parameter value matrix (must be m_numParams x 1)
+    ///
+    void setAllCurrentParameters(optimization::matrix_type& params);
+
+    ///
+    /// (Re)initialization of the current parameter values of active parameters
+    ///
+    /// @param params parameter value matrix (must be m_numActiveParams x 1)
+    ///
+    void setActiveCurrentParameters(optimization::matrix_type& params);
+    
+    /// 
+    /// (Re)initialization of the current scale setting
+    /// 
+    /// @param scales parameter scale matrix (must be m_numParams x 1)
+    ///
+    void setAllScales(optimization::matrix_type& scales);
+
+    /// 
+    /// (Re)initialization of the current scale setting for active parameters
+    /// 
+    /// @param scales parameter scale matrix (must be m_numActiveParams x 1)
+    ///
+    void setActiveScales(optimization::matrix_type& scales);
+    
+    ///
+    /// Definition of a lower bound for an individual parameter
+    /// 
+    /// @param paramnumber number of the parameter in the overall parameter set
+    /// @param lowerbound the lower bound for this parameter
+    ///
+    void setLowerBound(int paramnumber, float lowerbound);
+    
+    ///
+    /// Definition of an upper bound for an individual parameter
+    /// 
+    /// @param paramnumber number of the parameter in the overall parameter set
+    /// @param upperbound the upper bound for this parameter
+    ///
+    void setUpperBound(int paramnumber, float upperbound);
+    
+    ///
+    /// Deactivates all lower bounds resulting in a lower bound matrix just containing -infinity.
+    ///
+    void resetLowerBounds();
+    
+    ///
+    /// Deactivates all upper bounds resulting in an upper bound matrix just containing infinity.
+    ///
+    void resetUpperBounds();
+    
+    ///
+    /// Exchange of CostFunction. 
+    /// 
+    /// @param costfunc new CostFunction pointer
+    ///
+    void changeCostFunc(CostFunction* costfunc);
+    
+    ///
+    /// Definition of the optimization strategy. 
+    ///
+    /// @param maximize if maximize=true a maximization of the CostFunction will be performed, otherwise it is minimized (Default)
+    void setMaximize(bool maximize);
+    
+    
+  protected:
+
+  private:
+    ///
+    /// Computes a selection matrix from the selection vector of active parameters. It can be used to extract smaller matrices containing just the active params (or the scales and bounds for the active params).
+    ///
+    /// @return selection matrix with dimension m_numActiveParams x m_numParams
+    optimization::matrix_type computeSelectionMatrix() const;
+
+    //! cost function pointer
+    CostFunction* m_costfunc;  
+      
+    //! number of parameters
+    int m_numParams;
+    
+    //! number of active parameters (active parameters are the ones, which currently should be optimized)
+    int m_numActiveParams;
+    
+    //! holds the current parameters as parameter vector
+    optimization::matrix_type m_currentparams;
+    
+    //! holds the current scales for the parameters
+    optimization::matrix_type m_scales;
+    
+    //! upper bounds for parameters
+    optimization::matrix_type m_upperBounds;
+    
+    //! lower bounds for parameters 
+    optimization::matrix_type m_lowerBounds;
+    
+    //! selection matrix: defines the active parameters
+    optimization::matrix_type m_selection;
+    
+    //! flag defining whether the optimization problem is a maximization problem (true) or a minimization problem (false)
+    bool m_maximize; 
+    
+    //! flag defining whether lowerBounds are set
+    bool m_lowerBoundsActive;
+    
+    //! flag defining whether upperBounds are set
+    bool m_upperBoundsActive;
+    
+    //! pointer to dimension wrapper function
+    CostFunction* m_dimwrapper;
+};
+
+
+#endif /* _SimpleOptProblem_h_ */
+
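A minimal usage sketch for the interface above (a sketch under assumptions: that optimization::matrix_type offers an (rows, cols) constructor and operator()(row, col) element access, and that myCostFunc points to some existing CostFunction implementation -- none of this is fixed by the header itself):

    #include "optimization/SimpleOptProblem.h"

    void setupProblem(CostFunction* myCostFunc)        // myCostFunc: assumed, already constructed
    {
        optimization::matrix_type initialParams(2, 1); // 2 parameters as a column vector (assumed ctor)
        optimization::matrix_type scales(2, 1);
        initialParams(0, 0) = 1.0;  initialParams(1, 0) = -0.5;  // assumed element access
        scales(0, 0)        = 0.1;  scales(1, 0)        =  0.1;

        SimpleOptProblem prob(myCostFunc, initialParams, scales); // all params active by default

        prob.setLowerBound(0, 0.0f);    // constrain parameter 0 to [0, 10]
        prob.setUpperBound(0, 10.0f);
        prob.activateParam(1, false);   // optimize only parameter 0
        prob.setMaximize(false);        // minimize (the default)
    }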

+ 84 - 0
SimpleOptTestGrid.cpp

@@ -0,0 +1,84 @@
+/// @file: 	optimization/SimpleOptTestGrid.cpp
+/// @author:	Matthias Wacker
+/// @date:	2009-10-05
+
+#include "SimpleOptTestGrid.h"
+
+using namespace std;
+using namespace opt;
+SimpleOptTestGrid::SimpleOptTestGrid()
+{
+	m_iNumberOfOptimizers = 0;
+	m_iNumberOfProblems = 0;
+}
+
+
+SimpleOptTestGrid::~SimpleOptTestGrid()
+{
+}
+
+
+void SimpleOptTestGrid::addSimpleOptProblem(const SimpleOptProblem & optProb, const opt::matrix_type &solution, double successDist)
+{
+	m_vSimpleOptProblem.push_back(optProb);
+	m_vSolutions.push_back(solution);
+	m_vSuccessDistances.push_back(successDist);
+
+	m_iNumberOfProblems = m_vSimpleOptProblem.size();
+}
+
+
+void SimpleOptTestGrid::addSimpleOptimizer(SimpleOptimizer *opt)
+{
+	m_vSimpleOptimzers.push_back(opt);
+	m_iNumberOfOptimizers = m_vSimpleOptimzers.size();
+}
+
+
+bool SimpleOptTestGrid::test()
+{
+	bool success = true;
+
+	// for every optimizer
+	for(int optIdx=0; optIdx < m_iNumberOfOptimizers; ++optIdx)
+	{
+		// run every test
+		for(int probIdx=0; probIdx < m_iNumberOfProblems; ++probIdx)
+		{
+			bool tmpResult = test(optIdx,probIdx);
+			cout << "SOTG: single test with opt: " << optIdx << " and prob: " << probIdx;
+			if(tmpResult == true)
+			{
+				cout << " SUCCEEDED"<<endl; 
+			}
+			else
+			{
+				cout << " FAILED"<<endl;
+			}
+
+		}
+	}
+
+	return success;
+}
+
+
+
+bool SimpleOptTestGrid::test(int optIdx, int probIdx)
+{
+	// get a copy of the prob
+	SimpleOptProblem optProb = m_vSimpleOptProblem[probIdx];
+	matrix_type init = optProb.getAllCurrentParams();
+	
+	// start Optimization
+	m_vSimpleOptimzers[optIdx]->optimizeProb(optProb);
+
+	// check for success
+	matrix_type result = optProb.getAllCurrentParams();
+	double distres = ( result - m_vSolutions[probIdx] ).Norm(0);
+	double distinit = ( init - m_vSolutions[probIdx] ).Norm(0);
+
+	cout << "resDist: "<< distres << " from initial: "<< distinit << endl;
+
+	return distres < m_vSuccessDistances[probIdx];
+}
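The success criterion above is purely distance based: a run counts as a success exactly when the optimized parameters end up within the stored success distance of the known solution (restated below; Norm(0) is whichever vector norm the ICE matrix type exposes under that index):

    // success  <=>  || result - solution ||  <  successDist of this problem
    bool success = (result - m_vSolutions[probIdx]).Norm(0) < m_vSuccessDistances[probIdx];

Note that distinit is only printed for reference, so a run that barely moves the parameters can still succeed if the start point was already close enough.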

+ 97 - 0
SimpleOptTestGrid.h

@@ -0,0 +1,97 @@
+/// @file	optimization/SimpleOptTestGrid.h
+/// @author	Matthias Wacker
+/// @date
+///
+
+#ifndef _SIMPLEOPTTESTGRID_H_
+#define _SIMPLEOPTTESTGRID_H_
+
+#include "Opt_Namespace.h"
+#include "SimpleOptimizer.h"
+#include "SimpleOptProblem.h"
+
+#include <vector>
+
+namespace opt=optimization;
+
+///
+/// @class SimpleOptTestGrid defines a class to test SimpleOptimizer(s) with SimpleOptProblem(s)
+///
+/// gets a number of problems and a number of optimizers and tries to solve every problem with each optimizer
+///
+class SimpleOptTestGrid
+{
+public:
+	///
+	/// Default Constructor
+	///
+	SimpleOptTestGrid();
+
+	///
+	/// Default Destructor
+	///
+	~SimpleOptTestGrid();
+
+	///
+	/// adds a SimpleOptProb to the optimizer
+	///
+	///	@param optProb the problem to test (it is copied inside this routine -> this only works with copy constructors that implement a deep copy!)
+	///	@param solution the solution to the problem
+	///	@param successDist the maximum distance to the solution that is considered as success
+	///
+	void addSimpleOptProblem( const SimpleOptProblem & optProb, const opt::matrix_type &solution, double successDist);
+
+	///
+	/// adds a SimpleOptimizer
+	///
+	///	@param opt the optimizer (already configured!!)
+	///
+	void addSimpleOptimizer(SimpleOptimizer *opt);
+
+
+	///
+	/// runs the tests on the test grid
+	///
+	/// 	@return true in case of a full success, false in case of at least 1 failed test case
+	///
+	bool test();
+
+	///
+	/// runs a specific test 
+	///	
+	///	@param optIdx index of the optimizer (starting with 0)
+	///	@param probIdx index of the problem (starting with 0)
+	///	@return true if the result lies within the success distance of the known solution
+	///
+	bool test(int optIdx, int probIdx);
+
+
+	///
+	///	get optimizers
+	///
+	std::vector<SimpleOptimizer*> getOptimizers() const {return m_vSimpleOptimzers;};
+	
+private:
+	/// the optimizers
+	std::vector<SimpleOptimizer*> m_vSimpleOptimzers;
+	
+	/// number of optimizers
+	int m_iNumberOfOptimizers;
+
+	/// the problems
+	std::vector<SimpleOptProblem> m_vSimpleOptProblem;
+
+	/// the solutions
+	std::vector<opt::matrix_type> m_vSolutions;
+
+	/// the success distances
+	std::vector<double> m_vSuccessDistances;
+
+	/// number of Problems
+	int m_iNumberOfProblems;
+
+
+};
+#endif // _SIMPLEOPTTESTGRID_H_
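A sketch of how the grid is meant to be wired up (makeQuadraticProblem and knownOptimum are hypothetical helpers, and DownhillSimplexOptimizer merely stands in for any configured SimpleOptimizer subclass of this library):

    SimpleOptTestGrid grid;

    SimpleOptProblem prob     = makeQuadraticProblem(); // hypothetical: a fully set-up problem
    opt::matrix_type solution = knownOptimum();         // hypothetical: its known optimum
    grid.addSimpleOptProblem(prob, solution, 1e-3);     // success = within 1e-3 of the optimum

    DownhillSimplexOptimizer simplex;                   // already configured, see its header
    grid.addSimpleOptimizer(&simplex);

    bool allPassed = grid.test();                       // every optimizer against every problem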

+ 95 - 0
SimpleOptimizer.cpp

@@ -0,0 +1,95 @@
+//////////////////////////////////////////////////////////////////////
+//
+//	SimpleOptimizer.cpp: Implementation of the SimpleOptimizer class.
+//
+//	Written by Matthias Wacker
+//
+//////////////////////////////////////////////////////////////////////
+
+#include <iostream>
+
+#include "optimization/SimpleOptimizer.h"
+
+
+SimpleOptimizer::SimpleOptimizer(OptLogBase *loger) : SuperClass(loger)
+{
+}
+
+SimpleOptimizer::SimpleOptimizer( const SimpleOptimizer &opt) : SuperClass(opt)
+{
+}
+
+SimpleOptimizer  & SimpleOptimizer::operator=(const SimpleOptimizer &opt)
+{
+		
+	/*
+			avoid self-copy
+	*/
+	if(this != &opt)
+	{
+		
+		//
+		//	=operator of SuperClass
+		//
+		SuperClass::operator=(opt);
+
+		//
+		//	own values:
+		// currently no member variables
+	}
+
+  	return *this;
+}
+
+SimpleOptimizer::~SimpleOptimizer()
+{
+}
+
+
+void SimpleOptimizer::init()
+{
+	SuperClass::init();
+}
+
+
+int SimpleOptimizer::optimizeProb(SimpleOptProblem &optProb)
+{
+	// get settings
+	getSettings(optProb);
+
+	// run optimization
+	int returnReason = optimize(); // implementation in child!
+
+	//store result
+	setResult(optProb);	
+
+	// return the return reason	
+	return returnReason;
+}
+
+
+void SimpleOptimizer::setResult(SimpleOptProblem &optProb)
+{
+	matrix_type tmp = this->getLastParameters(); 
+	optProb.setActiveCurrentParameters(tmp);
+}
+
+void SimpleOptimizer::getSettings(SimpleOptProblem &optProb)
+{
+	// Costf
+	m_costFunction=optProb.getCostFunction();
+	m_numberOfParameters=m_costFunction->getNumOfParameters();
+	
+	// bounds
+	setLowerParameterBound(optProb.lowerBoundsActive(),optProb.getActiveLowerBounds());
+	setUpperParameterBound(optProb.upperBoundsActive(),optProb.getActiveUpperBounds());
+
+	// scales
+	setScales(optProb.getActiveScales());
+	
+	// initial params
+	setParameters(optProb.getActiveCurrentParams());
+
+	// max
+	setMaximize(optProb.getMaximize());
+}
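optimizeProb() is a template method: getSettings() copies the active sub-problem into the optimizer (cost function, bounds, scales, start parameters, optimization direction), the child's optimize() runs the actual iteration, and setResult() writes the optimized values back into the problem. From the caller's side this reduces to the following sketch (the optimizer choice and the helper are illustrative):

    SimpleOptProblem prob = makeQuadraticProblem();     // hypothetical helper, see above
    DownhillSimplexOptimizer simplex;                   // any concrete SimpleOptimizer
    int returnReason = simplex.optimizeProb(prob);      // return codes come from the Optimizer base
    opt::matrix_type best = prob.getAllCurrentParams(); // now holds the optimized parameters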

+ 85 - 0
SimpleOptimizer.h

@@ -0,0 +1,85 @@
+///
+///	@file	SimpleOptimizer.h: interface of the SimpleOptimizer class.
+///	@author	Matthias Wacker, Esther Platzer
+///
+
+#ifndef _SIMPLEOPTIMIZER_H_
+#define _SIMPLEOPTIMIZER_H_
+
+#include "optimization/Optimizer.h"
+#include "optimization/SimpleOptProblem.h"
+#include "optimization/Opt_Namespace.h"
+
+namespace opt=optimization;
+
+/*!
+	Abstract base class of all simple optimizers.
+ */
+class SimpleOptimizer : public Optimizer
+{
+	public:
+		/// the SuperClass is Optimizer
+		typedef 	Optimizer 	SuperClass;
+
+		
+		///
+		///	Constructor (also the default constructor)
+		///	@param loger OptLogBase * to existing log class or NULL
+		///
+		SimpleOptimizer(OptLogBase *loger=NULL);
+
+		///
+		///	Copy constructor
+		///	@param opt .. optimizer to copy
+		///
+		SimpleOptimizer( const SimpleOptimizer &opt);
+
+		///
+		///	assignment operator
+		///	@param opt optimizer to "copy"
+		///	@return self-reference
+		///
+		SimpleOptimizer  &operator=(const SimpleOptimizer &opt);
+
+
+		///
+		///	Destructor.
+		///
+		virtual ~SimpleOptimizer();
+
+		///
+		///	start the optimization for the simple optimization problem
+		///	@param optProb the optimization problem to solve
+		///
+		virtual int optimizeProb(SimpleOptProblem &optProb);
+
+
+	protected:
+		///
+		/// redeclaring the pure virtual optimization interface; the actual optimization loop is implemented in the child class
+		///
+		virtual int optimize() = 0;
+
+		///
+		///	do initializations (this is called from child class and calls superclass::init)
+		///
+		void init();
+
+		///
+		/// call after optimization to store the result in the optProblem 
+		///
+		///	@param optProb the optimization problem 
+		///
+		void setResult(SimpleOptProblem &optProb);
+
+		///
+		///	get all settings from the optimization problem
+		///
+		///	@param optProb the optimization problem
+		///
+		void getSettings(SimpleOptProblem &optProb);
+
+
+};
+
+#endif
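A minimal sketch of what a concrete subclass must provide (the class name and the loop body are invented; only the optimize() override point and the init() chain are prescribed by this header):

    class MyRandomSearchOptimizer : public SimpleOptimizer   // hypothetical
    {
    public:
        MyRandomSearchOptimizer(OptLogBase *loger=NULL) : SimpleOptimizer(loger) {}

    protected:
        virtual int optimize()
        {
            init();   // chains up to SuperClass::init()
            // ... iterate here, evaluating m_costFunction as configured by getSettings() ...
            return 0; // return reason; the codes are defined by the Optimizer base class
        }
    };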

+ 3 - 0
libdepend.inc

@@ -0,0 +1,3 @@
+$(call PKG_DESCRIPTION,Optimization toolbox)
+$(call PKG_VERSION,0.0.1)
+$(call PKG_DEPEND_EXT_ESSENTIAL,ICE)

+ 27 - 0
liboptimization.h

@@ -0,0 +1,27 @@
+/* library description */
+
+#ifndef __LIBOPTIMIZATION_H__
+#define __LIBOPTIMIZATION_H__
+
+#include "optimization/AdaptiveDirectionRandomSearchOptimizer.h"
+#include "optimization/AdditionalIceUtils.h"
+#include "optimization/CombinatorialOptimizer.h"
+#include "optimization/CostFunction.h" 
+#include "optimization/CostFunction_ndim_2ndOrder.h"
+#include "optimization/DimWrapperCostFunction.h"
+#include "optimization/DownhillSimplexOptimizer.h" 
+#include "optimization/BFGSOptimizer.h"
+#include "optimization/PowellBrentOptimizer.h"
+#include "optimization/GradientDescentOptimizer.h"
+#include "optimization/EmptyLog.h"
+#include "optimization/Optimizable.h"
+#include "optimization/SimpleOptimizer.h"
+#include "optimization/Optimizer.h"
+#include "optimization/SimpleOptProblem.h"
+#include "optimization/OptLogBase.h"
+#include "optimization/FileLog.h"
+#include "optimization/ParamLog.h"
+#include "optimization/Plotter.h"
+
+#endif /* __LIBOPTIMIZATION_H__ */
+
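Client code only needs the umbrella header above to use the toolbox (a trivial sketch):

    #include "optimization/liboptimization.h"

    int main()
    {
        // every class listed above (optimizers, problems, logs, plotter) is now visible
        return 0;
    }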