44 Gradient gradient(
const State &state)
const;
45 State advance(
const State &current,
const double alpha,
46 const Gradient &
g)
const;
53 typedef std::shared_ptr<NonlinearConjugateGradientOptimizer>
shared_ptr;
66 const Values& initialValues,
const Parameters&
params = Parameters());
86 template<
class S,
class V,
class W>
87 double lineSearch(
const S &system,
const V currentValues,
const W &gradient) {
90 const double g = gradient.norm();
94 const double phi = 0.5 * (1.0 +
std::sqrt(5.0)), resphi = 2.0 - phi, tau =
96 double minStep = -1.0 /
g, maxStep = 0, newStep = minStep
97 + (maxStep - minStep) / (phi + 1.0);
99 V newValues = system.advance(currentValues, newStep, gradient);
100 double newError = system.error(newValues);
103 const bool flag = (maxStep - newStep > newStep - minStep) ?
true :
false;
104 const double testStep =
105 flag ? newStep + resphi * (maxStep - newStep) :
106 newStep - resphi * (newStep - minStep);
108 if ((maxStep - minStep)
110 return 0.5 * (minStep + maxStep);
113 const V testValues = system.advance(currentValues, testStep, gradient);
114 const double testError = system.error(testValues);
117 if (testError >= newError) {
126 newError = testError;
130 newError = testError;
146 template<
class S,
class V>
149 const bool singleIteration,
const bool gradientDescent =
false) {
153 size_t iteration = 0;
156 double currentError = system.error(initial);
157 if (currentError <= params.
errorTol) {
159 std::cout <<
"Exiting, as error = " << currentError <<
" < " 166 typename S::Gradient currentGradient = system.gradient(currentValues),
167 prevGradient, direction = currentGradient;
170 V prevValues = currentValues;
171 double prevError = currentError;
173 currentValues = system.advance(prevValues, alpha, direction);
174 currentError = system.error(currentValues);
178 std::cout <<
"Initial error: " << currentError << std::endl;
182 if (gradientDescent ==
true) {
183 direction = system.gradient(currentValues);
185 prevGradient = currentGradient;
186 currentGradient = system.gradient(currentValues);
189 currentGradient.dot(currentGradient - prevGradient)
190 / prevGradient.dot(prevGradient));
191 direction = currentGradient + (beta * direction);
194 alpha =
lineSearch(system, currentValues, direction);
196 prevValues = currentValues;
197 prevError = currentError;
199 currentValues = system.advance(prevValues, alpha, direction);
200 currentError = system.error(currentValues);
208 std::cout <<
"iteration: " << iteration <<
", currentError: " << currentError << std::endl;
209 }
while (++iteration < params.
maxIterations && !singleIteration
217 <<
"nonlinearConjugateGradient: Terminating because reached maximum iterations" 220 return {currentValues, iteration};
const NonlinearFactorGraph & graph_
IterationHook iterationHook
NonlinearOptimizerParams Parameters
~NonlinearConjugateGradientOptimizer() override
Destructor.
Verbosity verbosity
The printing verbosity during optimization (default SILENT)
NonlinearOptimizerParams Parameters
NonlinearFactorGraph graph
double absoluteErrorTol
The maximum absolute error decrease to stop iterating (default 1e-5)
void g(const string &key, int i)
static const SmartProjectionParams params
double relativeErrorTol
The maximum relative error decrease to stop iterating (default 1e-5)
bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold, double errorThreshold, double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity)
double errorTol
The maximum total error to stop iterating (default 0.0)
std::tuple< V, int > nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent=false)
Base class and basic functions for Manifold types.
Point3 optimize(const NonlinearFactorGraph &graph, const Values &values, Key landmarkKey)
Array< double, 1, 3 > e(1./3., 0.5, 2.)
std::shared_ptr< This > shared_ptr
shared_ptr to this class
System(const NonlinearFactorGraph &graph)
double lineSearch(const S &system, const V currentValues, const W &gradient)
std::shared_ptr< NonlinearConjugateGradientOptimizer > shared_ptr
Jet< T, N > sqrt(const Jet< T, N > &f)
Base class and parameters for nonlinear optimization algorithms.
size_t maxIterations
The maximum iterations to stop iterating (default 100)
const NonlinearOptimizerParams & _params() const override