/// Fletcher-Reeves formula: beta = g_n'*g_n / g_{n-1}'*g_{n-1}.
template <typename Gradient>
double FletcherReeves(const Gradient &currentGradient,
                      const Gradient &prevGradient) {
  return currentGradient.dot(currentGradient) / prevGradient.dot(prevGradient);
}

/// Polak-Ribiere formula: beta = max(0, g_n'*(g_n - g_{n-1}) / g_{n-1}'*g_{n-1}).
template <typename Gradient>
double PolakRibiere(const Gradient &currentGradient,
                    const Gradient &prevGradient) {
  return std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
                           prevGradient.dot(prevGradient));
}

/// Hestenes-Stiefel formula: beta = max(0, g_n'*d / (-s_{n-1}'*d)), with d = g_n - g_{n-1}.
template <typename Gradient>
double HestenesStiefel(const Gradient &currentGradient,
                       const Gradient &prevGradient,
                       const Gradient &direction) {
  const Gradient d = currentGradient - prevGradient;
  return std::max(0.0, currentGradient.dot(d) / -direction.dot(d));
}

/// Dai-Yuan formula: beta = max(0, g_n'*g_n / (-s_{n-1}'*(g_n - g_{n-1}))).
template <typename Gradient>
double DaiYuan(const Gradient &currentGradient, const Gradient &prevGradient,
               const Gradient &direction) {
  return std::max(0.0, currentGradient.dot(currentGradient) /
                           -direction.dot(currentGradient - prevGradient));
}

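/* Example (illustrative sketch, not part of the original header): the beta
 * formulas above only require a Gradient type with dot(), subtraction, and
 * scalar multiplication, so a plain Eigen::VectorXd works. Variable names are
 * placeholders and Eigen is an assumed dependency for this sketch.
 *
 *   Eigen::VectorXd prevGrad(2), currGrad(2), direction(2);
 *   prevGrad << 4.0, 2.0;
 *   currGrad << 1.0, -1.0;
 *   direction = prevGrad;                                  // first direction = gradient
 *   const double beta = PolakRibiere(currGrad, prevGrad);  // or any formula above
 *   direction = currGrad + beta * direction;               // conjugate direction update
 */
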
  /// Advance the current solution along direction g by step size alpha.
  Values advance(const Values &current, const double alpha,
                 const Gradient &g) const;

  /// Shared pointer to this optimizer.
  typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;

  /// Constructor.
  NonlinearConjugateGradientOptimizer(
      const NonlinearFactorGraph &graph, const Values &initialValues,
      const Parameters &params = Parameters(),
      const DirectionMethod &directionMethod = DirectionMethod::PolakRibiere);
};

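/* Example (illustrative sketch; assumes a gtsam::NonlinearFactorGraph `graph`
 * and gtsam::Values `initial` built elsewhere, plus the Parameters typedef and
 * the optimize() interface inherited from NonlinearOptimizer):
 *
 *   NonlinearConjugateGradientOptimizer::Parameters params;
 *   params.maxIterations = 100;
 *   NonlinearConjugateGradientOptimizer optimizer(graph, initial, params);
 *   Values result = optimizer.optimize();
 */
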
/**
 * Golden-section line search along the given gradient direction; see
 * http://en.wikipedia.org/wiki/Golden_section_search for details.
 */
template <class S, class V, class W>
double lineSearch(const S &system, const V currentValues, const W &gradient) {
  // Scale the initial bracket by the gradient norm.
  const double g = gradient.norm();

  // Golden-section constants and stopping tolerance.
  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
               tau = 1e-5;

  // Bracket the step size in [-1/g, 0] and pick an interior point.
  double minStep = -1.0 / g, maxStep = 0.0,
         newStep = minStep + (maxStep - minStep) / (phi + 1.0);

  V newValues = system.advance(currentValues, newStep, gradient);
  double newError = system.error(newValues);

  while (true) {
    const bool flag = (maxStep - newStep > newStep - minStep);
    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
                                 : newStep - resphi * (newStep - minStep);

    // Stop when the bracket is small relative to the current step estimates.
    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep)))
      return 0.5 * (minStep + maxStep);

    const V testValues = system.advance(currentValues, testStep, gradient);
    const double testError = system.error(testValues);

    // Shrink the bracket towards the step with the lower error.
    if (testError >= newError) {
      if (flag)
        maxStep = testStep;
      else
        minStep = testStep;
    } else {
      if (flag) {
        minStep = newStep;
      } else {
        maxStep = newStep;
      }
      newStep = testStep;
      newError = testError;
    }
  }
  return 0.0;  // unreachable; silences missing-return warnings
}

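/* Example (illustrative sketch, not part of the original header): a minimal
 * system type satisfying the interface expected by lineSearch above and
 * nonlinearConjugateGradient below, i.e. error(state), gradient(state) and
 * advance(state, step-size, direction). The quadratic objective and all names
 * are placeholders, and Eigen is an assumed dependency.
 *
 *   struct QuadraticSystem {
 *     typedef Eigen::VectorXd Gradient;
 *     Eigen::VectorXd target;
 *     double error(const Eigen::VectorXd &x) const {
 *       return 0.5 * (x - target).squaredNorm();
 *     }
 *     Gradient gradient(const Eigen::VectorXd &x) const { return x - target; }
 *     Eigen::VectorXd advance(const Eigen::VectorXd &x, const double alpha,
 *                             const Gradient &g) const {
 *       return x + alpha * g;  // lineSearch yields negative alpha, so this descends
 *     }
 *   };
 */
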
/**
 * Nonlinear conjugate gradient method (Polak-Ribiere by default). The S
 * (system) class must provide error(state), gradient(state) and
 * advance(state, step-size, direction); the V class denotes the state or the
 * solution. The last parameter switches to plain gradient descent.
 */
template <class S, class V>
std::tuple<V, size_t> nonlinearConjugateGradient(
    const S &system, const V &initial, const NonlinearOptimizerParams &params,
    const bool singleIteration,
    const DirectionMethod &directionMethod = DirectionMethod::PolakRibiere,
    const bool gradientDescent = false) {
  size_t iteration = 0;

  // Check whether we are already close enough to the optimum.
  double currentError = system.error(initial);
  if (currentError <= params.errorTol) {
    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
      std::cout << "Exiting, as error = " << currentError << " < "
                << params.errorTol << std::endl;
    return {initial, iteration};
  }

  V currentValues = initial;
  typename S::Gradient currentGradient = system.gradient(currentValues),
                       prevGradient, direction = currentGradient;

  // Do one step of gradient descent to bootstrap the iteration.
  V prevValues = currentValues;
  double prevError = currentError;
  double alpha = lineSearch(system, currentValues, direction);
  currentValues = system.advance(prevValues, alpha, direction);
  currentError = system.error(currentValues);

  // Maybe show output.
  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
    std::cout << "Initial error: " << currentError << std::endl;

  // Iterative loop.
  do {
    if (gradientDescent) {
      // Plain gradient descent: just follow the current gradient.
      direction = system.gradient(currentValues);
    } else {
      // Conjugate gradient: combine the new gradient with the old direction.
      prevGradient = currentGradient;
      currentGradient = system.gradient(currentValues);
      double beta;
      switch (directionMethod) {
        case DirectionMethod::FletcherReeves:
          beta = FletcherReeves(currentGradient, prevGradient); break;
        case DirectionMethod::PolakRibiere:
          beta = PolakRibiere(currentGradient, prevGradient); break;
        case DirectionMethod::HestenesStiefel:
          beta = HestenesStiefel(currentGradient, prevGradient, direction); break;
        case DirectionMethod::DaiYuan:
          beta = DaiYuan(currentGradient, prevGradient, direction); break;
        default:
          throw std::runtime_error(
              "NonlinearConjugateGradientOptimizer: Invalid directionMethod");
      }
      direction = currentGradient + (beta * direction);
    }
    alpha = lineSearch(system, currentValues, direction);

    prevValues = currentValues;
    prevError = currentError;

    currentValues = system.advance(prevValues, alpha, direction);
    currentError = system.error(currentValues);

    // User hook:
    if (params.iterationHook)
      params.iterationHook(iteration, prevError, currentError);

    // Maybe show output.
    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
      std::cout << "iteration: " << iteration
                << ", currentError: " << currentError << std::endl;
  } while (++iteration < params.maxIterations && !singleIteration &&
           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
                             params.errorTol, prevError, currentError,
                             params.verbosity));
  // Report termination due to reaching the maximum number of iterations.
  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
      iteration >= params.maxIterations)
    std::cout << "nonlinearConjugateGradient: Terminating because reached "
                 "maximum iterations"
              << std::endl;

  return {currentValues, iteration};
}
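
/* Example (illustrative sketch, continuing the QuadraticSystem placeholder
 * above; assumes NonlinearOptimizerParams as used in this header):
 *
 *   QuadraticSystem system{Eigen::VectorXd::Ones(3)};
 *   Eigen::VectorXd x0 = Eigen::VectorXd::Zero(3);
 *   NonlinearOptimizerParams params;
 *   params.maxIterations = 50;
 *   const auto [solution, iterations] =
 *       nonlinearConjugateGradient(system, x0, params, false);
 *   // solution approaches the all-ones target as the iterations converge
 */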