DoglegOptimizerImpl.h
Go to the documentation of this file.
1 /* ----------------------------------------------------------------------------
2 
3  * GTSAM Copyright 2010, Georgia Tech Research Corporation,
4  * Atlanta, Georgia 30332-0415
5  * All Rights Reserved
6  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
7 
8  * See LICENSE for the license information
9 
10  * -------------------------------------------------------------------------- */
11 
17 #pragma once
18 
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>

#include <gtsam/inference/Ordering.h>
#include <gtsam/linear/VectorValues.h>
24 namespace gtsam {
25 
32 struct GTSAM_EXPORT DoglegOptimizerImpl {
33 
34  struct GTSAM_EXPORT IterationResult {
35  double delta;
37  double f_error;
38  };
39 
56  ONE_STEP_PER_ITERATION
57  };
58 
94  template<class M, class F, class VALUES>
95  static IterationResult Iterate(
96  double delta, TrustRegionAdaptationMode mode, const VectorValues& dx_u, const VectorValues& dx_n,
97  const M& Rd, const F& f, const VALUES& x0, const double f_error, const bool verbose=false);
98 
121  static VectorValues ComputeDoglegPoint(double delta, const VectorValues& dx_u, const VectorValues& dx_n, const bool verbose=false);
122 
132  static VectorValues ComputeBlend(double delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
133 };
134 
135 
136 /* ************************************************************************* */
137 template<class M, class F, class VALUES>
139  double delta, TrustRegionAdaptationMode mode, const VectorValues& dx_u, const VectorValues& dx_n,
140  const M& Rd, const F& f, const VALUES& x0, const double f_error, const bool verbose)
141 {
142  gttic(M_error);
143  const double M_error = Rd.error(VectorValues::Zero(dx_u));
144  gttoc(M_error);
145 
146  // Result to return
148 
149  bool stay = true;
150  enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction = NONE; // Used to prevent alternating between increasing and decreasing in one iteration
151  while(stay) {
152  gttic(Dog_leg_point);
153  // Compute dog leg point
154  result.dx_d = ComputeDoglegPoint(delta, dx_u, dx_n, verbose);
155  gttoc(Dog_leg_point);
156 
157  if(verbose) std::cout << "delta = " << delta << ", dx_d_norm = " << result.dx_d.norm() << std::endl;
158 
159  gttic(retract);
160  // Compute expmapped solution
161  const VALUES x_d(x0.retract(result.dx_d));
162  gttoc(retract);
163 
164  gttic(decrease_in_f);
165  // Compute decrease in f
166  result.f_error = f.error(x_d);
167  gttoc(decrease_in_f);
168 
169  gttic(new_M_error);
170  // Compute decrease in M
171  const double new_M_error = Rd.error(result.dx_d);
172  gttoc(new_M_error);
173 
174  if(verbose) std::cout << std::setprecision(15) << "f error: " << f_error << " -> " << result.f_error << std::endl;
175  if(verbose) std::cout << std::setprecision(15) << "M error: " << M_error << " -> " << new_M_error << std::endl;
176 
177  gttic(adjust_delta);
178  // Compute gain ratio. Here we take advantage of the invariant that the
179  // Bayes' net error at zero is equal to the nonlinear error
180  const double rho = std::abs(f_error - result.f_error) < 1e-15 || std::abs(M_error - new_M_error) < 1e-15 ?
181  0.5 :
182  (f_error - result.f_error) / (M_error - new_M_error);
183 
184  if(verbose) std::cout << std::setprecision(15) << "rho = " << rho << std::endl;
185 
186  if(rho >= 0.75) {
187  // M agrees very well with f, so try to increase lambda
188  const double dx_d_norm = result.dx_d.norm();
189  const double newDelta = std::max(delta, 3.0 * dx_d_norm); // Compute new delta
190 
192  stay = false; // If not searching, just return with the new delta
193  else if(mode == SEARCH_EACH_ITERATION) {
194  if(std::abs(newDelta - delta) < 1e-15 || lastAction == DECREASED_DELTA)
195  stay = false; // Searching, but Newton's solution is within trust region so keep the same trust region
196  else {
197  stay = true; // Searching and increased delta, so try again to increase delta
198  lastAction = INCREASED_DELTA;
199  }
200  } else {
201  assert(false); }
202 
203  delta = newDelta; // Update delta from new delta
204 
205  } else if(0.75 > rho && rho >= 0.25) {
206  // M agrees so-so with f, keep the same delta
207  stay = false;
208 
209  } else if(0.25 > rho && rho >= 0.0) {
210  // M does not agree well with f, decrease delta until it does
211  double newDelta;
212  bool hitMinimumDelta;
213  if(delta > 1e-5) {
214  newDelta = 0.5 * delta;
215  hitMinimumDelta = false;
216  } else {
217  newDelta = delta;
218  hitMinimumDelta = true;
219  }
220  if(mode == ONE_STEP_PER_ITERATION || /* mode == SEARCH_EACH_ITERATION && */ lastAction == INCREASED_DELTA || hitMinimumDelta)
221  stay = false; // If not searching, just return with the new smaller delta
223  stay = true;
224  lastAction = DECREASED_DELTA;
225  } else {
226  assert(false); }
227 
228  delta = newDelta; // Update delta from new delta
229 
230  } else {
231  // f actually increased, so keep decreasing delta until f does not decrease.
232  // NOTE: NaN and Inf solutions also will fall into this case, so that we
233  // decrease delta if the solution becomes undetermined.
234  assert(0.0 > rho);
235  if(delta > 1e-5) {
236  delta *= 0.5;
237  stay = true;
238  lastAction = DECREASED_DELTA;
239  } else {
240  if(verbose) std::cout << "Warning: Dog leg stopping because cannot decrease error with minimum delta" << std::endl;
241  result.dx_d.setZero(); // Set delta to zero - don't allow error to increase
242  result.f_error = f_error;
243  stay = false;
244  }
245  }
246  gttoc(adjust_delta);
247  }
248 
249  // dx_d and f_error have already been filled in during the loop
250  result.delta = delta;
251  return result;
252 }
253 
254 }
gttoc
#define gttoc(label)
Definition: timing.h:296
e
Array< double, 1, 3 > e(1./3., 0.5, 2.)
gtsam::DoglegOptimizerImpl::Iterate
static IterationResult Iterate(double delta, TrustRegionAdaptationMode mode, const VectorValues &dx_u, const VectorValues &dx_n, const M &Rd, const F &f, const VALUES &x0, const double f_error, const bool verbose=false)
Definition: DoglegOptimizerImpl.h:138
gtsam::DoglegOptimizerImpl::ONE_STEP_PER_ITERATION
@ ONE_STEP_PER_ITERATION
Definition: DoglegOptimizerImpl.h:56
gtsam::DoglegOptimizerImpl
Definition: DoglegOptimizerImpl.h:32
gtsam::utils.numerical_derivative.retract
def retract(a, np.ndarray xi)
Definition: numerical_derivative.py:44
Ordering.h
Variable ordering for the elimination algorithm.
result
Values result
Definition: OdometryOptimize.cpp:8
gtsam::DoglegOptimizerImpl::IterationResult::delta
double delta
Definition: DoglegOptimizerImpl.h:35
gtsam::DoglegOptimizerImpl::IterationResult::dx_d
VectorValues dx_d
Definition: DoglegOptimizerImpl.h:36
gtsam::DoglegOptimizerImpl::ComputeDoglegPoint
static VectorValues ComputeDoglegPoint(double delta, const VectorValues &dx_u, const VectorValues &dx_n, const bool verbose=false)
Definition: DoglegOptimizerImpl.cpp:25
gtsam.examples.PlanarManipulatorExample.delta
def delta(g0, g1)
Definition: PlanarManipulatorExample.py:45
gtsam::VectorValues
Definition: VectorValues.h:74
mode
static const DiscreteKey mode(modeKey, 2)
gtsam::DoglegOptimizerImpl::TrustRegionAdaptationMode
TrustRegionAdaptationMode
Definition: DoglegOptimizerImpl.h:53
x0
static Symbol x0('x', 0)
gtsam::VectorValues::Zero
static VectorValues Zero(const VectorValues &other)
Definition: VectorValues.cpp:77
VectorValues.h
Factor Graph Values.
gtsam::symbol_shorthand::F
Key F(std::uint64_t j)
Definition: inference/Symbol.h:153
tree::f
Point2(* f)(const Point3 &, OptionalJacobian< 2, 3 >)
Definition: testExpression.cpp:218
gtsam
traits
Definition: SFMdata.h:40
gtsam::DoglegOptimizerImpl::SEARCH_REDUCE_ONLY
@ SEARCH_REDUCE_ONLY
Definition: DoglegOptimizerImpl.h:55
gtsam::DoglegOptimizerImpl::IterationResult
Definition: DoglegOptimizerImpl.h:34
exampleQR::Rd
Matrix Rd
Definition: testNoiseModel.cpp:215
gtsam::DoglegOptimizerImpl::SEARCH_EACH_ITERATION
@ SEARCH_EACH_ITERATION
Definition: DoglegOptimizerImpl.h:54
abs
#define abs(x)
Definition: datatypes.h:17
max
#define max(a, b)
Definition: datatypes.h:20
gttic
#define gttic(label)
Definition: timing.h:295
gtsam::DoglegOptimizerImpl::IterationResult::f_error
double f_error
Definition: DoglegOptimizerImpl.h:37
M
Matrix< RealScalar, Dynamic, Dynamic > M
Definition: bench_gemm.cpp:51


gtsam
Author(s):
autogenerated on Sat Nov 16 2024 04:02:13