optimization_algorithm_dogleg.cpp
// g2o - General Graph Optimization
// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "optimization_algorithm_dogleg.h"

#include <iostream>

#include "../stuff/timeutil.h"

#include "block_solver.h"
#include "sparse_optimizer.h"
#include "solver.h"
#include "batch_stats.h"
using namespace std;

namespace g2o {

  OptimizationAlgorithmDogleg::OptimizationAlgorithmDogleg(BlockSolverBase* solver) :
    OptimizationAlgorithmWithHessian(solver)
  {
    _userDeltaInit = _properties.makeProperty<Property<double> >("initialDelta", 1e4);
    _maxTrialsAfterFailure = _properties.makeProperty<Property<int> >("maxTrialsAfterFailure", 100);
    _initialLambda = _properties.makeProperty<Property<double> >("initialLambda", 1e-7);
    _lamdbaFactor = _properties.makeProperty<Property<double> >("lambdaFactor", 10.);
    _delta = _userDeltaInit->value();
    _lastStep = STEP_UNDEFINED;
    _wasPDInAllIterations = true;
  }

  OptimizationAlgorithmDogleg::~OptimizationAlgorithmDogleg()
  {
  }

  OptimizationAlgorithm::SolverResult OptimizationAlgorithmDogleg::solve(int iteration, bool online)
  {
    assert(_optimizer && "_optimizer not set");
    assert(_solver->optimizer() == _optimizer && "underlying linear solver operates on different graph");
    assert(dynamic_cast<BlockSolverBase*>(_solver) && "underlying linear solver is not a block solver");

    BlockSolverBase* blockSolver = static_cast<BlockSolverBase*>(_solver);

    if (iteration == 0 && !online) { // build up the CCS structure; done here to simplify time measurement
      bool ok = _solver->buildStructure();
      if (! ok) {
        cerr << __PRETTY_FUNCTION__ << ": Failure while building CCS structure" << endl;
        return OptimizationAlgorithm::Fail;
      }

      // init some members to the current size of the problem
      _hsd.resize(_solver->vectorSize());
      _hdl.resize(_solver->vectorSize());
      _auxVector.resize(_solver->vectorSize());
      _delta = _userDeltaInit->value();
      _currentLambda = _initialLambda->value();
      _wasPDInAllIterations = true;
    }

    double t = get_monotonic_time();
    _optimizer->computeActiveErrors();
    G2OBatchStatistics* globalStats = G2OBatchStatistics::globalStats();
    if (globalStats) {
      globalStats->timeResiduals = get_monotonic_time() - t;
      t = get_monotonic_time();
    }

    double currentChi = _optimizer->activeRobustChi2();

    _solver->buildSystem();
    if (globalStats) {
      globalStats->timeQuadraticForm = get_monotonic_time() - t;
    }

    Eigen::VectorXd::ConstMapType b(_solver->b(), _solver->vectorSize());

    // compute alpha
    _auxVector.setZero();
    blockSolver->multiplyHessian(_auxVector.data(), _solver->b());
    double bNormSquared = b.squaredNorm();
    double alpha = bNormSquared / _auxVector.dot(b);
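    // alpha = (b^T b) / (b^T H b) minimizes the quadratic model along the
    // gradient direction b, so _hsd below is the (unconstrained) Cauchy step
    // of the classical dogleg construction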

    _hsd = alpha * b;
    double hsdNorm = _hsd.norm();
    double hgnNorm = -1.;

    bool solvedGaussNewton = false;
    bool goodStep = false;
    int& numTries = _lastNumTries;
    numTries = 0;
    do {
      ++numTries;

      if (! solvedGaussNewton) {
        const double minLambda = 1e-12;
        const double maxLambda = 1e3;
        solvedGaussNewton = true;
        // if the matrix appeared not to be positive definite in at least one
        // previous iteration, apply a damping factor to obtain a PD Hessian
        bool solverOk = false;
        while (!solverOk) {
          if (! _wasPDInAllIterations)
            _solver->setLambda(_currentLambda, true);   // add _currentLambda to the diagonal
          solverOk = _solver->solve();
          if (! _wasPDInAllIterations)
            _solver->restoreDiagonal();
          _wasPDInAllIterations = _wasPDInAllIterations && solverOk;
          if (! _wasPDInAllIterations) {
            // simple strategy to control the damping factor
            if (solverOk) {
              _currentLambda = std::max(minLambda, _currentLambda / (0.5 * _lamdbaFactor->value()));
            } else {
              _currentLambda *= _lamdbaFactor->value();
              if (_currentLambda > maxLambda) {
                _currentLambda = maxLambda;
                return Fail;
              }
            }
          }
        }
        if (!solverOk) {
          return Fail;
        }
        hgnNorm = Eigen::VectorXd::ConstMapType(_solver->x(), _solver->vectorSize()).norm();
      }

      Eigen::VectorXd::ConstMapType hgn(_solver->x(), _solver->vectorSize());
      assert(hgnNorm >= 0. && "Norm of the GN step is not computed");

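      // choose the step: the full Gauss-Newton step if it lies inside the trust
      // region; a truncated steepest descent step if even the Cauchy step leaves
      // the region; otherwise a blend of the two along the dogleg path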
      if (hgnNorm < _delta) {
        _hdl = hgn;
        _lastStep = STEP_GN;
      }
      else if (hsdNorm > _delta) {
        _hdl = _delta / hsdNorm * _hsd;
        _lastStep = STEP_SD;
      } else {
        _auxVector = hgn - _hsd;  // b - a
        double c = _hsd.dot(_auxVector);
        double bmaSquaredNorm = _auxVector.squaredNorm();
        double beta;
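        // beta is the root in (0,1) of ||_hsd + beta * (hgn - _hsd)||^2 = _delta^2;
        // the two branches below are algebraically equivalent forms of the quadratic
        // formula, selected by the sign of c to avoid numerical cancellation
        // (cf. Madsen, Nielsen, Tingleff: Methods for Non-Linear Least Squares Problems)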
        if (c <= 0.)
          beta = (-c + sqrt(c*c + bmaSquaredNorm * (_delta*_delta - _hsd.squaredNorm()))) / bmaSquaredNorm;
        else {
          double hsdSqrNorm = _hsd.squaredNorm();
          beta = (_delta*_delta - hsdSqrNorm) / (c + sqrt(c*c + bmaSquaredNorm * (_delta*_delta - hsdSqrNorm)));
        }
        assert(beta > 0. && beta < 1 && "Error while computing beta");
        _hdl = _hsd + beta * (hgn - _hsd);
        _lastStep = STEP_DL;
        assert(_hdl.norm() < _delta + 1e-5 && "Computed step does not correspond to the trust region");
      }

      // compute the linear gain
      _auxVector.setZero();
      blockSolver->multiplyHessian(_auxVector.data(), _hdl.data());
      double linearGain = -1 * (_auxVector.dot(_hdl)) + 2 * (b.dot(_hdl));
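      // for the chi2 objective the quadratic model is
      // F(x + h) ~= F(x) - 2 b^T h + h^T H h, so the predicted decrease is
      // 2 b^T h - h^T H h, evaluated here with _auxVector = H * _hdl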

      // apply the update and see what happens
      _optimizer->push();
      _optimizer->update(_hdl.data());
      _optimizer->computeActiveErrors();
      double newChi = _optimizer->activeRobustChi2();
      double nonLinearGain = currentChi - newChi;
      if (fabs(linearGain) < 1e-12)
        linearGain = 1e-12;
      double rho = nonLinearGain / linearGain;
      //cerr << PVAR(nonLinearGain) << " " << PVAR(linearGain) << " " << PVAR(rho) << endl;
      if (rho > 0) { // step is good and will be accepted
        _optimizer->discardTop();
        goodStep = true;
      } else { // recover previous state
        _optimizer->pop();
      }

      // update the trust region based on the step quality
      if (rho > 0.75)
        _delta = std::max(_delta, 3. * _hdl.norm());
      else if (rho < 0.25)
        _delta *= 0.5;
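      // rationale: rho close to 1 means the quadratic model predicted the actual
      // chi2 decrease well, so the region may grow; rho < 0.25 signals a poor
      // model fit, so the region shrinks before the next trial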
    } while (!goodStep && numTries < _maxTrialsAfterFailure->value());
    if (numTries == _maxTrialsAfterFailure->value() && !goodStep)
      return Terminate;
    return OK;
  }

  void OptimizationAlgorithmDogleg::printVerbose(std::ostream& os) const
  {
    os
      << "\t Delta= " << _delta
      << "\t step= " << stepType2Str(_lastStep)
      << "\t tries= " << _lastNumTries;
    if (! _wasPDInAllIterations)
      os << "\t lambda= " << _currentLambda;
  }

  const char* OptimizationAlgorithmDogleg::stepType2Str(int stepType)
  {
    switch (stepType) {
      case STEP_SD: return "Descent";
      case STEP_GN: return "GN";
      case STEP_DL: return "Dogleg";
      default: return "Undefined";
    }
  }

} // end namespace
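
// Usage sketch (illustrative, not part of this file): one way to plug the dogleg
// algorithm into a SparseOptimizer. The Eigen linear solver is an assumed choice;
// any LinearSolver matching the block solver's pose matrix type should work.
//
//   #include "g2o/core/sparse_optimizer.h"
//   #include "g2o/core/block_solver.h"
//   #include "g2o/core/optimization_algorithm_dogleg.h"
//   #include "g2o/solvers/eigen/linear_solver_eigen.h"
//
//   g2o::SparseOptimizer optimizer;
//   g2o::BlockSolverX::LinearSolverType* linearSolver =
//       new g2o::LinearSolverEigen<g2o::BlockSolverX::PoseMatrixType>(); // assumed backend
//   g2o::BlockSolverX* blockSolver = new g2o::BlockSolverX(linearSolver);
//   optimizer.setAlgorithm(new g2o::OptimizationAlgorithmDogleg(blockSolver));
//   // ... add vertices and edges, then:
//   optimizer.initializeOptimization();
//   optimizer.optimize(10);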