testNonlinearOptimizer.cpp
Go to the documentation of this file.
1 /* ----------------------------------------------------------------------------
2 
3  * GTSAM Copyright 2010, Georgia Tech Research Corporation,
4  * Atlanta, Georgia 30332-0415
5  * All Rights Reserved
6  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
7 
8  * See LICENSE for the license information
9 
10  * -------------------------------------------------------------------------- */
11 
18 #include <tests/smallExample.h>
21 #include <gtsam/nonlinear/Values.h>
28 #include <gtsam/inference/Symbol.h>
29 #include <gtsam/geometry/Pose2.h>
30 #include <gtsam/base/Matrix.h>
31 
33 
34 
35 #include <iostream>
36 #include <fstream>
37 
38 using namespace std;
39 using namespace gtsam;
40 
41 const double tol = 1e-5;
42 
45 
46 /* ************************************************************************* */
47 TEST( NonlinearOptimizer, paramsEquals )
48 {
49  // default constructors lead to two identical params
50  GaussNewtonParams gnParams1;
51  GaussNewtonParams gnParams2;
52  CHECK(gnParams1.equals(gnParams2));
53 
54  // but the params become different if we change something in gnParams2
55  gnParams2.setVerbosity("DELTA");
56  CHECK(!gnParams1.equals(gnParams2));
57 }
58 
59 /* ************************************************************************* */
60 TEST( NonlinearOptimizer, iterateLM )
61 {
62  // really non-linear factor graph
64 
65  // config far from minimum
66  Point2 x0(3,0);
67  Values config;
68  config.insert(X(1), x0);
69 
70  // normal iterate
71  GaussNewtonParams gnParams;
72  GaussNewtonOptimizer gnOptimizer(fg, config, gnParams);
73  gnOptimizer.iterate();
74 
75  // LM iterate with lambda 0 should be the same
77  lmParams.lambdaInitial = 0.0;
78  LevenbergMarquardtOptimizer lmOptimizer(fg, config, lmParams);
79  lmOptimizer.iterate();
80 
81  CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9));
82 }
83 
84 /* ************************************************************************* */
86 {
88 
89  // test error at minimum
90  Point2 xstar(0,0);
91  Values cstar;
92  cstar.insert(X(1), xstar);
93  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
94 
95  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
96  Point2 x0(3,3);
97  Values c0;
98  c0.insert(X(1), x0);
99  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
100 
101  // optimize parameters
102  Ordering ord;
103  ord.push_back(X(1));
104 
105  // Gauss-Newton
106  GaussNewtonParams gnParams;
107  gnParams.ordering = ord;
108  Values actual1 = GaussNewtonOptimizer(fg, c0, gnParams).optimize();
109  DOUBLES_EQUAL(0,fg.error(actual1),tol);
110 
111  // Levenberg-Marquardt
113  lmParams.ordering = ord;
115  DOUBLES_EQUAL(0,fg.error(actual2),tol);
116 
117  // Dogleg
118  DoglegParams dlParams;
119  dlParams.ordering = ord;
120  Values actual3 = DoglegOptimizer(fg, c0, dlParams).optimize();
121  DOUBLES_EQUAL(0,fg.error(actual3),tol);
122 }
123 
124 /* ************************************************************************* */
125 TEST( NonlinearOptimizer, SimpleLMOptimizer )
126 {
128 
129  Point2 x0(3,3);
130  Values c0;
131  c0.insert(X(1), x0);
132 
133  Values actual = LevenbergMarquardtOptimizer(fg, c0).optimize();
134  DOUBLES_EQUAL(0,fg.error(actual),tol);
135 }
136 
137 /* ************************************************************************* */
138 TEST( NonlinearOptimizer, SimpleGNOptimizer )
139 {
141 
142  Point2 x0(3,3);
143  Values c0;
144  c0.insert(X(1), x0);
145 
146  Values actual = GaussNewtonOptimizer(fg, c0).optimize();
147  DOUBLES_EQUAL(0,fg.error(actual),tol);
148 }
149 
150 /* ************************************************************************* */
151 TEST( NonlinearOptimizer, SimpleDLOptimizer )
152 {
154 
155  Point2 x0(3,3);
156  Values c0;
157  c0.insert(X(1), x0);
158 
159  Values actual = DoglegOptimizer(fg, c0).optimize();
160  DOUBLES_EQUAL(0,fg.error(actual),tol);
161 }
162 
163 /* ************************************************************************* */
164 TEST( NonlinearOptimizer, optimization_method )
165 {
166  LevenbergMarquardtParams paramsQR;
167  paramsQR.linearSolverType = LevenbergMarquardtParams::MULTIFRONTAL_QR;
168  LevenbergMarquardtParams paramsChol;
169  paramsChol.linearSolverType = LevenbergMarquardtParams::MULTIFRONTAL_CHOLESKY;
170 
172 
173  Point2 x0(3,3);
174  Values c0;
175  c0.insert(X(1), x0);
176 
177  Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize();
178  DOUBLES_EQUAL(0,fg.error(actualMFQR),tol);
179 
180  Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize();
181  DOUBLES_EQUAL(0,fg.error(actualMFChol),tol);
182 }
183 
184 /* ************************************************************************* */
185 TEST( NonlinearOptimizer, Factorization )
186 {
187  Values config;
188  config.insert(X(1), Pose2(0.,0.,0.));
189  config.insert(X(2), Pose2(1.5,0.,0.));
190 
192  graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
193  graph.emplace_shared<BetweenFactor<Pose2>>(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));
194 
196  ordering.push_back(X(1));
197  ordering.push_back(X(2));
198 
200  LevenbergMarquardtParams::SetLegacyDefaults(&params);
201  LevenbergMarquardtOptimizer optimizer(graph, config, ordering, params);
202  optimizer.iterate();
203 
205  expected.insert(X(1), Pose2(0.,0.,0.));
206  expected.insert(X(2), Pose2(1.,0.,0.));
207  CHECK(assert_equal(expected, optimizer.values(), 1e-5));
208 }
209 
210 /* ************************************************************************* */
211 TEST(NonlinearOptimizer, NullFactor) {
212 
214 
215  // Add null factor
216  fg.push_back(NonlinearFactorGraph::sharedFactor());
217 
218  // test error at minimum
219  Point2 xstar(0,0);
220  Values cstar;
221  cstar.insert(X(1), xstar);
222  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
223 
224  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
225  Point2 x0(3,3);
226  Values c0;
227  c0.insert(X(1), x0);
228  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
229 
230  // optimize parameters
231  Ordering ord;
232  ord.push_back(X(1));
233 
234  // Gauss-Newton
235  Values actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize();
236  DOUBLES_EQUAL(0,fg.error(actual1),tol);
237 
238  // Levenberg-Marquardt
239  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize();
240  DOUBLES_EQUAL(0,fg.error(actual2),tol);
241 
242  // Dogleg
243  Values actual3 = DoglegOptimizer(fg, c0, ord).optimize();
244  DOUBLES_EQUAL(0,fg.error(actual3),tol);
245 }
246 
247 /* ************************************************************************* */
248 TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) {
249 
251  fg.addPrior(0, Pose2(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1));
252  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(1, 0, M_PI / 2),
253  noiseModel::Isotropic::Sigma(3, 1));
254  fg.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(1, 0, M_PI / 2),
255  noiseModel::Isotropic::Sigma(3, 1));
256 
257  Values init;
258  init.insert(0, Pose2(3, 4, -M_PI));
259  init.insert(1, Pose2(10, 2, -M_PI));
260  init.insert(2, Pose2(11, 7, -M_PI));
261 
263  expected.insert(0, Pose2(0, 0, 0));
264  expected.insert(1, Pose2(1, 0, M_PI / 2));
265  expected.insert(2, Pose2(1, 1, M_PI));
266 
267  VectorValues expectedGradient;
268  expectedGradient.insert(0,Z_3x1);
269  expectedGradient.insert(1,Z_3x1);
270  expectedGradient.insert(2,Z_3x1);
271 
272  // Try LM and Dogleg
273  LevenbergMarquardtParams params = LevenbergMarquardtParams::LegacyDefaults();
274  {
275  LevenbergMarquardtOptimizer optimizer(fg, init, params);
276 
277  // test convergence
278  Values actual = optimizer.optimize();
279  EXPECT(assert_equal(expected, actual));
280 
281  // Check that the gradient is zero
282  GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
283  EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
284  }
286 
287 // cout << "===================================================================================" << endl;
288 
289  // Try LM with diagonal damping
290  Values initBetter;
291  initBetter.insert(0, Pose2(3,4,0));
292  initBetter.insert(1, Pose2(10,2,M_PI/3));
293  initBetter.insert(2, Pose2(11,7,M_PI/2));
294 
295  {
296  params.diagonalDamping = true;
297  LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
298 
299  // test the diagonal
300  GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
301  VectorValues d = linear->hessianDiagonal();
302  VectorValues sqrtHessianDiagonal = d;
303  for (auto& [key, value] : sqrtHessianDiagonal) {
304  value = value.cwiseSqrt();
305  }
306  GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear, sqrtHessianDiagonal);
307  VectorValues expectedDiagonal = d + params.lambdaInitial * d;
308  EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
309 
310  // test convergence (does not!)
311  Values actual = optimizer.optimize();
312  EXPECT(assert_equal(expected, actual));
313 
314  // Check that the gradient is zero (it is not!)
315  linear = optimizer.linearize();
316  EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
317 
318  // Check that the gradient is zero for damped system (it is not!)
319  damped = optimizer.buildDampedSystem(*linear, sqrtHessianDiagonal);
320  VectorValues actualGradient = damped.gradientAtZero();
321  EXPECT(assert_equal(expectedGradient,actualGradient));
322 
323  /* This block was made to test the original initial guess "init"
324  // Check errors at convergence and errors in direction of gradient (decreases!)
325  EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
326  EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
327 
328  // Check that solve yields gradient (it's not equal to gradient, as predicted)
329  VectorValues delta = damped.optimize();
330  double factor = actualGradient[0][0]/delta[0][0];
331  EXPECT(assert_equal(actualGradient,factor*delta));
332 
333  // Still pointing downhill wrt actual gradient !
334  EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
335 
336  // delta.print("This is the delta value computed by LM with diagonal damping");
337 
338  // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
339  EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
340 
341  // Check errors at convergence and errors in direction of solution (does not decrease!)
342  EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
343 
344  // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
345  EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
346  */
347  }
348 }
349 
350 /* ************************************************************************* */
351 TEST(NonlinearOptimizer, Pose2OptimizationWithHuberNoOutlier) {
352 
354  fg.addPrior(0, Pose2(0,0,0), noiseModel::Isotropic::Sigma(3,1));
355  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(1,1.1,M_PI/4),
356  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(2.0),
357  noiseModel::Isotropic::Sigma(3,1)));
358  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(1,0.9,M_PI/2),
359  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(3.0),
360  noiseModel::Isotropic::Sigma(3,1)));
361 
362  Values init;
363  init.insert(0, Pose2(0,0,0));
364  init.insert(1, Pose2(0.961187, 0.99965, 1.1781));
365 
367  expected.insert(0, Pose2(0,0,0));
368  expected.insert(1, Pose2(0.961187, 0.99965, 1.1781));
369 
370  LevenbergMarquardtParams lm_params;
371 
372  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
373  auto lm_result = LevenbergMarquardtOptimizer(fg, init, lm_params).optimize();
374  auto dl_result = DoglegOptimizer(fg, init).optimize();
375 
376  EXPECT(assert_equal(expected, gn_result, 3e-2));
377  EXPECT(assert_equal(expected, lm_result, 3e-2));
378  EXPECT(assert_equal(expected, dl_result, 3e-2));
379 }
380 
381 /* ************************************************************************* */
// Linear (Point2) problem: a tight 0.01-sigma prior on point 0 plus three
// Huber-robustified measurements to point 1; all three optimizers must agree
// on the robust solution (1, 1.85).
382 TEST(NonlinearOptimizer, Point2LinearOptimizationWithHuber) {
383 
// NOTE(review): source lines 384, 386, 389 and 392 were dropped by the doc
// extraction. 384 presumably declared the graph ("NonlinearFactorGraph fg;")
// and 386/389/392 were the three BetweenFactor<Point2> measurement lines that
// pair with the robust noise models kept below — confirm against the original
// testNonlinearOptimizer.cpp.
385  fg.addPrior(0, Point2(0,0), noiseModel::Isotropic::Sigma(2,0.01));
387  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
388  noiseModel::Isotropic::Sigma(2,1)));
390  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
391  noiseModel::Isotropic::Sigma(2,1)));
393  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
394  noiseModel::Isotropic::Sigma(2,1)));
395 
396  Values init;
397  init.insert(0, Point2(1,1));
398  init.insert(1, Point2(1,0));
399 
// NOTE(review): line 400 (presumably "Values expected;") was dropped by the
// doc extraction.
401  expected.insert(0, Point2(0,0));
402  expected.insert(1, Point2(1,1.85));
403 
// NOTE(review): line 404 (declaration of the "params" passed to LM below) was
// dropped by the doc extraction.
405 
406  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
407  auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
408  auto dl_result = DoglegOptimizer(fg, init).optimize();
409 
410  EXPECT(assert_equal(expected, gn_result, 1e-4));
411  EXPECT(assert_equal(expected, lm_result, 1e-4));
412  EXPECT(assert_equal(expected, dl_result, 1e-4));
413 }
414 
415 /* ************************************************************************* */
416 TEST(NonlinearOptimizer, Pose2OptimizationWithHuber) {
417 
419  fg.addPrior(0, Pose2(0,0, 0), noiseModel::Isotropic::Sigma(3,0.1));
420  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(0,9, M_PI/2),
421  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
422  noiseModel::Isotropic::Sigma(3,1)));
423  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(0, 11, M_PI/2),
424  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
425  noiseModel::Isotropic::Sigma(3,1)));
426  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(0, 10, M_PI/2),
427  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
428  noiseModel::Isotropic::Sigma(3,1)));
429  fg.emplace_shared<BetweenFactor<Pose2>>(0, 1, Pose2(0,9, 0),
430  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
431  noiseModel::Isotropic::Sigma(3,1)));
432 
433  Values init;
434  init.insert(0, Pose2(0, 0, 0));
435  init.insert(1, Pose2(0, 10, M_PI/4));
436 
438  expected.insert(0, Pose2(0, 0, 0));
439  expected.insert(1, Pose2(0, 10, 1.45212));
440 
442 
443  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
444  auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
445  auto dl_result = DoglegOptimizer(fg, init).optimize();
446 
447  EXPECT(assert_equal(expected, gn_result, 1e-1));
448  EXPECT(assert_equal(expected, lm_result, 1e-1));
449  EXPECT(assert_equal(expected, dl_result, 1e-1));
450 }
451 
452 /* ************************************************************************* */
453 TEST(NonlinearOptimizer, RobustMeanCalculation) {
454 
456 
457  Values init;
458 
460 
461  auto huber = noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(20),
462  noiseModel::Isotropic::Sigma(1, 1));
463 
464  vector<double> pts{-10,-3,-1,1,3,10,1000};
465  for(auto pt : pts) {
466  fg.addPrior(0, pt, huber);
467  }
468 
469  init.insert(0, 100.0);
470  expected.insert(0, 3.33333333);
471 
472  DoglegParams params_dl;
473  params_dl.setRelativeErrorTol(1e-10);
474 
475  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
476  auto lm_result = LevenbergMarquardtOptimizer(fg, init).optimize();
477  auto dl_result = DoglegOptimizer(fg, init, params_dl).optimize();
478 
479  EXPECT(assert_equal(expected, gn_result, tol));
480  EXPECT(assert_equal(expected, lm_result, tol));
481  EXPECT(assert_equal(expected, dl_result, tol));
482 }
483 
484 /* ************************************************************************* */
485 TEST(NonlinearOptimizer, disconnected_graph) {
487  expected.insert(X(1), Pose2(0.,0.,0.));
488  expected.insert(X(2), Pose2(1.5,0.,0.));
489  expected.insert(X(3), Pose2(3.0,0.,0.));
490 
491  Values init;
492  init.insert(X(1), Pose2(0.,0.,0.));
493  init.insert(X(2), Pose2(0.,0.,0.));
494  init.insert(X(3), Pose2(0.,0.,0.));
495 
497  graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3,1));
498  graph.emplace_shared<BetweenFactor<Pose2>>(X(1),X(2), Pose2(1.5,0.,0.), noiseModel::Isotropic::Sigma(3,1));
499  graph.addPrior(X(3), Pose2(3.,0.,0.), noiseModel::Isotropic::Sigma(3,1));
500 
502 }
503 
504 /* ************************************************************************* */
505 #include <gtsam/linear/iterative.h>
506 
511 
512  public:
514  IterativeLM(const NonlinearFactorGraph& graph, const Values& initialValues,
517  LevenbergMarquardtParams::LegacyDefaults())
518  : LevenbergMarquardtOptimizer(graph, initialValues, params),
519  cgParams_(p),
520  initial_(initialValues) {}
521 
524  const NonlinearOptimizerParams& params) const override {
525  VectorValues zeros = initial_.zeroVectors();
526  return conjugateGradientDescent(gfg, zeros, cgParams_);
527  }
528 };
529 
530 /* ************************************************************************* */
531 TEST(NonlinearOptimizer, subclass_solver) {
533  expected.insert(X(1), Pose2(0., 0., 0.));
534  expected.insert(X(2), Pose2(1.5, 0., 0.));
535  expected.insert(X(3), Pose2(3.0, 0., 0.));
536 
537  Values init;
538  init.insert(X(1), Pose2(0., 0., 0.));
539  init.insert(X(2), Pose2(0., 0., 0.));
540  init.insert(X(3), Pose2(0., 0., 0.));
541 
543  graph.addPrior(X(1), Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
544  graph.emplace_shared<BetweenFactor<Pose2>>(X(1), X(2), Pose2(1.5, 0., 0.),
545  noiseModel::Isotropic::Sigma(3, 1));
546  graph.addPrior(X(3), Pose2(3., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
547 
549  Values actual = IterativeLM(graph, init, p).optimize();
550  EXPECT(assert_equal(expected, actual, 1e-4));
551 }
552 
553 /* ************************************************************************* */
// NOTE(review): the TEST(...) header (source line 554) and the lines that
// declared the graph and LM parameters, assigned lmParams.logFile = filename,
// and ran the optimizer (source lines 556, 563, 565, 566) were dropped by the
// doc extraction — confirm against the original testNonlinearOptimizer.cpp.
// Judging by the filename and the disabled check below, this test exercised
// the LevenbergMarquardtParams::logFile CSV logging feature.
555 {
557 
558  Point2 x0(3,3);
559  Values c0;
560  c0.insert(X(1), x0);
561 
562  // Levenberg-Marquardt
564  static const string filename("testNonlinearOptimizer.log");
567 
// The file comparison below is disabled; it compared the freshly written log
// against a checked-in reference copy.
568 // stringstream expected,actual;
569 // ifstream ifs(("../../gtsam/tests/" + filename).c_str());
570 // if(!ifs) throw std::runtime_error(filename);
571 // expected << ifs.rdbuf();
572 // ifs.close();
573 // ifstream ifs2(filename.c_str());
574 // if(!ifs2) throw std::runtime_error(filename);
575 // actual << ifs2.rdbuf();
576 // EXPECT(actual.str()==expected.str());
577 }
578 
579 /* ************************************************************************* */
580 TEST( NonlinearOptimizer, iterationHook_LM )
581 {
583 
584  Point2 x0(3,3);
585  Values c0;
586  c0.insert(X(1), x0);
587 
588  // Levenberg-Marquardt
590  size_t lastIterCalled = 0;
591  lmParams.iterationHook = [&](size_t iteration, double oldError, double newError)
592  {
593  // Tests:
594  lastIterCalled = iteration;
595  EXPECT(newError<oldError);
596 
597  // Example of evolution printout:
598  //std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
599  };
601 
602  EXPECT(lastIterCalled>5);
603 }
604 /* ************************************************************************* */
605 TEST( NonlinearOptimizer, iterationHook_CG )
606 {
608 
609  Point2 x0(3,3);
610  Values c0;
611  c0.insert(X(1), x0);
612 
613  // Levenberg-Marquardt
615  size_t lastIterCalled = 0;
616  cgParams.iterationHook = [&](size_t iteration, double oldError, double newError)
617  {
618  // Tests:
619  lastIterCalled = iteration;
620  EXPECT(newError<oldError);
621 
622  // Example of evolution printout:
623  //std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
624  };
625  NonlinearConjugateGradientOptimizer(fg, c0, cgParams).optimize();
626 
627  EXPECT(lastIterCalled>5);
628 }
629 
630 
631 /* ************************************************************************* */
633 struct MyType : public Vector3 {
634  using Vector3::Vector3;
635 };
636 
637 namespace gtsam {
638 template <>
639 struct traits<MyType> {
640  static bool Equals(const MyType& a, const MyType& b, double tol) {
641  return (a - b).array().abs().maxCoeff() < tol;
642  }
643  static void Print(const MyType&, const string&) {}
644  static int GetDimension(const MyType&) { return 3; }
645  static MyType Retract(const MyType& a, const Vector3& b) { return a + b; }
646  static Vector3 Local(const MyType& a, const MyType& b) { return b - a; }
647 };
648 }
649 
// NOTE(review): the TEST(...) header and the graph declaration (source lines
// 650-651) were dropped by the doc extraction. The visible body checks that a
// custom type registered through the traits<MyType> specialization in this
// file can be used in a factor graph and survives optimization unchanged when
// starting at the prior's mean.
652  fg.addPrior(0, MyType(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1));
653 
654  Values init;
655  init.insert(0, MyType(0,0,0));
656 
657  LevenbergMarquardtOptimizer optimizer(fg, init);
658  Values actual = optimizer.optimize();
659  EXPECT(assert_equal(init, actual));
660 }
661 
662 /* ************************************************************************* */
663 int main() {
664  TestResult tr;
665  return TestRegistry::runAllTests(tr);
666 }
667 /* ************************************************************************* */
TestRegistry::runAllTests
static int runAllTests(TestResult &result)
Definition: TestRegistry.cpp:27
NonlinearConjugateGradientOptimizer.h
Simple non-linear optimizer that solves using non-preconditioned CG.
Pose2.h
2D Pose
IterativeLM::solve
VectorValues solve(const GaussianFactorGraph &gfg, const NonlinearOptimizerParams &params) const override
Solve that uses conjugate gradient.
Definition: testNonlinearOptimizer.cpp:523
gtsam::NonlinearOptimizer::optimize
virtual const Values & optimize()
Definition: NonlinearOptimizer.h:98
gtsam::GaussianFactorGraph::hessianDiagonal
virtual VectorValues hessianDiagonal() const
Definition: GaussianFactorGraph.cpp:279
IterativeLM::IterativeLM
IterativeLM(const NonlinearFactorGraph &graph, const Values &initialValues, const ConjugateGradientParameters &p, const LevenbergMarquardtParams &params=LevenbergMarquardtParams::LegacyDefaults())
Constructor.
Definition: testNonlinearOptimizer.cpp:514
GaussianFactorGraph.h
Linear Factor Graph where all factors are Gaussians.
gtsam::GaussianFactorGraph::shared_ptr
std::shared_ptr< This > shared_ptr
shared_ptr to this class
Definition: GaussianFactorGraph.h:82
gtsam::GaussNewtonOptimizer
Definition: GaussNewtonOptimizer.h:38
e
Array< double, 1, 3 > e(1./3., 0.5, 2.)
d
static const double d[K][N]
Definition: igam.h:11
EXPECT
#define EXPECT(condition)
Definition: Test.h:150
optimize
int optimize(const SfmData &db, const NonlinearFactorGraph &graph, const Values &initial, bool separateCalibration=false)
Definition: timeSFMBAL.h:64
gtsam::LevenbergMarquardtOptimizer::iterate
GaussianFactorGraph::shared_ptr iterate() override
Definition: LevenbergMarquardtOptimizer.cpp:273
gtsam::Z_3x1
static const Eigen::MatrixBase< Vector3 >::ConstantReturnType Z_3x1
Definition: Vector.h:46
TestHarness.h
gtsam::NonlinearOptimizerParams::setVerbosity
void setVerbosity(const std::string &src)
Definition: NonlinearOptimizerParams.h:59
IterativeLM::initial_
Values initial_
Definition: testNonlinearOptimizer.cpp:510
gtsam::NonlinearOptimizer::values
const Values & values() const
return values in current optimizer state
Definition: NonlinearOptimizer.cpp:57
gtsam::VectorValues::insert
iterator insert(const std::pair< Key, Vector > &key_value)
Definition: VectorValues.cpp:90
b
Scalar * b
Definition: benchVecAdd.cpp:17
LevenbergMarquardtOptimizer.h
A nonlinear optimizer that uses the Levenberg-Marquardt trust-region scheme.
Matrix.h
typedef and functions to augment Eigen's MatrixXd
IterativeLM::cgParams_
ConjugateGradientParameters cgParams_
Solver specific parameters.
Definition: testNonlinearOptimizer.cpp:509
lmParams
LevenbergMarquardtParams lmParams
Definition: testSmartProjectionRigFactor.cpp:55
pt
static const Point3 pt(1.0, 2.0, 3.0)
gtsam::NonlinearOptimizerParams::setRelativeErrorTol
void setRelativeErrorTol(double value)
Definition: NonlinearOptimizerParams.h:56
X
#define X
Definition: icosphere.cpp:20
gtsam::Vector3
Eigen::Vector3d Vector3
Definition: Vector.h:43
tol
const double tol
Definition: testNonlinearOptimizer.cpp:41
gtsam::traits< MyType >::Print
static void Print(const MyType &, const string &)
Definition: testNonlinearOptimizer.cpp:643
vanilla::params
static const SmartProjectionParams params
Definition: smartFactorScenarios.h:69
gtsam::NonlinearOptimizerParams::iterationHook
IterationHook iterationHook
Definition: NonlinearOptimizerParams.h:95
MyType
Definition: testNonlinearOptimizer.cpp:633
gtsam::traits< MyType >::Retract
static MyType Retract(const MyType &a, const Vector3 &b)
Definition: testNonlinearOptimizer.cpp:645
gtsam::DoglegOptimizer
Definition: DoglegOptimizer.h:68
GaussNewtonOptimizer.h
gtsam::LevenbergMarquardtParams::logFile
std::string logFile
an optional CSV log file, with [iteration, time, error, lambda]
Definition: LevenbergMarquardtParams.h:55
gtsam::GaussianFactorGraph
Definition: GaussianFactorGraph.h:73
gtsam::conjugateGradientDescent
Vector conjugateGradientDescent(const System &Ab, const Vector &x, const ConjugateGradientParameters &parameters)
Definition: iterative.cpp:45
gtsam::NonlinearFactorGraph::error
double error(const Values &values) const
Definition: NonlinearFactorGraph.cpp:170
IterativeLM
Definition: testNonlinearOptimizer.cpp:507
gtsam::NonlinearOptimizerParams
Definition: NonlinearOptimizerParams.h:35
gtsam::traits< MyType >::Equals
static bool Equals(const MyType &a, const MyType &b, double tol)
Definition: testNonlinearOptimizer.cpp:640
gtsam::VectorValues
Definition: VectorValues.h:74
gtsam::LevenbergMarquardtOptimizer::linearize
virtual GaussianFactorGraph::shared_ptr linearize() const
Definition: LevenbergMarquardtOptimizer.cpp:83
BetweenFactor.h
relicense.filename
filename
Definition: relicense.py:57
DOUBLES_EQUAL
#define DOUBLES_EQUAL(expected, actual, threshold)
Definition: Test.h:141
gtsam::GaussianFactorGraph::gradientAtZero
virtual VectorValues gradientAtZero() const
Definition: GaussianFactorGraph.cpp:369
TEST_UNSAFE
TEST_UNSAFE(NonlinearOptimizer, MoreOptimization)
Definition: testNonlinearOptimizer.cpp:248
gtsam::GaussNewtonParams
Definition: GaussNewtonOptimizer.h:30
gtsam::NonlinearFactorGraph
Definition: NonlinearFactorGraph.h:55
cholesky::expected
Matrix expected
Definition: testMatrix.cpp:971
gtsam::NonlinearFactorGraph::addPrior
void addPrior(Key key, const T &prior, const SharedNoiseModel &model=nullptr)
Definition: NonlinearFactorGraph.h:199
x0
static Symbol x0('x', 0)
gtsam::LevenbergMarquardtOptimizer
Definition: LevenbergMarquardtOptimizer.h:35
L
MatrixXd L
Definition: LLT_example.cpp:6
Symbol.h
iterative.h
Iterative methods, implementation.
gtsam::LevenbergMarquardtParams::lambdaInitial
double lambdaInitial
The initial Levenberg-Marquardt damping term (default: 1e-5)
Definition: LevenbergMarquardtParams.h:49
gtsam::NonlinearOptimizerParams::equals
bool equals(const NonlinearOptimizerParams &other, double tol=1e-9) const
Definition: NonlinearOptimizerParams.cpp:127
gtsam::Point2
Vector2 Point2
Definition: Point2.h:32
TEST
TEST(NonlinearOptimizer, paramsEquals)
Definition: testNonlinearOptimizer.cpp:47
init
detail::initimpl::constructor< Args... > init()
Binds an existing constructor taking arguments Args...
Definition: pybind11.h:2006
ordering
static enum @1096 ordering
TestResult
Definition: TestResult.h:26
gtsam::DoglegParams
Definition: DoglegOptimizer.h:32
key
const gtsam::Symbol key('X', 0)
gtsam::NonlinearConjugateGradientOptimizer::optimize
const Values & optimize() override
Definition: NonlinearConjugateGradientOptimizer.cpp:82
gtsam::Values::zeroVectors
VectorValues zeroVectors() const
Definition: Values.cpp:260
a
ArrayXXi a
Definition: Array_initializer_list_23_cxx11.cpp:1
gtsam::NonlinearConjugateGradientOptimizer
Definition: NonlinearConjugateGradientOptimizer.h:80
gtsam
traits
Definition: SFMdata.h:40
gtsam::NonlinearOptimizer
Definition: NonlinearOptimizer.h:75
NoiseModel.h
gtsam::traits
Definition: Group.h:36
gtsam::ConjugateGradientParameters
Definition: ConjugateGradientSolver.h:29
NonlinearFactorGraph.h
Factor Graph consisting of non-linear factors.
gtsam::FactorGraph::push_back
IsDerived< DERIVEDFACTOR > push_back(std::shared_ptr< DERIVEDFACTOR > factor)
Add a factor directly using a shared_ptr.
Definition: FactorGraph.h:147
gtsam::Values
Definition: Values.h:65
CHECK
#define CHECK(condition)
Definition: Test.h:108
std
Definition: BFloat16.h:88
p
float * p
Definition: Tutorial_Map_using.cpp:9
gtsam::Values::insert
void insert(Key j, const Value &val)
Definition: Values.cpp:155
gtsam::assert_equal
bool assert_equal(const Matrix &expected, const Matrix &actual, double tol)
Definition: Matrix.cpp:40
gtsam::GaussNewtonOptimizer::iterate
GaussianFactorGraph::shared_ptr iterate() override
Definition: GaussNewtonOptimizer.cpp:44
gtsam::LevenbergMarquardtParams
Definition: LevenbergMarquardtParams.h:35
gtsam::LevenbergMarquardtOptimizer::buildDampedSystem
GaussianFactorGraph buildDampedSystem(const GaussianFactorGraph &linear, const VectorValues &sqrtHessianDiagonal) const
Definition: LevenbergMarquardtOptimizer.cpp:88
gtsam::example::createReallyNonlinearFactorGraph
NonlinearFactorGraph createReallyNonlinearFactorGraph()
Definition: smallExample.h:378
main
int main()
Definition: testNonlinearOptimizer.cpp:663
DoglegOptimizer.h
M_PI
#define M_PI
Definition: mconf.h:117
gtsam::NonlinearOptimizerParams::ordering
std::optional< Ordering > ordering
The optional variable elimination ordering, or empty to use COLAMD (default: empty)
Definition: NonlinearOptimizerParams.h:108
graph
NonlinearFactorGraph graph
Definition: doc/Code/OdometryExample.cpp:2
gtsam::traits< MyType >::Local
static Vector3 Local(const MyType &a, const MyType &b)
Definition: testNonlinearOptimizer.cpp:646
smallExample.h
Create small example with two poses and one landmark.
init
Definition: TutorialInplaceLU.cpp:2
gtsam::Ordering
Definition: inference/Ordering.h:33
gtsam::traits< MyType >::GetDimension
static int GetDimension(const MyType &)
Definition: testNonlinearOptimizer.cpp:644
test_callbacks.value
value
Definition: test_callbacks.py:160
Values.h
A non-templated config holding any types of Manifold-group elements.
gtsam::FactorGraph::emplace_shared
IsDerived< DERIVEDFACTOR > emplace_shared(Args &&... args)
Emplace a shared pointer to factor of given type.
Definition: FactorGraph.h:153
gtsam::Pose2
Definition: Pose2.h:39
gtsam::BetweenFactor
Definition: BetweenFactor.h:40
gtsam::NonlinearOptimizerParams::linearSolverType
LinearSolverType linearSolverType
The type of linear solver to use in the nonlinear optimizer.
Definition: NonlinearOptimizerParams.h:107


gtsam
Author(s):
autogenerated on Sat Nov 16 2024 04:08:04