/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    testNonlinearOptimizer.cpp
 * @brief   Tests for the nonlinear optimizers (Gauss-Newton, LM, Dogleg, CG)
 */
#include <tests/smallExample.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/DoglegOptimizer.h>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/base/Matrix.h>

#include <CppUnitLite/TestHarness.h>

#include <boost/range/adaptor/map.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign;
using boost::adaptors::map_values;

#include <iostream>
#include <fstream>

using namespace std;
using namespace gtsam;

const double tol = 1e-5;

using symbol_shorthand::X;
using symbol_shorthand::L;
51 /* ************************************************************************* */
52 TEST( NonlinearOptimizer, paramsEquals )
53 {
54  // default constructors lead to two identical params
55  GaussNewtonParams gnParams1;
56  GaussNewtonParams gnParams2;
57  CHECK(gnParams1.equals(gnParams2));
58 
59  // but the params become different if we change something in gnParams2
60  gnParams2.setVerbosity("DELTA");
61  CHECK(!gnParams1.equals(gnParams2));
62 }
63 
64 /* ************************************************************************* */
65 TEST( NonlinearOptimizer, iterateLM )
66 {
67  // really non-linear factor graph
69 
70  // config far from minimum
71  Point2 x0(3,0);
72  Values config;
73  config.insert(X(1), x0);
74 
75  // normal iterate
76  GaussNewtonParams gnParams;
77  GaussNewtonOptimizer gnOptimizer(fg, config, gnParams);
78  gnOptimizer.iterate();
79 
80  // LM iterate with lambda 0 should be the same
82  lmParams.lambdaInitial = 0.0;
83  LevenbergMarquardtOptimizer lmOptimizer(fg, config, lmParams);
84  lmOptimizer.iterate();
85 
86  CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9));
87 }
88 
89 /* ************************************************************************* */
91 {
93 
94  // test error at minimum
95  Point2 xstar(0,0);
96  Values cstar;
97  cstar.insert(X(1), xstar);
98  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
99 
100  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
101  Point2 x0(3,3);
102  Values c0;
103  c0.insert(X(1), x0);
104  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
105 
106  // optimize parameters
107  Ordering ord;
108  ord.push_back(X(1));
109 
110  // Gauss-Newton
111  GaussNewtonParams gnParams;
112  gnParams.ordering = ord;
113  Values actual1 = GaussNewtonOptimizer(fg, c0, gnParams).optimize();
114  DOUBLES_EQUAL(0,fg.error(actual1),tol);
115 
116  // Levenberg-Marquardt
118  lmParams.ordering = ord;
119  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
120  DOUBLES_EQUAL(0,fg.error(actual2),tol);
121 
122  // Dogleg
123  DoglegParams dlParams;
124  dlParams.ordering = ord;
125  Values actual3 = DoglegOptimizer(fg, c0, dlParams).optimize();
126  DOUBLES_EQUAL(0,fg.error(actual3),tol);
127 }
128 
129 /* ************************************************************************* */
130 TEST( NonlinearOptimizer, SimpleLMOptimizer )
131 {
133 
134  Point2 x0(3,3);
135  Values c0;
136  c0.insert(X(1), x0);
137 
138  Values actual = LevenbergMarquardtOptimizer(fg, c0).optimize();
139  DOUBLES_EQUAL(0,fg.error(actual),tol);
140 }
141 
142 /* ************************************************************************* */
143 TEST( NonlinearOptimizer, SimpleGNOptimizer )
144 {
146 
147  Point2 x0(3,3);
148  Values c0;
149  c0.insert(X(1), x0);
150 
151  Values actual = GaussNewtonOptimizer(fg, c0).optimize();
152  DOUBLES_EQUAL(0,fg.error(actual),tol);
153 }
154 
155 /* ************************************************************************* */
156 TEST( NonlinearOptimizer, SimpleDLOptimizer )
157 {
159 
160  Point2 x0(3,3);
161  Values c0;
162  c0.insert(X(1), x0);
163 
164  Values actual = DoglegOptimizer(fg, c0).optimize();
165  DOUBLES_EQUAL(0,fg.error(actual),tol);
166 }
167 
168 /* ************************************************************************* */
169 TEST( NonlinearOptimizer, optimization_method )
170 {
171  LevenbergMarquardtParams paramsQR;
172  paramsQR.linearSolverType = LevenbergMarquardtParams::MULTIFRONTAL_QR;
173  LevenbergMarquardtParams paramsChol;
174  paramsChol.linearSolverType = LevenbergMarquardtParams::MULTIFRONTAL_CHOLESKY;
175 
177 
178  Point2 x0(3,3);
179  Values c0;
180  c0.insert(X(1), x0);
181 
182  Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize();
183  DOUBLES_EQUAL(0,fg.error(actualMFQR),tol);
184 
185  Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize();
186  DOUBLES_EQUAL(0,fg.error(actualMFChol),tol);
187 }
188 
189 /* ************************************************************************* */
190 TEST( NonlinearOptimizer, Factorization )
191 {
192  Values config;
193  config.insert(X(1), Pose2(0.,0.,0.));
194  config.insert(X(2), Pose2(1.5,0.,0.));
195 
197  graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
198  graph += BetweenFactor<Pose2>(X(1),X(2), Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));
199 
201  ordering.push_back(X(1));
202  ordering.push_back(X(2));
203 
205  LevenbergMarquardtParams::SetLegacyDefaults(&params);
206  LevenbergMarquardtOptimizer optimizer(graph, config, ordering, params);
207  optimizer.iterate();
208 
210  expected.insert(X(1), Pose2(0.,0.,0.));
211  expected.insert(X(2), Pose2(1.,0.,0.));
212  CHECK(assert_equal(expected, optimizer.values(), 1e-5));
213 }
214 
215 /* ************************************************************************* */
216 TEST(NonlinearOptimizer, NullFactor) {
217 
219 
220  // Add null factor
221  fg.push_back(NonlinearFactorGraph::sharedFactor());
222 
223  // test error at minimum
224  Point2 xstar(0,0);
225  Values cstar;
226  cstar.insert(X(1), xstar);
227  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
228 
229  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
230  Point2 x0(3,3);
231  Values c0;
232  c0.insert(X(1), x0);
233  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
234 
235  // optimize parameters
236  Ordering ord;
237  ord.push_back(X(1));
238 
239  // Gauss-Newton
240  Values actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize();
241  DOUBLES_EQUAL(0,fg.error(actual1),tol);
242 
243  // Levenberg-Marquardt
244  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize();
245  DOUBLES_EQUAL(0,fg.error(actual2),tol);
246 
247  // Dogleg
248  Values actual3 = DoglegOptimizer(fg, c0, ord).optimize();
249  DOUBLES_EQUAL(0,fg.error(actual3),tol);
250 }
251 
252 /* ************************************************************************* */
253 TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) {
254 
256  fg.addPrior(0, Pose2(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1));
257  fg += BetweenFactor<Pose2>(0, 1, Pose2(1, 0, M_PI / 2),
258  noiseModel::Isotropic::Sigma(3, 1));
259  fg += BetweenFactor<Pose2>(1, 2, Pose2(1, 0, M_PI / 2),
260  noiseModel::Isotropic::Sigma(3, 1));
261 
262  Values init;
263  init.insert(0, Pose2(3, 4, -M_PI));
264  init.insert(1, Pose2(10, 2, -M_PI));
265  init.insert(2, Pose2(11, 7, -M_PI));
266 
268  expected.insert(0, Pose2(0, 0, 0));
269  expected.insert(1, Pose2(1, 0, M_PI / 2));
270  expected.insert(2, Pose2(1, 1, M_PI));
271 
272  VectorValues expectedGradient;
273  expectedGradient.insert(0,Z_3x1);
274  expectedGradient.insert(1,Z_3x1);
275  expectedGradient.insert(2,Z_3x1);
276 
277  // Try LM and Dogleg
278  LevenbergMarquardtParams params = LevenbergMarquardtParams::LegacyDefaults();
279  {
280  LevenbergMarquardtOptimizer optimizer(fg, init, params);
281 
282  // test convergence
283  Values actual = optimizer.optimize();
284  EXPECT(assert_equal(expected, actual));
285 
286  // Check that the gradient is zero
287  GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
288  EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
289  }
290  EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));
291 
292 // cout << "===================================================================================" << endl;
293 
294  // Try LM with diagonal damping
295  Values initBetter;
296  initBetter.insert(0, Pose2(3,4,0));
297  initBetter.insert(1, Pose2(10,2,M_PI/3));
298  initBetter.insert(2, Pose2(11,7,M_PI/2));
299 
300  {
301  params.diagonalDamping = true;
302  LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
303 
304  // test the diagonal
305  GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
306  VectorValues d = linear->hessianDiagonal();
307  VectorValues sqrtHessianDiagonal = d;
308  for (Vector& v : sqrtHessianDiagonal | map_values) v = v.cwiseSqrt();
309  GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear, sqrtHessianDiagonal);
310  VectorValues expectedDiagonal = d + params.lambdaInitial * d;
311  EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
312 
313  // test convergence (does not!)
314  Values actual = optimizer.optimize();
315  EXPECT(assert_equal(expected, actual));
316 
317  // Check that the gradient is zero (it is not!)
318  linear = optimizer.linearize();
319  EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
320 
321  // Check that the gradient is zero for damped system (it is not!)
322  damped = optimizer.buildDampedSystem(*linear, sqrtHessianDiagonal);
323  VectorValues actualGradient = damped.gradientAtZero();
324  EXPECT(assert_equal(expectedGradient,actualGradient));
325 
326  /* This block was made to test the original initial guess "init"
327  // Check errors at convergence and errors in direction of gradient (decreases!)
328  EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
329  EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
330 
331  // Check that solve yields gradient (it's not equal to gradient, as predicted)
332  VectorValues delta = damped.optimize();
333  double factor = actualGradient[0][0]/delta[0][0];
334  EXPECT(assert_equal(actualGradient,factor*delta));
335 
336  // Still pointing downhill wrt actual gradient !
337  EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
338 
339  // delta.print("This is the delta value computed by LM with diagonal damping");
340 
341  // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
342  EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
343 
344  // Check errors at convergence and errors in direction of solution (does not decrease!)
345  EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
346 
347  // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
348  EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
349  */
350  }
351 }
352 
353 /* ************************************************************************* */
354 TEST(NonlinearOptimizer, Pose2OptimizationWithHuberNoOutlier) {
355 
357  fg.addPrior(0, Pose2(0,0,0), noiseModel::Isotropic::Sigma(3,1));
358  fg += BetweenFactor<Pose2>(0, 1, Pose2(1,1.1,M_PI/4),
359  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(2.0),
360  noiseModel::Isotropic::Sigma(3,1)));
361  fg += BetweenFactor<Pose2>(0, 1, Pose2(1,0.9,M_PI/2),
362  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(3.0),
363  noiseModel::Isotropic::Sigma(3,1)));
364 
365  Values init;
366  init.insert(0, Pose2(0,0,0));
367  init.insert(1, Pose2(0.961187, 0.99965, 1.1781));
368 
370  expected.insert(0, Pose2(0,0,0));
371  expected.insert(1, Pose2(0.961187, 0.99965, 1.1781));
372 
374 
375  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
376  auto lm_result = LevenbergMarquardtOptimizer(fg, init, lm_params).optimize();
377  auto dl_result = DoglegOptimizer(fg, init).optimize();
378 
379  EXPECT(assert_equal(expected, gn_result, 3e-2));
380  EXPECT(assert_equal(expected, lm_result, 3e-2));
381  EXPECT(assert_equal(expected, dl_result, 3e-2));
382 }
383 
384 /* ************************************************************************* */
385 TEST(NonlinearOptimizer, Point2LinearOptimizationWithHuber) {
386 
388  fg.addPrior(0, Point2(0,0), noiseModel::Isotropic::Sigma(2,0.01));
389  fg += BetweenFactor<Point2>(0, 1, Point2(1,1.8),
390  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
391  noiseModel::Isotropic::Sigma(2,1)));
392  fg += BetweenFactor<Point2>(0, 1, Point2(1,0.9),
393  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
394  noiseModel::Isotropic::Sigma(2,1)));
395  fg += BetweenFactor<Point2>(0, 1, Point2(1,90),
396  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.0),
397  noiseModel::Isotropic::Sigma(2,1)));
398 
399  Values init;
400  init.insert(0, Point2(1,1));
401  init.insert(1, Point2(1,0));
402 
404  expected.insert(0, Point2(0,0));
405  expected.insert(1, Point2(1,1.85));
406 
408 
409  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
410  auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
411  auto dl_result = DoglegOptimizer(fg, init).optimize();
412 
413  EXPECT(assert_equal(expected, gn_result, 1e-4));
414  EXPECT(assert_equal(expected, lm_result, 1e-4));
415  EXPECT(assert_equal(expected, dl_result, 1e-4));
416 }
417 
418 /* ************************************************************************* */
419 TEST(NonlinearOptimizer, Pose2OptimizationWithHuber) {
420 
422  fg.addPrior(0, Pose2(0,0, 0), noiseModel::Isotropic::Sigma(3,0.1));
423  fg += BetweenFactor<Pose2>(0, 1, Pose2(0,9, M_PI/2),
424  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
425  noiseModel::Isotropic::Sigma(3,1)));
426  fg += BetweenFactor<Pose2>(0, 1, Pose2(0, 11, M_PI/2),
427  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
428  noiseModel::Isotropic::Sigma(3,1)));
429  fg += BetweenFactor<Pose2>(0, 1, Pose2(0, 10, M_PI/2),
430  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
431  noiseModel::Isotropic::Sigma(3,1)));
432  fg += BetweenFactor<Pose2>(0, 1, Pose2(0,9, 0),
433  noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(0.2),
434  noiseModel::Isotropic::Sigma(3,1)));
435 
436  Values init;
437  init.insert(0, Pose2(0, 0, 0));
438  init.insert(1, Pose2(0, 10, M_PI/4));
439 
441  expected.insert(0, Pose2(0, 0, 0));
442  expected.insert(1, Pose2(0, 10, 1.45212));
443 
445 
446  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
447  auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
448  auto dl_result = DoglegOptimizer(fg, init).optimize();
449 
450  EXPECT(assert_equal(expected, gn_result, 1e-1));
451  EXPECT(assert_equal(expected, lm_result, 1e-1));
452  EXPECT(assert_equal(expected, dl_result, 1e-1));
453 }
454 
455 /* ************************************************************************* */
456 TEST(NonlinearOptimizer, RobustMeanCalculation) {
457 
459 
460  Values init;
461 
463 
464  auto huber = noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(20),
465  noiseModel::Isotropic::Sigma(1, 1));
466 
467  vector<double> pts{-10,-3,-1,1,3,10,1000};
468  for(auto pt : pts) {
469  fg.addPrior(0, pt, huber);
470  }
471 
472  init.insert(0, 100.0);
473  expected.insert(0, 3.33333333);
474 
475  DoglegParams params_dl;
476  params_dl.setRelativeErrorTol(1e-10);
477 
478  auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
479  auto lm_result = LevenbergMarquardtOptimizer(fg, init).optimize();
480  auto dl_result = DoglegOptimizer(fg, init, params_dl).optimize();
481 
482  EXPECT(assert_equal(expected, gn_result, tol));
483  EXPECT(assert_equal(expected, lm_result, tol));
484  EXPECT(assert_equal(expected, dl_result, tol));
485 }
486 
487 /* ************************************************************************* */
488 TEST(NonlinearOptimizer, disconnected_graph) {
490  expected.insert(X(1), Pose2(0.,0.,0.));
491  expected.insert(X(2), Pose2(1.5,0.,0.));
492  expected.insert(X(3), Pose2(3.0,0.,0.));
493 
494  Values init;
495  init.insert(X(1), Pose2(0.,0.,0.));
496  init.insert(X(2), Pose2(0.,0.,0.));
497  init.insert(X(3), Pose2(0.,0.,0.));
498 
500  graph.addPrior(X(1), Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3,1));
501  graph += BetweenFactor<Pose2>(X(1),X(2), Pose2(1.5,0.,0.), noiseModel::Isotropic::Sigma(3,1));
502  graph.addPrior(X(3), Pose2(3.,0.,0.), noiseModel::Isotropic::Sigma(3,1));
503 
504  EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(graph, init).optimize()));
505 }
506 
507 /* ************************************************************************* */
508 #include <gtsam/linear/iterative.h>
509 
514 
515  public:
517  IterativeLM(const NonlinearFactorGraph& graph, const Values& initialValues,
520  LevenbergMarquardtParams::LegacyDefaults())
521  : LevenbergMarquardtOptimizer(graph, initialValues, params),
522  cgParams_(p),
523  initial_(initialValues) {}
524 
527  const NonlinearOptimizerParams& params) const override {
528  VectorValues zeros = initial_.zeroVectors();
529  return conjugateGradientDescent(gfg, zeros, cgParams_);
530  }
531 };
532 
533 /* ************************************************************************* */
534 TEST(NonlinearOptimizer, subclass_solver) {
536  expected.insert(X(1), Pose2(0., 0., 0.));
537  expected.insert(X(2), Pose2(1.5, 0., 0.));
538  expected.insert(X(3), Pose2(3.0, 0., 0.));
539 
540  Values init;
541  init.insert(X(1), Pose2(0., 0., 0.));
542  init.insert(X(2), Pose2(0., 0., 0.));
543  init.insert(X(3), Pose2(0., 0., 0.));
544 
546  graph.addPrior(X(1), Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
547  graph += BetweenFactor<Pose2>(X(1), X(2), Pose2(1.5, 0., 0.),
548  noiseModel::Isotropic::Sigma(3, 1));
549  graph.addPrior(X(3), Pose2(3., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
550 
552  Values actual = IterativeLM(graph, init, p).optimize();
553  EXPECT(assert_equal(expected, actual, 1e-4));
554 }
555 
556 /* ************************************************************************* */
558 {
560 
561  Point2 x0(3,3);
562  Values c0;
563  c0.insert(X(1), x0);
564 
565  // Levenberg-Marquardt
567  static const string filename("testNonlinearOptimizer.log");
568  lmParams.logFile = filename;
569  LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
570 
571 // stringstream expected,actual;
572 // ifstream ifs(("../../gtsam/tests/" + filename).c_str());
573 // if(!ifs) throw std::runtime_error(filename);
574 // expected << ifs.rdbuf();
575 // ifs.close();
576 // ifstream ifs2(filename.c_str());
577 // if(!ifs2) throw std::runtime_error(filename);
578 // actual << ifs2.rdbuf();
579 // EXPECT(actual.str()==expected.str());
580 }
581 
582 /* ************************************************************************* */
583 TEST( NonlinearOptimizer, iterationHook_LM )
584 {
586 
587  Point2 x0(3,3);
588  Values c0;
589  c0.insert(X(1), x0);
590 
591  // Levenberg-Marquardt
593  size_t lastIterCalled = 0;
594  lmParams.iterationHook = [&](size_t iteration, double oldError, double newError)
595  {
596  // Tests:
597  lastIterCalled = iteration;
598  EXPECT(newError<oldError);
599 
600  // Example of evolution printout:
601  //std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
602  };
603  LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
604 
605  EXPECT(lastIterCalled>5);
606 }
607 /* ************************************************************************* */
608 TEST( NonlinearOptimizer, iterationHook_CG )
609 {
611 
612  Point2 x0(3,3);
613  Values c0;
614  c0.insert(X(1), x0);
615 
616  // Levenberg-Marquardt
618  size_t lastIterCalled = 0;
619  cgParams.iterationHook = [&](size_t iteration, double oldError, double newError)
620  {
621  // Tests:
622  lastIterCalled = iteration;
623  EXPECT(newError<oldError);
624 
625  // Example of evolution printout:
626  //std::cout << "iter: " << iteration << " error: " << oldError << " => " << newError <<"\n";
627  };
628  NonlinearConjugateGradientOptimizer(fg, c0, cgParams).optimize();
629 
630  EXPECT(lastIterCalled>5);
631 }
632 
633 
634 /* ************************************************************************* */
636 struct MyType : public Vector3 {
637  using Vector3::Vector3;
638 };
639 
640 namespace gtsam {
641 template <>
642 struct traits<MyType> {
643  static bool Equals(const MyType& a, const MyType& b, double tol) {
644  return (a - b).array().abs().maxCoeff() < tol;
645  }
646  static void Print(const MyType&, const string&) {}
647  static int GetDimension(const MyType&) { return 3; }
648  static MyType Retract(const MyType& a, const Vector3& b) { return a + b; }
649  static Vector3 Local(const MyType& a, const MyType& b) { return b - a; }
650 };
651 }
652 
655  fg.addPrior(0, MyType(0, 0, 0), noiseModel::Isotropic::Sigma(3, 1));
656 
657  Values init;
658  init.insert(0, MyType(0,0,0));
659 
661  Values actual = optimizer.optimize();
662  EXPECT(assert_equal(init, actual));
663 }
664 
665 /* ************************************************************************* */
666 int main() {
667  TestResult tr;
668  return TestRegistry::runAllTests(tr);
669 }
670 /* ************************************************************************* */
LevenbergMarquardtParams lm_params
#define CHECK(condition)
Definition: Test.h:109
static const Eigen::MatrixBase< Vector3 >::ConstantReturnType Z_3x1
Definition: Vector.h:46
virtual const Values & optimize()
static Vector3 Local(const MyType &a, const MyType &b)
Scalar * b
Definition: benchVecAdd.cpp:17
static bool Equals(const MyType &a, const MyType &b, double tol)
static int runAllTests(TestResult &result)
Eigen::Vector3d Vector3
Definition: Vector.h:43
A non-templated config holding any types of Manifold-group elements.
VectorValues zeroVectors() const
Definition: Values.cpp:216
Factor Graph consisting of non-linear factors.
void insert(Key j, const Value &val)
Definition: Values.cpp:140
static enum @843 ordering
Matrix expected
Definition: testMatrix.cpp:974
boost::shared_ptr< This > shared_ptr
shared_ptr to this class
#define DOUBLES_EQUAL(expected, actual, threshold)
Definition: Test.h:142
Vector2 Point2
Definition: Point2.h:27
ArrayXcf v
Definition: Cwise_arg.cpp:1
NonlinearFactorGraph createReallyNonlinearFactorGraph()
Definition: smallExample.h:377
TEST_UNSAFE(NonlinearOptimizer, MoreOptimization)
#define M_PI
Definition: main.h:78
IterativeLM(const NonlinearFactorGraph &graph, const Values &initialValues, const ConjugateGradientParameters &p, const LevenbergMarquardtParams &params=LevenbergMarquardtParams::LegacyDefaults())
Constructor.
Iterative methods, implementation.
MatrixXd L
Definition: LLT_example.cpp:6
Definition: Half.h:150
static const Point3 pt(1.0, 2.0, 3.0)
const double tol
IsDerived< DERIVEDFACTOR > push_back(boost::shared_ptr< DERIVEDFACTOR > factor)
Add a factor directly using a shared_ptr.
Definition: FactorGraph.h:166
NonlinearFactorGraph graph
Array33i a
bool equals(const NonlinearOptimizerParams &other, double tol=1e-9) const
void addPrior(Key key, const T &prior, const SharedNoiseModel &model=nullptr)
Eigen::VectorXd Vector
Definition: Vector.h:38
static int GetDimension(const MyType &)
TEST(NonlinearOptimizer, paramsEquals)
std::string logFile
an optional CSV log file, with [iteration, time, error, lambda]
void setVerbosity(const std::string &src)
const Values & values() const
return values in current optimizer state
#define EXPECT(condition)
Definition: Test.h:151
A nonlinear optimizer that uses the Levenberg-Marquardt trust-region scheme.
Array< double, 1, 3 > e(1./3., 0.5, 2.)
boost::optional< Ordering > ordering
The optional variable elimination ordering, or empty to use COLAMD (default: empty) ...
Linear Factor Graph where all factors are Gaussians.
static SmartStereoProjectionParams params
GaussianFactorGraph buildDampedSystem(const GaussianFactorGraph &linear, const VectorValues &sqrtHessianDiagonal) const
static Symbol x0('x', 0)
traits
Definition: chartTesting.h:28
bool assert_equal(const Matrix &expected, const Matrix &actual, double tol)
Definition: Matrix.cpp:42
Vector conjugateGradientDescent(const System &Ab, const Vector &x, const ConjugateGradientParameters &parameters)
Definition: iterative.cpp:45
static MyType Retract(const MyType &a, const Vector3 &b)
GaussianFactorGraph::shared_ptr iterate() override
bool diagonalDamping
if true, use diagonal of Hessian
virtual GaussianFactorGraph::shared_ptr linearize() const
LevenbergMarquardtParams lmParams
detail::initimpl::constructor< Args... > init()
Binds an existing constructor taking arguments Args...
Definition: pybind11.h:1460
float * p
int optimize(const SfmData &db, const NonlinearFactorGraph &graph, const Values &initial, bool separateCalibration=false)
Definition: timeSFMBAL.h:63
static void Print(const MyType &, const string &)
Create small example with two poses and one landmark.
GaussianFactorGraph::shared_ptr iterate() override
double error(const Values &values) const
2D Pose
VectorValues solve(const GaussianFactorGraph &gfg, const NonlinearOptimizerParams &params) const override
Solve that uses conjugate gradient.
#define X
Definition: icosphere.cpp:20
Simple non-linear optimizer that solves using non-preconditioned CG.
iterator insert(const std::pair< Key, Vector > &key_value)
ConjugateGradientParameters cgParams_
Solver specific parameters.
virtual VectorValues hessianDiagonal() const
LinearSolverType linearSolverType
The type of linear solver to use in the nonlinear optimizer.
virtual VectorValues gradientAtZero() const
double lambdaInitial
The initial Levenberg-Marquardt damping term (default: 1e-5)


gtsam
Author(s):
autogenerated on Sat May 8 2021 02:48:25