testGncOptimizer.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/nonlinear/GncOptimizer.h>
#include <gtsam/slam/dataset.h>
#include <tests/smallExample.h>

#include <gtsam/geometry/Pose2.h>
#include <gtsam/sam/BearingFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>

using namespace std;
using namespace gtsam;

using symbol_shorthand::X;

static double tol = 1e-7;

/* ************************************************************************* */
TEST(GncOptimizer, gncParamsConstructor) {
  // check params are correctly parsed
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
  CHECK(lmParams.equals(gncParams1.baseOptimizerParams));

  // check also default constructor
  GncParams<LevenbergMarquardtParams> gncParams1b;
  CHECK(lmParams.equals(gncParams1b.baseOptimizerParams));

  // and check params become different if we change lmParams
  lmParams.setVerbosity("DELTA");
  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));

  // and same for GN
  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams2(gnParams);
  CHECK(gnParams.equals(gncParams2.baseOptimizerParams));

  // check default constructor
  GncParams<GaussNewtonParams> gncParams2b;
  CHECK(gnParams.equals(gncParams2b.baseOptimizerParams));

  // change something at the gncParams level
  GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
  gncParams2c.setLossType(GncLossType::GM);
  CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
}

/* ************************************************************************* */
TEST(GncOptimizer, gncConstructor) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();  // just a unary factor
                                                          // on a 2D point

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  CHECK(gnc.getFactors().equals(fg));
  CHECK(gnc.getState().equals(initial));
  CHECK(gnc.getParams().equals(gncParams));

  auto gnc2 = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                gncParams);

  // check that equals works
  CHECK(gnc.equals(gnc2));
}

/* ************************************************************************* */
TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  // same graph with robust noise model
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
                                                               initial,
                                                               gncParams);

  // make sure that, during parsing, the graph is transformed into one without
  // the robust loss
  CHECK(fg.equals(gnc.getFactors()));
}

/* ************************************************************************* */
TEST(GncOptimizer, initializeMu) {
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  // testing GM mu initialization
  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setLossType(GncLossType::GM);
  auto gnc_gm = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                  gncParams);
  gnc_gm.setInlierCostThresholds(1.0);
  // according to remark 5 in the GNC paper: mu0 = 2 * rmax^2 / barcSq
  // (barcSq = 1 in this example)
  EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
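  // (rmax^2 ~ 198.999 here is the maximum residual error over the factors,
  // evaluated at the initial estimate p0 = (3, 3))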

  // testing TLS mu initialization
  gncParams.setLossType(GncLossType::TLS);
  auto gnc_tls = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                   gncParams);
  gnc_tls.setInlierCostThresholds(1.0);
  // according to remark 5 in the GNC paper: mu0 = barcSq / (2 * rmax^2 - barcSq)
  // (barcSq = 1 in this example)
  EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
}

/* ************************************************************************* */
TEST(GncOptimizer, updateMuGM) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setLossType(GncLossType::GM);
  gncParams.setMuStep(1.4);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  double mu = 5.0;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);

  // check it correctly saturates to 1 for GM
  mu = 1.2;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), 1.0, tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, updateMuTLS) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setMuStep(1.4);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  double mu = 5.0;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, checkMuConvergence) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::GM);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double mu = 1.0;
    CHECK(gnc.checkMuConvergence(mu));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double mu = 1.0;
    CHECK(!gnc.checkMuConvergence(mu));  // always false for TLS
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkCostConvergence) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setRelativeCostTol(0.49);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double prev_cost = 1.0;
    double cost = 0.5;
    // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
    CHECK(!gnc.checkCostConvergence(cost, prev_cost));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setRelativeCostTol(0.51);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double prev_cost = 1.0;
    double cost = 0.5;
    // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
    CHECK(gnc.checkCostConvergence(cost, prev_cost));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkWeightsConvergence) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::GM);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    CHECK(!gnc.checkWeightsConvergence(weights));  // always false for GM
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    // weights are binary, so checkWeightsConvergence = true
    CHECK(gnc.checkWeightsConvergence(weights));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    weights[0] = 0.9;  // more than weightsTol = 1e-4 from 1, hence
                       // checkWeightsConvergence = false
    CHECK(!gnc.checkWeightsConvergence(weights));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    gncParams.setWeightsTol(0.1);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    weights[0] = 0.9;  // exactly weightsTol = 0.1 from 1, hence
                       // checkWeightsConvergence = true
    CHECK(gnc.checkWeightsConvergence(weights));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkConvergenceTLS) {
  // has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setRelativeCostTol(1e-5);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  CHECK(gnc.checkCostConvergence(1.0, 1.0));
  CHECK(!gnc.checkCostConvergence(1.0, 2.0));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsGM) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(0, 0);
  Values initial;
  initial.insert(X(1), p0);

  // we have 4 factors, 3 with zero errors (inliers), 1 with error
  // 50 = 0.5 * 1/sigma^2 * || [1;0] - [0;0] ||^2 (outlier)
  Vector weights_expected = Vector::Zero(4);
  weights_expected[0] = 1.0;  // zero error
  weights_expected[1] = 1.0;  // zero error
  weights_expected[2] = 1.0;  // zero error
  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2);  // outlier, error = 50

  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncLossType::GM);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  gnc.setInlierCostThresholds(1.0);
  double mu = 1.0;
  Vector weights_actual = gnc.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));

  mu = 2.0;
  double barcSq = 5.0;
  // outlier, error = 50
  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);
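  // (the Geman-McClure weight used above is w = ( mu * barcSq / (r^2 + mu * barcSq) )^2,
  // so zero-residual inliers get weight 1 and the outlier with r^2 = 50 gets the
  // values computed here)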

  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                         gncParams);
  gnc2.setInlierCostThresholds(barcSq);
  weights_actual = gnc2.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsTLS) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(0, 0);
  Values initial;
  initial.insert(X(1), p0);

  // we have 4 factors, 3 with zero errors (inliers), 1 with a large error (outlier)
  Vector weights_expected = Vector::Zero(4);
  weights_expected[0] = 1.0;  // zero error
  weights_expected[1] = 1.0;  // zero error
  weights_expected[2] = 1.0;  // zero error
  weights_expected[3] = 0;    // outlier

  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  double mu = 1.0;
  Vector weights_actual = gnc.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsTLS2) {

  // create values
  Point2 x_val(0.0, 0.0);
  Point2 x_prior(1.0, 0.0);
  Values initial;
  initial.insert(X(1), x_val);

  // create a very simple factor graph with a single factor
  // 0.5 * 1/sigma^2 * || x - [1;0] ||^2
  double sigma = 1;
  SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
  NonlinearFactorGraph nfg;
  nfg.add(PriorFactor<Point2>(X(1), x_prior, noise));

  // cost of the factor:
  DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol);
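  // For very large mu, the TLS weights reduce to a hard threshold on the
  // residual: weight 1 below the inlier threshold barcSq, 0 above it, and 0.5
  // exactly at the threshold. The three cases below exercise these regimes
  // around the factor cost of 0.5 computed above.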

  // check the TLS weights are correct: CASE 1: residual below barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 1.0;  // inlier
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    // if the inlier threshold is slightly larger than 0.5, the measurement is an inlier
    gnc.setInlierCostThresholds(0.51);

    double mu = 1e6;
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, tol));
  }
  // check the TLS weights are correct: CASE 2: residual above barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 0.0;  // outlier
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    // if the inlier threshold is slightly below 0.5, the measurement is an outlier
    gnc.setInlierCostThresholds(0.49);
    double mu = 1e6;  // very large mu recovers the original TLS cost
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, tol));
  }
  // check the TLS weights are correct: CASE 3: residual exactly at barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 0.5;  // undecided
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    // if the inlier threshold equals the residual, the weight is undecided (0.5)
    gnc.setInlierCostThresholds(0.5);
    double mu = 1e6;  // very large mu recovers the original TLS cost
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, makeWeightedGraph) {
  // create original factor
  double sigma1 = 0.1;
  NonlinearFactorGraph nfg =
      example::nonlinearFactorGraphWithGivenSigma(sigma1);

  // create expected
  double sigma2 = 10;
  NonlinearFactorGraph expected =
      example::nonlinearFactorGraphWithGivenSigma(sigma2);

  // create weights
  // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 1e-4.
  Vector weights = Vector::Ones(1);
  weights[0] = 1e-4;
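  // (weighting a factor by w scales its information by w, i.e. the noise sigma
  // becomes sigma / sqrt(w); here 0.1 / sqrt(1e-4) = 10, matching sigma2 above)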

  // create actual
  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
                                                               gncParams);
  NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);

  // check it's all good
  CHECK(assert_equal(expected, actual));
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeSimple) {
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  Values actual = gnc.optimize();
  DOUBLES_EQUAL(0, fg.error(actual), tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, optimize) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  // try with nonrobust cost function and standard GN
  GaussNewtonParams gnParams;
  GaussNewtonOptimizer gn(fg, initial, gnParams);
  Values gn_results = gn.optimize();
  // converges to an incorrect point due to lack of robustness to the outlier;
  // the ideal solution is Point2(0,0)
  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
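  // (intuition: three "inlier" priors pull the point towards (0,0) and one
  // "outlier" prior pulls it towards (1,0), all with the same noise, so plain
  // least squares settles on the average (0.25, 0))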

  // try with robust loss function and standard GN
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();  // same as fg, but with
                                                                    // factors wrapped in
                                                                    // Geman-McClure losses
  GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
  Values gn2_results = gn2.optimize();
  // converges to an incorrect point, this time due to the nonconvexity of the loss
  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));

  // .. but graduated nonconvexity ensures both robustness and convergence in
  // the face of nonconvexity
  GncParams<GaussNewtonParams> gncParams(gnParams);
  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  Values gnc_result = gnc.optimize();
  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeWithKnownInliers) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

  // nonconvexity with known inliers
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::GM);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);
    gnc.setInlierCostThresholds(1.0);
    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
  }
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::TLS);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);

    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    DOUBLES_EQUAL(0.0, finalWeights[3], tol);
  }
  {
    // if we set the threshold large, they are all considered inliers
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::TLS);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);
    gnc.setInlierCostThresholds(100.0);

    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    DOUBLES_EQUAL(1.0, finalWeights[3], tol);
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, chi2inv) {
  DOUBLES_EQUAL(8.807468393511950, Chi2inv(0.997, 1), tol);   // from MATLAB: chi2inv(0.997, 1) = 8.807468393511950
  DOUBLES_EQUAL(13.931422665512077, Chi2inv(0.997, 3), tol);  // from MATLAB: chi2inv(0.997, 3) = 13.931422665512077
}

/* ************************************************************************* */
TEST(GncOptimizer, barcsq) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

  GncParams<GaussNewtonParams> gncParams;
  gncParams.setKnownInliers(knownInliers);
  gncParams.setLossType(GncLossType::GM);
  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                        gncParams);
  // expected: chi2inv(0.99, 2)/2
  CHECK(assert_equal(4.605170185988091 * Vector::Ones(fg.size()),
                     gnc.getInlierCostThresholds(), 1e-3));
}

/* ************************************************************************* */
TEST(GncOptimizer, barcsq_heterogeneousFactors) {
  NonlinearFactorGraph fg;
  // specify the noise models explicitly, otherwise we get a segfault when the
  // default (empty) noise model is used
  SharedNoiseModel model3D(noiseModel::Isotropic::Sigma(3, 0.5));
  fg.add(PriorFactor<Pose2>(0, Pose2(0.0, 0.0, 0.0), model3D));   // size 3
  SharedNoiseModel model2D(noiseModel::Isotropic::Sigma(2, 0.5));
  fg.add(PriorFactor<Point2>(1, Point2(0.0, 0.0), model2D));      // size 2
  SharedNoiseModel model1D(noiseModel::Isotropic::Sigma(1, 0.5));
  fg.add(BearingFactor<Pose2, Point2>(0, 1, 1.0, model1D));       // size 1

  Values initial;
  initial.insert(0, Pose2(0.0, 0.0, 0.0));
  initial.insert(1, Point2(0.0, 0.0));

  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(
      fg, initial, GncParams<GaussNewtonParams>());
  CHECK(assert_equal(Vector3(5.672433365072185, 4.605170185988091,
                             3.317448300510607),
                     gnc.getInlierCostThresholds(), 1e-3));
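  // (the default inlier cost threshold is 0.5 * chi2inv(0.99, dof) per factor,
  // hence the three values above for factor dimensions 3, 2, and 1)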

  // extra test:
  // fg.add(PriorFactor<Pose2>(0, Pose2(0.0, 0.0, 0.0))); // works if we add model3D as noise model
  // std::cout << "fg[3]->dim() " << fg[3]->dim() << std::endl; // this segfaults?
}

/* ************************************************************************* */
TEST(GncOptimizer, setWeights) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::GM);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);
    gnc.setInlierCostThresholds(2.0);
    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()),
                       gnc.getInlierCostThresholds(), 1e-3));
  }
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::GM);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);
    gnc.setInlierCostThresholds(2.0 * Vector::Ones(fg.size()));
    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()),
                       gnc.getInlierCostThresholds(), 1e-3));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeSmallPoseGraph) {
  /// load a small pose-graph dataset
  const string filename = findExampleDataFile("w100.graph");
  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  boost::tie(graph, initial) = load2D(filename);
  // Add a Gaussian prior on the first pose
  Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
      Vector3(0.01, 0.01, 0.01));
  graph->addPrior(0, priorMean, priorNoise);

  /// get expected values by optimizing the outlier-free graph
  Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize();

  // add a few outliers
  SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(
      Vector3(0.1, 0.1, 0.01));
  // some arbitrary and incorrect between factor
  graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise));

  /// optimize the corrupted graph with standard LM
  Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial)
      .optimize();
  // as expected, the following test fails due to the presence of an outlier!
  // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));

  // GNC
  // Note: in difficult instances, we set the odometry measurements to be
  // inliers, but this problem is simple enough to succeed even without that
  // assumption
  // std::vector<size_t> knownInliers;
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
                                                        gncParams);
  Values actual = gnc.optimize();

  // compare
  CHECK(assert_equal(expected, actual, 1e-3));  // yay! we are robust to outliers!
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */