testGncOptimizer.cpp
Go to the documentation of this file.
1 /* ----------------------------------------------------------------------------
2 
3  * GTSAM Copyright 2010, Georgia Tech Research Corporation,
4  * Atlanta, Georgia 30332-0415
5  * All Rights Reserved
6  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
7 
8  * See LICENSE for the license information
9 
10  * -------------------------------------------------------------------------- */
11 
33 #include <gtsam/slam/dataset.h>
34 #include <tests/smallExample.h>
35 
37 #include <gtsam/geometry/Pose2.h>
38 
39 using namespace std;
40 using namespace gtsam;
41 
44 static double tol = 1e-7;
45 
46 /* ************************************************************************* */
// Verifies GncParams correctly wraps LM / GN base-optimizer params and that
// equality tracks changes on either side.
// NOTE(review): doc-extraction gap — declarations of lmParams (orig. line 49)
// and gncParams1b (orig. line 54) are missing here; verify against upstream.
47 TEST(GncOptimizer, gncParamsConstructor) {
48  // check params are correctly parsed
50  GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
51  CHECK(lmParams.equals(gncParams1.baseOptimizerParams));
52 
53  // check also default constructor
55  CHECK(lmParams.equals(gncParams1b.baseOptimizerParams));
56 
57  // and check params become different if we change lmParams
58  lmParams.setVerbosity("DELTA");
59  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));
60 
61  // and same for GN
62  GaussNewtonParams gnParams;
63  GncParams<GaussNewtonParams> gncParams2(gnParams);
64  CHECK(gnParams.equals(gncParams2.baseOptimizerParams));
65 
66  // check default constructor
67  GncParams<GaussNewtonParams> gncParams2b;
68  CHECK(gnParams.equals(gncParams2b.baseOptimizerParams));
69 
70  // change something at the gncParams level
71  GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
72  gncParams2c.setLossType(GncLossType::GM);
73  CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
74 }
75 
76 /* ************************************************************************* */
// Checks the GncOptimizer constructor stores factors, initial values and
// params, and that GncOptimizer::equals works.
// NOTE(review): doc-extraction gap — the Values declaration (orig. line 83)
// and the gnc/gnc2 constructor call lines (orig. 86-87, 94) are missing.
77 TEST(GncOptimizer, gncConstructor) {
78  // has to have Gaussian noise models !
79  auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor
80  // on a 2D point
81 
82  Point2 p0(3, 3);
84  initial.insert(X(1), p0);
85 
88  gncParams);
89 
90  CHECK(gnc.getFactors().equals(fg));
91  CHECK(gnc.getState().equals(initial));
92  CHECK(gnc.getParams().equals(gncParams));
93 
95  gncParams);
96 
97  // check the equal works
98  CHECK(gnc.equals(gnc2));
99 }
100 
101 /* ************************************************************************* */
// Checks that inner-solver params are honored: with maxIterations = 0 the
// optimizer must return the initial guess unchanged.
// NOTE(review): doc-extraction gap — the lmParams declaration (orig. line 111),
// the gnc constructor line (orig. 114), and the final param check (orig. 122)
// are missing; verify against upstream.
102 TEST(GncOptimizer, solverParameterParsing) {
103  // has to have Gaussian noise models !
104  auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor
105  // on a 2D point
106 
107  Point2 p0(3, 3);
108  Values initial;
109  initial.insert(X(1), p0);
110 
112  lmParams.setMaxIterations(0); // forces not to perform optimization
113  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
115  gncParams);
116  Values result = gnc.optimize();
117 
118  // check that LM did not perform optimization and result is the same as the initial guess
119  DOUBLES_EQUAL(fg.error(initial), fg.error(result), tol);
120 
121  // also check the params:
123 }
124 
125 /* ************************************************************************* */
// Checks that a graph with robust noise models is converted, at construction,
// into an equivalent graph without robust losses (GNC manages robustness via
// weights instead).
// NOTE(review): doc-extraction gap — graph creation (orig. lines 127, 129) and
// the gnc constructor line (orig. 135-136) are missing.
126 TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
128  // same graph with robust noise model
130 
131  Point2 p0(3, 3);
132  Values initial;
133  initial.insert(X(1), p0);
134 
137  initial,
138  gncParams);
139 
140  // make sure that when parsing the graph is transformed into one without
141  // robust loss
142  CHECK(fg.equals(gnc.getFactors()));
143 }
144 
145 /* ************************************************************************* */
// Checks the initial value of the graduated-nonconvexity parameter mu for the
// GM and TLS losses, per Remark 5 of the GNC paper (rmax^2 = 198.999 here).
// NOTE(review): doc-extraction gap — graph creation and the gnc_gm / gnc_tls
// constructor lines (orig. 147, 154, 156, 165) are missing.
146 TEST(GncOptimizer, initializeMu) {
148 
149  Point2 p0(3, 3);
150  Values initial;
151  initial.insert(X(1), p0);
152 
153  // testing GM mu initialization
155  gncParams.setLossType(GncLossType::GM);
157  gncParams);
158  gnc_gm.setInlierCostThresholds(1.0);
159  // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq
160  // (barcSq=1 in this example)
161  EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
162 
163  // testing TLS mu initialization
164  gncParams.setLossType(GncLossType::TLS);
166  gncParams);
167  gnc_tls.setInlierCostThresholds(1.0);
168  // according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq)
169  // (barcSq=1 in this example)
170  EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
171 }
172 
173 /* ************************************************************************* */
// Checks the mu update rule for the GM loss: mu <- mu / muStep, saturating at 1.
// NOTE(review): doc-extraction gap — graph creation, the gncParams declaration
// and the gnc constructor line (orig. 176, 182, 185) are missing.
174 TEST(GncOptimizer, updateMuGM) {
175  // has to have Gaussian noise models !
177 
178  Point2 p0(3, 3);
179  Values initial;
180  initial.insert(X(1), p0);
181 
183  gncParams.setLossType(GncLossType::GM);
184  gncParams.setMuStep(1.4);
186  gncParams);
187 
188  double mu = 5.0;
189  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
190 
191  // check it correctly saturates to 1 for GM
192  mu = 1.2;
193  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), 1.0, tol);
194 }
195 
196 /* ************************************************************************* */
// Checks the mu update rule for the TLS loss: mu <- mu * muStep (increasing).
// NOTE(review): doc-extraction gap — graph creation, the gncParams declaration
// and the gnc constructor line (orig. 199, 205, 208) are missing.
197 TEST(GncOptimizer, updateMuTLS) {
198  // has to have Gaussian noise models !
200 
201  Point2 p0(3, 3);
202  Values initial;
203  initial.insert(X(1), p0);
204 
206  gncParams.setMuStep(1.4);
207  gncParams.setLossType(GncLossType::TLS);
209  gncParams);
210 
211  double mu = 5.0;
212  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol);
213 }
214 
215 /* ************************************************************************* */
// Checks mu-based convergence: converged at mu == 1 for GM; never mu-converged
// for TLS (TLS uses weight/cost criteria instead).
// NOTE(review): doc-extraction gap — graph creation, gncParams declarations,
// the TLS loss-type argument and the gnc constructor lines (orig. 218, 225,
// 227, 236-237) are missing.
216 TEST(GncOptimizer, checkMuConvergence) {
217  // has to have Gaussian noise models !
219 
220  Point2 p0(3, 3);
221  Values initial;
222  initial.insert(X(1), p0);
223 
224  {
226  gncParams.setLossType(GncLossType::GM);
228  gncParams);
229 
230  double mu = 1.0;
231  CHECK(gnc.checkMuConvergence(mu));
232  }
233  {
235  gncParams.setLossType(
238  gncParams);
239 
240  double mu = 1.0;
241  CHECK(!gnc.checkMuConvergence(mu)); //always false for TLS
242  }
243 }
244 
245 /* ************************************************************************* */
// Checks cost-based convergence: declared converged iff the relative cost
// reduction |prev - cost| / prev falls at or below relativeCostTol.
// NOTE(review): doc-extraction gap — graph creation, gncParams declarations
// and the gnc constructor lines (orig. 248, 255, 257, 266, 268) are missing.
246 TEST(GncOptimizer, checkCostConvergence) {
247  // has to have Gaussian noise models !
249 
250  Point2 p0(3, 3);
251  Values initial;
252  initial.insert(X(1), p0);
253 
254  {
256  gncParams.setRelativeCostTol(0.49);
258  gncParams);
259 
260  double prev_cost = 1.0;
261  double cost = 0.5;
262  // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
263  CHECK(!gnc.checkCostConvergence(cost, prev_cost));
264  }
265  {
267  gncParams.setRelativeCostTol(0.51);
269  gncParams);
270 
271  double prev_cost = 1.0;
272  double cost = 0.5;
273  // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
274  CHECK(gnc.checkCostConvergence(cost, prev_cost));
275  }
276 }
277 
278 /* ************************************************************************* */
// Checks weight-based convergence: never used for GM; for TLS, converged when
// all weights are within weightsTol of binary {0,1} values.
// NOTE(review): doc-extraction gap — graph creation, gncParams declarations,
// TLS loss-type arguments and the gnc constructor lines (orig. 281, 288, 290,
// 297, 299-300, 308, 310-311, 319, 321, 323) are missing.
279 TEST(GncOptimizer, checkWeightsConvergence) {
280  // has to have Gaussian noise models !
282 
283  Point2 p0(3, 3);
284  Values initial;
285  initial.insert(X(1), p0);
286 
287  {
289  gncParams.setLossType(GncLossType::GM);
291  gncParams);
292 
293  Vector weights = Vector::Ones(fg.size());
294  CHECK(!gnc.checkWeightsConvergence(weights)); //always false for GM
295  }
296  {
298  gncParams.setLossType(
301  gncParams);
302 
303  Vector weights = Vector::Ones(fg.size());
304  // weights are binary, so checkWeightsConvergence = true
305  CHECK(gnc.checkWeightsConvergence(weights));
306  }
307  {
309  gncParams.setLossType(
312  gncParams);
313 
314  Vector weights = Vector::Ones(fg.size());
315  weights[0] = 0.9; // more than weightsTol = 1e-4 from 1, hence checkWeightsConvergence = false
316  CHECK(!gnc.checkWeightsConvergence(weights));
317  }
318  {
320  gncParams.setLossType(
322  gncParams.setWeightsTol(0.1);
324  gncParams);
325 
326  Vector weights = Vector::Ones(fg.size());
327  weights[0] = 0.9; // exactly weightsTol = 0.1 from 1, hence checkWeightsConvergence = true
328  CHECK(gnc.checkWeightsConvergence(weights));
329  }
330 }
331 
332 /* ************************************************************************* */
// Checks TLS cost convergence at the boundary: identical costs converge; a
// cost reduction larger than relativeCostTol does not.
// NOTE(review): doc-extraction gap — graph creation, the gncParams declaration
// and the gnc constructor line (orig. 335, 341, 344) are missing.
333 TEST(GncOptimizer, checkConvergenceTLS) {
334  // has to have Gaussian noise models !
336 
337  Point2 p0(3, 3);
338  Values initial;
339  initial.insert(X(1), p0);
340 
342  gncParams.setRelativeCostTol(1e-5);
343  gncParams.setLossType(GncLossType::TLS);
345  gncParams);
346 
347  CHECK(gnc.checkCostConvergence(1.0, 1.0));
348  CHECK(!gnc.checkCostConvergence(1.0, 2.0));
349 }
350 
351 /* ************************************************************************* */
// Checks GM weight computation: inliers (zero residual) get weight 1, the
// outlier gets w = (mu*barcSq / (error + mu*barcSq))^2, for two (mu, barcSq)
// settings.
// NOTE(review): doc-extraction gap — graph creation (orig. line 353) and the
// gnc2 constructor line (orig. 380) are missing.
352 TEST(GncOptimizer, calculateWeightsGM) {
354 
355  Point2 p0(0, 0);
356  Values initial;
357  initial.insert(X(1), p0);
358 
359  // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 *
360  // 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
361  Vector weights_expected = Vector::Zero(4);
362  weights_expected[0] = 1.0; // zero error
363  weights_expected[1] = 1.0; // zero error
364  weights_expected[2] = 1.0; // zero error
365  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
366 
367  GaussNewtonParams gnParams;
368  GncParams<GaussNewtonParams> gncParams(gnParams);
369  gncParams.setLossType(GncLossType::GM);
370  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
371  gnc.setInlierCostThresholds(1.0);
372  double mu = 1.0;
373  Vector weights_actual = gnc.calculateWeights(initial, mu);
374  CHECK(assert_equal(weights_expected, weights_actual, tol));
375 
376  mu = 2.0;
377  double barcSq = 5.0;
378  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
379 
381  gncParams);
382  gnc2.setInlierCostThresholds(barcSq);
383  weights_actual = gnc2.calculateWeights(initial, mu);
384  CHECK(assert_equal(weights_expected, weights_actual, tol));
385 }
386 
387 /* ************************************************************************* */
// Checks TLS weight computation on the default thresholds: inliers get weight
// 1, the outlier gets weight 0.
// NOTE(review): doc-extraction gap — graph creation (orig. line 389) is
// missing; verify against upstream.
388 TEST(GncOptimizer, calculateWeightsTLS) {
390 
391  Point2 p0(0, 0);
392  Values initial;
393  initial.insert(X(1), p0);
394 
395  // we have 4 factors, 3 with zero errors (inliers), 1 with error
396  Vector weights_expected = Vector::Zero(4);
397  weights_expected[0] = 1.0; // zero error
398  weights_expected[1] = 1.0; // zero error
399  weights_expected[2] = 1.0; // zero error
400  weights_expected[3] = 0; // outliers
401 
402  GaussNewtonParams gnParams;
403  GncParams<GaussNewtonParams> gncParams(gnParams);
404  gncParams.setLossType(GncLossType::TLS);
405  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
406  double mu = 1.0;
407  Vector weights_actual = gnc.calculateWeights(initial, mu);
408  CHECK(assert_equal(weights_expected, weights_actual, tol));
409 }
410 
411 /* ************************************************************************* */
// Checks TLS weights on a single-factor graph with residual cost exactly 0.5:
// threshold above the residual -> inlier (w=1), below -> outlier (w=0),
// exactly at the residual -> boundary weight 0.5.
// NOTE(review): doc-extraction gap — the graph declaration (orig. line 423)
// and the gnc constructor lines (orig. 438, 455, 471) are missing.
412 TEST(GncOptimizer, calculateWeightsTLS2) {
413 
414  // create values
415  Point2 x_val(0.0, 0.0);
416  Point2 x_prior(1.0, 0.0);
417  Values initial;
418  initial.insert(X(1), x_val);
419 
420  // create very simple factor graph with a single factor 0.5 * 1/sigma^2 * || x - [1;0] ||^2
421  double sigma = 1;
422  SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
424  nfg.add(PriorFactor<Point2>(X(1), x_prior, noise));
425 
426  // cost of the factor:
427  DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol);
428 
429  // check the TLS weights are correct: CASE 1: residual below barcsq
430  {
431  // expected:
432  Vector weights_expected = Vector::Zero(1);
433  weights_expected[0] = 1.0; // inlier
434  // actual:
435  GaussNewtonParams gnParams;
436  GncParams<GaussNewtonParams> gncParams(gnParams);
437  gncParams.setLossType(GncLossType::TLS);
439  gncParams);
440  gnc.setInlierCostThresholds(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier
441 
442  double mu = 1e6;
443  Vector weights_actual = gnc.calculateWeights(initial, mu);
444  CHECK(assert_equal(weights_expected, weights_actual, tol));
445  }
446  // check the TLS weights are correct: CASE 2: residual above barcsq
447  {
448  // expected:
449  Vector weights_expected = Vector::Zero(1);
450  weights_expected[0] = 0.0; // outlier
451  // actual:
452  GaussNewtonParams gnParams;
453  GncParams<GaussNewtonParams> gncParams(gnParams);
454  gncParams.setLossType(GncLossType::TLS);
456  gncParams);
457  gnc.setInlierCostThresholds(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier
458  double mu = 1e6; // very large mu recovers original TLS cost
459  Vector weights_actual = gnc.calculateWeights(initial, mu);
460  CHECK(assert_equal(weights_expected, weights_actual, tol));
461  }
462  // check the TLS weights are correct: CASE 3: residual exactly at barcsq
463  {
464  // expected:
465  Vector weights_expected = Vector::Zero(1);
466  weights_expected[0] = 0.5; // undecided
467  // actual:
468  GaussNewtonParams gnParams;
469  GncParams<GaussNewtonParams> gncParams(gnParams);
470  gncParams.setLossType(GncLossType::TLS);
472  gncParams);
473  gnc.setInlierCostThresholds(0.5); // inlier threshold equals the residual cost 0.5, so the measurement is on the inlier/outlier boundary
474  double mu = 1e6; // very large mu recovers original TLS cost
475  Vector weights_actual = gnc.calculateWeights(initial, mu);
476  CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
477  }
478 }
479 
480 /* ************************************************************************* */
// Checks that makeWeightedGraph rescales each factor's noise model by its
// weight: weight 1e-4 on info 1/0.1^2 = 100 yields info 0.01, i.e. sigma 10.
// NOTE(review): doc-extraction gap — the graph/expected creation lines (orig.
// 484, 489), the lmParams declaration (orig. 501) and the gnc constructor line
// (orig. 503) are missing.
481 TEST(GncOptimizer, makeWeightedGraph) {
482  // create original factor
483  double sigma1 = 0.1;
485  sigma1);
486 
487  // create expected
488  double sigma2 = 10;
490  sigma2);
491 
492  // create weights
493  Vector weights = Vector::Ones(1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
494  weights[0] = 1e-4;
495 
496  // create actual
497  Point2 p0(3, 3);
498  Values initial;
499  initial.insert(X(1), p0);
500 
502  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
504  gncParams);
505  NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
506 
507  // check it's all good
508  CHECK(assert_equal(expected, actual));
509 }
510 
511 /* ************************************************************************* */
// Smoke test: on an outlier-free unary-factor problem GNC reaches zero cost.
// NOTE(review): doc-extraction gap — graph creation (orig. 513), the lmParams
// declaration (orig. 519) and the gnc constructor line (orig. 521) are missing.
512 TEST(GncOptimizer, optimizeSimple) {
514 
515  Point2 p0(3, 3);
516  Values initial;
517  initial.insert(X(1), p0);
518 
520  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
522  gncParams);
523 
524  Values actual = gnc.optimize();
525  DOUBLES_EQUAL(0, fg.error(actual), tol);
526 }
527 
528 /* ************************************************************************* */
531 
532  Point2 p0(1, 0);
533  Values initial;
534  initial.insert(X(1), p0);
535 
536  // try with nonrobust cost function and standard GN
537  GaussNewtonParams gnParams;
538  GaussNewtonOptimizer gn(fg, initial, gnParams);
539  Values gn_results = gn.optimize();
540  // converges to incorrect point due to lack of robustness to an outlier, ideal
541  // solution is Point2(0,0)
542  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
543 
544  // try with robust loss function and standard GN
545  auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with
546  // factors wrapped in
547  // Geman McClure losses
548  GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
549  Values gn2_results = gn2.optimize();
550  // converges to incorrect point, this time due to the nonconvexity of the loss
551  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
552 
553  // .. but graduated nonconvexity ensures both robustness and convergence in
554  // the face of nonconvexity
555  GncParams<GaussNewtonParams> gncParams(gnParams);
556  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
557  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
558  Values gnc_result = gnc.optimize();
559  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
560 }
561 
562 /* ************************************************************************* */
// Checks optimization when measurements 0-2 are declared known inliers: their
// weights stay pinned at 1, the remaining measurement is classified by GNC;
// with a very large threshold (100) everything becomes an inlier and the
// estimate shifts to the non-robust solution (0.25, 0).
// NOTE(review): doc-extraction gap — graph creation (orig. 564), the
// IndexVector declaration (orig. 570), gncParams declarations and the gnc
// constructor lines (orig. 577, 581, 594, 598, 613, 617) are missing.
563 TEST(GncOptimizer, optimizeWithKnownInliers) {
565 
566  Point2 p0(1, 0);
567  Values initial;
568  initial.insert(X(1), p0);
569 
571  knownInliers.push_back(0);
572  knownInliers.push_back(1);
573  knownInliers.push_back(2);
574 
575  // nonconvexity with known inliers
576  {
578  gncParams.setKnownInliers(knownInliers);
579  gncParams.setLossType(GncLossType::GM);
580  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
582  gncParams);
583  gnc.setInlierCostThresholds(1.0);
584  Values gnc_result = gnc.optimize();
585  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
586 
587  // check weights were actually fixed:
588  Vector finalWeights = gnc.getWeights();
589  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
590  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
591  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
592  }
593  {
595  gncParams.setKnownInliers(knownInliers);
596  gncParams.setLossType(GncLossType::TLS);
597  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
599  gncParams);
600 
601  Values gnc_result = gnc.optimize();
602  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
603 
604  // check weights were actually fixed:
605  Vector finalWeights = gnc.getWeights();
606  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
607  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
608  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
609  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
610  }
611  {
612  // if we set the threshold large, they are all inliers
614  gncParams.setKnownInliers(knownInliers);
615  gncParams.setLossType(GncLossType::TLS);
616  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
618  gncParams);
619  gnc.setInlierCostThresholds(100.0);
620 
621  Values gnc_result = gnc.optimize();
622  CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
623 
624  // check weights were actually fixed:
625  Vector finalWeights = gnc.getWeights();
626  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
627  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
628  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
629  DOUBLES_EQUAL(1.0, finalWeights[3], tol);
630  }
631 }
632 
633 /* ************************************************************************* */
// Checks the Chi2inv helper (inverse chi-squared CDF) against MATLAB values.
634 TEST(GncOptimizer, chi2inv) {
635  DOUBLES_EQUAL(8.807468393511950, Chi2inv(0.997, 1), tol); // from MATLAB: chi2inv(0.997, 1) = 8.807468393511950
636  DOUBLES_EQUAL(13.931422665512077, Chi2inv(0.997, 3), tol); // from MATLAB: chi2inv(0.997, 3) = 13.931422665512077
637 }
638 
639 /* ************************************************************************* */
// Checks the default inlier cost thresholds: chi2inv(0.99, dof)/2 per factor
// (dof = 2 here), i.e. chi2inv(0.99, 2)/2 = 4.6052 for every factor.
// NOTE(review): doc-extraction gap — graph creation (orig. 641), the
// IndexVector declaration (orig. 647), the gncParams declaration (orig. 652)
// and the gnc constructor line (orig. 656) are missing.
640 TEST(GncOptimizer, barcsq) {
642 
643  Point2 p0(1, 0);
644  Values initial;
645  initial.insert(X(1), p0);
646 
648  knownInliers.push_back(0);
649  knownInliers.push_back(1);
650  knownInliers.push_back(2);
651 
653  gncParams.setKnownInliers(knownInliers);
654  gncParams.setLossType(GncLossType::GM);
655  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
657  gncParams);
658  // expected: chi2inv(0.99, 2)/2
659  CHECK(assert_equal(4.605170185988091 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
660 }
661 
662 /* ************************************************************************* */
// Checks default thresholds on a graph whose factors have different dimensions
// (3, 2, 1): each threshold is chi2inv(0.99, dim)/2 for that factor's dim.
// NOTE(review): doc-extraction gap — the graph declaration (orig. 664) and the
// gnc constructor line (orig. 677) are missing.
663 TEST(GncOptimizer, barcsq_heterogeneousFactors) {
665  // specify noise model, otherwise it segfaults if we leave default noise model
666  SharedNoiseModel model3D(noiseModel::Isotropic::Sigma(3, 0.5));
667  fg.add( PriorFactor<Pose2>( 0, Pose2(0.0, 0.0, 0.0) , model3D )); // size 3
668  SharedNoiseModel model2D(noiseModel::Isotropic::Sigma(2, 0.5));
669  fg.add( PriorFactor<Point2>( 1, Point2(0.0,0.0), model2D )); // size 2
670  SharedNoiseModel model1D(noiseModel::Isotropic::Sigma(1, 0.5));
671  fg.add( BearingFactor<Pose2, Point2>( 0, 1, 1.0, model1D) ); // size 1
672 
673  Values initial;
674  initial.insert(0, Pose2(0.0, 0.0, 0.0));
675  initial.insert(1, Point2(0.0,0.0));
676 
678  CHECK(assert_equal(Vector3(5.672433365072185, 4.605170185988091, 3.317448300510607),
679  gnc.getInlierCostThresholds(), 1e-3));
680 
681  // extra test:
682  // fg.add( PriorFactor<Pose2>( 0, Pose2(0.0, 0.0, 0.0) )); // works if we add model3D as noise model
683  // std::cout << "fg[3]->dim() " << fg[3]->dim() << std::endl; // this segfaults?
684 }
685 
686 /* ************************************************************************* */
// Checks the two setInlierCostThresholds overloads (scalar and per-factor
// Vector) produce the same thresholds and the same robust estimate.
// NOTE(review): doc-extraction gap — graph creation (orig. 688), the
// IndexVector declaration (orig. 694), gncParams declarations and the gnc
// constructor lines (orig. 699, 703, 717, 721) are missing.
687 TEST(GncOptimizer, setInlierCostThresholds) {
689 
690  Point2 p0(1, 0);
691  Values initial;
692  initial.insert(X(1), p0);
693 
695  knownInliers.push_back(0);
696  knownInliers.push_back(1);
697  knownInliers.push_back(2);
698  {
700  gncParams.setKnownInliers(knownInliers);
701  gncParams.setLossType(GncLossType::GM);
702  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
704  gncParams);
705  gnc.setInlierCostThresholds(2.0);
706  Values gnc_result = gnc.optimize();
707  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
708 
709  // check weights were actually fixed:
710  Vector finalWeights = gnc.getWeights();
711  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
712  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
713  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
714  CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
715  }
716  {
718  gncParams.setKnownInliers(knownInliers);
719  gncParams.setLossType(GncLossType::GM);
720  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
722  gncParams);
723  gnc.setInlierCostThresholds(2.0 * Vector::Ones(fg.size()));
724  Values gnc_result = gnc.optimize();
725  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
726 
727  // check weights were actually fixed:
728  Vector finalWeights = gnc.getWeights();
729  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
730  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
731  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
732  CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
733  }
734 }
735 
736 /* ************************************************************************* */
// End-to-end test on the w100 2D pose graph: an injected wrong loop closure
// derails plain LM, but GNC recovers the outlier-free LM solution.
// NOTE(review): doc-extraction gap — the comment/expected-solution lines (orig.
// 738, 747-748, 755), the gncParams declaration and the gnc constructor lines
// (orig. 765-766) are missing.
737 TEST(GncOptimizer, optimizeSmallPoseGraph) {
739  const string filename = findExampleDataFile("w100.graph");
740  const auto [graph, initial] = load2D(filename);
741  // Add a Gaussian prior on first poses
742  Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
743  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
744  Vector3(0.01, 0.01, 0.01));
745  graph->addPrior(0, priorMean, priorNoise);
746 
749 
750  // add a few outliers
751  SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(
752  Vector3(0.1, 0.1, 0.01));
753  graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise)); // some arbitrary and incorrect between factor
754 
756  Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial)
757  .optimize();
758  // as expected, the following test fails due to the presence of an outlier!
759  // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));
760 
761  // GNC
762  // Note: in difficult instances, we set the odometry measurements to be
763  // inliers, but this problem is simple enough to succeed even without that
764  // assumption GncParams<GaussNewtonParams>::IndexVector knownInliers;
767  gncParams);
768  Values actual = gnc.optimize();
769 
770  // compare
771  CHECK(assert_equal(expected, actual, 1e-3)); // yay! we are robust to outliers!
772 }
773 
774 /* ************************************************************************* */
// Checks setKnownInliers / setKnownOutliers: pinned measurements keep weight
// 1 (inliers) or 0 (outliers) and, when every measurement is classified a
// priori, GNC can stop early; also tests partial knowledge.
// NOTE(review): doc-extraction gap — graph creation (orig. 776), the
// IndexVector declarations (orig. 785, 790, 814, 818, 842), gncParams
// declarations and the gnc constructor lines (orig. 793, 798, 821, 826, 845,
// 849) are missing.
775 TEST(GncOptimizer, knownInliersAndOutliers) {
777 
778  Point2 p0(1, 0);
779  Values initial;
780  initial.insert(X(1), p0);
781 
782  // nonconvexity with known inliers and known outliers (check early stopping
783  // when all measurements are known to be inliers or outliers)
784  {
786  knownInliers.push_back(0);
787  knownInliers.push_back(1);
788  knownInliers.push_back(2);
789 
791  knownOutliers.push_back(3);
792 
794  gncParams.setKnownInliers(knownInliers);
795  gncParams.setKnownOutliers(knownOutliers);
796  gncParams.setLossType(GncLossType::GM);
797  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
799  gncParams);
800  gnc.setInlierCostThresholds(1.0);
801  Values gnc_result = gnc.optimize();
802  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
803 
804  // check weights were actually fixed:
805  Vector finalWeights = gnc.getWeights();
806  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
807  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
808  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
809  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
810  }
811 
812  // nonconvexity with known inliers and known outliers
813  {
815  knownInliers.push_back(2);
816  knownInliers.push_back(0);
817 
819  knownOutliers.push_back(3);
820 
822  gncParams.setKnownInliers(knownInliers);
823  gncParams.setKnownOutliers(knownOutliers);
824  gncParams.setLossType(GncLossType::GM);
825  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
827  gncParams);
828  gnc.setInlierCostThresholds(1.0);
829  Values gnc_result = gnc.optimize();
830  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
831 
832  // check weights were actually fixed:
833  Vector finalWeights = gnc.getWeights();
834  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
835  DOUBLES_EQUAL(1.0, finalWeights[1], 1e-5);
836  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
837  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
838  }
839 
840  // only known outliers
841  {
843  knownOutliers.push_back(3);
844 
846  gncParams.setKnownOutliers(knownOutliers);
847  gncParams.setLossType(GncLossType::GM);
848  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
850  gncParams);
851  gnc.setInlierCostThresholds(1.0);
852  Values gnc_result = gnc.optimize();
853  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
854 
855  // check weights were actually fixed:
856  Vector finalWeights = gnc.getWeights();
857  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
858  DOUBLES_EQUAL(1.0, finalWeights[1], 1e-5);
859  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
860  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
861  }
862 }
863 
864 /* ************************************************************************* */
// Checks setWeights: GNC recovers the correct solution and final binary
// weights from uniform (0.5), adversarial (outlier marked inlier), and
// mixed (custom weights + known inliers/outliers) initializations.
// NOTE(review): doc-extraction gap — graph creation (orig. 866), gncParams and
// IndexVector declarations, and the gnc constructor lines (orig. 873, 877,
// 893, 900, 916-917, 921, 929) are missing.
865 TEST(GncOptimizer, setWeights) {
867 
868  Point2 p0(1, 0);
869  Values initial;
870  initial.insert(X(1), p0);
871  // initialize weights to be the same
872  {
874  gncParams.setLossType(GncLossType::TLS);
875 
876  Vector weights = 0.5 * Vector::Ones(fg.size());
878  gncParams);
879  gnc.setWeights(weights);
880  gnc.setInlierCostThresholds(1.0);
881  Values gnc_result = gnc.optimize();
882  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
883 
884  // check weights were actually fixed:
885  Vector finalWeights = gnc.getWeights();
886  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
887  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
888  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
889  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
890  }
891  // try a more challenging initialization
892  {
894  gncParams.setLossType(GncLossType::TLS);
895 
896  Vector weights = Vector::Zero(fg.size());
897  weights(2) = 1.0;
898  weights(3) = 1.0; // bad initialization: we say the outlier is inlier
899  // GNC can still recover (but if you omit weights(2) = 1.0, then it would fail)
901  gncParams);
902  gnc.setWeights(weights);
903  gnc.setInlierCostThresholds(1.0);
904  Values gnc_result = gnc.optimize();
905  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
906 
907  // check weights were actually fixed:
908  Vector finalWeights = gnc.getWeights();
909  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
910  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
911  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
912  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
913  }
914  // initialize weights and also set known inliers/outliers
915  {
918  knownInliers.push_back(2);
919  knownInliers.push_back(0);
920 
922  knownOutliers.push_back(3);
923  gncParams.setKnownInliers(knownInliers);
924  gncParams.setKnownOutliers(knownOutliers);
925 
926  gncParams.setLossType(GncLossType::TLS);
927 
928  Vector weights = 0.5 * Vector::Ones(fg.size());
930  gncParams);
931  gnc.setWeights(weights);
932  gnc.setInlierCostThresholds(1.0);
933  Values gnc_result = gnc.optimize();
934  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
935 
936  // check weights were actually fixed:
937  Vector finalWeights = gnc.getWeights();
938  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
939  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
940  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
941  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
942  }
943 }
944 
945 /* ************************************************************************* */
// CppUnitLite entry point: runs every TEST registered above.
946 int main() {
947  TestResult tr;
948  return TestRegistry::runAllTests(tr);
949 }
950 /* ************************************************************************* */
#define CHECK(condition)
Definition: Test.h:108
void setWeightsTol(double value)
Set the maximum difference between the weights and their rounding in {0,1} to stop iterating...
Definition: GncParams.h:107
void setWeights(const Vector w)
Definition: GncOptimizer.h:144
virtual const Values & optimize()
static int runAllTests(TestResult &result)
Eigen::Vector3d Vector3
Definition: Vector.h:43
Wrap Jacobian and Hessian linear factors to allow simple injection into a nonlinear graph...
static double Chi2inv(const double alpha, const size_t dofs)
Definition: GncOptimizer.h:38
const ValueType at(Key j) const
Definition: Values-inl.h:204
Matrix expected
Definition: testMatrix.cpp:971
#define DOUBLES_EQUAL(expected, actual, threshold)
Definition: Test.h:141
Vector2 Point2
Definition: Point2.h:32
double mu
IsDerived< DERIVEDFACTOR > push_back(std::shared_ptr< DERIVEDFACTOR > factor)
Add a factor directly using a shared_ptr.
Definition: FactorGraph.h:190
NonlinearFactorGraph sharedRobustFactorGraphWithOutliers()
Definition: smallExample.h:406
NonlinearFactorGraph createReallyNonlinearFactorGraph()
Definition: smallExample.h:378
bool assert_equal(const Matrix &expected, const Matrix &actual, double tol)
Definition: Matrix.cpp:40
NonlinearFactorGraph nonlinearFactorGraphWithGivenSigma(const double sigma)
Definition: smallExample.h:354
BaseOptimizerParameters baseOptimizerParams
GNC parameters.
Definition: GncParams.h:67
Values initial
MatrixXd L
Definition: LLT_example.cpp:6
Definition: BFloat16.h:88
Vector3f p0
int main()
NonlinearFactorGraph graph
FastVector< uint64_t > IndexVector
Slots in the factor graph corresponding to measurements that we know are inliers. ...
Definition: GncParams.h:78
#define EXPECT_DOUBLES_EQUAL(expected, actual, threshold)
Definition: Test.h:161
IsDerived< DERIVEDFACTOR > add(std::shared_ptr< DERIVEDFACTOR > factor)
add is a synonym for push_back.
Definition: FactorGraph.h:214
void addPrior(Key key, const T &prior, const SharedNoiseModel &model=nullptr)
TEST(GncOptimizer, gncParamsConstructor)
void setKnownInliers(const IndexVector &knownIn)
Definition: GncParams.h:122
Eigen::VectorXd Vector
Definition: Vector.h:38
Values result
void setLossType(const GncLossType type)
Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
Definition: GncParams.h:84
void setVerbosity(const std::string &src)
void setMuStep(const double step)
Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep...
Definition: GncParams.h:97
The GncOptimizer class.
GraphAndValues load2D(const std::string &filename, SharedNoiseModel model, size_t maxIndex, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType)
Definition: dataset.cpp:505
LevenbergMarquardtParams lmParams
Pose2 priorMean(0.0, 0.0, 0.0)
Array< double, 1, 3 > e(1./3., 0.5, 2.)
void setKnownOutliers(const IndexVector &knownOut)
Definition: GncParams.h:133
double error(const Values &values) const
noiseModel::Diagonal::shared_ptr SharedDiagonal
Definition: NoiseModel.h:743
traits
Definition: chartTesting.h:28
void setRelativeCostTol(double value)
Set the maximum relative difference in mu values to stop iterating.
Definition: GncParams.h:102
auto priorNoise
GTSAM_EXPORT std::string findExampleDataFile(const std::string &name)
Definition: dataset.cpp:70
Eigen::Vector2d Vector2
Definition: Vector.h:42
static double tol
int optimize(const SfmData &db, const NonlinearFactorGraph &graph, const Values &initial, bool separateCalibration=false)
Definition: timeSFMBAL.h:64
Create small example with two poses and one landmark.
static const double sigma
void insert(Key j, const Value &val)
Definition: Values.cpp:155
bool equals(const NonlinearOptimizerParams &other, double tol=1e-9) const
Jet< T, N > pow(const Jet< T, N > &f, double g)
Definition: jet.h:570
NonlinearFactorGraph sharedNonRobustFactorGraphWithOutliers()
Definition: smallExample.h:383
2D Pose
#define X
Definition: icosphere.cpp:20
void setInlierCostThresholds(const double inth)
Definition: GncOptimizer.h:117
utility functions for loading datasets
noiseModel::Base::shared_ptr SharedNoiseModel
Definition: NoiseModel.h:741
size_t maxIterations
The maximum iterations to stop iterating (default 100)


gtsam
Author(s):
autogenerated on Tue Jul 4 2023 02:38:18