using namespace gtsam;

// GncParams construction: the wrapped base optimizer parameters must round-trip.
CHECK(lmParams.equals(gncParams1b.baseOptimizerParams));
CHECK(gnParams.equals(gncParams2b.baseOptimizerParams));
CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));

// GncOptimizer construction: the optimizer keeps the factor graph, the initial values,
// and the parameters it was given, and compares equal to an identically built optimizer.
CHECK(gnc.getFactors().equals(fg));
CHECK(gnc.getState().equals(initial));
CHECK(gnc.getParams().equals(gncParams));
CHECK(gnc.equals(gnc2));
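These checks exercise the basic construction path. A minimal sketch, assuming a NonlinearFactorGraph fg and Values initial provided by one of the test fixtures:

#include <gtsam/nonlinear/GncOptimizer.h>

LevenbergMarquardtParams lmParams;
GncParams<LevenbergMarquardtParams> gncParams(lmParams);   // wraps the base optimizer settings
GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(fg, initial, gncParams);
Values result = gnc.optimize();                            // run graduated non-convexity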
CHECK(fg.equals(gnc.getFactors()));
// Convergence checks on the graduated non-convexity parameter mu.
CHECK(gnc.checkMuConvergence(mu));
CHECK(!gnc.checkMuConvergence(mu));
// Cost convergence is judged on the relative change with respect to prev_cost.
double prev_cost = 1.0;
CHECK(!gnc.checkCostConvergence(cost, prev_cost));

double prev_cost = 1.0;
CHECK(gnc.checkCostConvergence(cost, prev_cost));
// Convergence of the weights toward a binary (0/1) assignment.
Vector weights = Vector::Ones(fg.size());
CHECK(!gnc.checkWeightsConvergence(weights));

Vector weights = Vector::Ones(fg.size());
CHECK(gnc.checkWeightsConvergence(weights));

Vector weights = Vector::Ones(fg.size());
CHECK(!gnc.checkWeightsConvergence(weights));

Vector weights = Vector::Ones(fg.size());
CHECK(gnc.checkWeightsConvergence(weights));

// Identical costs converge; a large relative cost change does not.
CHECK(gnc.checkCostConvergence(1.0, 1.0));
CHECK(!gnc.checkCostConvergence(1.0, 2.0));
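The convergence checks above are governed by tolerances on GncParams. A small configuration sketch (the numeric values are illustrative, not the library defaults):

GncParams<LevenbergMarquardtParams> params;
params.setRelativeCostTol(1e-5);  // stop when the relative cost change falls below this
params.setWeightsTol(1e-4);       // stop when every weight is within this tolerance of 0 or 1
params.setMaxIterations(100);     // hard cap on the number of GNC iterations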
// Geman-McClure weights: the three inliers get weight 1, the outlier (error 50) is down-weighted.
Vector weights_expected = Vector::Zero(4);
weights_expected[0] = 1.0;
weights_expected[1] = 1.0;
weights_expected[2] = 1.0;
weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2);  // the general formula below with mu * barcSq = 1
Vector weights_actual = gnc.calculateWeights(initial, mu);

weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);
weights_actual = gnc2.calculateWeights(initial, mu);
// A fully rejected outlier gets weight 0 while the inliers keep weight 1.
Vector weights_expected = Vector::Zero(4);
weights_expected[0] = 1.0;
weights_expected[1] = 1.0;
weights_expected[2] = 1.0;
weights_expected[3] = 0.0;
Vector weights_actual = gnc.calculateWeights(initial, mu);
// Single-factor cases: a clear inlier (weight 1), a clear outlier (weight 0),
// and a measurement in the transition region (weight 0.5).
Vector weights_expected = Vector::Zero(1);
weights_expected[0] = 1.0;
Vector weights_actual = gnc.calculateWeights(initial, mu);

Vector weights_expected = Vector::Zero(1);
weights_expected[0] = 0.0;
Vector weights_actual = gnc.calculateWeights(initial, mu);

Vector weights_expected = Vector::Zero(1);
weights_expected[0] = 0.5;
Vector weights_actual = gnc.calculateWeights(initial, mu);
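The expected weights above come from the GNC weight updates. A sketch of the two rules, in my own words rather than code taken from GncOptimizer: here u2 stands for a factor's error at the current estimate (50.0 for the outlier above) and barcSq for its inlier cost threshold.

#include <cmath>

double gmWeight(double u2, double mu, double barcSq) {
  // Geman-McClure surrogate: smooth down-weighting; u2 = 50, mu = 1, barcSq = 1 gives (1/51)^2.
  return std::pow(mu * barcSq / (u2 + mu * barcSq), 2);
}

double tlsWeight(double u2, double mu, double barcSq) {
  // Truncated least squares surrogate: binary outside a transition band, fractional inside it.
  if (u2 >= (mu + 1.0) / mu * barcSq) return 0.0;        // clear outlier
  if (u2 <= mu / (mu + 1.0) * barcSq) return 1.0;        // clear inlier
  return std::sqrt(barcSq * mu * (mu + 1.0) / u2) - mu;  // strictly between 0 and 1
}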
Vector weights = Vector::Ones(1);

Values actual = gnc.optimize();

Values gnc_result = gnc.optimize();
// Optimizing with measurements 0, 1, 2 marked as known inliers.
knownInliers.push_back(0);
knownInliers.push_back(1);
knownInliers.push_back(2);

Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();
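A sketch of the known-inlier workflow these excerpts exercise; fg and initial are assumed fixtures with the suspected outlier in slot 3, and FastVector<uint64_t> is the IndexVector type listed in the reference section below.

GncParams<LevenbergMarquardtParams> gncParams;
FastVector<uint64_t> knownInliers;          // IndexVector: slots we trust a priori
knownInliers.push_back(0);
knownInliers.push_back(1);
knownInliers.push_back(2);
gncParams.setKnownInliers(knownInliers);
GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(fg, initial, gncParams);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();     // known inliers keep weight 1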
// Default per-factor inlier cost thresholds, with measurements 0, 1, 2 marked as known inliers.
knownInliers.push_back(0);
knownInliers.push_back(1);
knownInliers.push_back(2);
CHECK(assert_equal(4.605170185988091 * Vector::Ones(fg.size()),
                   gnc.getInlierCostThresholds(), 1e-3));
CHECK(assert_equal(/* expected thresholds not shown in this excerpt */,
                   gnc.getInlierCostThresholds(), 1e-3));
// Inlier cost threshold of 2.0 applied to every factor.
knownInliers.push_back(0);
knownInliers.push_back(1);
knownInliers.push_back(2);

Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();
CHECK(assert_equal(2.0 * Vector::Ones(fg.size()),
                   gnc.getInlierCostThresholds(), 1e-3));

Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();
CHECK(assert_equal(2.0 * Vector::Ones(fg.size()),
                   gnc.getInlierCostThresholds(), 1e-3));
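A sketch of the threshold setter the checks above rely on (fg, initial, and gncParams as in the earlier sketches). The constant 4.605170185988091 in the earlier check equals 0.5 * Chi2inv(0.99, 2), which I read as a 99% chi-square bound for 2-dof residuals.

GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(fg, initial, gncParams);
gnc.setInlierCostThresholds(2.0);                   // one scalar threshold applied to every factor
Vector thresholds = gnc.getInlierCostThresholds();  // expected: 2.0 * Vector::Ones(fg.size())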
Values actual = gnc.optimize();

// Mixing known inliers (slots 0, 1, 2) with a known outlier (slot 3).
knownInliers.push_back(0);
knownInliers.push_back(1);
knownInliers.push_back(2);
knownOutliers.push_back(3);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

// A partial inlier list (slots 2 and 0) combined with the known outlier.
knownInliers.push_back(2);
knownInliers.push_back(0);
knownOutliers.push_back(3);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

// Only the outlier is known.
knownOutliers.push_back(3);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();
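A sketch of the known-outlier variant; slot 3 follows the push_back calls above, and the remaining setup is assumed from the earlier sketches.

GncParams<LevenbergMarquardtParams> gncParams;
FastVector<uint64_t> knownOutliers;
knownOutliers.push_back(3);
gncParams.setKnownOutliers(knownOutliers);  // slot 3 is treated as an outlier from the start
GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(fg, initial, gncParams);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();     // expect weight 0 in slot 3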
// Custom initial weights: all 0.5.
Vector weights = 0.5 * Vector::Ones(fg.size());
gnc.setInlierCostThresholds(1.0);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

// Custom initial weights: all zero.
Vector weights = Vector::Zero(fg.size());
gnc.setInlierCostThresholds(1.0);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();

// Custom initial weights combined with known inliers (slots 2, 0) and a known outlier (slot 3).
knownInliers.push_back(2);
knownInliers.push_back(0);
knownOutliers.push_back(3);
Vector weights = 0.5 * Vector::Ones(fg.size());
gnc.setInlierCostThresholds(1.0);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();
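Finally, a sketch of seeding the solver with custom initial weights, mirroring the 0.5 and zero initializations above; setWeights is the setter listed in the reference section below, and the surrounding setup is assumed from the earlier sketches.

GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(fg, initial, gncParams);
gnc.setWeights(0.5 * Vector::Ones(fg.size()));  // replace the default initial weights
gnc.setInlierCostThresholds(1.0);
Values gnc_result = gnc.optimize();
Vector finalWeights = gnc.getWeights();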
Referenced symbols (Doxygen tooltip excerpts for identifiers used in the test):

void setWeightsTol(double value) : Set the maximum difference between the weights and their rounding in {0,1} to stop iterating...
void setMaxIterations(int value)
void setWeights(const Vector w)
virtual const Values & optimize()
static int runAllTests(TestResult &result)
Wrap Jacobian and Hessian linear factors to allow simple injection into a nonlinear graph...
static double Chi2inv(const double alpha, const size_t dofs)
const ValueType at(Key j) const
#define DOUBLES_EQUAL(expected, actual, threshold)
IsDerived< DERIVEDFACTOR > push_back(std::shared_ptr< DERIVEDFACTOR > factor) : Add a factor directly using a shared_ptr.
NonlinearFactorGraph sharedRobustFactorGraphWithOutliers()
NonlinearFactorGraph createReallyNonlinearFactorGraph()
bool assert_equal(const Matrix &expected, const Matrix &actual, double tol)
NonlinearFactorGraph nonlinearFactorGraphWithGivenSigma(const double sigma)
BaseOptimizerParameters baseOptimizerParams
GNC parameters.
NonlinearFactorGraph graph
FastVector< uint64_t > IndexVector
Slots in the factor graph corresponding to measurements that we know are inliers. ...
#define EXPECT_DOUBLES_EQUAL(expected, actual, threshold)
IsDerived< DERIVEDFACTOR > add(std::shared_ptr< DERIVEDFACTOR > factor) : add is a synonym for push_back.
void addPrior(Key key, const T &prior, const SharedNoiseModel &model=nullptr)
TEST(GncOptimizer, gncParamsConstructor)
void setKnownInliers(const IndexVector &knownIn)
void setLossType(const GncLossType type) : Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
void setVerbosity(const std::string &src)
void setMuStep(const double step) : Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep...
GraphAndValues load2D(const std::string &filename, SharedNoiseModel model, size_t maxIndex, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType)
LevenbergMarquardtParams lmParams
Pose2 priorMean(0.0, 0.0, 0.0)
Array< double, 1, 3 > e(1./3., 0.5, 2.)
void setKnownOutliers(const IndexVector &knownOut)
double error(const Values &values) const
noiseModel::Diagonal::shared_ptr SharedDiagonal
void setRelativeCostTol(double value) : Set the maximum relative difference in cost to stop iterating.
GTSAM_EXPORT std::string findExampleDataFile(const std::string &name)
int optimize(const SfmData &db, const NonlinearFactorGraph &graph, const Values &initial, bool separateCalibration=false)
Create small example with two poses and one landmark.
static const double sigma
void insert(Key j, const Value &val)
bool equals(const NonlinearOptimizerParams &other, double tol=1e-9) const
Jet< T, N > pow(const Jet< T, N > &f, double g)
NonlinearFactorGraph sharedNonRobustFactorGraphWithOutliers()
void setInlierCostThresholds(const double inth)
utility functions for loading datasets
noiseModel::Base::shared_ptr SharedNoiseModel
size_t maxIterations : The maximum iterations to stop iterating (default 100)
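The GNC-specific setters documented above can be combined into a single configuration; a short sketch with illustrative values:

GncParams<LevenbergMarquardtParams> gncParams;
gncParams.setLossType(GncLossType::TLS);  // robust surrogate, chosen among GncLossType (GM or TLS)
gncParams.setMuStep(1.4);                 // each GNC iteration updates mu <- mu * muStep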