GncOptimizer.h
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#pragma once

#include <gtsam/nonlinear/GncParams.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/internal/ChiSquaredInverse.h>

namespace gtsam {
/*
 * Quantile of the chi-squared distribution with the given degrees of freedom,
 * evaluated at probability alpha. Equivalent to chi2inv in Matlab.
 */
static double Chi2inv(const double alpha, const size_t dofs) {
  return internal::chi_squared_quantile(dofs, alpha);
}

/* ************************************************************************* */
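/**
 * GncOptimizer wraps a standard GTSAM optimizer (chosen via GncParameters, e.g., a
 * Gauss-Newton or Levenberg-Marquardt solver) in a graduated non-convexity (GNC) loop
 * that iteratively re-weights the factors to reject outliers, using either the
 * Geman-McClure (GM) or Truncated Least Squares (TLS) robust loss.
 *
 * Minimal usage sketch (assumes a NonlinearFactorGraph `graph` and Values `initialEstimate`
 * built elsewhere; the GaussNewtonParams instantiation is one common choice, shown for
 * illustration only):
 * @code
 *   GncParams<GaussNewtonParams> gncParams;
 *   GncOptimizer<GncParams<GaussNewtonParams>> gnc(graph, initialEstimate, gncParams);
 *   Values estimate = gnc.optimize();
 *   Vector weights = gnc.getWeights();  // per-factor weights: ~1 for inliers, ~0 for outliers (TLS)
 * @endcode
 */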
template<class GncParameters>
class GncOptimizer {
 public:
  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
  typedef typename GncParameters::OptimizerType BaseOptimizer;

 private:
  NonlinearFactorGraph nfg_;  ///< Original factor graph to be solved by GNC.
  Values state_;              ///< Initial values to be used at each iteration by GNC.
  GncParameters params_;      ///< GNC parameters.
  Vector weights_;            ///< Weights associated to each factor in GNC.
  Vector barcSq_;             ///< Inlier thresholds: factor i is considered an inlier if its error is below barcSq_[i].

 public:
  /// Constructor.
  GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
               const GncParameters& params = GncParameters())
      : state_(initialValues),
        params_(params) {

    // make sure all noiseModels are Gaussian or convert to Gaussian
    nfg_.resize(graph.size());
    for (size_t i = 0; i < graph.size(); i++) {
      if (graph[i]) {
        NoiseModelFactor::shared_ptr factor =
            std::dynamic_pointer_cast<NoiseModelFactor>(graph[i]);
        auto robust =
            std::dynamic_pointer_cast<noiseModel::Robust>(factor->noiseModel());
        // if the factor has a robust loss, we remove the robust loss
        nfg_[i] = robust ? factor->cloneWithNewNoiseModel(robust->noise()) : factor;
      }
    }

    // check that known inliers and outliers make sense:
    std::vector<size_t> inconsistentlySpecifiedWeights;  // measurements the user has incorrectly
        // specified to be BOTH known inliers and known outliers
    std::set_intersection(params.knownInliers.begin(), params.knownInliers.end(),
                          params.knownOutliers.begin(), params.knownOutliers.end(),
                          std::inserter(inconsistentlySpecifiedWeights,
                                        inconsistentlySpecifiedWeights.begin()));
    if (inconsistentlySpecifiedWeights.size() > 0) {  // if we have inconsistently specified weights, we throw an exception
      params.print("params\n");
      throw std::runtime_error("GncOptimizer::constructor: the user has selected one or more measurements"
                               " to be BOTH a known inlier and a known outlier.");
    }
    // check that known inliers are in the graph
    for (size_t i = 0; i < params.knownInliers.size(); i++) {
      if (params.knownInliers[i] > nfg_.size() - 1) {  // outside graph
        throw std::runtime_error("GncOptimizer::constructor: the user has selected one or more measurements"
                                 " that are not in the factor graph to be known inliers.");
      }
    }
    // check that known outliers are in the graph
    for (size_t i = 0; i < params.knownOutliers.size(); i++) {
      if (params.knownOutliers[i] > nfg_.size() - 1) {  // outside graph
        throw std::runtime_error("GncOptimizer::constructor: the user has selected one or more measurements"
                                 " that are not in the factor graph to be known outliers.");
      }
    }
    // initialize weights (if we don't have prior knowledge of inliers/outliers,
    // the weights are all initialized to 1).
    weights_ = initializeWeightsFromKnownInliersAndOutliers();

    // set default barcSq_ (inlier threshold)
    double alpha = 0.99;  // with this (default) probability, inlier residuals are smaller than barcSq_
    setInlierCostThresholdsAtProbability(alpha);
  }

  /** Set the maximum weighted residual error for an inlier (same threshold for all factors).
   * For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega, an inlier at x is a measurement
   * for which f(x) <= barcSq.
   */
  void setInlierCostThresholds(const double inth) {
    barcSq_ = inth * Vector::Ones(nfg_.size());
  }

  /** Set the maximum weighted residual error for an inlier, with a potentially different
   * threshold for each factor (the vector must have one entry per factor).
   */
  void setInlierCostThresholds(const Vector& inthVec) {
    barcSq_ = inthVec;
  }

  /** Set the maximum weighted residual error threshold by specifying the probability
   * alpha that the inlier residuals are smaller than that threshold.
   */
  void setInlierCostThresholdsAtProbability(const double alpha) {
    barcSq_ = Vector::Ones(nfg_.size());  // initialize
    for (size_t k = 0; k < nfg_.size(); k++) {
      if (nfg_[k]) {
        barcSq_[k] = 0.5 * Chi2inv(alpha, nfg_[k]->dim());  // 0.5 derives from the error definition in gtsam
      }
    }
  }
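  // For instance, with the default alpha = 0.99 and a 3-dimensional factor,
  // Chi2inv(0.99, 3) is approximately 11.34, so the resulting threshold is
  // barcSq_[k] = 0.5 * 11.34 ~= 5.67 (in GTSAM's factor-error units, i.e., 0.5 * squared whitened residual).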

  /** Set weights for each factor. This is typically not needed, but provides an extra
   * interface for the user to initialize the weights (e.g., based on prior knowledge
   * of inliers and outliers).
   */
  void setWeights(const Vector w) {
    if (size_t(w.size()) != nfg_.size()) {
      throw std::runtime_error(
          "GncOptimizer::setWeights: the number of specified weights"
          " does not match the size of the factor graph.");
    }
    weights_ = w;
  }

  /// Access a copy of the internal factor graph.
  const NonlinearFactorGraph& getFactors() const { return nfg_; }

  /// Access a copy of the internal values.
  const Values& getState() const { return state_; }

  /// Access a copy of the parameters.
  const GncParameters& getParams() const { return params_; }

  /// Access a copy of the GNC weights.
  const Vector& getWeights() const { return weights_; }

  /// Get the inlier thresholds.
  const Vector& getInlierCostThresholds() const { return barcSq_; }

  /// Equals.
  bool equals(const GncOptimizer& other, double tol = 1e-9) const {
    return nfg_.equals(other.getFactors())
        && equal(weights_, other.getWeights())
        && params_.equals(other.getParams())
        && equal(barcSq_, other.getInlierCostThresholds());
  }

  /// Initialize all weights to 1, setting the weights of measurements known to be outliers to 0.
  Vector initializeWeightsFromKnownInliersAndOutliers() const {
    Vector weights = Vector::Ones(nfg_.size());
    for (size_t i = 0; i < params_.knownOutliers.size(); i++) {
      weights[params_.knownOutliers[i]] = 0.0;  // known to be outliers
    }
    return weights;
  }
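  // For example, with a 5-factor graph and params_.knownOutliers = {3}, the initial
  // weight vector is [1 1 1 0 1]; measurements known to be inliers keep weight 1 throughout.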

  /// Compute the optimal solution using graduated non-convexity.
  Values optimize() {
    NonlinearFactorGraph graph_initial = this->makeWeightedGraph(weights_);
    BaseOptimizer baseOptimizer(
        graph_initial, state_, params_.baseOptimizerParams);
    Values result = baseOptimizer.optimize();
    double mu = initializeMu();
    double prev_cost = graph_initial.error(result);
    double cost = 0.0;  // this will be updated in the main loop

    // handle the degenerate case that corresponds to small
    // maximum residual errors at initialization
    // For GM: if residual error is small, mu -> 0
    // For TLS: if residual error is small, mu -> -1
    int nrUnknownInOrOut = nfg_.size() - (params_.knownInliers.size() + params_.knownOutliers.size());
    // ^^ number of measurements that are not known to be inliers or outliers (GNC will need to figure them out)
    if (mu <= 0 || nrUnknownInOrOut == 0) {  // no need to even call GNC in this case
      if (mu <= 0 && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
        std::cout << "GNC Optimizer stopped because maximum residual at "
                     "initialization is small."
                  << std::endl;
      }
      if (nrUnknownInOrOut == 0 && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
        std::cout << "GNC Optimizer stopped because all measurements are already known to be inliers or outliers"
                  << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::MU) {
        std::cout << "mu: " << mu << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        result.print("result\n");
      }
      return result;
    }

    size_t iter;
    for (iter = 0; iter < params_.maxIterations; iter++) {

      // display info
      if (params_.verbosity >= GncParameters::Verbosity::MU) {
        std::cout << "iter: " << iter << std::endl;
        std::cout << "mu: " << mu << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
        std::cout << "weights: " << weights_ << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        result.print("result\n");
      }
      // weights update
      weights_ = calculateWeights(result, mu);

      // variable/values update
      NonlinearFactorGraph graph_iter = this->makeWeightedGraph(weights_);
      BaseOptimizer baseOptimizer_iter(
          graph_iter, state_, params_.baseOptimizerParams);
      result = baseOptimizer_iter.optimize();

      // stopping condition
      cost = graph_iter.error(result);
      if (checkConvergence(mu, weights_, cost, prev_cost)) {
        break;
      }

      // update mu
      mu = updateMu(mu);

      // get ready for next iteration
      prev_cost = cost;

      // display info
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        std::cout << "previous cost: " << prev_cost << std::endl;
        std::cout << "current cost: " << cost << std::endl;
      }
    }
    // display info
    if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
      std::cout << "final iterations: " << iter << std::endl;
      std::cout << "final mu: " << mu << std::endl;
      std::cout << "previous cost: " << prev_cost << std::endl;
      std::cout << "current cost: " << cost << std::endl;
    }
    if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
      std::cout << "final weights: " << weights_ << std::endl;
    }
    return result;
  }

  /// Initialize the gnc parameter mu such that the loss is approximately convex (remark 5 in GNC paper).
  double initializeMu() const {

    double mu_init = 0.0;
    // initialize mu to the value specified in Remark 5 in GNC paper.
    switch (params_.lossType) {
      case GncLossType::GM:
        /* surrogate cost is convex for large mu. initialize as in remark 5 in GNC paper.
         Since barcSq_ can be different for each factor, we compute the max of the quantity in remark 5 in GNC paper
         */
        for (size_t k = 0; k < nfg_.size(); k++) {
          if (nfg_[k]) {
            mu_init = std::max(mu_init, 2 * nfg_[k]->error(state_) / barcSq_[k]);
          }
        }
        return mu_init;  // initial mu
      case GncLossType::TLS:
        /* surrogate cost is convex for mu close to zero. initialize as in remark 5 in GNC paper.
         degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
         according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq / excessResidual
         however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop.
         Since barcSq_ can be different for each factor, we look for the minimum (positive) quantity in remark 5 in GNC paper
         */
        mu_init = std::numeric_limits<double>::infinity();
        for (size_t k = 0; k < nfg_.size(); k++) {
          if (nfg_[k]) {
            double rk = nfg_[k]->error(state_);
            mu_init = (2 * rk - barcSq_[k]) > 0 ?  // if positive, update mu, otherwise keep same
                std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k])) : mu_init;
          }
        }
        if (mu_init >= 0 && mu_init < 1e-6) {
          mu_init = 1e-6;  // if mu ~ 0 (but positive), that means we have measurements with large errors,
          // i.e., rk > barcSq_[k] and rk very large, hence we threshold to 1e-6 to avoid mu = 0
        }

        return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1;  // if mu <= 0 or mu = inf, return -1,
        // which leads to termination of the main GNC loop. In this case, all residuals are already below the threshold
        // and there is no need to robustify (TLS = least squares)
      default:
        throw std::runtime_error(
            "GncOptimizer::initializeMu: called with unknown loss type.");
    }
  }
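  // Worked example of the TLS initialization above: if barcSq_ = 1 for all factors and the largest
  // factor error at the initial estimate is rk = 10, then mu_init = 1 / (2 * 10 - 1) = 1/19 ~= 0.053,
  // so the first GNC surrogate is close to a convex (least-squares-like) problem.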

  /// Update the gnc parameter mu to gradually increase nonconvexity.
  double updateMu(const double mu) const {
    switch (params_.lossType) {
      case GncLossType::GM:
        // reduce mu, but saturate at 1 (original cost is recovered for mu -> 1)
        return std::max(1.0, mu / params_.muStep);
      case GncLossType::TLS:
        // increase mu at each iteration (original cost is recovered for mu -> inf)
        return mu * params_.muStep;
      default:
        throw std::runtime_error(
            "GncOptimizer::updateMu: called with unknown loss type.");
    }
  }
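  // For example, with muStep = 1.4 and a TLS start of mu = 0.05, the schedule is
  // 0.05 -> 0.07 -> 0.098 -> ... (geometric growth), while GM instead divides mu by 1.4
  // each iteration until it saturates at 1.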

  /// Check if we have reached the value of mu for which the surrogate loss matches the original loss.
  bool checkMuConvergence(const double mu) const {
    bool muConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        muConverged = std::fabs(mu - 1.0) < 1e-9;  // mu = 1 recovers the original GM function
        break;
      case GncLossType::TLS:
        muConverged = false;  // for TLS there is no stopping condition on mu (it must tend to infinity)
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkMuConvergence: called with unknown loss type.");
    }
    if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "muConverged = true " << std::endl;
    return muConverged;
  }

  /// Check convergence of relative cost differences.
  bool checkCostConvergence(const double cost, const double prev_cost) const {
    bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
        < params_.relativeCostTol;
    if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
      std::cout << "checkCostConvergence = true (prev. cost = " << prev_cost
                << ", curr. cost = " << cost << ")" << std::endl;
    }
    return costConverged;
  }
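  // For example, with relativeCostTol = 1e-5, going from prev_cost = 100.0 to cost = 99.9999
  // gives a relative change of 1e-6 and is declared converged.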

  /// Check convergence of weights to binary values.
  bool checkWeightsConvergence(const Vector& weights) const {
    bool weightsConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        weightsConverged = false;  // for GM, there is no clear binary convergence for the weights
        break;
      case GncLossType::TLS:
        weightsConverged = true;
        for (int i = 0; i < weights.size(); i++) {
          if (std::fabs(weights[i] - std::round(weights[i]))
              > params_.weightsTol) {
            weightsConverged = false;
            break;
          }
        }
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkWeightsConvergence: called with unknown loss type.");
    }
    if (weightsConverged
        && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "weightsConverged = true " << std::endl;
    return weightsConverged;
  }

  /// Check for convergence between consecutive GNC iterations.
  bool checkConvergence(const double mu, const Vector& weights,
                        const double cost, const double prev_cost) const {
    return checkCostConvergence(cost, prev_cost)
        || checkMuConvergence(mu) || checkWeightsConvergence(weights);
  }

  /// Create a graph where each factor is weighted by the gnc weights.
  NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const {
    // make sure all noiseModels are Gaussian or convert to Gaussian
    NonlinearFactorGraph newGraph;
    newGraph.resize(nfg_.size());
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        auto factor = nfg_.at<NoiseModelFactor>(i);
        auto noiseModel = std::dynamic_pointer_cast<noiseModel::Gaussian>(
            factor->noiseModel());
        if (noiseModel) {
          Matrix newInfo = weights[i] * noiseModel->information();
          auto newNoiseModel = noiseModel::Gaussian::Information(newInfo);
          newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
        } else {
          throw std::runtime_error(
              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
        }
      }
    }
    return newGraph;
  }
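  // Note on the scaling above: multiplying the information matrix by a weight w is equivalent
  // to inflating the measurement noise by 1/sqrt(w); e.g., w = 0.25 doubles the effective sigma,
  // and w = 0 removes the factor's influence entirely.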

  /// Calculate the gnc weights.
  Vector calculateWeights(const Values& currentEstimate, const double mu) {
    Vector weights = initializeWeightsFromKnownInliersAndOutliers();

    // do not update the weights that the user has decided are known inliers or outliers
    std::vector<size_t> allWeights;
    for (size_t k = 0; k < nfg_.size(); k++) {
      allWeights.push_back(k);
    }
    std::vector<size_t> knownWeights;
    std::set_union(params_.knownInliers.begin(), params_.knownInliers.end(),
                   params_.knownOutliers.begin(), params_.knownOutliers.end(),
                   std::inserter(knownWeights, knownWeights.begin()));

    std::vector<size_t> unknownWeights;
    std::set_difference(allWeights.begin(), allWeights.end(),
                        knownWeights.begin(), knownWeights.end(),
                        std::inserter(unknownWeights, unknownWeights.begin()));

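    // For reference, with u2_k = nfg_[k]->error(x) (GTSAM's factor error, 0.5 * squared whitened
    // residual) and inlier threshold barcSq_[k], the updates below implement:
    //   GM  (eq. 12 in the GNC paper):  w_k = ( mu * barcSq_[k] / (u2_k + mu * barcSq_[k]) )^2
    //   TLS (eq. 14 in the GNC paper):  w_k = sqrt( barcSq_[k] * mu * (mu + 1) / u2_k ) - mu, clamped to [0, 1]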
    // update the weights of the measurements that are not known a priori to be inliers or outliers
    switch (params_.lossType) {
      case GncLossType::GM: {  // use eq (12) in GNC paper
        for (size_t k : unknownWeights) {
          if (nfg_[k]) {
            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
            weights[k] = std::pow(
                (mu * barcSq_[k]) / (u2_k + mu * barcSq_[k]), 2);
          }
        }
        return weights;
      }
      case GncLossType::TLS: {  // use eq (14) in GNC paper
        for (size_t k : unknownWeights) {
          if (nfg_[k]) {
            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
            double upperbound = (mu + 1) / mu * barcSq_[k];
            double lowerbound = mu / (mu + 1) * barcSq_[k];
            weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - mu;
            if (u2_k >= upperbound || weights[k] < 0) {
              weights[k] = 0;
            } else if (u2_k <= lowerbound || weights[k] > 1) {
              weights[k] = 1;
            }
          }
        }
        return weights;
      }
      default:
        throw std::runtime_error(
            "GncOptimizer::calculateWeights: called with unknown loss type.");
    }
  }
};

}  // namespace gtsam