#include <boost/math/distributions/chi_squared.hpp>

/// Quantile of the chi-squared distribution with the given degrees of freedom, at probability alpha.
static double Chi2inv(const double alpha, const size_t dofs) {
  boost::math::chi_squared_distribution<double> chi2(dofs);
  return boost::math::quantile(chi2, alpha);
}
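// Worked example (illustrative numbers, not part of the original source): for a
// factor with 3 degrees of freedom and alpha = 0.99, Chi2inv(0.99, 3) ~= 11.34,
// the 99th percentile of the chi-squared distribution with 3 dofs.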
template<class GncParameters>
class GncOptimizer {
 public:
  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
  typedef typename GncParameters::OptimizerType BaseOptimizer;

 private:
  NonlinearFactorGraph nfg_;  ///< Original factor graph to be solved by GNC.
  Values state_;              ///< Initial values to be used at each iteration by GNC.
  GncParameters params_;      ///< GNC parameters.
  Vector weights_;            ///< Weights associated to each factor in GNC.
  Vector barcSq_;             ///< Inlier thresholds: factor i is an inlier if factor.error() < barcSq_[i].

 public:
  /// Constructor.
  GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
               const GncParameters& params = GncParameters())
      : state_(initialValues), params_(params) {
    // Make sure all noise models are Gaussian: strip any robust loss, since GNC provides its own robustness.
    nfg_.resize(graph.size());
    for (size_t i = 0; i < graph.size(); i++) {
      if (graph[i]) {
        auto factor = std::dynamic_pointer_cast<NoiseModelFactor>(graph[i]);
        auto robust = std::dynamic_pointer_cast<noiseModel::Robust>(factor->noiseModel());
        nfg_[i] = robust ? factor->cloneWithNewNoiseModel(robust->noise()) : factor;
      }
    }
    // Check that no measurement is specified as both a known inlier and a known outlier.
    std::vector<size_t> inconsistentlySpecifiedWeights;
    std::set_intersection(params.knownInliers.begin(), params.knownInliers.end(),
                          params.knownOutliers.begin(), params.knownOutliers.end(),
                          std::inserter(inconsistentlySpecifiedWeights,
                                        inconsistentlySpecifiedWeights.begin()));
    if (inconsistentlySpecifiedWeights.size() > 0) {
      throw std::runtime_error(
          "GncOptimizer::constructor: the user has selected one or more measurements"
          " to be BOTH a known inlier and a known outlier.");
    }
    // Check that every known inlier index refers to a factor in the graph.
    for (size_t i = 0; i < params.knownInliers.size(); i++) {
      if (params.knownInliers[i] > nfg_.size() - 1) {
        throw std::runtime_error(
            "GncOptimizer::constructor: the user has selected one or more measurements"
            " that are not in the factor graph to be known inliers.");
      }
    }
    // Check that every known outlier index refers to a factor in the graph.
    for (size_t i = 0; i < params.knownOutliers.size(); i++) {
      if (params.knownOutliers[i] > nfg_.size() - 1) {
        throw std::runtime_error(
            "GncOptimizer::constructor: the user has selected one or more measurements"
            " that are not in the factor graph to be known outliers.");
      }
    }
    // Initialize the weights (all ones, except for known outliers) and the default inlier thresholds.
    weights_ = initializeWeightsFromKnownInliersAndOutliers();
    setInlierCostThresholdsAtProbability(0.99);
  }
  /// Set the maximum weighted residual error for an inlier (same threshold for all factors).
  void setInlierCostThresholds(const double inth) {
    barcSq_ = inth * Vector::Ones(nfg_.size());
  }
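  /* Usage sketch (assumed API based on gtsam's GncParams; `graph` and `initial`
   * are placeholders for an existing NonlinearFactorGraph and Values):
   *
   *   GncParams<GaussNewtonParams> gncParams;
   *   gncParams.setLossType(GncLossType::TLS);
   *   GncOptimizer<GncParams<GaussNewtonParams>> gnc(graph, initial, gncParams);
   *   Values estimate = gnc.optimize();
   *   Vector weights = gnc.getWeights();  // close to 1 for inliers, close to 0 for outliers
   */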
  /// Set the inlier thresholds by specifying the probability alpha that inlier residuals are smaller than the threshold.
  void setInlierCostThresholdsAtProbability(const double alpha) {
    barcSq_ = Vector::Ones(nfg_.size());
    for (size_t k = 0; k < nfg_.size(); k++) {
      if (nfg_[k]) {
        barcSq_[k] = 0.5 * Chi2inv(alpha, nfg_[k]->dim());  // 0.5 from gtsam's factor error convention
      }
    }
  }
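  // Worked example (illustrative numbers): with alpha = 0.99 and a 3-dof factor,
  // barcSq_[k] = 0.5 * Chi2inv(0.99, 3) ~= 0.5 * 11.34 ~= 5.67, consistent with
  // gtsam's error convention 0.5 * ||r||^2_Sigma.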
  /// Set the weights manually (one per factor).
  void setWeights(const Vector w) {
    if (size_t(w.size()) != nfg_.size()) {
      throw std::runtime_error(
          "GncOptimizer::setWeights: the number of specified weights"
          " does not match the size of the factor graph.");
    }
    weights_ = w;
  }
  /// Initialize the weights to one, setting the weights of known outliers to zero.
  Vector initializeWeightsFromKnownInliersAndOutliers() const {
    Vector weights = Vector::Ones(nfg_.size());
    for (size_t i = 0; i < params_.knownOutliers.size(); i++) {
      weights[params_.knownOutliers[i]] = 0.0;  // known outliers get zero weight
    }
    return weights;
  }
  /// Compute the optimal solution using graduated non-convexity.
  Values optimize() {
    NonlinearFactorGraph graph_initial = this->makeWeightedGraph(weights_);
    BaseOptimizer baseOptimizer(graph_initial, state_, params_.baseOptimizerParams);
    Values result = baseOptimizer.optimize();
    double mu = initializeMu();
    double prev_cost = graph_initial.error(result);
    double cost = 0.0;  // updated in the main loop

    // Number of measurements that are not yet known to be inliers or outliers.
    int nrUnknownInOrOut = nfg_.size() - (params_.knownInliers.size() + params_.knownOutliers.size());
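    // Degenerate cases handled below (sketch): mu <= 0 signals that the maximum
    // residual at initialization is already small (for TLS, initializeMu() returns -1
    // in that case), and nrUnknownInOrOut == 0 means every measurement is already
    // labeled, so the base optimizer's solution is returned without running GNC.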
    if (mu <= 0 || nrUnknownInOrOut == 0) {
      if (mu <= 0 && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
        std::cout << "GNC Optimizer stopped because maximum residual at "
                     "initialization is small." << std::endl;
      }
      if (nrUnknownInOrOut == 0 && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
        std::cout << "GNC Optimizer stopped because all measurements are already known to be inliers or outliers"
                  << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::MU) {
        std::cout << "mu: " << mu << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        result.print("result\n");
      }
      return result;
    }
    // Main GNC loop: alternate weight updates and weighted least-squares solves,
    // gradually increasing nonconvexity via mu.
    size_t iter;
    for (iter = 0; iter < params_.maxIterations; iter++) {
      // Display info.
      if (params_.verbosity >= GncParameters::Verbosity::MU) {
        std::cout << "iter: " << iter << std::endl;
        std::cout << "mu: " << mu << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
        std::cout << "weights: " << weights_ << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        result.print("result\n");
      }
      // Weights update.
      weights_ = calculateWeights(result, mu);
      // Variables update: re-optimize with the graph weighted by the current weights.
      NonlinearFactorGraph graph_iter = this->makeWeightedGraph(weights_);
      BaseOptimizer baseOptimizer_iter(graph_iter, state_, params_.baseOptimizerParams);
      result = baseOptimizer_iter.optimize();

      // Stopping conditions.
      cost = graph_iter.error(result);
      if (checkConvergence(mu, weights_, cost, prev_cost)) {
        break;
      }

      // Update mu and get ready for the next iteration.
      mu = updateMu(mu);
      prev_cost = cost;
      // Display info.
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        std::cout << "previous cost: " << prev_cost << std::endl;
        std::cout << "current cost: " << cost << std::endl;
      }
    }
    // Display info.
    if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
      std::cout << "final iterations: " << iter << std::endl;
      std::cout << "final mu: " << mu << std::endl;
      std::cout << "previous cost: " << prev_cost << std::endl;
      std::cout << "current cost: " << cost << std::endl;
    }
    if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
      std::cout << "final weights: " << weights_ << std::endl;
    }
    return result;
  }
  /// Initialize mu such that the surrogate loss is approximately convex (Remark 5 in the GNC paper).
  double initializeMu() const {
    double mu_init = 0.0;
    switch (params_.lossType) {
      case GncLossType::GM:
        // The surrogate cost is convex for large mu; since barcSq_ can differ per
        // factor, take the maximum over all factors.
        for (size_t k = 0; k < nfg_.size(); k++) {
          mu_init = std::max(mu_init, 2 * nfg_[k]->error(state_) / barcSq_[k]);
        }
        return mu_init;
      case GncLossType::TLS:
        // The surrogate cost is convex for small mu.
        mu_init = std::numeric_limits<double>::infinity();
        for (size_t k = 0; k < nfg_.size(); k++) {
          double rk = nfg_[k]->error(state_);
          mu_init = (2 * rk - barcSq_[k]) > 0 ?
              std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k])) : mu_init;
        }
        if (mu_init >= 0 && mu_init < 1e-6) {
          mu_init = 1e-6;  // avoid mu = 0 when some residuals are much larger than the threshold
        }
        // If mu <= 0 or mu is infinite, return -1: all residuals are already below
        // the thresholds and the main GNC loop terminates immediately.
        return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1;
      default:
        throw std::runtime_error(
            "GncOptimizer::initializeMu: called with unknown loss type.");
    }
  }
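  // Worked example (illustrative numbers): if the largest factor error at the initial
  // estimate is 50 and barcSq_[k] = 5.67, GM yields mu_init = 2 * 50 / 5.67 ~= 17.6
  // (strongly convex surrogate), while TLS yields mu_init = 5.67 / (2 * 50 - 5.67) ~= 0.06.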
  /// Update mu to gradually increase nonconvexity.
  double updateMu(const double mu) const {
    switch (params_.lossType) {
      case GncLossType::GM:
        // Decrease mu, saturating at 1 (for mu -> 1 the surrogate matches the original GM loss).
        return std::max(1.0, mu / params_.muStep);
      case GncLossType::TLS:
        // Increase mu (for mu -> inf the surrogate matches the original TLS loss).
        return mu * params_.muStep;
      default:
        throw std::runtime_error(
            "GncOptimizer::updateMu: called with unknown loss type.");
    }
  }
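  // Worked example (assuming muStep = 1.4, the usual GncParams default): GM shrinks mu
  // as 17.6 -> 12.6 -> 9.0 -> ... down to 1, while TLS grows mu as 0.06 -> 0.084 -> 0.118 -> ...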
  /// Check whether mu has reached the value at which the surrogate loss matches the original loss.
  bool checkMuConvergence(const double mu) const {
    bool muConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        muConverged = std::fabs(mu - 1.0) < 1e-9;  // mu = 1 recovers the original GM loss
        break;
      case GncLossType::TLS:
        muConverged = false;  // for TLS, mu grows without bound, so there is no convergence on mu
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkMuConvergence: called with unknown loss type.");
    }
    if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "muConverged = true " << std::endl;
    return muConverged;
  }
  /// Check convergence of relative cost differences.
  bool checkCostConvergence(const double cost, const double prev_cost) const {
    bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
        < params_.relativeCostTol;
    if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
      std::cout << "checkCostConvergence = true (prev. cost = " << prev_cost
                << ", curr. cost = " << cost << ")" << std::endl;
    }
    return costConverged;
  }
  /// Check convergence of the weights to binary values.
  bool checkWeightsConvergence(const Vector& weights) const {
    bool weightsConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        weightsConverged = false;  // for GM, there is no clear binary convergence of the weights
        break;
      case GncLossType::TLS:
        weightsConverged = true;
        for (int i = 0; i < weights.size(); i++) {
          if (std::fabs(weights[i] - std::round(weights[i])) > params_.weightsTol) {
            weightsConverged = false;
            break;
          }
        }
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkWeightsConvergence: called with unknown loss type.");
    }
    if (weightsConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "weightsConverged = true " << std::endl;
    return weightsConverged;
  }
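  // Worked example (illustrative numbers): with weightsTol = 1e-4, weights such as
  // 0.99996 or 0.00003 count as binary, whereas a weight of 0.97 does not.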
  /// Check for convergence between consecutive GNC iterations.
  bool checkConvergence(const double mu, const Vector& weights,
                        const double cost, const double prev_cost) const {
    return checkCostConvergence(cost, prev_cost)
        || checkWeightsConvergence(weights) || checkMuConvergence(mu);
  }
  /// Create a graph where each factor is weighted by the GNC weights.
  NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const {
    NonlinearFactorGraph newGraph;
    newGraph.resize(nfg_.size());
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        auto factor = std::dynamic_pointer_cast<NoiseModelFactor>(nfg_[i]);
        auto noiseModel = std::dynamic_pointer_cast<noiseModel::Gaussian>(
            factor->noiseModel());
        if (noiseModel) {
          // Scale the information matrix of factor i by its weight.
          Matrix newInfo = weights[i] * noiseModel->information();
          auto newNoiseModel = noiseModel::Gaussian::Information(newInfo);
          newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
        } else {
          throw std::runtime_error(
              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
        }
      }
    }
    return newGraph;
  }
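  // Worked example (illustrative numbers): a weight of 0.25 scales the information
  // matrix by 0.25, i.e., it doubles the effective noise sigma of that factor;
  // a weight of 0 effectively removes the measurement from the optimization.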
  /// Calculate the GNC weights (one per factor).
  Vector calculateWeights(const Values& currentEstimate, const double mu) {
    Vector weights = initializeWeightsFromKnownInliersAndOutliers();

    // Do not update the weights of measurements that are known a priori to be inliers or outliers.
    std::vector<size_t> allWeights;
    for (size_t k = 0; k < nfg_.size(); k++) {
      allWeights.push_back(k);
    }
    std::vector<size_t> knownWeights;
    std::set_union(params_.knownInliers.begin(), params_.knownInliers.end(),
                   params_.knownOutliers.begin(), params_.knownOutliers.end(),
                   std::inserter(knownWeights, knownWeights.begin()));
    std::vector<size_t> unknownWeights;
    std::set_difference(allWeights.begin(), allWeights.end(),
                        knownWeights.begin(), knownWeights.end(),
                        std::inserter(unknownWeights, unknownWeights.begin()));
    // Update the weights of the remaining (unknown) measurements.
    switch (params_.lossType) {
      case GncLossType::GM: {  // Geman-McClure weight update
        for (size_t k : unknownWeights) {
          double u2_k = nfg_[k]->error(currentEstimate);  // squared (whitened) residual
          weights[k] = std::pow(
              (mu * barcSq_[k]) / (u2_k + mu * barcSq_[k]), 2);
        }
        return weights;
      }
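      // Worked example (illustrative numbers): for a residual exactly at the threshold
      // (u2_k = barcSq_[k]) the GM weight is (mu / (mu + 1))^2, e.g. 0.25 once mu has
      // decreased to 1; residuals well below the threshold get weights close to 1.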
      case GncLossType::TLS: {  // truncated least squares weight update
        for (size_t k : unknownWeights) {
          double u2_k = nfg_[k]->error(currentEstimate);  // squared (whitened) residual
          double upperbound = (mu + 1) / mu * barcSq_[k];
          double lowerbound = mu / (mu + 1) * barcSq_[k];
          weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - mu;
          if (u2_k >= upperbound || weights[k] < 0) {
            weights[k] = 0;  // certain outlier
          } else if (u2_k <= lowerbound || weights[k] > 1) {
            weights[k] = 1;  // certain inlier
          }
        }
        return weights;
      }
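      // Worked example (illustrative numbers): with mu = 1 and barcSq_[k] = 5.67 the
      // bounds are lowerbound ~= 2.84 and upperbound ~= 11.34; a residual at the
      // threshold (u2_k = 5.67) gets weight sqrt(2) - 1 ~= 0.41.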
      default:
        throw std::runtime_error(
            "GncOptimizer::calculateWeights: called with unknown loss type.");
    }
  }
};