ShonanAveraging.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file   ShonanAveraging.cpp
 * @date   March 2019 - August 2020
 * @author Frank Dellaert, David Rosen, and Jing Wu
 * @brief  Shonan Averaging algorithm
 */
#include <SymEigsSolver.h>
#include <cmath>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/sfm/ShonanFactor.h>
#include <gtsam/sfm/ShonanGaugeFactor.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/slam/KarcherMeanFactor-inl.h>

#include <Eigen/Eigenvalues>
#include <algorithm>
#include <complex>
#include <iostream>
#include <map>
#include <random>
#include <set>
#include <vector>

namespace gtsam {

// In Wrappers we have no access to this so have a default ready
static std::mt19937 kRandomNumberGenerator(42);

using Sparse = Eigen::SparseMatrix<double>;

/* ************************************************************************* */
template <size_t d>
ShonanAveragingParameters<d>::ShonanAveragingParameters(
    const LevenbergMarquardtParams &_lm, const std::string &method,
    double optimalityThreshold, double alpha, double beta, double gamma)
    : lm(_lm),
      optimalityThreshold(optimalityThreshold),
      alpha(alpha),
      beta(beta),
      gamma(gamma),
      useHuber(false),
      certifyOptimality(true) {
  // By default, we will do conjugate gradient
  lm.linearSolverType = LevenbergMarquardtParams::Iterative;

  // Create subgraph builder parameters
  SubgraphBuilderParameters builderParameters;
  builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL;
  builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL;
  builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
  builderParameters.augmentationFactor = 0.0;

  // Choose optimization method
  if (method == "SUBGRAPH") {
    lm.iterativeParams =
        std::make_shared<SubgraphSolverParameters>(builderParameters);
  } else if (method == "SGPC") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
  } else if (method == "JACOBI") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<BlockJacobiPreconditionerParameters>());
  } else if (method == "QR") {
    lm.setLinearSolverType("MULTIFRONTAL_QR");
  } else if (method == "CHOLESKY") {
    lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY");
  } else {
    throw std::invalid_argument("ShonanAveragingParameters: unknown method");
  }
}
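
// Example (illustrative sketch, not part of the original file): constructing
// parameters with the Ceres-style LM defaults and the default "JACOBI"
// method (block-Jacobi-preconditioned conjugate gradient). The
// ShonanAveragingParameters3 alias and setOptimalityThreshold are assumed to
// come from the accompanying header, ShonanAveraging.h.
//
//   LevenbergMarquardtParams lmParams = LevenbergMarquardtParams::CeresDefaults();
//   ShonanAveragingParameters3 params(lmParams, "JACOBI");
//   params.setOptimalityThreshold(-1e-4);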

/* ************************************************************************* */
// Explicit instantiation for d=2 and d=3
template struct ShonanAveragingParameters<2>;
template struct ShonanAveragingParameters<3>;

/* ************************************************************************* */
// Calculate number of unknown rotations referenced by measurements
// Throws exception if keys are not numbered as expected (may change in future).
template <size_t d>
static size_t NrUnknowns(
    const typename ShonanAveraging<d>::Measurements &measurements) {
  Key maxKey = 0;
  std::set<Key> keys;
  for (const auto &measurement : measurements) {
    for (const Key &key : measurement.keys()) {
      maxKey = std::max(key, maxKey);
      keys.insert(key);
    }
  }
  size_t N = keys.size();
  if (maxKey != N - 1) {
    throw std::invalid_argument(
        "As of now, ShonanAveraging expects keys numbered 0..N-1");
  }
  return N;
}

/* ************************************************************************* */
template <size_t d>
ShonanAveraging<d>::ShonanAveraging(const Measurements &measurements,
                                    const Parameters &parameters)
    : parameters_(parameters),
      measurements_(measurements),
      nrUnknowns_(NrUnknowns<d>(measurements)) {
  for (const auto &measurement : measurements_) {
    const auto &model = measurement.noiseModel();
    if (model && model->dim() != SO<d>::dimension) {
      measurement.print("Factor with incorrect noise model:\n");
      throw std::invalid_argument(
          "ShonanAveraging: measurements passed to "
          "constructor have incorrect dimension.");
    }
  }
  Q_ = buildQ();
  D_ = buildD();
  L_ = D_ - Q_;
}

/* ************************************************************************* */
template <size_t d>
NonlinearFactorGraph ShonanAveraging<d>::buildGraphAt(size_t p) const {
  NonlinearFactorGraph graph;
  auto G = std::make_shared<Matrix>(SO<-1>::VectorizedGenerators(p));

  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<ShonanFactor<d>>(keys[0], keys[1], Rij, p, model, G);
  }
  // Possibly add Karcher prior
  if (parameters_.beta > 0) {
    const size_t dim = SOn::Dimension(p);
    graph.emplace_shared<KarcherMeanFactor<SOn>>(graph.keys(), dim);
  }
  // Possibly add gauge factors - they are probably useless as gradient is zero
  if (parameters_.gamma > 0 && p > d + 1) {
    for (auto key : graph.keys())
      graph.emplace_shared<ShonanGaugeFactor>(key, p, d, parameters_.gamma);
  }
  return graph;
}
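
// Note (editor's sketch): buildGraphAt(p) encodes the rank-p relaxation of
// rotation averaging. Each unknown rotation becomes an SO(p) variable, and
// each relative measurement Rij adds a ShonanFactor penalizing the Frobenius
// distance between the lifted variables restricted to their first d columns.
// For p == d this coincides with ordinary rotation averaging; for
// sufficiently large p the relaxation becomes exact, which is the key result
// of the Shonan Averaging paper.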

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::costAt(size_t p, const Values &values) const {
  const NonlinearFactorGraph graph = buildGraphAt(p);
  return graph.error(values);
}

/* ************************************************************************* */
template <size_t d>
std::shared_ptr<LevenbergMarquardtOptimizer>
ShonanAveraging<d>::createOptimizerAt(size_t p, const Values &initial) const {
  // Build graph
  NonlinearFactorGraph graph = buildGraphAt(p);

  // Anchor prior is added here as it depends on the initial value (and its
  // cost is zero)
  if (parameters_.alpha > 0) {
    const size_t dim = SOn::Dimension(p);
    const auto [i, value] = parameters_.anchor;
    auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha);
    graph.emplace_shared<PriorFactor<SOn>>(i, SOn::Lift(p, value.matrix()),
                                           model);
  }
  // Optimize
  return std::make_shared<LevenbergMarquardtOptimizer>(graph, initial,
                                                       parameters_.lm);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::tryOptimizingAt(size_t p,
                                           const Values &initial) const {
  auto lm = createOptimizerAt(p, initial);
  return lm->optimize();
}

/* ************************************************************************* */
// Project to pxdN Stiefel manifold
template <size_t d>
Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
  const size_t N = values.size();
  const size_t p = values.at<SOn>(0).rows();
  Matrix S(p, N * d);
  for (const auto& it : values.extract<SOn>()) {
    S.middleCols<d>(it.first * d) =
        it.second.matrix().leftCols<d>();  // project Qj to Stiefel
  }
  return S;
}
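
// Layout example (a sketch, assuming 0-based integer keys): for d = 3, p = 5,
// and N unknowns, S is the 5 x 3N matrix
//
//   S = [ Q0.leftCols<3>() | Q1.leftCols<3>() | ... | Q(N-1).leftCols<3>() ]
//
// where Qj is the 5x5 rotation matrix of the SO(5) value with key j; each
// 5x3 block is thus an element of the Stiefel manifold St(3, 5).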

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto& it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
    const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0));
    result.insert(it.first, R);
  }
  return result;
}

template <>
Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto& it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
    const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>());
    result.insert(it.first, R);
  }
  return result;
}

/* ************************************************************************* */
template <size_t d>
static Matrix RoundSolutionS(const Matrix &S) {
  const size_t N = S.cols() / d;
  // First, compute a thin SVD of S
  Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
  const Vector sigmas = svd.singularValues();

  // Construct a diagonal matrix comprised of the first d singular values
  using DiagonalMatrix = Eigen::DiagonalMatrix<double, d>;
  DiagonalMatrix Sigma_d;
  Sigma_d.diagonal() = sigmas.head<d>();

  // Now, construct a rank-d truncated singular value decomposition for S
  Matrix R = Sigma_d * svd.matrixV().leftCols<d>().transpose();

  // Count the number of blocks whose determinants have positive sign
  size_t numPositiveBlocks = 0;
  for (size_t i = 0; i < N; ++i) {
    // Compute the determinant of the ith dxd block of R
    double determinant = R.middleCols<d>(d * i).determinant();
    if (determinant > 0) ++numPositiveBlocks;
  }

  if (numPositiveBlocks < N / 2) {
    // Less than half of the total number of blocks have the correct sign.
    // To reverse their orientations, multiply with a reflection matrix.
    DiagonalMatrix reflector;
    reflector.setIdentity();
    reflector.diagonal()(d - 1) = -1;
    R = reflector * R;
  }

  return R;
}

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
  // Round to a 2*2N matrix
  Matrix R = RoundSolutionS<2>(S);

  // Finally, project each 2x2 rotation block to SO(2)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j));
    values.insert(j, Ri);
  }
  return values;
}

template <>
Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
  // Round to a 3*3N matrix
  Matrix R = RoundSolutionS<3>(S);

  // Finally, project each 3x3 rotation block to SO(3)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j));
    values.insert(j, Ri);
  }
  return values;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::roundSolution(const Values &values) const {
  // Project to pxdN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return roundSolutionS(S);
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::cost(const Values &values) const {
  NonlinearFactorGraph graph;
  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<FrobeniusBetweenFactor<SO<d>>>(
        keys[0], keys[1], SO<d>(Rij.matrix()), model);
  }
  // Convert the rotation values to SO<d> values to evaluate the graph error
  Values result;
  for (const auto& it : values.extract<Rot>()) {
    result.insert(it.first, SO<d>(it.second.matrix()));
  }
  return graph.error(result);
}
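
// Usage note (editor's sketch): cost() evaluates the original SO(d) objective
// on rounded values, while costAt(p, .) evaluates the relaxed SO(p) objective,
// so after a certified run the two should nearly agree:
//
//   Values Qstar = shonan.tryOptimizingAt(p, initial);  // hypothetical names
//   double relaxed = shonan.costAt(p, Qstar);
//   double rounded = shonan.cost(shonan.roundSolution(Qstar));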

/* ************************************************************************* */
// Get kappa from noise model
template <typename T, size_t d>
static double Kappa(const BinaryMeasurement<T> &measurement,
                    const ShonanAveragingParameters<d> &parameters) {
  const auto &isotropic = std::dynamic_pointer_cast<noiseModel::Isotropic>(
      measurement.noiseModel());
  double sigma;
  if (isotropic) {
    sigma = isotropic->sigma();
  } else {
    const auto &robust = std::dynamic_pointer_cast<noiseModel::Robust>(
        measurement.noiseModel());
    // Check if noise model is robust
    if (robust) {
      // If robust, check if optimality certificate is expected
      if (parameters.getCertifyOptimality()) {
        throw std::invalid_argument(
            "Certification of optimality does not work with robust cost.");
      } else {
        // Optimality certificate not required, so set a default sigma
        sigma = 1;
      }
    } else {
      throw std::invalid_argument(
          "Shonan averaging noise models must be isotropic (but robust losses "
          "are allowed).");
    }
  }
  return 1.0 / (sigma * sigma);
}
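
// Example: an isotropic noise model with sigma = 0.1 yields
// kappa = 1 / (0.1 * 0.1) = 100, so confident measurements receive large
// weights in the matrices D and Q assembled below.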

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildD() const {
  // Each measurement contributes 2*d elements along the diagonal of the
  // degree matrix.
  static constexpr size_t stride = 2 * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t k = 0; k < d; k++) {
      // Elements of ith block-diagonal
      triplets.emplace_back(di + k, di + k, kappa);
      // Elements of jth block-diagonal
      triplets.emplace_back(dj + k, dj + k, kappa);
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse D(dN, dN);
  D.setFromTriplets(triplets.begin(), triplets.end());

  return D;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildQ() const {
  // Each measurement contributes 2*d^2 elements on a pair of symmetric
  // off-diagonal blocks
  static constexpr size_t stride = 2 * d * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Extract rotation measurement
    const auto Rij = measurement.measured().matrix();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t r = 0; r < d; r++) {
      for (size_t c = 0; c < d; c++) {
        // Elements of ij block
        triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c));
        // Elements of ji block
        triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r));
      }
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse Q(dN, dN);
  Q.setFromTriplets(triplets.begin(), triplets.end());

  return Q;
}
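
// Note (editor's sketch): with the degree matrix D from buildD() and the
// measurement matrix Q above, the constructor's L_ = D_ - Q_ is the
// "connection Laplacian" of the measurement graph. A single measurement
// (i, j) with weight kappa contributes the block pattern
//
//   [  kappa*I     -kappa*Rij ]
//   [ -kappa*Rij'   kappa*I   ]
//
// to L, where each entry is itself a dxd block.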

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Matrix &S) const {
  // Each pose contributes d*d elements along the diagonal of Lambda
  static constexpr size_t stride = d * d;

  // Reserve space for triplets
  const size_t N = nrUnknowns();
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * N);

  // Do sparse-dense multiply to get Q*S'
  auto QSt = Q_ * S.transpose();

  for (size_t j = 0; j < N; j++) {
    // Compute B, the building block for the j^th diagonal block of Lambda
    const size_t dj = d * j;
    Matrix B = QSt.middleRows(dj, d) * S.middleCols<d>(dj);

    // Elements of jth block-diagonal
    for (size_t r = 0; r < d; r++)
      for (size_t c = 0; c < d; c++)
        triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r)));
  }

  // Construct and return a sparse matrix from these triplets
  Sparse Lambda(d * N, d * N);
  Lambda.setFromTriplets(triplets.begin(), triplets.end());
  return Lambda;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Values &values) const {
  // Project to pxdN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return computeLambda(S);
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Values &values) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}
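
// Note (editor's sketch): A = Lambda(S) - Q is the certificate matrix of the
// relaxation. If S is a first-order critical point and A is positive
// semidefinite, S is a global optimum (cf. the Shonan Averaging paper), which
// is why the minimum-eigenvalue machinery below exists.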

/* ************************************************************************* */
// Perturb the initialVector by adding a spherically-uniformly distributed
// random vector with 0.03*||initialVector||_2 magnitude to initialVector
// ref : Part III. C, Rosen, D. and Carlone, L., 2017, September. Computational
// enhancements for certifiably correct SLAM. In Proceedings of the
// International Conference on Intelligent Robots and Systems.
static Vector perturb(const Vector &initialVector) {
  // generate a perturbation with magnitude 0.03*||x_0||_2 as stated in the paper
  int n = initialVector.rows();
  Vector disturb = Vector::Random(n);
  disturb.normalize();

  double magnitude = initialVector.norm();
  Vector perturbedVector = initialVector + 0.03 * magnitude * disturb;
  perturbedVector.normalize();
  return perturbedVector;
}

/* ************************************************************************* */
/// MINIMUM EIGENVALUE COMPUTATIONS
// Alg.6 from the paper "Distributed Certifiably Correct Pose-Graph
// Optimization": it takes the certificate matrix A as input; maxIterations
// and minEigenvalueNonnegativityTolerance are set to 1000 and 10e-4 by
// default. There are two parts to this algorithm:
// (1) compute the dominant eigenpair (\lambda_dom, \vect{v}_dom) of A by the
// power method; if \lambda_dom is less than zero, return that eigenpair.
// (2) otherwise, compute the dominant eigenpair (\theta, \vect{v}) of
// C = \lambda_dom * I - A by the accelerated power method, and return
// (\lambda_dom - \theta, \vect{v}).
static bool PowerMinimumEigenValue(
    const Sparse &A, const Matrix &S, double &minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4) {
  // a. Compute the dominant eigenpair of A using the power method
  PowerMethod<Sparse> pmOperator(A);

  const bool pmConverged = pmOperator.compute(maxIterations, 1e-5);

  // Check convergence and bail out if necessary
  if (!pmConverged) return false;

  const double pmEigenValue = pmOperator.eigenvalue();

  if (pmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    minEigenValue = pmEigenValue;
    if (minEigenVector) {
      *minEigenVector = pmOperator.eigenvector();
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  const Sparse C =
      pmEigenValue * Matrix::Identity(A.rows(), A.cols()).sparseView() - A;
  const std::optional<Vector> initial = perturb(S.row(0));
  AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);

  const bool minConverged = apmShiftedOperator.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);

  if (!minConverged) return false;

  minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
  if (minEigenVector) {
    *minEigenVector = apmShiftedOperator.eigenvector();
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
  return true;
}
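
// A minimal summary of the shift trick used above, assuming a symmetric A
// with dominant eigenvalue lambda_dom > 0: C = lambda_dom*I - A is positive
// semidefinite, and its dominant eigenvalue equals lambda_dom - lambda_min(A),
// so two (accelerated) power iterations recover the minimum eigenpair without
// any matrix factorization.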

/** This is a lightweight struct used in conjunction with Spectra to compute
 * the minimum eigenvalue and eigenvector of the sparse matrix A; it has a
 * single nontrivial function, perform_op(x, y), that computes and returns
 * the product y = (A + sigma*I) x */
struct MatrixProdFunctor {
  // Const reference to an externally-held matrix whose minimum-eigenvalue we
  // want to compute
  const Sparse &A_;

  // Spectral shift
  double sigma_;

  // Constructor
  explicit MatrixProdFunctor(const Sparse &A, double sigma = 0)
      : A_(A), sigma_(sigma) {}

  int rows() const { return A_.rows(); }
  int cols() const { return A_.cols(); }

  // Matrix-vector multiplication operation
  void perform_op(const double *x, double *y) const {
    // Wrap the raw arrays as Eigen Vector types
    Eigen::Map<const Vector> X(x, rows());
    Eigen::Map<Vector> Y(y, rows());

    // Do the multiplication using wrapped Eigen vectors
    Y = A_ * X + sigma_ * X;
  }
};

// Function to compute the minimum eigenvalue of A using Lanczos in Spectra.
// The strategy below: first estimate the largest-magnitude eigenvalue
// lambda_lm of A; if it is negative it is also the minimum eigenvalue.
// Otherwise, run Lanczos on the spectrally-shifted matrix A - 2*lambda_lm*I,
// whose largest-magnitude eigenvalue is minEigenValue - 2*lambda_lm, and
// undo the shift upon completion.
// For the defaults, David Rosen says:
// - maxIterations refers to the max number of Lanczos iterations to run;
//   ~1000 should be sufficiently large
// - We've been using 10^-4 for the nonnegativity tolerance
// - for numLanczosVectors, 20 is a good default value

static bool SparseMinimumEigenValue(
    const Sparse &A, const Matrix &S, double *minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4,
    Eigen::Index numLanczosVectors = 20) {
  // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
  MatrixProdFunctor lmOperator(A);
  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows()));
  lmEigenValueSolver.init();

  const int lmConverged = lmEigenValueSolver.compute(
      maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  // Check convergence and bail out if necessary
  if (lmConverged != 1) return false;

  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);

  if (lmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    *minEigenValue = lmEigenValue;
    if (minEigenVector) {
      *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  // The largest-magnitude eigenvalue is positive, and is therefore the
  // maximum eigenvalue. Therefore, after shifting the spectrum of A
  // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted
  // spectrum will lie in the interval [minEigenValue(A) - 2*lambda_max(A),
  // -lambda_max(A)]; in particular, the largest-magnitude eigenvalue of
  // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding
  // eigenvector v_min
  MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue);

  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      minEigenValueSolver(&minShiftedOperator, 1,
                          std::min(numLanczosVectors, A.rows()));

  // If S is a critical point of F, then S^T is also in the null space of S -
  // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are
  // eigenvectors corresponding to the eigenvalue 0. In the case that the
  // relaxation is exact, this is the *minimum* eigenvalue, and therefore the
  // rows of S are exactly the eigenvectors that we're looking for. On the
  // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at
  // least one strictly negative eigenvalue, and the rows of S are *unstable
  // fixed points* for the Lanczos iterations. Thus, we will take a slightly
  // "fuzzed" version of the first row of S as an initialization for the
  // Lanczos iterations; this allows for rapid convergence in the case that
  // the relaxation is exact (since we are starting close to a solution), while
  // simultaneously allowing the iterations to escape from this fixed point in
  // the case that the relaxation is not exact.
  Vector v0 = S.row(0).transpose();
  Vector perturbation(v0.size());
  perturbation.setRandom();
  perturbation.normalize();
  Vector xinit = v0 + (.03 * v0.norm()) * perturbation;  // Perturb v0 by ~3%

  // Use this to initialize the eigensolver
  minEigenValueSolver.init(xinit.data());

  // Now determine the relative precision required in the Lanczos method in
  // order to be able to estimate the smallest eigenvalue within an *absolute*
  // tolerance of 'minEigenvalueNonnegativityTolerance'
  const int minConverged = minEigenValueSolver.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
      Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  if (minConverged != 1) return false;

  *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
  if (minEigenVector) {
    *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
  return true;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
                                                Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue;
  bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "SparseMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
                                                  Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue = 0;
  bool success = PowerMinimumEigenValue(A, S, minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
    const Values &values) const {
  Vector minEigenVector;
  double minEigenValue = computeMinEigenValue(values, &minEigenVector);
  return {minEigenValue, minEigenVector};
}

/* ************************************************************************* */
template <size_t d>
bool ShonanAveraging<d>::checkOptimality(const Values &values) const {
  double minEigenValue = computeMinEigenValue(values);
  return minEigenValue > parameters_.optimalityThreshold;
}

/* ************************************************************************* */
template <size_t d>
VectorValues ShonanAveraging<d>::TangentVectorValues(size_t p,
                                                     const Vector &v) {
  VectorValues delta;
  // Create a tangent direction xi with eigenvector segment v_i
  const size_t dimension = SOn::Dimension(p);
  double sign0 = pow(-1.0, round((p + 1) / 2) + 1);
  for (size_t i = 0; i < v.size() / d; i++) {
    // Assumes key is 0-based integer
    const auto v_i = v.segment<d>(d * i);
    Vector xi = Vector::Zero(dimension);
    double sign = sign0;
    for (size_t j = 0; j < d; j++) {
      xi(j + p - d - 1) = sign * v_i(d - j - 1);
      sign = -sign;
    }
    delta.insert(i, xi);
  }
  return delta;
}
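
// Example (editor's sketch): for d = 3 and p = 4, SOn::Dimension(4) == 6, and
// each 3-vector segment v_i of the eigenvector is written in reverse order,
// with alternating signs, into coordinates p - d - 1 == 0 through p - 2 == 2
// of a zero tangent vector of dimension 6; the remaining coordinates stay
// zero.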

/* ************************************************************************* */
template <size_t d>
Matrix ShonanAveraging<d>::riemannianGradient(size_t p,
                                              const Values &values) const {
  Matrix S_dot = StiefelElementMatrix(values);
  // calculate the gradient of F(Q_dot) at Q_dot
  Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose();

  // project the Euclidean gradient onto the tangent space of the Stiefel
  // manifold by subtracting the symmetrized block-diagonal component
  Matrix symBlockDiagProduct(p, d * nrUnknowns());
  for (size_t i = 0; i < nrUnknowns(); i++) {
    // Compute block product
    const size_t di = d * i;
    const Matrix P = S_dot.middleCols<d>(di).transpose() *
                     euclideanGradient.middleCols<d>(di);
    // Symmetrize this block
    Matrix S = .5 * (P + P.transpose());
    // Compute S_dot * S and set the corresponding block
    symBlockDiagProduct.middleCols<d>(di) = S_dot.middleCols<d>(di) * S;
  }
  Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct;
  return riemannianGradient;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::LiftwithDescent(size_t p, const Values &values,
                                           const Vector &minEigenVector) {
  Values lifted = LiftTo<SOn>(p, values);
  VectorValues delta = TangentVectorValues(p, minEigenVector);
  return lifted.retract(delta);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeWithDescent(
    size_t p, const Values &values, const Vector &minEigenVector,
    double minEigenValue, double gradienTolerance,
    double preconditionedGradNormTolerance) const {
  double funcVal = costAt(p - 1, values);
  double alphaMin = 1e-2;
  double alpha =
      std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue));
  std::vector<double> alphas;
  std::vector<double> fvals;
  // line search
  while ((alpha >= alphaMin)) {
    Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector);
    double funcValTest = costAt(p, Qplus);
    Matrix gradTest = riemannianGradient(p, Qplus);
    double gradTestNorm = gradTest.norm();
    // Record alpha and funcVal
    alphas.push_back(alpha);
    fvals.push_back(funcValTest);
    if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) {
      return Qplus;
    }
    alpha /= 2;
  }

  auto fminIter = std::min_element(fvals.begin(), fvals.end());
  auto minIdx = std::distance(fvals.begin(), fminIter);
  double fMin = fvals[minIdx];
  double aMin = alphas[minIdx];
  if (fMin < funcVal) {
    Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector);
    return Qplus;
  }

  return LiftwithDescent(p, values, alpha * minEigenVector);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
  Values initial;
  for (size_t j = 0; j < nrUnknowns(); j++) {
    initial.insert(j, Rot::Random(rng));
  }
  return initial;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly() const {
  return initializeRandomly(kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
                                                std::mt19937 &rng) const {
  const Values randomRotations = initializeRandomly(rng);
  return LiftTo<Rot>(p, randomRotations);  // lift to p!
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p) const {
  return initializeRandomlyAt(p, kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
std::pair<Values, double> ShonanAveraging<d>::run(const Values &initialEstimate,
                                                  size_t pMin,
                                                  size_t pMax) const {
  if (pMin < d) {
    throw std::runtime_error("pMin is smaller than the base dimension d");
  }
  Values Qstar;
  Values initialSOp = LiftTo<Rot>(pMin, initialEstimate);  // lift to pMin!
  for (size_t p = pMin; p <= pMax; p++) {
    // Optimize until convergence at this level
    Qstar = tryOptimizingAt(p, initialSOp);
    if (parameters_.getUseHuber() || !parameters_.getCertifyOptimality()) {
      // in this case, there is no optimality certification
      if (pMin != pMax) {
        throw std::runtime_error(
            "When using robust norm, Shonan only tests a single rank. Set "
            "pMin = pMax");
      }
      const Values SO3Values = roundSolution(Qstar);
      return {SO3Values, 0};
    } else {
      // Check certificate of global optimality
      Vector minEigenVector;
      double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
      if (minEigenValue > parameters_.optimalityThreshold) {
        // If at global optimum, round and return solution
        const Values SO3Values = roundSolution(Qstar);
        return {SO3Values, minEigenValue};
      }

      // Not at global optimum yet, so check whether we will go to next level
      if (p != pMax) {
        // Calculate initial estimate for next level by following minEigenVector
        initialSOp =
            initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
      }
    }
  }
  throw std::runtime_error("Shonan::run did not converge for given pMax");
}

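// Example usage of the rank staircase above (an illustrative sketch; the g2o
// file name is hypothetical):
//
//   ShonanAveraging3 shonan("pose3.g2o");
//   Values initial = shonan.initializeRandomly();
//   auto [rotations, minEigenValue] = shonan.run(initial, 3, 10);
//
// rotations contains certifiably optimal Rot3 values whenever the returned
// minEigenValue exceeds parameters_.optimalityThreshold.
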
/* ************************************************************************* */
// Explicit instantiation for d=2
template class ShonanAveraging<2>;

ShonanAveraging2::ShonanAveraging2(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging2::ShonanAveraging2(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(parseMeasurements<Rot2>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}

// Extract Rot2 measurement from Pose2 between-factors
// Modeled after similar function in dataset.cpp
static BinaryMeasurement<Rot2> convertPose2ToBinaryMeasurementRot2(
    const BetweenFactor<Pose2>::shared_ptr &f) {
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot2> can only convert Pose2 measurements "
        "with Gaussian noise models.");
  const Matrix3 M = gaussian->covariance();
  // the (2,2) entry of Pose2's covariance corresponds to Rot2's covariance
  // because the tangent space of Pose2 is ordered as (vx, vy, w)
  auto model = noiseModel::Isotropic::Variance(1, M(2, 2));
  return BinaryMeasurement<Rot2>(f->key<1>(), f->key<2>(),
                                 f->measured().rotation(), model);
}

static ShonanAveraging2::Measurements extractRot2Measurements(
    const BetweenFactorPose2s &factors) {
  ShonanAveraging2::Measurements result;
  result.reserve(factors.size());
  for (auto f : factors) result.push_back(convertPose2ToBinaryMeasurementRot2(f));
  return result;
}

ShonanAveraging2::ShonanAveraging2(const BetweenFactorPose2s &factors,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(extractRot2Measurements(factors),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
// Explicit instantiation for d=3
template class ShonanAveraging<3>;

ShonanAveraging3::ShonanAveraging3(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging3::ShonanAveraging3(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(parseMeasurements<Rot3>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}

// TODO(frank): Deprecate after we land pybind wrapper

// Extract Rot3 measurement from Pose3 between-factors
// Modeled after similar function in dataset.cpp
static BinaryMeasurement<Rot3> convert(
    const BetweenFactor<Pose3>::shared_ptr &f) {
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot3> can only convert Pose3 measurements "
        "with Gaussian noise models.");
  const Matrix6 M = gaussian->covariance();
  // the upper-left 3x3 sub-block of Pose3's covariance corresponds to Rot3's
  // covariance because the tangent space of Pose3 is ordered as (w, T), where
  // w and T are both Vector3's
  auto model = noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0));
  return BinaryMeasurement<Rot3>(f->key<1>(), f->key<2>(),
                                 f->measured().rotation(), model);
}

static ShonanAveraging3::Measurements extractRot3Measurements(
    const BetweenFactorPose3s &factors) {
  ShonanAveraging3::Measurements result;
  result.reserve(factors.size());
  for (auto f : factors) result.push_back(convert(f));
  return result;
}

ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(extractRot3Measurements(factors),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
}  // namespace gtsam