ShonanAveraging.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file ShonanAveraging.cpp
 * @date March 2019 - August 2020
 * @author Frank Dellaert, David Rosen, and Jing Wu
 * @brief Shonan Averaging algorithm
 */

#include <SymEigsSolver.h>
#include <cmath>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/sfm/ShonanFactor.h>
#include <gtsam/sfm/ShonanGaugeFactor.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/slam/KarcherMeanFactor-inl.h>

#include <Eigen/Eigenvalues>
#include <algorithm>
#include <complex>
#include <iostream>
#include <map>
#include <random>
#include <set>
#include <vector>
#include <cassert>

namespace gtsam {

// In Wrappers we have no access to this, so have a default ready
static std::mt19937 kRandomNumberGenerator(42);

/* ************************************************************************* */
template <size_t d>
ShonanAveragingParameters<d>::ShonanAveragingParameters(
    const LevenbergMarquardtParams &_lm, const std::string &method,
    double optimalityThreshold, double alpha, double beta, double gamma)
    : lm(_lm),
      optimalityThreshold(optimalityThreshold),
      alpha(alpha),
      beta(beta),
      gamma(gamma),
      useHuber(false),
      certifyOptimality(true) {
  // By default, we will do conjugate gradient
  lm.linearSolverType = LevenbergMarquardtParams::Iterative;

  // Create subgraph builder parameters
  SubgraphBuilderParameters builderParameters;
  builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL;
  builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL;
  builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
  builderParameters.augmentationFactor = 0.0;

  // Choose optimization method
  if (method == "SUBGRAPH") {
    lm.iterativeParams =
        std::make_shared<SubgraphSolverParameters>(builderParameters);
  } else if (method == "SGPC") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
  } else if (method == "JACOBI") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<BlockJacobiPreconditionerParameters>());
  } else if (method == "QR") {
    lm.setLinearSolverType("MULTIFRONTAL_QR");
  } else if (method == "CHOLESKY") {
    lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY");
  } else {
    throw std::invalid_argument("ShonanAveragingParameters: unknown method");
  }
}
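
// Example (illustrative sketch, not part of the original file): constructing
// parameters that select the "SUBGRAPH" method handled above. Only APIs
// declared in ShonanAveraging.h are used here.
static ShonanAveragingParameters3 exampleShonanParameters() {
  LevenbergMarquardtParams lmParams = LevenbergMarquardtParams::CeresDefaults();
  ShonanAveragingParameters3 parameters(lmParams, "SUBGRAPH");
  parameters.setOptimalityThreshold(-1e-4);  // certificate tolerance
  return parameters;
}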

/* ************************************************************************* */
// Explicit instantiation for d=2 and d=3
template struct ShonanAveragingParameters<2>;
template struct ShonanAveragingParameters<3>;

/* ************************************************************************* */
// Calculate number of unknown rotations referenced by measurements.
// Throws an exception if keys are not numbered as expected (may change in
// future).
template <size_t d>
static size_t NrUnknowns(
    const typename ShonanAveraging<d>::Measurements &measurements) {
  Key maxKey = 0;
  std::set<Key> keys;
  for (const auto &measurement : measurements) {
    for (const Key &key : measurement.keys()) {
      maxKey = std::max(key, maxKey);
      keys.insert(key);
    }
  }
  size_t N = keys.size();
  if (maxKey != N - 1) {
    throw std::invalid_argument(
        "As of now, ShonanAveraging expects keys numbered 0..N-1");
  }
  return N;
}
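
// Example (illustrative sketch, not part of the original file): measurements
// must use consecutive integer keys 0..N-1, as NrUnknowns checks above. The
// rotations and sigma below are arbitrary placeholder values.
static ShonanAveraging3::Measurements exampleMeasurements() {
  auto model = noiseModel::Isotropic::Sigma(3, 0.05);  // 3-dof rotation noise
  ShonanAveraging3::Measurements measurements;
  measurements.emplace_back(0, 1, Rot3::Yaw(0.1), model);   // edge (0,1)
  measurements.emplace_back(1, 2, Rot3::Yaw(-0.2), model);  // edge (1,2)
  return measurements;  // NrUnknowns<3> reports N = 3 for these keys
}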

/* ************************************************************************* */
template <size_t d>
ShonanAveraging<d>::ShonanAveraging(const Measurements &measurements,
                                    const Parameters &parameters)
    : parameters_(parameters),
      measurements_(measurements),
      nrUnknowns_(NrUnknowns<d>(measurements)) {
  for (const auto &measurement : measurements_) {
    const auto &model = measurement.noiseModel();
    if (model && model->dim() != SO<d>::dimension) {
      measurement.print("Factor with incorrect noise model:\n");
      throw std::invalid_argument(
          "ShonanAveraging: measurements passed to "
          "constructor have incorrect dimension.");
    }
  }
  Q_ = buildQ();
  D_ = buildD();
  L_ = D_ - Q_;
}

/* ************************************************************************* */
template <size_t d>
NonlinearFactorGraph ShonanAveraging<d>::buildGraphAt(size_t p) const {
  NonlinearFactorGraph graph;
  auto G = std::make_shared<Matrix>(SO<-1>::VectorizedGenerators(p));

  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<ShonanFactor<d>>(keys[0], keys[1], Rij, p, model, G);
  }
  // Possibly add Karcher prior
  if (parameters_.beta > 0) {
    const size_t dim = SOn::Dimension(p);
    graph.emplace_shared<KarcherMeanFactor<SOn>>(graph.keys(), dim);
  }
  // Possibly add gauge factors - they are probably useless as gradient is zero
  if (parameters_.gamma > 0 && p > d + 1) {
    for (auto key : graph.keys())
      graph.emplace_shared<ShonanGaugeFactor>(key, p, d, parameters_.gamma);
  }
  return graph;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::costAt(size_t p, const Values &values) const {
  const NonlinearFactorGraph graph = buildGraphAt(p);
  return graph.error(values);
}

/* ************************************************************************* */
template <size_t d>
std::shared_ptr<LevenbergMarquardtOptimizer>
ShonanAveraging<d>::createOptimizerAt(size_t p, const Values &initial) const {
  // Build graph
  NonlinearFactorGraph graph = buildGraphAt(p);

  // The anchor prior is added here, as it depends on the initial value (and
  // its cost is zero)
  if (parameters_.alpha > 0) {
    const size_t dim = SOn::Dimension(p);
    const auto [i, value] = parameters_.anchor;
    auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha);
    graph.emplace_shared<PriorFactor<SOn>>(i, SOn::Lift(p, value.matrix()),
                                           model);
  }
  // Optimize
  return std::make_shared<LevenbergMarquardtOptimizer>(graph, initial,
                                                       parameters_.lm);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::tryOptimizingAt(size_t p,
                                           const Values &initial) const {
  auto lm = createOptimizerAt(p, initial);
  return lm->optimize();
}

/* ************************************************************************* */
// Project to p*dN Stiefel manifold
template <size_t d>
Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
  const size_t N = values.size();
  const size_t p = values.at<SOn>(0).rows();
  Matrix S(p, N * d);
  for (const auto &it : values.extract<SOn>()) {
    S.middleCols<d>(it.first * d) =
        it.second.matrix().leftCols<d>();  // project Qj to Stiefel
  }
  return S;
}
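
// Note (added for clarity): for N unknowns, S is p x dN; column block j holds
// the first d columns of the p x p matrix Qj, i.e., an element of the Stiefel
// manifold St(d, p).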

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto &it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
    const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0));
    result.insert(it.first, R);
  }
  return result;
}

template <>
Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto &it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
    const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>());
    result.insert(it.first, R);
  }
  return result;
}

/* ************************************************************************* */
template <size_t d>
static Matrix RoundSolutionS(const Matrix &S) {
  const size_t N = S.cols() / d;
  // First, compute a thin SVD of S
  Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
  const Vector sigmas = svd.singularValues();

  // Construct a diagonal matrix comprised of the first d singular values
  using DiagonalMatrix = Eigen::DiagonalMatrix<double, d>;
  DiagonalMatrix Sigma_d;
  Sigma_d.diagonal() = sigmas.head<d>();

  // Now, construct a rank-d truncated singular value decomposition for S
  Matrix R = Sigma_d * svd.matrixV().leftCols<d>().transpose();

  // Count the number of blocks whose determinants have positive sign
  size_t numPositiveBlocks = 0;
  for (size_t i = 0; i < N; ++i) {
    // Compute the determinant of the ith dxd block of R
    double determinant = R.middleCols<d>(d * i).determinant();
    if (determinant > 0) ++numPositiveBlocks;
  }

  if (numPositiveBlocks < N / 2) {
    // Fewer than half of the blocks have the correct sign.
    // To reverse their orientations, multiply with a reflection matrix.
    DiagonalMatrix reflector;
    reflector.setIdentity();
    reflector.diagonal()(d - 1) = -1;
    R = reflector * R;
  }

  return R;
}

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
  // Round to a 2*2N matrix
  Matrix R = RoundSolutionS<2>(S);

  // Finally, project each 2x2 rotation block to SO(2)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j));
    values.insert(j, Ri);
  }
  return values;
}

template <>
Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
  // Round to a 3*3N matrix
  Matrix R = RoundSolutionS<3>(S);

  // Finally, project each 3x3 rotation block to SO(3)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j));
    values.insert(j, Ri);
  }
  return values;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::roundSolution(const Values &values) const {
  // Project to p*dN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return roundSolutionS(S);
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::cost(const Values &values) const {
  NonlinearFactorGraph graph;
  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<FrobeniusBetweenFactor<SO<d>>>(
        keys[0], keys[1], SO<d>(Rij.matrix()), model);
  }
  // Convert the d-dimensional rotation values to SO<d> so the graph can
  // evaluate them
  Values result;
  for (const auto &it : values.extract<Rot>()) {
    result.insert(it.first, SO<d>(it.second.matrix()));
  }
  return graph.error(result);
}
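
// Note (added for clarity): cost() evaluates the Frobenius objective at the
// base dimension d, whereas costAt(p, values) evaluates the relaxed objective
// over SO(p); when the relaxation is exact, the two should agree at a rounded
// solution up to rounding error.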

/* ************************************************************************* */
// Get kappa from noise model
template <typename T, size_t d>
static double Kappa(const BinaryMeasurement<T> &measurement,
                    const ShonanAveragingParameters<d> &parameters) {
  const auto &isotropic = std::dynamic_pointer_cast<noiseModel::Isotropic>(
      measurement.noiseModel());
  double sigma;
  if (isotropic) {
    sigma = isotropic->sigma();
  } else {
    const auto &robust = std::dynamic_pointer_cast<noiseModel::Robust>(
        measurement.noiseModel());
    // Check if noise model is robust
    if (robust) {
      // If robust, check whether an optimality certificate is expected
      if (parameters.getCertifyOptimality()) {
        throw std::invalid_argument(
            "Certification of optimality does not work with robust cost.");
      } else {
        // Optimality certificate not required, so set a default sigma
        sigma = 1;
      }
    } else {
      throw std::invalid_argument(
          "Shonan averaging noise models must be isotropic (but robust losses "
          "are allowed).");
    }
  }
  return 1.0 / (sigma * sigma);
}
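
// Example (illustrative sketch, not part of the original file): for an
// isotropic model with sigma = 0.1, Kappa returns 1/sigma^2 = 100.
static double exampleKappa() {
  auto model = noiseModel::Isotropic::Sigma(3, 0.1);
  BinaryMeasurement<Rot3> measurement(0, 1, Rot3(), model);
  return Kappa<Rot3, 3>(measurement, ShonanAveragingParameters<3>());  // 100.0
}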

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildD() const {
  // Each measurement contributes 2*d elements along the diagonal of the
  // degree matrix.
  static constexpr size_t stride = 2 * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t k = 0; k < d; k++) {
      // Elements of ith block-diagonal
      triplets.emplace_back(di + k, di + k, kappa);
      // Elements of jth block-diagonal
      triplets.emplace_back(dj + k, dj + k, kappa);
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse D(dN, dN);
  D.setFromTriplets(triplets.begin(), triplets.end());

  return D;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildQ() const {
  // Each measurement contributes 2*d^2 elements on a pair of symmetric
  // off-diagonal blocks
  static constexpr size_t stride = 2 * d * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Extract rotation measurement
    const auto Rij = measurement.measured().matrix();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t r = 0; r < d; r++) {
      for (size_t c = 0; c < d; c++) {
        // Elements of ij block
        triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c));
        // Elements of ji block
        triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r));
      }
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse Q(dN, dN);
  Q.setFromTriplets(triplets.begin(), triplets.end());

  return Q;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Matrix &S) const {
  // Each pose contributes a d*d block along the diagonal of Lambda
  static constexpr size_t stride = d * d;

  // Reserve space for triplets
  const size_t N = nrUnknowns();
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * N);

  // Do sparse-dense multiply to get Q*S'
  auto QSt = Q_ * S.transpose();

  for (size_t j = 0; j < N; j++) {
    // Compute B, the building block for the j^th diagonal block of Lambda
    const size_t dj = d * j;
    Matrix B = QSt.middleRows(dj, d) * S.middleCols<d>(dj);

    // Elements of jth block-diagonal
    for (size_t r = 0; r < d; r++)
      for (size_t c = 0; c < d; c++)
        triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r)));
  }

  // Construct and return a sparse matrix from these triplets
  Sparse Lambda(d * N, d * N);
  Lambda.setFromTriplets(triplets.begin(), triplets.end());
  return Lambda;
}
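
// Note (added for clarity): the loop above builds the j-th diagonal block as
// the symmetrized product 0.5 * (B + B^T) with B = (Q S^T)_j * S_j, so Lambda
// is the symmetric block-diagonal part of Q * S^T * S.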

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Values &values) const {
  // Project to p*dN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return computeLambda(S);
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Values &values) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
// Perturb the initial initialVector by adding a spherically-uniformly
// distributed random vector with 0.03*||initialVector||_2 magnitude to
// initialVector
// ref: Part III. C, Rosen, D. and Carlone, L., 2017, September. Computational
// enhancements for certifiably correct SLAM. In Proceedings of the
// International Conference on Intelligent Robots and Systems.
static Vector perturb(const Vector &initialVector) {
  // generate a random perturbation with magnitude 0.03*||x_0||_2, as stated
  // in the paper
  int n = initialVector.rows();
  Vector disturb = Vector::Random(n);
  disturb.normalize();

  double magnitude = initialVector.norm();
  Vector perturbedVector = initialVector + 0.03 * magnitude * disturb;
  perturbedVector.normalize();
  return perturbedVector;
}
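
// Example (illustrative sketch, not part of the original file): perturb()
// returns a unit vector displaced from x/||x||_2 by roughly 3 percent.
static Vector examplePerturb() {
  const Vector x = Vector::Unit(5, 0);  // e_0 in R^5
  return perturb(x);                    // unit norm, close to e_0
}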

/* ************************************************************************* */
/// MINIMUM EIGENVALUE COMPUTATIONS

// Alg.6 from the paper "Distributed Certifiably Correct Pose-Graph
// Optimization". It takes the certificate matrix A as input; maxIterations
// and minEigenvalueNonnegativityTolerance default to 1000 and 10e-4. There
// are two parts to this algorithm:
// (1) compute the maximum eigenpair (\lambda_dom, \vect{v}_dom) of A by the
// power method; if \lambda_dom is less than zero, return that eigenpair.
// (2) compute the maximum eigenpair (\theta, \vect{v}) of
// C = \lambda_dom * I - A by the accelerated power method, and return
// (\lambda_dom - \theta, \vect{v}).
static bool PowerMinimumEigenValue(
    const Sparse &A, const Matrix &S, double &minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4) {

  // a. Compute the dominant eigenpair of A using the power method
  PowerMethod<Sparse> pmOperator(A);

  const bool pmConverged = pmOperator.compute(maxIterations, 1e-5);

  // Check convergence and bail out if necessary
  if (!pmConverged) return false;

  const double pmEigenValue = pmOperator.eigenvalue();

  if (pmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    minEigenValue = pmEigenValue;
    if (minEigenVector) {
      *minEigenVector = pmOperator.eigenvector();
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  const Sparse C =
      pmEigenValue * Matrix::Identity(A.rows(), A.cols()).sparseView() - A;
  const std::optional<Vector> initial = perturb(S.row(0));
  AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);

  const bool minConverged = apmShiftedOperator.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);

  if (!minConverged) return false;

  minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
  if (minEigenVector) {
    *minEigenVector = apmShiftedOperator.eigenvector();
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
  return true;
}
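
// Note (added for clarity): the shift works because for C = lambda_dom*I - A,
// lambda_max(C) = lambda_dom - lambda_min(A), so the accelerated power method
// applied to C recovers the minimum eigenpair of A.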

/** This is a lightweight struct used in conjunction with Spectra to compute
 * the minimum eigenvalue and eigenvector of the sparse matrix A; it has a
 * single nontrivial function, perform_op(x, y), that computes and returns
 * the product y = (A + sigma*I) x */
struct MatrixProdFunctor {
  // Const reference to an externally-held matrix whose minimum-eigenvalue we
  // want to compute
  const Sparse &A_;

  // Spectral shift
  double sigma_;

  // Constructor
  explicit MatrixProdFunctor(const Sparse &A, double sigma = 0)
      : A_(A), sigma_(sigma) {}

  int rows() const { return A_.rows(); }
  int cols() const { return A_.cols(); }

  // Matrix-vector multiplication operation
  void perform_op(const double *x, double *y) const {
    // Wrap the raw arrays as Eigen Vector types
    Eigen::Map<const Vector> X(x, rows());
    Eigen::Map<Vector> Y(y, rows());

    // Do the multiplication using wrapped Eigen vectors
    Y = A_ * X + sigma_ * X;
  }
};
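
// Example (illustrative sketch, not part of the original file): the functor
// computes y = (A + sigma*I) x through raw pointers, as Spectra expects.
static Vector exampleMatrixProd() {
  Sparse A(2, 2);
  A.insert(0, 0) = 2.0;
  A.insert(1, 1) = 3.0;
  const MatrixProdFunctor op(A, /*sigma=*/1.0);
  Vector x = Vector::Ones(2), y(2);
  op.perform_op(x.data(), y.data());  // y = [3, 4]
  return y;
}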

/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra.

// For the defaults, David Rosen says:
// - maxIterations refers to the max number of Lanczos iterations to run;
// ~1000 should be sufficiently large
// - We've been using 10^-4 for the nonnegativity tolerance
// - for numLanczosVectors, 20 is a good default value

static bool SparseMinimumEigenValue(
    const Sparse &A, const Matrix &S, double *minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4,
    Eigen::Index numLanczosVectors = 20) {
  // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
  MatrixProdFunctor lmOperator(A);
  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows()));
  lmEigenValueSolver.init();

  const int lmConverged = lmEigenValueSolver.compute(
      maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  // Check convergence and bail out if necessary
  if (lmConverged != 1) return false;

  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);

  if (lmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    *minEigenValue = lmEigenValue;
    if (minEigenVector) {
      *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  // The largest-magnitude eigenvalue is positive, and is therefore the
  // maximum eigenvalue. Therefore, after shifting the spectrum of A
  // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted
  // spectrum will lie in the interval [minEigenValue(A) - 2*lambda_max(A),
  // -lambda_max(A)]; in particular, the largest-magnitude eigenvalue of
  // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding
  // eigenvector v_min

  MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue);

  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      minEigenValueSolver(&minShiftedOperator, 1,
                          std::min(numLanczosVectors, A.rows()));

  // If S is a critical point of F, then S^T is also in the null space of S -
  // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are
  // eigenvectors corresponding to the eigenvalue 0. In the case that the
  // relaxation is exact, this is the *minimum* eigenvalue, and therefore the
  // rows of S are exactly the eigenvectors that we're looking for. On the
  // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at
  // least one strictly negative eigenvalue, and the rows of S are *unstable
  // fixed points* for the Lanczos iterations. Thus, we will take a slightly
  // "fuzzed" version of the first row of S as an initialization for the
  // Lanczos iterations; this allows for rapid convergence in the case that
  // the relaxation is exact (since we are starting close to a solution),
  // while simultaneously allowing the iterations to escape from this fixed
  // point in the case that the relaxation is not exact.
  Vector v0 = S.row(0).transpose();
  Vector perturbation(v0.size());
  perturbation.setRandom();
  perturbation.normalize();
  Vector xinit = v0 + (.03 * v0.norm()) * perturbation;  // Perturb v0 by ~3%

  // Use this to initialize the eigensolver
  minEigenValueSolver.init(xinit.data());

  // Now determine the relative precision required in the Lanczos method in
  // order to be able to estimate the smallest eigenvalue within an *absolute*
  // tolerance of 'minEigenvalueNonnegativityTolerance'
  const int minConverged = minEigenValueSolver.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
      Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  if (minConverged != 1) return false;

  *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
  if (minEigenVector) {
    *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
  return true;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
                                                Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue;
  bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "SparseMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
                                                  Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue = 0;
  bool success = PowerMinimumEigenValue(A, S, minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
    const Values &values) const {
  Vector minEigenVector;
  double minEigenValue = computeMinEigenValue(values, &minEigenVector);
  return {minEigenValue, minEigenVector};
}

/* ************************************************************************* */
template <size_t d>
bool ShonanAveraging<d>::checkOptimality(const Values &values) const {
  double minEigenValue = computeMinEigenValue(values);
  return minEigenValue > parameters_.optimalityThreshold;
}

/* ************************************************************************* */
template <size_t d>
VectorValues ShonanAveraging<d>::TangentVectorValues(size_t p,
                                                     const Vector &v) {
  VectorValues delta;
  // Create a tangent direction xi with eigenvector segment v_i
  const size_t dimension = SOn::Dimension(p);
  double sign0 = pow(-1.0, round((p + 1) / 2) + 1);
  for (size_t i = 0; i < v.size() / d; i++) {
    // Assumes key is 0-based integer
    const auto v_i = v.segment<d>(d * i);
    Vector xi = Vector::Zero(dimension);
    double sign = sign0;
    for (size_t j = 0; j < d; j++) {
      xi(j + p - d - 1) = sign * v_i(d - j - 1);
      sign = -sign;
    }
    delta.insert(i, xi);
  }
  return delta;
}

/* ************************************************************************* */
template <size_t d>
Matrix ShonanAveraging<d>::riemannianGradient(size_t p,
                                              const Values &values) const {
  Matrix S_dot = StiefelElementMatrix(values);
  // calculate the gradient of F(Q_dot) at Q_dot
  Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose();

  // project the Euclidean gradient onto the tangent space of the Stiefel
  // manifold
  Matrix symBlockDiagProduct(p, d * nrUnknowns());
  for (size_t i = 0; i < nrUnknowns(); i++) {
    // Compute block product
    const size_t di = d * i;
    const Matrix P = S_dot.middleCols<d>(di).transpose() *
                     euclideanGradient.middleCols<d>(di);
    // Symmetrize this block
    Matrix S = .5 * (P + P.transpose());
    // Compute S_dot * S and set the corresponding block
    symBlockDiagProduct.middleCols<d>(di) = S_dot.middleCols<d>(di) * S;
  }
  Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct;
  return riemannianGradient;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::LiftwithDescent(size_t p, const Values &values,
                                           const Vector &minEigenVector) {
  Values lifted = LiftTo<SOn>(p, values);
  VectorValues delta = TangentVectorValues(p, minEigenVector);
  return lifted.retract(delta);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeWithDescent(
    size_t p, const Values &values, const Vector &minEigenVector,
    double minEigenValue, double gradienTolerance,
    double preconditionedGradNormTolerance) const {
  double funcVal = costAt(p - 1, values);
  double alphaMin = 1e-2;
  double alpha =
      std::max(1024 * alphaMin, 10 * gradienTolerance / std::fabs(minEigenValue));
  std::vector<double> alphas;
  std::vector<double> fvals;
  // line search
  while (alpha >= alphaMin) {
    Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector);
    double funcValTest = costAt(p, Qplus);
    Matrix gradTest = riemannianGradient(p, Qplus);
    double gradTestNorm = gradTest.norm();
    // Record alpha and funcVal
    alphas.push_back(alpha);
    fvals.push_back(funcValTest);
    if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) {
      return Qplus;
    }
    alpha /= 2;
  }

  auto fminIter = std::min_element(fvals.begin(), fvals.end());
  auto minIdx = std::distance(fvals.begin(), fminIter);
  double fMin = fvals[minIdx];
  double aMin = alphas[minIdx];
  if (fMin < funcVal) {
    Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector);
    return Qplus;
  }

  return LiftwithDescent(p, values, alpha * minEigenVector);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
  Values initial;
  for (size_t j = 0; j < nrUnknowns(); j++) {
    initial.insert(j, Rot::Random(rng));
  }
  return initial;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly() const {
  return initializeRandomly(kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
                                                std::mt19937 &rng) const {
  const Values randomRotations = initializeRandomly(rng);
  return LiftTo<Rot>(p, randomRotations);  // lift to p!
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p) const {
  return initializeRandomlyAt(p, kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
std::pair<Values, double> ShonanAveraging<d>::run(const Values &initialEstimate,
                                                  size_t pMin,
                                                  size_t pMax) const {
  if (pMin < d) {
    throw std::runtime_error("pMin is smaller than the base dimension d");
  }
  Values Qstar;
  Values initialSOp = LiftTo<Rot>(pMin, initialEstimate);  // lift to pMin!
  for (size_t p = pMin; p <= pMax; p++) {
    // Optimize until convergence at this level
    Qstar = tryOptimizingAt(p, initialSOp);
    if (parameters_.getUseHuber() || !parameters_.getCertifyOptimality()) {
      // in this case, there is no optimality certification
      if (pMin != pMax) {
        throw std::runtime_error(
            "When using robust norm, Shonan only tests a single rank. Set "
            "pMin = pMax");
      }
      const Values SO3Values = roundSolution(Qstar);
      return {SO3Values, 0};
    } else {
      // Check certificate of global optimality
      Vector minEigenVector;
      double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
      if (minEigenValue > parameters_.optimalityThreshold) {
        // If at global optimum, round and return solution
        const Values SO3Values = roundSolution(Qstar);
        return {SO3Values, minEigenValue};
      }

      // Not at global optimum yet, so check whether we will go to next level
      if (p != pMax) {
        // Calculate initial estimate for next level by following
        // minEigenVector
        initialSOp =
            initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
      }
    }
  }
  throw std::runtime_error("Shonan::run did not converge for given pMax");
}
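
// Note (added for clarity): run() implements the Riemannian staircase: solve
// at rank p, certify via the minimum eigenvalue of the certificate matrix,
// and if the certificate fails, follow the corresponding eigenvector down
// into rank p+1.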

/* ************************************************************************* */
// Explicit instantiation for d=2
template class ShonanAveraging<2>;

ShonanAveraging2::ShonanAveraging2(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging2::ShonanAveraging2(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(parseMeasurements<Rot2>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}

// Extract Rot2 measurement from Pose2 between-factors
// Modeled after a similar function in dataset.cpp
static BinaryMeasurement<Rot2> convertPose2ToBinaryMeasurementRot2(
    const BetweenFactor<Pose2>::shared_ptr &f) {
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot2> can only convert Pose2 measurements "
        "with Gaussian noise models.");
  const Matrix3 M = gaussian->covariance();
  // The (2,2) entry of Pose2's covariance corresponds to Rot2's covariance,
  // because the tangent space of Pose2 is ordered as (vx, vy, w)
  auto model = noiseModel::Isotropic::Variance(1, M(2, 2));
  return BinaryMeasurement<Rot2>(f->key<1>(), f->key<2>(),
                                 f->measured().rotation(), model);
}

static ShonanAveraging2::Measurements extractRot2Measurements(
    const BetweenFactorPose2s &factors) {
  ShonanAveraging2::Measurements result;
  result.reserve(factors.size());
  for (auto f : factors)
    result.push_back(convertPose2ToBinaryMeasurementRot2(f));
  return result;
}

ShonanAveraging2::ShonanAveraging2(const BetweenFactorPose2s &factors,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(extractRot2Measurements(factors),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
// Explicit instantiation for d=3
template class ShonanAveraging<3>;

ShonanAveraging3::ShonanAveraging3(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging3::ShonanAveraging3(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(parseMeasurements<Rot3>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}
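
// Example (illustrative sketch, not part of the original file): end-to-end
// use of ShonanAveraging3; the g2o file name is hypothetical.
static void exampleRun() {
  ShonanAveraging3 shonan("pose3_graph.g2o");  // reads Rot3 measurements
  const Values initial = shonan.initializeRandomly();
  const auto [rotations, minEigenValue] = shonan.run(initial, 3, 10);
  std::cout << "certificate eigenvalue: " << minEigenValue << std::endl;
  rotations.at<Rot3>(0).print("R0 = ");  // certified optimal when the
                                         // eigenvalue exceeds the threshold
}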

// TODO(frank): Deprecate after we land pybind wrapper

// Extract Rot3 measurement from Pose3 between-factors
// Modeled after a similar function in dataset.cpp
static BinaryMeasurement<Rot3> convert(
    const BetweenFactor<Pose3>::shared_ptr &f) {
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot3> can only convert Pose3 measurements "
        "with Gaussian noise models.");
  const Matrix6 M = gaussian->covariance();
  // The upper-left 3x3 sub-block of Pose3's covariance corresponds to Rot3's
  // covariance, because the tangent space of Pose3 is ordered as (w, T),
  // where w and T are both Vector3's
  auto model = noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0));
  return BinaryMeasurement<Rot3>(f->key<1>(), f->key<2>(),
                                 f->measured().rotation(), model);
}

static ShonanAveraging3::Measurements extractRot3Measurements(
    const BetweenFactorPose3s &factors) {
  ShonanAveraging3::Measurements result;
  result.reserve(factors.size());
  for (auto f : factors) result.push_back(convert(f));
  return result;
}

ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(extractRot3Measurements(factors),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
}  // namespace gtsam