ShonanAveraging.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file   ShonanAveraging.cpp
 * @date   March 2019 - August 2020
 * @author Frank Dellaert, David Rosen, and Jing Wu
 * @brief  Shonan Averaging algorithm
 */

#include <SymEigsSolver.h>
#include <cmath>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/sfm/ShonanFactor.h>
#include <gtsam/sfm/ShonanGaugeFactor.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/slam/KarcherMeanFactor-inl.h>

#include <Eigen/Eigenvalues>
#include <algorithm>
#include <complex>
#include <iostream>
#include <map>
#include <random>
#include <set>
#include <vector>

namespace gtsam {

// In Wrappers we have no access to this, so have a default ready
static std::mt19937 kRandomNumberGenerator(42);

using Sparse = Eigen::SparseMatrix<double>;

/* ************************************************************************* */
template <size_t d>
ShonanAveragingParameters<d>::ShonanAveragingParameters(
    const LevenbergMarquardtParams &_lm, const std::string &method,
    double optimalityThreshold, double alpha, double beta, double gamma)
    : lm(_lm),
      optimalityThreshold(optimalityThreshold),
      alpha(alpha),
      beta(beta),
      gamma(gamma),
      useHuber(false),
      certifyOptimality(true) {
  // By default, we will do conjugate gradient
  lm.linearSolverType = LevenbergMarquardtParams::Iterative;

  // Create subgraph builder parameters
  SubgraphBuilderParameters builderParameters;
  builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL;
  builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL;
  builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
  builderParameters.augmentationFactor = 0.0;

  auto pcg = boost::make_shared<PCGSolverParameters>();

  // Choose optimization method
  if (method == "SUBGRAPH") {
    lm.iterativeParams =
        boost::make_shared<SubgraphSolverParameters>(builderParameters);
  } else if (method == "SGPC") {
    pcg->preconditioner_ =
        boost::make_shared<SubgraphPreconditionerParameters>(builderParameters);
    lm.iterativeParams = pcg;
  } else if (method == "JACOBI") {
    pcg->preconditioner_ =
        boost::make_shared<BlockJacobiPreconditionerParameters>();
    lm.iterativeParams = pcg;
  } else if (method == "QR") {
    lm.setLinearSolverType("MULTIFRONTAL_QR");
  } else if (method == "CHOLESKY") {
    lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY");
  } else {
    throw std::invalid_argument("ShonanAveragingParameters: unknown method");
  }
}
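
// Editor's example (a sketch, not part of the original file): constructing
// these parameters explicitly; the values shown are the declared defaults
// from ShonanAveraging.h.
//
//   LevenbergMarquardtParams lmParams = LevenbergMarquardtParams::CeresDefaults();
//   ShonanAveragingParameters<3> params(lmParams, "JACOBI",
//                                       -1e-4,  // optimalityThreshold
//                                       0.0,    // alpha (anchor prior weight)
//                                       1.0,    // beta (Karcher prior weight)
//                                       0.0);   // gamma (gauge factor weight)
//
// Any other method string falls through to the std::invalid_argument above.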

/* ************************************************************************* */
// Explicit instantiation for d=2 and d=3
template struct ShonanAveragingParameters<2>;
template struct ShonanAveragingParameters<3>;

/* ************************************************************************* */
// Calculate number of unknown rotations referenced by measurements
// Throws exception if keys are not numbered as expected (may change in future).
template <size_t d>
static size_t NrUnknowns(
    const typename ShonanAveraging<d>::Measurements &measurements) {
  Key maxKey = 0;
  std::set<Key> keys;
  for (const auto &measurement : measurements) {
    for (const Key &key : measurement.keys()) {
      maxKey = std::max(key, maxKey);
      keys.insert(key);
    }
  }
  size_t N = keys.size();
  if (maxKey != N - 1) {
    throw std::invalid_argument(
        "As of now, ShonanAveraging expects keys numbered 0..N-1");
  }
  return N;
}

/* ************************************************************************* */
template <size_t d>
ShonanAveraging<d>::ShonanAveraging(const Measurements &measurements,
                                    const Parameters &parameters)
    : parameters_(parameters),
      measurements_(measurements),
      nrUnknowns_(NrUnknowns<d>(measurements)) {
  for (const auto &measurement : measurements_) {
    const auto &model = measurement.noiseModel();
    if (model && model->dim() != SO<d>::dimension) {
      measurement.print("Factor with incorrect noise model:\n");
      throw std::invalid_argument(
          "ShonanAveraging: measurements passed to "
          "constructor have incorrect dimension.");
    }
  }
  Q_ = buildQ();
  D_ = buildD();
  L_ = D_ - Q_;
}

/* ************************************************************************* */
template <size_t d>
NonlinearFactorGraph ShonanAveraging<d>::buildGraphAt(size_t p) const {
  NonlinearFactorGraph graph;
  auto G = boost::make_shared<Matrix>(SO<-1>::VectorizedGenerators(p));

  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<ShonanFactor<d>>(keys[0], keys[1], Rij, p, model, G);
  }
  // Possibly add Karcher prior
  if (parameters_.beta > 0) {
    const size_t dim = SOn::Dimension(p);
    graph.emplace_shared<KarcherMeanFactor<SOn>>(graph.keys(), dim);
  }
  // Possibly add gauge factors - they are probably useless as gradient is zero
  if (parameters_.gamma > 0 && p > d + 1) {
    for (auto key : graph.keys())
      graph.emplace_shared<ShonanGaugeFactor>(key, p, d, parameters_.gamma);
  }
  return graph;
}
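
// Editor's note: with the default parameters (beta=1, gamma=0), the graph at
// rank p therefore contains one ShonanFactor<d> per measurement plus a single
// KarcherMeanFactor over all keys; the ShonanGaugeFactors only appear once
// gamma > 0 and p > d + 1.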

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::costAt(size_t p, const Values &values) const {
  const NonlinearFactorGraph graph = buildGraphAt(p);
  return graph.error(values);
}

/* ************************************************************************* */
template <size_t d>
boost::shared_ptr<LevenbergMarquardtOptimizer>
ShonanAveraging<d>::createOptimizerAt(size_t p, const Values &initial) const {
  // Build graph
  NonlinearFactorGraph graph = buildGraphAt(p);

  // Anchor prior is added here as it depends on initial value (and cost is zero)
  if (parameters_.alpha > 0) {
    size_t i;
    Rot value;
    const size_t dim = SOn::Dimension(p);
    std::tie(i, value) = parameters_.anchor;
    auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha);
    graph.emplace_shared<PriorFactor<SOn>>(i, SOn::Lift(p, value.matrix()),
                                           model);
  }
  // Optimize
  return boost::make_shared<LevenbergMarquardtOptimizer>(graph, initial,
                                                         parameters_.lm);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::tryOptimizingAt(size_t p,
                                           const Values &initial) const {
  auto lm = createOptimizerAt(p, initial);
  return lm->optimize();
}

/* ************************************************************************* */
// Project to pxdN Stiefel manifold
template <size_t d>
Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
  const size_t N = values.size();
  const size_t p = values.at<SOn>(0).rows();
  Matrix S(p, N * d);
  for (const auto it : values.filter<SOn>()) {
    S.middleCols<d>(it.key * d) =
        it.value.matrix().leftCols<d>();  // project Qj to Stiefel
  }
  return S;
}
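
// Editor's sketch of the layout: with N=2 unknowns at rank p=5 and d=3, S is
// 5x6, and block j (i.e. S.middleCols<3>(3 * j)) holds the first 3 columns of
// the SO(5) matrix stored under key j -- a p x d Stiefel point.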

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto it : values.filter<SOn>()) {
    assert(it.value.rows() == p);
    const auto &M = it.value.matrix();
    const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0));
    result.insert(it.key, R);
  }
  return result;
}

template <>
Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const {
  Values result;
  for (const auto it : values.filter<SOn>()) {
    assert(it.value.rows() == p);
    const auto &M = it.value.matrix();
    const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>());
    result.insert(it.key, R);
  }
  return result;
}

/* ************************************************************************* */
template <size_t d>
static Matrix RoundSolutionS(const Matrix &S) {
  const size_t N = S.cols() / d;
  // First, compute a thin SVD of S
  Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
  const Vector sigmas = svd.singularValues();

  // Construct a diagonal matrix comprised of the first d singular values
  using DiagonalMatrix = Eigen::DiagonalMatrix<double, d>;
  DiagonalMatrix Sigma_d;
  Sigma_d.diagonal() = sigmas.head<d>();

  // Now, construct a rank-d truncated singular value decomposition for S
  Matrix R = Sigma_d * svd.matrixV().leftCols<d>().transpose();

  // Count the number of blocks whose determinants have positive sign
  size_t numPositiveBlocks = 0;
  for (size_t i = 0; i < N; ++i) {
    // Compute the determinant of the ith dxd block of R
    double determinant = R.middleCols<d>(d * i).determinant();
    if (determinant > 0) ++numPositiveBlocks;
  }

  if (numPositiveBlocks < N / 2) {
    // Fewer than half of the total number of blocks have the correct sign.
    // To reverse their orientations, multiply with a reflection matrix.
    DiagonalMatrix reflector;
    reflector.setIdentity();
    reflector.diagonal()(d - 1) = -1;
    R = reflector * R;
  }

  return R;
}
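
// Editor's note on the sign fix: the reflector is diag(1, ..., 1, -1), so
// left-multiplying R flips the sign of every d x d block's determinant at
// once; a majority of det < 0 blocks therefore becomes a majority of det > 0
// blocks, which is what the per-block projection to SO(d) below expects.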

/* ************************************************************************* */
template <>
Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const {
  // Round to a 2*2N matrix
  Matrix R = RoundSolutionS<2>(S);

  // Finally, project each 2x2 rotation block to SO(2)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j));
    values.insert(j, Ri);
  }
  return values;
}

template <>
Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const {
  // Round to a 3*3N matrix
  Matrix R = RoundSolutionS<3>(S);

  // Finally, project each 3x3 rotation block to SO(3)
  Values values;
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j));
    values.insert(j, Ri);
  }
  return values;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::roundSolution(const Values &values) const {
  // Project to pxdN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return roundSolutionS(S);
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::cost(const Values &values) const {
  NonlinearFactorGraph graph;
  for (const auto &measurement : measurements_) {
    const auto &keys = measurement.keys();
    const auto &Rij = measurement.measured();
    const auto &model = measurement.noiseModel();
    graph.emplace_shared<FrobeniusBetweenFactor<SO<d>>>(
        keys[0], keys[1], SO<d>(Rij.matrix()), model);
  }
  // Convert the Rot values to SO(d) values so the graph can be evaluated
  Values result;
  for (const auto it : values.filter<Rot>()) {
    result.insert(it.key, SO<d>(it.value.matrix()));
  }
  return graph.error(result);
}

/* ************************************************************************* */
// Get kappa from noise model
template <typename T, size_t d>
static double Kappa(const BinaryMeasurement<T> &measurement,
                    const ShonanAveragingParameters<d> &parameters) {
  const auto &isotropic = boost::dynamic_pointer_cast<noiseModel::Isotropic>(
      measurement.noiseModel());
  double sigma;
  if (isotropic) {
    sigma = isotropic->sigma();
  } else {
    const auto &robust = boost::dynamic_pointer_cast<noiseModel::Robust>(
        measurement.noiseModel());
    // Check if noise model is robust
    if (robust) {
      // If robust, check if optimality certificate is expected
      if (parameters.getCertifyOptimality()) {
        throw std::invalid_argument(
            "Certification of optimality does not work with robust cost.");
      } else {
        // Optimality certificate not required, so set a default sigma
        sigma = 1;
      }
    } else {
      throw std::invalid_argument(
          "Shonan averaging noise models must be isotropic (but robust losses "
          "are allowed).");
    }
  }
  return 1.0 / (sigma * sigma);
}
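
// Worked example (editor's): an isotropic model with sigma = 0.1 gives
// kappa = 1 / (0.1 * 0.1) = 100. A robust (e.g. Huber) model is only accepted
// when certifyOptimality is off, in which case kappa defaults to 1.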

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildD() const {
  // Each measurement contributes 2*d elements along the diagonal of the
  // degree matrix.
  static constexpr size_t stride = 2 * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t k = 0; k < d; k++) {
      // Elements of ith block-diagonal
      triplets.emplace_back(di + k, di + k, kappa);
      // Elements of jth block-diagonal
      triplets.emplace_back(dj + k, dj + k, kappa);
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse D(dN, dN);
  D.setFromTriplets(triplets.begin(), triplets.end());

  return D;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::buildQ() const {
  // Each measurement contributes 2*d^2 elements on a pair of symmetric
  // off-diagonal blocks
  static constexpr size_t stride = 2 * d * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Extract rotation measurement
    const auto Rij = measurement.measured().matrix();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t r = 0; r < d; r++) {
      for (size_t c = 0; c < d; c++) {
        // Elements of ij block
        triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c));
        // Elements of ji block
        triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r));
      }
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse Q(dN, dN);
  Q.setFromTriplets(triplets.begin(), triplets.end());

  return Q;
}
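
// Editor's note: D and Q together give the connection Laplacian L = D - Q
// formed in the constructor above; Q is symmetric by construction, since the
// (i,j) block stores kappa * Rij while the (j,i) block stores kappa * Rij^T.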

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Matrix &S) const {
  // Each pose contributes d*d elements to the block diagonal of Lambda
  static constexpr size_t stride = d * d;

  // Reserve space for triplets
  const size_t N = nrUnknowns();
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * N);

  // Do sparse-dense multiply to get Q*S'
  auto QSt = Q_ * S.transpose();

  for (size_t j = 0; j < N; j++) {
    // Compute B, the building block for the j^th diagonal block of Lambda
    const size_t dj = d * j;
    Matrix B = QSt.middleRows(dj, d) * S.middleCols<d>(dj);

    // Elements of jth block-diagonal
    for (size_t r = 0; r < d; r++)
      for (size_t c = 0; c < d; c++)
        triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r)));
  }

  // Construct and return a sparse matrix from these triplets
  Sparse Lambda(d * N, d * N);
  Lambda.setFromTriplets(triplets.begin(), triplets.end());
  return Lambda;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Values &values) const {
  // Project to pxdN Stiefel manifold...
  Matrix S = StiefelElementMatrix(values);
  // ...and call the version above.
  return computeLambda(S);
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Values &values) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

/* ************************************************************************* */
// Perturb the initialVector by adding a spherically-uniformly distributed
// random vector with 0.03*||initialVector||_2 magnitude to it.
// Ref: Part III.C of Rosen, D. and Carlone, L., 2017, September. Computational
// enhancements for certifiably correct SLAM. In Proceedings of the
// International Conference on Intelligent Robots and Systems.
static Vector perturb(const Vector &initialVector) {
  // generate a perturbation of magnitude 0.03*||x_0||_2, as stated in the paper
  int n = initialVector.rows();
  Vector disturb = Vector::Random(n);
  disturb.normalize();

  double magnitude = initialVector.norm();
  Vector perturbedVector = initialVector + 0.03 * magnitude * disturb;
  perturbedVector.normalize();
  return perturbedVector;
}

/* ************************************************************************* */
/// MINIMUM EIGENVALUE COMPUTATIONS
// Alg. 6 from the paper Distributed Certifiably Correct Pose-Graph
// Optimization. It takes the certificate matrix A as input; maxIterations and
// minEigenvalueNonnegativityTolerance default to 1000 and 10e-4 respectively.
// There are two parts to this algorithm:
// (1) compute the dominant eigenpair (\lambda_dom, \vect{v}_dom) of A by the
// power method; if \lambda_dom is less than zero, return that eigenpair.
// (2) compute the dominant eigenpair (\theta, \vect{v}) of
// C = \lambda_dom * I - A by the accelerated power method, and return
// (\lambda_dom - \theta, \vect{v}).
static bool PowerMinimumEigenValue(
    const Sparse &A, const Matrix &S, double &minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4) {

  // a. Compute dominant eigenpair of A using power method
  PowerMethod<Sparse> pmOperator(A);

  const bool pmConverged = pmOperator.compute(maxIterations, 1e-5);

  // Check convergence and bail out if necessary
  if (!pmConverged) return false;

  const double pmEigenValue = pmOperator.eigenvalue();

  if (pmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    minEigenValue = pmEigenValue;
    if (minEigenVector) {
      *minEigenVector = pmOperator.eigenvector();
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  const Sparse C =
      pmEigenValue * Matrix::Identity(A.rows(), A.cols()).sparseView() - A;
  const boost::optional<Vector> initial = perturb(S.row(0));
  AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);

  const bool minConverged = apmShiftedOperator.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);

  if (!minConverged) return false;

  minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
  if (minEigenVector) {
    *minEigenVector = apmShiftedOperator.eigenvector();
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
  return true;
}
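
// Editor's check on step (2): when \lambda_dom >= 0, every eigenvalue of
// C = \lambda_dom * I - A equals \lambda_dom - \lambda_i(A) >= 0, so C's
// dominant eigenvalue \theta corresponds to A's *minimum* eigenvalue, which
// is recovered as \lambda_dom - \theta above.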

/* ************************************************************************* */
/** This is a lightweight struct used in conjunction with Spectra to compute
 * the minimum eigenvalue and eigenvector of the sparse matrix A; it has a
 * single nontrivial function, perform_op(x,y), that computes and returns the
 * product y = (A + sigma*I) x */
struct MatrixProdFunctor {
  // Const reference to an externally-held matrix whose minimum-eigenvalue we
  // want to compute
  const Sparse &A_;

  // Spectral shift
  double sigma_;

  // Constructor
  explicit MatrixProdFunctor(const Sparse &A, double sigma = 0)
      : A_(A), sigma_(sigma) {}

  int rows() const { return A_.rows(); }
  int cols() const { return A_.cols(); }

  // Matrix-vector multiplication operation
  void perform_op(const double *x, double *y) const {
    // Wrap the raw arrays as Eigen Vector types
    Eigen::Map<const Vector> X(x, rows());
    Eigen::Map<Vector> Y(y, rows());

    // Do the multiplication using wrapped Eigen vectors
    Y = A_ * X + sigma_ * X;
  }
};

/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra.
/// This does 2 things:
///
/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude
/// eigenvalue (2) A second eigenvalue computation applied to A - sigma*I,
/// where sigma is chosen to make the minimum eigenvalue of A the extremal
/// eigenvalue of A - sigma*I.
///
/// Upon completion, this method returns a boolean value indicating whether the
/// minimum eigenvalue was computed to the required precision -- if so, it sets
/// the values of minEigenValue and minEigenVector appropriately.

/// Note that in the following function signature, S is supposed to be the
/// block-row-matrix that is a critical point for the optimization algorithm;
/// either S (Stiefel manifold) or R (block rotations). We use this to
/// construct a starting vector v for the Lanczos process that will be close to
/// the minimum eigenvector we're looking for whenever the relaxation is exact
/// -- this is a key feature that helps to make this method fast. Note that
/// instead of passing in all of S, it would be enough to pass in one of S's
/// *rows*, if that's more convenient.

// For the defaults, David Rosen says:
// - maxIterations refers to the max number of Lanczos iterations to run;
// ~1000 should be sufficiently large
// - We've been using 10^-4 for the nonnegativity tolerance
// - for numLanczosVectors, 20 is a good default value

static bool SparseMinimumEigenValue(
    const Sparse &A, const Matrix &S, double *minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4,
    Eigen::Index numLanczosVectors = 20) {
  // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
  MatrixProdFunctor lmOperator(A);
  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows()));
  lmEigenValueSolver.init();

  const int lmConverged = lmEigenValueSolver.compute(
      maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  // Check convergence and bail out if necessary
  if (lmConverged != 1) return false;

  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);

  if (lmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    *minEigenValue = lmEigenValue;
    if (minEigenVector) {
      *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  // The largest-magnitude eigenvalue is positive, and is therefore the
  // maximum eigenvalue. Therefore, after shifting the spectrum of A
  // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted
  // spectrum will lie in the interval [minEigenValue(A) - 2*lambda_max(A),
  // -lambda_max(A)]; in particular, the largest-magnitude eigenvalue of
  // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding
  // eigenvector v_min

  MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue);

  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      minEigenValueSolver(&minShiftedOperator, 1,
                          std::min(numLanczosVectors, A.rows()));

  // If S is a critical point of F, then S^T is also in the null space of S -
  // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are
  // eigenvectors corresponding to the eigenvalue 0. In the case that the
  // relaxation is exact, this is the *minimum* eigenvalue, and therefore the
  // rows of S are exactly the eigenvectors that we're looking for. On the
  // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at
  // least one strictly negative eigenvalue, and the rows of S are *unstable
  // fixed points* for the Lanczos iterations. Thus, we will take a slightly
  // "fuzzed" version of the first row of S as an initialization for the
  // Lanczos iterations; this allows for rapid convergence in the case that
  // the relaxation is exact (since we are starting close to a solution), while
  // simultaneously allowing the iterations to escape from this fixed point in
  // the case that the relaxation is not exact.
  Vector v0 = S.row(0).transpose();
  Vector perturbation(v0.size());
  perturbation.setRandom();
  perturbation.normalize();
  Vector xinit = v0 + (.03 * v0.norm()) * perturbation;  // Perturb v0 by ~3%

  // Use this to initialize the eigensolver
  minEigenValueSolver.init(xinit.data());

  // Now determine the relative precision required in the Lanczos method in
  // order to be able to estimate the smallest eigenvalue within an *absolute*
  // tolerance of 'minEigenvalueNonnegativityTolerance'
  const int minConverged = minEigenValueSolver.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
      Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  if (minConverged != 1) return false;

  *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
  if (minEigenVector) {
    *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
  return true;
}
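
// Editor's check on the shift arithmetic: with lambda_max > 0, the operator
// A - 2*lambda_max*I has eigenvalues lambda_i - 2*lambda_max, all negative,
// and the largest-magnitude one is lambda_min - 2*lambda_max; adding back
// 2*lmEigenValue at the end therefore recovers lambda_min exactly.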

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
                                                Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue;
  bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "SparseMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
                                                  Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue = 0;
  bool success = PowerMinimumEigenValue(A, S, minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}

/* ************************************************************************* */
template <size_t d>
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
    const Values &values) const {
  Vector minEigenVector;
  double minEigenValue = computeMinEigenValue(values, &minEigenVector);
  return std::make_pair(minEigenValue, minEigenVector);
}

/* ************************************************************************* */
template <size_t d>
bool ShonanAveraging<d>::checkOptimality(const Values &values) const {
  double minEigenValue = computeMinEigenValue(values);
  return minEigenValue > parameters_.optimalityThreshold;
}

/* ************************************************************************* */
template <size_t d>
VectorValues ShonanAveraging<d>::TangentVectorValues(size_t p,
                                                     const Vector &v) {
  VectorValues delta;
  // Create a tangent direction xi with eigenvector segment v_i
  const size_t dimension = SOn::Dimension(p);
  double sign0 = pow(-1.0, round((p + 1) / 2) + 1);
  for (size_t i = 0; i < v.size() / d; i++) {
    // Assumes key is 0-based integer
    const auto v_i = v.segment<d>(d * i);
    Vector xi = Vector::Zero(dimension);
    double sign = sign0;
    for (size_t j = 0; j < d; j++) {
      xi(j + p - d - 1) = sign * v_i(d - j - 1);
      sign = -sign;
    }
    delta.insert(i, xi);
  }
  return delta;
}

/* ************************************************************************* */
template <size_t d>
Matrix ShonanAveraging<d>::riemannianGradient(size_t p,
                                              const Values &values) const {
  Matrix S_dot = StiefelElementMatrix(values);
  // calculate the gradient of F(Q_dot) at Q_dot
  Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose();
  // cout << "euclidean gradient rows and cols" << euclideanGradient.rows() <<
  // "\t" << euclideanGradient.cols() << endl;

  // project the gradient onto the tangent space of the Stiefel manifold
  Matrix symBlockDiagProduct(p, d * nrUnknowns());
  for (size_t i = 0; i < nrUnknowns(); i++) {
    // Compute block product
    const size_t di = d * i;
    const Matrix P = S_dot.middleCols<d>(di).transpose() *
                     euclideanGradient.middleCols<d>(di);
    // Symmetrize this block
    Matrix S = .5 * (P + P.transpose());
    // Compute S_dot * S and set corresponding block
    symBlockDiagProduct.middleCols<d>(di) = S_dot.middleCols<d>(di) * S;
  }
  Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct;
  return riemannianGradient;
}
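
// Editor's note: this implements the standard Stiefel-manifold projection
// grad F(Y) = nabla F(Y) - Y * sym(Y^T * nabla F(Y)), applied blockwise to
// each p x d block of S_dot.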

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::LiftwithDescent(size_t p, const Values &values,
                                           const Vector &minEigenVector) {
  Values lifted = LiftTo<SOn>(p, values);
  VectorValues delta = TangentVectorValues(p, minEigenVector);
  return lifted.retract(delta);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeWithDescent(
    size_t p, const Values &values, const Vector &minEigenVector,
    double minEigenValue, double gradienTolerance,
    double preconditionedGradNormTolerance) const {
  double funcVal = costAt(p - 1, values);
  double alphaMin = 1e-2;
  double alpha =
      std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue));
  std::vector<double> alphas;
  std::vector<double> fvals;
  // line search
  while ((alpha >= alphaMin)) {
    Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector);
    double funcValTest = costAt(p, Qplus);
    Matrix gradTest = riemannianGradient(p, Qplus);
    double gradTestNorm = gradTest.norm();
    // Record alpha and funcVal
    alphas.push_back(alpha);
    fvals.push_back(funcValTest);
    if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) {
      return Qplus;
    }
    alpha /= 2;
  }

  auto fminIter = std::min_element(fvals.begin(), fvals.end());
  auto minIdx = std::distance(fvals.begin(), fminIter);
  double fMin = fvals[minIdx];
  double aMin = alphas[minIdx];
  if (fMin < funcVal) {
    Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector);
    return Qplus;
  }

  return LiftwithDescent(p, values, alpha * minEigenVector);
}
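
// Editor's note on the line search: alpha is halved until the lifted point
// both decreases the cost and keeps a Riemannian gradient above
// gradienTolerance; failing that, the best recorded cost wins, and as a last
// resort the final (smallest) step is taken anyway.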

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly(std::mt19937 &rng) const {
  Values initial;
  for (size_t j = 0; j < nrUnknowns(); j++) {
    initial.insert(j, Rot::Random(rng));
  }
  return initial;
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomly() const {
  return initializeRandomly(kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
                                                std::mt19937 &rng) const {
  const Values randomRotations = initializeRandomly(rng);
  return LiftTo<Rot3>(p, randomRotations);  // lift to p!
}

/* ************************************************************************* */
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p) const {
  return initializeRandomlyAt(p, kRandomNumberGenerator);
}

/* ************************************************************************* */
template <size_t d>
std::pair<Values, double> ShonanAveraging<d>::run(const Values &initialEstimate,
                                                  size_t pMin,
                                                  size_t pMax) const {
  Values Qstar;
  Values initialSOp = LiftTo<Rot>(pMin, initialEstimate);  // lift to pMin!
  for (size_t p = pMin; p <= pMax; p++) {
    // Optimize until convergence at this level
    Qstar = tryOptimizingAt(p, initialSOp);
    if (parameters_.useHuber || !parameters_.certifyOptimality) {
      // in this case, there is no optimality certification
      if (pMin != pMax) {
        throw std::runtime_error(
            "When using robust norm, Shonan only tests a single rank. Set "
            "pMin = pMax");
      }
      const Values SO3Values = roundSolution(Qstar);
      return std::make_pair(SO3Values, 0);
    } else {
      // Check certificate of global optimality
      Vector minEigenVector;
      double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
      if (minEigenValue > parameters_.optimalityThreshold) {
        // If at global optimum, round and return solution
        const Values SO3Values = roundSolution(Qstar);
        return std::make_pair(SO3Values, minEigenValue);
      }

      // Not at global optimum yet, so check whether we will go to next level
      if (p != pMax) {
        // Calculate initial estimate for next level by following minEigenVector
        initialSOp =
            initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
      }
    }
  }
  throw std::runtime_error("Shonan::run did not converge for given pMax");
}
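
// Editor's example (a sketch, not part of the original file): typical use of
// the d=3 wrapper declared in ShonanAveraging.h; "pose3.g2o" is a hypothetical
// input file.
//
//   ShonanAveraging3 shonan("pose3.g2o");
//   Values initial = shonan.initializeRandomly();
//   auto result = shonan.run(initial);       // defaults: pMin = 3, pMax = 10
//   const Values &rotations = result.first;  // Rot3 values, keys 0..N-1
//   double certificate = result.second;      // min eigenvalue if certified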

/* ************************************************************************* */
// Explicit instantiation for d=2
template class ShonanAveraging<2>;

ShonanAveraging2::ShonanAveraging2(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging2::ShonanAveraging2(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<2>(maybeRobust(parseMeasurements<Rot2>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
// Explicit instantiation for d=3
template class ShonanAveraging<3>;

ShonanAveraging3::ShonanAveraging3(const Measurements &measurements,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(measurements, parameters.getUseHuber()),
                         parameters) {}

ShonanAveraging3::ShonanAveraging3(std::string g2oFile,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(parseMeasurements<Rot3>(g2oFile),
                                     parameters.getUseHuber()),
                         parameters) {}

// TODO(frank): Deprecate after we land pybind wrapper

// Extract Rot3 measurement from Pose3 BetweenFactors
// Modeled after similar function in dataset.cpp
static BinaryMeasurement<Rot3> convert(
    const BetweenFactor<Pose3>::shared_ptr &f) {
  auto gaussian =
      boost::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot3> can only convert Pose3 measurements "
        "with Gaussian noise models.");
  const Matrix6 M = gaussian->covariance();
  auto model = noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3));
  return BinaryMeasurement<Rot3>(f->key1(), f->key2(), f->measured().rotation(),
                                 model);
}

static ShonanAveraging3::Measurements extractRot3Measurements(
    const BetweenFactorPose3s &factors) {
  ShonanAveraging3::Measurements result;
  result.reserve(factors.size());
  for (auto f : factors) result.push_back(convert(f));
  return result;
}

ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors,
                                   const Parameters &parameters)
    : ShonanAveraging<3>(maybeRobust(extractRot3Measurements(factors),
                                     parameters.getUseHuber()),
                         parameters) {}

/* ************************************************************************* */
}  // namespace gtsam