#include <Eigen/Eigenvalues>
template <size_t d>
ShonanAveragingParameters<d>::ShonanAveragingParameters(
    const LevenbergMarquardtParams &_lm, const std::string &method,
    double optimalityThreshold, double alpha, double beta, double gamma)
    : lm(_lm),
      optimalityThreshold(optimalityThreshold),
      alpha(alpha),
      beta(beta),
      gamma(gamma),
      useHuber(false),
      certifyOptimality(true) {
  // By default, use an iterative (conjugate-gradient) linear solver
  lm.linearSolverType = LevenbergMarquardtParams::Iterative;

  // Create subgraph builder parameters, used by the SUBGRAPH and SGPC methods
  SubgraphBuilderParameters builderParameters;

  // Choose the linear solver / preconditioner from the method string
  if (method == "SUBGRAPH") {
    lm.iterativeParams =
        std::make_shared<SubgraphSolverParameters>(builderParameters);
  } else if (method == "SGPC") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
  } else if (method == "JACOBI") {
    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
        std::make_shared<BlockJacobiPreconditionerParameters>());
  } else if (method == "QR") {
    lm.setLinearSolverType("MULTIFRONTAL_QR");
  } else if (method == "CHOLESKY") {
    lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY");
  } else {
    throw std::invalid_argument("ShonanAveragingParameters: unknown method");
  }
}
  size_t N = keys.size();
  if (static_cast<size_t>(maxKey) != N - 1)
    throw std::invalid_argument(
        "As of now, ShonanAveraging expects keys numbered 0..N-1");
  return N;
}
      measurement.print("Factor with incorrect noise model:\n");
      throw std::invalid_argument(
          "ShonanAveraging: measurements passed to "
          "constructor have incorrect dimension.");
  // Possibly add Karcher prior
  if (parameters_.beta > 0) {
    const size_t dim = SOn::Dimension(p);
    graph.emplace_shared<KarcherMeanFactor<SOn>>(graph.keys(), p, dim);
  }
  // Possibly add gauge factors - they are probably useless as gradient is zero
  if (parameters_.gamma > 0 && p > d + 1) {
template <size_t d>
std::shared_ptr<LevenbergMarquardtOptimizer>
ShonanAveraging<d>::createOptimizerAt(size_t p, const Values &initial) const {
  // Build graph
  NonlinearFactorGraph graph = buildGraphAt(p);

  // Anchor prior is added here as it depends on the initial value (and its
  // cost is zero)
  if (parameters_.alpha > 0) {
    const size_t dim = SOn::Dimension(p);
    const auto [i, value] = parameters_.anchor;
    auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha);
    graph.emplace_shared<PriorFactor<SOn>>(i, SOn::Lift(p, value.matrix()),
                                           model);
  }

  // Optimize
  return std::make_shared<LevenbergMarquardtOptimizer>(graph, initial,
                                                       parameters_.lm);
}

template <size_t d>
Values ShonanAveraging<d>::tryOptimizingAt(size_t p,
                                           const Values &initial) const {
  auto lm = createOptimizerAt(p, initial);
  return lm->optimize();
}
template <size_t d>
Matrix ShonanAveraging<d>::StiefelElementMatrix(const Values &values) {
  const size_t N = values.size();
  const size_t p = values.at<SOn>(0).rows();
  Matrix S(p, N * d);
  for (const auto& it : values.extract<SOn>()) {
    S.middleCols<d>(it.first * d) =
        it.second.matrix().leftCols<d>();  // project Qj to Stiefel
  }
  return S;
}
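// Shape note (illustrative): with N values in SO(p) and base dimension d, S
// is p x (N*d); e.g. for p = 5, d = 3, N = 2, S is 5 x 6 and
// S.middleCols<3>(3) holds the first three columns of the second rotation.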
  for (const auto& it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
  for (const auto& it : values.extract<SOn>()) {
    assert(it.second.rows() == p);
    const auto &M = it.second.matrix();
    const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>());
  const size_t N = S.cols() / d;

  // First, compute a thin SVD of S
  Eigen::JacobiSVD<Matrix> svd(S, Eigen::ComputeThinV);
  const Vector sigmas = svd.singularValues();

  // Construct a diagonal matrix comprised of the first d singular values
  using DiagonalMatrix = Eigen::DiagonalMatrix<double, d>;
  DiagonalMatrix Sigma_d;
  Sigma_d.diagonal() = sigmas.head<d>();

  // Now, construct a rank-d truncated singular value decomposition for S
  Matrix R = Sigma_d * svd.matrixV().leftCols<d>().transpose();

  // Count the number of blocks whose determinants have positive sign
  size_t numPositiveBlocks = 0;
  for (size_t i = 0; i < N; ++i) {
    // Compute the determinant of the ith dxd block of R
    double determinant = R.middleCols<d>(d * i).determinant();
    if (determinant > 0) ++numPositiveBlocks;
  }

  if (numPositiveBlocks < N / 2) {
    // Fewer than half of the blocks have the correct sign, so reverse their
    // orientations by multiplying with a reflection matrix
    DiagonalMatrix reflector;
    reflector.setIdentity();
    reflector.diagonal()(d - 1) = -1;
    R = reflector * R;
  }
  for (size_t j = 0; j < nrUnknowns(); ++j) {
  for (size_t j = 0; j < nrUnknowns(); ++j) {
    const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j));
  return roundSolutionS(S);
  for (const auto& it : values.extract<Rot>()) {
    result.insert(it.first, SO<d>(it.second.matrix()));
template <typename T, size_t d>
static double Kappa(const BinaryMeasurement<T> &measurement,
                    const ShonanAveragingParameters<d> &parameters) {
  const auto &isotropic = std::dynamic_pointer_cast<noiseModel::Isotropic>(
      measurement.noiseModel());
  double sigma;
  if (isotropic) {
    sigma = isotropic->sigma();
  } else {
    const auto &robust = std::dynamic_pointer_cast<noiseModel::Robust>(
        measurement.noiseModel());
    // A robust model is only acceptable when no certificate is expected
    if (robust) {
      if (parameters.getCertifyOptimality()) {
        throw std::invalid_argument(
            "Certification of optimality does not work with robust cost.");
      } else {
        sigma = 1;  // Certification not required, so use a default sigma
      }
    } else {
      throw std::invalid_argument(
          "Shonan averaging noise models must be isotropic (but robust losses "
          "are allowed).");
    }
  }
  return 1.0 / (sigma * sigma);
}
template <size_t d>
Sparse ShonanAveraging<d>::buildD() const {
  // Each measurement contributes 2*d elements along the diagonal of the
  // degree matrix
  static constexpr size_t stride = 2 * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t k = 0; k < d; k++) {
      // Elements of ith block-diagonal
      triplets.emplace_back(di + k, di + k, kappa);
      // Elements of jth block-diagonal
      triplets.emplace_back(dj + k, dj + k, kappa);
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse D(dN, dN);
  D.setFromTriplets(triplets.begin(), triplets.end());

  return D;
}
template <size_t d>
Sparse ShonanAveraging<d>::buildQ() const {
  // Each measurement contributes 2*d^2 elements on a pair of symmetric
  // off-diagonal blocks
  static constexpr size_t stride = 2 * d * d;

  // Reserve space for triplets
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * measurements_.size());

  for (const auto &measurement : measurements_) {
    // Get pose keys
    const auto &keys = measurement.keys();

    // Extract rotation measurement
    const auto &Rij = measurement.measured().matrix();

    // Get kappa from noise model
    double kappa = Kappa<Rot, d>(measurement, parameters_);

    const size_t di = d * keys[0], dj = d * keys[1];
    for (size_t r = 0; r < d; r++) {
      for (size_t c = 0; c < d; c++) {
        // Elements of ij block
        triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c));
        // Elements of ji block
        triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r));
      }
    }
  }

  // Construct and return a sparse matrix from these triplets
  const size_t dN = d * nrUnknowns();
  Sparse Q(dN, dN);
  Q.setFromTriplets(triplets.begin(), triplets.end());

  return Q;
}
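// Note (added for context; a sketch of how these builders are presumably
// combined elsewhere in the class): buildD() and buildQ() assemble the two
// halves of the connection Laplacian used by the cost and the certificate,
//
//   Sparse D = buildD();  // block-diagonal degree matrix
//   Sparse Q = buildQ();  // symmetric off-diagonal measurement blocks
//   Sparse L = D - Q;     // connection Laplacian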
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Matrix &S) const {
  // Each pose contributes d*d elements along the block-diagonal of Lambda
  static constexpr size_t stride = d * d;

  // Reserve space for triplets
  const size_t N = nrUnknowns();
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.reserve(stride * N);

  // Do sparse-dense multiply to get Q*S'
  auto QSt = Q_ * S.transpose();

  for (size_t j = 0; j < N; j++) {
    // Compute B, the building block for the j^th diagonal block of Lambda
    const size_t dj = d * j;
    Matrix B = QSt.middleRows(dj, d) * S.middleCols<d>(dj);

    // Elements of jth block-diagonal
    for (size_t r = 0; r < d; r++)
      for (size_t c = 0; c < d; c++)
        triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r)));
  }

  // Construct and return a sparse matrix from these triplets
  Sparse Lambda(d * N, d * N);
  Lambda.setFromTriplets(triplets.begin(), triplets.end());
  return Lambda;
}
template <size_t d>
Sparse ShonanAveraging<d>::computeLambda(const Values &values) const {
  // Project to the p x dN Stiefel manifold and call the version above
  const Matrix S = StiefelElementMatrix(values);
  return computeLambda(S);
}

template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Values &values) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}

template <size_t d>
Sparse ShonanAveraging<d>::computeA(const Matrix &S) const {
  auto Lambda = computeLambda(S);
  return Lambda - Q_;
}
static Vector perturb(const Vector &initialVector) {
  // Generate a random perturbation with magnitude 0.03 * ||x0||
  int n = initialVector.rows();
  Vector disturb = Vector::Random(n);
  disturb.normalize();

  double magnitude = initialVector.norm();
  Vector perturbedVector = initialVector + 0.03 * magnitude * disturb;
  perturbedVector.normalize();
  return perturbedVector;
}
static bool PowerMinimumEigenValue(
    const Sparse &A, const Matrix &S, double &minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4) {
  // a. Compute the dominant eigenpair of A using the power method
  PowerMethod<Sparse> pmOperator(A);
  const bool pmConverged = pmOperator.compute(maxIterations, 1e-5);

  // Check convergence of the power method
  if (!pmConverged) return false;

  const double pmEigenValue = pmOperator.eigenvalue();

  if (pmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    minEigenValue = pmEigenValue;
    if (minEigenVector) {
      *minEigenVector = pmOperator.eigenvector();
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  // b. Shift the spectrum and compute the dominant eigenpair of the shifted
  // operator with the accelerated power method
  const Sparse C =
      pmEigenValue * Matrix::Identity(A.rows(), A.cols()).sparseView() - A;
  const std::optional<Vector> initial = perturb(S.row(0));
  AcceleratedPowerMethod<Sparse> apmShiftedOperator(C, initial);

  const bool minConverged = apmShiftedOperator.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / pmEigenValue);

  if (!minConverged) return false;

  minEigenValue = pmEigenValue - apmShiftedOperator.eigenvalue();
  if (minEigenVector) {
    *minEigenVector = apmShiftedOperator.eigenvector();
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = apmShiftedOperator.nrIterations();
  return true;
}
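// Spectral-shift identity used above (for reference): if lambda_max >= 0 is
// the dominant eigenvalue of A, then C = lambda_max * I - A is positive
// semidefinite with dominant eigenvalue lambda_max - lambda_min(A), so
//
//   lambda_min(A) = lambda_max - lambda_max(C),
//
// which is exactly the recovery step performed on apmShiftedOperator.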
    Y = A_ * X + sigma_ * X;
static bool SparseMinimumEigenValue(
    const Sparse &A, const Matrix &S, double *minEigenValue,
    Vector *minEigenVector = 0, size_t *numIterations = 0,
    size_t maxIterations = 1000,
    double minEigenvalueNonnegativityTolerance = 10e-4,
    Eigen::Index numLanczosVectors = 20) {
  // a. Estimate the largest-magnitude eigenvalue of this matrix using Lanczos
  MatrixProdFunctor lmOperator(A);
  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      lmEigenValueSolver(&lmOperator, 1,
                         std::min(numLanczosVectors, A.rows()));
  lmEigenValueSolver.init();

  const int lmConverged = lmEigenValueSolver.compute(
      maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  // Check convergence and bail out if necessary
  if (lmConverged != 1) return false;

  const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0);

  if (lmEigenValue < 0) {
    // The largest-magnitude eigenvalue is negative, and therefore also the
    // minimum eigenvalue, so just return this solution
    *minEigenValue = lmEigenValue;
    if (minEigenVector) {
      *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0);
      minEigenVector->normalize();  // Ensure that this is a unit vector
    }
    return true;
  }

  // The largest-magnitude eigenvalue is positive, and is therefore the
  // maximum eigenvalue. Shift the spectrum by -2 * lmEigenValue so that the
  // minimum eigenvalue of A becomes the largest-magnitude eigenvalue of the
  // shifted operator.
  MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue);
  Spectra::SymEigsSolver<double, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN,
                         MatrixProdFunctor>
      minEigenValueSolver(&minShiftedOperator, 1,
                          std::min(numLanczosVectors, A.rows()));

  // If S is a critical point, its rows are good initializations for the
  // eigenvector, so perturb the first row slightly to break symmetry
  Vector v0 = S.row(0);
  Vector perturbation(v0.size());
  perturbation.setRandom();
  perturbation.normalize();
  Vector xinit = v0 + (.03 * v0.norm()) * perturbation;  // Perturb v0 by ~3%

  // Use this to initialize the eigensolver
  minEigenValueSolver.init(xinit.data());

  const int minConverged = minEigenValueSolver.compute(
      maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue,
      Spectra::SELECT_EIGENVALUE::LARGEST_MAGN);

  if (minConverged != 1) return false;

  *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue;
  if (minEigenVector) {
    *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0);
    minEigenVector->normalize();  // Ensure that this is a unit vector
  }
  if (numIterations) *numIterations = minEigenValueSolver.num_iterations();
  return true;
}
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValue(const Values &values,
                                                Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue;
  bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "SparseMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}
template <size_t d>
double ShonanAveraging<d>::computeMinEigenValueAP(const Values &values,
                                                  Vector *minEigenVector) const {
  assert(values.size() == nrUnknowns());
  const Matrix S = StiefelElementMatrix(values);
  auto A = computeA(S);

  double minEigenValue = 0;
  bool success = PowerMinimumEigenValue(A, S, minEigenValue, minEigenVector);
  if (!success) {
    throw std::runtime_error(
        "PowerMinimumEigenValue failed to compute minimum eigenvalue.");
  }
  return minEigenValue;
}
template <size_t d>
std::pair<double, Vector> ShonanAveraging<d>::computeMinEigenVector(
    const Values &values) const {
  Vector minEigenVector;
  double minEigenValue = computeMinEigenValue(values, &minEigenVector);
  return {minEigenValue, minEigenVector};
}

template <size_t d>
bool ShonanAveraging<d>::checkOptimality(const Values &values) const {
  double minEigenValue = computeMinEigenValue(values);
  return minEigenValue > parameters_.optimalityThreshold;
}
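// Usage sketch (illustrative; `shonan` and the optimized values `Qstar` are
// assumed to exist):
//
//   auto [minEigenValue, minEigenVector] = shonan.computeMinEigenVector(Qstar);
//   bool certified = shonan.checkOptimality(Qstar);
//
// The default optimality threshold is slightly negative, so small numerical
// violations of lambda_min >= 0 still certify the solution.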
template <size_t d>
VectorValues ShonanAveraging<d>::TangentVectorValues(size_t p,
                                                     const Vector &v) {
  VectorValues delta;

  // Create a tangent direction xi with eigenvector segment v_i
  const size_t dimension = SOn::Dimension(p);
  double sign0 = pow(-1.0, round((p + 1) / 2) + 1);
  for (size_t i = 0; i < v.size() / d; i++) {
    // Assumes key is 0-based integer
    const auto v_i = v.segment<d>(d * i);
    Vector xi = Vector::Zero(dimension);
    double sign = sign0;
    for (size_t j = 0; j < d; j++) {
      xi(j + p - d - 1) = sign * v_i(d - j - 1);
      sign = -sign;
    }
    delta.insert(i, xi);
  }
  return delta;
}
template <size_t d>
Matrix ShonanAveraging<d>::riemannianGradient(size_t p,
                                              const Values &values) const {
  Matrix S_dot = StiefelElementMatrix(values);

  // Calculate the Euclidean gradient of F(S_dot) at S_dot
  Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose();

  // Project the Euclidean gradient onto the tangent space
  Matrix symBlockDiagProduct(p, d * nrUnknowns());
  for (size_t i = 0; i < nrUnknowns(); i++) {
    // Compute the block product
    const size_t di = d * i;
    const Matrix P = S_dot.middleCols<d>(di).transpose() *
                     euclideanGradient.middleCols<d>(di);
    // Symmetrize this block
    const Matrix S = .5 * (P + P.transpose());
    // Compute S_dot * S and set the corresponding block
    symBlockDiagProduct.middleCols<d>(di) = S_dot.middleCols<d>(di) * S;
  }
  Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct;
  return riemannianGradient;
}
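// The projection above implements the standard tangent-space formula on the
// Stiefel product manifold (reference form):
//
//   grad F(Y) = nabla F(Y) - Y * SymBlockDiag(Y^T * nabla F(Y)),
//
// i.e. the Euclidean gradient minus its symmetrized block-diagonal component.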
template <size_t d>
Values ShonanAveraging<d>::LiftwithDescent(size_t p, const Values &values,
                                           const Vector &minEigenVector) {
  Values lifted = LiftTo<SOn>(p, values);
  VectorValues delta = TangentVectorValues(p, minEigenVector);
  return lifted.retract(delta);
}
template <size_t d>
Values ShonanAveraging<d>::initializeWithDescent(
    size_t p, const Values &values, const Vector &minEigenVector,
    double minEigenValue, double gradienTolerance,
    double preconditionedGradNormTolerance) const {
  double funcVal = costAt(p - 1, values);
  double alphaMin = 1e-2;
  double alpha =
      std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue));
  vector<double> alphas;
  vector<double> fvals;

  // Line search along the descent direction
  while (alpha >= alphaMin) {
    Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector);
    double funcValTest = costAt(p, Qplus);
    Matrix gradTest = riemannianGradient(p, Qplus);
    double gradTestNorm = gradTest.norm();
    // Record alpha and funcVal
    alphas.push_back(alpha);
    fvals.push_back(funcValTest);
    if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) {
      break;  // Found a descent step
    }
    alpha /= 2;
  }

  // Fall back to the best step seen during the line search
  auto fminIter = min_element(fvals.begin(), fvals.end());
  auto minIdx = distance(fvals.begin(), fminIter);
  double fMin = fvals[minIdx];
  double aMin = alphas[minIdx];
  if (fMin < funcVal) {
    Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector);
    return Qplus;
  }

  return LiftwithDescent(p, values, alpha * minEigenVector);
}
  for (size_t j = 0; j < nrUnknowns(); j++) {
template <size_t d>
Values ShonanAveraging<d>::initializeRandomlyAt(size_t p,
                                                std::mt19937 &rng) const {
  const Values randomRotations = initializeRandomly(rng);
  return LiftTo<Rot3>(p, randomRotations);  // lift to p!
}
template <size_t d>
std::pair<Values, double> ShonanAveraging<d>::run(const Values &initialEstimate,
                                                  size_t pMin,
                                                  size_t pMax) const {
  if (pMin < d) {
    throw std::runtime_error("pMin is smaller than the base dimension d");
  }
  Values Qstar;
  Values initialSOp = LiftTo<Rot>(pMin, initialEstimate);  // lift to pMin!
  for (size_t p = pMin; p <= pMax; p++) {
    // Optimize until convergence at this level
    Qstar = tryOptimizingAt(p, initialSOp);
    if (parameters_.getUseHuber() || !parameters_.getCertifyOptimality()) {
      // In this case, there is no optimality certification
      if (pMin != pMax) {
        throw std::runtime_error(
            "When using robust norm, Shonan only tests a single rank. Set pMin = pMax");
      }
      const Values SO3Values = roundSolution(Qstar);
      return {SO3Values, 0};
    }

    // Check certificate of global optimality
    Vector minEigenVector;
    double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector);
    if (minEigenValue > parameters_.optimalityThreshold) {
      // If at global optimum, round and return solution
      const Values SO3Values = roundSolution(Qstar);
      return {SO3Values, minEigenValue};
    }

    // Not at global optimum yet, so check whether we will go to next level
    if (p != pMax) {
      // Calculate initial estimate for next level by following minEigenVector
      initialSOp =
          initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue);
    }
  }
  throw std::runtime_error("Shonan::run did not converge for given pMax");
}
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot2> can only convert Pose2 measurements "
        "with Gaussian noise models.");
  const Matrix3 M = gaussian->covariance();
  auto gaussian =
      std::dynamic_pointer_cast<noiseModel::Gaussian>(f->noiseModel());
  if (!gaussian)
    throw std::invalid_argument(
        "parseMeasurements<Rot3> can only convert Pose3 measurements "
        "with Gaussian noise models.");
  const Matrix6 M = gaussian->covariance();