NoiseModel.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#include <gtsam/linear/NoiseModel.h>
#include <gtsam/base/timing.h>

#include <cmath>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <typeinfo>

using namespace std;

namespace gtsam {
namespace noiseModel {

/* ************************************************************************* */
// update A, b
// A' := A_S - a*r' and b' := b - a*d
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
template<class MATRIX>
void updateAb(MATRIX& Ab, int j, const Vector& a, const Vector& rd) {
  size_t n = Ab.cols()-1;
  Ab.middleCols(j+1,n-j) -= a * rd.segment(j+1, n-j).transpose();
}

/* ************************************************************************* */
// check *above the diagonal* for non-zero entries
std::optional<Vector> checkIfDiagonal(const Matrix& M) {
  size_t m = M.rows(), n = M.cols();
  assert(m > 0);
  // check all non-diagonal entries
  bool full = false;
  size_t i, j;
  for (i = 0; i < m; i++)
    if (!full)
      for (j = i + 1; j < n; j++)
        if (std::abs(M(i, j)) > 1e-9) {
          full = true;
          break;
        }
  if (full) {
    return {};
  } else {
    Vector diagonal(n);
    for (j = 0; j < n; j++)
      diagonal(j) = M(j, j);
    return std::move(diagonal);
  }
}
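
// Example (sketch, not from the original file): checkIfDiagonal returns the
// diagonal as a Vector when all entries above the diagonal are (numerically)
// zero, and an empty optional otherwise:
//
//   Matrix D = (Matrix(2, 2) << 4.0, 0.0, 0.0, 9.0).finished();
//   Matrix F = (Matrix(2, 2) << 4.0, 0.5, 0.5, 9.0).finished();
//   auto d = checkIfDiagonal(D);  // holds Vector2(4.0, 9.0)
//   auto f = checkIfDiagonal(F);  // std::nullopt, matrix is "full"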

/* ************************************************************************* */
Vector Base::sigmas() const {
  throw("Base::sigmas: sigmas() not implemented for this noise model");
}

/* ************************************************************************* */
double Base::squaredMahalanobisDistance(const Vector& v) const {
  // Note: for Diagonal, which does ediv_, will be correct for constraints
  Vector w = whiten(v);
  return w.dot(w);
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
  size_t m = R.rows(), n = R.cols();
  if (m != n)
    throw invalid_argument("Gaussian::SqrtInformation: R not square");
  if (smart) {
    std::optional<Vector> diagonal = checkIfDiagonal(R);
    if (diagonal)
      return Diagonal::Sigmas(diagonal->array().inverse(), true);
  }
  // NOTE(frank): only reaches here if !(smart && diagonal)
  return std::make_shared<Gaussian>(R.rows(), R);
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Information(const Matrix& information, bool smart) {
  size_t m = information.rows(), n = information.cols();
  if (m != n)
    throw invalid_argument("Gaussian::Information: information matrix not square");
  std::optional<Vector> diagonal = {};
  if (smart)
    diagonal = checkIfDiagonal(information);
  if (diagonal)
    return Diagonal::Precisions(*diagonal, true);
  else {
    Eigen::LLT<Matrix> llt(information);
    Matrix R = llt.matrixU();
    return std::make_shared<Gaussian>(n, R);
  }
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance,
                                          bool smart) {
  size_t m = covariance.rows(), n = covariance.cols();
  if (m != n)
    throw invalid_argument("Gaussian::Covariance: covariance not square");
  std::optional<Vector> variances = {};
  if (smart)
    variances = checkIfDiagonal(covariance);
  if (variances)
    return Diagonal::Variances(*variances, true);
  else {
    // NOTE: if cov = L'*L, then the square root information R can be found by
    // QR, as L.inverse() = Q*R, with Q some rotation matrix. However, R has
    // annoying sign flips with respect to the simpler Information(inv(cov)),
    // hence we choose the simpler path here:
    return Information(covariance.inverse(), false);
  }
}
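
// Example (sketch, not from the original file): the smart factories pick the
// cheapest model that represents the input. A diagonal covariance yields a
// Diagonal model; only a full matrix produces a dense Gaussian with
// upper-triangular R:
//
//   Matrix full = (Matrix(2, 2) << 1.0, 0.3, 0.3, 2.0).finished();
//   auto g = noiseModel::Gaussian::Covariance(full);   // dense Gaussian
//   Matrix diag = (Matrix(2, 2) << 0.25, 0.0, 0.0, 4.0).finished();
//   auto d = noiseModel::Gaussian::Covariance(diag);   // actually a Diagonal
//   Vector e = (Vector(2) << 1.0, 1.0).finished();
//   Vector w = d->whiten(e);                           // (1/0.5, 1/2.0) = (2.0, 0.5)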

/* ************************************************************************* */
void Gaussian::print(const string& name) const {
  gtsam::print(thisR(), name + "Gaussian ");
}

/* ************************************************************************* */
bool Gaussian::equals(const Base& expected, double tol) const {
  const Gaussian* p = dynamic_cast<const Gaussian*>(&expected);
  if (p == nullptr) return false;
  if (typeid(*this) != typeid(*p)) return false;
  return equal_with_abs_tol(R(), p->R(), sqrt(tol));
}

/* ************************************************************************* */
Matrix Gaussian::covariance() const {
  // Uses a fast version of `covariance = information().inverse();`
  const Matrix& R = this->R();
  Matrix I = Matrix::Identity(R.rows(), R.cols());
  // Fast inverse of upper-triangular matrix R using back-substitution
  Matrix Rinv = R.triangularView<Eigen::Upper>().solve(I);
  // (R' * R)^{-1} = R^{-1} * R^{-1}'
  return Rinv * Rinv.transpose();
}

/* ************************************************************************* */
Vector Gaussian::sigmas() const {
  return Vector(covariance().diagonal()).cwiseSqrt();
}

/* ************************************************************************* */
Vector Gaussian::whiten(const Vector& v) const {
  return thisR() * v;
}

/* ************************************************************************* */
Vector Gaussian::unwhiten(const Vector& v) const {
  return backSubstituteUpper(thisR(), v);
}

/* ************************************************************************* */
Matrix Gaussian::Whiten(const Matrix& H) const {
  return thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Matrix& H) const {
  H = thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = thisR() * H;
}

/* ************************************************************************* */
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab) const {

  gttic(Gaussian_noise_model_QR);

  static const bool debug = false;

  // get size(A) and maxRank
  // TODO: really no rank problems ?
  size_t m = Ab.rows(), n = Ab.cols()-1;
  size_t maxRank = min(m,n);

  // pre-whiten everything (cheaply if possible)
  WhitenInPlace(Ab);

  if (debug) gtsam::print(Ab, "Whitened Ab: ");

  // Eigen QR - much faster than older householder approach
  inplace_QR(Ab);
  Ab.triangularView<Eigen::StrictlyLower>().setZero();

  // hand-coded householder implementation
  // TODO: necessary to isolate last column?
  // householder(Ab, maxRank);

  return noiseModel::Unit::Create(maxRank);
}
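
// Example (sketch, not from the original file): QR eliminates a whitened
// augmented system [A|b] in place. The input is overwritten with [R|d] and a
// Unit model of the resulting dimension is returned:
//
//   auto model = noiseModel::Isotropic::Sigma(3, 0.5);
//   Matrix Ab(3, 4);                 // 3x3 Jacobian A augmented with rhs b
//   Ab << 1, 0, 0, 1,
//         0, 1, 0, 2,
//         0, 0, 1, 3;
//   SharedDiagonal unitNoise = model->QR(Ab);   // Ab now holds [R|d]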

void Gaussian::WhitenSystem(vector<Matrix>& A, Vector& b) const {
  for (Matrix& Aj : A) { WhitenInPlace(Aj); }
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A, Vector& b) const {
  WhitenInPlace(A);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const {
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const {
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  WhitenInPlace(A3);
  whitenInPlace(b);
}

Matrix Gaussian::information() const { return R().transpose() * R(); }

/* ************************************************************************* */
// Diagonal
/* ************************************************************************* */
Diagonal::Diagonal() :
    Gaussian(1) // TODO: Frank asks: really sure about this?
{
}

/* ************************************************************************* */
Diagonal::Diagonal(const Vector& sigmas)
    : Gaussian(sigmas.size()),
      sigmas_(sigmas),
      invsigmas_(sigmas.array().inverse()),
      precisions_(invsigmas_.array().square()) {
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) {
  if (smart) {
    // check whether all the same entry
    size_t n = variances.size();
    for (size_t j = 1; j < n; j++)
      if (variances(j) != variances(0)) goto full;
    return Isotropic::Variance(n, variances(0), true);
  }
  full: return shared_ptr(new Diagonal(variances.cwiseSqrt()));
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) {
  if (smart) {
    size_t n = sigmas.size();
    if (n == 0) goto full;
    // look for zeros to make a constraint
    for (size_t j = 0; j < n; ++j)
      if (sigmas(j) < 1e-8)
        return Constrained::MixedSigmas(sigmas);
    // check whether all the same entry
    for (size_t j = 1; j < n; j++)
      if (sigmas(j) != sigmas(0)) goto full;
    return Isotropic::Sigma(n, sigmas(0), true);
  }
  full: return Diagonal::shared_ptr(new Diagonal(sigmas));
}
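
// Example (sketch, not from the original file): with smart=true (the default),
// Sigmas inspects the vector and may return a cheaper model:
//
//   auto iso  = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.1));  // -> Isotropic
//   auto diag = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.2, 0.3));  // -> Diagonal
//   auto mix  = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.2, 0.0));  // -> Constrained (zero sigma)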

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Precisions(const Vector& precisions,
                                          bool smart) {
  return Variances(precisions.array().inverse(), smart);
}

/* ************************************************************************* */
void Diagonal::print(const string& name) const {
  gtsam::print(sigmas_, name + "diagonal sigmas ");
}

/* ************************************************************************* */
Vector Diagonal::whiten(const Vector& v) const {
  return v.cwiseProduct(invsigmas_);
}

Vector Diagonal::unwhiten(const Vector& v) const {
  return v.cwiseProduct(sigmas_);
}

Matrix Diagonal::Whiten(const Matrix& H) const {
  return vector_scale(invsigmas(), H);
}

void Diagonal::WhitenInPlace(Matrix& H) const {
  vector_scale_inplace(invsigmas(), H);
}

void Diagonal::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = invsigmas().asDiagonal() * H;
}

/* ************************************************************************* */
// Constrained
/* ************************************************************************* */

namespace internal {
// switch precisions and invsigmas to finite value
// TODO: why?? And, why not just ask s==0.0 below ?
static void fix(const Vector& sigmas, Vector& precisions, Vector& invsigmas) {
  for (Vector::Index i = 0; i < sigmas.size(); ++i)
    if (!std::isfinite(1. / sigmas[i])) {
      precisions[i] = 0.0;
      invsigmas[i] = 0.0;
    }
}
}  // namespace internal

/* ************************************************************************* */
Constrained::Constrained(const Vector& sigmas)
  : Diagonal(sigmas), mu_(Vector::Constant(sigmas.size(), 1000.0)) {
  internal::fix(sigmas, precisions_, invsigmas_);
}

/* ************************************************************************* */
Constrained::Constrained(const Vector& mu, const Vector& sigmas)
  : Diagonal(sigmas), mu_(mu) {
  internal::fix(sigmas, precisions_, invsigmas_);
}

/* ************************************************************************* */
Constrained::shared_ptr Constrained::MixedSigmas(const Vector& mu,
                                                 const Vector& sigmas) {
  return shared_ptr(new Constrained(mu, sigmas));
}

/* ************************************************************************* */
bool Constrained::constrained(size_t i) const {
  // TODO why not just check sigmas_[i]==0.0 ?
  return !std::isfinite(1./sigmas_[i]);
}

/* ************************************************************************* */
void Constrained::print(const std::string& name) const {
  gtsam::print(sigmas_, name + "constrained sigmas ");
  gtsam::print(mu_, name + "constrained mu ");
}

/* ************************************************************************* */
Vector Constrained::whiten(const Vector& v) const {
  // If sigmas[i] is not 0 then divide v[i] by sigmas[i], as is usually done in
  // other normal Gaussian noise models. Otherwise, sigmas[i] = 0 indicates a
  // hard constraint, and we don't do anything.
  const Vector& a = v;
  const Vector& b = sigmas_;
  size_t n = a.size();
  assert (b.size()==a.size());
  Vector c(n);
  for (size_t i = 0; i < n; i++) {
    const double& ai = a(i), bi = b(i);
    c(i) = (bi==0.0) ? ai : ai/bi; // NOTE: not ediv_()
  }
  return c;
}
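
// Example (sketch, not from the original file): a constrained model whitens the
// free dimensions but passes constrained (sigma == 0) dimensions through
// unchanged, so the error there is penalized via mu instead:
//
//   auto model = noiseModel::Constrained::MixedSigmas(Vector3(0.1, 0.0, 0.1));
//   Vector w = model->whiten(Vector3(1.0, 2.0, 3.0));  // (10.0, 2.0, 30.0)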

/* ************************************************************************* */
Constrained::shared_ptr Constrained::MixedSigmas(const Vector& sigmas) {
  return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas);
}

Constrained::shared_ptr Constrained::MixedSigmas(double m,
                                                 const Vector& sigmas) {
  return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas);
}

Constrained::shared_ptr Constrained::MixedVariances(const Vector& mu,
                                                    const Vector& variances) {
  return shared_ptr(new Constrained(mu, variances.cwiseSqrt()));
}
Constrained::shared_ptr Constrained::MixedVariances(const Vector& variances) {
  return shared_ptr(new Constrained(variances.cwiseSqrt()));
}

Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& mu,
                                                     const Vector& precisions) {
  return MixedVariances(mu, precisions.array().inverse());
}
Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& precisions) {
  return MixedVariances(precisions.array().inverse());
}

/* ************************************************************************* */
double Constrained::squaredMahalanobisDistance(const Vector& v) const {
  Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
  for (size_t i=0; i<dim_; ++i)  // add mu weights on constrained variables
    if (constrained(i)) // whiten makes constrained variables zero
      w[i] = v[i] * sqrt(mu_[i]); // TODO: may want to store sqrt rather than rebuild
  return w.dot(w);
}
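
// Example (sketch, not from the original file): on constrained dimensions the
// squared distance uses the penalty weight mu (default 1000) instead of 1/sigma^2:
//
//   auto model = noiseModel::Constrained::MixedSigmas(Vector2(1.0, 0.0));
//   double d2 = model->squaredMahalanobisDistance(Vector2(1.0, 1.0));
//   // = 1.0^2 * (1/1.0)^2 + 1.0^2 * 1000 = 1001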

/* ************************************************************************* */
Matrix Constrained::Whiten(const Matrix& H) const {
  Matrix A = H;
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of A as is
      A.row(i) *= invsigmas_(i);
  return A;
}

/* ************************************************************************* */
void Constrained::WhitenInPlace(Matrix& H) const {
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of H as is
      H.row(i) *= invsigmas_(i);
}

/* ************************************************************************* */
void Constrained::WhitenInPlace(Eigen::Block<Matrix> H) const {
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of H as is
      H.row(i) *= invsigmas_(i);
}

/* ************************************************************************* */
Constrained::shared_ptr Constrained::unit() const {
  Vector sigmas = Vector::Ones(dim());
  for (size_t i=0; i<dim(); ++i)
    if (constrained(i))
      sigmas(i) = 0.0;
  return MixedSigmas(mu_, sigmas);
}

/* ************************************************************************* */
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder

// Check whether column a triggers a constraint and corresponding variable is deterministic
// Return constraint_row with maximum element in case variable plays in multiple constraints
template <typename VECTOR>
std::optional<size_t> check_if_constraint(VECTOR a, const Vector& invsigmas, size_t m) {
  std::optional<size_t> constraint_row;
  // not zero, so roundoff errors will not be counted
  // TODO(frank): that's a fairly crude way of dealing with roundoff errors :-(
  double max_element = 1e-9;
  for (size_t i = 0; i < m; i++) {
    if (!std::isinf(invsigmas[i]))
      continue;
    double abs_ai = std::abs(a(i,0));
    if (abs_ai > max_element) {
      max_element = abs_ai;
      constraint_row = i;
    }
  }
  return constraint_row;
}

SharedDiagonal Constrained::QR(Matrix& Ab) const {
  static const double kInfinity = std::numeric_limits<double>::infinity();

  // get size(A) and maxRank
  size_t m = Ab.rows();
  const size_t n = Ab.cols() - 1;
  const size_t maxRank = min(m, n);

  // create storage for [R d]
  typedef std::tuple<size_t, Matrix, double> Triple;
  list<Triple> Rd;

  Matrix rd(1, n + 1); // and for row of R
  Vector invsigmas = sigmas_.array().inverse();
  Vector weights = invsigmas.array().square(); // calculate weights once

  // We loop over all columns, because the columns that can be eliminated
  // are not necessarily contiguous. For each one, estimate the corresponding
  // scalar variable x as d-rS, with S the separator (remaining columns).
  // Then update A and b by substituting x with d-rS, zero-ing out x's column.
  for (size_t j = 0; j < n; ++j) {
    // extract the first column of A
    Eigen::Block<Matrix> a = Ab.block(0, j, m, 1);

    // Check whether we need to handle as a constraint
    std::optional<size_t> constraint_row = check_if_constraint(a, invsigmas, m);

    if (constraint_row) {
      // Handle this as a constraint, as the i^th row has zero sigma with non-zero entry A(i,j)

      // In this case, the row in [R|d] is simply the row in [A|b]
      // NOTE(frank): we used to divide by a[i] but there is no need with a constraint
      rd = Ab.row(*constraint_row);

      // Construct solution (r, d, sigma)
      Rd.push_back(std::make_tuple(j, rd, kInfinity));

      // exit after rank exhausted
      if (Rd.size() >= maxRank)
        break;

      // The constraint row will be zeroed out, so we can save work by swapping in the
      // last valid row and decreasing m. This will save work on subsequent down-dates, too.
      m -= 1;
      if (*constraint_row != m) {
        Ab.row(*constraint_row) = Ab.row(m);
        weights(*constraint_row) = weights(m);
        invsigmas(*constraint_row) = invsigmas(m);
      }

      // get a reduced a-column which is now shorter
      Eigen::Block<Matrix> a_reduced = Ab.block(0, j, m, 1);
      a_reduced *= (1.0/rd(0, j)); // NOTE(frank): this is the 1/a[i] = 1/rd(0,j) factor we need!

      // Rank-1 down-date of Ab, expensive, using outer product
      Ab.block(0, j + 1, m, n - j).noalias() -= a_reduced * rd.middleCols(j + 1, n - j);
    } else {
      // Treat in normal Gram-Schmidt way
      // Calculate weighted pseudo-inverse and corresponding precision

      // Form pseudo-inverse inv(a'inv(Sigma)a)a'inv(Sigma)
      // For diagonal Sigma, inv(Sigma) = diag(precisions)
      double precision = 0;
      Vector pseudo(m); // allocate storage for pseudo-inverse
      for (size_t i = 0; i < m; i++) {
        double ai = a(i, 0);
        if (std::abs(ai) > 1e-9) { // also catches remaining sigma==0 rows
          pseudo[i] = weights[i] * ai;
          precision += pseudo[i] * ai;
        } else
          pseudo[i] = 0;
      }

      if (precision > 1e-8) {
        pseudo /= precision;

        // create solution [r d], rhs is automatically r(n)
        rd(0, j) = 1.0; // put 1 on diagonal
        rd.block(0, j + 1, 1, n - j) = pseudo.transpose() * Ab.block(0, j + 1, m, n - j);

        // construct solution (r, d, sigma)
        Rd.push_back(std::make_tuple(j, rd, precision));
      } else {
        // If precision is zero, no information on this column
        // This is actually not limited to constraints, could happen in Gaussian::QR
        // In that case, we're probably hosed. TODO: make sure Householder is rank-revealing
        continue; // but even if not, no need to update if a==zeros
      }

      // exit after rank exhausted
      if (Rd.size() >= maxRank)
        break;

      // Rank-1 down-date of Ab, expensive, using outer product
      Ab.block(0, j + 1, m, n - j).noalias() -= a * rd.middleCols(j + 1, n - j);
    }
  }

  // Create storage for precisions
  Vector precisions(Rd.size());

  // Write back result in Ab, imperative as we are
  size_t i = 0; // start with first row
  bool mixed = false;
  Ab.setZero(); // make sure we don't look below
  for (const Triple& t: Rd) {
    const size_t& j = std::get<0>(t);
    const Matrix& rd = std::get<1>(t);
    precisions(i) = std::get<2>(t);
    if (std::isinf(precisions(i)))
      mixed = true;
    Ab.block(i, j, 1, n + 1 - j) = rd.block(0, j, 1, n + 1 - j);
    i += 1;
  }

  // Must include mu, as the defaults might be higher, resulting in non-convergence
  return mixed ? Constrained::MixedPrecisions(mu_, precisions) : Diagonal::Precisions(precisions);
}
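
// Example (sketch, not from the original file): eliminating a system under a
// constrained model. Rows with sigma == 0 are handled as hard constraints and
// the returned noise model carries infinite precision (mu) on those rows:
//
//   Matrix Ab(2, 3);
//   Ab << 1, 1, 2,
//         0, 1, 3;
//   auto model = noiseModel::Constrained::MixedSigmas(Vector2(0.0, 1.0));
//   SharedDiagonal result = model->QR(Ab);   // Ab is overwritten with [R|d]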

/* ************************************************************************* */
// Isotropic
/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Sigma(size_t dim, double sigma, bool smart) {
  if (smart && std::abs(sigma-1.0)<1e-9) return Unit::Create(dim);
  return shared_ptr(new Isotropic(dim, sigma));
}

/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Variance(size_t dim, double variance, bool smart) {
  if (smart && std::abs(variance-1.0)<1e-9) return Unit::Create(dim);
  return shared_ptr(new Isotropic(dim, sqrt(variance)));
}
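
// Example (sketch, not from the original file): both factories collapse to the
// dimensionless Unit model when the resulting sigma is 1:
//
//   auto u = noiseModel::Isotropic::Sigma(6, 1.0);      // actually Unit::Create(6)
//   auto i = noiseModel::Isotropic::Variance(6, 0.01);  // Isotropic with sigma = 0.1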

/* ************************************************************************* */
void Isotropic::print(const string& name) const {
  cout << name << "isotropic dim=" << dim() << " sigma=" << sigma_ << endl;
}

/* ************************************************************************* */
double Isotropic::squaredMahalanobisDistance(const Vector& v) const {
  return v.dot(v) * invsigma_ * invsigma_;
}

/* ************************************************************************* */
Vector Isotropic::whiten(const Vector& v) const {
  return v * invsigma_;
}

/* ************************************************************************* */
Vector Isotropic::unwhiten(const Vector& v) const {
  return v * sigma_;
}

/* ************************************************************************* */
Matrix Isotropic::Whiten(const Matrix& H) const {
  return invsigma_ * H;
}

/* ************************************************************************* */
void Isotropic::WhitenInPlace(Matrix& H) const {
  H *= invsigma_;
}

/* ************************************************************************* */
void Isotropic::whitenInPlace(Vector& v) const {
  v *= invsigma_;
}

/* ************************************************************************* */
void Isotropic::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H *= invsigma_;
}

/* ************************************************************************* */
// Unit
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
  cout << name << "unit (" << dim_ << ") " << endl;
}

/* ************************************************************************* */
double Unit::squaredMahalanobisDistance(const Vector& v) const {
  return v.dot(v);
}

/* ************************************************************************* */
// Robust
/* ************************************************************************* */

void Robust::print(const std::string& name) const {
  robust_->print(name);
  noise_->print(name);
}

bool Robust::equals(const Base& expected, double tol) const {
  const Robust* p = dynamic_cast<const Robust*>(&expected);
  if (p == nullptr) return false;
  return noise_->equals(*p->noise_,tol) && robust_->equals(*p->robust_,tol);
}

void Robust::WhitenSystem(Vector& b) const {
  noise_->whitenInPlace(b);
  robust_->reweight(b);
}

void Robust::WhitenSystem(vector<Matrix>& A, Vector& b) const {
  noise_->WhitenSystem(A,b);
  robust_->reweight(A,b);
}

void Robust::WhitenSystem(Matrix& A, Vector& b) const {
  noise_->WhitenSystem(A,b);
  robust_->reweight(A,b);
}

void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const {
  noise_->WhitenSystem(A1,A2,b);
  robust_->reweight(A1,A2,b);
}

void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const {
  noise_->WhitenSystem(A1,A2,A3,b);
  robust_->reweight(A1,A2,A3,b);
}

Vector Robust::unweightedWhiten(const Vector& v) const {
  return noise_->unweightedWhiten(v);
}
double Robust::weight(const Vector& v) const {
  return robust_->weight(v.norm());
}

Robust::shared_ptr Robust::Create(
    const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise) {
  return shared_ptr(new Robust(robust,noise));
}
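
// Example (sketch, not from the original file): wrapping a Gaussian model with a
// robust loss. mEstimator::Huber::Create and its 1.345 threshold are one common
// choice, not something mandated by this file:
//
//   auto gaussian = noiseModel::Isotropic::Sigma(2, 0.5);
//   auto huber = noiseModel::mEstimator::Huber::Create(1.345);
//   auto robust = noiseModel::Robust::Create(huber, gaussian);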

/* ************************************************************************* */

} // namespace noiseModel
} // namespace gtsam