NoiseModel.cpp
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#include <gtsam/linear/NoiseModel.h>
#include <gtsam/base/timing.h>

#include <boost/format.hpp>
#include <boost/make_shared.hpp>

#include <cmath>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <typeinfo>

using namespace std;

namespace gtsam {
namespace noiseModel {
/* ************************************************************************* */
// update A, b
// A' := A_S - a*r and b' := b - a*d
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
template<class MATRIX>
void updateAb(MATRIX& Ab, int j, const Vector& a, const Vector& rd) {
  size_t n = Ab.cols()-1;
  Ab.middleCols(j+1,n-j) -= a * rd.segment(j+1, n-j).transpose();
}

/* ************************************************************************* */
// check *above the diagonal* for non-zero entries
boost::optional<Vector> checkIfDiagonal(const Matrix M) {
  size_t m = M.rows(), n = M.cols();
  // check all non-diagonal entries
  bool full = false;
  size_t i, j;
  for (i = 0; i < m; i++)
    if (!full)
      for (j = i + 1; j < n; j++)
        if (std::abs(M(i, j)) > 1e-9) {
          full = true;
          break;
        }
  if (full) {
    return boost::none;
  } else {
    Vector diagonal(n);
    for (j = 0; j < n; j++)
      diagonal(j) = M(j, j);
    return diagonal;
  }
}

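// [Editorial sketch, not part of the original source] Minimal illustration of
// the helper above: checkIfDiagonal() returns the diagonal when every entry
// above the diagonal is numerically zero, and boost::none otherwise. The
// function name exampleCheckIfDiagonal is hypothetical.
static void exampleCheckIfDiagonal() {
  Matrix D = (Matrix(2, 2) << 4.0, 0.0, 0.0, 9.0).finished();
  Matrix F = (Matrix(2, 2) << 4.0, 1.0, 1.0, 9.0).finished();
  boost::optional<Vector> d1 = checkIfDiagonal(D);  // contains [4, 9]
  boost::optional<Vector> d2 = checkIfDiagonal(F);  // boost::none
  if (d1 && !d2)
    std::cout << "diagonal entries: " << d1->transpose() << std::endl;
}
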
/* ************************************************************************* */
Vector Base::sigmas() const {
  throw("Base::sigmas: sigmas() not implemented for this noise model");
}

/* ************************************************************************* */
double Base::squaredMahalanobisDistance(const Vector& v) const {
  // Note: for Diagonal, which does ediv_, will be correct for constraints
  Vector w = whiten(v);
  return w.dot(w);
}

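// [Editorial sketch, not part of the original source] The squared Mahalanobis
// distance is just the squared norm of the whitened error. With an isotropic
// model of sigma = 0.5 and v = [1, 2]', whiten(v) = v / 0.5 = [2, 4]', so the
// distance is 2*2 + 4*4 = 20. The function name exampleMahalanobis is
// hypothetical.
static void exampleMahalanobis() {
  SharedNoiseModel model = Isotropic::Sigma(2, 0.5);
  Vector2 v(1.0, 2.0);
  double d2 = model->squaredMahalanobisDistance(v);  // 20.0
  std::cout << "squared Mahalanobis distance: " << d2 << std::endl;
}
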
/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
  size_t m = R.rows(), n = R.cols();
  if (m != n)
    throw invalid_argument("Gaussian::SqrtInformation: R not square");
  if (smart) {
    boost::optional<Vector> diagonal = checkIfDiagonal(R);
    if (diagonal)
      return Diagonal::Sigmas(diagonal->array().inverse(), true);
  }
  // NOTE(frank): only reaches here if !(smart && diagonal)
  return shared_ptr(new Gaussian(R.rows(), R));
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Information(const Matrix& information, bool smart) {
  size_t m = information.rows(), n = information.cols();
  if (m != n)
    throw invalid_argument("Gaussian::Information: information not square");
  boost::optional<Vector> diagonal = boost::none;
  if (smart)
    diagonal = checkIfDiagonal(information);
  if (diagonal)
    return Diagonal::Precisions(*diagonal, true);
  else {
    Eigen::LLT<Matrix> llt(information);
    Matrix R = llt.matrixU();
    return shared_ptr(new Gaussian(n, R));
  }
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance,
                                          bool smart) {
  size_t m = covariance.rows(), n = covariance.cols();
  if (m != n)
    throw invalid_argument("Gaussian::Covariance: covariance not square");
  boost::optional<Vector> variances = boost::none;
  if (smart)
    variances = checkIfDiagonal(covariance);
  if (variances)
    return Diagonal::Variances(*variances, true);
  else {
    // NOTE: if cov = L'*L, then the square root information R can be found by
    // QR, as L.inverse() = Q*R, with Q some rotation matrix. However, R has
    // annoying sign flips with respect to the simpler Information(inv(cov)),
    // hence we choose the simpler path here:
    return Information(covariance.inverse(), false);
  }
}

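// [Editorial sketch, not part of the original source] Typical use of the
// factory functions above. With smart=true (the default), a diagonal
// covariance silently yields a Diagonal (or Isotropic) model instead of a
// dense Gaussian. The function name exampleGaussianFactories is hypothetical.
static void exampleGaussianFactories() {
  // Dense covariance -> full Gaussian model
  Matrix cov = (Matrix(2, 2) << 0.25, 0.10, 0.10, 0.25).finished();
  Gaussian::shared_ptr dense = Gaussian::Covariance(cov);

  // Diagonal covariance -> smart factory actually returns a Diagonal model
  Matrix diagCov = (Matrix(2, 2) << 0.25, 0.0, 0.0, 0.04).finished();
  Gaussian::shared_ptr smart = Gaussian::Covariance(diagCov);

  // Either way, sigmas() recovers the standard deviations
  std::cout << dense->sigmas().transpose() << std::endl;
  std::cout << smart->sigmas().transpose() << std::endl;  // [0.5 0.2]
}
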
/* ************************************************************************* */
void Gaussian::print(const string& name) const {
  gtsam::print(thisR(), name + "Gaussian");
}

/* ************************************************************************* */
bool Gaussian::equals(const Base& expected, double tol) const {
  const Gaussian* p = dynamic_cast<const Gaussian*> (&expected);
  if (p == nullptr) return false;
  if (typeid(*this) != typeid(*p)) return false;
  return equal_with_abs_tol(R(), p->R(), sqrt(tol));
}

/* ************************************************************************* */
Matrix Gaussian::covariance() const {
  // Uses a fast version of `covariance = information().inverse();`
  const Matrix& R = this->R();
  Matrix I = Matrix::Identity(R.rows(), R.cols());
  // Fast inverse of upper-triangular matrix R using back-substitution
  Matrix Rinv = R.triangularView<Eigen::Upper>().solve(I);
  // (R' * R)^{-1} = R^{-1} * R^{-1}'
  return Rinv * Rinv.transpose();
}

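// [Editorial sketch, not part of the original source] Sanity check of the fast
// triangular inverse above: for any Gaussian model, R'R equals the information
// matrix and covariance() is its inverse. The function name
// exampleCovarianceRoundTrip is hypothetical.
static void exampleCovarianceRoundTrip() {
  Matrix cov = (Matrix(2, 2) << 0.25, 0.10, 0.10, 0.25).finished();
  Gaussian::shared_ptr model = Gaussian::Covariance(cov, false);  // keep it dense
  Matrix info = model->information();   // R'R
  Matrix back = model->covariance();    // (R'R)^{-1}
  std::cout << (back - cov).norm() << std::endl;                          // ~0 up to roundoff
  std::cout << (info * cov - Matrix::Identity(2, 2)).norm() << std::endl; // ~0 up to roundoff
}
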
/* ************************************************************************* */
Vector Gaussian::sigmas() const {
  return Vector(covariance().diagonal()).cwiseSqrt();
}

/* ************************************************************************* */
Vector Gaussian::whiten(const Vector& v) const {
  return thisR() * v;
}

/* ************************************************************************* */
Vector Gaussian::unwhiten(const Vector& v) const {
  return backSubstituteUpper(thisR(), v);
}

/* ************************************************************************* */
Matrix Gaussian::Whiten(const Matrix& H) const {
  return thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Matrix& H) const {
  H = thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = thisR() * H;
}

/* ************************************************************************* */
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab) const {

  gttic(Gaussian_noise_model_QR);

  static const bool debug = false;

  // get size(A) and maxRank
  // TODO: really no rank problems ?
  size_t m = Ab.rows(), n = Ab.cols()-1;
  size_t maxRank = min(m,n);

  // pre-whiten everything (cheaply if possible)
  WhitenInPlace(Ab);

  if(debug) gtsam::print(Ab, "Whitened Ab: ");

  // Eigen QR - much faster than older householder approach
  inplace_QR(Ab);
  Ab.triangularView<Eigen::StrictlyLower>().setZero();

  // hand-coded householder implementation
  // TODO: necessary to isolate last column?
  // householder(Ab, maxRank);

  return noiseModel::Unit::Create(maxRank);
}

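// [Editorial sketch, not part of the original source] QR() above whitens the
// augmented system [A|b] in place and replaces it with the upper-triangular
// [R|d]; the returned model is Unit because the rows are already whitened.
// The function name exampleQR and the sizes used are illustrative only.
static void exampleQR() {
  // A 3x2 system [A|b] with an isotropic noise model of sigma = 0.1
  Matrix Ab = (Matrix(3, 3) <<
      1.0, 0.0, 2.0,
      0.0, 1.0, 3.0,
      1.0, 1.0, 5.0).finished();
  SharedDiagonal model = Isotropic::Sigma(3, 0.1);
  SharedDiagonal result = model->QR(Ab);  // Ab now holds [R|d], upper triangular
  std::cout << "R|d:\n" << Ab << std::endl;
  result->print("returned model: ");
}
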
void Gaussian::WhitenSystem(vector<Matrix>& A, Vector& b) const {
  for(Matrix& Aj: A) { WhitenInPlace(Aj); }
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A, Vector& b) const {
  WhitenInPlace(A);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const {
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  WhitenInPlace(A3);
  whitenInPlace(b);
}

/* ************************************************************************* */
// Diagonal
/* ************************************************************************* */
Diagonal::Diagonal() :
    Gaussian(1) // TODO: Frank asks: really sure about this?
{
}

/* ************************************************************************* */
Diagonal::Diagonal(const Vector& sigmas)
    : Gaussian(sigmas.size()),
      sigmas_(sigmas),
      invsigmas_(sigmas.array().inverse()),
      precisions_(sigmas.array().square().inverse()) {
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) {
  if (smart) {
    // check whether all the same entry
    size_t n = variances.size();
    for (size_t j = 1; j < n; j++)
      if (variances(j) != variances(0)) goto full;
    return Isotropic::Variance(n, variances(0), true);
  }
  full: return shared_ptr(new Diagonal(variances.cwiseSqrt()));
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) {
  if (smart) {
    size_t n = sigmas.size();
    if (n==0) goto full;
    // look for zeros to make a constraint
    for (size_t j=0; j< n; ++j)
      if (sigmas(j)<1e-8)
        return Constrained::MixedSigmas(sigmas);
    // check whether all the same entry
    for (size_t j = 1; j < n; j++)
      if (sigmas(j) != sigmas(0)) goto full;
    return Isotropic::Sigma(n, sigmas(0), true);
  }
  full: return Diagonal::shared_ptr(new Diagonal(sigmas));
}

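// [Editorial sketch, not part of the original source] The "smart" logic above
// means the static factories may return a more specialized model than their
// class suggests: equal sigmas give an Isotropic, any (near-)zero sigma gives
// a Constrained model. The function name exampleSmartDiagonal is hypothetical.
static void exampleSmartDiagonal() {
  SharedDiagonal iso   = Diagonal::Sigmas(Vector3(0.1, 0.1, 0.1));  // actually Isotropic
  SharedDiagonal mixed = Diagonal::Sigmas(Vector3(0.1, 0.0, 0.2));  // actually Constrained
  SharedDiagonal plain = Diagonal::Sigmas(Vector3(0.1, 0.2, 0.3));  // plain Diagonal
  iso->print("iso  ");
  mixed->print("mixed  ");
  plain->print("plain  ");
}
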
/* ************************************************************************* */
void Diagonal::print(const string& name) const {
  gtsam::print(sigmas_, name + "diagonal sigmas");
}

/* ************************************************************************* */
Vector Diagonal::whiten(const Vector& v) const {
  return v.cwiseProduct(invsigmas_);
}

/* ************************************************************************* */
Vector Diagonal::unwhiten(const Vector& v) const {
  return v.cwiseProduct(sigmas_);
}

/* ************************************************************************* */
Matrix Diagonal::Whiten(const Matrix& H) const {
  return vector_scale(invsigmas(), H);
}

/* ************************************************************************* */
void Diagonal::WhitenInPlace(Matrix& H) const {
  vector_scale_inplace(invsigmas(), H);
}

/* ************************************************************************* */
void Diagonal::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = invsigmas().asDiagonal() * H;
}

/* ************************************************************************* */
// Constrained
/* ************************************************************************* */

namespace internal {
// switch precisions and invsigmas to finite value
// TODO: why?? And, why not just ask s==0.0 below ?
static void fix(const Vector& sigmas, Vector& precisions, Vector& invsigmas) {
  for (Vector::Index i = 0; i < sigmas.size(); ++i)
    if (!std::isfinite(1. / sigmas[i])) {
      precisions[i] = 0.0;
      invsigmas[i] = 0.0;
    }
}
}

/* ************************************************************************* */
Constrained::Constrained(const Vector& sigmas)
  : Diagonal(sigmas), mu_(Vector::Constant(sigmas.size(), 1000.0)) {
  internal::fix(sigmas, precisions_, invsigmas_);
}

/* ************************************************************************* */
Constrained::Constrained(const Vector& mu, const Vector& sigmas)
  : Diagonal(sigmas), mu_(mu) {
  internal::fix(sigmas, precisions_, invsigmas_);
}

/* ************************************************************************* */
Constrained::shared_ptr Constrained::MixedSigmas(const Vector& mu,
    const Vector& sigmas) {
  return shared_ptr(new Constrained(mu, sigmas));
}

/* ************************************************************************* */
bool Constrained::constrained(size_t i) const {
  // TODO why not just check sigmas_[i]==0.0 ?
  return !std::isfinite(1./sigmas_[i]);
}

/* ************************************************************************* */
void Constrained::print(const std::string& name) const {
  gtsam::print(sigmas_, name + "constrained sigmas");
  gtsam::print(mu_, name + "constrained mu");
}

/* ************************************************************************* */
Vector Constrained::whiten(const Vector& v) const {
  // If sigmas[i] is not 0 then divide v[i] by sigmas[i], as is usually done
  // for a normal Gaussian noise model. Otherwise, sigmas[i] = 0 indicates a
  // hard constraint and we leave v[i] unchanged.
  const Vector& a = v;
  const Vector& b = sigmas_;
  size_t n = a.size();
  assert (b.size()==a.size());
  Vector c(n);
  for( size_t i = 0; i < n; i++ ) {
    const double& ai = a(i), bi = b(i);
    c(i) = (bi==0.0) ? ai : ai/bi; // NOTE: not ediv_()
  }
  return c;
}

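// [Editorial sketch, not part of the original source] For a constrained model,
// whiten() scales only the rows with finite sigma; entries whose sigma is zero
// (hard constraints) are passed through unchanged. The function name
// exampleConstrainedWhiten is hypothetical.
static void exampleConstrainedWhiten() {
  Constrained::shared_ptr model = Constrained::MixedSigmas(Vector3(0.1, 0.0, 0.2));
  Vector3 e(1.0, 1.0, 1.0);
  Vector w = model->whiten(e);  // [10, 1, 5]: middle entry untouched
  std::cout << w.transpose() << std::endl;
}
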
/* ************************************************************************* */
double Constrained::squaredMahalanobisDistance(const Vector& v) const {
  Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
  for (size_t i=0; i<dim_; ++i)  // add mu weights on constrained variables
    if (constrained(i)) // whiten makes constrained variables zero
      w[i] = v[i] * sqrt(mu_[i]); // TODO: may want to store sqrt rather than rebuild
  return w.dot(w);
}

/* ************************************************************************* */
Matrix Constrained::Whiten(const Matrix& H) const {
  Matrix A = H;
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of A as is
      A.row(i) *= invsigmas_(i);
  return A;
}

/* ************************************************************************* */
void Constrained::WhitenInPlace(Matrix& H) const {
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of H as is
      H.row(i) *= invsigmas_(i);
}

/* ************************************************************************* */
void Constrained::WhitenInPlace(Eigen::Block<Matrix> H) const {
  for (DenseIndex i=0; i<(DenseIndex)dim_; ++i)
    if (!constrained(i)) // if constrained, leave row of H as is
      H.row(i) *= invsigmas_(i);
}

/* ************************************************************************* */
Constrained::shared_ptr Constrained::unit() const {
  Vector sigmas = Vector::Ones(dim());
  for (size_t i=0; i<dim(); ++i)
    if (constrained(i))
      sigmas(i) = 0.0;
  return MixedSigmas(mu_, sigmas);
}

/* ************************************************************************* */
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder

// Check whether column a triggers a constraint and the corresponding variable is deterministic
// Return constraint_row with maximum element in case variable plays in multiple constraints
template <typename VECTOR>
boost::optional<size_t> check_if_constraint(VECTOR a, const Vector& invsigmas, size_t m) {
  boost::optional<size_t> constraint_row;
  // not zero, so roundoff errors will not be counted
  // TODO(frank): that's a fairly crude way of dealing with roundoff errors :-(
  double max_element = 1e-9;
  for (size_t i = 0; i < m; i++) {
    if (!std::isinf(invsigmas[i]))
      continue;
    double abs_ai = std::abs(a(i,0));
    if (abs_ai > max_element) {
      max_element = abs_ai;
      constraint_row.reset(i);
    }
  }
  return constraint_row;
}

SharedDiagonal Constrained::QR(Matrix& Ab) const {
  static const double kInfinity = std::numeric_limits<double>::infinity();

  // get size(A) and maxRank
  size_t m = Ab.rows();
  const size_t n = Ab.cols() - 1;
  const size_t maxRank = min(m, n);

  // create storage for [R d]
  typedef boost::tuple<size_t, Matrix, double> Triple;
  list<Triple> Rd;

  Matrix rd(1, n + 1); // and for row of R
  Vector invsigmas = sigmas_.array().inverse();
  Vector weights = invsigmas.array().square(); // calculate weights once

  // We loop over all columns, because the columns that can be eliminated
  // are not necessarily contiguous. For each one, estimate the corresponding
  // scalar variable x as d-rS, with S the separator (remaining columns).
  // Then update A and b by substituting x with d-rS, zero-ing out x's column.
  for (size_t j = 0; j < n; ++j) {
    // extract column j of A
    Eigen::Block<Matrix> a = Ab.block(0, j, m, 1);

    // Check whether we need to handle as a constraint
    boost::optional<size_t> constraint_row = check_if_constraint(a, invsigmas, m);

    if (constraint_row) {
      // Handle this as a constraint, as the i^th row has zero sigma with non-zero entry A(i,j)

      // In this case, the row in [R|d] is simply the row in [A|b]
      // NOTE(frank): we used to divide by a[i] but there is no need with a constraint
      rd = Ab.row(*constraint_row);

      // Construct solution (r, d, sigma)
      Rd.push_back(boost::make_tuple(j, rd, kInfinity));

      // exit after rank exhausted
      if (Rd.size() >= maxRank)
        break;

      // The constraint row will be zeroed out, so we can save work by swapping in the
      // last valid row and decreasing m. This will save work on subsequent down-dates, too.
      m -= 1;
      if (*constraint_row != m) {
        Ab.row(*constraint_row) = Ab.row(m);
        weights(*constraint_row) = weights(m);
        invsigmas(*constraint_row) = invsigmas(m);
      }

      // get a reduced a-column which is now shorter
      Eigen::Block<Matrix> a_reduced = Ab.block(0, j, m, 1);
      a_reduced *= (1.0/rd(0, j)); // NOTE(frank): this is the 1/a[i] = 1/rd(0,j) factor we need!

      // Rank-1 down-date of Ab, expensive, using outer product
      Ab.block(0, j + 1, m, n - j).noalias() -= a_reduced * rd.middleCols(j + 1, n - j);
    } else {
      // Treat in normal Gram-Schmidt way
      // Calculate weighted pseudo-inverse and corresponding precision

      // Form pseudo-inverse inv(a'inv(Sigma)a)a'inv(Sigma)
      // For diagonal Sigma, inv(Sigma) = diag(precisions)
      double precision = 0;
      Vector pseudo(m); // allocate storage for pseudo-inverse
      for (size_t i = 0; i < m; i++) {
        double ai = a(i, 0);
        if (std::abs(ai) > 1e-9) { // also catches remaining sigma==0 rows
          pseudo[i] = weights[i] * ai;
          precision += pseudo[i] * ai;
        } else
          pseudo[i] = 0;
      }

      if (precision > 1e-8) {
        pseudo /= precision;

        // create solution [r d], rhs is automatically r(n)
        rd(0, j) = 1.0; // put 1 on diagonal
        rd.block(0, j + 1, 1, n - j) = pseudo.transpose() * Ab.block(0, j + 1, m, n - j);

        // construct solution (r, d, sigma)
        Rd.push_back(boost::make_tuple(j, rd, precision));
      } else {
        // If precision is zero, no information on this column
        // This is actually not limited to constraints, could happen in Gaussian::QR
        // In that case, we're probably hosed. TODO: make sure Householder is rank-revealing
        continue; // but even if not, no need to update if a==zeros
      }

      // exit after rank exhausted
      if (Rd.size() >= maxRank)
        break;

      // Rank-1 down-date of Ab, expensive, using outer product
      Ab.block(0, j + 1, m, n - j).noalias() -= a * rd.middleCols(j + 1, n - j);
    }
  }

  // Create storage for precisions
  Vector precisions(Rd.size());

  // Write back result in Ab, imperative as we are
  size_t i = 0; // start with first row
  bool mixed = false;
  Ab.setZero(); // make sure we don't look below
  for (const Triple& t: Rd) {
    const size_t& j = t.get<0>();
    const Matrix& rd = t.get<1>();
    precisions(i) = t.get<2>();
    if (std::isinf(precisions(i)))
      mixed = true;
    Ab.block(i, j, 1, n + 1 - j) = rd.block(0, j, 1, n + 1 - j);
    i += 1;
  }

  // Must include mu, as the defaults might be higher, resulting in non-convergence
  return mixed ? Constrained::MixedPrecisions(mu_, precisions) : Diagonal::Precisions(precisions);
}

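// [Editorial sketch, not part of the original source] The constrained QR above
// is what allows hard constraints (sigma == 0) to be eliminated: a zero-sigma
// row becomes a constraint row in [R|d] with infinite precision, and the
// returned model mixes constrained and finite precisions. The function name
// exampleConstrainedQR and the numbers used are illustrative only.
static void exampleConstrainedQR() {
  // Two soft measurements of a scalar x plus the hard constraint x = 5
  Matrix Ab = (Matrix(3, 2) <<
      1.0, 4.9,
      1.0, 5.1,
      1.0, 5.0).finished();
  SharedDiagonal model = Constrained::MixedSigmas(Vector3(0.5, 0.5, 0.0));
  SharedDiagonal result = model->QR(Ab);  // constraint row wins; Ab now holds [R|d]
  result->print("after QR  ");
}
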
/* ************************************************************************* */
// Isotropic
/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Sigma(size_t dim, double sigma, bool smart) {
  if (smart && std::abs(sigma-1.0)<1e-9) return Unit::Create(dim);
  return shared_ptr(new Isotropic(dim, sigma));
}

/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Variance(size_t dim, double variance, bool smart) {
  if (smart && std::abs(variance-1.0)<1e-9) return Unit::Create(dim);
  return shared_ptr(new Isotropic(dim, sqrt(variance)));
}

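// [Editorial sketch, not part of the original source] The isotropic factories
// are the most common entry point in practice; note that sigma == 1 collapses
// to a Unit model when smart is left enabled. The function name
// exampleIsotropic is hypothetical.
static void exampleIsotropic() {
  Isotropic::shared_ptr pixelNoise = Isotropic::Sigma(2, 1.5);  // e.g. 1.5 pixel std-dev
  Isotropic::shared_ptr unit = Isotropic::Sigma(3, 1.0);        // smart: actually a Unit model
  pixelNoise->print("pixel  ");
  unit->print("unit?  ");
}
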
/* ************************************************************************* */
void Isotropic::print(const string& name) const {
  cout << boost::format("isotropic dim=%1% sigma=%2%") % dim() % sigma_ << endl;
}

/* ************************************************************************* */
double Isotropic::squaredMahalanobisDistance(const Vector& v) const {
  return v.dot(v) * invsigma_ * invsigma_;
}

/* ************************************************************************* */
Vector Isotropic::whiten(const Vector& v) const {
  return v * invsigma_;
}

/* ************************************************************************* */
Vector Isotropic::unwhiten(const Vector& v) const {
  return v * sigma_;
}

/* ************************************************************************* */
Matrix Isotropic::Whiten(const Matrix& H) const {
  return invsigma_ * H;
}

/* ************************************************************************* */
void Isotropic::WhitenInPlace(Matrix& H) const {
  H *= invsigma_;
}

/* ************************************************************************* */
void Isotropic::whitenInPlace(Vector& v) const {
  v *= invsigma_;
}

/* ************************************************************************* */
void Isotropic::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H *= invsigma_;
}

/* ************************************************************************* */
// Unit
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
  cout << name << "unit (" << dim_ << ") " << endl;
}

/* ************************************************************************* */
// Robust
/* ************************************************************************* */

void Robust::print(const std::string& name) const {
  robust_->print(name);
  noise_->print(name);
}

bool Robust::equals(const Base& expected, double tol) const {
  const Robust* p = dynamic_cast<const Robust*> (&expected);
  if (p == nullptr) return false;
  return noise_->equals(*p->noise_,tol) && robust_->equals(*p->robust_,tol);
}

void Robust::WhitenSystem(Vector& b) const {
  noise_->whitenInPlace(b);
  robust_->reweight(b);
}

void Robust::WhitenSystem(vector<Matrix>& A, Vector& b) const {
  noise_->WhitenSystem(A,b);
  robust_->reweight(A,b);
}

void Robust::WhitenSystem(Matrix& A, Vector& b) const {
  noise_->WhitenSystem(A,b);
  robust_->reweight(A,b);
}

void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const {
  noise_->WhitenSystem(A1,A2,b);
  robust_->reweight(A1,A2,b);
}

void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{
  noise_->WhitenSystem(A1,A2,A3,b);
  robust_->reweight(A1,A2,A3,b);
}

Robust::shared_ptr Robust::Create(
const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){
  return shared_ptr(new Robust(robust,noise));
}

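// [Editorial sketch, not part of the original source] A robust model wraps a
// standard noise model with an m-estimator; the reweighting happens in the
// WhitenSystem overloads above. Huber with threshold 1.345 is the textbook
// choice; the function name exampleRobust is hypothetical.
static void exampleRobust() {
  SharedNoiseModel base = Isotropic::Sigma(2, 0.5);
  SharedNoiseModel huber = Robust::Create(
      mEstimator::Huber::Create(1.345), base);
  huber->print("robust  ");
}
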
/* ************************************************************************* */

} // namespace noiseModel
} // gtsam