elaboratePoint2KalmanFilter.cpp
Go to the documentation of this file.
1 /* ----------------------------------------------------------------------------
2 
3  * GTSAM Copyright 2010, Georgia Tech Research Corporation,
4  * Atlanta, Georgia 30332-0415
5  * All Rights Reserved
6  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
7 
8  * See LICENSE for the license information
9 
10  * -------------------------------------------------------------------------- */
11 
25 //#include <gtsam/nonlinear/Ordering.h>
26 #include <gtsam/inference/Symbol.h>
30 #include <gtsam/geometry/Point2.h>
31 
32 #include <cassert>
33 
34 using namespace std;
35 using namespace gtsam;
36 
37 int main() {
38 
39  // [code below basically does SRIF with Cholesky]
40 
41  // Create a factor graph to perform the inference
43 
44  // Create the desired ordering
46 
47  // Create a structure to hold the linearization points
48  Values linearizationPoints;
49 
50  // Ground truth example
51  // Start at origin, move to the right (x-axis): 0,0 0,1 0,2
52  // Motion model is just moving to the right (x'-x)^2
53  // Measurements are GPS like, (x-z)^2, where z is a 2D measurement
54  // i.e., we should get 0,0 0,1 0,2 if there is no noise
55 
56  // Create new state variable
57  Symbol x0('x',0);
58  ordering->insert(x0, 0);
59 
60  // Initialize state x0 (2D point) at origin by adding a prior factor, i.e., Bayes net P(x0)
61  // This is equivalent to x_0 and P_0
62  Point2 x_initial(0,0);
63  SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
64  PriorFactor<Point2> factor1(x0, x_initial, P_initial);
65  // Linearize the factor and add it to the linear factor graph
66  linearizationPoints.insert(x0, x_initial);
67  linearFactorGraph->push_back(factor1.linearize(linearizationPoints, *ordering));
68 
69 
70  // Now predict the state at t=1, i.e. argmax_{x1} P(x1) = P(x1|x0) P(x0)
71  // In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
 72  // For the Kalman Filter, this requires a motion model, f(x_{t}) = x_{t+1|t}
73  // Assuming the system is linear, this will be of the form f(x_{t}) = F*x_{t} + B*u_{t} + w
74  // where F is the state transition model/matrix, B is the control input model,
75  // and w is zero-mean, Gaussian white noise with covariance Q
76  // Note, in some models, Q is actually derived as G*w*G^T where w models uncertainty of some
77  // physical property, such as velocity or acceleration, and G is derived from physics
78  //
79  // For the purposes of this example, let us assume we are using a constant-position model and
80  // the controls are driving the point to the right at 1 m/s. Then, F = [1 0 ; 0 1], B = [1 0 ; 0 1]
81  // and u = [1 ; 0]. Let us also assume that the process noise Q = [0.1 0 ; 0 0.1];
82  //
83  // In the case of factor graphs, the factor related to the motion model would be defined as
84  // f2 = (f(x_{t}) - x_{t+1}) * Q^-1 * (f(x_{t}) - x_{t+1})^T
85  // Conveniently, there is a factor type, called a BetweenFactor, that can generate this factor
86  // given the expected difference, f(x_{t}) - x_{t+1}, and Q.
87  // so, difference = x_{t+1} - x_{t} = F*x_{t} + B*u_{t} - I*x_{t}
88  // = (F - I)*x_{t} + B*u_{t}
89  // = B*u_{t} (for our example)
90  Symbol x1('x',1);
91  ordering->insert(x1, 1);
92 
93  Point2 difference(1,0);
94  SharedDiagonal Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
95  BetweenFactor<Point2> factor2(x0, x1, difference, Q);
96  // Linearize the factor and add it to the linear factor graph
97  linearizationPoints.insert(x1, x_initial);
98  linearFactorGraph->push_back(factor2.linearize(linearizationPoints, *ordering));
99 
100  // We have now made the small factor graph f1-(x0)-f2-(x1)
101  // where factor f1 is just the prior from time t0, P(x0)
102  // and factor f2 is from the motion model
103  // Eliminate this in order x0, x1, to get Bayes net P(x0|x1)P(x1)
104  // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
105  //
106  // Because of the way GTSAM works internally, we have used nonlinear class even though this example
107  // system is linear. We first convert the nonlinear factor graph into a linear one, using the specified
108  // ordering. Linear factors are simply numbered, and are not accessible via named key like the nonlinear
109  // variables. Also, the nonlinear factors are linearized around an initial estimate. For a true linear
110  // system, the initial estimate is not important.
111 
112  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
113  GaussianSequentialSolver solver0(*linearFactorGraph);
114  GaussianBayesNet::shared_ptr linearBayesNet = solver0.eliminate();
115 
116  // Extract the current estimate of x1,P1 from the Bayes Network
117  VectorValues result = optimize(*linearBayesNet);
118  Point2 x1_predict = linearizationPoints.at<Point2>(x1).retract(result[ordering->at(x1)]);
119  x1_predict.print("X1 Predict");
120 
121  // Update the new linearization point to the new estimate
122  linearizationPoints.update(x1, x1_predict);
123 
124 
125 
126  // Create a new, empty graph and add the prior from the previous step
127  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
128 
129  // Convert the root conditional, P(x1) in this case, into a Prior for the next step
130  // Some care must be done here, as the linearization point in future steps will be different
131  // than what was used when the factor was created.
132  // f = || F*dx1' - (F*x0 - x1) ||^2, originally linearized at x1 = x0
133  // After this step, the factor needs to be linearized around x1 = x1_predict
134  // This changes the factor to f = || F*dx1'' - b'' ||^2
135  // = || F*(dx1' - (dx1' - dx1'')) - b'' ||^2
136  // = || F*dx1' - F*(dx1' - dx1'') - b'' ||^2
137  // = || F*dx1' - (b'' + F(dx1' - dx1'')) ||^2
138  // -> b' = b'' + F(dx1' - dx1'')
139  // -> b'' = b' - F(dx1' - dx1'')
140  // = || F*dx1'' - (b' - F(dx1' - dx1'')) ||^2
 141  // = || F*dx1'' - (b' - F(x_predict - x_initial)) ||^2
142  const GaussianConditional::shared_ptr& cg0 = linearBayesNet->back();
143  assert(cg0->nrFrontals() == 1);
144  assert(cg0->nrParents() == 0);
145  linearFactorGraph->add(0, cg0->R(), cg0->d() - cg0->R()*result[ordering->at(x1)], noiseModel::Diagonal::Sigmas(cg0->get_sigmas(), true));
146 
147  // Create the desired ordering
149  ordering->insert(x1, 0);
150 
151  // Now, a measurement, z1, has been received, and the Kalman Filter should be "Updated"/"Corrected"
152  // This is equivalent to saying P(x1|z1) ~ P(z1|x1)*P(x1) ~ f3(x1)*f4(x1;z1)
153  // where f3 is the prior from the previous step, and
154  // where f4 is a measurement factor
155  //
156  // So, now we need to create the measurement factor, f4
157  // For the Kalman Filter, this is the measurement function, h(x_{t}) = z_{t}
158  // Assuming the system is linear, this will be of the form h(x_{t}) = H*x_{t} + v
159  // where H is the observation model/matrix, and v is zero-mean, Gaussian white noise with covariance R
160  //
161  // For the purposes of this example, let us assume we have something like a GPS that returns
162  // the current position of the robot. For this simple example, we can use a PriorFactor to model the
163  // observation as it depends on only a single state variable, x1. To model real sensor observations
164  // generally requires the creation of a new factor type. For example, factors for range sensors, bearing
165  // sensors, and camera projections have already been added to GTSAM.
166  //
167  // In the case of factor graphs, the factor related to the measurements would be defined as
168  // f4 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
169  // = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
170  // This can be modeled using the PriorFactor, where the mean is z_{t} and the covariance is R.
171  Point2 z1(1.0, 0.0);
172  SharedDiagonal R1 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
173  PriorFactor<Point2> factor4(x1, z1, R1);
174  // Linearize the factor and add it to the linear factor graph
175  linearFactorGraph->push_back(factor4.linearize(linearizationPoints, *ordering));
176 
177  // We have now made the small factor graph f3-(x1)-f4
178  // where factor f3 is the prior from previous time ( P(x1) )
179  // and factor f4 is from the measurement, z1 ( P(x1|z1) )
180  // Eliminate this in order x1, to get Bayes net P(x1)
181  // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
182  // We solve as before...
183 
 184  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1) )
185  GaussianSequentialSolver solver1(*linearFactorGraph);
186  linearBayesNet = solver1.eliminate();
187 
188  // Extract the current estimate of x1 from the Bayes Network
189  result = optimize(*linearBayesNet);
190  Point2 x1_update = linearizationPoints.at<Point2>(x1).retract(result[ordering->at(x1)]);
191  x1_update.print("X1 Update");
192 
193  // Update the linearization point to the new estimate
194  linearizationPoints.update(x1, x1_update);
195 
196 
197 
198 
199 
200 
201  // Wash, rinse, repeat for another time step
202  // Create a new, empty graph and add the prior from the previous step
203  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
204 
205  // Convert the root conditional, P(x1) in this case, into a Prior for the next step
206  // The linearization point of this prior must be moved to the new estimate of x, and the key/index needs to be reset to 0,
207  // the first key in the next iteration
208  const GaussianConditional::shared_ptr& cg1 = linearBayesNet->back();
209  assert(cg1->nrFrontals() == 1);
210  assert(cg1->nrParents() == 0);
211  JacobianFactor tmpPrior1 = JacobianFactor(*cg1);
212  linearFactorGraph->add(0, tmpPrior1.getA(tmpPrior1.begin()), tmpPrior1.getb() - tmpPrior1.getA(tmpPrior1.begin()) * result[ordering->at(x1)], tmpPrior1.get_model());
213 
214  // Create a key for the new state
215  Symbol x2('x',2);
216 
217  // Create the desired ordering
219  ordering->insert(x1, 0);
220  ordering->insert(x2, 1);
221 
222  // Create a nonlinear factor describing the motion model
223  difference = Point2(1,0);
 224  Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
225  BetweenFactor<Point2> factor6(x1, x2, difference, Q);
226 
227  // Linearize the factor and add it to the linear factor graph
228  linearizationPoints.insert(x2, x1_update);
229  linearFactorGraph->push_back(factor6.linearize(linearizationPoints, *ordering));
230 
231  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1,x2) = P(x1|x2)*P(x2) )
232  GaussianSequentialSolver solver2(*linearFactorGraph);
233  linearBayesNet = solver2.eliminate();
234 
235  // Extract the current estimate of x2 from the Bayes Network
236  result = optimize(*linearBayesNet);
237  Point2 x2_predict = linearizationPoints.at<Point2>(x2).retract(result[ordering->at(x2)]);
238  x2_predict.print("X2 Predict");
239 
240  // Update the linearization point to the new estimate
241  linearizationPoints.update(x2, x2_predict);
242 
243 
244 
245  // Now add the next measurement
246  // Create a new, empty graph and add the prior from the previous step
247  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
248 
 249  // Convert the root conditional, P(x2) in this case, into a Prior for the next step
250  const GaussianConditional::shared_ptr& cg2 = linearBayesNet->back();
251  assert(cg2->nrFrontals() == 1);
252  assert(cg2->nrParents() == 0);
253  JacobianFactor tmpPrior2 = JacobianFactor(*cg2);
254  linearFactorGraph->add(0, tmpPrior2.getA(tmpPrior2.begin()), tmpPrior2.getb() - tmpPrior2.getA(tmpPrior2.begin()) * result[ordering->at(x2)], tmpPrior2.get_model());
255 
256  // Create the desired ordering
258  ordering->insert(x2, 0);
259 
260  // And update using z2 ...
261  Point2 z2(2.0, 0.0);
262  SharedDiagonal R2 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
263  PriorFactor<Point2> factor8(x2, z2, R2);
264 
265  // Linearize the factor and add it to the linear factor graph
266  linearFactorGraph->push_back(factor8.linearize(linearizationPoints, *ordering));
267 
268  // We have now made the small factor graph f7-(x2)-f8
269  // where factor f7 is the prior from previous time ( P(x2) )
270  // and factor f8 is from the measurement, z2 ( P(x2|z2) )
271  // Eliminate this in order x2, to get Bayes net P(x2)
272  // As this is a filter, all we need is the posterior P(x2), so we just keep the root of the Bayes net
273  // We solve as before...
274 
 275  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x2) )
276  GaussianSequentialSolver solver3(*linearFactorGraph);
277  linearBayesNet = solver3.eliminate();
278 
279  // Extract the current estimate of x2 from the Bayes Network
280  result = optimize(*linearBayesNet);
281  Point2 x2_update = linearizationPoints.at<Point2>(x2).retract(result[ordering->at(x2)]);
282  x2_update.print("X2 Update");
283 
284  // Update the linearization point to the new estimate
285  linearizationPoints.update(x2, x2_update);
286 
287 
288 
289 
290 
291 
292  // Wash, rinse, repeat for a third time step
293  // Create a new, empty graph and add the prior from the previous step
294  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
295 
 296  // Convert the root conditional, P(x2) in this case, into a Prior for the next step
297  const GaussianConditional::shared_ptr& cg3 = linearBayesNet->back();
298  assert(cg3->nrFrontals() == 1);
299  assert(cg3->nrParents() == 0);
300  JacobianFactor tmpPrior3 = JacobianFactor(*cg3);
301  linearFactorGraph->add(0, tmpPrior3.getA(tmpPrior3.begin()), tmpPrior3.getb() - tmpPrior3.getA(tmpPrior3.begin()) * result[ordering->at(x2)], tmpPrior3.get_model());
302 
303  // Create a key for the new state
304  Symbol x3('x',3);
305 
306  // Create the desired ordering
308  ordering->insert(x2, 0);
309  ordering->insert(x3, 1);
310 
311  // Create a nonlinear factor describing the motion model
312  difference = Point2(1,0);
313  Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
314  BetweenFactor<Point2> factor10(x2, x3, difference, Q);
315 
316  // Linearize the factor and add it to the linear factor graph
317  linearizationPoints.insert(x3, x2_update);
318  linearFactorGraph->push_back(factor10.linearize(linearizationPoints, *ordering));
319 
320  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1,x2) = P(x1|x2)*P(x2) )
321  GaussianSequentialSolver solver4(*linearFactorGraph);
322  linearBayesNet = solver4.eliminate();
323 
324  // Extract the current estimate of x3 from the Bayes Network
325  result = optimize(*linearBayesNet);
326  Point2 x3_predict = linearizationPoints.at<Point2>(x3).retract(result[ordering->at(x3)]);
327  x3_predict.print("X3 Predict");
328 
329  // Update the linearization point to the new estimate
330  linearizationPoints.update(x3, x3_predict);
331 
332 
333 
334  // Now add the next measurement
335  // Create a new, empty graph and add the prior from the previous step
336  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
337 
 338  // Convert the root conditional, P(x3) in this case, into a Prior for the next step
339  const GaussianConditional::shared_ptr& cg4 = linearBayesNet->back();
340  assert(cg4->nrFrontals() == 1);
341  assert(cg4->nrParents() == 0);
342  JacobianFactor tmpPrior4 = JacobianFactor(*cg4);
343  linearFactorGraph->add(0, tmpPrior4.getA(tmpPrior4.begin()), tmpPrior4.getb() - tmpPrior4.getA(tmpPrior4.begin()) * result[ordering->at(x3)], tmpPrior4.get_model());
344 
345  // Create the desired ordering
347  ordering->insert(x3, 0);
348 
349  // And update using z3 ...
350  Point2 z3(3.0, 0.0);
351  SharedDiagonal R3 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
352  PriorFactor<Point2> factor12(x3, z3, R3);
353 
354  // Linearize the factor and add it to the linear factor graph
355  linearFactorGraph->push_back(factor12.linearize(linearizationPoints, *ordering));
356 
357  // We have now made the small factor graph f11-(x3)-f12
358  // where factor f11 is the prior from previous time ( P(x3) )
359  // and factor f12 is from the measurement, z3 ( P(x3|z3) )
360  // Eliminate this in order x3, to get Bayes net P(x3)
361  // As this is a filter, all we need is the posterior P(x3), so we just keep the root of the Bayes net
362  // We solve as before...
363 
 364  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x3) )
365  GaussianSequentialSolver solver5(*linearFactorGraph);
366  linearBayesNet = solver5.eliminate();
367 
 368  // Extract the current estimate of x3 from the Bayes Network
369  result = optimize(*linearBayesNet);
370  Point2 x3_update = linearizationPoints.at<Point2>(x3).retract(result[ordering->at(x3)]);
371  x3_update.print("X3 Update");
372 
373  // Update the linearization point to the new estimate
374  linearizationPoints.update(x3, x3_update);
375 
376 
377 
378  return 0;
379 }
GaussianFactorGraph.h
Linear Factor Graph where all factors are Gaussians.
gtsam::GaussianFactorGraph::shared_ptr
std::shared_ptr< This > shared_ptr
shared_ptr to this class
Definition: GaussianFactorGraph.h:82
simple_graph::factor2
auto factor2
Definition: testJacobianFactor.cpp:203
optimize
int optimize(const SfmData &db, const NonlinearFactorGraph &graph, const Values &initial, bool separateCalibration=false)
Definition: timeSFMBAL.h:64
gtsam::JacobianFactor::get_model
const SharedDiagonal & get_model() const
Definition: JacobianFactor.h:294
z1
Point2 z1
Definition: testTriangulationFactor.cpp:45
gtsam::JacobianFactor
Definition: JacobianFactor.h:91
gtsam::PriorFactor
Definition: nonlinear/PriorFactor.h:30
simple::R3
static Rot3 R3
Definition: testInitializePose3.cpp:54
gtsam::utils.numerical_derivative.retract
def retract(a, np.ndarray xi)
Definition: numerical_derivative.py:44
GaussianBayesNet.h
Chordal Bayes Net, the result of eliminating a factor graph.
gtsam::Values::update
void update(Key j, const Value &val)
Definition: Values.cpp:170
gtsam::Factor::begin
const_iterator begin() const
Definition: Factor.h:146
result
Values result
Definition: OdometryOptimize.cpp:8
gtsam::Ordering::shared_ptr
std::shared_ptr< This > shared_ptr
shared_ptr to this class
Definition: inference/Ordering.h:45
x3
Pose3 x3(Rot3::Ypr(M_PI/4.0, 0.0, 0.0), l2)
gtsam::JacobianFactor::getb
const constBVector getb() const
Definition: JacobianFactor.h:300
Point2.h
2D Point
so3::R1
SO3 R1
Definition: testShonanFactor.cpp:41
gtsam::NoiseModelFactor::linearize
std::shared_ptr< GaussianFactor > linearize(const Values &x) const override
Definition: NonlinearFactor.cpp:152
gtsam::GaussianFactorGraph
Definition: GaussianFactorGraph.h:73
gtsam::Values::at
const ValueType at(Key j) const
Definition: Values-inl.h:261
PriorFactor.h
z3
static const Unit3 z3
Definition: testRotateFactor.cpp:44
gtsam::VectorValues
Definition: VectorValues.h:74
BetweenFactor.h
x1
Pose3 x1
Definition: testPose3.cpp:663
gtsam::SharedDiagonal
noiseModel::Diagonal::shared_ptr SharedDiagonal
Definition: NoiseModel.h:764
x0
static Symbol x0('x', 0)
Symbol.h
gtsam::Point2
Vector2 Point2
Definition: Point2.h:32
ordering
static enum @1096 ordering
Eigen::Quaternion
The quaternion class used to represent 3D orientations and rotations.
Definition: ForwardDeclarations.h:293
so3::R2
SO3 R2
Definition: testShonanFactor.cpp:43
gtsam
traits
Definition: SFMdata.h:40
NoiseModel.h
gtsam::JacobianFactor::getA
constABlock getA(const_iterator variable) const
Definition: JacobianFactor.h:303
gtsam::Values
Definition: Values.h:65
std
Definition: BFloat16.h:88
gtsam::Values::insert
void insert(Key j, const Value &val)
Definition: Values.cpp:156
gtsam::GaussianConditional::shared_ptr
std::shared_ptr< This > shared_ptr
shared_ptr to this class
Definition: GaussianConditional.h:46
gtsam::Ordering
Definition: inference/Ordering.h:33
gtsam::GaussianBayesNet::shared_ptr
std::shared_ptr< This > shared_ptr
Definition: GaussianBayesNet.h:42
x2
Pose3 x2(Rot3::Ypr(0.0, 0.0, 0.0), l2)
Vec
Matrix< Scalar, Dynamic, 1 > Vec
Definition: gemv_common.h:17
simple_graph::factor1
auto factor1
Definition: testJacobianFactor.cpp:196
z2
static const Unit3 z2
Definition: testRotateFactor.cpp:43
gtsam::Symbol
Definition: inference/Symbol.h:37
gtsam::BetweenFactor
Definition: BetweenFactor.h:40
main
int main()
Definition: elaboratePoint2KalmanFilter.cpp:37


gtsam
Author(s):
autogenerated on Tue Jan 7 2025 04:02:13