/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file elaboratePoint2KalmanFilter.cpp
 *
 * A simple linear Kalman filter on a moving 2D point, implemented with factor graphs.
 * This example manually creates all of the needed data structures.
 *
 * @date Aug 19, 2011
 * @author Frank Dellaert
 * @author Stephen Williams
 */

#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/base/Vector.h>

#include <cassert>

using namespace std;
using namespace gtsam;
using symbol_shorthand::X;

int main() {

  // [code below basically does SRIF with Cholesky]

  // Create a factor graph to perform the inference
  GaussianFactorGraph::shared_ptr linearFactorGraph(new GaussianFactorGraph);

  // Create the desired ordering
  Ordering::shared_ptr ordering(new Ordering);

  // Create a structure to hold the linearization points
  Values linearizationPoints;

  // Ground truth example
  // Start at origin, move to the right (x-axis): (0,0) (1,0) (2,0) ...
  // Motion model is just moving to the right, (x'-x)^2
  // Measurements are GPS-like, (x-z)^2, where z is a 2D measurement
  // i.e., we should get (0,0) (1,0) (2,0) ... if there is no noise

  // Create new state variable
  ordering->push_back(X(0));

  // Initialize state x0 (2D point) at origin by adding a prior factor, i.e., Bayes net P(x0)
  // This is equivalent to x_0 and P_0
  Point2 x_initial(0, 0);
  SharedDiagonal P_initial = noiseModel::Isotropic::Sigma(2, 0.1);

  // Linearize the factor and add it to the linear factor graph
  linearizationPoints.insert(X(0), x_initial);
  linearFactorGraph->add(X(0),
                         P_initial->R(),
                         Vector::Zero(2),
                         noiseModel::Unit::Create(2));
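  // The rows just added encode the whitened prior || R*dx0 ||^2, where
  // R = P_initial->R() = 10*I is the square-root information matrix
  // (sigma = 0.1) and dx0 is the deviation from the linearization point x_initial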

  // Now predict the state at t=1, i.e., argmax_{x1} P(x1) = P(x1|x0) P(x0)
  // In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
  // For the Kalman Filter, this requires a motion model, f(x_{t}) = x_{t+1|t}
  // Assuming the system is linear, this will be of the form f(x_{t}) = F*x_{t} + B*u_{t} + w
  // where F is the state transition model/matrix, B is the control input model,
  // and w is zero-mean, Gaussian white noise with covariance Q
  // Note, in some models, Q is actually derived as G*w*G^T where w models uncertainty of some
  // physical property, such as velocity or acceleration, and G is derived from physics
  //
  // For the purposes of this example, let us assume we are using a constant-position model and
  // the controls are driving the point to the right at 1 m/s. Then, F = [1 0 ; 0 1], B = [1 0 ; 0 1]
  // and u = [1 ; 0]. Let us also assume the process noise has sigma = 0.1, i.e., Q = [0.01 0 ; 0 0.01]
  //
  // In the case of factor graphs, the factor related to the motion model would be defined as
  //   f2 = (f(x_{t}) - x_{t+1})^T * Q^-1 * (f(x_{t}) - x_{t+1})
  // Conveniently, there is a factor type, called a BetweenFactor, that can generate this factor
  // given the expected difference, x_{t+1} - x_{t}, and Q.
  // So, difference = x_{t+1} - x_{t} = F*x_{t} + B*u_{t} - I*x_{t}
  //                                  = (F - I)*x_{t} + B*u_{t}
  //                                  = B*u_{t} (for our example)
  ordering->push_back(X(1));

  Point2 difference(1, 0);
  SharedDiagonal Q = noiseModel::Isotropic::Sigma(2, 0.1);
  BetweenFactor<Point2> factor2(X(0), X(1), difference, Q);
  // Linearize the factor and add it to the linear factor graph
  linearizationPoints.insert(X(1), x_initial);
  linearFactorGraph->push_back(factor2.linearize(linearizationPoints));
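  // For comparison, the textbook Kalman filter prediction for this step is
  //   x_{1|0} = F*x_0 + B*u = (1, 0)
  //   P_{1|0} = F*P_0*F^T + Q = 0.01*I + 0.01*I = 0.02*I
  // The elimination below recovers exactly this marginal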

  // We have now made the small factor graph f1-(x0)-f2-(x1)
  // where factor f1 is just the prior from time t0, P(x0)
  // and factor f2 is from the motion model
  // Eliminate this in order x0, x1, to get Bayes net P(x0|x1)P(x1)
  // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
  //
  // Because of the way GTSAM works internally, we have used the nonlinear classes even though this
  // example system is linear. We first convert the nonlinear factor graph into a linear one using the
  // specified ordering. Linear factors are simply numbered and are not accessible via a named key like
  // the nonlinear variables. Also, the nonlinear factors are linearized around an initial estimate.
  // For a truly linear system, the initial estimate does not matter.

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
  GaussianBayesNet::shared_ptr bayesNet = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& x1Conditional = bayesNet->back();  // This should be P(x1)

  // Extract the current estimate of x1, P1 from the Bayes Network
  VectorValues result = bayesNet->optimize();
  Point2 x1_predict = linearizationPoints.at<Point2>(X(1)) + result[X(1)];
  traits<Point2>::Print(x1_predict, "X1 Predict");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(1), x1_predict);
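  // Sanity check: with noiseless data the prediction matches the ground truth
  // (uses the same Testable traits as the Print call above)
  assert(traits<Point2>::Equals(x1_predict, Point2(1.0, 0.0), 1e-6));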

  // Create a new, empty graph and add the prior from the previous step
  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);

  // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  // Some care must be taken here, as the linearization point in future steps will be
  // different from the one used when this factor was created.
  // The conditional is f = || F*dx1' - b' ||^2, linearized at x1 = x_initial,
  // so dx1' is the deviation from x_initial.
  // After this step, the factor needs to be expressed around x1 = x1_predict,
  // i.e., as f = || F*dx1'' - b'' ||^2, with dx1'' the deviation from x1_predict.
  // Substituting dx1' = dx1'' + (x1_predict - x_initial):
  //   f = || F*dx1'' + F*(x1_predict - x_initial) - b' ||^2
  //     = || F*dx1'' - (b' - F*(x1_predict - x_initial)) ||^2
  //   -> b'' = b' - F*(x1_predict - x_initial)
  linearFactorGraph->add(
      X(1),
      x1Conditional->R(),
      x1Conditional->d() - x1Conditional->R() * result[X(1)],
      x1Conditional->get_model());
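  // Note the b vector above is b'' = d - R*dx1', with dx1' = result[X(1)] =
  // x1_predict - x_initial: exactly the shift derived in the comment block
  // (the conditional's R plays the role of F there)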

  // Reset the ordering for the next step
  ordering = Ordering::shared_ptr(new Ordering);
  ordering->push_back(X(1));

  // Now, a measurement, z1, has been received, and the Kalman Filter should be "Updated"/"Corrected"
  // This is equivalent to saying P(x1|z1) ~ P(z1|x1)*P(x1) ~ f3(x1)*f4(x1;z1)
  // where f3 is the prior from the previous step, and
  // where f4 is a measurement factor
  //
  // So, now we need to create the measurement factor, f4
  // For the Kalman Filter, this is the measurement function, h(x_{t}) = z_{t}
  // Assuming the system is linear, this will be of the form h(x_{t}) = H*x_{t} + v
  // where H is the observation model/matrix, and v is zero-mean, Gaussian white noise with covariance R
  //
  // For the purposes of this example, let us assume we have something like a GPS that returns
  // the current position of the robot. For this simple example, we can use a PriorFactor to model the
  // observation, as it depends on only a single state variable, x1. Modeling real sensor observations
  // generally requires the creation of a new factor type. For example, factors for range sensors,
  // bearing sensors, and camera projections have already been added to GTSAM.
  //
  // In the case of factor graphs, the factor related to the measurements would be defined as
  //   f4 = (h(x_{t}) - z_{t})^T * R^-1 * (h(x_{t}) - z_{t})
  //      = (x_{t} - z_{t})^T * R^-1 * (x_{t} - z_{t})
  // This can be modeled using the PriorFactor, where the mean is z_{t} and the covariance is R.
  Point2 z1(1.0, 0.0);
  SharedDiagonal R1 = noiseModel::Isotropic::Sigma(2, 0.25);
  PriorFactor<Point2> factor4(X(1), z1, R1);
  // Linearize the factor and add it to the linear factor graph
  linearFactorGraph->push_back(factor4.linearize(linearizationPoints));
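  // For comparison, the textbook Kalman filter update for this step is
  //   K = P_{1|0} * (P_{1|0} + R)^-1 = (0.02/0.0825)*I ~= 0.242*I
  //   x_{1|1} = x_{1|0} + K*(z1 - x_{1|0}) = (1, 0)   (zero innovation here)
  //   P_{1|1} = (I - K)*P_{1|0} ~= 0.0152*I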

  // We have now made the small factor graph f3-(x1)-f4
  // where factor f3 is the prior from the previous time step ( P(x1) )
  // and factor f4 is from the measurement, z1 ( P(z1|x1) )
  // Eliminate this in order x1, to get Bayes net P(x1)
  // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
  // We solve as before...

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1|z1) )
  GaussianBayesNet::shared_ptr updatedBayesNet = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& updatedConditional = updatedBayesNet->back();

  // Extract the current estimate of x1 from the Bayes Network
  VectorValues updatedResult = updatedBayesNet->optimize();
  Point2 x1_update = linearizationPoints.at<Point2>(X(1)) + updatedResult[X(1)];
  traits<Point2>::Print(x1_update, "X1 Update");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(1), x1_update);
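  // Sanity check: the noiseless update should remain at the ground truth
  assert(traits<Point2>::Equals(x1_update, Point2(1.0, 0.0), 1e-6));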

  // Wash, rinse, repeat for another time step
  // Create a new, empty graph and add the prior from the previous step
  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);

  // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  // As before, the linearization point of this prior must be moved to the new estimate of x1;
  // the same key, X(1), is simply reused in the next iteration
  linearFactorGraph->add(
      X(1),
      updatedConditional->R(),
      updatedConditional->d() - updatedConditional->R() * updatedResult[X(1)],
      updatedConditional->get_model());

  // Create the desired ordering
  ordering = Ordering::shared_ptr(new Ordering);
  ordering->push_back(X(1));
  ordering->push_back(X(2));

  // Create a nonlinear factor describing the motion model (moving right again)
  Point2 difference2(1, 0);
  SharedDiagonal Q2 = noiseModel::Isotropic::Sigma(2, 0.1);
  BetweenFactor<Point2> factor6(X(1), X(2), difference2, Q2);

  // Linearize the factor and add it to the linear factor graph
  linearizationPoints.insert(X(2), x1_update);
  linearFactorGraph->push_back(factor6.linearize(linearizationPoints));

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1,x2) = P(x1|x2)*P(x2) )
  GaussianBayesNet::shared_ptr predictionBayesNet2 = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& x2Conditional = predictionBayesNet2->back();

  // Extract the predicted state
  VectorValues prediction2Result = predictionBayesNet2->optimize();
  Point2 x2_predict = linearizationPoints.at<Point2>(X(2)) + prediction2Result[X(2)];
  traits<Point2>::Print(x2_predict, "X2 Predict");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(2), x2_predict);
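  // Sanity check: the second prediction should land on the ground truth (2, 0)
  assert(traits<Point2>::Equals(x2_predict, Point2(2.0, 0.0), 1e-6));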

  // Now add the next measurement
  // Create a new, empty graph and add the prior from the previous step
  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);

  // Convert the root conditional, P(x2) in this case, into a Prior for the next step
  linearFactorGraph->add(
      X(2),
      x2Conditional->R(),
      x2Conditional->d() - x2Conditional->R() * prediction2Result[X(2)],
      x2Conditional->get_model());

  // Create the desired ordering
  ordering = Ordering::shared_ptr(new Ordering);
  ordering->push_back(X(2));

  // And update using z2 ...
  Point2 z2(2.0, 0.0);
  SharedDiagonal R2 = noiseModel::Diagonal::Sigmas((gtsam::Vector2() << 0.25, 0.25).finished());
  PriorFactor<Point2> factor8(X(2), z2, R2);

  // Linearize the factor and add it to the linear factor graph
  linearFactorGraph->push_back(factor8.linearize(linearizationPoints));

  // We have now made the small factor graph f7-(x2)-f8
  // where factor f7 is the prior from the previous time step ( P(x2) )
  // and factor f8 is from the measurement, z2 ( P(z2|x2) )
  // Eliminate this in order x2, to get Bayes net P(x2)
  // As this is a filter, all we need is the posterior P(x2), so we just keep the root of the Bayes net
  // We solve as before...

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x2|z2) )
  GaussianBayesNet::shared_ptr updatedBayesNet2 = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& updatedConditional2 = updatedBayesNet2->back();

  // Extract the current estimate of x2 from the Bayes Network
  VectorValues updatedResult2 = updatedBayesNet2->optimize();
  Point2 x2_update = linearizationPoints.at<Point2>(X(2)) + updatedResult2[X(2)];
  traits<Point2>::Print(x2_update, "X2 Update");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(2), x2_update);
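  // Sanity check: z2 agrees with the prediction, so the mean is unchanged
  assert(traits<Point2>::Equals(x2_update, Point2(2.0, 0.0), 1e-6));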

  // Wash, rinse, repeat for a third time step
  // Create a new, empty graph and add the prior from the previous step
  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);

  // Convert the root conditional, P(x2) in this case, into a Prior for the next step
  linearFactorGraph->add(
      X(2),
      updatedConditional2->R(),
      updatedConditional2->d() - updatedConditional2->R() * updatedResult2[X(2)],
      updatedConditional2->get_model());

  // Create the desired ordering
  ordering = Ordering::shared_ptr(new Ordering);
  ordering->push_back(X(2));
  ordering->push_back(X(3));

  // Create a nonlinear factor describing the motion model
  Point2 difference3(1, 0);
  SharedDiagonal Q3 = noiseModel::Isotropic::Sigma(2, 0.1);
  BetweenFactor<Point2> factor10(X(2), X(3), difference3, Q3);

  // Linearize the factor and add it to the linear factor graph
  linearizationPoints.insert(X(3), x2_update);
  linearFactorGraph->push_back(factor10.linearize(linearizationPoints));

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x2,x3) = P(x2|x3)*P(x3) )
  GaussianBayesNet::shared_ptr predictionBayesNet3 = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& x3Conditional = predictionBayesNet3->back();

  // Extract the predicted state
  VectorValues prediction3Result = predictionBayesNet3->optimize();
  Point2 x3_predict = linearizationPoints.at<Point2>(X(3)) + prediction3Result[X(3)];
  traits<Point2>::Print(x3_predict, "X3 Predict");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(3), x3_predict);
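  // Sanity check: the third prediction should land on the ground truth (3, 0)
  assert(traits<Point2>::Equals(x3_predict, Point2(3.0, 0.0), 1e-6));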

  // Now add the next measurement
  // Create a new, empty graph and add the prior from the previous step
  linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);

  // Convert the root conditional, P(x3) in this case, into a Prior for the next step
  linearFactorGraph->add(
      X(3),
      x3Conditional->R(),
      x3Conditional->d() - x3Conditional->R() * prediction3Result[X(3)],
      x3Conditional->get_model());

  // Create the desired ordering
  ordering = Ordering::shared_ptr(new Ordering);
  ordering->push_back(X(3));

  // And update using z3 ...
  Point2 z3(3.0, 0.0);
  SharedDiagonal R3 = noiseModel::Isotropic::Sigma(2, 0.25);
  PriorFactor<Point2> factor12(X(3), z3, R3);

  // Linearize the factor and add it to the linear factor graph
  linearFactorGraph->push_back(factor12.linearize(linearizationPoints));

  // We have now made the small factor graph f11-(x3)-f12
  // where factor f11 is the prior from the previous time step ( P(x3) )
  // and factor f12 is from the measurement, z3 ( P(z3|x3) )
  // Eliminate this in order x3, to get Bayes net P(x3)
  // As this is a filter, all we need is the posterior P(x3), so we just keep the root of the Bayes net
  // We solve as before...

  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x3|z3) )
  GaussianBayesNet::shared_ptr updatedBayesNet3 = linearFactorGraph->eliminateSequential(*ordering);
  const GaussianConditional::shared_ptr& updatedConditional3 = updatedBayesNet3->back();

  // Extract the current estimate of x3 from the Bayes Network
  VectorValues updatedResult3 = updatedBayesNet3->optimize();
  Point2 x3_update = linearizationPoints.at<Point2>(X(3)) + updatedResult3[X(3)];
  traits<Point2>::Print(x3_update, "X3 Update");

  // Update the linearization point to the new estimate
  linearizationPoints.update(X(3), x3_update);
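  // Sanity check: z3 agrees with the prediction, so the mean is unchanged
  assert(traits<Point2>::Equals(x3_update, Point2(3.0, 0.0), 1e-6));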

  return 0;
}