/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    ConcurrentBatchSmoother.cpp
 * @brief   A Levenberg-Marquardt Batch Smoother that implements the
 *          Concurrent Filtering and Smoothing interface.
 * @author  Stephen Williams
 */

#include <gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h>
#include <gtsam/nonlinear/LinearContainerFactor.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/base/timing.h>
#include <gtsam/base/debug.h>

namespace gtsam {

/* ************************************************************************* */
void ConcurrentBatchSmoother::print(const std::string& s, const KeyFormatter& keyFormatter) const {
  std::cout << s;
  std::cout << "  Factors:" << std::endl;
  for(const NonlinearFactor::shared_ptr& factor: factors_) {
    PrintNonlinearFactor(factor, "  ", keyFormatter);
  }
  theta_.print("Values:\n");
}

/* ************************************************************************* */
bool ConcurrentBatchSmoother::equals(const ConcurrentSmoother& rhs, double tol) const {
  const ConcurrentBatchSmoother* smoother = dynamic_cast<const ConcurrentBatchSmoother*>(&rhs);
  return smoother
      && factors_.equals(smoother->factors_)
      && theta_.equals(smoother->theta_)
      && ordering_.equals(smoother->ordering_)
      && delta_.equals(smoother->delta_)
      && separatorValues_.equals(smoother->separatorValues_);
}

/* ************************************************************************* */
ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearFactorGraph& newFactors, const Values& newTheta,
    const std::optional< std::vector<size_t> >& removeFactorIndices) {

  gttic(update);

  // Create the return result meta-data
  Result result;

  // Update all of the internal variables with the new information
  gttic(augment_system);
  {
    // Add the new variables to theta
    theta_.insert(newTheta);

    // Add new variables to the end of the ordering
    for(const auto key: newTheta.keys()) {
      ordering_.push_back(key);
    }

    // Augment Delta
    delta_.insert(newTheta.zeroVectors());

    // Add the new factors to the graph, updating the variable index
    insertFactors(newFactors);

    if(removeFactorIndices)
      removeFactors(*removeFactorIndices);
  }
  gttoc(augment_system);

  if(factors_.size() > 0) {
    // Reorder the system to ensure efficient optimization (and marginalization) performance
    gttic(reorder);
    reorder();
    gttoc(reorder);

    // Optimize the factors using a modified version of L-M
    gttic(optimize);
    result = optimize();
    gttoc(optimize);
  }

  // TODO: The following code does considerable work, much of which could be redundant given the previous optimization step
  //       Refactor this code to reduce the computational burden

  // Calculate the marginal on the separator from the smoother factors
  if(separatorValues_.size() > 0) {
    gttic(presync);
    updateSmootherSummarization();
    gttoc(presync);
  }

  gttoc(update);

  return result;
}
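
// Illustrative usage sketch (not part of this file): a client typically feeds the
// smoother with factors and linearization points handed off from the filter. The
// graph/values names below are hypothetical placeholders.
//
//   ConcurrentBatchSmoother smoother;
//   NonlinearFactorGraph newFactors;   // e.g. odometry factors older than the filter lag
//   Values newTheta;                   // initial guesses for any new keys in newFactors
//   ConcurrentBatchSmoother::Result result = smoother.update(newFactors, newTheta);
//   std::cout << "error after update: " << result.error << std::endl;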

/* ************************************************************************* */
void ConcurrentBatchSmoother::presync() {

  gttic(presync);

  gttoc(presync);
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::getSummarizedFactors(NonlinearFactorGraph& summarizedFactors, Values& separatorValues) {

  gttic(get_summarized_factors);

  // Copy the previously calculated smoother summarization factors into the output
  summarizedFactors.push_back(smootherSummarization_);

  // Copy the separator values into the output
  separatorValues.insert(separatorValues_);

  gttoc(get_summarized_factors);
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFactors, const Values& smootherValues,
    const NonlinearFactorGraph& summarizedFactors, const Values& separatorValues) {

  gttic(synchronize);

  // Remove the previous filter summarization from the graph
  removeFactors(filterSummarizationSlots_);

  // Insert new linearization points into the values, augment the ordering, and augment delta with zero vectors
  for(const auto key: smootherValues.keys()) {
    if(!theta_.exists(key)) {
      // If this is a new key for theta_, also add it to the ordering and delta.
      const auto& value = smootherValues.at(key);
      delta_.insert(key, Vector::Zero(value.dim()));
      theta_.insert(key, value);
      ordering_.push_back(key);
    } else {
      // If the key already exists in theta_, just update its value.
      const auto& value = smootherValues.at(key);
      theta_.update(key, value);
    }
  }
  for(const auto key: separatorValues.keys()) {
    if(!theta_.exists(key)) {
      // If this is a new key for theta_, also add it to the ordering and delta.
      const auto& value = separatorValues.at(key);
      delta_.insert(key, Vector::Zero(value.dim()));
      theta_.insert(key, value);
      ordering_.push_back(key);
    } else {
      // If the key already exists in theta_, just update its value.
      const auto& value = separatorValues.at(key);
      theta_.update(key, value);
    }
  }

  // Insert the new smoother factors
  insertFactors(smootherFactors);

  // Insert the new filter summarized factors
  filterSummarizationSlots_ = insertFactors(summarizedFactors);

  // Update the stored separator values
  separatorValues_ = separatorValues;

  gttoc(synchronize);
}
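
// Illustrative synchronization sketch: in the concurrent filtering-and-smoothing
// architecture this member is normally driven by the free function
// gtsam::synchronize(filter, smoother) declared in ConcurrentFilteringAndSmoothing.h,
// rather than being called directly. A hypothetical periodic exchange might look like:
//
//   ConcurrentBatchFilter filter;
//   ConcurrentBatchSmoother smoother;
//   // ... run filter.update(...) at high rate, smoother.update() in the background ...
//   synchronize(filter, smoother);  // hands old factors to the smoother and
//                                   // exchanges summarized (marginal) factors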

/* ************************************************************************* */
void ConcurrentBatchSmoother::postsync() {

  gttic(postsync);

  gttoc(postsync);
}

/* ************************************************************************* */
std::vector<size_t> ConcurrentBatchSmoother::insertFactors(const NonlinearFactorGraph& factors) {

  gttic(insert_factors);

  // create the output vector
  std::vector<size_t> slots;
  slots.reserve(factors.size());

  // Insert the factor into an existing hole in the factor graph, if possible
  for(const NonlinearFactor::shared_ptr& factor: factors) {
    size_t slot;
    if(availableSlots_.size() > 0) {
      slot = availableSlots_.front();
      availableSlots_.pop();
      factors_.replace(slot, factor);
    } else {
      slot = factors_.size();
      factors_.push_back(factor);
    }
    slots.push_back(slot);
  }

  gttoc(insert_factors);

  return slots;
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::removeFactors(const std::vector<size_t>& slots) {

  gttic(remove_factors);

  // For each factor slot to delete...
  for(size_t slot: slots) {

    // Remove the factor from the graph
    factors_.remove(slot);

    // Mark the factor slot as available
    availableSlots_.push(slot);
  }

  gttoc(remove_factors);
}
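
// Slot bookkeeping example (hypothetical indices, for illustration only):
// if factors occupy slots {0, 1, 2} and removeFactors({1}) is called, slot 1
// becomes a hole (factors_.remove leaves a null entry there and the index is
// queued in availableSlots_). A subsequent insertFactors(...) with one new
// factor reuses slot 1 via factors_.replace, so the slot indices previously
// returned for other factors remain valid.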

/* ************************************************************************* */
void ConcurrentBatchSmoother::reorder() {

  // Recalculate the variable index
  variableIndex_ = VariableIndex(factors_);

  KeyVector separatorKeys = separatorValues_.keys();
  ordering_ = Ordering::ColamdConstrainedLast(variableIndex_, KeyVector(separatorKeys.begin(), separatorKeys.end()));

}
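
// Ordering note (explanatory): constraining the separator keys to the end of the
// COLAMD ordering means all smoother-only variables are eliminated before the
// separator. As the comment in update() indicates, this benefits both the damped
// solves in optimize() and the marginalization onto the separator performed in
// updateSmootherSummarization().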

/* ************************************************************************* */
ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {

  // Create output result structure
  Result result;
  result.nonlinearVariables = theta_.size() - separatorValues_.size();
  result.linearVariables = separatorValues_.size();

  // Pull out parameters we'll use
  const LevenbergMarquardtParams::VerbosityLM lmVerbosity = parameters_.verbosityLM;
  double lambda = parameters_.lambdaInitial;

  // Create a Values that holds the current evaluation point
  Values evalpoint = theta_.retract(delta_);
  result.error = factors_.error(evalpoint);
  if(result.error < parameters_.errorTol) {
    return result;
  }

  // Use a custom optimization loop so the linearization points can be controlled
  double previousError;
  VectorValues newDelta;
  do {
    previousError = result.error;

    // Do next iteration
    gttic(optimizer_iteration);
    {
      // Linearize the graph around the current linearization point
      GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_);

      // Keep increasing lambda until we make progress
      while(true) {
        if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
          std::cout << "trying lambda = " << lambda << std::endl;

        // Add prior factors at the current solution. Each prior has A = I, b = delta,
        // and sigma = 1/sqrt(lambda), so its error is (lambda/2)*||newDelta - delta||^2,
        // i.e. the Levenberg-Marquardt damping term centered at the current delta.
        gttic(damp);
        GaussianFactorGraph dampedFactorGraph(linearFactorGraph);
        dampedFactorGraph.reserve(linearFactorGraph.size() + delta_.size());
        {
          // for each of the variables, add a prior at the current solution
          for(const VectorValues::KeyValuePair& key_value: delta_) {
            size_t dim = key_value.second.size();
            Matrix A = Matrix::Identity(dim, dim);
            Vector b = key_value.second;
            SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, 1.0 / std::sqrt(lambda));
            GaussianFactor::shared_ptr prior(new JacobianFactor(key_value.first, A, b, model));
            dampedFactorGraph.push_back(prior);
          }
        }
        gttoc(damp);
        if (lmVerbosity >= LevenbergMarquardtParams::DAMPED)
          dampedFactorGraph.print("damped");
        result.lambdas++;

        gttic(solve);
        // Solve the damped Gaussian factor graph
        newDelta = dampedFactorGraph.optimize(ordering_, parameters_.getEliminationFunction());
        // update the evalpoint with the new delta
        evalpoint = theta_.retract(newDelta);
        gttoc(solve);

        if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
          std::cout << "linear delta norm = " << newDelta.norm() << std::endl;
        if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA)
          newDelta.print("delta");

        // Evaluate the new error
        gttic(compute_error);
        double error = factors_.error(evalpoint);
        gttoc(compute_error);

        if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
          std::cout << "next error = " << error << std::endl;

        if(error < result.error) {
          // Keep this change
          // Update the error value
          result.error = error;
          // Update the linearization point
          theta_ = evalpoint;
          // Reset the deltas to zeros
          delta_.setZero();
          // Put the linearization points and deltas back for specific variables
          if(separatorValues_.size() > 0) {
            theta_.update(separatorValues_);
            for(const auto key: separatorValues_.keys()) {
              delta_.at(key) = newDelta.at(key);
            }
          }

          // Decrease lambda for next time
          lambda /= parameters_.lambdaFactor;
          // End this lambda search iteration
          break;
        } else {
          // Reject this change
          if(lambda >= parameters_.lambdaUpperBound) {
            // The maximum lambda has been used. Print a warning and end the search.
            std::cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << std::endl;
            break;
          } else {
            // Increase lambda and continue searching
            lambda *= parameters_.lambdaFactor;
          }
        }
      } // end while
    }
    gttoc(optimizer_iteration);

    if (lmVerbosity >= LevenbergMarquardtParams::LAMBDA)
      std::cout << "using lambda = " << lambda << std::endl;

    result.iterations++;
  } while(result.iterations < (size_t)parameters_.maxIterations &&
      !checkConvergence(parameters_.relativeErrorTol, parameters_.absoluteErrorTol, parameters_.errorTol, previousError, result.error, NonlinearOptimizerParams::SILENT));

  return result;
}
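
// Note on the separator handling above (explanatory): after an accepted step the
// smoother re-linearizes every variable except the separator. Separator keys keep
// the linearization point dictated by the filter (separatorValues_) and carry the
// corresponding entry of newDelta instead, so the linearized information exchanged
// during synchronization stays consistent between the filter and the smoother.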

/* ************************************************************************* */
void ConcurrentBatchSmoother::updateSmootherSummarization() {

  // The smoother summarization factors are the marginal factors on the separator
  // variables obtained by marginalizing out all of the other smoother variables.
  // These marginal factors are cached for later transmission to the filter as
  // linear container factors.

  // Create a nonlinear factor graph without the filter summarization factors
  NonlinearFactorGraph graph(factors_);
  for(size_t slot: filterSummarizationSlots_) {
    graph.remove(slot);
  }

  // Get the set of separator keys
  const KeySet separatorKeys = separatorValues_.keySet();

  // Calculate the marginal factors on the separator
  smootherSummarization_ = internal::calculateMarginalFactors(graph, theta_, separatorKeys, parameters_.getEliminationFunction());
}
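
// Rough sketch of the marginalization step (illustrative only; the real work is
// done by internal::calculateMarginalFactors in ConcurrentFilteringAndSmoothing):
//
//   GaussianFactorGraph linearGraph = *graph.linearize(theta_);
//   KeyVector marginalizeKeys;                        // every key NOT in the separator
//   for (Key key : linearGraph.keys())
//     if (separatorKeys.count(key) == 0) marginalizeKeys.push_back(key);
//   // eliminating marginalizeKeys leaves Gaussian factors over the separator only;
//   // each is wrapped in a LinearContainerFactor anchored at theta_ so it can be
//   // handed to the filter as a nonlinear factor.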

/* ************************************************************************* */
void ConcurrentBatchSmoother::PrintNonlinearFactor(const NonlinearFactor::shared_ptr& factor, const std::string& indent, const KeyFormatter& keyFormatter) {
  std::cout << indent;
  if(factor) {
    if(std::dynamic_pointer_cast<LinearContainerFactor>(factor)) {
      std::cout << "l( ";
    } else {
      std::cout << "f( ";
    }
    for(Key key: *factor) {
      std::cout << keyFormatter(key) << " ";
    }
    std::cout << ")" << std::endl;
  } else {
    std::cout << "{ nullptr }" << std::endl;
  }
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const std::string& indent, const KeyFormatter& keyFormatter) {
  std::cout << indent;
  if(factor) {
    std::cout << "g( ";
    for(Key key: *factor) {
      std::cout << keyFormatter(key) << " ";
    }
    std::cout << ")" << std::endl;
  } else {
    std::cout << "{ nullptr }" << std::endl;
  }
}

/* ************************************************************************* */
}  // namespace gtsam
| 418 | |