Program Listing for File amcl_core.hpp

Source file: include/beluga/algorithm/amcl_core.hpp

// Copyright 2024 Ekumen, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef BELUGA_ALGORITHM_AMCL_CORE_HPP
#define BELUGA_ALGORITHM_AMCL_CORE_HPP

#include <execution>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>

#include <beluga/beluga.hpp>

#include <range/v3/range/concepts.hpp>
#include <range/v3/view/any_view.hpp>
#include <range/v3/view/take_exactly.hpp>

namespace beluga {

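// Parameters used to construct the AMCL implementation below.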
struct AmclParams {
  double update_min_d = 0.25;           // Translational movement required before triggering a filter update [meters].
  double update_min_a = 0.2;            // Rotational movement required before triggering a filter update [radians].
  std::size_t resample_interval = 1UL;  // Number of filter updates required before resampling.
  bool selective_resampling = false;    // Whether to enable selective resampling to help avoid loss of particle diversity.
  std::size_t min_particles = 500UL;    // Minimum allowed number of particles.
  std::size_t max_particles = 2000UL;   // Maximum allowed number of particles.
  double alpha_slow = 0.001;            // Exponential decay rate for the slow average weight filter, used for random particle recovery.
  double alpha_fast = 0.1;              // Exponential decay rate for the fast average weight filter, used for random particle recovery.
  double kld_epsilon = 0.05;            // Maximum particle filter population error between the true and estimated distributions.
  double kld_z = 3.0;                   // Upper standard normal quantile for (1 - p), where p is the probability that the error
                                        // in the estimated distribution will be less than kld_epsilon.
};
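// Generic implementation of the Adaptive Monte Carlo Localization (AMCL) algorithm,
// parameterized by a motion model, a sensor model, a generator of random candidate states,
// a spatial hasher used for KLD-based resampling, and an execution policy.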
template <
    class MotionModel,
    class SensorModel,
    class RandomStateGenerator,
    typename WeightT = beluga::Weight,
    class ParticleType = std::tuple<typename SensorModel::state_type, WeightT>,
    class ExecutionPolicy = std::execution::sequenced_policy>
class Amcl {
  static_assert(
      std::is_same_v<ExecutionPolicy, std::execution::parallel_policy> or
      std::is_same_v<ExecutionPolicy, std::execution::sequenced_policy>);

  using particle_type = ParticleType;
  using measurement_type = typename SensorModel::measurement_type;
  using state_type = typename SensorModel::state_type;
  using map_type = typename SensorModel::map_type;
  using spatial_hasher_type = spatial_hash<state_type>;
  using random_state_generator_type = RandomStateGenerator;
  using estimation_type =
      std::invoke_result_t<decltype(beluga::estimate<std::vector<state_type>>), std::vector<state_type>>;

 public:

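  // Constructs the filter from its collaborating components.
  // When selective resampling is enabled, resampling is additionally gated on a drop
  // of the effective sample size.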
  Amcl(
      MotionModel motion_model,
      SensorModel sensor_model,
      RandomStateGenerator random_state_generator,
      spatial_hasher_type spatial_hasher,
      const AmclParams& params = AmclParams{},
      ExecutionPolicy execution_policy = std::execution::seq)
      : params_{params},
        motion_model_{std::move(motion_model)},
        sensor_model_{std::move(sensor_model)},
        execution_policy_{std::move(execution_policy)},
        spatial_hasher_{std::move(spatial_hasher)},
        random_probability_estimator_{params_.alpha_slow, params_.alpha_fast},
        update_policy_{beluga::policies::on_motion(params_.update_min_d, params_.update_min_a)},
        resample_policy_{beluga::policies::every_n(params_.resample_interval)},
        random_state_generator_(std::move(random_state_generator)) {
    if (params_.selective_resampling) {
      resample_policy_ = resample_policy_ && beluga::policies::on_effective_size_drop;
    }
  }

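  // Returns a read-only reference to the current particle set.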
  [[nodiscard]] const auto& particles() const { return particles_; }

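  // Initializes the filter by drawing max_particles samples from the given state distribution.
  // A filter update is forced on the next call to update().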
  template <class Distribution>
  void initialize(Distribution distribution) {
    particles_ = beluga::views::sample(std::move(distribution)) |                    //
                 ranges::views::transform(beluga::make_from_state<particle_type>) |  //
                 ranges::views::take_exactly(params_.max_particles) |                //
                 ranges::to<beluga::TupleVector>;
    force_update_ = true;
  }
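  // Initializes the filter with samples drawn from a multivariate normal distribution
  // centered at the given pose with the given covariance.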
  template <class CovarianceT>
  void initialize(state_type pose, CovarianceT covariance) {
    initialize(beluga::MultivariateNormalDistribution{pose, covariance});
  }

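  // Replaces the map used by the sensor model.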
  void update_map(map_type map) { sensor_model_.update_map(std::move(map)); }
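  // Runs one iteration of the filter given a control action and a measurement.
  //
  // Particles are propagated with the motion model and reweighted with the sensor model only when
  // the motion update policy triggers or an update has been forced. Resampling, with KLD-based
  // adaptation of the sample size and optional injection of random states for recovery, happens
  // according to the resample policy. Returns the updated estimate, or std::nullopt if no update
  // was performed.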
  auto update(state_type control_action, measurement_type measurement) -> std::optional<estimation_type> {
    if (particles_.empty()) {
      return std::nullopt;
    }

    if (!update_policy_(control_action) && !force_update_) {
      return std::nullopt;
    }

    particles_ |= beluga::actions::propagate(
                      execution_policy_, motion_model_(control_action_window_ << std::move(control_action))) |  //
                  beluga::actions::reweight(execution_policy_, sensor_model_(std::move(measurement))) |         //
                  beluga::actions::normalize(execution_policy_);

    const double random_state_probability = random_probability_estimator_(particles_);

    if (resample_policy_(particles_)) {
      auto random_state = ranges::compose(beluga::make_from_state<particle_type>, get_random_state_generator());

      if (random_state_probability > 0.0) {
        random_probability_estimator_.reset();
      }

      particles_ |= beluga::views::sample |
                    beluga::views::random_intersperse(std::move(random_state), random_state_probability) |
                    beluga::views::take_while_kld(
                        spatial_hasher_,        //
                        params_.min_particles,  //
                        params_.max_particles,  //
                        params_.kld_epsilon,    //
                        params_.kld_z) |
                    beluga::actions::assign;
    }

    force_update_ = false;
    return beluga::estimate(beluga::views::states(particles_), beluga::views::weights(particles_));
  }

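  // Forces a filter update on the next call to update(), regardless of the motion update policy.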
  void force_update() { force_update_ = true; }

 private:
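  // Returns a callable that produces random states: the generator itself if it is invocable with
  // no arguments, or the result of invoking it with the current particle set otherwise.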
  [[nodiscard]] decltype(auto) get_random_state_generator() const {
    if constexpr (std::is_invocable_v<random_state_generator_type>) {
      return random_state_generator_;
    } else {
      return random_state_generator_(particles_);
    }
  }

  beluga::TupleVector<particle_type> particles_;

  AmclParams params_;

  MotionModel motion_model_;
  SensorModel sensor_model_;
  ExecutionPolicy execution_policy_;

  spatial_hasher_type spatial_hasher_;
  beluga::ThrunRecoveryProbabilityEstimator random_probability_estimator_;
  beluga::any_policy<state_type> update_policy_;
  beluga::any_policy<decltype(particles_)> resample_policy_;

  random_state_generator_type random_state_generator_;

  beluga::RollingWindow<state_type, 2> control_action_window_;

  bool force_update_{true};
};

}  // namespace beluga

#endif  // BELUGA_ALGORITHM_AMCL_CORE_HPP