Add 2020 Localizer to process ImageMatchResults

This should provide a localizer that can correctly consume the vision
processing results coming from the Pis. It does not actually start up
this localizer yet, although that should just be a 1-line change in
y2020/control_loops/drivetrain/drivetrain_main.cc.
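
For reference, a minimal sketch of that change (assuming the existing
main() constructs a DeadReckonEkf and hands it to the DrivetrainLoop;
only the localizer construction would change):

    // Replaces the dead-reckoning localizer previously passed to the
    // drivetrain loop.
    ::y2020::control_loops::drivetrain::Localizer localizer(
        &event_loop, ::y2020::control_loops::drivetrain::GetDrivetrainConfig());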

Change-Id: Iea8aa50774932ebf19d89ca3a3f4b9cd12dfe446
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index 35aebc7..9fef8f9 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -187,6 +187,7 @@
         ":drivetrain_config",
         ":drivetrain_status_fbs",
         ":hybrid_ekf",
+        "//frc971/control_loops:control_loops_fbs",
         "//frc971/control_loops:pose",
     ],
 )
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index fa723e0..d2c0be3 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -660,14 +660,18 @@
   Q_continuous_(kAngularError, kAngularError) = std::pow(2.0, 2.0);
   // This noise value largely governs whether we will trust the encoders or
   // accelerometer more for estimating the robot position.
+  // Note that this also affects how we interpret camera measurements,
+  // particularly when using a heading/distance/skew measurement--if the
+  // noise on these numbers is too high, then we can end up with weird
+  // dynamics where a camera update both shifts our X/Y position and adjusts
+  // our velocity estimates substantially, causing the camera updates to
+  // create "momentum". If we don't trust the encoders enough, then we have
+  // no way of determining that the velocity updates are bogus. This also
+  // interacts with kVelocityOffsetTimeConstant.
   Q_continuous_(kLongitudinalVelocityOffset, kLongitudinalVelocityOffset) =
       std::pow(1.1, 2.0);
   Q_continuous_(kLateralVelocity, kLateralVelocity) = std::pow(0.1, 2.0);
 
-  P_.setZero();
-  P_.diagonal() << 0.01, 0.01, 0.01, 0.02, 0.01, 0.02, 0.01, 1, 1, 0.03, 0.01,
-      0.01;
-
   H_encoders_and_gyro_.setZero();
   // Encoders are stored directly in the state matrix, so are a minor
   // transform away.
diff --git a/frc971/control_loops/drivetrain/localizer.h b/frc971/control_loops/drivetrain/localizer.h
index d1778b6..43462b8 100644
--- a/frc971/control_loops/drivetrain/localizer.h
+++ b/frc971/control_loops/drivetrain/localizer.h
@@ -36,9 +36,10 @@
 
 // Defines an interface for classes that provide field-global localization.
 class LocalizerInterface {
+ public:
   typedef HybridEkf<double> Ekf;
   typedef typename Ekf::StateIdx StateIdx;
- public:
+
   // Perform a single step of the filter, using the information that is
   // available on every drivetrain iteration.
   // The user should pass in the U that the real system experienced from the
@@ -124,9 +125,6 @@
 // This provides no method for using cameras or the such to get global
 // measurements and just assumes that you can dead-reckon perfectly.
 class DeadReckonEkf : public LocalizerInterface {
-  typedef HybridEkf<double> Ekf;
-  typedef typename Ekf::StateIdx StateIdx;
-
  public:
   DeadReckonEkf(::aos::EventLoop *event_loop,
                 const DrivetrainConfig<double> &dt_config)
diff --git a/y2020/BUILD b/y2020/BUILD
index 9a233d4..f91f532 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -30,7 +30,6 @@
         "//aos/mutex",
         "//aos/network:team_number",
         "//frc971:constants",
-        "//frc971/control_loops:pose",
         "//frc971/control_loops:static_zeroing_single_dof_profiled_subsystem",
         "//y2020/control_loops/drivetrain:polydrivetrain_plants",
         "//y2020/control_loops/superstructure/accelerator:accelerator_plants",
diff --git a/y2020/constants.h b/y2020/constants.h
index ed58777..39f6e36 100644
--- a/y2020/constants.h
+++ b/y2020/constants.h
@@ -7,7 +7,6 @@
 #include <array>
 
 #include "frc971/constants.h"
-#include "frc971/control_loops/pose.h"
 #include "frc971/control_loops/static_zeroing_single_dof_profiled_subsystem.h"
 #include "y2020/control_loops/drivetrain/drivetrain_dog_motor_plant.h"
 #include "y2020/control_loops/superstructure/accelerator/accelerator_plant.h"
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index 9abb2a1..41c6d81 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -1,4 +1,5 @@
 load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("//aos:config.bzl", "aos_config")
 load("//tools/build_rules:select.bzl", "compiler_select", "cpu_select")
 
 genrule(
@@ -53,6 +54,21 @@
 )
 
 cc_library(
+    name = "localizer",
+    srcs = ["localizer.cc"],
+    hdrs = ["localizer.h"],
+    deps = [
+        "//aos/containers:ring_buffer",
+        "//frc971/control_loops:profiled_subsystem_fbs",
+        "//frc971/control_loops/drivetrain:hybrid_ekf",
+        "//frc971/control_loops/drivetrain:localizer",
+        "//y2020:constants",
+        "//y2020/control_loops/superstructure:superstructure_status_fbs",
+        "//y2020/vision/sift:sift_fbs",
+    ],
+)
+
+cc_library(
     name = "drivetrain_base",
     srcs = [
         "drivetrain_base.cc",
@@ -82,6 +98,33 @@
     ],
 )
 
+aos_config(
+    name = "simulation_config",
+    src = "drivetrain_simulation_config.json",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//frc971/control_loops/drivetrain:simulation_channels",
+        "//y2020:config",
+    ],
+)
+
+cc_test(
+    name = "localizer_test",
+    srcs = ["localizer_test.cc"],
+    data = [":simulation_config.json"],
+    deps = [
+        ":drivetrain_base",
+        ":localizer",
+        "//aos/controls:control_loop_test",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:logger",
+        "//aos/network:team_number",
+        "//frc971/control_loops:team_number_test_environment",
+        "//frc971/control_loops/drivetrain:drivetrain_lib",
+        "//frc971/control_loops/drivetrain:drivetrain_test_lib",
+    ],
+)
+
 cc_binary(
     name = "drivetrain_replay",
     srcs = ["drivetrain_replay.cc"],
diff --git a/y2020/control_loops/drivetrain/drivetrain_simulation_config.json b/y2020/control_loops/drivetrain/drivetrain_simulation_config.json
new file mode 100644
index 0000000..f4bfbea
--- /dev/null
+++ b/y2020/control_loops/drivetrain/drivetrain_simulation_config.json
@@ -0,0 +1,6 @@
+{
+  "imports": [
+    "../../y2020.json",
+    "../../../frc971/control_loops/drivetrain/drivetrain_simulation_channels.json"
+  ]
+}
diff --git a/y2020/control_loops/drivetrain/localizer.cc b/y2020/control_loops/drivetrain/localizer.cc
new file mode 100644
index 0000000..a9e454a
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer.cc
@@ -0,0 +1,182 @@
+#include "y2020/control_loops/drivetrain/localizer.h"
+
+#include "y2020/constants.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+
+namespace {
+// Converts a flatbuffer TransformationMatrix to an Eigen matrix. Technically,
+// it should be possible to do this with a single memcpy, but the extra
+// verbosity here seems appropriate.
+Eigen::Matrix<double, 4, 4> FlatbufferToTransformationMatrix(
+    const frc971::vision::sift::TransformationMatrix &flatbuffer) {
+  CHECK_EQ(16u, CHECK_NOTNULL(flatbuffer.data())->size());
+  Eigen::Matrix<double, 4, 4> result;
+  result.setIdentity();
+  for (int row = 0; row < 4; ++row) {
+    for (int col = 0; col < 4; ++col) {
+      result(row, col) = (*flatbuffer.data())[row * 4 + col];
+    }
+  }
+  return result;
+}
+}  // namespace
+
+Localizer::Localizer(
+    aos::EventLoop *event_loop,
+    const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+        &dt_config)
+    : event_loop_(event_loop), dt_config_(dt_config), ekf_(dt_config) {
+  // TODO(james): This doesn't really need to be a watcher; we could just use
+  // a fetcher for the superstructure status. A Fetcher is probably the right
+  // tool here, but the Watcher is simpler for the time being. (Technically it
+  // should even be possible to do everything we need using just a Fetcher,
+  // without maintaining a separate buffer at all, but that seems overly
+  // cute.)
+  event_loop_->MakeWatcher("/superstructure",
+                           [this](const superstructure::Status &status) {
+                             HandleSuperstructureStatus(status);
+                           });
+
+  image_fetchers_.emplace_back(
+      event_loop_->MakeFetcher<frc971::vision::sift::ImageMatchResult>(
+          "/camera"));
+
+  target_selector_.set_has_target(false);
+}
+
+void Localizer::HandleSuperstructureStatus(
+    const y2020::control_loops::superstructure::Status &status) {
+  CHECK(status.has_turret());
+  turret_data_.Push({event_loop_->monotonic_now(), status.turret()->position(),
+                     status.turret()->velocity()});
+}
+
+Localizer::TurretData Localizer::GetTurretDataForTime(
+    aos::monotonic_clock::time_point time) {
+  if (turret_data_.empty()) {
+    return {};
+  }
+
+  aos::monotonic_clock::duration lowest_time_error =
+      aos::monotonic_clock::duration::max();
+  TurretData best_data_match;
+  for (const auto &sample : turret_data_) {
+    const aos::monotonic_clock::duration time_error =
+        std::chrono::abs(sample.receive_time - time);
+    if (time_error < lowest_time_error) {
+      lowest_time_error = time_error;
+      best_data_match = sample;
+    }
+  }
+  return best_data_match;
+}
+
+void Localizer::Update(const ::Eigen::Matrix<double, 2, 1> &U,
+                       aos::monotonic_clock::time_point now,
+                       double left_encoder, double right_encoder,
+                       double gyro_rate, const Eigen::Vector3d &accel) {
+  for (auto &image_fetcher : image_fetchers_) {
+    while (image_fetcher.FetchNext()) {
+      HandleImageMatch(*image_fetcher);
+    }
+  }
+  ekf_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, accel,
+                             now);
+}
+
+void Localizer::HandleImageMatch(
+    const frc971::vision::sift::ImageMatchResult &result) {
+  // TODO(james): compensate for capture time across multiple nodes correctly.
+  aos::monotonic_clock::time_point capture_time(
+      std::chrono::nanoseconds(result.image_monotonic_timestamp_ns()));
+  CHECK(result.has_camera_calibration());
+  // Per the ImageMatchResult specification, we can actually determine whether
+  // the camera is the turret camera just from the presence of the
+  // turret_extrinsics member.
+  const bool is_turret = result.camera_calibration()->has_turret_extrinsics();
+  const TurretData turret_data = GetTurretDataForTime(capture_time);
+  // Ignore readings when the turret is spinning too fast, on the assumption
+  // that the odds of screwing up the time compensation are higher.
+  // Note that the current number here is chosen pretty arbitrarily--1 rad / sec
+  // seems reasonable, but may be unnecessarily low or high.
+  constexpr double kMaxTurretVelocity = 1.0;
+  if (is_turret && std::abs(turret_data.velocity) > kMaxTurretVelocity) {
+    return;
+  }
+  CHECK(result.camera_calibration()->has_fixed_extrinsics());
+  const Eigen::Matrix<double, 4, 4> fixed_extrinsics =
+      FlatbufferToTransformationMatrix(
+          *result.camera_calibration()->fixed_extrinsics());
+  // Calculate the pose of the camera relative to the robot origin.
+  Eigen::Matrix<double, 4, 4> H_camera_robot = fixed_extrinsics;
+  if (is_turret) {
+    H_camera_robot = H_camera_robot *
+                     frc971::control_loops::TransformationMatrixForYaw(
+                         turret_data.position) *
+                     FlatbufferToTransformationMatrix(
+                         *result.camera_calibration()->turret_extrinsics());
+  }
+
+  if (!result.has_camera_poses()) {
+    return;
+  }
+
+  for (const frc971::vision::sift::CameraPose *vision_result :
+       *result.camera_poses()) {
+    if (!vision_result->has_camera_to_target() ||
+        !vision_result->has_field_to_target()) {
+      continue;
+    }
+    const Eigen::Matrix<double, 4, 4> H_target_camera =
+        FlatbufferToTransformationMatrix(*vision_result->camera_to_target());
+    const Eigen::Matrix<double, 4, 4> H_target_field =
+        FlatbufferToTransformationMatrix(*vision_result->field_to_target());
+    // Back out the robot position that is implied by the current camera
+    // reading.
+    const Pose measured_pose(H_target_field *
+                             (H_camera_robot * H_target_camera).inverse());
+    const Eigen::Matrix<double, 3, 1> Z(measured_pose.rel_pos().x(),
+                                        measured_pose.rel_pos().y(),
+                                        measured_pose.rel_theta());
+    // TODO(james): Figure out how to properly handle calculating the
+    // noise. Currently, the values are deliberately tuned so that image updates
+    // will not be trusted overly much. In theory, we should probably also be
+    // populating some cross-correlation terms.
+    // Note that these are the noise standard deviations (they are squared below
+    // to get variances).
+    Eigen::Matrix<double, 3, 1> noises(1.0, 1.0, 0.1);
+    // Augment the noise by the approximate rotational speed of the
+    // camera. This should help account for the fact that, while we are
+    // spinning, slight timing errors in the camera/turret data will tend to
+    // have much more drastic effects on the results.
+    noises *= 1.0 + std::abs((right_velocity() - left_velocity()) /
+                                 (2.0 * dt_config_.robot_radius) +
+                             (is_turret ? turret_data.velocity : 0.0));
+    Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
+    R.diagonal() = noises.cwiseAbs2();
+    Eigen::Matrix<double, HybridEkf::kNOutputs, HybridEkf::kNStates> H;
+    H.setZero();
+    H(0, StateIdx::kX) = 1;
+    H(1, StateIdx::kY) = 1;
+    H(2, StateIdx::kTheta) = 1;
+    ekf_.Correct(Z, nullptr, {}, [H, Z](const State &X, const Input &) {
+                                   Eigen::Vector3d Zhat = H * X;
+                                   // In order to deal with wrapping of the
+                                   // angle, calculate an expected angle that is
+                                   // in the range (Z(2) - pi, Z(2) + pi].
+                                   const double angle_error =
+                                       aos::math::NormalizeAngle(
+                                           X(StateIdx::kTheta) - Z(2));
+                                   Zhat(2) = Z(2) + angle_error;
+                                   return Zhat;
+                                 },
+                 [H](const State &) { return H; }, R, capture_time);
+  }
+}
+
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2020
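
Two notes on the math in HandleImageMatch() above. First, the pose back-out:
H_camera_robot * H_target_camera is the target's pose in the robot frame, so

    H_robot_field = H_target_field * (H_camera_robot * H_target_camera)^-1

is the robot's pose in the field frame, whose x/y/theta become the measurement
Z. Second, the expected-measurement function passed to ekf_.Correct() shifts
the heading into (Z(2) - pi, Z(2) + pi] so that the residual never wraps
through +/-pi. A self-contained sketch of that trick (this NormalizeAngle is a
stand-in for aos::math::NormalizeAngle):

    #include <cmath>

    // Maps an angle into [-pi, pi]; stands in for aos::math::NormalizeAngle.
    double NormalizeAngle(double theta) {
      return std::remainder(theta, 2.0 * M_PI);
    }

    // Expected heading measurement, shifted so that the residual Zhat - Z
    // stays small even when the estimate and measurement straddle +/-pi.
    double ExpectedHeading(double theta_estimate, double z_theta) {
      return z_theta + NormalizeAngle(theta_estimate - z_theta);
    }

    // E.g., theta_estimate = 3.1 and z_theta = -3.1: the naive residual would
    // be 6.2 rad; ExpectedHeading() gives ~-0.08 rad instead.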
diff --git a/y2020/control_loops/drivetrain/localizer.h b/y2020/control_loops/drivetrain/localizer.h
new file mode 100644
index 0000000..0b79361
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer.h
@@ -0,0 +1,102 @@
+#ifndef Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+#define Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+
+#include "aos/containers/ring_buffer.h"
+#include "aos/events/event_loop.h"
+#include "frc971/control_loops/drivetrain/hybrid_ekf.h"
+#include "frc971/control_loops/drivetrain/localizer.h"
+#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
+#include "y2020/vision/sift/sift_generated.h"
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+
+// This class handles the localization for the 2020 robot. In order to handle
+// camera updates, we get the ImageMatchResult message from the cameras and then
+// project the result onto the 2-D X/Y plane and use the implied robot
+// position/heading from that as the measurement. This is distinct from 2019,
+// when we used a heading/distance/skew measurement update; we update with
+// x/y/theta directly because it seems to be better conditioned (even if it
+// may not reflect the measurement noise quite as accurately). The poorly
+// conditioned update worked in 2019, but with the addition of a couple of
+// velocity offset states that allow us to use the accelerometer more
+// effectively, things started to become unstable.
+class Localizer : public frc971::control_loops::drivetrain::LocalizerInterface {
+ public:
+  typedef frc971::control_loops::TypedPose<double> Pose;
+  typedef frc971::control_loops::drivetrain::HybridEkf<double> HybridEkf;
+  typedef typename HybridEkf::State State;
+  typedef typename HybridEkf::StateIdx StateIdx;
+  typedef typename HybridEkf::StateSquare StateSquare;
+  typedef typename HybridEkf::Input Input;
+  typedef typename HybridEkf::Output Output;
+  Localizer(aos::EventLoop *event_loop,
+            const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+                &dt_config);
+  State Xhat() const override { return ekf_.X_hat(); }
+  frc971::control_loops::drivetrain::TrivialTargetSelector *target_selector()
+      override {
+    return &target_selector_;
+  }
+
+  void Update(const ::Eigen::Matrix<double, 2, 1> &U,
+              aos::monotonic_clock::time_point now, double left_encoder,
+              double right_encoder, double gyro_rate,
+              const Eigen::Vector3d &accel) override;
+
+  void Reset(aos::monotonic_clock::time_point t, const State &state) {
+    ekf_.ResetInitialState(t, state, ekf_.P());
+  }
+
+  void ResetPosition(aos::monotonic_clock::time_point t, double x, double y,
+                     double theta, double /*theta_override*/,
+                     bool /*reset_theta*/) override {
+    const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
+    const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
+    ekf_.ResetInitialState(t, (Ekf::State() << x, y, theta, left_encoder, 0,
+                               right_encoder, 0, 0, 0, 0, 0, 0).finished(),
+                           ekf_.P());
+  }
+
+ private:
+  // Storage for a single turret position data point.
+  struct TurretData {
+    aos::monotonic_clock::time_point receive_time =
+        aos::monotonic_clock::min_time;
+    double position = 0.0;  // rad
+    double velocity = 0.0;  // rad/sec
+  };
+
+  // Processes new image data and updates the EKF.
+  void HandleImageMatch(const frc971::vision::sift::ImageMatchResult &result);
+
+  // Processes the most recent turret position and stores it in the turret_data_
+  // buffer.
+  void HandleSuperstructureStatus(
+      const y2020::control_loops::superstructure::Status &status);
+
+  // Retrieves the turret data closest to the provided time.
+  TurretData GetTurretDataForTime(aos::monotonic_clock::time_point time);
+
+  aos::EventLoop *const event_loop_;
+  const frc971::control_loops::drivetrain::DrivetrainConfig<double> dt_config_;
+  HybridEkf ekf_;
+
+  std::vector<aos::Fetcher<frc971::vision::sift::ImageMatchResult>>
+      image_fetchers_;
+
+  // Buffer of recent turret data--this is used so that when we receive a camera
+  // frame from the turret, we can back out what the turret angle was at that
+  // time.
+  aos::RingBuffer<TurretData, 200> turret_data_;
+
+  // Target selector to allow us to satisfy the LocalizerInterface requirements.
+  frc971::control_loops::drivetrain::TrivialTargetSelector target_selector_;
+};
+
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2020
+
+#endif  // Y2020_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
new file mode 100644
index 0000000..910cd31
--- /dev/null
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -0,0 +1,335 @@
+#include <queue>
+
+#include "gtest/gtest.h"
+
+#include "aos/controls/control_loop_test.h"
+#include "aos/events/logging/logger.h"
+#include "aos/network/team_number.h"
+#include "frc971/control_loops/drivetrain/drivetrain.h"
+#include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
+#include "frc971/control_loops/team_number_test_environment.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+#include "y2020/control_loops/drivetrain/localizer.h"
+
+DEFINE_string(output_file, "",
+              "If set, logs all channels to the provided logfile.");
+
+// This file tests that the full 2020 localizer behaves sanely.
+
+namespace y2020 {
+namespace control_loops {
+namespace drivetrain {
+namespace testing {
+
+using frc971::control_loops::drivetrain::DrivetrainConfig;
+using frc971::control_loops::drivetrain::Goal;
+using frc971::control_loops::drivetrain::LocalizerControl;
+using frc971::vision::sift::ImageMatchResult;
+using frc971::vision::sift::ImageMatchResultT;
+using frc971::vision::sift::CameraPoseT;
+using frc971::vision::sift::CameraCalibrationT;
+using frc971::vision::sift::TransformationMatrixT;
+
+namespace {
+DrivetrainConfig<double> GetTest2020DrivetrainConfig() {
+  DrivetrainConfig<double> config = GetDrivetrainConfig();
+  return config;
+}
+
+// Copies an Eigen matrix into a row-major vector of the data.
+std::vector<float> MatrixToVector(const Eigen::Matrix<double, 4, 4> &H) {
+  std::vector<float> data;
+  for (int row = 0; row < 4; ++row) {
+    for (int col = 0; col < 4; ++col) {
+      data.push_back(H(row, col));
+    }
+  }
+  return data;
+}
+
+// Provides the location of the turret to use for simulation. Mostly we care
+// about providing a location that is not perfectly aligned with the robot's
+// origin.
+Eigen::Matrix<double, 4, 4> TurretRobotTransformation() {
+  Eigen::Matrix<double, 4, 4> H;
+  H.setIdentity();
+  H.block<3, 1>(0, 3) << 1, 1, 1;
+  return H;
+}
+
+// Provides the location of the camera on the turret.
+// TODO(james): Also simulate a fixed camera that is *not* on the turret.
+Eigen::Matrix<double, 4, 4> CameraTurretTransformation() {
+  Eigen::Matrix<double, 4, 4> H;
+  H.setIdentity();
+  H.block<3, 1>(0, 3) << 0.1, 0, 0;
+  // Introduce a bit of pitch to make sure that we're exercising all the code.
+  H.block<3, 3>(0, 0) =
+      Eigen::AngleAxis<double>(M_PI_2, Eigen::Vector3d::UnitY()) *
+      H.block<3, 3>(0, 0);
+  return H;
+}
+
+// The absolute target location to use. Not meant to correspond with a
+// particular field target.
+// TODO(james): Make more targets.
+Eigen::Matrix<double, 4, 4> TargetLocation() {
+  Eigen::Matrix<double, 4, 4> H;
+  H.setIdentity();
+  H.block<3, 1>(0, 3) << 10.0, 0, 0;
+  return H;
+}
+}  // namespace
+
+namespace chrono = std::chrono;
+using frc971::control_loops::drivetrain::testing::DrivetrainSimulation;
+using frc971::control_loops::drivetrain::DrivetrainLoop;
+using frc971::control_loops::drivetrain::testing::GetTestDrivetrainConfig;
+using aos::monotonic_clock;
+
+class LocalizedDrivetrainTest : public ::aos::testing::ControlLoopTest {
+ protected:
+  // We must use the 2020 drivetrain config so that we don't have to deal
+  // with shifting:
+  LocalizedDrivetrainTest()
+      : aos::testing::ControlLoopTest(
+            aos::configuration::ReadConfig(
+                "y2020/control_loops/drivetrain/simulation_config.json"),
+            GetTest2020DrivetrainConfig().dt),
+        test_event_loop_(MakeEventLoop("test")),
+        drivetrain_goal_sender_(
+            test_event_loop_->MakeSender<Goal>("/drivetrain")),
+        drivetrain_goal_fetcher_(
+            test_event_loop_->MakeFetcher<Goal>("/drivetrain")),
+        localizer_control_sender_(
+            test_event_loop_->MakeSender<LocalizerControl>("/drivetrain")),
+        drivetrain_event_loop_(MakeEventLoop("drivetrain")),
+        dt_config_(GetTest2020DrivetrainConfig()),
+        camera_sender_(
+            test_event_loop_->MakeSender<ImageMatchResult>("/camera")),
+        localizer_(drivetrain_event_loop_.get(), dt_config_),
+        drivetrain_(dt_config_, drivetrain_event_loop_.get(), &localizer_),
+        drivetrain_plant_event_loop_(MakeEventLoop("plant")),
+        drivetrain_plant_(drivetrain_plant_event_loop_.get(), dt_config_),
+        last_frame_(monotonic_now()) {
+    set_team_id(frc971::control_loops::testing::kTeamNumber);
+    SetStartingPosition({3.0, 2.0, 0.0});
+    set_battery_voltage(12.0);
+
+    if (!FLAGS_output_file.empty()) {
+      log_buffer_writer_ = std::make_unique<aos::logger::DetachedBufferWriter>(
+          FLAGS_output_file);
+      logger_event_loop_ = MakeEventLoop("logger");
+      logger_ = std::make_unique<aos::logger::Logger>(log_buffer_writer_.get(),
+                                                      logger_event_loop_.get());
+    }
+
+    test_event_loop_->MakeWatcher(
+        "/drivetrain",
+        [this](const frc971::control_loops::drivetrain::Status &) {
+          // We need to do camera updates right after the control loop runs.
+          if (enable_cameras_) {
+            SendDelayedFrames();
+            if (last_frame_ + std::chrono::milliseconds(100) <
+                monotonic_now()) {
+              CaptureFrames();
+              last_frame_ = monotonic_now();
+            }
+          }
+        });
+
+    // Run for enough time to allow the gyro/imu zeroing code to run.
+    RunFor(std::chrono::seconds(10));
+  }
+
+  ~LocalizedDrivetrainTest() override {}
+
+  void SetStartingPosition(const Eigen::Matrix<double, 3, 1> &xytheta) {
+    *drivetrain_plant_.mutable_state() << xytheta.x(), xytheta.y(),
+        xytheta(2, 0), 0.0, 0.0;
+    Eigen::Matrix<double, Localizer::HybridEkf::kNStates, 1> localizer_state;
+    localizer_state.setZero();
+    localizer_state.block<3, 1>(0, 0) = xytheta;
+    localizer_.Reset(monotonic_now(), localizer_state);
+  }
+
+  void VerifyNearGoal(double eps = 1e-3) {
+    drivetrain_goal_fetcher_.Fetch();
+    EXPECT_NEAR(drivetrain_goal_fetcher_->left_goal(),
+                drivetrain_plant_.GetLeftPosition(), eps);
+    EXPECT_NEAR(drivetrain_goal_fetcher_->right_goal(),
+                drivetrain_plant_.GetRightPosition(), eps);
+  }
+
+  void VerifyEstimatorAccurate(double eps) {
+    const Eigen::Matrix<double, 5, 1> true_state = drivetrain_plant_.state();
+    EXPECT_NEAR(localizer_.x(), true_state(0, 0), eps);
+    EXPECT_NEAR(localizer_.y(), true_state(1, 0), eps);
+    EXPECT_NEAR(localizer_.theta(), true_state(2, 0), eps);
+    EXPECT_NEAR(localizer_.left_velocity(), true_state(3, 0), eps);
+    EXPECT_NEAR(localizer_.right_velocity(), true_state(4, 0), eps);
+  }
+
+  // Goes through and captures frames on the camera(s), queueing them up to be
+  // sent by SendDelayedFrames().
+  void CaptureFrames() {
+    const frc971::control_loops::Pose robot_pose(
+        {drivetrain_plant_.GetPosition().x(),
+         drivetrain_plant_.GetPosition().y(), 0.0},
+        drivetrain_plant_.state()(2, 0));
+    std::unique_ptr<ImageMatchResultT> frame(new ImageMatchResultT());
+
+    // TODO(james): Test with more than one (and no) target(s).
+    {
+      std::unique_ptr<CameraPoseT> camera_target(new CameraPoseT());
+
+      camera_target->field_to_target.reset(new TransformationMatrixT());
+      camera_target->field_to_target->data = MatrixToVector(TargetLocation());
+
+      // TODO(james): Use non-zero turret angles.
+      camera_target->camera_to_target.reset(new TransformationMatrixT());
+      camera_target->camera_to_target->data = MatrixToVector(
+          (robot_pose.AsTransformationMatrix() * TurretRobotTransformation() *
+           CameraTurretTransformation())
+              .inverse() *
+          TargetLocation());
+
+      frame->camera_poses.emplace_back(std::move(camera_target));
+    }
+
+    frame->image_monotonic_timestamp_ns =
+        chrono::duration_cast<chrono::nanoseconds>(
+            monotonic_now().time_since_epoch())
+            .count();
+    frame->camera_calibration.reset(new CameraCalibrationT());
+    {
+      frame->camera_calibration->fixed_extrinsics.reset(
+          new TransformationMatrixT());
+      TransformationMatrixT *H_turret_robot =
+          frame->camera_calibration->fixed_extrinsics.get();
+      H_turret_robot->data = MatrixToVector(TurretRobotTransformation());
+    }
+    {
+      frame->camera_calibration->turret_extrinsics.reset(
+          new TransformationMatrixT());
+      TransformationMatrixT *H_camera_turret =
+          frame->camera_calibration->turret_extrinsics.get();
+      H_camera_turret->data = MatrixToVector(CameraTurretTransformation());
+    }
+
+    camera_delay_queue_.emplace(monotonic_now(), std::move(frame));
+  }
+
+  // Actually sends out all the camera frames.
+  void SendDelayedFrames() {
+    const std::chrono::milliseconds camera_latency(150);
+    while (!camera_delay_queue_.empty() &&
+           std::get<0>(camera_delay_queue_.front()) <
+               monotonic_now() - camera_latency) {
+      auto builder = camera_sender_.MakeBuilder();
+      ASSERT_TRUE(builder.Send(ImageMatchResult::Pack(
+          *builder.fbb(), std::get<1>(camera_delay_queue_.front()).get())));
+      camera_delay_queue_.pop();
+    }
+  }
+
+  std::unique_ptr<aos::EventLoop> test_event_loop_;
+  aos::Sender<Goal> drivetrain_goal_sender_;
+  aos::Fetcher<Goal> drivetrain_goal_fetcher_;
+  aos::Sender<LocalizerControl> localizer_control_sender_;
+
+  std::unique_ptr<aos::EventLoop> drivetrain_event_loop_;
+  const frc971::control_loops::drivetrain::DrivetrainConfig<double>
+      dt_config_;
+
+  aos::Sender<ImageMatchResult> camera_sender_;
+
+  Localizer localizer_;
+  DrivetrainLoop drivetrain_;
+
+  std::unique_ptr<aos::EventLoop> drivetrain_plant_event_loop_;
+  DrivetrainSimulation drivetrain_plant_;
+  monotonic_clock::time_point last_frame_;
+
+  // A queue of camera frames so that we can add a time delay to the data
+  // coming from the cameras.
+  std::queue<std::tuple<aos::monotonic_clock::time_point,
+                        std::unique_ptr<ImageMatchResultT>>>
+      camera_delay_queue_;
+
+  void set_enable_cameras(bool enable) { enable_cameras_ = enable; }
+
+ private:
+  bool enable_cameras_ = false;
+
+  std::unique_ptr<aos::EventLoop> logger_event_loop_;
+  std::unique_ptr<aos::logger::DetachedBufferWriter> log_buffer_writer_;
+  std::unique_ptr<aos::logger::Logger> logger_;
+};
+
+// Tests that no camera updates, combined with a perfect model, results in no
+// error.
+TEST_F(LocalizedDrivetrainTest, NoCameraUpdate) {
+  SetEnabled(true);
+  set_enable_cameras(false);
+  VerifyEstimatorAccurate(1e-7);
+  {
+    auto builder = drivetrain_goal_sender_.MakeBuilder();
+
+    Goal::Builder drivetrain_builder = builder.MakeBuilder<Goal>();
+    drivetrain_builder.add_controller_type(
+        frc971::control_loops::drivetrain::ControllerType::MOTION_PROFILE);
+    drivetrain_builder.add_left_goal(-1.0);
+    drivetrain_builder.add_right_goal(1.0);
+
+    EXPECT_TRUE(builder.Send(drivetrain_builder.Finish()));
+  }
+  RunFor(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate(5e-4);
+}
+
+// Tests that camera updates with a perfect model result in no errors.
+TEST_F(LocalizedDrivetrainTest, PerfectCameraUpdate) {
+  SetEnabled(true);
+  set_enable_cameras(true);
+  auto builder = drivetrain_goal_sender_.MakeBuilder();
+
+  Goal::Builder drivetrain_builder = builder.MakeBuilder<Goal>();
+  drivetrain_builder.add_controller_type(
+      frc971::control_loops::drivetrain::ControllerType::MOTION_PROFILE);
+  drivetrain_builder.add_left_goal(-1.0);
+  drivetrain_builder.add_right_goal(1.0);
+
+  EXPECT_TRUE(builder.Send(drivetrain_builder.Finish()));
+  RunFor(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate(5e-4);
+}
+
+// Tests that camera updates with a constant initial error in the position
+// result in convergence.
+TEST_F(LocalizedDrivetrainTest, InitialPositionError) {
+  SetEnabled(true);
+  set_enable_cameras(true);
+  drivetrain_plant_.mutable_state()->topRows(3) +=
+      Eigen::Vector3d(0.1, 0.1, 0.01);
+  auto builder = drivetrain_goal_sender_.MakeBuilder();
+
+  Goal::Builder drivetrain_builder = builder.MakeBuilder<Goal>();
+  drivetrain_builder.add_controller_type(
+      frc971::control_loops::drivetrain::ControllerType::MOTION_PROFILE);
+  drivetrain_builder.add_left_goal(-1.0);
+  drivetrain_builder.add_right_goal(1.0);
+
+  EXPECT_TRUE(builder.Send(drivetrain_builder.Finish()));
+  // Give the filters enough time to converge.
+  RunFor(chrono::seconds(10));
+  VerifyNearGoal(5e-3);
+  VerifyEstimatorAccurate(1e-2);
+}
+
+}  // namespace testing
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2020
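
The new test can be run with:

    bazel test //y2020/control_loops/drivetrain:localizer_test

and the --output_file flag defined at the top of localizer_test.cc can
presumably be forwarded via bazel's --test_arg (e.g.
--test_arg=--output_file=/tmp/localizer_test.log, path chosen arbitrarily) to
log all channels for offline inspection.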