Fold y2019 localizer into drivetrain

Wire the camera-based y2019 localizer into the drivetrain: add an
EventLoopLocalizer that fetches CameraFrame messages from a new camera
queue and feeds them to the localizer, and add a
localized_drivetrain_test that exercises the full pipeline.

Change-Id: Icc192ae9f910741b54f114ec9a27559cc289b29b
diff --git a/frc971/control_loops/drivetrain/drivetrain.cc b/frc971/control_loops/drivetrain/drivetrain.cc
index 9cf71f7..19e604d 100644
--- a/frc971/control_loops/drivetrain/drivetrain.cc
+++ b/frc971/control_loops/drivetrain/drivetrain.cc
@@ -231,6 +231,8 @@
     Y << position->left_encoder, position->right_encoder, last_gyro_rate_,
         last_accel_;
     kf_.Correct(Y);
+    // TODO(james): Account for delayed_U as appropriate (should be
+    // last_last_*_voltage).
     localizer_->Update({last_left_voltage_, last_right_voltage_}, monotonic_now,
                        position->left_encoder, position->right_encoder,
                        last_gyro_rate_, last_accel_);
diff --git a/y2019/constants.cc b/y2019/constants.cc
index d77d2c7..7baac55 100644
--- a/y2019/constants.cc
+++ b/y2019/constants.cc
@@ -23,6 +23,7 @@
 using ::frc971::zeroing::PotAndAbsoluteEncoderZeroingEstimator;
 
 const int Values::kZeroingSampleSize;
+constexpr size_t Values::kNumCameras;
 
 namespace {
 
@@ -122,6 +123,12 @@
   stilts_params->zeroing_constants.moving_buffer_size = 20;
   stilts_params->zeroing_constants.allowable_encoder_error = 0.9;
 
+  r->camera_noise_parameters = {.max_viewable_distance = 10.0,
+                                .heading_noise = 0.02,
+                                .nominal_distance_noise = 0.06,
+                                .nominal_skew_noise = 0.1,
+                                .nominal_height_noise = 0.01};
+
   switch (team) {
     // A set of constants for tests.
     case 1:
@@ -135,6 +142,17 @@
 
       stilts_params->zeroing_constants.measured_absolute_position = 0.0;
       stilts->potentiometer_offset = 0.0;
+
+      // Deliberately make FOV a bit large so that we are overly conservative in
+      // our EKF.
+      for (auto &camera : r->cameras) {
+        camera.fov = M_PI_2 * 1.1;
+      }
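+      // Camera headings relative to the robot: one camera facing backwards,
+      // two angled slightly off of straight ahead, and one on each side.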
+      r->cameras[0].pose.set_theta(M_PI);
+      r->cameras[1].pose.set_theta(0.26);
+      r->cameras[2].pose.set_theta(-0.26);
+      r->cameras[3].pose.set_theta(M_PI_2);
+      r->cameras[4].pose.set_theta(-M_PI_2);
       break;
 
     case kCompTeamNumber:
diff --git a/y2019/constants.h b/y2019/constants.h
index 24d8b92..2fbcc5b 100644
--- a/y2019/constants.h
+++ b/y2019/constants.h
@@ -190,6 +190,21 @@
       intake;
 
   PotAndAbsConstants stilts;
+
+  struct CameraCalibration {
+    // Constants matrix to send to the camera for calibration.
+    Eigen::Matrix<float, 3, 4> for_camera;
+    // Pose of the camera relative to the robot.
+    ::frc971::control_loops::TypedPose<double> pose;
+    // Field of view, in radians. This is total horizontal FOV, from left
+    // edge to right edge of the camera image.
+    double fov;
+  };
+
+  static constexpr size_t kNumCameras = 5;
+  ::std::array<CameraCalibration, kNumCameras> cameras;
+  control_loops::TypedCamera<Field::kNumTargets, Field::kNumObstacles,
+                             double>::NoiseParameters camera_noise_parameters;
 };
 
 // Creates (once) a Values instance for ::aos::network::GetTeamNumber() and
diff --git a/y2019/control_loops/drivetrain/BUILD b/y2019/control_loops/drivetrain/BUILD
index 310b853..677726e 100644
--- a/y2019/control_loops/drivetrain/BUILD
+++ b/y2019/control_loops/drivetrain/BUILD
@@ -76,12 +76,20 @@
     visibility = ["//visibility:public"],
     deps = [
         ":drivetrain_base",
+        ":event_loop_localizer",
         "//aos:init",
         "//aos/events:shm-event-loop",
         "//frc971/control_loops/drivetrain:drivetrain_lib",
     ],
 )
 
+queue_library(
+    name = "camera_queue",
+    srcs = [
+        "camera.q",
+    ],
+)
+
 cc_library(
     name = "camera",
     srcs = ["camera.h"],
@@ -111,6 +119,18 @@
     ],
 )
 
+cc_library(
+    name = "event_loop_localizer",
+    srcs = ["event_loop_localizer.cc"],
+    hdrs = ["event_loop_localizer.h"],
+    deps = [
+        ":camera_queue",
+        ":localizer",
+        "//frc971/control_loops/drivetrain:localizer",
+        "//y2019:constants",
+    ],
+)
+
 cc_test(
     name = "localizer_test",
     srcs = ["localizer_test.cc"],
@@ -139,3 +159,19 @@
         "arm": [],
     }),
 )
+
+cc_test(
+    name = "localized_drivetrain_test",
+    srcs = ["localized_drivetrain_test.cc"],
+    deps = [
+        ":camera_queue",
+        ":drivetrain_base",
+        ":event_loop_localizer",
+        ":localizer",
+        "//aos/controls:control_loop_test",
+        "//aos/network:team_number",
+        "//frc971/control_loops:team_number_test_environment",
+        "//frc971/control_loops/drivetrain:drivetrain_lib",
+        "//frc971/control_loops/drivetrain:drivetrain_test_lib",
+    ],
+)
diff --git a/y2019/control_loops/drivetrain/camera.h b/y2019/control_loops/drivetrain/camera.h
index 6439d0d..8015772 100644
--- a/y2019/control_loops/drivetrain/camera.h
+++ b/y2019/control_loops/drivetrain/camera.h
@@ -117,7 +117,7 @@
     Reading noise;
 
     // The target that this view corresponds to.
-    const TypedTarget<Scalar> *target;
+    const TypedTarget<Scalar> *target = nullptr;
     // The Pose the camera was at when viewing the target:
     Pose camera_pose;
   };
@@ -152,6 +152,9 @@
     Scalar nominal_height_noise;  // meters
   };
 
+  // Default constructor so that cameras can be default-initialized (e.g.,
+  // when stored in a ::std::array) and assigned afterwards.
+  TypedCamera() {}
+
   // Creates a camera:
   // pose: The Pose of the camera, relative to the robot at least transitively.
   // fov: The field-of-view of the camera, in radians. Note that this is the
@@ -201,6 +204,41 @@
   const Pose &pose() const { return pose_; }
   Scalar fov() const { return fov_; }
 
+  // Estimates the noise values of a target based on the raw readings.
+  // Also estimates whether we would expect the target to be visible, and
+  // populates is_visible if is_visible is not nullptr.
+  void PopulateNoise(TargetView *view, bool *is_visible = nullptr) const {
+    // Calculate the width of the target as it appears in the image.
+    // This number is unitless; a value greater than 1 implies that the target
+    // is visible to the camera, while a value less than 1 implies that it is
+    // too small to register on the camera.
+    Scalar apparent_width =
+        ::std::max<Scalar>(0.0, ::std::cos(view->reading.skew) *
+                                    noise_parameters_.max_viewable_distance /
+                                    view->reading.distance);
+    // As a sanity check, and to avoid dividing by a near-zero distance, force
+    // apparent_width to a small (not-visible) value if the target is
+    // unrealistically close.
+    if (view->reading.distance < 0.01) {
+      apparent_width = 0.01;
+    }
+
+    if (is_visible != nullptr) {
+      *is_visible = apparent_width >= 1.0;
+    }
+
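+    // The heading noise is independent of distance; the remaining noise terms
+    // grow as the target appears smaller in the image.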
+    view->noise.heading = noise_parameters_.heading_noise;
+
+    const Scalar normalized_width =
+        apparent_width / noise_parameters_.max_viewable_distance;
+    view->noise.distance =
+        noise_parameters_.nominal_distance_noise / normalized_width;
+    view->noise.skew =
+        noise_parameters_.nominal_skew_noise / normalized_width;
+    view->noise.height =
+        noise_parameters_.nominal_height_noise / normalized_width;
+  }
+
  private:
 
   // If the specified target is visible from the current camera Pose, adds it to
@@ -210,24 +248,24 @@
       ::aos::SizedArray<TargetView, num_targets> *views) const;
 
   // The Pose of this camera.
-  const Pose pose_;
+  Pose pose_;
 
   // Field of view of the camera, in radians.
-  const Scalar fov_;
+  Scalar fov_;
 
   // Various constants to calculate sensor noise; see definition of
   // NoiseParameters for more detail.
-  const NoiseParameters noise_parameters_;
+  NoiseParameters noise_parameters_;
 
   // A list of all the targets on the field.
   // TODO(james): Is it worth creating some sort of cache for the targets and
   // obstacles? e.g., passing around pointer to the targets/obstacles.
-  const ::std::array<TypedTarget<Scalar>, num_targets> targets_;
+  ::std::array<TypedTarget<Scalar>, num_targets> targets_;
   // Known obstacles on the field which can interfere with our view of the
   // targets. An "obstacle" is a line segment which we cannot see through, as
   // such a logical obstacle (e.g., the cargo ship) may consist of many
   // obstacles in this list to account for all of its sides.
-  const ::std::array<LineSegment, num_obstacles> obstacles_;
+  ::std::array<LineSegment, num_obstacles> obstacles_;
 }; // class TypedCamera
 
 template <int num_targets, int num_obstacles, typename Scalar>
@@ -250,15 +288,19 @@
     return;
   }
 
-  // Calculate the width of the target as it appears in the image.
-  // This number is unitless and if greater than 1, implies that the target is
-  // visible to the camera and if less than 1 implies it is too small to be
-  // registered on the camera.
-  const Scalar apparent_width = ::std::max<Scalar>(
-      0.0,
-      ::std::cos(skew) * noise_parameters_.max_viewable_distance / distance);
+  TargetView view;
+  view.reading.heading = heading;
+  view.reading.distance = distance;
+  view.reading.skew = skew;
+  view.reading.height = target.pose().abs_pos().z();
+  view.target = &target;
+  view.camera_pose = camera_abs_pose;
 
-  if (apparent_width < 1.0) {
+  bool is_visible = false;
+
+  PopulateNoise(&view, &is_visible);
+
+  if (!is_visible) {
     return;
   }
 
@@ -270,22 +312,8 @@
     }
   }
 
-  Scalar normalized_width =
-      apparent_width / noise_parameters_.max_viewable_distance;
-  Scalar distance_noise =
-      noise_parameters_.nominal_distance_noise / normalized_width;
-  Scalar skew_noise = noise_parameters_.nominal_skew_noise / normalized_width;
-  Scalar height_noise =
-      noise_parameters_.nominal_height_noise / normalized_width;
-
-  // If we've made it here, the target is visible to the camera and we should
-  // provide the actual TargetView in question.
-  TargetView view;
-  view.reading = {heading, distance, target.pose().abs_pos().z(), skew};
-  view.noise = {noise_parameters_.heading_noise, distance_noise, height_noise,
-                skew_noise};
-  view.target = &target;
-  view.camera_pose = camera_abs_pose;
+  // At this point the target has passed all the visibility checks, so add its
+  // view to the list.
   views->push_back(view);
 }
 
diff --git a/y2019/control_loops/drivetrain/camera.q b/y2019/control_loops/drivetrain/camera.q
new file mode 100644
index 0000000..5add759
--- /dev/null
+++ b/y2019/control_loops/drivetrain/camera.q
@@ -0,0 +1,27 @@
+package y2019.control_loops.drivetrain;
+
+// These structures have a nearly one-to-one correspondence to those in
+// //y2019/jevois:structures.h. Please refer to that file for details.
+struct CameraTarget {
+  float distance;
+  float height;
+  float heading;
+  float skew;
+};
+
+message CameraFrame {
+  // Monotonic time, in nanoseconds, at which the frame was taken (note that
+  // structures.h uses an age instead).
+  int64_t timestamp;
+
+  // Number of targets actually in this frame.
+  uint8_t num_targets;
+
+  // Buffer of targets; only the first num_targets entries are valid.
+  CameraTarget[3] targets;
+
+  // Index of the camera with which this frame was taken:
+  uint8_t camera;
+};
+
+queue CameraFrame camera_frames;
diff --git a/y2019/control_loops/drivetrain/drivetrain_main.cc b/y2019/control_loops/drivetrain/drivetrain_main.cc
index 50fd030..63da6c9 100644
--- a/y2019/control_loops/drivetrain/drivetrain_main.cc
+++ b/y2019/control_loops/drivetrain/drivetrain_main.cc
@@ -3,14 +3,15 @@
 #include "aos/events/shm-event-loop.h"
 #include "frc971/control_loops/drivetrain/drivetrain.h"
 #include "y2019/control_loops/drivetrain/drivetrain_base.h"
+#include "y2019/control_loops/drivetrain/event_loop_localizer.h"
 
 using ::frc971::control_loops::drivetrain::DrivetrainLoop;
 
 int main() {
   ::aos::Init();
   ::aos::ShmEventLoop event_loop;
-  ::frc971::control_loops::drivetrain::DeadReckonEkf localizer(
-      ::y2019::control_loops::drivetrain::GetDrivetrainConfig());
+  ::y2019::control_loops::drivetrain::EventLoopLocalizer localizer(
+      ::y2019::control_loops::drivetrain::GetDrivetrainConfig(), &event_loop);
   DrivetrainLoop drivetrain(
       ::y2019::control_loops::drivetrain::GetDrivetrainConfig(), &event_loop,
       &localizer);
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.cc b/y2019/control_loops/drivetrain/event_loop_localizer.cc
new file mode 100644
index 0000000..4656cb3
--- /dev/null
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.cc
@@ -0,0 +1,76 @@
+#include "y2019/control_loops/drivetrain/event_loop_localizer.h"
+
+#include <functional>
+
+namespace y2019 {
+namespace control_loops {
+namespace drivetrain {
+constexpr size_t EventLoopLocalizer::kMaxTargetsPerFrame;
+
+::std::array<EventLoopLocalizer::Camera, constants::Values::kNumCameras>
+MakeCameras(EventLoopLocalizer::Pose *pose) {
+  constants::Field field;
+  ::std::array<EventLoopLocalizer::Camera, constants::Values::kNumCameras>
+      cameras;
+  for (size_t ii = 0; ii < constants::Values::kNumCameras; ++ii) {
+    constants::Values::CameraCalibration camera_info =
+        constants::GetValues().cameras[ii];
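+    // Rebase the camera pose onto the provided robot pose so that the cameras
+    // track the robot as it moves around the field.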
+    EventLoopLocalizer::Pose camera_pose = camera_info.pose.Rebase(pose);
+    cameras[ii] = EventLoopLocalizer::Camera(
+        camera_pose, camera_info.fov,
+        constants::GetValues().camera_noise_parameters, field.targets(),
+        field.obstacles());
+  }
+  return cameras;
+}
+
+EventLoopLocalizer::EventLoopLocalizer(
+    const ::frc971::control_loops::drivetrain::DrivetrainConfig<double>
+        &dt_config,
+    ::aos::EventLoop *event_loop)
+    : event_loop_(event_loop),
+      cameras_(MakeCameras(&robot_pose_)),
+      localizer_(dt_config, &robot_pose_) {
+  localizer_.ResetInitialState(::aos::monotonic_clock::now(),
+                               Localizer::State::Zero(), localizer_.P());
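+  // Fetch camera frames from the queue by hand rather than with a Watcher;
+  // see the TODO in the header.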
+  frame_fetcher_ = event_loop_->MakeFetcher<CameraFrame>(
+      ".y2019.control_loops.drivetrain.camera_frames");
+}
+
+void EventLoopLocalizer::Reset(const Localizer::State &state) {
+  localizer_.ResetInitialState(::aos::monotonic_clock::now(), state,
+                               localizer_.P());
+}
+
+void EventLoopLocalizer::Update(
+    const ::Eigen::Matrix<double, 2, 1> &U,
+    ::aos::monotonic_clock::time_point now, double left_encoder,
+    double right_encoder, double gyro_rate,
+    double /*longitudinal_accelerometer*/) {
+  localizer_.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U,
+                                   now);
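+  // Drain any camera frames that have arrived since the last update and fold
+  // each one into the estimate.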
+  while (frame_fetcher_.FetchNext()) {
+    HandleFrame(*frame_fetcher_.get());
+  }
+}
+
+void EventLoopLocalizer::HandleFrame(const CameraFrame &frame) {
+  // We need to construct TargetView's and pass them to the localizer:
+  ::aos::SizedArray<TargetView, kMaxTargetsPerFrame> views;
+  for (int ii = 0; ii < frame.num_targets; ++ii) {
+    TargetView view;
+    view.reading.heading = frame.targets[ii].heading;
+    view.reading.distance = frame.targets[ii].distance;
+    view.reading.skew = frame.targets[ii].skew;
+    view.reading.height = frame.targets[ii].height;
+    cameras_[frame.camera].PopulateNoise(&view);
+    views.push_back(view);
+  }
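+  // The frame timestamp is already a monotonic time in nanoseconds (see
+  // camera.q), so convert it directly to a time_point.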
+  ::aos::monotonic_clock::time_point t(
+      ::std::chrono::nanoseconds(frame.timestamp));
+  localizer_.UpdateTargets(cameras_[frame.camera], views, t);
+}
+
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2019
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.h b/y2019/control_loops/drivetrain/event_loop_localizer.h
new file mode 100644
index 0000000..2812108
--- /dev/null
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.h
@@ -0,0 +1,84 @@
+#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_EVENT_LOOP_LOCALIZER_H_
+#define Y2019_CONTROL_LOOPS_DRIVETRAIN_EVENT_LOOP_LOCALIZER_H_
+
+#include "frc971/control_loops/drivetrain/localizer.h"
+#include "y2019/constants.h"
+#include "y2019/control_loops/drivetrain/localizer.h"
+#include "y2019/control_loops/drivetrain/camera.q.h"
+
+namespace y2019 {
+namespace control_loops {
+namespace drivetrain {
+
+// Wrap the localizer to allow it to fetch camera frames from the queues.
+// TODO(james): Provide a way of resetting the current position and
+// localizer.
+class EventLoopLocalizer
+    : public ::frc971::control_loops::drivetrain::LocalizerInterface {
+ public:
+  static constexpr size_t kMaxTargetsPerFrame = 3;
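+  // The underlying localizer, instantiated for the y2019 field: number of
+  // cameras, targets on the field, obstacles, and targets reported per frame.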
+  typedef TypedLocalizer<
+      constants::Values::kNumCameras, y2019::constants::Field::kNumTargets,
+      y2019::constants::Field::kNumObstacles, kMaxTargetsPerFrame, double>
+      Localizer;
+  typedef typename Localizer::StateIdx StateIdx;
+
+  typedef typename Localizer::Camera Camera;
+  typedef typename Localizer::TargetView TargetView;
+  typedef typename Localizer::Pose Pose;
+
+  EventLoopLocalizer(
+      const ::frc971::control_loops::drivetrain::DrivetrainConfig<double>
+          &dt_config,
+      ::aos::EventLoop *event_loop);
+
+  void Reset(const Localizer::State &state);
+
+  void Update(const ::Eigen::Matrix<double, 2, 1> &U,
+              ::aos::monotonic_clock::time_point now, double left_encoder,
+              double right_encoder, double gyro_rate,
+              double /*longitudinal_accelerometer*/) override;
+
+  double x() const override { return localizer_.X_hat(StateIdx::kX); }
+  double y() const override { return localizer_.X_hat(StateIdx::kY); }
+  double theta() const override { return localizer_.X_hat(StateIdx::kTheta); }
+  double left_velocity() const override {
+    return localizer_.X_hat(StateIdx::kLeftVelocity);
+  }
+  double right_velocity() const override {
+    return localizer_.X_hat(StateIdx::kRightVelocity);
+  }
+  double left_voltage_error() const override {
+    return localizer_.X_hat(StateIdx::kLeftVoltageError);
+  }
+  double right_voltage_error() const override {
+    return localizer_.X_hat(StateIdx::kRightVoltageError);
+  }
+
+ private:
+  void HandleFrame(const CameraFrame &frame);
+
+  ::aos::EventLoop *event_loop_;
+  // TODO(james): Make this use Watchers once we have them working in our
+  // simulation framework.
+  ::aos::Fetcher<CameraFrame> frame_fetcher_;
+
+  // Pose used as base for cameras; note that the exact value populated here
+  // is not overly important as it will be overridden by the Localizer.
+  Pose robot_pose_;
+  const ::std::array<Camera, constants::Values::kNumCameras> cameras_;
+  Localizer localizer_;
+};
+
+// Constructs the cameras based on the constants in the //y2019/constants.*
+// files and uses the provided Pose as the robot pose for the cameras.
+::std::array<EventLoopLocalizer::Camera, constants::Values::kNumCameras>
+MakeCameras(EventLoopLocalizer::Pose *pose);
+
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2019
+#endif  // Y2019_CONTROL_LOOPS_DRIVETRAIN_EVENT_LOOP_LOCALIZER_H_
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
new file mode 100644
index 0000000..bcbad52
--- /dev/null
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -0,0 +1,246 @@
+#include <queue>
+
+#include "gtest/gtest.h"
+
+#include "aos/controls/control_loop_test.h"
+#include "aos/network/team_number.h"
+#include "frc971/control_loops/drivetrain/drivetrain.h"
+#include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
+#include "frc971/control_loops/team_number_test_environment.h"
+#include "y2019/control_loops/drivetrain/camera.q.h"
+#include "y2019/control_loops/drivetrain/event_loop_localizer.h"
+#include "y2019/control_loops/drivetrain/drivetrain_base.h"
+
+// This file tests that the full Localizer, when used with queues within the
+// drivetrain, will behave properly. The purpose of this test is to make sure
+// that all the pieces fit together, not to test the algorithms themselves.
+
+namespace y2019 {
+namespace control_loops {
+namespace drivetrain {
+namespace testing {
+
+using ::frc971::control_loops::drivetrain::DrivetrainConfig;
+
+namespace {
+DrivetrainConfig<double> GetTest2019DrivetrainConfig() {
+  DrivetrainConfig<double> config = GetDrivetrainConfig();
+  config.gyro_type =
+      ::frc971::control_loops::drivetrain::GyroType::SPARTAN_GYRO;
+  return config;
+}
+}  // namespace
+
+namespace chrono = ::std::chrono;
+using ::frc971::control_loops::drivetrain::testing::DrivetrainSimulation;
+using ::frc971::control_loops::drivetrain::DrivetrainLoop;
+using ::frc971::control_loops::drivetrain::testing::GetTestDrivetrainConfig;
+using ::aos::monotonic_clock;
+
+class LocalizedDrivetrainTest : public ::aos::testing::ControlLoopTest {
+ protected:
+  // We must use the 2019 drivetrain config so that we don't have to deal
+  // with shifting:
+  LocalizedDrivetrainTest()
+      : dt_config_(GetTest2019DrivetrainConfig()),
+        my_drivetrain_queue_(".frc971.control_loops.drivetrain_queue",
+                             ".frc971.control_loops.drivetrain_queue.goal",
+                             ".frc971.control_loops.drivetrain_queue.position",
+                             ".frc971.control_loops.drivetrain_queue.output",
+                             ".frc971.control_loops.drivetrain_queue.status"),
+        camera_queue_(::y2019::control_loops::drivetrain::camera_frames.name()),
+        localizer_(dt_config_, &event_loop_),
+        drivetrain_motor_(dt_config_, &event_loop_, &localizer_),
+        drivetrain_motor_plant_(dt_config_),
+        cameras_(MakeCameras(&robot_pose_)),
+        last_frame_(monotonic_clock::now()) {
+    set_team_id(::frc971::control_loops::testing::kTeamNumber);
+    // Because (0, 0) is on the top level of the Hab and right on the edge of
+    // an obstacle, starting there tends to confuse the localizer, so we start
+    // somewhere less ambiguous.
+    SetStartingPosition({3.0, 2.0, 0.0});
+    ::frc971::sensors::gyro_reading.Clear();
+    ::y2019::control_loops::drivetrain::camera_frames.Clear();
+    set_battery_voltage(12.0);
+  }
+
+  void SetStartingPosition(const Eigen::Matrix<double, 3, 1> &xytheta) {
+    *drivetrain_motor_plant_.mutable_state() << xytheta.x(), xytheta.y(),
+        xytheta(2, 0), 0.0, 0.0;
+    Eigen::Matrix<double, 10, 1> localizer_state;
+    localizer_state.setZero();
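+    // Seed only the pose (x, y, theta) portion of the state; everything else
+    // starts at zero.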
+    localizer_state.block<3, 1>(0, 0) = xytheta;
+    localizer_.Reset(localizer_state);
+  }
+
+  void RunIteration() {
+    drivetrain_motor_plant_.SendPositionMessage();
+    drivetrain_motor_.Iterate();
+    drivetrain_motor_plant_.Simulate();
+    if (enable_cameras_) {
+      SendDelayedFrames();
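+      // Capture new frames at roughly 30 Hz; they are queued up and only
+      // delivered by SendDelayedFrames once the simulated camera latency has
+      // elapsed.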
+      if (last_frame_ + ::std::chrono::milliseconds(33) <
+          monotonic_clock::now()) {
+        CaptureFrames();
+        last_frame_ = monotonic_clock::now();
+      }
+    }
+    SimulateTimestep(true, dt_config_.dt);
+  }
+
+  void RunForTime(monotonic_clock::duration run_for) {
+    const auto end_time = monotonic_clock::now() + run_for;
+    while (monotonic_clock::now() < end_time) {
+      RunIteration();
+    }
+    // Let the estimator catch up with the final simulation step:
+    drivetrain_motor_plant_.SendPositionMessage();
+    drivetrain_motor_.Iterate();
+  }
+
+  void VerifyNearGoal() {
+    my_drivetrain_queue_.goal.FetchLatest();
+    my_drivetrain_queue_.position.FetchLatest();
+    EXPECT_NEAR(my_drivetrain_queue_.goal->left_goal,
+                drivetrain_motor_plant_.GetLeftPosition(), 1e-3);
+    EXPECT_NEAR(my_drivetrain_queue_.goal->right_goal,
+                drivetrain_motor_plant_.GetRightPosition(), 1e-3);
+  }
+
+  void VerifyEstimatorAccurate() {
+    const Eigen::Matrix<double, 5, 1> true_state =
+        drivetrain_motor_plant_.state();
+    EXPECT_NEAR(localizer_.x(), true_state(0, 0), 1e-5);
+    EXPECT_NEAR(localizer_.y(), true_state(1, 0), 1e-5);
+    EXPECT_NEAR(localizer_.theta(), true_state(2, 0), 1e-5);
+    EXPECT_NEAR(localizer_.left_velocity(), true_state(3, 0), 1e-5);
+    EXPECT_NEAR(localizer_.right_velocity(), true_state(4, 0), 1e-5);
+  }
+
+  void CaptureFrames() {
+    *robot_pose_.mutable_pos() << drivetrain_motor_plant_.GetPosition().x(),
+        drivetrain_motor_plant_.GetPosition().y(), 0.0;
+    robot_pose_.set_theta(drivetrain_motor_plant_.state()(2, 0));
+    for (size_t ii = 0; ii < cameras_.size(); ++ii) {
+      const auto target_views = cameras_[ii].target_views();
+      CameraFrame frame;
+      frame.timestamp = chrono::duration_cast<chrono::nanoseconds>(
+                            monotonic_clock::now().time_since_epoch())
+                            .count();
+      frame.camera = ii;
+      frame.num_targets = 0;
+      for (size_t jj = 0;
+           jj < ::std::min(EventLoopLocalizer::kMaxTargetsPerFrame,
+                           target_views.size());
+           ++jj) {
+        EventLoopLocalizer::TargetView view = target_views[jj];
+        ++frame.num_targets;
+        frame.targets[jj].heading = view.reading.heading;
+        frame.targets[jj].distance = view.reading.distance;
+        frame.targets[jj].skew = view.reading.skew;
+        frame.targets[jj].height = view.reading.height;
+      }
+      camera_delay_queue_.emplace(monotonic_clock::now(), frame);
+    }
+  }
+
+  void SendDelayedFrames() {
+    const ::std::chrono::milliseconds camera_latency(50);
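+    // Only deliver frames that have been waiting for at least camera_latency,
+    // to mimic the delay through the real camera pipeline.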
+    while (!camera_delay_queue_.empty() &&
+           ::std::get<0>(camera_delay_queue_.front()) <
+               monotonic_clock::now() - camera_latency) {
+      ::aos::ScopedMessagePtr<CameraFrame> message =
+          camera_queue_.MakeMessage();
+      *message = ::std::get<1>(camera_delay_queue_.front());
+      ASSERT_TRUE(message.Send());
+      camera_delay_queue_.pop();
+    }
+  }
+
+  virtual ~LocalizedDrivetrainTest() {
+    ::frc971::sensors::gyro_reading.Clear();
+    ::y2019::control_loops::drivetrain::camera_frames.Clear();
+  }
+
+  const ::frc971::control_loops::drivetrain::DrivetrainConfig<double>
+      dt_config_;
+
+  ::frc971::control_loops::DrivetrainQueue my_drivetrain_queue_;
+  ::aos::Queue<CameraFrame> camera_queue_;
+  ::aos::ShmEventLoop event_loop_;
+
+  EventLoopLocalizer localizer_;
+  DrivetrainLoop drivetrain_motor_;
+  DrivetrainSimulation drivetrain_motor_plant_;
+  EventLoopLocalizer::Pose robot_pose_;
+  const ::std::array<EventLoopLocalizer::Camera, constants::Values::kNumCameras>
+      cameras_;
+  monotonic_clock::time_point last_frame_;
+
+  // A queue of camera frames so that we can add a time delay to the data
+  // coming from the cameras.
+  ::std::queue<::std::tuple<::aos::monotonic_clock::time_point, CameraFrame>>
+      camera_delay_queue_;
+
+  void set_enable_cameras(bool enable) { enable_cameras_ = enable; }
+
+ private:
+  bool enable_cameras_ = false;
+};
+
+// Tests that, with no camera updates and a perfect model, there is no
+// estimator error.
+TEST_F(LocalizedDrivetrainTest, NoCameraUpdate) {
+  set_enable_cameras(false);
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(1)
+      .left_goal(-1.0)
+      .right_goal(1.0)
+      .Send();
+  RunForTime(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate();
+}
+
+// Tests that, without camera updates, an initial disturbance results in
+// estimator error. This is a check on the test itself.
+TEST_F(LocalizedDrivetrainTest, NoCameraWithDisturbanceFails) {
+  set_enable_cameras(false);
+  (*drivetrain_motor_plant_.mutable_state())(0, 0) += 0.05;
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(1)
+      .left_goal(-1.0)
+      .right_goal(1.0)
+      .Send();
+  RunForTime(chrono::seconds(3));
+  // VerifyNearGoal succeeds because it is just checking wheel positions:
+  VerifyNearGoal();
+  const Eigen::Matrix<double, 5, 1> true_state =
+      drivetrain_motor_plant_.state();
+  // Everything but X-value should be correct:
+  EXPECT_NEAR(true_state.x(), localizer_.x() + 0.05, 1e-5);
+  EXPECT_NEAR(true_state.y(), localizer_.y(), 1e-5);
+  EXPECT_NEAR(true_state(2, 0), localizer_.theta(), 1e-5);
+  EXPECT_NEAR(true_state(3, 0), localizer_.left_velocity(), 1e-5);
+  EXPECT_NEAR(true_state(4, 0), localizer_.right_velocity(), 1e-5);
+}
+
+// Tests that, when a small error in X is introduced, the camera updates
+// correct for it.
+TEST_F(LocalizedDrivetrainTest, CameraUpdate) {
+  set_enable_cameras(true);
+  (*drivetrain_motor_plant_.mutable_state())(0, 0) += 0.05;
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(1)
+      .left_goal(-1.0)
+      .right_goal(1.0)
+      .Send();
+  RunForTime(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate();
+}
+
+}  // namespace testing
+}  // namespace drivetrain
+}  // namespace control_loops
+}  // namespace y2019