Implement 2019 EKF

The main remaining tasks are to integrate this with the actual
drivetrain code and then to tune it on the actual robot.

For performance, I'm seeing ~0.6ms per simulation iteration on my
computer; this will vary with the exact camera frame rates and
latencies. It may need some optimization to reduce CPU load, but we
should be reasonably close.
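
For reference, that number is the average wall-clock time per iteration
as measured in localizer_test.cc, essentially:

    const auto begin = ::std::chrono::steady_clock::now();
    size_t i;
    for (i = 0; !spline_drivetrain_.IsAtEnd(); ++i) {
      // ... one simulation iteration: spline update, dynamics
      // integration, encoder/gyro update, camera corrections ...
    }
    const auto end = ::std::chrono::steady_clock::now();
    avg_time_ = (end - begin) / i;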

Change-Id: I286599e2c2f88dfef200afeb9ebbe9e7108714bf
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index 245e772..01a49a9 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -9,6 +9,14 @@
 #include "Eigen/Dense"
 #include "frc971/control_loops/drivetrain/drivetrain_config.h"
 
+namespace y2019 {
+namespace control_loops {
+namespace testing {
+class ParameterizedLocalizerTest;
+}  // namespace testing
+}  // namespace control_loops
+}  // namespace y2019
+
 namespace frc971 {
 namespace control_loops {
 namespace drivetrain {
@@ -95,9 +103,10 @@
   // TODO(james): We may want to actually re-initialize and reset things on
   // the field. Create some sort of Reset() function.
   void ResetInitialState(::aos::monotonic_clock::time_point t,
-                       const State &state) {
+                         const State &state, const StateSquare &P) {
     observations_.clear();
     X_hat_ = state;
+    P_ = P;
     observations_.PushFromBottom(
         {t,
          t,
@@ -299,6 +308,7 @@
       observations_;
 
   friend class testing::HybridEkfTest;
+  friend class ::y2019::control_loops::testing::ParameterizedLocalizerTest;
 };  // class HybridEkf
 
 template <typename Scalar>
@@ -419,8 +429,9 @@
 
   Q_continuous_.setZero();
   // TODO(james): Improve estimates of process noise--e.g., X/Y noise can
-  // probably be reduced because we rarely randomly jump in space, but maybe
-  // wheel speed noise should increase due to likelihood of slippage.
+  // probably be reduced when we are stopped, because we rarely jump randomly.
+  // Or maybe it's more appropriate to scale wheel speed noise with wheel
+  // speed, since the wheels aren't likely to slip much when stopped.
   Q_continuous_(kX, kX) = 0.005;
   Q_continuous_(kY, kY) = 0.005;
   Q_continuous_(kTheta, kTheta) = 0.001;
@@ -428,7 +439,7 @@
       dt_config_.make_kf_drivetrain_loop().observer().coefficients().Q;
 
   P_.setZero();
-  P_.diagonal() << 1, 1, 1, 0.5, 0.1, 0.5, 0.1, 1, 1, 1;
+  P_.diagonal() << 0.1, 0.1, 0.01, 0.02, 0.01, 0.02, 0.01, 1, 1, 0.03;
 
   H_encoders_and_gyro_.setZero();
   // Encoders are stored directly in the state matrix, so are a minor
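
ResetInitialState() now takes the initial covariance in addition to the
state, so callers control how much the filter initially trusts the seeded
state. A minimal sketch of the new call, mirroring the updated test below
(the test fixtures can read P() because they are friends of HybridEkf;
dt_config and t0 are placeholders):

    HybridEkf<> ekf(dt_config);
    // Reusing the filter's current P() keeps the default initial
    // uncertainty while resetting the state estimate.
    ekf.ResetInitialState(t0, HybridEkf<>::State::Zero(), ekf.P());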
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
index 9bdb30b..27119b1 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
+++ b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
@@ -28,7 +28,8 @@
         velocity_plant_coefs_(dt_config_.make_hybrid_drivetrain_velocity_loop()
                                   .plant()
                                   .coefficients()) {
-    ekf_.ResetInitialState(t0_, HybridEkf<>::State::Zero());
+    ekf_.ResetInitialState(t0_, State::Zero(),
+                           HybridEkf<>::StateSquare::Identity());
   }
 
  protected:
@@ -107,25 +108,26 @@
               {5.0, 6.0});
 }
 
-// Tests that if we provide a bunch of observations of the angular position
+// Tests that if we provide a bunch of observations of the position
 // with zero change in time, the state should approach the estimation.
 TEST_F(HybridEkfTest, ZeroTimeCorrect) {
-  HybridEkf<>::Output Z(1, 0, 0);
+  HybridEkf<>::Output Z(0.5, 0.5, 1);
   Eigen::Matrix<double, 3, 10> H;
-  H.setZero();
-  H(0, 2) = 1;
+  H.setIdentity();
   auto h = [H](const State &X, const Input &) { return H * X; };
   auto dhdx = [H](const State &) { return H; };
   Eigen::Matrix<double, 3, 3> R;
-  R.setZero();
-  R.diagonal() << 0.01, 0.01, 0.01;
+  R.setIdentity();
+  R *= 1e-3;
   Input U(0, 0);
   EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kTheta));
   const double starting_p_norm = ekf_.P().norm();
   for (int ii = 0; ii < 100; ++ii) {
     ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_);
   }
-  EXPECT_NEAR(Z(0, 0), ekf_.X_hat(StateIdx::kTheta), 1e-3);
+  EXPECT_NEAR(Z(0, 0), ekf_.X_hat(StateIdx::kX), 1e-3);
+  EXPECT_NEAR(Z(1, 0), ekf_.X_hat(StateIdx::kY), 1e-3);
+  EXPECT_NEAR(Z(2, 0), ekf_.X_hat(StateIdx::kTheta), 1e-3);
   const double ending_p_norm = ekf_.P().norm();
   // Due to corrections, noise should've decreased.
   EXPECT_LT(ending_p_norm, starting_p_norm * 0.95);
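
The convergence being tested here follows from the standard Kalman
measurement update: with h(X) = H X, where H selects x/y/theta, and a
small R, every zero-dt Correct() pulls those states toward Z while
shrinking P. A scalar analogue of the same loop (illustrative only, not
code from this patch):

    double x = 0.0, p = 1.0;         // state estimate and covariance
    const double z = 0.5, r = 1e-3;  // measurement and its variance
    for (int ii = 0; ii < 100; ++ii) {
      const double k = p / (p + r);  // Kalman gain
      x += k * (z - x);              // estimate moves toward the measurement
      p *= (1.0 - k);                // covariance shrinks each correction
    }
    // x now matches z to well within the 1e-3 tolerance used above.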
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.cc b/frc971/control_loops/drivetrain/splinedrivetrain.cc
index b45f218..9aa1708 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.cc
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.cc
@@ -88,15 +88,14 @@
   enable_ = enable;
   if (enable && current_trajectory_) {
     ::Eigen::Matrix<double, 2, 1> U_ff = ::Eigen::Matrix<double, 2, 1>::Zero();
-    if (!current_trajectory_->is_at_end(current_state_)) {
+    if (!IsAtEnd()) {
       // TODO(alex): It takes about a cycle for the outputs to propagate to the
       // motors. Consider delaying the output by a cycle.
       U_ff = current_trajectory_->FFVoltage(current_xva_(0));
     }
     ::Eigen::Matrix<double, 2, 5> K =
         current_trajectory_->KForState(state, dt_config_.dt, Q, R);
-    ::Eigen::Matrix<double, 5, 1> goal_state =
-        current_trajectory_->GoalState(current_xva_(0), current_xva_(1));
+    ::Eigen::Matrix<double, 5, 1> goal_state = CurrentGoalState();
     ::Eigen::Matrix<double, 5, 1> state_error = goal_state - state;
     ::Eigen::Matrix<double, 2, 1> U_fb = K * state_error;
     next_U_ = U_ff + U_fb;
@@ -116,7 +115,7 @@
     return;
   }
   if (current_spline_handle_ == current_spline_idx_) {
-    if (!current_trajectory_->is_at_end(current_state_)) {
+    if (!IsAtEnd()) {
       output->left_voltage = next_U_(0);
       output->right_voltage = next_U_(1);
       current_xva_ = next_xva_;
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.h b/frc971/control_loops/drivetrain/splinedrivetrain.h
index a065167..2a79df6 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.h
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.h
@@ -26,6 +26,16 @@
   // TODO(alex): What status do we need?
   void PopulateStatus(
       ::frc971::control_loops::DrivetrainQueue::Status *status) const;
+
+  // Accessor for the current goal state; present mainly for debugging
+  // purposes.
+  Eigen::Matrix<double, 5, 1> CurrentGoalState() const {
+    return current_trajectory_->GoalState(current_xva_(0), current_xva_(1));
+  }
+
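+  // Returns whether the robot has reached the end of the current trajectory.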
+  bool IsAtEnd() const {
+    return current_trajectory_->is_at_end(current_state_);
+  }
  private:
   void ScaleCapU(Eigen::Matrix<double, 2, 1> *U);
 
diff --git a/y2019/control_loops/drivetrain/BUILD b/y2019/control_loops/drivetrain/BUILD
index 38c73f0..310b853 100644
--- a/y2019/control_loops/drivetrain/BUILD
+++ b/y2019/control_loops/drivetrain/BUILD
@@ -1,4 +1,5 @@
 load("//aos/build:queues.bzl", "queue_library")
+load("//tools/build_rules:select.bzl", "cpu_select", "compiler_select")
 
 genrule(
     name = "genrule_drivetrain",
@@ -99,3 +100,42 @@
         "//aos/testing:googletest",
     ],
 )
+
+cc_library(
+    name = "localizer",
+    hdrs = ["localizer.h"],
+    deps = [
+        ":camera",
+        "//frc971/control_loops:pose",
+        "//frc971/control_loops/drivetrain:hybrid_ekf",
+    ],
+)
+
+cc_test(
+    name = "localizer_test",
+    srcs = ["localizer_test.cc"],
+    defines =
+        cpu_select({
+            "amd64": [
+                "SUPPORT_PLOT=1",
+            ],
+            "arm": [],
+        }),
+    linkstatic = True,
+    deps = [
+        ":localizer",
+        ":drivetrain_base",
+        "//aos/testing:googletest",
+        "//aos/testing:random_seed",
+        "//aos/testing:test_shm",
+        "//frc971/control_loops/drivetrain:trajectory",
+        "//y2019:constants",
+        "//frc971/control_loops/drivetrain:splinedrivetrain",
+        "@com_github_gflags_gflags//:gflags",
+    ] + cpu_select({
+        "amd64": [
+            "//third_party/matplotlib-cpp",
+        ],
+        "arm": [],
+    }),
+)
diff --git a/y2019/control_loops/drivetrain/camera.h b/y2019/control_loops/drivetrain/camera.h
index ff53b7b..6439d0d 100644
--- a/y2019/control_loops/drivetrain/camera.h
+++ b/y2019/control_loops/drivetrain/camera.h
@@ -193,7 +193,7 @@
   ::std::vector<::std::vector<Pose>> PlotPoints() const {
     ::std::vector<::std::vector<Pose>> list_of_lists;
     for (const auto &view : target_views()) {
-      list_of_lists.push_back({pose_, view.target->pose()});
+      list_of_lists.push_back({pose_, view.target->pose().Rebase(&pose_)});
     }
     return list_of_lists;
   }
@@ -254,9 +254,9 @@
   // This number is unitless and if greater than 1, implies that the target is
   // visible to the camera and if less than 1 implies it is too small to be
   // registered on the camera.
-  const Scalar apparent_width =
-      ::std::max(0.0, ::std::cos(skew) *
-                          noise_parameters_.max_viewable_distance / distance);
+  const Scalar apparent_width = ::std::max<Scalar>(
+      0.0,
+      ::std::cos(skew) * noise_parameters_.max_viewable_distance / distance);
 
   if (apparent_width < 1.0) {
     return;
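
The switch to ::std::max<Scalar> matters once Scalar is not double:
::std::max deduces a single type from both arguments, so mixing the 0.0
literal with a non-double expression fails to compile. Specifying the
template argument explicitly converts the literal instead. A contrived
illustration with float:

    float apparent_width = 0.5f;
    // ::std::max(0.0, apparent_width);      // error: deduction conflict
    ::std::max<float>(0.0, apparent_width);  // OK: 0.0 converted to float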
diff --git a/y2019/control_loops/drivetrain/localizer.h b/y2019/control_loops/drivetrain/localizer.h
new file mode 100644
index 0000000..f011720
--- /dev/null
+++ b/y2019/control_loops/drivetrain/localizer.h
@@ -0,0 +1,443 @@
+#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+#define Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
+
+#include <cmath>
+#include <memory>
+
+#include "frc971/control_loops/pose.h"
+#include "y2019/control_loops/drivetrain/camera.h"
+#include "frc971/control_loops/drivetrain/hybrid_ekf.h"
+
+namespace y2019 {
+namespace control_loops {
+
+template <int num_cameras, int num_targets, int num_obstacles,
+          int max_targets_per_frame, typename Scalar = double>
+class TypedLocalizer
+    : public ::frc971::control_loops::drivetrain::HybridEkf<Scalar> {
+ public:
+  typedef TypedCamera<num_targets, num_obstacles, Scalar> Camera;
+  typedef typename Camera::TargetView TargetView;
+  typedef typename Camera::Pose Pose;
+  typedef ::frc971::control_loops::drivetrain::HybridEkf<Scalar> HybridEkf;
+  typedef typename HybridEkf::State State;
+  typedef typename HybridEkf::StateSquare StateSquare;
+  typedef typename HybridEkf::Input Input;
+  typedef typename HybridEkf::Output Output;
+  using HybridEkf::kNInputs;
+  using HybridEkf::kNOutputs;
+  using HybridEkf::kNStates;
+
+  // robot_pose should be the object that is used by the cameras, so that when
+  // we update robot_pose, the cameras change what they report as the relative
+  // positions of the targets.
+  // Note that the parameters for the cameras should be set to allow slightly
+  // larger fields of view and slightly longer range than the true cameras so
+  // that we can identify potential matches for targets even when we have slight
+  // modelling errors.
+  TypedLocalizer(
+      const ::frc971::control_loops::drivetrain::DrivetrainConfig<Scalar>
+          &dt_config,
+      Pose *robot_pose)
+      : ::frc971::control_loops::drivetrain::HybridEkf<Scalar>(dt_config),
+        robot_pose_(robot_pose) {}
+
+  // Performs a Kalman filter correction with a single camera frame, consisting
+  // of up to max_targets_per_frame targets and taken at time t.
+  // camera is the Camera used to take the images.
+  void UpdateTargets(
+      const Camera &camera,
+      const ::aos::SizedArray<TargetView, max_targets_per_frame> &targets,
+      ::aos::monotonic_clock::time_point t) {
+    if (targets.empty()) {
+      return;
+    }
+
+    if (t > HybridEkf::latest_t()) {
+      LOG(ERROR,
+          "target observations must be older than most recent encoder/gyro "
+          "update.");
+      return;
+    }
+
+    Output z;
+    Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
+    TargetViewToMatrices(targets[0], &z, &R);
+
+    // In order to perform the correction steps for the targets, we perform
+    // a separate Correct step for each target.
+    // This way, we can have the first correction figure out the mappings
+    // between targets in the image and targets on the field, and then re-use
+    // those mappings for all the remaining corrections.
+    // As such, we need to store the EKF functions that the remaining targets
+    // will need in arrays:
+    ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
+                      max_targets_per_frame> h_functions;
+    ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
+                                                    kNStates>(const State &)>,
+                      max_targets_per_frame> dhdx_functions;
+    HybridEkf::Correct(
+        z, nullptr,
+        ::std::bind(&TypedLocalizer::MakeH, this, camera, targets, &h_functions,
+                    &dhdx_functions, ::std::placeholders::_1,
+                    ::std::placeholders::_2, ::std::placeholders::_3,
+                    ::std::placeholders::_4),
+        {}, {}, R, t);
+    // Correct the remaining targets using the functions cached by MakeH:
+    for (size_t ii = 1; ii < targets.size(); ++ii) {
+      TargetViewToMatrices(targets[ii], &z, &R);
+      HybridEkf::Correct(z, nullptr, {}, h_functions[ii], dhdx_functions[ii], R,
+                         t);
+    }
+  }
+
+ private:
+  // The threshold to use for completely rejecting potentially bad target
+  // matches.
+  // TODO(james): Tune
+  static constexpr Scalar kRejectionScore = 1.0;
+
+  // Computes the measurement (z) and noise covariance (R) matrices for a given
+  // TargetView.
+  void TargetViewToMatrices(const TargetView &view, Output *z,
+                            Eigen::Matrix<Scalar, kNOutputs, kNOutputs> *R) {
+    *z << view.reading.heading, view.reading.distance, view.reading.skew;
+    // TODO(james): R should account as well for our confidence in the target
+    // matching. However, handling that properly requires thinking a lot more
+    // about the probabilities.
+    R->setZero();
+    R->diagonal() << ::std::pow(view.noise.heading, 2),
+        ::std::pow(view.noise.distance, 2), ::std::pow(view.noise.skew, 2);
+  }
+
+  // This is the function that will be called once the Ekf has inserted the
+  // measurement into the right spot in the measurement queue and needs the
+  // output functions to actually perform the corrections.
+  // Specifically, this will take the estimate of the state at that time and
+  // figure out how the targets seen by the camera best map onto the actual
+  // targets on the field.
+  // It then fills in the h and dhdx functions that are called by the Ekf.
+  void MakeH(
+      const Camera &camera,
+      const ::aos::SizedArray<TargetView, max_targets_per_frame> &target_views,
+      ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
+                        max_targets_per_frame> *h_functions,
+      ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
+                                                      kNStates>(const State &)>,
+                        max_targets_per_frame> *dhdx_functions,
+      const State &X_hat, const StateSquare &P,
+      ::std::function<Output(const State &, const Input &)> *h,
+      ::std::function<
+          Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)> *dhdx) {
+    // Because we need to match camera targets ("views") to actual field
+    // targets, and because we want to take advantage of the correlations
+    // between the targets (i.e., if we see two targets in the image, they
+    // probably correspond to different on-field targets), the matching problem
+    // here is somewhat non-trivial. Some of the methods we use only work
+    // because we are dealing with very small N (e.g., handling the correlations
+    // between multiple views has combinatoric complexity, but since N = 3,
+    // it's not an issue).
+    //
+    // High-level steps:
+    // 1) Set the base robot pose for the cameras to the Pose implied by X_hat.
+    // 2) Fetch all the expected target views from the camera.
+    // 3) Determine the "magnitude" of the Kalman correction from each potential
+    //    view/target pair.
+    // 4) Match based on the combination of targets with the smallest
+    //    corrections.
+    // 5) Calculate h and dhdx for each pair of targets.
+    //
+    // For the "magnitude" of the correction, we do not directly use the
+    // standard Kalman correction formula. Instead, we calculate the correction
+    // we would get from each component of the measurement and take the L2 norm
+    // of those. This prevents situations where a target matches very poorly but
+    // produces an overall correction of near-zero.
+    // TODO(james): I do not know if this is strictly the correct method to
+    // minimize likely error, but it should be reasonable.
+    //
+    // For the matching, we do the following (see MatchFrames):
+    // 1. Compute the best max_targets_per_frame matches for each view.
+    // 2. Exhaust every possible combination of view/target pairs and
+    //    choose the best one.
+    // When we don't think the camera should be able to see as many targets as
+    // we actually got in the frame, then we do permit doubling/tripling/etc.
+    // up on potential targets once we've exhausted all the targets we think
+    // we can see.
+
+    // Set the current robot pose so that the cameras know where they are
+    // (all the cameras have robot_pose_ as their base):
+    *robot_pose_->mutable_pos() << X_hat(0, 0), X_hat(1, 0), 0.0;
+    robot_pose_->set_theta(X_hat(2, 0));
+
+    // Compute the things we *think* the camera should be seeing.
+    // Note: Because we will not try to match to any targets that are not
+    // returned by this function, we generally want the modelled camera to have
+    // a slightly larger field of view than the real camera, and be able to see
+    // slightly smaller targets.
+    const ::aos::SizedArray<TargetView, num_targets> camera_views =
+        camera.target_views();
+
+    // Each row contains the scores for each pair of target view and camera
+    // target view. Within each row, no values are populated past column
+    // camera.target_views().size(); of the rows, only the first
+    // target_views.size() are populated at all.
+    // Higher scores imply a worse match. Zero implies a perfect match.
+    Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> scores;
+    scores.setConstant(::std::numeric_limits<Scalar>::infinity());
+    // Each row contains the indices of the best matches per view, where
+    // index 0 is the best, 1 the second best, and 2 the third, etc.
+    // -1 indicates an unfilled field.
+    Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
+        best_matches;
+    best_matches.setConstant(-1);
+    // The H matrices for each potential matching. This has the same structure
+    // as the scores matrix.
+    ::std::array<::std::array<Eigen::Matrix<Scalar, kNOutputs, kNStates>,
+                              max_targets_per_frame>,
+                 num_targets> all_H_matrices;
+
+    // Iterate through and fill out the scores for each potential pairing:
+    for (size_t ii = 0; ii < target_views.size(); ++ii) {
+      const TargetView &target_view = target_views[ii];
+      Output z;
+      Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
+      TargetViewToMatrices(target_view, &z, &R);
+
+      for (size_t jj = 0; jj < camera_views.size(); ++jj) {
+        // Compute the Kalman update for this step:
+        const TargetView &view = camera_views[jj];
+        const Eigen::Matrix<Scalar, kNOutputs, kNStates> H =
+            HMatrix(*view.target, target_view);
+        const Eigen::Matrix<Scalar, kNStates, kNOutputs> PH = P * H.transpose();
+        const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> S = H * PH + R;
+        // Note: The inverse here should be very cheap so long as kNOutputs = 3.
+        const Eigen::Matrix<Scalar, kNStates, kNOutputs> K = PH * S.inverse();
+        const Output err = z - Output(view.reading.heading,
+                                      view.reading.distance, view.reading.skew);
+        // In order to compute the actual score, we want to consider each
+        // component of the error separately, as well as considering the impacts
+        // on each of the states separately. As such, we calculate what
+        // the separate updates from each error component would be, and sum
+        // the impacts on the states.
+        Output scorer;
+        for (size_t kk = 0; kk < kNOutputs; ++kk) {
+          // TODO(james): squaredNorm or norm or L1-norm? Do we care about the
+          // square root? Do we prefer a quadratic or linear response?
+          scorer(kk, 0) = (K.col(kk) * err(kk, 0)).squaredNorm();
+        }
+        // Compute the overall score--note that we add in a term for the height,
+        // scaled by a manual fudge-factor. The height is not accounted for
+        // in the Kalman update because we are not trying to estimate the height
+        // of the robot directly.
+        Scalar score =
+            scorer.squaredNorm() +
+            ::std::pow((view.reading.height - target_view.reading.height) /
+                           target_view.noise.height / 20.0,
+                       2);
+        scores(ii, jj) = score;
+        all_H_matrices[ii][jj] = H;
+
+        // Update the best_matches matrix:
+        int insert_target = jj;
+        for (size_t kk = 0; kk < max_targets_per_frame; ++kk) {
+          int idx = best_matches(ii, kk);
+          // Note that -1 indicates an unfilled value.
+          if (idx == -1 || scores(ii, idx) > scores(ii, insert_target)) {
+            best_matches(ii, kk) = insert_target;
+            insert_target = idx;
+            if (idx == -1) {
+              break;
+            }
+          }
+        }
+      }
+    }
+
+    if (camera_views.size() == 0) {
+      // If we can't get a match, provide H = zero, which will make this
+      // correction step a nop.
+      *h = [](const State &, const Input &) { return Output::Zero(); };
+      *dhdx = [](const State &) {
+        return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
+      };
+      for (size_t ii = 0; ii < target_views.size(); ++ii) {
+        h_functions->push_back(*h);
+        dhdx_functions->push_back(*dhdx);
+      }
+    } else {
+      // Go through and brute force the issue of what the best combination of
+      // target matches are. The worst case for this algorithm will be
+      // max_targets_per_frame!, which is awful for any N > ~4, but since
+      // max_targets_per_frame = 3, I'm not really worried.
+      ::std::array<int, max_targets_per_frame> best_frames =
+          MatchFrames(scores, best_matches, target_views.size());
+      for (size_t ii = 0; ii < target_views.size(); ++ii) {
+        int view_idx = best_frames[ii];
+        const Eigen::Matrix<Scalar, kNOutputs, kNStates> best_H =
+            all_H_matrices[ii][view_idx];
+        const TargetView best_view = camera_views[view_idx];
+        const TargetView target_view = target_views[ii];
+        const Scalar match_score = scores(ii, view_idx);
+        if (match_score > kRejectionScore) {
+          h_functions->push_back(
+              [](const State &, const Input &) { return Output::Zero(); });
+          dhdx_functions->push_back([](const State &) {
+            return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
+          });
+        } else {
+          h_functions->push_back([this, &camera, best_view, target_view](
+                                     const State &X, const Input &) {
+            // This function actually handles determining what the Output should
+            // be at a given state, now that we have chosen the target that
+            // we want to match to.
+            *robot_pose_->mutable_pos() << X(0, 0), X(1, 0), 0.0;
+            robot_pose_->set_theta(X(2, 0));
+            const Pose relative_pose =
+                best_view.target->pose().Rebase(&camera.pose());
+            const Scalar heading = relative_pose.heading();
+            const Scalar distance = relative_pose.xy_norm();
+            const Scalar skew =
+                ::aos::math::NormalizeAngle(relative_pose.rel_theta());
+            return Output(heading, distance, skew);
+          });
+
+          // TODO(james): Experiment to better understand whether we want to
+          // recalculate H or not.
+          dhdx_functions->push_back(
+              [best_H](const Eigen::Matrix<Scalar, kNStates, 1> &) {
+                return best_H;
+              });
+        }
+      }
+      *h = h_functions->at(0);
+      *dhdx = dhdx_functions->at(0);
+    }
+  }
+
+  Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(
+      const Target &target, const TargetView &target_view) {
+    // To calculate dheading/d{x,y,theta}:
+    // heading = arctan2(target_pos - camera_pos) - camera_theta
+    Eigen::Matrix<Scalar, 3, 1> target_pos = target.pose().abs_pos();
+    Eigen::Matrix<Scalar, 3, 1> camera_pos = target_view.camera_pose.abs_pos();
+    Scalar diffx = target_pos.x() - camera_pos.x();
+    Scalar diffy = target_pos.y() - camera_pos.y();
+    Scalar norm2 = diffx * diffx + diffy * diffy;
+    Scalar dheadingdx = diffy / norm2;
+    Scalar dheadingdy = -diffx / norm2;
+    Scalar dheadingdtheta = -1.0;
+
+    // To calculate ddistance/d{x,y}:
+    // distance = sqrt(diffx^2 + diffy^2)
+    Scalar distance = ::std::sqrt(norm2);
+    Scalar ddistdx = -diffx / distance;
+    Scalar ddistdy = -diffy / distance;
+
+    // Skew = target.theta - camera.theta:
+    Scalar dskewdtheta = -1.0;
+    Eigen::Matrix<Scalar, kNOutputs, kNStates> H;
+    H.setZero();
+    H(0, 0) = dheadingdx;
+    H(0, 1) = dheadingdy;
+    H(0, 2) = dheadingdtheta;
+    H(1, 0) = ddistdx;
+    H(1, 1) = ddistdy;
+    H(2, 2) = dskewdtheta;
+    return H;
+  }
+
+  // A helper function for the fuller version of MatchFrames; this just
+  // removes some of the arguments that are only needed during the recursion.
+  // n_views is the number of targets actually seen in the camera image (i.e.,
+  // the number of rows in scores/best_matches that are actually populated).
+  ::std::array<int, max_targets_per_frame> MatchFrames(
+      const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
+      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame> &
+          best_matches,
+      int n_views) {
+    ::std::array<int, max_targets_per_frame> best_set;
+    Scalar best_score;
+    // We start out without having "used" any views/targets:
+    ::aos::SizedArray<bool, max_targets_per_frame> used_views;
+    for (int ii = 0; ii < n_views; ++ii) {
+      used_views.push_back(false);
+    }
+    MatchFrames(scores, best_matches, used_views, {{false}}, &best_set,
+                &best_score);
+    return best_set;
+  }
+
+  // Recursively iterates over every plausible combination of targets/views
+  // that there is and determines the lowest-scoring combination.
+  // used_views and used_targets indicate which rows/columns of the
+  // scores/best_matches matrices should be ignored. When used_views is all
+  // true, that means that we are done recursing.
+  void MatchFrames(
+      const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
+      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame> &
+          best_matches,
+      const ::aos::SizedArray<bool, max_targets_per_frame> &used_views,
+      const ::std::array<bool, num_targets> &used_targets,
+      ::std::array<int, max_targets_per_frame> *best_set, Scalar *best_score) {
+    *best_score = ::std::numeric_limits<Scalar>::infinity();
+    // Iterate by letting each target in the camera frame (that isn't in
+    // used_views) choose its best match that isn't already taken. We then set
+    // the appropriate flags in used_views and used_targets and call MatchFrames
+    // to let all the other views sort themselves out.
+    for (size_t ii = 0; ii < used_views.size(); ++ii) {
+      if (used_views[ii]) {
+        continue;
+      }
+      int best_match = -1;
+      for (size_t jj = 0; jj < max_targets_per_frame; ++jj) {
+        if (best_matches(ii, jj) == -1) {
+          // If we run out of potential targets from the camera, then there
+          // are more targets in the frame than we think there should be.
+          // In this case, we are going to be doubling/tripling/etc. up
+          // anyhow. So we just give everyone their top choice:
+          // TODO(james): If we ever are dealing with larger numbers of
+          // targets per frame, do something to minimize doubling-up.
+          best_match = best_matches(ii, 0);
+          break;
+        }
+        best_match = best_matches(ii, jj);
+        if (!used_targets[best_match]) {
+          break;
+        }
+      }
+      // If we reach here and best_match = -1, that means that no potential
+      // targets were generated by the camera, and we should never have gotten
+      // here.
+      CHECK(best_match != -1);
+      ::aos::SizedArray<bool, max_targets_per_frame> sub_views = used_views;
+      sub_views[ii] = true;
+      ::std::array<bool, num_targets> sub_targets = used_targets;
+      sub_targets[best_match] = true;
+      ::std::array<int, max_targets_per_frame> sub_best_set;
+      Scalar score;
+      MatchFrames(scores, best_matches, sub_views, sub_targets, &sub_best_set,
+                  &score);
+      score += scores(ii, best_match);
+      sub_best_set[ii] = best_match;
+      if (score < *best_score) {
+        *best_score = score;
+        *best_set = sub_best_set;
+      }
+    }
+    // best_score will be infinite if we did not find a result due to there
+    // being no targets that weren't set in used_views; this is the
+    // base case of the recursion and so we set best_score to zero:
+    if (!::std::isfinite(*best_score)) {
+      *best_score = 0.0;
+    }
+  }
+
+  // The pose that is used by the cameras to determine the location of the robot
+  // and thus the expected view of the targets.
+  Pose *robot_pose_;
+};  // class TypedLocalizer
+
+}  // namespace control_loops
+}  // namespace y2019
+
+#endif  // Y2019_CONTROL_LOOPS_DRIVETRAIN_LOCALIZER_H_
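
The expected call pattern for the localizer, as exercised by the test
added below (all names here are placeholders):

    TypedLocalizer<4, Field::kNumTargets, Field::kNumObstacles,
                   /*max_targets_per_frame=*/3>
        localizer(dt_config, &robot_pose);
    // Encoder/gyro updates must stay ahead of the camera frames, since
    // UpdateTargets() rejects observations newer than the latest
    // encoder/gyro update:
    localizer.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro, U, t);
    localizer.UpdateTargets(camera, target_views, t - camera_latency);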
diff --git a/y2019/control_loops/drivetrain/localizer_test.cc b/y2019/control_loops/drivetrain/localizer_test.cc
new file mode 100644
index 0000000..45ae714
--- /dev/null
+++ b/y2019/control_loops/drivetrain/localizer_test.cc
@@ -0,0 +1,603 @@
+#include "y2019/control_loops/drivetrain/localizer.h"
+
+#include <random>
+#include <queue>
+
+#include "aos/testing/random_seed.h"
+#include "aos/testing/test_shm.h"
+#include "frc971/control_loops/drivetrain/trajectory.h"
+#include "frc971/control_loops/drivetrain/splinedrivetrain.h"
+#include "gflags/gflags.h"
+#if defined(SUPPORT_PLOT)
+#include "third_party/matplotlib-cpp/matplotlibcpp.h"
+#endif
+#include "gtest/gtest.h"
+#include "y2019/control_loops/drivetrain/drivetrain_base.h"
+#include "y2019/constants.h"
+
+DEFINE_bool(plot, false, "If true, plot");
+
+namespace y2019 {
+namespace control_loops {
+namespace testing {
+
+using ::y2019::constants::Field;
+
+constexpr size_t kNumCameras = 4;
+constexpr size_t kNumTargetsPerFrame = 3;
+
+typedef TypedLocalizer<kNumCameras, Field::kNumTargets, Field::kNumObstacles,
+                       kNumTargetsPerFrame, double> TestLocalizer;
+typedef typename TestLocalizer::Camera TestCamera;
+typedef typename TestCamera::Pose Pose;
+typedef typename TestCamera::LineSegment Obstacle;
+
+typedef TestLocalizer::StateIdx StateIdx;
+
+using ::frc971::control_loops::drivetrain::DrivetrainConfig;
+
+// When placing the cameras on the robot, set them all kCameraOffset out from
+// the center, to test that we really can handle cameras that aren't at the
+// center-of-mass.
+constexpr double kCameraOffset = 0.1;
+
+#if defined(SUPPORT_PLOT)
+// Plots a line from a vector of Pose's.
+void PlotPlotPts(const ::std::vector<Pose> &poses,
+                 const ::std::map<::std::string, ::std::string> &kwargs) {
+  ::std::vector<double> x;
+  ::std::vector<double> y;
+  for (const Pose &p : poses) {
+    x.push_back(p.abs_pos().x());
+    y.push_back(p.abs_pos().y());
+  }
+  matplotlibcpp::plot(x, y, kwargs);
+}
+#endif
+
+struct LocalizerTestParams {
+  // Control points for the spline to make the robot follow.
+  ::std::array<float, 6> control_pts_x;
+  ::std::array<float, 6> control_pts_y;
+  // The actual state to start the robot at. By setting voltage errors and the
+  // such you can introduce persistent disturbances.
+  TestLocalizer::State true_start_state;
+  // The initial state of the estimator.
+  TestLocalizer::State known_start_state;
+  // Whether or not to add Gaussian noise to the sensors and cameras.
+  bool noisify;
+  // Whether or not to add unmodelled disturbances.
+  bool disturb;
+  // The tolerances for the estimator and for the trajectory following at
+  // the end of the spline:
+  double estimate_tolerance;
+  double goal_tolerance;
+};
+
+class ParameterizedLocalizerTest
+    : public ::testing::TestWithParam<LocalizerTestParams> {
+ public:
+  ::aos::testing::TestSharedMemory shm_;
+
+  // Set up three targets in a row, at (-1, 0), (0, 0), and (1, 0).
+  // Make the right-most target (1, 0) be facing away from the camera, and give
+  // the middle target some skew.
+  // Place one camera facing forward, the other facing backward, and set the
+  // robot at (0, -5) with the cameras each 0.1m from the center.
+  // Place one obstacle in a place where it can block the left-most target (-1,
+  // 0).
+  ParameterizedLocalizerTest()
+      : field_(),
+        targets_(field_.targets()),
+        modeled_obstacles_(field_.obstacles()),
+        true_obstacles_(field_.obstacles()),
+        dt_config_(drivetrain::GetDrivetrainConfig()),
+        // Pull the noise for the encoders/gyros from the R matrix:
+        encoder_noise_(::std::sqrt(
+            dt_config_.make_kf_drivetrain_loop().observer().coefficients().R(
+                0, 0))),
+        gyro_noise_(::std::sqrt(
+            dt_config_.make_kf_drivetrain_loop().observer().coefficients().R(
+                2, 2))),
+        // As per the comments in localizer.h, we set the field of view and
+        // noise parameters on the robot_cameras_ so that they see a bit more
+        // than the true_cameras_. The robot_cameras_ are what is passed to the
+        // localizer and used to generate "expected" targets. The true_cameras_
+        // are what we actually use to generate targets to pass to the
+        // localizer.
+        robot_cameras_{
+            {TestCamera({&robot_pose_, {0.0, kCameraOffset, 0.0}, M_PI_2},
+                        M_PI_2 * 1.1, robot_noise_parameters_, targets_,
+                        modeled_obstacles_),
+             TestCamera({&robot_pose_, {kCameraOffset, 0.0, 0.0}, 0.0},
+                        M_PI_2 * 1.1, robot_noise_parameters_, targets_,
+                        modeled_obstacles_),
+             TestCamera({&robot_pose_, {-kCameraOffset, 0.0, 0.0}, M_PI},
+                        M_PI_2 * 1.1, robot_noise_parameters_, targets_,
+                        modeled_obstacles_),
+             TestCamera({&robot_pose_, {0.0, -kCameraOffset, 0.0}, -M_PI_2},
+                        M_PI_2 * 1.1, robot_noise_parameters_, targets_,
+                        modeled_obstacles_)}},
+        true_cameras_{
+            {TestCamera({&true_robot_pose_, {0.0, kCameraOffset, 0.0}, M_PI_2},
+                        M_PI_2 * 0.9, true_noise_parameters_, targets_,
+                        true_obstacles_),
+             TestCamera({&true_robot_pose_, {kCameraOffset, 0.0, 0.0}, 0.0},
+                        M_PI_2 * 0.9, true_noise_parameters_, targets_,
+                        true_obstacles_),
+             TestCamera({&true_robot_pose_, {-kCameraOffset, 0.0, 0.0}, M_PI},
+                        M_PI_2 * 0.9, true_noise_parameters_, targets_,
+                        true_obstacles_),
+             TestCamera(
+                 {&true_robot_pose_, {-0.0, -kCameraOffset, 0.0}, -M_PI_2},
+                 M_PI_2 * 0.9, true_noise_parameters_, targets_,
+                 true_obstacles_)}},
+        localizer_(dt_config_, &robot_pose_),
+        spline_drivetrain_(dt_config_) {
+    // We use the default P() for initialization.
+    localizer_.ResetInitialState(t0_, GetParam().known_start_state,
+                                 localizer_.P());
+  }
+
+  void SetUp() {
+    ::frc971::control_loops::DrivetrainQueue::Goal goal;
+    goal.controller_type = 2;
+    goal.spline.spline_idx = 1;
+    goal.spline.spline_count = 1;
+    goal.spline_handle = 1;
+    ::std::copy(GetParam().control_pts_x.begin(),
+                GetParam().control_pts_x.end(), goal.spline.spline_x.begin());
+    ::std::copy(GetParam().control_pts_y.begin(),
+                GetParam().control_pts_y.end(), goal.spline.spline_y.begin());
+    spline_drivetrain_.SetGoal(goal);
+  }
+
+  void TearDown() {
+    printf("Each iteration of the simulation took on average %f seconds.\n",
+           avg_time_.count());
+#if defined(SUPPORT_PLOT)
+    if (FLAGS_plot) {
+      matplotlibcpp::figure();
+      matplotlibcpp::plot(simulation_t_, simulation_vl_, {{"label", "Vl"}});
+      matplotlibcpp::plot(simulation_t_, simulation_vr_, {{"label", "Vr"}});
+      matplotlibcpp::legend();
+
+      matplotlibcpp::figure();
+      matplotlibcpp::plot(spline_x_, spline_y_, {{"label", "spline"}});
+      matplotlibcpp::plot(simulation_x_, simulation_y_, {{"label", "robot"}});
+      matplotlibcpp::plot(estimated_x_, estimated_y_,
+                          {{"label", "estimation"}});
+      for (const Target &target : targets_) {
+        PlotPlotPts(target.PlotPoints(), {{"c", "g"}});
+      }
+      for (const Obstacle &obstacle : true_obstacles_) {
+        PlotPlotPts(obstacle.PlotPoints(), {{"c", "k"}});
+      }
+      // Go through and plot true/expected camera targets for a few select
+      // time-steps.
+      ::std::vector<const char *> colors{"m", "y", "c"};
+      int jj = 0;
+      for (size_t ii = 0; ii < simulation_x_.size(); ii += 400) {
+        *true_robot_pose_.mutable_pos() << simulation_x_[ii], simulation_y_[ii],
+            0.0;
+        true_robot_pose_.set_theta(simulation_theta_[ii]);
+        for (const TestCamera &camera : true_cameras_) {
+          for (const auto &plot_pts : camera.PlotPoints()) {
+            PlotPlotPts(plot_pts, {{"c", colors[jj]}});
+          }
+        }
+        for (const TestCamera &camera : robot_cameras_) {
+          *robot_pose_.mutable_pos() << estimated_x_[ii], estimated_y_[ii], 0.0;
+          robot_pose_.set_theta(estimated_theta_[ii]);
+          const auto &all_plot_pts = camera.PlotPoints();
+          *robot_pose_.mutable_pos() = true_robot_pose_.rel_pos();
+          robot_pose_.set_theta(true_robot_pose_.rel_theta());
+          for (const auto &plot_pts : all_plot_pts) {
+            PlotPlotPts(plot_pts, {{"c", colors[jj]}, {"ls", "--"}});
+          }
+        }
+        jj = (jj + 1) % colors.size();
+      }
+      matplotlibcpp::legend();
+
+      matplotlibcpp::figure();
+      matplotlibcpp::plot(
+          simulation_t_, spline_x_,
+          {{"label", "spline x"}, {"c", "g"}, {"ls", ""}, {"marker", "o"}});
+      matplotlibcpp::plot(simulation_t_, simulation_x_,
+                          {{"label", "simulated x"}, {"c", "g"}});
+      matplotlibcpp::plot(simulation_t_, estimated_x_,
+                          {{"label", "estimated x"}, {"c", "g"}, {"ls", "--"}});
+
+      matplotlibcpp::plot(
+          simulation_t_, spline_y_,
+          {{"label", "spline y"}, {"c", "b"}, {"ls", ""}, {"marker", "o"}});
+      matplotlibcpp::plot(simulation_t_, simulation_y_,
+                          {{"label", "simulated y"}, {"c", "b"}});
+      matplotlibcpp::plot(simulation_t_, estimated_y_,
+                          {{"label", "estimated y"}, {"c", "b"}, {"ls", "--"}});
+
+      matplotlibcpp::plot(simulation_t_, simulation_theta_,
+                          {{"label", "simulated theta"}, {"c", "r"}});
+      matplotlibcpp::plot(
+          simulation_t_, estimated_theta_,
+          {{"label", "estimated theta"}, {"c", "r"}, {"ls", "--"}});
+      matplotlibcpp::legend();
+
+      matplotlibcpp::show();
+    }
+#endif
+  }
+
+ protected:
+  // Returns a random number with a gaussian distribution with a mean of zero
+  // and a standard deviation of std, if noisify = true.
+  // If noisify is false, then returns 0.0.
+  double Normal(double std) {
+    if (GetParam().noisify) {
+      return normal_(gen_) * std;
+    }
+    return 0.0;
+  }
+  // Adds random noise to the given target view.
+  void Noisify(TestCamera::TargetView *view) {
+    view->reading.heading += Normal(view->noise.heading);
+    view->reading.distance += Normal(view->noise.distance);
+    view->reading.height += Normal(view->noise.height);
+    view->reading.skew += Normal(view->noise.skew);
+  }
+  // The differential equation for the dynamics of the system.
+  TestLocalizer::State DiffEq(const TestLocalizer::State &X,
+                              const TestLocalizer::Input &U) {
+    return localizer_.DiffEq(X, U);
+  }
+
+  Field field_;
+  ::std::array<Target, Field::kNumTargets> targets_;
+  // The obstacles that are passed to the camera objects for the localizer.
+  ::std::array<Obstacle, Field::kNumObstacles> modeled_obstacles_;
+  // The obstacles that are used for actually simulating the cameras.
+  ::std::array<Obstacle, Field::kNumObstacles> true_obstacles_;
+
+  DrivetrainConfig<double> dt_config_;
+
+  // Noise information for the actual simulated cameras (true_*) and the
+  // parameters that the localizer should use for modelling the cameras. Note
+  // how the max_viewable_distance is larger for the localizer, so that if
+  // there is any error in the estimation, it still thinks that it can see
+  // any targets that might actually be in range.
+  TestCamera::NoiseParameters true_noise_parameters_ = {
+      .max_viewable_distance = 10.0,
+      .heading_noise = 0.02,
+      .nominal_distance_noise = 0.06,
+      .nominal_skew_noise = 0.1,
+      .nominal_height_noise = 0.01};
+  TestCamera::NoiseParameters robot_noise_parameters_ = {
+      .max_viewable_distance = 11.0,
+      .heading_noise = 0.02,
+      .nominal_distance_noise = 0.06,
+      .nominal_skew_noise = 0.1,
+      .nominal_height_noise = 0.01};
+
+  // Standard deviations of the noise for the encoders/gyro.
+  double encoder_noise_, gyro_noise_;
+
+  Pose robot_pose_;
+  ::std::array<TestCamera, 4> robot_cameras_;
+  Pose true_robot_pose_;
+  ::std::array<TestCamera, 4> true_cameras_;
+
+  TestLocalizer localizer_;
+
+  ::frc971::control_loops::drivetrain::SplineDrivetrain spline_drivetrain_;
+
+  // All the data we want to end up plotting.
+  ::std::vector<double> simulation_t_;
+
+  ::std::vector<double> spline_x_;
+  ::std::vector<double> spline_y_;
+  ::std::vector<double> estimated_x_;
+  ::std::vector<double> estimated_y_;
+  ::std::vector<double> estimated_theta_;
+  ::std::vector<double> simulation_x_;
+  ::std::vector<double> simulation_y_;
+  ::std::vector<double> simulation_theta_;
+
+  ::std::vector<double> simulation_vl_;
+  ::std::vector<double> simulation_vr_;
+
+  // Simulation start time
+  ::aos::monotonic_clock::time_point t0_;
+
+  // Average duration of each iteration; used for debugging and getting a
+  // sanity-check on what performance looks like--uses a real system clock.
+  ::std::chrono::duration<double> avg_time_;
+
+  ::std::mt19937 gen_{static_cast<uint32_t>(::aos::testing::RandomSeed())};
+  ::std::normal_distribution<> normal_;
+};
+
+// Tests that, when we attempt to follow a spline and use the localizer to
+// perform the state estimation, we end up roughly where we should and that
+// the localizer has a valid position estimate.
+TEST_P(ParameterizedLocalizerTest, SplineTest) {
+  // state stores the true state of the robot throughout the simulation.
+  TestLocalizer::State state = GetParam().true_start_state;
+
+  ::aos::monotonic_clock::time_point t = t0_;
+
+  // The period with which we should take frames from the cameras. Currently,
+  // we just trigger all the cameras at once, rather than offsetting them or
+  // anything.
+  const int camera_period = 5;  // cycles
+  // The amount of time to delay the camera images from when they are taken.
+  const ::std::chrono::milliseconds camera_latency(50);
+
+  // A queue of camera frames so that we can add a time delay to the data
+  // coming from the cameras.
+  ::std::queue<::std::tuple<
+      ::aos::monotonic_clock::time_point, const TestCamera *,
+      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>>>
+      camera_queue;
+
+  // Start time, for debugging.
+  const auto begin = ::std::chrono::steady_clock::now();
+
+  size_t i;
+  for (i = 0; !spline_drivetrain_.IsAtEnd(); ++i) {
+    // Get the current state estimate into a matrix that works for the
+    // trajectory code.
+    ::Eigen::Matrix<double, 5, 1> known_state;
+    TestLocalizer::State X_hat = localizer_.X_hat();
+    known_state << X_hat(StateIdx::kX, 0), X_hat(StateIdx::kY, 0),
+        X_hat(StateIdx::kTheta, 0), X_hat(StateIdx::kLeftVelocity, 0),
+        X_hat(StateIdx::kRightVelocity, 0);
+
+    spline_drivetrain_.Update(true, known_state);
+
+    ::frc971::control_loops::DrivetrainQueue::Output output;
+    output.left_voltage = 0;
+    output.right_voltage = 0;
+    spline_drivetrain_.SetOutput(&output);
+    TestLocalizer::Input U(output.left_voltage, output.right_voltage);
+
+    const ::Eigen::Matrix<double, 5, 1> goal_state =
+        spline_drivetrain_.CurrentGoalState();
+    simulation_t_.push_back(
+        ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+            t.time_since_epoch())
+            .count());
+    spline_x_.push_back(goal_state(0, 0));
+    spline_y_.push_back(goal_state(1, 0));
+    simulation_x_.push_back(state(StateIdx::kX, 0));
+    simulation_y_.push_back(state(StateIdx::kY, 0));
+    simulation_theta_.push_back(state(StateIdx::kTheta, 0));
+    estimated_x_.push_back(known_state(0, 0));
+    estimated_y_.push_back(known_state(1, 0));
+    estimated_theta_.push_back(known_state(StateIdx::kTheta, 0));
+
+    simulation_vl_.push_back(U(0));
+    simulation_vr_.push_back(U(1));
+    U(0, 0) = ::std::max(::std::min(U(0, 0), 12.0), -12.0);
+    U(1, 0) = ::std::max(::std::min(U(1, 0), 12.0), -12.0);
+
+    state = ::frc971::control_loops::RungeKuttaU(
+        [this](const ::Eigen::Matrix<double, 10, 1> &X,
+               const ::Eigen::Matrix<double, 2, 1> &U) { return DiffEq(X, U); },
+        state, U,
+        ::std::chrono::duration_cast<::std::chrono::duration<double>>(
+            dt_config_.dt)
+            .count());
+
+    // Add arbitrary disturbances at some arbitrary interval. The main points of
+    // interest here are that we (a) stop adding disturbances at the very end of
+    // the trajectory, to allow us to actually converge to the goal, and (b)
+    // scale disturbances by the current velocity.
+    if (GetParam().disturb && i % 50 == 0) {
+      // Scale the disturbance so that when we have near-zero velocity we don't
+      // have much disturbance.
+      double disturbance_scale = ::std::min(
+          1.0, ::std::sqrt(::std::pow(state(StateIdx::kLeftVelocity, 0), 2) +
+                           ::std::pow(state(StateIdx::kRightVelocity, 0), 2)) /
+                   3.0);
+      TestLocalizer::State disturbance;
+      disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0;
+      disturbance *= disturbance_scale;
+      state += disturbance;
+    }
+
+    t += dt_config_.dt;
+    *true_robot_pose_.mutable_pos() << state(StateIdx::kX, 0),
+        state(StateIdx::kY, 0), 0.0;
+    true_robot_pose_.set_theta(state(StateIdx::kTheta, 0));
+    const double left_enc = state(StateIdx::kLeftEncoder, 0);
+    const double right_enc = state(StateIdx::kRightEncoder, 0);
+
+    const double gyro = (state(StateIdx::kRightVelocity, 0) -
+                         state(StateIdx::kLeftVelocity, 0)) /
+                        dt_config_.robot_radius / 2.0;
+
+    localizer_.UpdateEncodersAndGyro(left_enc + Normal(encoder_noise_),
+                                     right_enc + Normal(encoder_noise_),
+                                     gyro + Normal(gyro_noise_), U, t);
+
+    // Clear out the camera frames that we are ready to pass to the localizer.
+    while (!camera_queue.empty() &&
+           ::std::get<0>(camera_queue.front()) < t - camera_latency) {
+      const auto tuple = camera_queue.front();
+      camera_queue.pop();
+      ::aos::monotonic_clock::time_point t_obs = ::std::get<0>(tuple);
+      const TestCamera *camera = ::std::get<1>(tuple);
+      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame> views =
+          ::std::get<2>(tuple);
+      localizer_.UpdateTargets(*camera, views, t_obs);
+    }
+
+    // Actually take all the images and store them in the queue.
+    if (i % camera_period == 0) {
+      for (size_t jj = 0; jj < true_cameras_.size(); ++jj) {
+        const auto target_views = true_cameras_[jj].target_views();
+        ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>
+            pass_views;
+        for (size_t ii = 0;
+             ii < ::std::min(kNumTargetsPerFrame, target_views.size()); ++ii) {
+          TestCamera::TargetView view = target_views[ii];
+          Noisify(&view);
+          pass_views.push_back(view);
+        }
+        camera_queue.emplace(t, &robot_cameras_[jj], pass_views);
+      }
+    }
+
+  }
+
+  const auto end = ::std::chrono::steady_clock::now();
+  avg_time_ = (end - begin) / i;
+  TestLocalizer::State estimate_err = state - localizer_.X_hat();
+  EXPECT_LT(estimate_err.template topRows<7>().norm(),
+            GetParam().estimate_tolerance);
+  // Check that none of the states that we actually care about (x/y/theta, and
+  // wheel positions/speeds) are too far off, individually:
+  EXPECT_LT(estimate_err.template topRows<7>().cwiseAbs().maxCoeff(),
+            GetParam().estimate_tolerance / 3.0)
+      << "Estimate error: " << estimate_err.transpose();
+  Eigen::Matrix<double, 5, 1> final_trajectory_state;
+  final_trajectory_state << state(StateIdx::kX, 0), state(StateIdx::kY, 0),
+      state(StateIdx::kTheta, 0), state(StateIdx::kLeftVelocity, 0),
+      state(StateIdx::kRightVelocity, 0);
+  const Eigen::Matrix<double, 5, 1> final_trajectory_state_err =
+      final_trajectory_state - spline_drivetrain_.CurrentGoalState();
+  EXPECT_LT(final_trajectory_state_err.norm(), GetParam().goal_tolerance)
+      << "Goal error: " << final_trajectory_state_err.transpose();
+}
+
+INSTANTIATE_TEST_CASE_P(
+    LocalizerTest, ParameterizedLocalizerTest,
+    ::testing::Values(
+        // Checks a "perfect" scenario, where we should track perfectly.
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/false,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/1e-5,
+            /*goal_tolerance=*/2e-2,
+        }),
+        // Checks "perfect" estimation, but start off the spline and check
+        // that we can still follow it.
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/false,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/1e-5,
+            /*goal_tolerance=*/2e-2,
+        }),
+        // Repeats perfect scenario, but add sensor noise.
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/true,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/0.2,
+            /*goal_tolerance=*/0.2,
+        }),
+        // Repeats perfect scenario, but add initial estimator error.
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0, 0.0)
+                .finished(),
+            /*noisify=*/false,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/1e-4,
+            /*goal_tolerance=*/2e-2,
+        }),
+        // Repeats perfect scenario, but add voltage + angular errors:
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
+             0.5, 0.02)
+                .finished(),
+            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0, 0.0)
+                .finished(),
+            /*noisify=*/false,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/1e-4,
+            /*goal_tolerance=*/2e-2,
+        }),
+        // Add disturbances while we are driving:
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/false,
+            /*disturb=*/true,
+            /*estimate_tolerance=*/2e-2,
+            /*goal_tolerance=*/0.15,
+        }),
+        // Add noise and some initial error in addition:
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
+            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/true,
+            /*disturb=*/true,
+            /*estimate_tolerance=*/0.15,
+            /*goal_tolerance=*/0.5,
+        }),
+        // Try another spline, just in case the one I was using is special for
+        // some reason; this path will also go straight up to a target, to
+        // better simulate what might happen when trying to score:
+        LocalizerTestParams({
+            /*control_pts_x=*/{{0.5, 3.5, 4.0, 8.0, 11.0, 10.2}},
+            /*control_pts_y=*/{{1.0, 1.0, -3.0, -2.0, -3.5, -3.65}},
+            (TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            (TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+             0.0, 0.0)
+                .finished(),
+            /*noisify=*/true,
+            /*disturb=*/false,
+            /*estimate_tolerance=*/0.1,
+            /*goal_tolerance=*/0.5,
+        })));
+
+}  // namespace testing
+}  // namespace control_loops
+}  // namespace y2019