Stop using std::function in HybridEkf

Removes mallocs from the HybridEkf and localizer as a whole.

This solution feels a bit inelegant, but that's true of the entire
HybridEkf class (I may simplify some of this once we delete y2019).
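
Corrections now go through small ExpectedObservationFunctor
implementations whose storage is owned by an ExpectedObservationAllocator
instead of std::function callbacks. As a rough sketch of the resulting
pattern (modelled on the y2022 localizer; member setup elided):

    // Member of the localizer; holds functor storage sized to mirror the
    // EKF's internal sample queue, so no per-correction heap allocation.
    HybridEkf::ExpectedObservationAllocator<Corrector> observations_{&ekf_};

    // In the correction path, instead of
    // ekf_.Correct(z, &U, {}, h, dhdx, R, t):
    observations_.CorrectKnownH(Eigen::Vector3f::Zero(), &U_correct,
                                Corrector(state_at_capture, Z), R, now);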

Change-Id: I2deb5b1221ea17b08baad7e8bb46d6bbd1b987a6
Signed-off-by: James Kuszmaul <jabukuszmaul+collab@gmail.com>
diff --git a/aos/containers/priority_queue.h b/aos/containers/priority_queue.h
index b092219..d4d0196 100644
--- a/aos/containers/priority_queue.h
+++ b/aos/containers/priority_queue.h
@@ -3,6 +3,7 @@
 
 #include <array>
 #include <iterator>
+#include <optional>
 
 namespace aos {
 
@@ -60,18 +61,19 @@
   // and the queue is full, then data is ignored and end() is returned.
   // PushFromBottom starts the search from the bottom of the queue.
   // TODO(james): If performance becomes an issue, improve search.
-  iterator PushFromBottom(const Data &data) {
+  iterator PushFromBottom(Data data) {
     size_t lower_idx = buffer_size;
     size_t upper_idx = bottom_;
     // Find our spot in the queue:
-    while (upper_idx != buffer_size && !cmp_(data, list_[upper_idx].data)) {
+    while (upper_idx != buffer_size &&
+           !cmp_(data, list_[upper_idx].data.value())) {
       lower_idx = upper_idx;
       upper_idx = list_[upper_idx].upper_idx;
       if (upper_idx == buffer_size) {
         break;
       }
     }
-    return InsertInList(data, lower_idx, upper_idx);
+    return InsertInList(std::move(data), lower_idx, upper_idx);
   }
 
   size_t size() const { return size_; }
@@ -85,10 +87,10 @@
     top_ = buffer_size;
   }
 
-  Data &top() { return list_[top_].data; }
-  const Data &top() const { return list_[top_].data; }
-  Data &get(size_t idx) { return list_[idx].data; }
-  const Data &get(size_t idx) const { return list_[idx].data; }
+  Data &top() { return list_[top_].data.value(); }
+  const Data &top() const { return list_[top_].data.value(); }
+  Data &get(size_t idx) { return list_[idx].data.value(); }
+  const Data &get(size_t idx) const { return list_[idx].data.value(); }
   iterator begin() { return iterator(this, bottom_); }
   iterator end() { return iterator(this, buffer_size); }
 
@@ -101,7 +103,7 @@
   struct Datum {
     // A list element with data and the indices of the next highest/lowest
     // priority elements.
-    Data data;
+    std::optional<Data> data;
     // Values of buffer_size indicate that we are at the beginning or end of
     // the queue.
     size_t lower_idx = buffer_size;
@@ -109,7 +111,7 @@
   };
 
   // Insert an element above lower_idx and below upper_idx.
-  iterator InsertInList(const Data &data, size_t lower_idx, size_t upper_idx) {
+  iterator InsertInList(Data data, size_t lower_idx, size_t upper_idx) {
     // For inserting new elements, when we are initially filling the queue we
     // will increment upwards in the array; once full, we just evict the
     // lowest priority element.
@@ -140,7 +142,7 @@
     if (top_ == lower_idx) {
       top_ = insertion_idx;
     }
-    list_[insertion_idx].data = data;
+    list_[insertion_idx].data.emplace(std::move(data));
     list_[insertion_idx].upper_idx = upper_idx;
     list_[insertion_idx].lower_idx = lower_idx;
     ++size_;
diff --git a/aos/containers/priority_queue_test.cc b/aos/containers/priority_queue_test.cc
index d26d7c9..64456c4 100644
--- a/aos/containers/priority_queue_test.cc
+++ b/aos/containers/priority_queue_test.cc
@@ -153,16 +153,22 @@
   ASSERT_TRUE(reverse_expected.empty());
 }
 
-// Check that operator-> works as expected on the iterator.
+// Check that operator-> works as expected on the iterator, and that we can use
+// a non-copyable, non-assignable object.
 struct TestStruct {
+  TestStruct(int a) : a(a) {}
+  TestStruct(const TestStruct &) = delete;
+  TestStruct &operator=(const TestStruct &) = delete;
+  TestStruct(TestStruct &&) = default;
+  TestStruct &operator=(TestStruct &&) = delete;
   int a;
   friend bool operator<(const TestStruct &lhs, const TestStruct &rhs) {
     return lhs.a < rhs.a;
   }
 };
-TEST(PriorirtyQueueTest, MemberAccess) {
+TEST(PriorityQueueMoveTest, MemberAccess) {
   PriorityQueue<TestStruct, 10, ::std::less<TestStruct>> q;
-  auto it = q.PushFromBottom({11});
+  auto it = q.PushFromBottom(TestStruct{11});
   EXPECT_EQ(11, it->a);
 }
 
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index fcbed76..89b2182 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -135,6 +135,7 @@
     kLongitudinalAccel = 2,
     kLateralAccel = 3,
   };
+
   static constexpr int kNInputs = 4;
   // Number of previous samples to save.
   static constexpr int kSaveSamples = 200;
@@ -177,6 +178,135 @@
   // State contains the states defined by the StateIdx enum. See comments there.
   typedef Eigen::Matrix<Scalar, kNStates, 1> State;
 
+  // The following classes exist to allow us to support doing corrections in the
+  // past by rewinding the EKF, calling the appropriate H and dhdx functions,
+  // and then playing everything back. Originally, this simply used
+  // std::function's, but doing so causes us to perform dynamic memory
+  // allocation in the core of the drivetrain control loop.
+  //
+  // The ExpectedObservationFunctor class provides the interface for the
+  // actual H and dH/dX that the EKF itself needs. Most correction steps use
+  // this directly; in the degenerate case, ExpectedObservationFunctor could
+  // be implemented as a class that simply stores two std::functions and calls
+  // them when H() and DHDX() are called.
+  //
+  // The ObserveDeletion() and deleted() methods exist for sanity checking--we
+  // don't rely on them to do any work, but in order to ensure that memory is
+  // being managed correctly, we have the HybridEkf call ObserveDeletion() when
+  // it no longer needs an instance of the object.
+  class ExpectedObservationFunctor {
+   public:
+    virtual ~ExpectedObservationFunctor() = default;
+    // Return the expected measurement of the system for a given state and plant
+    // input.
+    virtual Output H(const State &state, const Input &input) = 0;
+    // Return the derivative of H() with respect to the state, given the current
+    // state.
+    virtual Eigen::Matrix<Scalar, kNOutputs, kNStates> DHDX(
+        const State &state) = 0;
+    virtual void ObserveDeletion() {
+      CHECK(!deleted_);
+      deleted_ = true;
+    }
+    bool deleted() const { return deleted_; }
+
+   private:
+    bool deleted_ = false;
+  };
+
+  // The ExpectedObservationBuilder creates a new ExpectedObservationFunctor.
+  // This is used for situations where we need to know the state at some time
+  // in the past before we can even determine what the correction functions
+  // are. This is only used in the y2019 code, and we have generally stopped
+  // using this pattern.
+  class ExpectedObservationBuilder {
+   public:
+    virtual ~ExpectedObservationBuilder() = default;
+    // The lifetime of the returned object should last at least until
+    // ObserveDeletion() is called on said object.
+    virtual ExpectedObservationFunctor *MakeExpectedObservations(
+        const State &state, const StateSquare &P) = 0;
+    void ObserveDeletion() {
+      CHECK(!deleted_);
+      deleted_ = true;
+    }
+    bool deleted() const { return deleted_; }
+
+   private:
+    bool deleted_ = false;
+  };
+
+  // The ExpectedObservationAllocator is a utility class that manages the
+  // memory for a single type of correction step for a given localizer.
+  // Using the knowledge that at most kSaveSamples ExpectedObservation* objects
+  // can be referenced by the HybridEkf at any given time, this keeps an
+  // internal queue that more than mirrors the HybridEkf's internal queue, using
+  // the oldest spots in the queue to construct new ExpectedObservation*s.
+  // This can be used with T as either an ExpectedObservationBuilder or an
+  // ExpectedObservationFunctor. The appropriate Correct function will then be
+  // called in place of calling HybridEkf::Correct directly. Note that unless T
+  // implements both the Builder and Functor (which is generally discouraged),
+  // only one of the Correct* functions will build.
+  template <typename T>
+  class ExpectedObservationAllocator {
+   public:
+    ExpectedObservationAllocator(HybridEkf *ekf) : ekf_(ekf) {}
+    void CorrectKnownH(const Output &z, const Input *U, T H,
+                       const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
+                       aos::monotonic_clock::time_point t) {
+      if (functors_.full()) {
+        CHECK(functors_.begin()->functor->deleted());
+      }
+      auto pushed = functors_.PushFromBottom(Pair{t, std::move(H)});
+      if (pushed == functors_.end()) {
+        VLOG(1) << "Observation dropped off bottom of queue.";
+        return;
+      }
+      ekf_->Correct(z, U, nullptr, &pushed->functor.value(), R, t);
+    }
+    void CorrectKnownHBuilder(
+        const Output &z, const Input *U, T builder,
+        const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
+        aos::monotonic_clock::time_point t) {
+      if (functors_.full()) {
+        CHECK(functors_.begin()->functor->deleted());
+      }
+      auto pushed = functors_.PushFromBottom(Pair{t, std::move(builder)});
+      if (pushed == functors_.end()) {
+        VLOG(1) << "Observation dropped off bottom of queue.";
+        return;
+      }
+      ekf_->Correct(z, U, &pushed->functor.value(), nullptr, R, t);
+    }
+
+   private:
+    struct Pair {
+      aos::monotonic_clock::time_point t;
+      std::optional<T> functor;
+      friend bool operator<(const Pair &l, const Pair &r) { return l.t < r.t; }
+    };
+
+    HybridEkf *const ekf_;
+    aos::PriorityQueue<Pair, kSaveSamples + 1, std::less<Pair>> functors_;
+  };
+
+  // A simple implementation of ExpectedObservationFunctor for an LTI correction
+  // step. Does not store any external references, so overrides
+  // ObserveDeletion() to do nothing.
+  class LinearH : public ExpectedObservationFunctor {
+   public:
+    LinearH(const Eigen::Matrix<Scalar, kNOutputs, kNStates> &H) : H_(H) {}
+    virtual ~LinearH() = default;
+    Output H(const State &state, const Input &) final { return H_ * state; }
+    Eigen::Matrix<Scalar, kNOutputs, kNStates> DHDX(const State &) final {
+      return H_;
+    }
+    void ObserveDeletion() {}
+
+   private:
+    const Eigen::Matrix<Scalar, kNOutputs, kNStates> H_;
+  };
+
   // Constructs a HybridEkf for a particular drivetrain.
   // Currently, we use the drivetrain config for modelling constants
   // (continuous time A and B matrices) and for the noise matrices for the
@@ -207,11 +337,8 @@
         P_,
         Input::Zero(),
         Output::Zero(),
-        {},
-        [](const State &, const Input &) { return Output::Zero(); },
-        [](const State &) {
-          return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
-        },
+        nullptr,
+        &H_encoders_and_gyro_.value(),
         Eigen::Matrix<Scalar, kNOutputs, kNOutputs>::Identity(),
         StateSquare::Identity(),
         StateSquare::Zero(),
@@ -230,18 +357,11 @@
   // on an assumption that the voltage was held constant between the time steps.
   // TODO(james): Is it necessary to explicitly provide a version with H as a
   // matrix for linear cases?
-  void Correct(
-      const Output &z, const Input *U,
-      std::function<void(const State &, const StateSquare &,
-                         std::function<Output(const State &, const Input &)> *,
-                         std::function<Eigen::Matrix<
-                             Scalar, kNOutputs, kNStates>(const State &)> *)>
-          make_h,
-      std::function<Output(const State &, const Input &)> h,
-      std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
-          dhdx,
-      const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
-      aos::monotonic_clock::time_point t);
+  void Correct(const Output &z, const Input *U,
+               ExpectedObservationBuilder *observation_builder,
+               ExpectedObservationFunctor *expected_observations,
+               const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
+               aos::monotonic_clock::time_point t);
 
   // A utility function for specifically updating with encoder and gyro
   // measurements.
@@ -291,11 +411,8 @@
     Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
     R.setZero();
     R.diagonal() << encoder_noise_, encoder_noise_, gyro_noise_;
-    Correct(z, &U, {},
-            [this](const State &X, const Input &) {
-              return H_encoders_and_gyro_ * X;
-            },
-            [this](const State &) { return H_encoders_and_gyro_; }, R, t);
+    CHECK(H_encoders_and_gyro_.has_value());
+    Correct(z, &U, nullptr, &H_encoders_and_gyro_.value(), R, t);
   }
 
   // Sundry accessor:
@@ -348,6 +465,50 @@
 
  private:
   struct Observation {
+    Observation(aos::monotonic_clock::time_point t,
+                aos::monotonic_clock::time_point prev_t, State X_hat,
+                StateSquare P, Input U, Output z,
+                ExpectedObservationBuilder *make_h,
+                ExpectedObservationFunctor *h,
+                Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R, StateSquare A_d,
+                StateSquare Q_d,
+                aos::monotonic_clock::duration discretization_time,
+                State predict_update)
+        : t(t),
+          prev_t(prev_t),
+          X_hat(X_hat),
+          P(P),
+          U(U),
+          z(z),
+          make_h(make_h),
+          h(h),
+          R(R),
+          A_d(A_d),
+          Q_d(Q_d),
+          discretization_time(discretization_time),
+          predict_update(predict_update) {}
+    Observation(const Observation &) = delete;
+    Observation &operator=(const Observation &) = delete;
+    // Move-construct an observation by copying over the contents of the struct
+    // and then clearing the old Observation's pointers so that it doesn't try
+    // to clean things up.
+    Observation(Observation &&o)
+        : Observation(o.t, o.prev_t, o.X_hat, o.P, o.U, o.z, o.make_h, o.h, o.R,
+                      o.A_d, o.Q_d, o.discretization_time, o.predict_update) {
+      o.make_h = nullptr;
+      o.h = nullptr;
+    }
+    Observation &operator=(Observation &&observation) = delete;
+    ~Observation() {
+      // Observe h being deleted first, since make_h may own its memory.
+      // Shouldn't actually matter, though.
+      if (h != nullptr) {
+        h->ObserveDeletion();
+      }
+      if (make_h != nullptr) {
+        make_h->ObserveDeletion();
+      }
+    }
     // Time when the observation was taken.
     aos::monotonic_clock::time_point t;
     // Time that the previous observation was taken:
@@ -365,24 +526,11 @@
     // estimate. This is used by the camera to make it so that we only have to
     // match targets once.
     // Only called if h and dhdx are empty.
-    std::function<void(const State &, const StateSquare &,
-                       std::function<Output(const State &, const Input &)> *,
-                       std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
-                           const State &)> *)>
-        make_h;
+    ExpectedObservationBuilder *make_h = nullptr;
     // A function to calculate the expected output at a given state/input.
     // TODO(james): For encoders/gyro, it is linear and the function call may
     // be expensive. Potential source of optimization.
-    std::function<Output(const State &, const Input &)> h;
-    // The Jacobian of h with respect to x.
-    // We assume that U has no impact on the Jacobian.
-    // TODO(james): Currently, none of the users of this actually make use of
-    // the ability to have dynamic dhdx (technically, the camera code should
-    // recalculate it to be strictly correct, but I was both too lazy to do
-    // so and it seemed unnecessary). This is a potential source for future
-    // optimizations if function calls are being expensive.
-    std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
-        dhdx;
+    ExpectedObservationFunctor *h = nullptr;
     // The measurement noise matrix.
     Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
 
@@ -556,7 +704,7 @@
   }
 
   void CorrectImpl(Observation *obs, State *state, StateSquare *P) {
-    const Eigen::Matrix<Scalar, kNOutputs, kNStates> H = obs->dhdx(*state);
+    const Eigen::Matrix<Scalar, kNOutputs, kNStates> H = obs->h->DHDX(*state);
     // Note: Technically, this does calculate P * H.transpose() twice. However,
     // when I was mucking around with some things, I found that in practice
     // putting everything into one expression and letting Eigen optimize it
@@ -566,7 +714,7 @@
         *P * H.transpose() * (H * *P * H.transpose() + obs->R).inverse();
     const StateSquare Ptemp = (StateSquare::Identity() - K * H) * *P;
     *P = Ptemp;
-    *state += K * (obs->z - obs->h(*state, obs->U));
+    *state += K * (obs->z - obs->h->H(*state, obs->U));
   }
 
   void ProcessObservation(Observation *obs, const std::chrono::nanoseconds dt,
@@ -576,9 +724,9 @@
     if (dt.count() != 0 && dt < kMaxTimestep) {
       PredictImpl(obs, dt, state, P);
     }
-    if (!(obs->h && obs->dhdx)) {
-      CHECK(obs->make_h);
-      obs->make_h(*state, *P, &obs->h, &obs->dhdx);
+    if (obs->h == nullptr) {
+      CHECK(obs->make_h != nullptr);
+      obs->h = CHECK_NOTNULL(obs->make_h->MakeExpectedObservations(*state, *P));
     }
     CorrectImpl(obs, state, P);
   }
@@ -590,7 +738,7 @@
   StateSquare A_continuous_;
   StateSquare Q_continuous_;
   StateSquare P_;
-  Eigen::Matrix<Scalar, kNOutputs, kNStates> H_encoders_and_gyro_;
+  std::optional<LinearH> H_encoders_and_gyro_;
   Scalar encoder_noise_, gyro_noise_;
   Eigen::Matrix<Scalar, kNStates, kNInputs> B_continuous_;
 
@@ -610,14 +758,8 @@
 template <typename Scalar>
 void HybridEkf<Scalar>::Correct(
     const Output &z, const Input *U,
-    std::function<void(const State &, const StateSquare &,
-                       std::function<Output(const State &, const Input &)> *,
-                       std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
-                           const State &)> *)>
-        make_h,
-    std::function<Output(const State &, const Input &)> h,
-    std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
-        dhdx,
+    ExpectedObservationBuilder *observation_builder,
+    ExpectedObservationFunctor *expected_observations,
     const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
     aos::monotonic_clock::time_point t) {
   CHECK(!observations_.empty());
@@ -627,9 +769,9 @@
     return;
   }
   auto cur_it = observations_.PushFromBottom(
-      {t, t, State::Zero(), StateSquare::Zero(), Input::Zero(), z, make_h, h,
-       dhdx, R, StateSquare::Identity(), StateSquare::Zero(),
-       std::chrono::seconds(0), State::Zero()});
+      {t, t, State::Zero(), StateSquare::Zero(), Input::Zero(), z,
+       observation_builder, expected_observations, R, StateSquare::Identity(),
+       StateSquare::Zero(), std::chrono::seconds(0), State::Zero()});
   if (cur_it == observations_.end()) {
     VLOG(1) << "Camera dropped off of end with time of "
             << aos::time::DurationInSeconds(t.time_since_epoch())
@@ -773,14 +915,18 @@
       std::pow(1.1, 2.0);
   Q_continuous_(kLateralVelocity, kLateralVelocity) = std::pow(0.1, 2.0);
 
-  H_encoders_and_gyro_.setZero();
-  // Encoders are stored directly in the state matrix, so are a minor
-  // transform away.
-  H_encoders_and_gyro_(0, kLeftEncoder) = 1.0;
-  H_encoders_and_gyro_(1, kRightEncoder) = 1.0;
-  // Gyro rate is just the difference between right/left side speeds:
-  H_encoders_and_gyro_(2, kLeftVelocity) = -1.0 / diameter;
-  H_encoders_and_gyro_(2, kRightVelocity) = 1.0 / diameter;
+  {
+    Eigen::Matrix<Scalar, kNOutputs, kNStates> H_encoders_and_gyro;
+    H_encoders_and_gyro.setZero();
+    // Encoders are stored directly in the state matrix, so are a minor
+    // transform away.
+    H_encoders_and_gyro(0, kLeftEncoder) = 1.0;
+    H_encoders_and_gyro(1, kRightEncoder) = 1.0;
+    // Gyro rate is just the difference between right/left side speeds:
+    H_encoders_and_gyro(2, kLeftVelocity) = -1.0 / diameter;
+    H_encoders_and_gyro(2, kRightVelocity) = 1.0 / diameter;
+    H_encoders_and_gyro_.emplace(H_encoders_and_gyro);
+  }
 
   encoder_noise_ = 5e-9;
   gyro_noise_ = 1e-13;
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
index 4bdf4de..5efb297 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
+++ b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
@@ -213,8 +213,7 @@
   HybridEkf<>::Output Z(0.5, 0.5, 1);
   Eigen::Matrix<double, 3, 12> H;
   H.setIdentity();
-  auto h = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h(H);
   Eigen::Matrix<double, 3, 3> R;
   R.setIdentity();
   R *= 1e-3;
@@ -222,7 +221,7 @@
   EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kTheta));
   const double starting_p_norm = ekf_.P().norm();
   for (int ii = 0; ii < 100; ++ii) {
-    ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_);
+    ekf_.Correct(Z, &U, nullptr, &h, R, t0_);
   }
   EXPECT_NEAR(Z(0, 0), ekf_.X_hat(StateIdx::kX), 1e-3);
   EXPECT_NEAR(Z(1, 0), ekf_.X_hat(StateIdx::kY), 1e-3);
@@ -241,8 +240,7 @@
   State true_X = ekf_.X_hat();
   Eigen::Matrix<double, 3, 12> H;
   H.setZero();
-  auto h = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h(H);
   // Provide constant input voltage.
   Input U;
   U << 12.0, 10.0, 1.0, -0.1;
@@ -253,7 +251,7 @@
   EXPECT_EQ(0.0, ekf_.X_hat().norm());
   const double starting_p_norm = ekf_.P().norm();
   for (int ii = 0; ii < 100; ++ii) {
-    ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_ + dt_config_.dt * (ii + 1));
+    ekf_.Correct(Z, &U, nullptr, &h, R, t0_ + dt_config_.dt * (ii + 1));
     true_X = Update(true_X, U, false);
     EXPECT_EQ(true_X, ekf_.X_hat());
   }
@@ -292,8 +290,7 @@
   Z.setZero();
   Eigen::Matrix<double, 3, 12> H;
   H.setZero();
-  auto h_zero = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx_zero = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h_zero(H);
   Input U;
   U << 12.0, 12.0, 0.0, 0.0;
   Eigen::Matrix<double, 3, 3> R;
@@ -302,8 +299,7 @@
   // We fill up the buffer to be as full as demanded by the user.
   const size_t n_predictions = GetParam();
   for (size_t ii = 0; ii < n_predictions; ++ii) {
-    ekf_.Correct(Z, &U, {}, h_zero, dhdx_zero, R,
-                 t0_ + dt_config_.dt * (ii + 1));
+    ekf_.Correct(Z, &U, nullptr, &h_zero, R, t0_ + dt_config_.dt * (ii + 1));
   }
 
   // Store state and covariance after prediction steps.
@@ -315,13 +311,12 @@
   H(0, 0) = 1;
   H(1, 1) = 1;
   H(2, 2) = 1;
-  auto h = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h(H);
   R.setZero();
   R.diagonal() << 1e-5, 1e-5, 1e-5;
   U.setZero();
   for (int ii = 0; ii < 20; ++ii) {
-    ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_);
+    ekf_.Correct(Z, &U, nullptr, &h, R, t0_);
   }
   const double corrected_p_norm = ekf_.P().norm();
   State expected_X_hat = modeled_X_hat;
@@ -348,8 +343,7 @@
   Z.setZero();
   Eigen::Matrix<double, 3, 12> H;
   H.setZero();
-  auto h_zero = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx_zero = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h_zero(H);
   Input U;
   U << 12.0, 12.0, 0.0, 0.0;
   Eigen::Matrix<double, 3, 3> R;
@@ -357,8 +351,7 @@
 
   EXPECT_EQ(0.0, ekf_.X_hat().norm());
   for (int ii = 0; ii < HybridEkf<>::kSaveSamples; ++ii) {
-    ekf_.Correct(Z, &U, {}, h_zero, dhdx_zero, R,
-                 t0_ + dt_config_.dt * (ii + 1));
+    ekf_.Correct(Z, &U, nullptr, &h_zero, R, t0_ + dt_config_.dt * (ii + 1));
   }
   const State modeled_X_hat = ekf_.X_hat();
   const HybridEkf<>::StateSquare modeled_P = ekf_.P();
@@ -368,12 +361,11 @@
   H(0, 0) = 1;
   H(1, 1) = 1;
   H(2, 2) = 1;
-  auto h = [H](const State &X, const Input &) { return H * X; };
-  auto dhdx = [H](const State &) { return H; };
+  HybridEkf<>::LinearH h(H);
   R.setIdentity();
   R *= 1e-5;
   U.setZero();
-  ekf_.Correct(Z, &U, {}, h, dhdx, R, t0_);
+  ekf_.Correct(Z, &U, nullptr, &h, R, t0_);
   EXPECT_EQ(ekf_.X_hat(), modeled_X_hat)
       << "Expected too-old correction to have no effect; X_hat: "
       << ekf_.X_hat() << " expected " << modeled_X_hat;
@@ -493,8 +485,8 @@
   // Expect death if the user does not provide U when creating a fresh
   // measurement.
   EXPECT_DEATH(
-      ekf_.Correct({1, 2, 3}, nullptr, {}, {}, {}, Eigen::Matrix3d::Zero(),
-                   t0_ + std::chrono::seconds(1)),
+      ekf_.Correct({1, 2, 3}, nullptr, nullptr, nullptr,
+                   Eigen::Matrix3d::Zero(), t0_ + std::chrono::seconds(1)),
       "U != nullptr");
 }
 
@@ -504,23 +496,10 @@
   // Check that we die when no h-related functions are provided:
   Input U;
   U << 1.0, 2.0, 0.0, 0.0;
-  EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {}, {}, {}, Eigen::Matrix3d::Zero(),
-                            t0_ + std::chrono::seconds(1)),
-               "make_h");
-  // Check that we die when only one of h and dhdx are provided:
   EXPECT_DEATH(
-      ekf_.Correct(
-          {1, 2, 3}, &U, {}, {},
-          [](const State &) { return Eigen::Matrix<double, 3, 12>::Zero(); },
-          Eigen::Matrix3d::Zero(), t0_ + std::chrono::seconds(1)),
+      ekf_.Correct({1, 2, 3}, &U, nullptr, nullptr, Eigen::Matrix3d::Zero(),
+                   t0_ + std::chrono::seconds(1)),
       "make_h");
-  EXPECT_DEATH(ekf_.Correct(
-                   {1, 2, 3}, &U, {},
-                   [](const State &, const Input &) {
-                     return Eigen::Matrix<double, 3, 1>::Zero();
-                   },
-                   {}, Eigen::Matrix3d::Zero(), t0_ + std::chrono::seconds(1)),
-               "make_h");
 }
 
 }  // namespace testing
diff --git a/y2019/control_loops/drivetrain/localizer.h b/y2019/control_loops/drivetrain/localizer.h
index 879cd0f..c55ccc9 100644
--- a/y2019/control_loops/drivetrain/localizer.h
+++ b/y2019/control_loops/drivetrain/localizer.h
@@ -4,9 +4,9 @@
 #include <cmath>
 #include <memory>
 
-#include "frc971/control_loops/pose.h"
 #include "frc971/control_loops/drivetrain/camera.h"
 #include "frc971/control_loops/drivetrain/hybrid_ekf.h"
+#include "frc971/control_loops/pose.h"
 
 namespace y2019 {
 namespace control_loops {
@@ -42,7 +42,9 @@
           &dt_config,
       Pose *robot_pose)
       : ::frc971::control_loops::drivetrain::HybridEkf<Scalar>(dt_config),
-        robot_pose_(robot_pose) {}
+        robot_pose_(robot_pose),
+        h_queue_(this),
+        make_h_queue_(this) {}
 
   // Performs a kalman filter correction with a single camera frame, consisting
   // of up to max_targets_per_frame targets and taken at time t.
@@ -79,26 +81,92 @@
     // As such, we need to store the EKF functions that the remaining targets
     // will need in arrays:
     ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
-                      max_targets_per_frame> h_functions;
+                      max_targets_per_frame>
+        h_functions;
     ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
                                                     kNStates>(const State &)>,
-                      max_targets_per_frame> dhdx_functions;
-    HybridEkf::Correct(
+                      max_targets_per_frame>
+        dhdx_functions;
+    make_h_queue_.CorrectKnownHBuilder(
         z, nullptr,
-        ::std::bind(&TypedLocalizer::MakeH, this, camera, targets, &h_functions,
-                    &dhdx_functions, ::std::placeholders::_1,
-                    ::std::placeholders::_2, ::std::placeholders::_3,
-                    ::std::placeholders::_4),
-        {}, {}, R, t);
+        ExpectedObservationBuilder(this, camera, targets, &h_functions,
+                                   &dhdx_functions),
+        R, t);
     // Fetch cache:
     for (size_t ii = 1; ii < targets.size(); ++ii) {
       TargetViewToMatrices(targets[ii], &z, &R);
-      HybridEkf::Correct(z, nullptr, {}, h_functions[ii], dhdx_functions[ii], R,
-                         t);
+      h_queue_.CorrectKnownH(
+          z, nullptr,
+          ExpectedObservationFunctor(h_functions[ii], dhdx_functions[ii]), R,
+          t);
     }
   }
 
  private:
+  class ExpectedObservationFunctor
+      : public HybridEkf::ExpectedObservationFunctor {
+   public:
+    ExpectedObservationFunctor(
+        ::std::function<Output(const State &, const Input &)> h,
+        ::std::function<
+            Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+            dhdx)
+        : h_(h), dhdx_(dhdx) {}
+
+    Output H(const State &state, const Input &input) final {
+      return h_(state, input);
+    }
+
+    virtual Eigen::Matrix<Scalar, kNOutputs, kNStates> DHDX(
+        const State &state) final {
+      return dhdx_(state);
+    }
+
+   private:
+    ::std::function<Output(const State &, const Input &)> h_;
+    ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+        dhdx_;
+  };
+  class ExpectedObservationBuilder
+      : public HybridEkf::ExpectedObservationBuilder {
+   public:
+    ExpectedObservationBuilder(
+        TypedLocalizer *localizer, const Camera &camera,
+        const ::aos::SizedArray<TargetView, max_targets_per_frame>
+            &target_views,
+        ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
+                          max_targets_per_frame> *h_functions,
+        ::aos::SizedArray<::std::function<Eigen::Matrix<
+                              Scalar, kNOutputs, kNStates>(const State &)>,
+                          max_targets_per_frame> *dhdx_functions)
+        : localizer_(localizer),
+          camera_(camera),
+          target_views_(target_views),
+          h_functions_(h_functions),
+          dhdx_functions_(dhdx_functions) {}
+
+    virtual ExpectedObservationFunctor *MakeExpectedObservations(
+        const State &state, const StateSquare &P) {
+      ::std::function<Output(const State &, const Input &)> h;
+      ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+          dhdx;
+      localizer_->MakeH(camera_, target_views_, h_functions_, dhdx_functions_,
+                        state, P, &h, &dhdx);
+      functor_.emplace(h, dhdx);
+      return &functor_.value();
+    }
+
+   private:
+    TypedLocalizer *localizer_;
+    const Camera &camera_;
+    const ::aos::SizedArray<TargetView, max_targets_per_frame> &target_views_;
+    ::aos::SizedArray<::std::function<Output(const State &, const Input &)>,
+                      max_targets_per_frame> *h_functions_;
+    ::aos::SizedArray<::std::function<Eigen::Matrix<Scalar, kNOutputs,
+                                                    kNStates>(const State &)>,
+                      max_targets_per_frame> *dhdx_functions_;
+    std::optional<ExpectedObservationFunctor> functor_;
+  };
   // The threshold to use for completely rejecting potentially bad target
   // matches.
   // TODO(james): Tune
@@ -159,8 +227,8 @@
                         max_targets_per_frame> *dhdx_functions,
       const State &X_hat, const StateSquare &P,
       ::std::function<Output(const State &, const Input &)> *h,
-      ::std::function<
-          Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)> *dhdx) {
+      ::std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
+          *dhdx) {
     // Because we need to match camera targets ("views") to actual field
     // targets, and because we want to take advantage of the correlations
     // between the targets (i.e., if we see two targets in the image, they
@@ -226,7 +294,8 @@
     // as the scores matrix.
     ::std::array<::std::array<Eigen::Matrix<Scalar, kNOutputs, kNStates>,
                               max_targets_per_frame>,
-                 num_targets> all_H_matrices;
+                 num_targets>
+        all_H_matrices;
 
     // Iterate through and fill out the scores for each potential pairing:
     for (size_t ii = 0; ii < target_views.size(); ++ii) {
@@ -360,8 +429,8 @@
     }
   }
 
-  Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(
-      const Target &target, const Pose &camera_pose) {
+  Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(const Target &target,
+                                                     const Pose &camera_pose) {
     // To calculate dheading/d{x,y,theta}:
     // heading = arctan2(target_pos - camera_pos) - camera_theta
     Eigen::Matrix<Scalar, 3, 1> target_pos = target.pose().abs_pos();
@@ -401,8 +470,8 @@
   // the number of rows in scores/best_matches that are actually populated).
   ::std::array<int, max_targets_per_frame> MatchFrames(
       const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
-      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame> &
-          best_matches,
+      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
+          &best_matches,
       int n_views) {
     ::std::array<int, max_targets_per_frame> best_set;
     best_set.fill(-1);
@@ -424,8 +493,8 @@
   // true, that means that we are done recursing.
   void MatchFrames(
       const Eigen::Matrix<Scalar, max_targets_per_frame, num_targets> &scores,
-      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame> &
-          best_matches,
+      const Eigen::Matrix<int, max_targets_per_frame, max_targets_per_frame>
+          &best_matches,
       const ::aos::SizedArray<bool, max_targets_per_frame> &used_views,
       const ::std::array<bool, num_targets> &used_targets,
       ::std::array<int, max_targets_per_frame> *best_set, Scalar *best_score) {
@@ -485,6 +554,15 @@
   // The pose that is used by the cameras to determine the location of the robot
   // and thus the expected view of the targets.
   Pose *robot_pose_;
+
+  typename HybridEkf::template ExpectedObservationAllocator<
+      ExpectedObservationFunctor>
+      h_queue_;
+  typename HybridEkf::template ExpectedObservationAllocator<
+      ExpectedObservationBuilder>
+      make_h_queue_;
+
+  friend class ExpectedObservationBuilder;
 };  // class TypedLocalizer
 
 }  // namespace control_loops
diff --git a/y2020/control_loops/drivetrain/localizer.cc b/y2020/control_loops/drivetrain/localizer.cc
index 505a598..8f8bf87 100644
--- a/y2020/control_loops/drivetrain/localizer.cc
+++ b/y2020/control_loops/drivetrain/localizer.cc
@@ -89,6 +89,7 @@
     : event_loop_(event_loop),
       dt_config_(dt_config),
       ekf_(dt_config),
+      observations_(&ekf_),
       clock_offset_fetcher_(
           event_loop_->MakeFetcher<aos::message_bridge::ServerStatistics>(
               "/aos")),
@@ -244,7 +245,7 @@
     rejection_reason = RejectionReason::IMAGE_FROM_FUTURE;
   }
   if (!result.has_camera_calibration()) {
-    LOG(WARNING) << "Got camera frame without calibration data.";
+    AOS_LOG(WARNING, "Got camera frame without calibration data.\n");
     ImageMatchDebug::Builder builder(*fbb);
     builder.add_camera(camera_index);
     builder.add_accepted(false);
@@ -401,11 +402,6 @@
 
     Eigen::Matrix3f R = Eigen::Matrix3f::Zero();
     R.diagonal() = noises.cwiseAbs2();
-    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> H;
-    H.setZero();
-    H(0, StateIdx::kX) = 1;
-    H(1, StateIdx::kY) = 1;
-    H(2, StateIdx::kTheta) = 1;
     VLOG(1) << "Pose implied by target: " << Z.transpose()
             << " and current pose " << x() << ", " << y() << ", " << theta()
             << " Heading/dist/skew implied by target: "
@@ -447,47 +443,11 @@
     // more convenient to write given the Correct() interface we already have.
     // Note: If we start going back to doing back-in-time rewinds, then we can't
     // get away with passing things by reference.
-    ekf_.Correct(
-        Eigen::Vector3f::Zero(), &U, {},
-        [H, H_field_target, pose_robot_target, state_at_capture, Z,
-         &correction_rejection](const State &,
-                                const Input &) -> Eigen::Vector3f {
-          // Weighting for how much to use the current robot heading estimate
-          // vs. the heading estimate from the image results. A value of 1.0
-          // completely ignores the measured heading, but will prioritize turret
-          // aiming above all else. A value of 0.0 will prioritize correcting
-          // any gyro heading drift.
-          constexpr float kImpliedWeight = 0.99;
-          const float z_yaw_diff = aos::math::NormalizeAngle(
-              state_at_capture.value()(Localizer::StateIdx::kTheta) - Z(2));
-          const float z_yaw = Z(2) + kImpliedWeight * z_yaw_diff;
-          const Eigen::Vector3f Z_implied =
-              CalculateImpliedPose(z_yaw, H_field_target, pose_robot_target);
-          const Eigen::Vector3f Z_used = Z_implied;
-          // Just in case we ever do encounter any, drop measurements if they
-          // have non-finite numbers.
-          if (!Z.allFinite()) {
-            AOS_LOG(WARNING, "Got measurement with infinites or NaNs.\n");
-            correction_rejection = RejectionReason::NONFINITE_MEASUREMENT;
-            return Eigen::Vector3f::Zero();
-          }
-          Eigen::Vector3f Zhat = H * state_at_capture.value() - Z_used;
-          // Rewrap angle difference to put it back in range. Note that this
-          // component of the error is currently ignored (see definition of H
-          // above).
-          Zhat(2) = aos::math::NormalizeAngle(Zhat(2));
-          // If the measurement implies that we are too far from the current
-          // estimate, then ignore it.
-          // Note that I am not entirely sure how much effect this actually has,
-          // because I primarily introduced it to make sure that any grossly
-          // invalid measurements get thrown out.
-          if (Zhat.squaredNorm() > std::pow(10.0, 2)) {
-            correction_rejection = RejectionReason::CORRECTION_TOO_LARGE;
-            return Eigen::Vector3f::Zero();
-          }
-          return Zhat;
-        },
-        [H](const State &) { return H; }, R, now);
+    observations_.CorrectKnownH(
+        Eigen::Vector3f::Zero(), &U,
+        Corrector(H_field_target, pose_robot_target, state_at_capture.value(),
+                  Z, &correction_rejection),
+        R, now);
     if (correction_rejection) {
       builder.add_accepted(false);
       builder.add_rejection_reason(*correction_rejection);
@@ -502,6 +462,43 @@
   return debug_offsets;
 }
 
+Localizer::Output Localizer::Corrector::H(const State &, const Input &) {
+  // Weighting for how much to use the current robot heading estimate
+  // vs. the heading estimate from the image results. A value of 1.0
+  // completely ignores the measured heading, but will prioritize turret
+  // aiming above all else. A value of 0.0 will prioritize correcting
+  // any gyro heading drift.
+  constexpr float kImpliedWeight = 0.99;
+  const float z_yaw_diff = aos::math::NormalizeAngle(
+      state_at_capture_(Localizer::StateIdx::kTheta) - Z_(2));
+  const float z_yaw = Z_(2) + kImpliedWeight * z_yaw_diff;
+  const Eigen::Vector3f Z_implied =
+      CalculateImpliedPose(z_yaw, H_field_target_, pose_robot_target_);
+  const Eigen::Vector3f Z_used = Z_implied;
+  // Just in case we ever do encounter any, drop measurements if they
+  // have non-finite numbers.
+  if (!Z_.allFinite()) {
+    AOS_LOG(WARNING, "Got measurement with infinites or NaNs.\n");
+    *correction_rejection_ = RejectionReason::NONFINITE_MEASUREMENT;
+    return Eigen::Vector3f::Zero();
+  }
+  Eigen::Vector3f Zhat = H_ * state_at_capture_ - Z_used;
+  // Rewrap angle difference to put it back in range. Note that this
+  // component of the error is currently ignored (see definition of H
+  // above).
+  Zhat(2) = aos::math::NormalizeAngle(Zhat(2));
+  // If the measurement implies that we are too far from the current
+  // estimate, then ignore it.
+  // Note that I am not entirely sure how much effect this actually has,
+  // because I primarily introduced it to make sure that any grossly
+  // invalid measurements get thrown out.
+  if (Zhat.squaredNorm() > std::pow(10.0, 2)) {
+    *correction_rejection_ = RejectionReason::CORRECTION_TOO_LARGE;
+    return Eigen::Vector3f::Zero();
+  }
+  return Zhat;
+}
+
 }  // namespace drivetrain
 }  // namespace control_loops
 }  // namespace y2020
diff --git a/y2020/control_loops/drivetrain/localizer.h b/y2020/control_loops/drivetrain/localizer.h
index 5458797..de57091 100644
--- a/y2020/control_loops/drivetrain/localizer.h
+++ b/y2020/control_loops/drivetrain/localizer.h
@@ -9,8 +9,8 @@
 #include "aos/network/message_bridge_server_generated.h"
 #include "frc971/control_loops/drivetrain/hybrid_ekf.h"
 #include "frc971/control_loops/drivetrain/localizer.h"
-#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
 #include "y2020/control_loops/drivetrain/localizer_debug_generated.h"
+#include "y2020/control_loops/superstructure/superstructure_status_generated.h"
 #include "y2020/vision/sift/sift_generated.h"
 
 namespace y2020 {
@@ -95,6 +95,37 @@
     std::array<int, kNumRejectionReasons> rejection_counts;
   };
 
+  class Corrector : public HybridEkf::ExpectedObservationFunctor {
+   public:
+    Corrector(const Eigen::Matrix<float, 4, 4> &H_field_target,
+              const Pose &pose_robot_target, const State &state_at_capture,
+              const Eigen::Vector3f &Z,
+              std::optional<RejectionReason> *correction_rejection)
+        : H_field_target_(H_field_target),
+          pose_robot_target_(pose_robot_target),
+          state_at_capture_(state_at_capture),
+          Z_(Z),
+          correction_rejection_(correction_rejection) {
+      H_.setZero();
+      H_(0, StateIdx::kX) = 1;
+      H_(1, StateIdx::kY) = 1;
+      H_(2, StateIdx::kTheta) = 1;
+    }
+    Output H(const State &, const Input &) final;
+    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> DHDX(
+        const State &) final {
+      return H_;
+    }
+
+   private:
+    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> H_;
+    const Eigen::Matrix<float, 4, 4> H_field_target_;
+    Pose pose_robot_target_;
+    const State state_at_capture_;
+    const Eigen::Vector3f &Z_;
+    std::optional<RejectionReason> *correction_rejection_;
+  };
+
   // Processes new image data from the given pi and updates the EKF.
   aos::SizedArray<flatbuffers::Offset<ImageMatchDebug>, 5> HandleImageMatch(
       size_t camera_index, std::string_view pi,
@@ -113,6 +144,7 @@
   aos::EventLoop *const event_loop_;
   const frc971::control_loops::drivetrain::DrivetrainConfig<double> dt_config_;
   HybridEkf ekf_;
+  HybridEkf::ExpectedObservationAllocator<Corrector> observations_;
 
   std::vector<aos::Fetcher<frc971::vision::sift::ImageMatchResult>>
       image_fetchers_;
diff --git a/y2020/control_loops/drivetrain/localizer_test.cc b/y2020/control_loops/drivetrain/localizer_test.cc
index 9e9e7dd..d280523 100644
--- a/y2020/control_loops/drivetrain/localizer_test.cc
+++ b/y2020/control_loops/drivetrain/localizer_test.cc
@@ -15,6 +15,7 @@
 
 DEFINE_string(output_file, "",
               "If set, logs all channels to the provided logfile.");
+DECLARE_bool(die_on_malloc);
 
 // This file tests that the full 2020 localizer behaves sanely.
 
@@ -146,6 +147,7 @@
     CHECK_EQ(aos::configuration::GetNodeIndex(configuration(), pi1_), 1);
     set_team_id(frc971::control_loops::testing::kTeamNumber);
     set_battery_voltage(12.0);
+    FLAGS_die_on_malloc = true;
 
     if (!FLAGS_output_file.empty()) {
       logger_event_loop_ = MakeEventLoop("logger", roborio_);
diff --git a/y2022/control_loops/drivetrain/localizer.cc b/y2022/control_loops/drivetrain/localizer.cc
index 79ced2d..0aa4456 100644
--- a/y2022/control_loops/drivetrain/localizer.cc
+++ b/y2022/control_loops/drivetrain/localizer.cc
@@ -11,6 +11,7 @@
     : event_loop_(event_loop),
       dt_config_(dt_config),
       ekf_(dt_config),
+      observations_(&ekf_),
       localizer_output_fetcher_(
           event_loop_->MakeFetcher<frc971::controls::LocalizerOutput>(
               "/localizer")),
@@ -48,7 +49,7 @@
       joystick_state_fetcher_->autonomous()) {
     // TODO(james): This is an inelegant way to avoid having the localizer mess
     // up splines. Do better.
-    //return;
+    // return;
   }
   if (localizer_output_fetcher_.Fetch()) {
     clock_offset_fetcher_.Fetch();
@@ -89,11 +90,6 @@
       }
     }
 
-    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> H;
-    H.setZero();
-    H(0, StateIdx::kX) = 1;
-    H(1, StateIdx::kY) = 1;
-    H(2, StateIdx::kTheta) = 1;
     const Eigen::Vector3f Z{
         static_cast<float>(localizer_output_fetcher_->x()),
         static_cast<float>(localizer_output_fetcher_->y()),
@@ -101,15 +97,8 @@
     Eigen::Matrix3f R = Eigen::Matrix3f::Zero();
     R.diagonal() << 0.01, 0.01, 1e-4;
     const Input U_correct = ekf_.MostRecentInput();
-    ekf_.Correct(
-        Eigen::Vector3f::Zero(), &U_correct, {},
-        [H, state_at_capture, Z](const State &,
-                                 const Input &) -> Eigen::Vector3f {
-          Eigen::Vector3f error = H * state_at_capture.value() - Z;
-          error(2) = aos::math::NormalizeAngle(error(2));
-          return error;
-        },
-        [H](const State &) { return H; }, R, now);
+    observations_.CorrectKnownH(Eigen::Vector3f::Zero(), &U_correct,
+                                Corrector(state_at_capture.value(), Z), R, now);
   }
 }
 
diff --git a/y2022/control_loops/drivetrain/localizer.h b/y2022/control_loops/drivetrain/localizer.h
index 0d2673b..77b29eb 100644
--- a/y2022/control_loops/drivetrain/localizer.h
+++ b/y2022/control_loops/drivetrain/localizer.h
@@ -4,11 +4,11 @@
 #include <string_view>
 
 #include "aos/events/event_loop.h"
+#include "aos/network/message_bridge_server_generated.h"
 #include "frc971/control_loops/drivetrain/hybrid_ekf.h"
 #include "frc971/control_loops/drivetrain/localizer.h"
-#include "y2022/localizer/localizer_output_generated.h"
-#include "aos/network/message_bridge_server_generated.h"
 #include "frc971/input/joystick_state_generated.h"
+#include "y2022/localizer/localizer_output_generated.h"
 
 namespace y2022 {
 namespace control_loops {
@@ -63,9 +63,34 @@
   }
 
  private:
+  class Corrector : public HybridEkf::ExpectedObservationFunctor {
+   public:
+    Corrector(const State &state_at_capture, const Eigen::Vector3f &Z)
+        : state_at_capture_(state_at_capture), Z_(Z) {
+      H_.setZero();
+      H_(0, StateIdx::kX) = 1;
+      H_(1, StateIdx::kY) = 1;
+      H_(2, StateIdx::kTheta) = 1;
+    }
+    Output H(const State &, const Input &) final {
+      Eigen::Vector3f error = H_ * state_at_capture_ - Z_;
+      error(2) = aos::math::NormalizeAngle(error(2));
+      return error;
+    }
+    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> DHDX(
+        const State &) final {
+      return H_;
+    }
+
+   private:
+    Eigen::Matrix<float, HybridEkf::kNOutputs, HybridEkf::kNStates> H_;
+    State state_at_capture_;
+    Eigen::Vector3f Z_;
+  };
   aos::EventLoop *const event_loop_;
   const frc971::control_loops::drivetrain::DrivetrainConfig<double> dt_config_;
   HybridEkf ekf_;
+  HybridEkf::ExpectedObservationAllocator<Corrector> observations_;
 
   aos::Fetcher<frc971::controls::LocalizerOutput> localizer_output_fetcher_;
   aos::Fetcher<aos::JoystickState> joystick_state_fetcher_;
diff --git a/y2022/control_loops/drivetrain/localizer_test.cc b/y2022/control_loops/drivetrain/localizer_test.cc
index 1e33826..77f3988 100644
--- a/y2022/control_loops/drivetrain/localizer_test.cc
+++ b/y2022/control_loops/drivetrain/localizer_test.cc
@@ -10,12 +10,13 @@
 #include "frc971/control_loops/drivetrain/drivetrain.h"
 #include "frc971/control_loops/drivetrain/drivetrain_test_lib.h"
 #include "frc971/control_loops/team_number_test_environment.h"
-#include "y2022/localizer/localizer_output_generated.h"
 #include "gtest/gtest.h"
 #include "y2022/control_loops/drivetrain/drivetrain_base.h"
+#include "y2022/localizer/localizer_output_generated.h"
 
 DEFINE_string(output_folder, "",
               "If set, logs all channels to the provided logfile.");
+DECLARE_bool(die_on_malloc);
 
 namespace y2022 {
 namespace control_loops {
@@ -31,7 +32,7 @@
   DrivetrainConfig<double> config = GetDrivetrainConfig();
   return config;
 }
-}
+}  // namespace
 
 namespace chrono = std::chrono;
 using aos::monotonic_clock;
@@ -74,6 +75,7 @@
         drivetrain_plant_(drivetrain_plant_event_loop_.get(),
                           drivetrain_plant_imu_event_loop_.get(), dt_config_,
                           std::chrono::microseconds(500)) {
+    FLAGS_die_on_malloc = true;
     set_team_id(frc971::control_loops::testing::kTeamNumber);
     set_battery_voltage(12.0);
 
@@ -95,6 +97,7 @@
           output_builder.add_x(drivetrain_plant_.state()(0));
           output_builder.add_y(drivetrain_plant_.state()(1));
           output_builder.add_theta(drivetrain_plant_.state()(2));
+          builder.CheckOk(builder.Send(output_builder.Finish()));
         })
         ->Setup(imu_test_event_loop_->monotonic_now(),
                 std::chrono::milliseconds(5));