#ifndef FRC971_CONTROL_LOOPS_DRIVETRAIN_HYBRID_EKF_H_
#define FRC971_CONTROL_LOOPS_DRIVETRAIN_HYBRID_EKF_H_

#include <algorithm>
#include <chrono>
#include <cmath>
#include <functional>
#include <optional>

#include "Eigen/Dense"
#include "aos/commonmath.h"
#include "aos/containers/priority_queue.h"
#include "aos/util/math.h"
#include "frc971/control_loops/c2d.h"
#include "frc971/control_loops/drivetrain/drivetrain_config.h"
#include "frc971/control_loops/runge_kutta.h"

namespace y2019 {
namespace control_loops {
namespace testing {
class ParameterizedLocalizerTest;
}  // namespace testing
}  // namespace control_loops
}  // namespace y2019

namespace frc971 {
namespace control_loops {
namespace drivetrain {

namespace testing {
class HybridEkfTest;
}

// HybridEkf is an EKF for use in robot localization. It is currently
// coded for use with drivetrains in particular, and so the states and inputs
// are chosen as such.
// The "Hybrid" part of the name refers to the fact that it can take in
// measurements with variable time-steps.
// Measurements can also have been taken in the past; we maintain a buffer
// so that we can replay the Kalman filter whenever we get an old measurement.
// Currently, this class provides the necessary utilities for doing
// measurement updates with an encoder/gyro as well as a more generic
// update function that can be used for arbitrary nonlinear updates (presumably
// a camera update).
//
// Discussion of the model:
// In the current model, we try to rely primarily on IMU measurements for
// estimating robot state--we also need additional information (some
// combination of output voltages, encoders, and camera data) to help
// eliminate the biases that can accumulate due to integration of IMU data.
// We use IMU measurements as inputs rather than measurement outputs because
// that seemed to be easier to implement. I tried initially running with
// the IMU as a measurement, but it seemed to blow up the complexity of the
// model.
//
// On each prediction update, we take in inputs of the left/right voltages and
// the current measured longitudinal/lateral accelerations. In the current
// setup, the accelerometer readings will be used for estimating the evolution
// of the longitudinal/lateral velocities. The voltages (and voltage errors)
// will solely be used for estimating the current rotational velocity of the
// robot (I do this because currently I suspect that the accelerometer is a
// much better indicator of current robot state than the voltages). We also
// deliberately decay all of the velocity estimates towards zero to help
// address potential accelerometer biases. We use two separate decay models:
// -The longitudinal velocity is modelled as decaying at a constant rate (see
//  the documentation on the VelocityAccel() method)--this needs a more
//  complex model because the robot will, under normal circumstances, be
//  travelling at non-zero velocities.
// -The lateral velocity is modelled as exponentially decaying towards zero.
//  This is simpler to model and should be reasonably valid, since we will
//  not *normally* be travelling sideways consistently (this assumption may
//  need to be revisited).
// -The "longitudinal velocity offset" (described below) also uses an
//  exponential decay, albeit with a different time constant. A future
//  improvement may remove the decay modelling on the longitudinal velocity
//  itself and instead use that decay model on the longitudinal velocity
//  offset. This would place a bit more trust in the encoder measurements but
//  also more correctly model situations where the robot is legitimately
//  moving at a certain velocity.
//
// For modelling how the drivetrain encoders evolve, and to help prevent the
// aforementioned decay functions from affecting legitimate high-velocity
// maneuvers too much, we have a "longitudinal velocity offset" term. This term
// models the difference between the actual longitudinal velocity of the robot
// (estimated by the average of the left/right velocities) and the velocity
// experienced by the wheels (which can be observed from the encoders more
// directly). Because we model this velocity offset as decaying towards zero,
// what this will do is allow the encoders to be a constant velocity off from
// the accelerometer updates for short periods of time but then gradually
// pull the "actual" longitudinal velocity offset towards that of the encoders,
// helping to reduce constant biases.
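//
// A minimal usage sketch (dt_config and the sensor variables here are
// placeholders supplied by the caller; this is illustrative, not a prescribed
// integration):
//
//   HybridEkf<double> ekf(dt_config);
//   // Before any updates, seed the filter once:
//   ekf.ResetInitialState(aos::monotonic_clock::now(),
//                         HybridEkf<double>::State::Zero(),
//                         HybridEkf<double>::StateSquare::Identity());
//   // Then, on each drivetrain iteration:
//   ekf.UpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, voltage,
//                             accel, aos::monotonic_clock::now());
//   const HybridEkf<double>::State x_hat = ekf.X_hat();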
template <typename Scalar = double>
class HybridEkf {
 public:
  // An enum specifying what each index in the state vector is for.
  enum StateIdx {
    // Current X/Y position, in meters, of the robot.
    kX = 0,
    kY = 1,
    // Current heading of the robot.
    kTheta = 2,
    // Current estimated encoder reading of the left wheels, in meters.
    // Rezeroed once on startup.
    kLeftEncoder = 3,
    // Current estimated actual velocity of the left side of the robot, in m/s.
    kLeftVelocity = 4,
    // Same variables, for the right side of the robot.
    kRightEncoder = 5,
    kRightVelocity = 6,
    // Estimated offset to input voltage. Used as a generic error term, Volts.
    kLeftVoltageError = 7,
    kRightVoltageError = 8,
    // These error terms are used to estimate the difference between the actual
    // movement of the drivetrain and that implied by the wheel odometry.
    // Angular error effectively estimates a constant angular rate offset of
    // the encoders relative to the actual rotation of the robot.
    // Semi-arbitrary units (we don't bother accounting for robot radius in
    // this).
    kAngularError = 9,
    // Estimate of slip between the drivetrain wheels and the actual
    // forwards/backwards velocity of the robot, in m/s.
    // I.e., (left velocity + right velocity) / 2.0 = (left wheel velocity +
    // right wheel velocity) / 2.0 + longitudinal velocity offset
    kLongitudinalVelocityOffset = 10,
    // Current estimate of the lateral velocity of the robot, in m/s.
    // Positive implies the robot is moving to its left.
    kLateralVelocity = 11,
  };
  static constexpr int kNStates = 12;
  enum InputIdx {
    // Left/right drivetrain voltages.
    kLeftVoltage = 0,
    kRightVoltage = 1,
    // Current accelerometer readings, in m/s/s, along the longitudinal and
    // lateral axes of the robot. Should be projected onto the X/Y plane, to
    // compensate for tilt of the robot before being passed to this filter. The
    // HybridEkf has no knowledge of the current pitch/roll of the robot, and
    // so can't do anything to compensate for it.
    kLongitudinalAccel = 2,
    kLateralAccel = 3,
  };
  static constexpr int kNInputs = 4;
  // Number of previous samples to save.
  static constexpr int kSaveSamples = 50;
  // Whether we should completely rerun the entire stored history of
  // kSaveSamples on every correction. Enabling this will increase overall CPU
  // usage substantially; however, leaving it disabled makes it so that we are
  // less likely to notice if processing camera frames is causing delays in the
  // drivetrain.
  // If we are having CPU issues, we have three easy avenues to improve things:
  // (1) Reduce kSaveSamples (e.g., if all camera frames arrive within
  //     100 ms, then we can reduce kSaveSamples to be 25 (125 ms of samples)).
  // (2) Don't actually rely on the ability to insert corrections into the
  //     timeline.
  // (3) Set this to false.
  static constexpr bool kFullRewindOnEverySample = false;
  // Assume that all correction steps will have kNOutputs
  // dimensions.
  // TODO(james): Relax this assumption; relaxing it requires
  // figuring out how to deal with storing variable size
  // observation matrices, though.
  static constexpr int kNOutputs = 3;
  // Time constant to use for estimating how the longitudinal/lateral velocity
  // offsets decay, in seconds.
  static constexpr double kVelocityOffsetTimeConstant = 1.0;
  static constexpr double kLateralVelocityTimeConstant = 1.0;
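  // For reference, a first-order decay with a 1.0 s time constant means the
  // corresponding estimate evolves as v(t) = v(0) * exp(-t / 1.0) absent other
  // excitation--i.e., it falls to ~37% of its value after one second (see the
  // -1.0 / k*TimeConstant terms in InitializeMatrices()).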
  // Inputs are [left_volts, right_volts, longitudinal_accel, lateral_accel].
  typedef Eigen::Matrix<Scalar, kNInputs, 1> Input;
  // Outputs are either:
  // [left_encoder, right_encoder, gyro_vel]; or [heading, distance, skew] to
  // some target. This makes it so we don't have to figure out how we store
  // variable-size measurement updates.
  typedef Eigen::Matrix<Scalar, kNOutputs, 1> Output;
  typedef Eigen::Matrix<Scalar, kNStates, kNStates> StateSquare;
  // State contains the states defined by the StateIdx enum. See comments
  // there.
  typedef Eigen::Matrix<Scalar, kNStates, 1> State;

  // Constructs a HybridEkf for a particular drivetrain.
  // Currently, we use the drivetrain config for modelling constants
  // (continuous time A and B matrices) and for the noise matrices for the
  // encoders/gyro.
  HybridEkf(const DrivetrainConfig<double> &dt_config)
      : dt_config_(dt_config),
        velocity_drivetrain_coefficients_(
            dt_config.make_hybrid_drivetrain_velocity_loop()
                .plant()
                .coefficients()) {
    InitializeMatrices();
  }

  // Set the initial guess of the state. Can only be called once, and before
  // any measurement updates have occurred.
  // TODO(james): We may want to actually re-initialize and reset things on
  // the field. Create some sort of Reset() function.
  void ResetInitialState(::aos::monotonic_clock::time_point t,
                         const State &state, const StateSquare &P) {
    observations_.clear();
    X_hat_ = state;
    have_zeroed_encoders_ = true;
    P_ = P;
    observations_.PushFromBottom({
        t,
        t,
        X_hat_,
        P_,
        Input::Zero(),
        Output::Zero(),
        {},
        [](const State &, const Input &) { return Output::Zero(); },
        [](const State &) {
          return Eigen::Matrix<Scalar, kNOutputs, kNStates>::Zero();
        },
        Eigen::Matrix<Scalar, kNOutputs, kNOutputs>::Identity(),
        StateSquare::Identity(),
        StateSquare::Zero(),
        std::chrono::seconds(0),
        State::Zero(),
    });
  }

  // Correct with:
  // A measurement z at time t with z = h(X_hat, U) + v where v has noise
  // covariance R.
  // Input U is applied from the previous timestep until time t.
  // If t is later than any previous measurements, then U must be provided.
  // If the measurement falls between two previous measurements, then U
  // can be provided or not; if U is not provided, then it is filled in based
  // on an assumption that the voltage was held constant between the time
  // steps.
  // TODO(james): Is it necessary to explicitly provide a version with H as a
  // matrix for linear cases?
  void Correct(
      const Output &z, const Input *U,
      std::function<
          void(const State &, const StateSquare &,
               std::function<Output(const State &, const Input &)> *,
               std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
                   const State &)> *)> make_h,
      std::function<Output(const State &, const Input &)> h,
      std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
          dhdx, const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
      aos::monotonic_clock::time_point t);

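  // As an illustration of the Correct() interface above (this snippet is not
  // part of the class; the measurement, covariance, input, and timestamp
  // names are hypothetical), a caller could apply a heading-only correction
  // by zero-padding the unused output rows:
  //
  //   using Ekf = HybridEkf<double>;
  //   Ekf::Input U = ...;  // Input applied since the previous observation
  //                        // (required if this is the newest measurement).
  //   Ekf::Output z(measured_theta, 0.0, 0.0);
  //   Eigen::Matrix<double, 3, 3> R =
  //       Eigen::Matrix<double, 3, 3>::Identity() * 1e-4;
  //   ekf.Correct(
  //       z, &U, {},
  //       [](const Ekf::State &X, const Ekf::Input &) {
  //         return Ekf::Output(X(Ekf::kTheta), 0.0, 0.0);
  //       },
  //       [](const Ekf::State &) {
  //         Eigen::Matrix<double, 3, 12> H =
  //             Eigen::Matrix<double, 3, 12>::Zero();
  //         H(0, Ekf::kTheta) = 1.0;
  //         return H;
  //       },
  //       R, measurement_time);
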
239 // A utility function for specifically updating with encoder and gyro
240 // measurements.
241 void UpdateEncodersAndGyro(const Scalar left_encoder,
242 const Scalar right_encoder, const Scalar gyro_rate,
James Kuszmaul3c5b4d32020-02-11 17:22:14 -0800243 const Eigen::Matrix<Scalar, 2, 1> &voltage,
244 const Eigen::Matrix<Scalar, 3, 1> &accel,
245 aos::monotonic_clock::time_point t) {
246 Input U;
247 U.template block<2, 1>(0, 0) = voltage;
248 U.template block<2, 1>(kLongitudinalAccel, 0) =
249 accel.template block<2, 1>(0, 0);
250 RawUpdateEncodersAndGyro(left_encoder, right_encoder, gyro_rate, U, t);
251 }
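  // For example, UpdateEncodersAndGyro() above might be called as follows
  // (the encoder/gyro/voltage/accelerometer variables are placeholders for
  // whatever the caller reads from its sensors):
  //
  //   Eigen::Matrix<double, 2, 1> voltage;
  //   voltage << left_voltage, right_voltage;
  //   Eigen::Matrix<double, 3, 1> accel;
  //   accel << accel_x, accel_y, accel_z;  // Only the x/y components are used.
  //   ekf.UpdateEncodersAndGyro(left_encoder_m, right_encoder_m,
  //                             gyro_rate_rad_per_sec, voltage, accel,
  //                             monotonic_now);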
  // Version of UpdateEncodersAndGyro that takes an input matrix rather than
  // taking in a voltage/acceleration separately.
  void RawUpdateEncodersAndGyro(const Scalar left_encoder,
                                const Scalar right_encoder,
                                const Scalar gyro_rate, const Input &U,
                                aos::monotonic_clock::time_point t) {
    // Because the check below for have_zeroed_encoders_ will add an
    // Observation, do a check here to ensure that initialization has been
    // performed and so there is at least one observation.
    CHECK(!observations_.empty());
    if (!have_zeroed_encoders_) {
      // This logic handles ensuring that on the first encoder reading, we
      // update the internal state for the encoders to match the reading.
      // Otherwise, if we restart the drivetrain without restarting
      // wpilib_interface, then we can get some obnoxious initial corrections
      // that mess up the localization.
      State newstate = X_hat_;
      newstate(kLeftEncoder) = left_encoder;
      newstate(kRightEncoder) = right_encoder;
      newstate(kLeftVoltageError) = 0.0;
      newstate(kRightVoltageError) = 0.0;
      newstate(kAngularError) = 0.0;
      newstate(kLongitudinalVelocityOffset) = 0.0;
      newstate(kLateralVelocity) = 0.0;
      ResetInitialState(t, newstate, P_);
      // We need to set have_zeroed_encoders_ after ResetInitialState because
      // the reset clears have_zeroed_encoders_...
      have_zeroed_encoders_ = true;
    }

    Output z(left_encoder, right_encoder, gyro_rate);

    Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;
    R.setZero();
    R.diagonal() << encoder_noise_, encoder_noise_, gyro_noise_;
    Correct(
        z, &U, {},
        [this](const State &X, const Input &) {
          return H_encoders_and_gyro_ * X;
        },
        [this](const State &) { return H_encoders_and_gyro_; }, R, t);
  }

  // Sundry accessors:
  State X_hat() const { return X_hat_; }
  Scalar X_hat(long i) const { return X_hat_(i); }
  StateSquare P() const { return P_; }
  aos::monotonic_clock::time_point latest_t() const {
    return observations_.top().t;
  }

  static Scalar CalcLongitudinalVelocity(const State &X) {
    return (X(kLeftVelocity) + X(kRightVelocity)) / 2.0;
  }

  Scalar CalcYawRate(const State &X) const {
    return (X(kRightVelocity) - X(kLeftVelocity)) / 2.0 /
           dt_config_.robot_radius;
  }

  // Returns the last state before the specified time.
  // Returns nullopt if time is older than the oldest measurement.
  std::optional<State> LastStateBeforeTime(
      aos::monotonic_clock::time_point time) {
    if (observations_.empty() || observations_.begin()->t > time) {
      return std::nullopt;
    }
    for (const auto &observation : observations_) {
      if (observation.t > time) {
        // Note that observation.X_hat actually references the _previous_
        // X_hat.
        return observation.X_hat;
      }
    }
    return X_hat();
  }

  // Returns the most recent input vector.
  Input MostRecentInput() {
    CHECK(!observations_.empty());
    Input U = observations_.top().U;
    return U;
  }

 private:
  struct Observation {
    // Time when the observation was taken.
    aos::monotonic_clock::time_point t;
    // Time that the previous observation was taken:
    aos::monotonic_clock::time_point prev_t;
    // Estimate of state at previous observation time t, after accounting for
    // the previous observation.
    State X_hat;
    // Noise matrix corresponding to X_hat_.
    StateSquare P;
    // The input applied from previous observation until time t.
    Input U;
    // Measurement taken at that time.
    Output z;
    // A function to create h and dhdx from a given position/covariance
    // estimate. This is used by the camera to make it so that we only have to
    // match targets once.
    // Only called if h and dhdx are empty.
    std::function<void(const State &, const StateSquare &,
                       std::function<Output(const State &, const Input &)> *,
                       std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
                           const State &)> *)> make_h;
    // A function to calculate the expected output at a given state/input.
    // TODO(james): For encoders/gyro, it is linear and the function call may
    // be expensive. Potential source of optimization.
    std::function<Output(const State &, const Input &)> h;
    // The Jacobian of h with respect to x.
    // We assume that U has no impact on the Jacobian.
    // TODO(james): Currently, none of the users of this actually make use of
    // the ability to have dynamic dhdx (technically, the camera code should
    // recalculate it to be strictly correct, but I was both too lazy to do
    // so and it seemed unnecessary). This is a potential source for future
    // optimizations if function calls are being expensive.
    std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
        dhdx;
    // The measurement noise matrix.
    Eigen::Matrix<Scalar, kNOutputs, kNOutputs> R;

    // Discretized A and Q to use on this update step. These will only be
    // recalculated if the timestep changes.
    StateSquare A_d;
    StateSquare Q_d;
    aos::monotonic_clock::duration discretization_time;

    // A cached value indicating how much we change X_hat in the prediction
    // step of this Observation.
    State predict_update;

    // In order to sort the observations in the PriorityQueue object, we
    // need a comparison function.
    friend bool operator<(const Observation &l, const Observation &r) {
      return l.t < r.t;
    }
  };

  void InitializeMatrices();

  // These constants and functions define how the longitudinal velocity
  // (the average of the left and right velocities) decays. We model it as
  // decaying at a constant rate, except very near zero where the decay rate is
  // exponential (this is more numerically stable than just using a constant
  // rate the whole time). We use this model rather than a simpler exponential
  // decay because an exponential decay will result in the robot's velocity
  // estimate consistently being far too low when at high velocities, and since
  // the accelerometer-based estimate of the velocity will only drift at a
  // relatively slow rate and doesn't get worse at higher velocities, we can
  // safely decay pretty slowly.
  static constexpr double kMaxVelocityAccel = 0.005;
  static constexpr double kMaxVelocityGain = 1.0;
  static Scalar VelocityAccel(Scalar velocity) {
    return -std::clamp(kMaxVelocityGain * velocity, -kMaxVelocityAccel,
                       kMaxVelocityAccel);
  }

  static Scalar VelocityAccelDiff(Scalar velocity) {
    return (std::abs(kMaxVelocityGain * velocity) > kMaxVelocityAccel)
               ? 0.0
               : -kMaxVelocityGain;
  }
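  // For example (numbers purely illustrative of the constants above): at a
  // longitudinal velocity of 2.0 m/s, kMaxVelocityGain * velocity saturates
  // the clamp, so VelocityAccel() returns a constant -0.005 m/s^2; at
  // 0.002 m/s it returns -0.002 m/s^2 and VelocityAccelDiff() returns -1.0
  // (the exponential-decay region near zero).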

  // Returns the "A" matrix for a given state. See DiffEq for discussion of
  // ignore_accel.
  StateSquare AForState(const State &X, bool ignore_accel = false) const {
    // Calculate the A matrix for a given state. Note that A = partial Xdot /
    // partial X. This is distinct from saying that Xdot = A * X. This is
    // particularly relevant for the (kX, kTheta) members which otherwise seem
    // odd.
    StateSquare A_continuous = A_continuous_;
    const Scalar theta = X(kTheta);
    const Scalar stheta = std::sin(theta);
    const Scalar ctheta = std::cos(theta);
    const Scalar lng_vel = CalcLongitudinalVelocity(X);
    const Scalar lat_vel = X(kLateralVelocity);
    const Scalar diameter = 2.0 * dt_config_.robot_radius;
    const Scalar yaw_rate = CalcYawRate(X);
    // X and Y derivatives
    A_continuous(kX, kTheta) = -stheta * lng_vel - ctheta * lat_vel;
    A_continuous(kX, kLeftVelocity) = ctheta / 2.0;
    A_continuous(kX, kRightVelocity) = ctheta / 2.0;
    A_continuous(kX, kLateralVelocity) = -stheta;
    A_continuous(kY, kTheta) = ctheta * lng_vel - stheta * lat_vel;
    A_continuous(kY, kLeftVelocity) = stheta / 2.0;
    A_continuous(kY, kRightVelocity) = stheta / 2.0;
    A_continuous(kY, kLateralVelocity) = ctheta;

    if (!ignore_accel) {
      const Eigen::Matrix<Scalar, 1, kNStates> lng_vel_row =
          (A_continuous.row(kLeftVelocity) +
           A_continuous.row(kRightVelocity)) /
          2.0;
      A_continuous.row(kLeftVelocity) -= lng_vel_row;
      A_continuous.row(kRightVelocity) -= lng_vel_row;
      // Terms to account for centripetal accelerations.
      // lateral centripetal accel = -yaw_rate * lng_vel
      A_continuous(kLateralVelocity, kLeftVelocity) +=
          X(kLeftVelocity) / diameter;
      A_continuous(kLateralVelocity, kRightVelocity) +=
          -X(kRightVelocity) / diameter;
      A_continuous(kRightVelocity, kLateralVelocity) += yaw_rate;
      A_continuous(kLeftVelocity, kLateralVelocity) += yaw_rate;
      const Scalar dlng_accel_dwheel_vel = X(kLateralVelocity) / diameter;
      A_continuous(kRightVelocity, kRightVelocity) += dlng_accel_dwheel_vel;
      A_continuous(kLeftVelocity, kRightVelocity) += dlng_accel_dwheel_vel;
      A_continuous(kRightVelocity, kLeftVelocity) += -dlng_accel_dwheel_vel;
      A_continuous(kLeftVelocity, kLeftVelocity) += -dlng_accel_dwheel_vel;

      A_continuous(kRightVelocity, kRightVelocity) +=
          VelocityAccelDiff(lng_vel) / 2.0;
      A_continuous(kRightVelocity, kLeftVelocity) +=
          VelocityAccelDiff(lng_vel) / 2.0;
      A_continuous(kLeftVelocity, kRightVelocity) +=
          VelocityAccelDiff(lng_vel) / 2.0;
      A_continuous(kLeftVelocity, kLeftVelocity) +=
          VelocityAccelDiff(lng_vel) / 2.0;
    }
    return A_continuous;
  }

  // Returns dX / dt given X and U. If ignore_accel is set, then we ignore the
  // accelerometer-based components of U (this is solely used in testing).
  State DiffEq(const State &X, const Input &U,
               bool ignore_accel = false) const {
    State Xdot = A_continuous_ * X + B_continuous_ * U;
    // And then we need to add on the terms for the x/y change:
    const Scalar theta = X(kTheta);
    const Scalar lng_vel = CalcLongitudinalVelocity(X);
    const Scalar lat_vel = X(kLateralVelocity);
    const Scalar stheta = std::sin(theta);
    const Scalar ctheta = std::cos(theta);
    Xdot(kX) = ctheta * lng_vel - stheta * lat_vel;
    Xdot(kY) = stheta * lng_vel + ctheta * lat_vel;

    const Scalar yaw_rate = CalcYawRate(X);
    const Scalar expected_lat_accel = lng_vel * yaw_rate;
    const Scalar expected_lng_accel =
        CalcLongitudinalVelocity(Xdot) - yaw_rate * lat_vel;
    const Scalar lng_accel_offset =
        U(kLongitudinalAccel) - expected_lng_accel;
    constexpr double kAccelWeight = 1.0;
    if (!ignore_accel) {
      Xdot(kLeftVelocity) += kAccelWeight * lng_accel_offset;
      Xdot(kRightVelocity) += kAccelWeight * lng_accel_offset;
      Xdot(kLateralVelocity) += U(kLateralAccel) - expected_lat_accel;

      Xdot(kRightVelocity) += VelocityAccel(lng_vel);
      Xdot(kLeftVelocity) += VelocityAccel(lng_vel);
    }
    return Xdot;
  }

  void PredictImpl(Observation *obs, std::chrono::nanoseconds dt, State *state,
                   StateSquare *P) {
    // Only recalculate the discretization if the timestep has changed.
    // Technically, this isn't quite correct, since the discretization will
    // change depending on the current state. However, the slight loss of
    // precision seems acceptable for the sake of significantly reducing CPU
    // usage.
    if (obs->discretization_time != dt) {
      // TODO(james): By far the biggest CPU sink in the localization appears
      // to be this discretization--it's possible the spline code spikes
      // higher, but it doesn't create anywhere near the same sustained load.
      // There are a few potential options for optimizing this code, but none
      // of them are entirely trivial, e.g. we could:
      // -Reduce the number of states (this function grows at O(kNStates^3)).
      // -Adjust the discretization function itself (there're a few things we
      //  can tune there).
      // -Try to come up with some sort of lookup table or other way of
      //  pre-calculating A_d and Q_d.
      // I also have to figure out how much we care about the precision of
      // some of these values--I don't think we care much, but we probably
      // do want to maintain some of the structure of the matrices.
      const StateSquare A_c = AForState(*state);
      controls::DiscretizeQAFast(Q_continuous_, A_c, dt, &obs->Q_d, &obs->A_d);
      obs->discretization_time = dt;

      obs->predict_update =
          RungeKuttaU(
              [this](const State &X, const Input &U) { return DiffEq(X, U); },
              *state, obs->U, aos::time::DurationInSeconds(dt)) -
          *state;
    }

    *state += obs->predict_update;

    StateSquare Ptemp = obs->A_d * *P * obs->A_d.transpose() + obs->Q_d;
    *P = Ptemp;
  }

  void CorrectImpl(Observation *obs, State *state, StateSquare *P) {
    const Eigen::Matrix<Scalar, kNOutputs, kNStates> H = obs->dhdx(*state);
    // Note: Technically, this does calculate P * H.transpose() twice. However,
    // when I was mucking around with some things, I found that in practice
    // putting everything into one expression and letting Eigen optimize it
    // directly actually improved performance relative to precalculating P *
    // H.transpose().
    const Eigen::Matrix<Scalar, kNStates, kNOutputs> K =
        *P * H.transpose() * (H * *P * H.transpose() + obs->R).inverse();
    const StateSquare Ptemp = (StateSquare::Identity() - K * H) * *P;
    *P = Ptemp;
    *state += K * (obs->z - obs->h(*state, obs->U));
  }

  void ProcessObservation(Observation *obs, const std::chrono::nanoseconds dt,
                          State *state, StateSquare *P) {
    *state = obs->X_hat;
    *P = obs->P;
    if (dt.count() != 0) {
      PredictImpl(obs, dt, state, P);
    }
    if (!(obs->h && obs->dhdx)) {
      CHECK(obs->make_h);
      obs->make_h(*state, *P, &obs->h, &obs->dhdx);
    }
    CorrectImpl(obs, state, P);
  }

  DrivetrainConfig<double> dt_config_;
  State X_hat_;
  StateFeedbackHybridPlantCoefficients<2, 2, 2, double>
      velocity_drivetrain_coefficients_;
  StateSquare A_continuous_;
  StateSquare Q_continuous_;
  StateSquare P_;
  Eigen::Matrix<Scalar, kNOutputs, kNStates> H_encoders_and_gyro_;
  Scalar encoder_noise_, gyro_noise_;
  Eigen::Matrix<Scalar, kNStates, kNInputs> B_continuous_;

  bool have_zeroed_encoders_ = false;

  aos::PriorityQueue<Observation, kSaveSamples, std::less<Observation>>
      observations_;

  friend class testing::HybridEkfTest;
  friend class y2019::control_loops::testing::ParameterizedLocalizerTest;
};  // class HybridEkf

template <typename Scalar>
void HybridEkf<Scalar>::Correct(
    const Output &z, const Input *U,
    std::function<void(const State &, const StateSquare &,
                       std::function<Output(const State &, const Input &)> *,
                       std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(
                           const State &)> *)> make_h,
    std::function<Output(const State &, const Input &)> h,
    std::function<Eigen::Matrix<Scalar, kNOutputs, kNStates>(const State &)>
        dhdx, const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> &R,
    aos::monotonic_clock::time_point t) {
  CHECK(!observations_.empty());
  if (!observations_.full() && t < observations_.begin()->t) {
    LOG(ERROR) << "Dropped an observation that was received before we "
                  "initialized.\n";
    return;
  }
  auto cur_it = observations_.PushFromBottom(
      {t, t, State::Zero(), StateSquare::Zero(), Input::Zero(), z, make_h, h,
       dhdx, R, StateSquare::Identity(), StateSquare::Zero(),
       std::chrono::seconds(0), State::Zero()});
  if (cur_it == observations_.end()) {
    VLOG(1) << "Camera dropped off of end with time of "
            << aos::time::DurationInSeconds(t.time_since_epoch())
            << "s; earliest observation in queue has time of "
            << aos::time::DurationInSeconds(
                   observations_.begin()->t.time_since_epoch())
            << "s.\n";
    return;
  }
  // Now we populate any state information that depends on where the
  // observation was inserted into the queue. X_hat and P must be populated
  // from the values present in the observation *following* this one in
  // the queue (note that the X_hat and P that we store in each observation
  // is the values that they held after accounting for the previous
  // measurement and before accounting for the time between the previous and
  // current measurement). If we appended to the end of the queue, then
  // we need to pull from X_hat_ and P_ specifically.
  // Furthermore, for U:
  // -If the observation was inserted at the end, then the user must've
  //  provided U and we use it.
  // -Otherwise, only grab U if necessary.
  auto next_it = cur_it;
  ++next_it;
  if (next_it == observations_.end()) {
    cur_it->X_hat = X_hat_;
    cur_it->P = P_;
    // Note that if next_it == observations_.end(), then because we already
    // checked for !observations_.empty(), we are guaranteed to have a
    // valid prev_it.
    auto prev_it = cur_it;
    --prev_it;
    cur_it->prev_t = prev_it->t;
    // TODO(james): Figure out a saner way of handling this.
    CHECK(U != nullptr);
    cur_it->U = *U;
  } else {
    cur_it->X_hat = next_it->X_hat;
    cur_it->P = next_it->P;
    cur_it->prev_t = next_it->prev_t;
    next_it->prev_t = cur_it->t;
    cur_it->U = (U == nullptr) ? next_it->U : *U;
  }

  if (kFullRewindOnEverySample) {
    next_it = observations_.begin();
    cur_it = next_it++;
  }

  // Now we need to rerun the predict step from the previous to the new
  // observation as well as every following correct/predict up to the current
  // time.
  while (true) {
    // We use X_hat_ and P_ to store the intermediate states, and then
    // once we reach the end they will all be up-to-date.
    ProcessObservation(&*cur_it, cur_it->t - cur_it->prev_t, &X_hat_, &P_);
    CHECK(X_hat_.allFinite());
    if (next_it != observations_.end()) {
      next_it->X_hat = X_hat_;
      next_it->P = P_;
    } else {
      break;
    }
    ++cur_it;
    ++next_it;
  }
}

template <typename Scalar>
void HybridEkf<Scalar>::InitializeMatrices() {
  A_continuous_.setZero();
  const Scalar diameter = 2.0 * dt_config_.robot_radius;
  // Theta derivative
  A_continuous_(kTheta, kLeftVelocity) = -1.0 / diameter;
  A_continuous_(kTheta, kRightVelocity) = 1.0 / diameter;

  // Encoder derivatives
  A_continuous_(kLeftEncoder, kLeftVelocity) = 1.0;
  A_continuous_(kLeftEncoder, kAngularError) = 1.0;
  A_continuous_(kLeftEncoder, kLongitudinalVelocityOffset) = -1.0;
  A_continuous_(kRightEncoder, kRightVelocity) = 1.0;
  A_continuous_(kRightEncoder, kAngularError) = -1.0;
  A_continuous_(kRightEncoder, kLongitudinalVelocityOffset) = -1.0;

  // Pull velocity derivatives from velocity matrices.
  // Note that this looks really awkward (doesn't use
  // Eigen blocks) because someone decided that the full
  // drivetrain Kalman Filter should have a weird convention.
  // TODO(james): Support shifting drivetrains with changing A_continuous
  const auto &vel_coefs = velocity_drivetrain_coefficients_;
  A_continuous_(kLeftVelocity, kLeftVelocity) = vel_coefs.A_continuous(0, 0);
  A_continuous_(kLeftVelocity, kRightVelocity) = vel_coefs.A_continuous(0, 1);
  A_continuous_(kRightVelocity, kLeftVelocity) = vel_coefs.A_continuous(1, 0);
  A_continuous_(kRightVelocity, kRightVelocity) = vel_coefs.A_continuous(1, 1);

  A_continuous_(kLongitudinalVelocityOffset, kLongitudinalVelocityOffset) =
      -1.0 / kVelocityOffsetTimeConstant;
  A_continuous_(kLateralVelocity, kLateralVelocity) =
      -1.0 / kLateralVelocityTimeConstant;

  // We currently ignore all voltage-related modelling terms.
  // TODO(james): Decide what to do about these terms. They don't really matter
  // too much when we have accelerometer readings available.
  B_continuous_.setZero();
  B_continuous_.template block<1, 2>(kLeftVelocity, kLeftVoltage) =
      vel_coefs.B_continuous.row(0).template cast<Scalar>();
  B_continuous_.template block<1, 2>(kRightVelocity, kLeftVoltage) =
      vel_coefs.B_continuous.row(1).template cast<Scalar>();
  A_continuous_.template block<kNStates, 2>(0, kLeftVoltageError) =
      B_continuous_.template block<kNStates, 2>(0, kLeftVoltage);

  Q_continuous_.setZero();
  // TODO(james): Improve estimates of process noise--e.g., X/Y noise can
  // probably be reduced when we are stopped because you rarely jump randomly.
  // Or maybe it's more appropriate to scale wheelspeed noise with wheelspeed,
  // since the wheels aren't likely to slip much stopped.
  Q_continuous_(kX, kX) = 0.002;
  Q_continuous_(kY, kY) = 0.002;
  Q_continuous_(kTheta, kTheta) = 0.0001;
  Q_continuous_(kLeftEncoder, kLeftEncoder) = std::pow(0.15, 2.0);
  Q_continuous_(kRightEncoder, kRightEncoder) = std::pow(0.15, 2.0);
  Q_continuous_(kLeftVelocity, kLeftVelocity) = std::pow(0.5, 2.0);
  Q_continuous_(kRightVelocity, kRightVelocity) = std::pow(0.5, 2.0);
  Q_continuous_(kLeftVoltageError, kLeftVoltageError) = std::pow(10.0, 2.0);
  Q_continuous_(kRightVoltageError, kRightVoltageError) = std::pow(10.0, 2.0);
  Q_continuous_(kAngularError, kAngularError) = std::pow(2.0, 2.0);
  // This noise value largely governs whether we will trust the encoders or
  // accelerometer more for estimating the robot position.
  // Note that this also affects how we interpret camera measurements,
  // particularly when using a heading/distance/skew measurement--if the
  // noise on these numbers is particularly high, then we can end up with weird
  // dynamics where a camera update both shifts our X/Y position and adjusts
  // our velocity estimates substantially, causing the camera updates to create
  // "momentum", and if we don't trust the encoders enough, then we have no way
  // of determining that the velocity updates are bogus. This also interacts
  // with kVelocityOffsetTimeConstant.
  Q_continuous_(kLongitudinalVelocityOffset, kLongitudinalVelocityOffset) =
      std::pow(1.1, 2.0);
  Q_continuous_(kLateralVelocity, kLateralVelocity) = std::pow(0.1, 2.0);

  H_encoders_and_gyro_.setZero();
  // Encoders are stored directly in the state matrix, so are a minor
  // transform away.
  H_encoders_and_gyro_(0, kLeftEncoder) = 1.0;
  H_encoders_and_gyro_(1, kRightEncoder) = 1.0;
  // Gyro rate is just the difference between right/left side speeds:
  H_encoders_and_gyro_(2, kLeftVelocity) = -1.0 / diameter;
  H_encoders_and_gyro_(2, kRightVelocity) = 1.0 / diameter;

  encoder_noise_ = 5e-9;
  gyro_noise_ = 1e-13;

  X_hat_.setZero();
  P_.setZero();
}

}  // namespace drivetrain
}  // namespace control_loops
}  // namespace frc971

#endif  // FRC971_CONTROL_LOOPS_DRIVETRAIN_HYBRID_EKF_H_