Merge "Attach the perimeter to the polygons."
diff --git a/aos/input/drivetrain_input.cc b/aos/input/drivetrain_input.cc
index 67ee9f3..02957a8 100644
--- a/aos/input/drivetrain_input.cc
+++ b/aos/input/drivetrain_input.cc
@@ -23,8 +23,6 @@
 
 void DrivetrainInputReader::HandleDrivetrain(
     const ::aos::input::driver_station::Data &data) {
-  bool is_control_loop_driving = false;
-
   const auto wheel_and_throttle = GetWheelAndThrottle(data);
   const double wheel = wheel_and_throttle.wheel;
   const double wheel_velocity = wheel_and_throttle.wheel_velocity;
@@ -39,7 +37,32 @@
     robot_velocity_ = drivetrain_queue.status->robot_speed;
   }
 
-  if (data.PosEdge(turn1_) || data.PosEdge(turn2_)) {
+  bool is_control_loop_driving = false;
+  bool is_line_following = false;
+
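+  // Map each turn button onto its configured use; either button may trigger
+  // control loop driving or line following depending on how it was set up.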
+  if (data.IsPressed(turn1_)) {
+    switch (turn1_use_) {
+      case TurnButtonUse::kControlLoopDriving:
+        is_control_loop_driving = true;
+        break;
+      case TurnButtonUse::kLineFollow:
+        is_line_following = true;
+        break;
+    }
+  }
+
+  if (data.IsPressed(turn2_)) {
+    switch (turn2_use_) {
+      case TurnButtonUse::kControlLoopDriving:
+        is_control_loop_driving = true;
+        break;
+      case TurnButtonUse::kLineFollow:
+        is_line_following = true;
+        break;
+    }
+  }
+
+  if (is_control_loop_driving) {
     if (drivetrain_queue.status.get()) {
       left_goal_ = drivetrain_queue.status->estimated_left_position;
       right_goal_ = drivetrain_queue.status->estimated_right_position;
@@ -49,9 +72,6 @@
       left_goal_ - wheel * wheel_multiplier_ + throttle * 0.3;
   const double current_right_goal =
       right_goal_ + wheel * wheel_multiplier_ + throttle * 0.3;
-  if (data.IsPressed(turn1_) || data.IsPressed(turn2_)) {
-    is_control_loop_driving = true;
-  }
   auto new_drivetrain_goal = drivetrain_queue.goal.MakeMessage();
   new_drivetrain_goal->wheel = wheel;
   new_drivetrain_goal->wheel_velocity = wheel_velocity;
@@ -61,7 +81,8 @@
   new_drivetrain_goal->throttle_torque = throttle_torque;
   new_drivetrain_goal->highgear = high_gear;
   new_drivetrain_goal->quickturn = data.IsPressed(quick_turn_);
-  new_drivetrain_goal->controller_type = is_control_loop_driving ? 1 : 0;
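+  // Controller type 3 selects the line following controller, 1 selects
+  // control loop driving, and 0 leaves the default teleop behavior.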
+  new_drivetrain_goal->controller_type =
+      is_line_following ? 3 : (is_control_loop_driving ? 1 : 0);
   new_drivetrain_goal->left_goal = current_left_goal;
   new_drivetrain_goal->right_goal = current_right_goal;
 
@@ -192,15 +213,17 @@
   const ButtonLocation kTurn1(1, 7);
   const ButtonLocation kTurn2(1, 11);
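+
+  // Both turn buttons keep their control-loop-driving behavior on the
+  // steering wheel.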
   std::unique_ptr<SteeringWheelDrivetrainInputReader> result(
-      new SteeringWheelDrivetrainInputReader(kSteeringWheel, kDriveThrottle,
-                                             kQuickTurn, kTurn1, kTurn2));
+      new SteeringWheelDrivetrainInputReader(
+          kSteeringWheel, kDriveThrottle, kQuickTurn, kTurn1,
+          TurnButtonUse::kControlLoopDriving, kTurn2,
+          TurnButtonUse::kControlLoopDriving));
   result.get()->set_default_high_gear(default_high_gear);
 
   return result;
 }
 
 std::unique_ptr<PistolDrivetrainInputReader> PistolDrivetrainInputReader::Make(
-    bool default_high_gear) {
+    bool default_high_gear, TopButtonUse top_button_use) {
   // Pistol Grip controller
   const JoystickAxis kTriggerHigh(1, 1), kTriggerLow(1, 4),
       kTriggerVelocityHigh(1, 2), kTriggerVelocityLow(1, 5),
@@ -211,12 +234,23 @@
       kWheelTorqueLow(2, 6);
 
   const ButtonLocation kQuickTurn(1, 3);
-  const ButtonLocation kShiftHigh(1, 1);
-  const ButtonLocation kShiftLow(1, 2);
 
-  // Nop
-  const ButtonLocation kTurn1(1, 9);
-  const ButtonLocation kTurn2(1, 10);
+  const ButtonLocation kTopButton(1, 1);
+  const ButtonLocation kSecondButton(1, 2);
+  // Non-existent button for nops.
+  const ButtonLocation kDummyButton(1, 10);
+
+  // TODO(james): Make a copy assignment operator for ButtonLocation so we don't
+  // have to shoehorn in these ternary operators.
+  const ButtonLocation kTurn1 =
+      (top_button_use == TopButtonUse::kLineFollow) ? kTopButton : kDummyButton;
+  // Turn2 currently does nothing on the pistol grip, ever.
+  const ButtonLocation kTurn2 = kDummyButton;
+  const ButtonLocation kShiftHigh =
+      (top_button_use == TopButtonUse::kShift) ? kTopButton : kDummyButton;
+  const ButtonLocation kShiftLow =
+      (top_button_use == TopButtonUse::kShift) ? kSecondButton : kDummyButton;
+
   std::unique_ptr<PistolDrivetrainInputReader> result(
       new PistolDrivetrainInputReader(
           kWheelHigh, kWheelLow, kTriggerVelocityHigh, kTriggerVelocityLow,
@@ -239,7 +273,9 @@
 
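+  // As on the steering wheel, both turn buttons trigger control loop driving
+  // on the Xbox controller.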
   std::unique_ptr<XboxDrivetrainInputReader> result(
       new XboxDrivetrainInputReader(kSteeringWheel, kDriveThrottle, kQuickTurn,
-                                    kTurn1, kTurn2));
+                                    kTurn1, TurnButtonUse::kControlLoopDriving,
+                                    kTurn2,
+                                    TurnButtonUse::kControlLoopDriving));
   return result;
 }
 ::std::unique_ptr<DrivetrainInputReader> DrivetrainInputReader::Make(
@@ -256,8 +292,11 @@
           SteeringWheelDrivetrainInputReader::Make(dt_config.default_high_gear);
       break;
     case InputType::kPistol:
-      drivetrain_input_reader =
-          PistolDrivetrainInputReader::Make(dt_config.default_high_gear);
+      drivetrain_input_reader = PistolDrivetrainInputReader::Make(
+          dt_config.default_high_gear,
+          dt_config.pistol_grip_shift_enables_line_follow
+              ? PistolDrivetrainInputReader::TopButtonUse::kLineFollow
+              : PistolDrivetrainInputReader::TopButtonUse::kShift);
       break;
     case InputType::kXbox:
       drivetrain_input_reader = XboxDrivetrainInputReader::Make();
diff --git a/aos/input/drivetrain_input.h b/aos/input/drivetrain_input.h
index 5274cfa..80046ee 100644
--- a/aos/input/drivetrain_input.h
+++ b/aos/input/drivetrain_input.h
@@ -35,17 +35,28 @@
 // joystick types.
 class DrivetrainInputReader {
  public:
+  // What to use the turn1/2 buttons for.
+  enum class TurnButtonUse {
+    // Use the button to enable control loop driving.
+    kControlLoopDriving,
+    // Use the button to set line following mode.
+    kLineFollow,
+  };
   // Inputs driver station button and joystick locations
   DrivetrainInputReader(driver_station::JoystickAxis wheel,
                         driver_station::JoystickAxis throttle,
                         driver_station::ButtonLocation quick_turn,
                         driver_station::ButtonLocation turn1,
-                        driver_station::ButtonLocation turn2)
+                        TurnButtonUse turn1_use,
+                        driver_station::ButtonLocation turn2,
+                        TurnButtonUse turn2_use)
       : wheel_(wheel),
         throttle_(throttle),
         quick_turn_(quick_turn),
         turn1_(turn1),
-        turn2_(turn2) {}
+        turn1_use_(turn1_use),
+        turn2_(turn2),
+        turn2_use_(turn2_use) {}
 
   virtual ~DrivetrainInputReader() = default;
 
@@ -78,8 +89,12 @@
   const driver_station::JoystickAxis wheel_;
   const driver_station::JoystickAxis throttle_;
   const driver_station::ButtonLocation quick_turn_;
+  // First turn button and its configured use.
   const driver_station::ButtonLocation turn1_;
+  const TurnButtonUse turn1_use_;
+  // Second turn button and its configured use.
   const driver_station::ButtonLocation turn2_;
+  const TurnButtonUse turn2_use_;
 
   // Structure containing the (potentially adjusted) steering and throttle
   // values from the joysticks.
@@ -134,9 +149,18 @@
  public:
   using DrivetrainInputReader::DrivetrainInputReader;
 
+  // What to use the top two buttons for on the pistol grip.
+  enum class TopButtonUse {
+    // Normal shifting.
+    kShift,
+    // Line following (currently just uses top button).
+    kLineFollow,
+  };
+
   // Creates a DrivetrainInputReader with the corresponding joystick ports and
   // axis for the (cheap) pistol grip controller.
-  static std::unique_ptr<PistolDrivetrainInputReader> Make(bool default_high_gear);
+  static std::unique_ptr<PistolDrivetrainInputReader> Make(
+      bool default_high_gear, TopButtonUse top_button_use);
 
  private:
   PistolDrivetrainInputReader(
@@ -158,7 +182,8 @@
       driver_station::ButtonLocation turn1,
       driver_station::ButtonLocation turn2)
       : DrivetrainInputReader(wheel_high, throttle_high, quick_turn, turn1,
-                              turn2),
+                              TurnButtonUse::kLineFollow, turn2,
+                              TurnButtonUse::kLineFollow),
         wheel_low_(wheel_low),
         wheel_velocity_high_(wheel_velocity_high),
         wheel_velocity_low_(wheel_velocity_low),
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index a4d2ca6..d3ed728 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -46,6 +46,7 @@
     deps = [
         ":drivetrain_config",
         "//aos/containers:priority_queue",
+        "//aos/util:math",
         "//frc971/control_loops:c2d",
         "//frc971/control_loops:runge_kutta",
         "//third_party/eigen",
diff --git a/frc971/control_loops/drivetrain/drivetrain_config.h b/frc971/control_loops/drivetrain/drivetrain_config.h
index 0f57bf9..53c2315 100644
--- a/frc971/control_loops/drivetrain/drivetrain_config.h
+++ b/frc971/control_loops/drivetrain/drivetrain_config.h
@@ -92,6 +92,9 @@
 
   Scalar wheel_multiplier;
 
+  // Whether the shift button on the pistol grip enables line following mode.
+  bool pistol_grip_shift_enables_line_follow = false;
+
   // Converts the robot state to a linear distance position, velocity.
   static Eigen::Matrix<Scalar, 2, 1> LeftRightToLinear(
       const Eigen::Matrix<Scalar, 7, 1> &left_right) {
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index 82f409c..119386a 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -4,6 +4,7 @@
 #include <chrono>
 
 #include "aos/containers/priority_queue.h"
+#include "aos/util/math.h"
 #include "frc971/control_loops/c2d.h"
 #include "frc971/control_loops/runge_kutta.h"
 #include "Eigen/Dense"
@@ -48,11 +49,8 @@
     kLeftVelocity = 4,
     kRightEncoder = 5,
     kRightVelocity = 6,
-    kLeftVoltageError = 7,
-    kRightVoltageError = 8 ,
-    kAngularError = 9,
   };
-  static constexpr int kNStates = 10;
+  static constexpr int kNStates = 7;
   static constexpr int kNInputs = 2;
   // Number of previous samples to save.
   static constexpr int kSaveSamples = 50;
@@ -70,19 +68,11 @@
   // variable-size measurement updates.
   typedef Eigen::Matrix<Scalar, kNOutputs, 1> Output;
   typedef Eigen::Matrix<Scalar, kNStates, kNStates> StateSquare;
-  // State is [x_position, y_position, theta, Kalman States], where
-  // Kalman States are the states from the standard drivetrain Kalman Filter,
-  // which is: [left encoder, left ground vel, right encoder, right ground vel,
-  // left voltage error, right voltage error, angular_error], where:
-  // left/right encoder should correspond directly to encoder readings
-  // left/right velocities are the velocity of the left/right sides over the
+  // State is [x_position, y_position, theta, left encoder, left ground vel,
+  // right encoder, right ground vel]. The left/right encoders should
+  // correspond directly to the encoder readings, and the left/right
+  // velocities are the velocities of the left/right sides over the ground.
-  // voltage errors are the difference between commanded and effective voltage,
-  //   used to estimate consistent modelling errors (e.g., friction).
-  // angular error is the difference between the angular velocity as estimated
-  //   by the encoders vs. estimated by the gyro, such as might be caused by
-  //   wheels on one side of the drivetrain being too small or one side's
-  //   wheels slipping more than the other.
   typedef Eigen::Matrix<Scalar, kNStates, 1> State;
 
   // Constructs a HybridEkf for a particular drivetrain.
@@ -406,9 +396,7 @@
 
   // Encoder derivatives
   A_continuous_(kLeftEncoder, kLeftVelocity) = 1.0;
-  A_continuous_(kLeftEncoder, kAngularError) = 1.0;
   A_continuous_(kRightEncoder, kRightVelocity) = 1.0;
-  A_continuous_(kRightEncoder, kAngularError) = -1.0;
 
   // Pull velocity derivatives from velocity matrices.
   // Note that this looks really awkward (doesn't use
@@ -425,21 +413,22 @@
   B_continuous_.setZero();
   B_continuous_.row(kLeftVelocity) = vel_coefs.B_continuous.row(0);
   B_continuous_.row(kRightVelocity) = vel_coefs.B_continuous.row(1);
-  A_continuous_.template block<kNStates, kNInputs>(0, 7) = B_continuous_;
 
   Q_continuous_.setZero();
   // TODO(james): Improve estimates of process noise--e.g., X/Y noise can
   // probably be reduced when we are stopped because you rarely jump randomly.
   // Or maybe it's more appropriate to scale wheelspeed noise with wheelspeed,
   // since the wheels aren't likely to slip much stopped.
-  Q_continuous_(kX, kX) = 0.005;
-  Q_continuous_(kY, kY) = 0.005;
-  Q_continuous_(kTheta, kTheta) = 0.001;
-  Q_continuous_.template block<7, 7>(3, 3) =
-      dt_config_.make_kf_drivetrain_loop().observer().coefficients().Q;
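+  // With the voltage error and angular error states removed, set the encoder
+  // and velocity process noise directly (as squared standard deviations).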
+  Q_continuous_(kX, kX) = 0.01;
+  Q_continuous_(kY, kY) = 0.01;
+  Q_continuous_(kTheta, kTheta) = 0.0002;
+  Q_continuous_(kLeftEncoder, kLeftEncoder) = ::std::pow(0.03, 2.0);
+  Q_continuous_(kRightEncoder, kRightEncoder) = ::std::pow(0.03, 2.0);
+  Q_continuous_(kLeftVelocity, kLeftVelocity) = ::std::pow(0.1, 2.0);
+  Q_continuous_(kRightVelocity, kRightVelocity) = ::std::pow(0.1, 2.0);
 
   P_.setZero();
-  P_.diagonal() << 0.1, 0.1, 0.01, 0.02, 0.01, 0.02, 0.01, 1, 1, 0.03;
+  P_.diagonal() << 0.1, 0.1, 0.01, 0.02, 0.01, 0.02, 0.01;
 
   H_encoders_and_gyro_.setZero();
   // Encoders are stored directly in the state matrix, so are a minor
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
index 27119b1..1702ec4 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
+++ b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
@@ -52,22 +52,16 @@
     EXPECT_EQ(Xdot_ekf(StateIdx::kX, 0), ctheta * (left_vel + right_vel) / 2.0);
     EXPECT_EQ(Xdot_ekf(StateIdx::kY, 0), stheta * (left_vel + right_vel) / 2.0);
     EXPECT_EQ(Xdot_ekf(StateIdx::kTheta, 0), (right_vel - left_vel) / diameter);
-    EXPECT_EQ(Xdot_ekf(StateIdx::kLeftEncoder, 0),
-              left_vel + X(StateIdx::kAngularError, 0));
-    EXPECT_EQ(Xdot_ekf(StateIdx::kRightEncoder, 0),
-              right_vel - X(StateIdx::kAngularError, 0));
+    EXPECT_EQ(Xdot_ekf(StateIdx::kLeftEncoder, 0), left_vel);
+    EXPECT_EQ(Xdot_ekf(StateIdx::kRightEncoder, 0), right_vel);
 
     Eigen::Matrix<double, 2, 1> vel_x(X(StateIdx::kLeftVelocity, 0),
                                       X(StateIdx::kRightVelocity, 0));
     Eigen::Matrix<double, 2, 1> expected_vel_X =
         velocity_plant_coefs_.A_continuous * vel_x +
-        velocity_plant_coefs_.B_continuous *
-            (U + X.middleRows<2>(StateIdx::kLeftVoltageError));
+        velocity_plant_coefs_.B_continuous * U;
     EXPECT_EQ(Xdot_ekf(StateIdx::kLeftVelocity, 0), expected_vel_X(0, 0));
     EXPECT_EQ(Xdot_ekf(StateIdx::kRightVelocity, 0), expected_vel_X(1, 0));
-
-    // Dynamics don't expect error terms to change:
-    EXPECT_EQ(0.0, Xdot_ekf.bottomRows<3>().squaredNorm());
   }
   State DiffEq(const State &X, const Input &U) {
     return ekf_.DiffEq(X, U);
@@ -93,18 +87,14 @@
   CheckDiffEq(State::Zero(), Input::Zero());
   CheckDiffEq(State::Zero(), {-5.0, 5.0});
   CheckDiffEq(State::Zero(), {12.0, -3.0});
-  CheckDiffEq((State() << 100.0, 200.0, M_PI, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, M_PI, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
-  CheckDiffEq((State() << 100.0, 200.0, 2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, 2.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
-  CheckDiffEq((State() << 100.0, 200.0, -2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, -2.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
  // And check that a theta outside of [-M_PI, M_PI] works.
-  CheckDiffEq((State() << 100.0, 200.0, 200.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, 200.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
 }
 
@@ -112,7 +102,7 @@
 // with zero change in time, the state should approach the estimation.
 TEST_F(HybridEkfTest, ZeroTimeCorrect) {
   HybridEkf<>::Output Z(0.5, 0.5, 1);
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setIdentity();
   auto h = [H](const State &X, const Input &) { return H * X; };
   auto dhdx = [H](const State &) { return H; };
@@ -140,7 +130,7 @@
   HybridEkf<>::Output Z(0, 0, 0);
   // Use true_X to track what we think the true robot state is.
   State true_X = ekf_.X_hat();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h = [H](const State &X, const Input &) { return H * X; };
   auto dhdx = [H](const State &) { return H; };
@@ -171,9 +161,6 @@
   EXPECT_NEAR(ekf_.X_hat(StateIdx::kLeftVelocity) * 0.8,
               ekf_.X_hat(StateIdx::kRightVelocity),
               ekf_.X_hat(StateIdx::kLeftVelocity) * 0.1);
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kLeftVoltageError));
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kRightVoltageError));
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kAngularError));
   const double ending_p_norm = ekf_.P().norm();
   // Due to lack of corrections, noise should've increased.
   EXPECT_GT(ending_p_norm, starting_p_norm * 1.10);
@@ -193,7 +180,7 @@
 TEST_P(HybridEkfOldCorrectionsTest, CreateOldCorrection) {
   HybridEkf<>::Output Z;
   Z.setZero();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h_zero = [H](const State &X, const Input &) { return H * X; };
   auto dhdx_zero = [H](const State &) { return H; };
@@ -231,7 +218,7 @@
   expected_X_hat(0, 0) = Z(0, 0);
   expected_X_hat(1, 0) = Z(1, 0) + modeled_X_hat(0, 0);
   expected_X_hat(2, 0) = Z(2, 0);
-  EXPECT_LT((expected_X_hat.topRows<7>() - ekf_.X_hat().topRows<7>()).norm(),
+  EXPECT_LT((expected_X_hat - ekf_.X_hat()).norm(),
            1e-3)
       << "X_hat: " << ekf_.X_hat() << " expected " << expected_X_hat;
   // The covariance after the predictions but before the corrections should
@@ -249,7 +236,7 @@
 TEST_F(HybridEkfTest, DiscardTooOldCorrection) {
   HybridEkf<>::Output Z;
   Z.setZero();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h_zero = [H](const State &X, const Input &) { return H * X; };
   auto dhdx_zero = [H](const State &) { return H; };
@@ -304,11 +291,11 @@
 }
 
 // Tests that encoder updates cause everything to converge properly in the
-// presence of voltage error.
+// presence of an initial velocity error.
 TEST_F(HybridEkfTest, PerfectEncoderUpdatesWithVoltageError) {
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kLeftVoltageError, 0) = 2.0;
-  true_X(StateIdx::kRightVoltageError, 0) = 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.2;
+  true_X(StateIdx::kRightVelocity, 0) = 0.2;
   Input U(5.0, 5.0);
   for (int ii = 0; ii < 1000; ++ii) {
     true_X = Update(true_X, U);
@@ -328,11 +315,11 @@
 
 // Tests encoder/gyro updates when we have some errors in our estimate.
 TEST_F(HybridEkfTest, PerfectEncoderUpdateConverges) {
-  // In order to simulate modelling errors, we add an angular_error and start
-  // the encoder values slightly off.
+  // In order to simulate modelling errors, we start the encoder values
+  // slightly off and give the left side a small initial velocity.
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kAngularError, 0) = 1.0;
   true_X(StateIdx::kLeftEncoder, 0) += 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.1;
   true_X(StateIdx::kRightEncoder, 0) -= 2.0;
   // After enough time, everything should converge to near-perfect (if there
   // were any errors in the original absolute state (x/y/theta) state, then we
@@ -350,7 +337,7 @@
                                    dt_config_.robot_radius / 2.0,
                                U, t0_ + (ii + 1) * dt_config_.dt);
   }
-  EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 1e-5)
+  EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 1e-4)
       << "Expected non-x/y estimates to converge to correct. "
          "Estimated X_hat:\n"
       << ekf_.X_hat() << "\ntrue X:\n"
@@ -359,11 +346,11 @@
 
 // Tests encoder/gyro updates in a realistic-ish scenario with noise:
 TEST_F(HybridEkfTest, RealisticEncoderUpdateConverges) {
-  // In order to simulate modelling errors, we add an angular_error and start
-  // the encoder values slightly off.
+  // In order to simulate modelling errors, we start the encoder values
+  // slightly off and give the left side a small initial velocity.
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kAngularError, 0) = 1.0;
   true_X(StateIdx::kLeftEncoder, 0) += 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.1;
   true_X(StateIdx::kRightEncoder, 0) -= 2.0;
   Input U(10.0, 5.0);
   for (int ii = 0; ii < 100; ++ii) {
@@ -377,7 +364,7 @@
         U, t0_ + (ii + 1) * dt_config_.dt);
   }
   EXPECT_NEAR(
-      (true_X.bottomRows<9>() - ekf_.X_hat().bottomRows<9>()).squaredNorm(),
+      (true_X.bottomRows<6>() - ekf_.X_hat().bottomRows<6>()).squaredNorm(),
       0.0, 2e-3)
       << "Expected non-x/y estimates to converge to correct. "
          "Estimated X_hat:\n" << ekf_.X_hat() << "\ntrue X:\n" << true_X;
@@ -411,7 +398,7 @@
   // Check that we die when only one of h and dhdx are provided:
   EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {}, {},
                             [](const State &) {
-                              return Eigen::Matrix<double, 3, 10>::Zero();
+                              return Eigen::Matrix<double, 3, 7>::Zero();
                             },
                             {}, t0_ + ::std::chrono::seconds(1)),
                "make_h");
diff --git a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
index d085d17..b5417af 100644
--- a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
+++ b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
@@ -182,7 +182,8 @@
   // Because we assume the target selector may have some internal state (e.g.,
  // not confirming a target until some time has passed), we should call
   // UpdateSelection every time.
-  bool new_target = target_selector_->UpdateSelection(abs_state);
+  bool new_target =
+      target_selector_->UpdateSelection(abs_state, goal_velocity_);
   if (freeze_target_) {
     // When freezing the target, only make changes if we didn't have a good
     // target before.
diff --git a/frc971/control_loops/drivetrain/localizer.h b/frc971/control_loops/drivetrain/localizer.h
index af07089..f0685dd 100644
--- a/frc971/control_loops/drivetrain/localizer.h
+++ b/frc971/control_loops/drivetrain/localizer.h
@@ -16,9 +16,12 @@
   // Take the state as [x, y, theta, left_vel, right_vel]
   // If unable to determine what target to go for, returns false. If a viable
   // target is selected, then returns true and sets target_pose.
+  // command_speed is the current goal speed of the drivetrain, generally
+  // generated from the throttle and meant to signify driver intent.
   // TODO(james): Some implementations may also want a drivetrain goal so that
   // driver intent can be divined more directly.
-  virtual bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state) = 0;
+  virtual bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                               double command_speed) = 0;
   // Gets the current target pose. Should only be called if UpdateSelection has
   // returned true.
   virtual TypedPose<double> TargetPose() const = 0;
@@ -59,7 +62,7 @@
 // manually set the target selector state.
 class TrivialTargetSelector : public TargetSelectorInterface {
  public:
-  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &) override {
+  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &, double) override {
     return has_target_;
   }
   TypedPose<double> TargetPose() const override { return pose_; }
@@ -95,9 +98,12 @@
   }
 
   void ResetPosition(double x, double y, double theta) override {
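+    // Preserve the current encoder estimates so that resetting the position
+    // does not introduce a spurious encoder jump.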
+    const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
+    const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
     ekf_.ResetInitialState(
         ::aos::monotonic_clock::now(),
-        (Ekf::State() << x, y, theta, 0, 0, 0, 0, 0, 0, 0).finished(),
+        (Ekf::State() << x, y, theta, left_encoder, 0, right_encoder, 0)
+            .finished(),
         ekf_.P());
   };
 
@@ -110,12 +116,8 @@
   double right_velocity() const override {
     return ekf_.X_hat(StateIdx::kRightVelocity);
   }
-  double left_voltage_error() const override {
-    return ekf_.X_hat(StateIdx::kLeftVoltageError);
-  }
-  double right_voltage_error() const override {
-    return ekf_.X_hat(StateIdx::kRightVoltageError);
-  }
+  double left_voltage_error() const override { return 0.0; }
+  double right_voltage_error() const override { return 0.0; }
 
   TrivialTargetSelector *target_selector() override {
     return &target_selector_;
diff --git a/third_party/cimg/BUILD b/third_party/cimg/BUILD
new file mode 100644
index 0000000..0dfc79b
--- /dev/null
+++ b/third_party/cimg/BUILD
@@ -0,0 +1,13 @@
+licenses(["notice"])
+
+cc_library(
+    name = "CImg",
+    hdrs = glob([
+        "CImg.h",
+        "plugins/*.h",
+    ]),
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party/libjpeg",
+    ],
+)
diff --git a/third_party/cimg/CImg.h b/third_party/cimg/CImg.h
index 20f1fc6..fc52e13 100644
--- a/third_party/cimg/CImg.h
+++ b/third_party/cimg/CImg.h
@@ -436,7 +436,7 @@
 // (see methods 'CImg<T>::{load,save}_jpeg()').
 #ifdef cimg_use_jpeg
 extern "C" {
-#include "jpeglib.h"
+#include "third_party/libjpeg/jpeglib.h"
 #include "setjmp.h"
 }
 #endif
diff --git a/y2019/BUILD b/y2019/BUILD
index 4f11c36..276f09c 100644
--- a/y2019/BUILD
+++ b/y2019/BUILD
@@ -1,5 +1,6 @@
 load("//frc971:downloader.bzl", "robot_downloader")
 load("//aos/build:queues.bzl", "queue_library")
+load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
 
 robot_downloader(
     start_binaries = [
@@ -113,6 +114,7 @@
     ],
     deps = [
         ":status_light",
+        ":vision_proto",
         "//aos:init",
         "//aos/actions:action_lib",
         "//aos/input:action_joystick_input",
@@ -127,8 +129,10 @@
         "//frc971/autonomous:auto_queue",
         "//frc971/autonomous:base_autonomous_actor",
         "//frc971/control_loops/drivetrain:drivetrain_queue",
+        "//frc971/control_loops/drivetrain:localizer_queue",
         "//y2019/control_loops/drivetrain:drivetrain_base",
         "//y2019/control_loops/superstructure:superstructure_queue",
+        "@com_google_protobuf//:protobuf",
     ],
 )
 
@@ -140,6 +144,12 @@
     visibility = ["//visibility:public"],
 )
 
+cc_proto_library(
+    name = "vision_proto",
+    srcs = ["vision.proto"],
+    visibility = ["//visibility:public"],
+)
+
 py_library(
     name = "python_init",
     srcs = ["__init__.py"],
diff --git a/y2019/constants.cc b/y2019/constants.cc
index 0a9a429..285cb21 100644
--- a/y2019/constants.cc
+++ b/y2019/constants.cc
@@ -82,7 +82,7 @@
   elevator_params->zeroing_voltage = 3.0;
   elevator_params->operating_voltage = 12.0;
   elevator_params->zeroing_profile_params = {0.1, 1.0};
-  elevator_params->default_profile_params = {4.0, 16.0};
+  elevator_params->default_profile_params = {4.0, 13.0};
   elevator_params->range = Values::kElevatorRange();
   elevator_params->make_integral_loop =
       &control_loops::superstructure::elevator::MakeIntegralElevatorLoop;
@@ -143,9 +143,9 @@
   stilts_params->zeroing_constants.allowable_encoder_error = 0.9;
 
   r->camera_noise_parameters = {.max_viewable_distance = 10.0,
-                                .heading_noise = 0.02,
-                                .nominal_distance_noise = 0.06,
-                                .nominal_skew_noise = 0.1,
+                                .heading_noise = 0.2,
+                                .nominal_distance_noise = 0.3,
+                                .nominal_skew_noise = 0.35,
                                 .nominal_height_noise = 0.01};
 
   // Deliberately make FOV a bit large so that we are overly conservative in
@@ -201,6 +201,8 @@
 
       stilts_params->zeroing_constants.measured_absolute_position = 0.043580;
       stilts->potentiometer_offset = -0.093820 + 0.0124 - 0.008334 + 0.004507;
+
+      FillCameraPoses(vision::PracticeBotTeensyId(), &r->cameras);
       break;
 
     case kCodingRobotTeamNumber:
@@ -306,8 +308,8 @@
   constexpr double kHpSlotY = InchToMeters((26 * 12 + 10.5) / 2.0 - 25.9);
   constexpr double kHpSlotTheta = M_PI;
 
-  constexpr double kNormalZ = 0.80;
-  constexpr double kPortZ = 0.99;
+  constexpr double kNormalZ = 0.85;
+  constexpr double kPortZ = 1.04;
 
   const Pose far_side_cargo_bay({kFarSideCargoBayX, kSideCargoBayY, kNormalZ},
                                 kSideCargoBayTheta);
diff --git a/y2019/control_loops/drivetrain/drivetrain_base.cc b/y2019/control_loops/drivetrain/drivetrain_base.cc
index 02bd805..1dfe99a 100644
--- a/y2019/control_loops/drivetrain/drivetrain_base.cc
+++ b/y2019/control_loops/drivetrain/drivetrain_base.cc
@@ -28,20 +28,31 @@
       ::frc971::control_loops::drivetrain::GyroType::IMU_Z_GYRO,
       ::frc971::control_loops::drivetrain::IMUType::IMU_X,
 
-      drivetrain::MakeDrivetrainLoop, drivetrain::MakeVelocityDrivetrainLoop,
+      drivetrain::MakeDrivetrainLoop,
+      drivetrain::MakeVelocityDrivetrainLoop,
       drivetrain::MakeKFDrivetrainLoop,
       drivetrain::MakeHybridVelocityDrivetrainLoop,
 
       chrono::duration_cast<chrono::nanoseconds>(
           chrono::duration<double>(drivetrain::kDt)),
-      drivetrain::kRobotRadius, drivetrain::kWheelRadius, drivetrain::kV,
+      drivetrain::kRobotRadius,
+      drivetrain::kWheelRadius,
+      drivetrain::kV,
 
-      drivetrain::kHighGearRatio, drivetrain::kLowGearRatio, drivetrain::kJ,
-      drivetrain::kMass, kThreeStateDriveShifter, kThreeStateDriveShifter,
-      true /* default_high_gear */, 0 /* down_offset if using constants use
-                                   constants::GetValues().down_error */,
-      0.7 /* wheel_non_linearity */, 1.2 /* quickturn_wheel_multiplier */,
+      drivetrain::kHighGearRatio,
+      drivetrain::kLowGearRatio,
+      drivetrain::kJ,
+      drivetrain::kMass,
+      kThreeStateDriveShifter,
+      kThreeStateDriveShifter,
+      true /* default_high_gear */,
+      0 /* down_offset if using constants use
+           constants::GetValues().down_error */,
+      0.7 /* wheel_non_linearity */,
+      1.2 /* quickturn_wheel_multiplier */,
       1.2 /* wheel_multiplier */,
+      true /* pistol_grip_shift_enables_line_follow */,
   };
 
   return kDrivetrainConfig;
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.cc b/y2019/control_loops/drivetrain/event_loop_localizer.cc
index 5a4264d..dad667f 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.cc
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.cc
@@ -33,6 +33,7 @@
       localizer_(dt_config, &robot_pose_) {
   localizer_.ResetInitialState(::aos::monotonic_clock::now(),
                                Localizer::State::Zero(), localizer_.P());
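+  // Seed the localizer with a plausible starting position on the field.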
+  ResetPosition(0.5, 3.4, 0.0);
   frame_fetcher_ = event_loop_->MakeFetcher<CameraFrame>(
       ".y2019.control_loops.drivetrain.camera_frames");
 }
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.h b/y2019/control_loops/drivetrain/event_loop_localizer.h
index 3c2cf4e..9207ec1 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.h
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.h
@@ -35,7 +35,16 @@
 
   void Reset(const Localizer::State &state);
   void ResetPosition(double x, double y, double theta) override {
-    Reset((Localizer::State() << x, y, theta, 0, 0, 0, 0, 0, 0, 0).finished());
+    // When we reset the state, we want to keep the encoder positions intact,
+    // so we copy from the current state, overwrite the pose, and zero the
+    // velocity terms.
+    Localizer::State new_state = localizer_.X_hat();
+    new_state.x() = x;
+    new_state.y() = y;
+    new_state(StateIdx::kTheta, 0) = theta;
+    new_state(StateIdx::kLeftVelocity, 0) = 0.0;
+    new_state(StateIdx::kRightVelocity, 0) = 0.0;
+    Reset(new_state);
   }
 
   void Update(const ::Eigen::Matrix<double, 2, 1> &U,
@@ -55,12 +64,8 @@
   double right_velocity() const override {
     return localizer_.X_hat(StateIdx::kRightVelocity);
   }
-  double left_voltage_error() const override {
-    return localizer_.X_hat(StateIdx::kLeftVoltageError);
-  }
-  double right_voltage_error() const override {
-    return localizer_.X_hat(StateIdx::kRightVoltageError);
-  }
+  double left_voltage_error() const override { return 0.0; }
+  double right_voltage_error() const override { return 0.0; }
 
   TargetSelector *target_selector() override {
     return &target_selector_;
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 8a65170..e086e36 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -67,7 +67,7 @@
   void SetStartingPosition(const Eigen::Matrix<double, 3, 1> &xytheta) {
     *drivetrain_motor_plant_.mutable_state() << xytheta.x(), xytheta.y(),
         xytheta(2, 0), 0.0, 0.0;
-    Eigen::Matrix<double, 10, 1> localizer_state;
+    Eigen::Matrix<double, 7, 1> localizer_state;
     localizer_state.setZero();
     localizer_state.block<3, 1>(0, 0) = xytheta;
     localizer_.Reset(localizer_state);
@@ -296,7 +296,7 @@
       .Send();
   RunForTime(chrono::seconds(3));
   VerifyNearGoal();
-  VerifyEstimatorAccurate(1e-5);
+  VerifyEstimatorAccurate(1e-4);
 }
 
 namespace {
@@ -310,11 +310,11 @@
   SetStartingPosition({4, 3, M_PI});
   my_drivetrain_queue_.goal.MakeWithBuilder()
       .controller_type(3)
-      .throttle(0.5)
+      .throttle(0.9)
       .Send();
   RunForTime(chrono::seconds(10));
 
-  VerifyEstimatorAccurate(1e-10);
+  VerifyEstimatorAccurate(1e-8);
   // Due to the fact that we aren't modulating the throttle, we don't try to hit
   // the target exactly. Instead, just run slightly past the target:
   EXPECT_LT(::std::abs(::aos::math::DiffAngle(
diff --git a/y2019/control_loops/drivetrain/localizer.h b/y2019/control_loops/drivetrain/localizer.h
index f8849ce..b717837 100644
--- a/y2019/control_loops/drivetrain/localizer.h
+++ b/y2019/control_loops/drivetrain/localizer.h
@@ -237,7 +237,7 @@
        // Compute the Kalman update for this step:
         const TargetView &view = camera_views[jj];
         const Eigen::Matrix<Scalar, kNOutputs, kNStates> H =
-            HMatrix(*view.target, target_view);
+            HMatrix(*view.target, camera.pose());
         const Eigen::Matrix<Scalar, kNStates, kNOutputs> PH = P * H.transpose();
         const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> S = H * PH + R;
         // Note: The inverse here should be very cheap so long as kNOutputs = 3.
@@ -354,11 +354,11 @@
   }
 
   Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(
-      const Target &target, const TargetView &target_view) {
+      const Target &target, const Pose &camera_pose) {
     // To calculate dheading/d{x,y,theta}:
     // heading = arctan2(target_pos - camera_pos) - camera_theta
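+    // With respect to the camera position, d(heading)/d(camera_x) is
+    // diffy / norm2 and d(heading)/d(camera_y) is -diffx / norm2.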
     Eigen::Matrix<Scalar, 3, 1> target_pos = target.pose().abs_pos();
-    Eigen::Matrix<Scalar, 3, 1> camera_pos = target_view.camera_pose.abs_pos();
+    Eigen::Matrix<Scalar, 3, 1> camera_pos = camera_pose.abs_pos();
     Scalar diffx = target_pos.x() - camera_pos.x();
     Scalar diffy = target_pos.y() - camera_pos.y();
     Scalar norm2 = diffx * diffx + diffy * diffy;
diff --git a/y2019/control_loops/drivetrain/localizer_test.cc b/y2019/control_loops/drivetrain/localizer_test.cc
index 8b6f7ed..f062234 100644
--- a/y2019/control_loops/drivetrain/localizer_test.cc
+++ b/y2019/control_loops/drivetrain/localizer_test.cc
@@ -399,7 +399,7 @@
     U(1, 0) = ::std::max(::std::min(U(1, 0), 12.0), -12.0);
 
     state = ::frc971::control_loops::RungeKuttaU(
-        [this](const ::Eigen::Matrix<double, 10, 1> &X,
+        [this](const ::Eigen::Matrix<double, 7, 1> &X,
                const ::Eigen::Matrix<double, 2, 1> &U) { return DiffEq(X, U); },
         state, U,
         ::std::chrono::duration_cast<::std::chrono::duration<double>>(
@@ -418,7 +418,7 @@
                            ::std::pow(state(StateIdx::kRightVelocity, 0), 2)) /
                    3.0);
       TestLocalizer::State disturbance;
-      disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0;
+      disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0;
       disturbance *= disturbance_scale;
       state += disturbance;
     }
@@ -498,11 +498,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -514,11 +512,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -529,11 +525,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/false,
@@ -544,26 +538,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0, 0.0)
-                .finished(),
-            /*noisify=*/false,
-            /*disturb=*/false,
-            /*estimate_tolerance=*/1e-4,
-            /*goal_tolerance=*/2e-2,
-        }),
-        // Repeats perfect scenario, but add voltage + angular errors:
-        LocalizerTestParams({
-            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
-            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
-             0.5, 0.02)
-                .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0, 0.0)
+            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -574,11 +551,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/true,
@@ -589,16 +564,14 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/true,
             /*estimate_tolerance=*/0.15,
-            /*goal_tolerance=*/0.5,
+            /*goal_tolerance=*/0.8,
         }),
         // Try another spline, just in case the one I was using is special for
         // some reason; this path will also go straight up to a target, to
@@ -606,11 +579,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.5, 3.5, 4.0, 8.0, 11.0, 10.2}},
             /*control_pts_y=*/{{1.0, 1.0, -3.0, -2.0, -3.5, -3.65}},
-            (TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/false,
diff --git a/y2019/control_loops/drivetrain/target_selector.cc b/y2019/control_loops/drivetrain/target_selector.cc
index 2b70ab7..449ce64 100644
--- a/y2019/control_loops/drivetrain/target_selector.cc
+++ b/y2019/control_loops/drivetrain/target_selector.cc
@@ -11,9 +11,9 @@
       back_viewer_({&robot_pose_, {0.0, 0.0, 0.0}, M_PI}, kFakeFov, fake_noise_,
                    constants::Field().targets(), {}) {}
 
-bool TargetSelector::UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state) {
-  const double speed = (state(3, 0) + state(4, 0)) / 2.0;
-  if (::std::abs(speed) < kMinDecisionSpeed) {
+bool TargetSelector::UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                                     double command_speed) {
+  if (::std::abs(command_speed) < kMinDecisionSpeed) {
     return false;
   }
   *robot_pose_.mutable_pos() << state.x(), state.y(), 0.0;
@@ -21,7 +21,7 @@
   ::aos::SizedArray<FakeCamera::TargetView,
                     y2019::constants::Field::kNumTargets>
       target_views;
-  if (speed > 0) {
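+  // The sign of the driver's commanded speed decides whether we look through
+  // the front or the back camera.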
+  if (command_speed > 0) {
     target_views = front_viewer_.target_views();
   } else {
     target_views = back_viewer_.target_views();
diff --git a/y2019/control_loops/drivetrain/target_selector.h b/y2019/control_loops/drivetrain/target_selector.h
index d3f1a9d..965d7cc 100644
--- a/y2019/control_loops/drivetrain/target_selector.h
+++ b/y2019/control_loops/drivetrain/target_selector.h
@@ -29,14 +29,15 @@
 
   TargetSelector();
 
-  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state) override;
+  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                       double command_speed) override;
   Pose TargetPose() const override { return target_pose_; }
 
  private:
-  static constexpr double kFakeFov = M_PI;
+  static constexpr double kFakeFov = M_PI * 0.7;
   // Commanded speed which the driver must exceed in order for us to make
   // a decision.
-  static constexpr double kMinDecisionSpeed = 0.1;  // m/s
+  static constexpr double kMinDecisionSpeed = 0.7;  // m/s
   Pose robot_pose_;
   Pose target_pose_;
   // For the noise of our fake cameras, we only really care about the max
diff --git a/y2019/control_loops/drivetrain/target_selector_test.cc b/y2019/control_loops/drivetrain/target_selector_test.cc
index 7c0ad7f..4b440c2 100644
--- a/y2019/control_loops/drivetrain/target_selector_test.cc
+++ b/y2019/control_loops/drivetrain/target_selector_test.cc
@@ -18,10 +18,12 @@
 
 // Tests the target selector with:
 // -The [x, y, theta, left_vel, right_vel] state to test at
+// -The current driver commanded speed.
 // -Whether we expect to see a target.
 // -If a target is expected, the pose we expect to get back.
 struct TestParams {
   State state;
+  double command_speed;
   bool expect_target;
   Pose expected_pose;
 };
@@ -31,7 +33,8 @@
   TargetSelector selector;
   bool expect_target = GetParam().expect_target;
   const State state = GetParam().state;
-  ASSERT_EQ(expect_target, selector.UpdateSelection(state))
+  ASSERT_EQ(expect_target,
+            selector.UpdateSelection(state, GetParam().command_speed))
       << "We expected a return of " << expect_target << " at state "
       << state.transpose();
   if (expect_target) {
@@ -54,31 +57,42 @@
     ::testing::Values(
         // When we are far away from anything, we should not register any
         // targets:
-        TestParams{(State() << 0.0, 0.0, 0.0, 1.0, 1.0).finished(), false, {}},
+        TestParams{
+            (State() << 0.0, 0.0, 0.0, 1.0, 1.0).finished(), 1.0, false, {}},
         // Aim for a human-player spot; at low speeds we should not register
         // anything.
-        TestParams{
-            (State() << 4.0, 2.0, M_PI, 0.05, 0.05).finished(), false, {}},
-        TestParams{
-            (State() << 4.0, 2.0, M_PI, -0.05, -0.05).finished(), false, {}},
-        TestParams{(State() << 4.0, 2.0, M_PI, 0.5, 0.5).finished(), true,
+        TestParams{(State() << 4.0, 2.0, M_PI, 0.05, 0.05).finished(),
+                   0.05,
+                   false,
+                   {}},
+        TestParams{(State() << 4.0, 2.0, M_PI, -0.05, -0.05).finished(),
+                   -0.05,
+                   false,
+                   {}},
+        TestParams{(State() << 4.0, 2.0, M_PI, 0.5, 0.5).finished(), 1.0, true,
                    HPSlotLeft()},
         // Put ourselves between the rocket and cargo ship; we should see the
         // portal driving one direction and the near cargo ship port the other.
-        TestParams{(State() << 6.0, 2.0, -M_PI_2, 0.5, 0.5).finished(), true,
-                   CargoNearLeft()},
-        TestParams{(State() << 6.0, 2.0, M_PI_2, -0.5, -0.5).finished(), true,
-                   CargoNearLeft()},
-        TestParams{(State() << 6.0, 2.0, -M_PI_2, -0.5, -0.5).finished(), true,
-                   RocketPortalLeft()},
-        TestParams{(State() << 6.0, 2.0, M_PI_2, 0.5, 0.5).finished(), true,
-                   RocketPortalLeft()},
+        // We also command a speed opposite the current direction of motion and
+        // confirm that that behaves as expected.
+        TestParams{(State() << 6.0, 2.0, -M_PI_2, -0.5, -0.5).finished(), 1.0,
+                   true, CargoNearLeft()},
+        TestParams{(State() << 6.0, 2.0, M_PI_2, 0.5, 0.5).finished(), -1.0,
+                   true, CargoNearLeft()},
+        TestParams{(State() << 6.0, 2.0, -M_PI_2, 0.5, 0.5).finished(), -1.0,
+                   true, RocketPortalLeft()},
+        TestParams{(State() << 6.0, 2.0, M_PI_2, -0.5, -0.5).finished(), 1.0,
+                   true, RocketPortalLeft()},
         // And we shouldn't see anything spinning in place:
-        TestParams{
-            (State() << 6.0, 2.0, M_PI_2, -0.5, 0.5).finished(), false, {}},
+        TestParams{(State() << 6.0, 2.0, M_PI_2, -0.5, 0.5).finished(),
+                   0.0,
+                   false,
+                   {}},
         // Drive backwards off the field--we should not see anything.
-        TestParams{
-            (State() << -0.1, 0.0, 0.0, -0.5, -0.5).finished(), false, {}}));
+        TestParams{(State() << -0.1, 0.0, 0.0, -0.5, -0.5).finished(),
+                   -1.0,
+                   false,
+                   {}}));
 
 }  // namespace testing
 }  // namespace control_loops
diff --git a/y2019/image_streamer/BUILD b/y2019/image_streamer/BUILD
new file mode 100644
index 0000000..4885da0
--- /dev/null
+++ b/y2019/image_streamer/BUILD
@@ -0,0 +1,35 @@
+package(default_visibility = ["//visibility:public"])
+
+cc_binary(
+    name = "image_streamer",
+    srcs = ["image_streamer.cc"],
+    deps = [
+        ":flip_image",
+        "//aos/logging",
+        "//aos/logging:implementations",
+        "//aos/vision/blob:codec",
+        "//aos/vision/events:epoll_events",
+        "//aos/vision/events:socket_types",
+        "//aos/vision/events:udp",
+        "//aos/vision/image:image_stream",
+        "//aos/vision/image:reader",
+        "//y2019:vision_proto",
+        "@com_github_gflags_gflags//:gflags",
+    ],
+)
+
+cc_library(
+    name = "flip_image",
+    srcs = ["flip_image.cc"],
+    hdrs = ["flip_image.h"],
+    copts = [
+        "-Wno-format-nonliteral",
+        "-Wno-cast-align",
+        "-Wno-cast-qual",
+        "-Wno-error=type-limits",
+    ],
+    deps = [
+        "//third_party/cimg:CImg",
+        "//third_party/libjpeg",
+    ],
+)
diff --git a/y2019/image_streamer/README_ODROID_setup.txt b/y2019/image_streamer/README_ODROID_setup.txt
new file mode 100644
index 0000000..16f7287
--- /dev/null
+++ b/y2019/image_streamer/README_ODROID_setup.txt
@@ -0,0 +1,120 @@
+# To build and deploy the image_streamer code to the ODROID, run
+#    ./deploy.sh 10.9.71.179
+# This will also configure and copy a supervisor configuration
+# file, vision.conf, to /etc/supervisor/conf.d.
+
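+# For reference, the supervisor config is roughly of this shape (a sketch
+# only; the actual vision.conf is configured and copied by deploy.sh and may
+# differ):
+#   [program:vision]
+#   command=/root/image_streamer --single_camera=true --camera0_exposure=600
+#   autorestart=true
+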
+# While the code can work with two cameras, as of March 4, 2019,
+# the robot only has one driver camera on it.  To use
+# two cameras, set --single_camera to false.  Use the
+# exposure flag to set the camera exposure.
+
+# To view a camera on a Debian laptop use:
+mplayer -tv device=/dev/video0 tv://
+# or if the video is not on video0 try
+mplayer -tv device=/dev/video4 tv://
+# Show available camera formats.
+# This came from https://superuser.com/questions/494575/ffmpeg-open-webcam-using-yuyv-but-i-want-mjpeg
+ffmpeg -f video4linux2 -list_formats all -i /dev/video2
+
+# Jevois notes:
+# To mount a jevois disk on a Debian system, use y2019/vision/jevois-cmd
+cd y2019/vision/
+sudo ./jevois-cmd help
+sudo ./jevois-cmd usbsd  # This will mount the disk.
+# If needed, install python-serial to run jevois-cmd
+sudo apt-get install python-serial
+
+# Austin debugged the camera startup problems with supervisorctl on the ODROID.
+# Here is a copy of how he did it.  The problem was that the logging
+# was not starting properly.  Austin added a flag to image_streamer
+# so that it would not start logging by default.
+
+root@odroid:~# supervisorctl
+exposure_loop                    RUNNING   pid 625, uptime 0:09:09
+vision                           FATAL     Exited too quickly (process log may have details)
+supervisor> tail -1000 vision stderr
+SURE_ABSOLUTE from 297 to 300
+image_streamer(656)(00018): DEBUG   at 0000000021.963290s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(656)(00019): DEBUG   at 0000000021.963585s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(656)(00020): INFO    at 0000000021.965102s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(656)(00021): INFO    at 0000000021.967263s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(656)(00022): DEBUG   at 0000000021.969020s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(656)(00023): DEBUG   at 0000000021.969275s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+supervisor> restart vision
+vision: ERROR (not running)
+vision: ERROR (spawn error)
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:31
+vision                           STARTING
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:33
+vision                           BACKOFF   Exited too quickly (process log may have details)
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:34
+vision                           BACKOFF   Exited too quickly (process log may have details)
+supervisor> tail -1000 vision stderr
+SURE_ABSOLUTE from 297 to 300
+image_streamer(864)(00018): DEBUG   at 0000000590.870582s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(864)(00019): DEBUG   at 0000000590.870856s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(864)(00020): INFO    at 0000000590.872400s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(864)(00021): INFO    at 0000000590.874543s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(864)(00022): DEBUG   at 0000000590.876289s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(864)(00023): DEBUG   at 0000000590.876547s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+supervisor> tail -1000 vision stdout
+SURE_ABSOLUTE from 297 to 300
+image_streamer(864)(00018): DEBUG   at 0000000590.870582s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(864)(00019): DEBUG   at 0000000590.870856s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(864)(00020): INFO    at 0000000590.872400s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(864)(00021): INFO    at 0000000590.874543s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(864)(00022): DEBUG   at 0000000590.876289s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(864)(00023): DEBUG   at 0000000590.876547s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+root@odroid:~# /root/image_streamer --single_camera=true --camera0_exposure=600
+image_streamer(890)(00000): INFO    at 0000000667.025668s: y2019/vision/image_streamer.cc: 298: main: In main.
+image_streamer(890)(00001): INFO    at 0000000667.025722s: y2019/vision/image_streamer.cc: 300: main: before setting up tcp_server.
+image_streamer(890)(00002): INFO    at 0000000667.025819s: aos/vision/events/tcp_server.cc: 76: SocketBindListenOnPort: connected to port: 80 on fd: 3
+image_streamer(890)(00003): INFO    at 0000000667.025844s: y2019/vision/image_streamer.cc: 302: main: after setting up tcp_server.
+image_streamer(890)(00004): INFO    at 0000000667.025872s: y2019/vision/image_streamer.cc: 303: main: In main.
+image_streamer(890)(00005): INFO    at 0000000667.025896s: y2019/vision/image_streamer.cc: 305: main: In main.
+image_streamer(890)(00006): INFO    at 0000000667.025913s: y2019/vision/image_streamer.cc: 307: main: In main.
+image_streamer(890)(00007): INFO    at 0000000667.025933s: y2019/vision/image_streamer.cc: 309: main: In main.
+image_streamer(890)(00008): INFO    at 0000000667.025952s: y2019/vision/image_streamer.cc: 311: main: In main.
+image_streamer(890)(00009): INFO    at 0000000667.025968s: y2019/vision/image_streamer.cc: 314: main: In main.
+image_streamer(890)(00010): INFO    at 0000000667.025985s: y2019/vision/image_streamer.cc: 317: main: In main.
+image_streamer(890)(00011): INFO    at 0000000667.026001s: y2019/vision/image_streamer.cc: 319: main: In main.
+image_streamer(890)(00012): INFO    at 0000000667.026017s: y2019/vision/image_streamer.cc: 322: main: In main.
+Before setting up udp socket 5001
+image_streamer(890)(00013): INFO    at 0000000667.026090s: y2019/vision/image_streamer.cc: 324: main: In main.
+image_streamer(890)(00014): INFO    at 0000000667.026142s: y2019/vision/image_streamer.cc: 328: main: In main.
+image_streamer(890)(00015): WARNING at 0000000667.026220s: aos/vision/image/reader.cc: 217: Init: xioctl VIDIOC_S_CROP due to 25 (Inappropriate ioctl for device)
+image_streamer(890)(00016): DEBUG   at 0000000667.026646s: aos/vision/image/reader.cc: 162: SetCameraControl: Camera control V4L2_CID_EXPOSURE_AUTO was already 1
+image_streamer(890)(00017): DEBUG   at 0000000667.027819s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_EXPOSURE_ABSOLUTE from 300 to 600
+image_streamer(890)(00018): DEBUG   at 0000000667.027887s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(890)(00019): DEBUG   at 0000000667.027956s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(890)(00020): INFO    at 0000000667.029905s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(890)(00021): INFO    at 0000000667.031824s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(890)(00022): DEBUG   at 0000000667.033340s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(890)(00023): DEBUG   at 0000000667.033369s: aos/vision/image/reader.cc: 301: Start: done with first queue
+Logging to file (./logging/blob_record_38.dat)
+Running Camera
+image_streamer(890)(00024): DEBUG   at 0000000667.236660s: aos/vision/image/reader.cc: 162: SetCameraControl: Camera control V4L2_CID_EXPOSURE_AUTO was already 1
+image_streamer(890)(00025): DEBUG   at 0000000667.238058s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_EXPOSURE_ABSOLUTE from 15 to 600
+image_streamer(890)(00026): INFO    at 0000000667.238178s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29161
+image_streamer(890)(00027): INFO    at 0000000667.238205s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00028): INFO    at 0000000667.252786s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29521
+image_streamer(890)(00029): INFO    at 0000000667.252809s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00030): INFO    at 0000000667.272826s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29559
+image_streamer(890)(00031): INFO    at 0000000667.272848s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00032): INFO    at 0000000667.316659s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29640
+image_streamer(890)(00033): INFO    at 0000000667.316680s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00034): INFO    at 0000000667.377320s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65435
+image_streamer(890)(00035): INFO    at 0000000667.377346s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00036): INFO    at 0000000667.412857s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65651
+image_streamer(890)(00037): INFO    at 0000000667.412945s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00038): INFO    at 0000000667.444955s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65648
+
diff --git a/y2019/image_streamer/deploy.sh b/y2019/image_streamer/deploy.sh
new file mode 100755
index 0000000..584d09e
--- /dev/null
+++ b/y2019/image_streamer/deploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+set -e
+
+echo ""
+echo "USAGE: $0 ODROID_ip_address"
+echo "Example: $0 10.9.71.179"
+echo "Example: $0 10.99.71.179"
+echo ""
+
+if [ $# -ne 1 ]
+  then
+    echo "Illegal number of parameters"
+    exit 1
+fi
+
+if [[ $1 == -*[hH]* ]]
+  then
+    exit
+fi
+
+# Get the script directory (from https://devhints.io/bash)
+DIR="${0%/*}"
+
+# Move into the script directory
+cd "${DIR}"
+echo "# Working in `pwd`"
+
+ODROID_IP_ADDRESS=$1
+ODROID="root@${ODROID_IP_ADDRESS}"
+# Get the IP address of the roboRIO from the ODROID IP address
+# This is needed to properly configure supervisorctl on the ODROID
+# for image_streamer to communicate with the roboRIO.
+ROBORIO=`echo ${ODROID_IP_ADDRESS} | sed 's/\.[0-9]*$/.2/'`
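+# e.g. an ODROID at 10.9.71.179 yields a roboRIO at 10.9.71.2.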
+
+echo "# Using ODORID  ${ODROID}"
+echo "# Using roboRIO ${ROBORIO}"
+
+# This builds the ODROID image_streamer code.
+echo -e "\n# Building image_streamer"
+(
+set -x
+bazel build -c opt //y2019/image_streamer:image_streamer --cpu=armhf-debian
+)
+
+echo -e "\n# Copy files to ODROID"
+(
+set -x
+rsync -av --progress ../../bazel-bin/y2019/image_streamer/image_streamer "${ODROID}":.
+rsync -av --progress README_ODROID_setup.txt "${ODROID}":.
+rsync -av --progress vision.conf "${ODROID}":/etc/supervisor/conf.d/
+)
+
+echo "# Make sure supervisorctl has the correct IP address."
+(
+set -x
+ssh "${ODROID}" sed -i -e "'/image_streamer/ s/10.9.71.2/${ROBORIO}/'" /etc/supervisor/conf.d/vision.conf
+
+ssh "${ODROID}" sync
+)
+
+echo -e "\nCan't restart image_streamer with supervisorctl because the USB devices don't come up reliably..." >&2
+echo "Restart the ODROID now" >&2
diff --git a/y2019/image_streamer/flip_image.cc b/y2019/image_streamer/flip_image.cc
new file mode 100644
index 0000000..6ff00ed
--- /dev/null
+++ b/y2019/image_streamer/flip_image.cc
@@ -0,0 +1,15 @@
+#include "flip_image.h"
+
+#define cimg_display 0
+#define cimg_use_jpeg
+#define cimg_plugin "plugins/jpeg_buffer.h"
+#include "third_party/cimg/CImg.h"
+
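+// Decodes the JPEG in |input|, mirrors it across both axes (a 180-degree
+// rotation), and re-encodes it at quality 80 into |buffer|.  On entry
+// *buffer_size is the capacity of |buffer|; on return it holds the encoded
+// image size.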
+void flip_image(const char *input, const int input_size, JOCTET *buffer,
+                unsigned int *buffer_size) {
+  ::cimg_library::CImg<unsigned char> image;
+  image.load_jpeg_buffer((JOCTET *)(input), input_size);
+  image.mirror("xy");
+
+  image.save_jpeg_buffer(buffer, *buffer_size, 80);
+}
diff --git a/y2019/image_streamer/flip_image.h b/y2019/image_streamer/flip_image.h
new file mode 100644
index 0000000..6a59e96
--- /dev/null
+++ b/y2019/image_streamer/flip_image.h
@@ -0,0 +1,12 @@
+#ifndef Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
+#define Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include "third_party/libjpeg/jerror.h"
+#include "third_party/libjpeg/jpeglib.h"
+
+void flip_image(const char *input, const int input_size, JOCTET *buffer,
+                unsigned int *buffer_size);
+
+#endif  // Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
diff --git a/y2019/image_streamer/image_streamer.cc b/y2019/image_streamer/image_streamer.cc
new file mode 100644
index 0000000..ed581e2
--- /dev/null
+++ b/y2019/image_streamer/image_streamer.cc
@@ -0,0 +1,391 @@
+#include "aos/vision/image/image_stream.h"
+
+#include <sys/stat.h>
+#include <deque>
+#include <fstream>
+#include <string>
+
+#include "aos/logging/implementations.h"
+#include "aos/logging/logging.h"
+#include "aos/vision/blob/codec.h"
+#include "aos/vision/events/socket_types.h"
+#include "aos/vision/events/udp.h"
+#include "aos/vision/image/reader.h"
+#include "gflags/gflags.h"
+#include "y2019/image_streamer/flip_image.h"
+#include "y2019/vision.pb.h"
+
+using ::aos::events::DataSocket;
+using ::aos::events::RXUdpSocket;
+using ::aos::events::TCPServer;
+using ::aos::vision::DataRef;
+using ::aos::vision::Int32Codec;
+using ::aos::monotonic_clock;
+using ::y2019::VisionControl;
+
+DEFINE_string(roborio_ip, "10.9.71.2", "RoboRIO IP Address");
+DEFINE_string(log, "",
+              "If non-empty, log images to the specified prefix with the image "
+              "index appended to the filename");
+DEFINE_bool(single_camera, true, "If true, only use video0");
+DEFINE_int32(camera0_exposure, 600, "Exposure for video0");
+DEFINE_int32(camera1_exposure, 600, "Exposure for video1");
+
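+// HTTP response header announcing a multipart MJPEG stream; each frame is
+// later sent as one "--boundary" part.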
+aos::vision::DataRef mjpg_header =
+    "HTTP/1.0 200 OK\r\n"
+    "Server: YourServerName\r\n"
+    "Connection: close\r\n"
+    "Max-Age: 0\r\n"
+    "Expires: 0\r\n"
+    "Cache-Control: no-cache, private\r\n"
+    "Pragma: no-cache\r\n"
+    "Content-Type: multipart/x-mixed-replace; "
+    "boundary=--boundary\r\n\r\n";
+
+struct Frame {
+  std::string data;
+};
+
+inline bool FileExist(const std::string &name) {
+  struct stat buffer;
+  return (stat(name.c_str(), &buffer) == 0);
+}
+
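+// Appends log entries to the first file named <prefix><index><extension> that
+// does not already exist.  Note that the constructor asserts that the file
+// opened, so it aborts (as in the supervisor logs above) if the log directory
+// is missing or unwritable.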
+class BlobLog {
+ public:
+  explicit BlobLog(const char *prefix, const char *extension) {
+    int index = 0;
+    while (true) {
+      std::string file = prefix + std::to_string(index) + extension;
+      if (FileExist(file)) {
+        index++;
+      } else {
+        printf("Logging to file (%s)\n", file.c_str());
+        ofst_.open(file);
+        assert(ofst_.is_open());
+        break;
+      }
+    }
+  }
+
+  ~BlobLog() { ofst_.close(); }
+
+  void WriteLogEntry(DataRef data) { ofst_.write(&data[0], data.size()); }
+
+ private:
+  std::ofstream ofst_;
+};
+
+class UdpClient : public ::aos::events::EpollEvent {
+ public:
+  UdpClient(int port, ::std::function<void(void *, size_t)> callback)
+      : ::aos::events::EpollEvent(RXUdpSocket::SocketBindListenOnPort(port)),
+        callback_(callback) {}
+
+ private:
+  ::std::function<void(void *, size_t)> callback_;
+
+  void ReadEvent() override {
+    char data[1024];
+    size_t received_data_size = Recv(data, sizeof(data));
+    callback_(data, received_data_size);
+  }
+
+  size_t Recv(void *data, int size) {
+    return PCHECK(recv(fd(), static_cast<char *>(data), size, 0));
+  }
+};
+
+// TODO(aschuh & michael) Pull this out.
+template <typename PB>
+class ProtoUdpClient : public UdpClient {
+ public:
+  ProtoUdpClient(int port, ::std::function<void(const PB &)> proto_callback)
+      : UdpClient(port, ::std::bind(&ProtoUdpClient::ReadData, this,
+                                    ::std::placeholders::_1,
+                                    ::std::placeholders::_2)),
+        proto_callback_(proto_callback) {}
+
+ private:
+  ::std::function<void(const PB &)> proto_callback_;
+
+  void ReadData(void *data, size_t size) {
+    PB pb;
+    // TODO(Brian): Do something useful if parsing fails.
+    pb.ParseFromArray(data, size);
+    proto_callback_(pb);
+  }
+};
+
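+// Serves one HTTP client an MJPEG stream.  The connection stays quiet until
+// the client finishes its request headers (terminated by "\r\n\r\n"); after
+// that, every new frame is written out as a multipart chunk.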
+class MjpegDataSocket : public aos::events::SocketConnection {
+ public:
+  MjpegDataSocket(aos::events::TCPServerBase *server, int fd)
+      : aos::events::SocketConnection(server, fd) {
+    SetEvents(EPOLLOUT | EPOLLET);
+  }
+
+  ~MjpegDataSocket() { printf("Closed connection on descriptor %d\n", fd()); }
+
+  void DirectEvent(uint32_t events) override {
+    if (events & EPOLLOUT) {
+      NewDataToSend();
+      events &= ~EPOLLOUT;
+    }
+    // Other end hung up.  Ditch the connection.
+    if (events & EPOLLHUP) {
+      CloseConnection();
+      events &= ~EPOLLHUP;
+      return;
+    }
+    if (events) {
+      aos::events::EpollEvent::DirectEvent(events);
+    }
+  }
+
+  void ReadEvent() override {
+    ssize_t count;
+    char buf[512];
+    while (true) {
+      // Always read everything so epoll won't return immediately.
+      count = read(fd(), &buf, sizeof buf);
+      if (count == 0) {
+        // The client closed the connection.
+        CloseConnection();
+        return;
+      } else if (count < 0) {
+        if (errno != EAGAIN) {
+          CloseConnection();
+          return;
+        }
+        break;
+      } else if (!ready_to_receive_) {
+        // 4 == strlen("\r\n\r\n"), the end of the client's request headers.
+        if (match_i_ >= 4) {
+          printf("reading after last match\n");
+          continue;
+        }
+        for (char c : aos::vision::DataRef(&buf[0], count)) {
+          if (c == "\r\n\r\n"[match_i_]) {
+            ++match_i_;
+            if (match_i_ >= 4 && !ready_to_receive_) {
+              ready_to_receive_ = true;
+              RasterHeader();
+            }
+          } else {
+            // Restart the match; a '\r' can begin a new "\r\n\r\n".
+            match_i_ = (c == '\r') ? 1 : 0;
+          }
+        }
+      }
+    }
+  }
+
+  void RasterHeader() {
+    output_buffer_.push_back(mjpg_header);
+    NewDataToSend();
+  }
+
+  void RasterFrame(std::shared_ptr<Frame> frame) {
+    if (!output_buffer_.empty() || !ready_to_receive_) return;
+    sending_frame_ = frame;
+    aos::vision::DataRef data = frame->data;
+
+    size_t n_written = snprintf(data_header_tmp_, sizeof(data_header_tmp_),
+                                "--boundary\r\n"
+                                "Content-type: image/jpg\r\n"
+                                "Content-Length: %zu\r\n\r\n",
+                                data.size());
+    // snprintf returns the untruncated length, so anything >= the buffer size
+    // means the header was truncated.  This should never happen because the
+    // buffer should be properly sized.
+    if (n_written >= sizeof(data_header_tmp_)) {
+      fprintf(stderr, "wrong sized buffer\n");
+      exit(-1);
+    }
+    LOG(INFO, "Frame size in bytes: data.size() = %zu\n", data.size());
+    output_buffer_.push_back(aos::vision::DataRef(data_header_tmp_, n_written));
+    output_buffer_.push_back(data);
+    output_buffer_.push_back("\r\n\r\n");
+    NewDataToSend();
+  }
+
+  void NewFrame(std::shared_ptr<Frame> frame) { RasterFrame(std::move(frame)); }
+
+  void NewDataToSend() {
+    while (!output_buffer_.empty()) {
+      auto &data = *output_buffer_.begin();
+
+      while (!data.empty()) {
+        int len = send(fd(), data.data(), data.size(), MSG_NOSIGNAL);
+        if (len == -1) {
+          if (errno == EAGAIN) {
+            // The next EPOLLOUT event will pick this up.
+            return;
+          } else {
+            CloseConnection();
+            return;
+          }
+        } else {
+          data.remove_prefix(len);
+        }
+      }
+      output_buffer_.pop_front();
+    }
+  }
+
+ private:
+  char data_header_tmp_[512];
+  std::shared_ptr<Frame> sending_frame_;
+  std::deque<aos::vision::DataRef> output_buffer_;
+
+  bool ready_to_receive_ = false;
+  void CloseConnection() {
+    loop()->Delete(this);
+    close(fd());
+    delete this;
+  }
+  size_t match_i_ = 0;
+};
+
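+// Streams one camera: optionally logs every 20th frame to a BlobLog, flips
+// the image when requested, broadcasts each frame to the connected MJPEG
+// clients while active, and invokes frame_callback_ once per frame.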
+class CameraStream : public ::aos::vision::ImageStreamEvent {
+ public:
+  CameraStream(::aos::vision::CameraParams params, const ::std::string &fname,
+               TCPServer<MjpegDataSocket> *tcp_server, bool log,
+               ::std::function<void()> frame_callback)
+      : ImageStreamEvent(fname, params),
+        tcp_server_(tcp_server),
+        frame_callback_(frame_callback) {
+    if (log) {
+      log_.reset(new BlobLog(FLAGS_log.c_str(), ".dat"));
+    }
+  }
+
+  void set_active(bool active) { active_ = active; }
+
+  void set_flip(bool flip) { flip_ = flip; }
+
+  bool active() const { return active_; }
+
+  void ProcessImage(DataRef data,
+                    monotonic_clock::time_point /*monotonic_now*/) {
+    ++sampling;
+    // Log one frame out of every 20.
+    if (sampling == 20) {
+      int tmp_size = data.size() + sizeof(int32_t);
+      char *buf;
+      std::string log_record;
+      log_record.resize(tmp_size, 0);
+      {
+        buf = Int32Codec::Write(&log_record[0], tmp_size);
+        data.copy(buf, data.size());
+      }
+      if (log_) {
+        log_->WriteLogEntry(log_record);
+      }
+      sampling = 0;
+    }
+
+    std::string image_out;
+
+    if (flip_) {
+      unsigned int out_size = image_buffer_out_.size();
+      flip_image(data.data(), data.size(), &image_buffer_out_[0], &out_size);
+      image_out.assign(&image_buffer_out_[0], &image_buffer_out_[out_size]);
+    } else {
+      image_out = std::string(data);
+    }
+
+    if (active_) {
+      auto frame = std::make_shared<Frame>(Frame{image_out});
+      tcp_server_->Broadcast(
+          [frame](MjpegDataSocket *event) { event->NewFrame(frame); });
+    }
+    frame_callback_();
+  }
+
+ private:
+  int sampling = 0;
+  TCPServer<MjpegDataSocket> *tcp_server_;
+  ::std::unique_ptr<BlobLog> log_;
+  ::std::function<void()> frame_callback_;
+  bool active_ = false;
+  bool flip_ = false;
+  std::array<JOCTET, 100000> image_buffer_out_;
+};
+
+int main(int argc, char **argv) {
+  gflags::ParseCommandLineFlags(&argc, &argv, false);
+  ::aos::logging::Init();
+  ::aos::logging::AddImplementation(
+      new ::aos::logging::StreamLogImplementation(stderr));
+  TCPServer<MjpegDataSocket> tcp_server_(80);
+  aos::vision::CameraParams params0;
+  params0.set_exposure(FLAGS_camera0_exposure);
+  params0.set_brightness(-40);
+  params0.set_width(320);
+  // params0.set_fps(10);
+  params0.set_height(240);
+
+  aos::vision::CameraParams params1 = params0;
+  params1.set_exposure(FLAGS_camera1_exposure);
+
+  ::y2019::VisionStatus vision_status;
+  LOG(INFO,
+      "The UDP socket should be on port 5001 to 10.9.71.2 for "
+      "the competition robot.\n");
+  LOG(INFO, "Starting UDP socket on port 5001 to %s\n",
+      FLAGS_roborio_ip.c_str());
+  ::aos::events::ProtoTXUdpSocket<::y2019::VisionStatus> status_socket(
+      FLAGS_roborio_ip.c_str(), 5001);
+
+  ::std::unique_ptr<CameraStream> camera1;
+  ::std::unique_ptr<CameraStream> camera0(new CameraStream(
+      params0, "/dev/video0", &tcp_server_, !FLAGS_log.empty(),
+      [&camera0, &status_socket, &vision_status]() {
+        vision_status.set_low_frame_count(vision_status.low_frame_count() + 1);
+        LOG(INFO, "Got a frame cam0\n");
+        if (camera0->active()) {
+          status_socket.Send(vision_status);
+        }
+      }));
+  if (!FLAGS_single_camera) {
+    camera1.reset(new CameraStream(
+        params1, "/dev/video1", &tcp_server_, false,
+        [&camera1, &status_socket, &vision_status]() {
+          vision_status.set_high_frame_count(vision_status.high_frame_count() +
+                                             1);
+          LOG(INFO, "Got a frame cam1\n");
+          if (camera1->active()) {
+            status_socket.Send(vision_status);
+          }
+        }));
+  }
+
+  ProtoUdpClient<VisionControl> udp_client(
+      5000, [&camera0, &camera1](const VisionControl &vision_control) {
+        bool cam0_active = false;
+        camera0->set_flip(vision_control.flip_image());
+        if (camera1) {
+          camera1->set_flip(vision_control.flip_image());
+          cam0_active = !vision_control.high_video();
+          camera0->set_active(!vision_control.high_video());
+          camera1->set_active(vision_control.high_video());
+        } else {
+          cam0_active = true;
+          camera0->set_active(true);
+        }
+        LOG(INFO, "Got control packet, cam%d active\n", cam0_active ? 0 : 1);
+      });
+
+  // Default to camera0
+  camera0->set_active(true);
+  if (camera1) {
+    camera1->set_active(false);
+  }
+
+  aos::events::EpollLoop loop;
+  loop.Add(&tcp_server_);
+  loop.Add(camera0.get());
+  if (camera1) {
+    loop.Add(camera1.get());
+  }
+  loop.Add(&udp_client);
+
+  printf("Running Camera\n");
+  loop.Run();
+}
diff --git a/y2019/image_streamer/videomappings.cfg b/y2019/image_streamer/videomappings.cfg
new file mode 100644
index 0000000..0c6c91b
--- /dev/null
+++ b/y2019/image_streamer/videomappings.cfg
@@ -0,0 +1,544 @@
+############ 971 Spartan Robotics
+#
+# This JeVois configuration file works with the 2019 image_streamer program
+# on the ODROID that is used to send images back to the driver station.  Each uncommented
+# line in this file is a camera configuration.  The JeVois camera can have
+# many configurations (possibly as many as 50).  The default configuration
+# has a '*' at the end of the line.  To avoid confusion and make it clear which
+# configuration to use, only one configuration is active in this file.
+#
+# This file resides on the JeVois in /JEVOIS/config/videomappings.cfg
+# It can be edited by mounting the microSD card on a computer and editing it.
+# It can also be edited by mounting the JeVois disk under Debian.  This
+# is done by running
+#    sudo y2019/vision/tools/jevois-cmd usbsd
+# This mounts the disk in /media/${USER}/JEVOIS
+#
+# Michael, Bahar, and Jay.  March 1, 2019.
+#
+#
+######################################################################################################################
+#
+# JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
+# California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
+#
+# This file is part of the JeVois Smart Embedded Machine Vision Toolkit.  This program is free software; you can
+# redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
+# Foundation, version 2.  This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+# License for more details.  You should have received a copy of the GNU General Public License along with this program;
+# if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
+# Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
+######################################################################################################################
+#
+# JeVois smart camera operation modes and video mappings
+#
+# Format is: <USBmode> <USBwidth> <USBheight> <USBfps> <CAMmode> <CAMwidth> <CAMheight> <CAMfps> <Vendor> <Module> [*]
+#
+# CamMode can be only one of: YUYV, BAYER, RGB565
+# USBmode can be only one of: YUYV, GREY, MJPG, BAYER, RGB565, BGR24, NONE
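+#
+# For example, the single active mapping later in this file streams MJPG
+# 640x480 at 30 fps over USB while capturing RGB565 640x480 at 30 fps:
+#   MJPG 640 480 30.0 RGB565 640 480 30.0 JeVois Convert *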
+
+# USB to camera mode mappings (when USBmode is not NONE) are organized according to output format, which should be
+# unique (no two entries in this file should have same USBmode/USBwidth/USBheight/USBfps). Indeed, these modes can only
+# be selected by the host computer's video grabbing software, and they are selected by picking an output format in that
+# software. These modes cannot be chosen by the JeVois system itself. For these modes, the Module's process(inframe,
+# outframe) function will be called on every frame. Beware that Macs will not accept modes for which USBwidth is not a
+# multiple of 16.
+
+# Camera-only modes (when USBmode is NONE) mode mappings have no video output over USB, and are selected by interacting
+# with the JeVois hardware over serial ports. When USBmode is NONE, USBwidth, USBHeight, and USBfps are ignored and
+# should be set to 0 here. For these modes, the Module's process(inframe) function will be called on every frame. These
+# modes are usually the ones you would use when interfacing the JeVois camera to an Arduino or similar system that
+# cannot stream video over USB and will just receive data from the JeVois camera over a serial port.
+
+# The optional * at the end of one line indicates the format that should be the default one announced by the device to
+# the USB host. This is the one that most webcam programs will select by default when you start them. Note that the
+# guvcview program on linux seems to ignore this and to instead select the last mode you had selected the last time you
+# used the camera. This * cannot be on a mapping that has NONE USBmode. There should be only one * in the whole file.
+
+# Model JeVois-A33 camera sensor supported resolutions and frame rates:
+#
+#  SXGA (1280 x 1024): up to 15 fps
+#   VGA ( 640 x  480): up to 30 fps
+#   CIF ( 352 x  288): up to 60 fps
+#  QVGA ( 320 x  240): up to 60 fps
+#  QCIF ( 176 x  144): up to 120 fps
+# QQVGA ( 160 x  120): up to 60 fps
+# QQCIF (  88 x   72): up to 120 fps
+
+# Frame rates can be set to any value from 0.1fps to the maximum supported for the selected resolution. This is very
+# useful to avoid dropping frames. For example if you have an algorithm that runs at 26.3fps after all possible
+# optimizations, you can set the camera (and usb) frame rate to 26.3 and you will not drop frames (unless your algorithm
+# momentarily performs slower, hence adding a small margin may be a good idea, e.g., select 26.1fps camera and usb
+# rates). This is better than setting the frame rate to 30.0 as this would mean that every so often you would miss the
+# next camera frame and then have to wait for the next one to be captured. If your algorithm really runs at 26.3fps but
+# you specify 30.0fps camera frame rate, then the frames will actually end up being pumped to USB at only 15.0fps (i.e.,
+# by the time you finish processing the current frame, you have missed the next one from the camera, and you need to
+# wait for the following one).
+
+# Note on USB transfer rate: the maximum actual pixel data transfer rate is 3070*8000 = 24.6 Mbytes/s (which is 3kb/USB
+# microframe, max "high bandwidth" setting). Although USB 2.0 has a maximum theoretical rate of 480 Mbit/s, this
+# includes protocol overhead and not all of the bandwidth is available for isochronous (real-time stream) transfers,
+# which we use.  This means that SXGA YUYV (2 bytes/pixel) can only transfer at a max rate of ~9.3 fps over the USB
+# link, although the camera can grab SXGA YUYV at 15 fps. SXGA in Bayer can achieve 15 fps transfer over USB since it
+# only uses 1 byte/pixel.
+
+# To test various video formats on a Linux host, the best is usually to use guvcview. However, this has two issues: 1)
+# it adds some formats which we do not natively support, like RGB3, YU12, YV12, etc, probably by doing some pixel
+# conversion internally over the actual supported modes; 2) support for RGB565 seems brittle, guvcview often crashes
+# when changing resolutions in RGB565 (called RGBP for RGB Packed).
+#
+# Hence, you may want to also try ffplay from the ffmpeg project, which can display all our supported modes and will
+# reject a mode if it does not exactly match what the hardware supports. Example:
+#
+#   ffplay /dev/video0 -pixel_format yuyv422 -video_size 640x480
+#
+# The pixel_format values are: 'yuyv422' for YUYV, 'gray' for GRAY, 'rgb565' for RGB565, 'mjpeg' for MJPG, 'bgr24' for
+# BGR24, and 'bayer_rggb8' for BAYER. You can run 'ffplay -pix_fmts' to see the list of pixel formats that ffplay
+# supports.
+#
+# Here is another example where we record the output of JeVois to a file:
+#
+#   ffmpeg -f v4l2 -pixel_format rgb565 -video_size 320x240 -framerate 22 -i /dev/video0 output.mp4
+
+# On Mac OSX, we recommend using the CamTwist app, as described in the JeVois documentation. You can also use ffplay for
+# OSX: Download the pre-compiled ffplay binary from the ffmpeg web site, and then run:
+#
+#   ~/bin/ffplay -f avfoundation -i "JeVois" -video_size 640x300 -framerate 60 -pixel_format yuyv422
+#
+# (assuming you saved ffplay into your ~/bin/ directory).
+
+# Mac compatibility notes: The JeVois smart camera is correctly detected on Macs and works with PhotoBooth as long as:
+# 1) you have a mapping that outputs YUYV 640x480 (this is the one that PhotoBooth will select (at least on recent OSX
+# like El Capitan, etc); beware that it will also flip the image horizontally); 2) you have no active (not
+# commented-out) mapping with BAYER, RGB565, or BGR24 output. If you have any un-commented mapping with BAYER, RGB565,
+# or BGR24 in your videomappings.cfg, your JeVois smart camera will still be detected by your Mac, PhotoBooth will start
+# and try to use the camera, but it will only display a black screen. Our guess is that this is a bug in the Mac camera
+# driver. It is ok to have additional mappings with YUYV output, as well as mappings with MJPG or GREY output.
+
+#####################################################################################################
+#### Pass-through and simple pixel format conversion modes:
+#####################################################################################################
+#
+##YUYV 1280 960 15.0 BAYER 1280 960 15.0 JeVois Convert
+##YUYV 1280 720 15.0 BAYER 1280 720 15.0 JeVois Convert
+##YUYV 640 480 30.0 BAYER 640 480 30.0 JeVois Convert
+##YUYV 640 360 30.0 BAYER 640 360 30.0 JeVois Convert
+##YUYV 320 240 60.0 BAYER 320 240 60.0 JeVois Convert
+##YUYV 320 180 60.0 BAYER 320 180 60.0 JeVois Convert
+##YUYV 160 120 60.0 BAYER 160 120 60.0 JeVois Convert
+##YUYV 160 90 60.0 BAYER 160 90 60.0 JeVois Convert
+#
+##BAYER 1280 960 15.0 BAYER 1280 960 15.0 JeVois PassThrough
+##BAYER 1280 720 15.0 BAYER 1280 720 15.0 JeVois PassThrough
+##BAYER 640 480 30.0 BAYER 640 480 30.0 JeVois PassThrough
+##BAYER 640 360 30.0 BAYER 640 360 30.0 JeVois PassThrough
+##BAYER 320 240 60.0 BAYER 320 240 60.0 JeVois PassThrough
+##BAYER 320 180 60.0 BAYER 320 180 60.0 JeVois PassThrough
+##BAYER 160 120 60.0 BAYER 160 120 60.0 JeVois PassThrough
+##BAYER 160 90 60.0 BAYER 160 90 60.0 JeVois PassThrough
+#
+##BAYER 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##BGR24 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##GREY 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##RGB565 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+#
+##MJPG 640 480 20.0 YUYV 640 480 20.0 JeVois Convert
+##MJPG 352 288 60.0 BAYER 352 288 60.0 JeVois Convert
+##MJPG 320 240 30.0 RGB565 320 240 30.0 JeVois Convert
+##MJPG 320 240 15.0 YUYV 320 240 15.0 JeVois Convert
+##MJPG 320 240 60.0 RGB565 320 240 60.0 JeVois Convert
+##MJPG 176 144 120.0 BAYER 176 144 120.0 JeVois Convert
+##MJPG 160 120 60.0 YUYV 160 120 60.0 JeVois Convert
+##MJPG 88 72 120.0 RGB565 88 72 120.0 JeVois Convert
+#
+##BAYER 1280 1024 15.0 BAYER 1280 1024 15.0 JeVois PassThrough
+##BAYER 640 480 30.0 BAYER 640 480 30.0 JeVois PassThrough
+##BAYER 352 288 60.0 BAYER 352 288 60.0 JeVois PassThrough
+##BAYER 320 240 60.0 BAYER 320 240 60.0 JeVois PassThrough
+##BAYER 176 144 120.0 BAYER 176 144 120.0 JeVois PassThrough
+##BAYER 160 120 60.0 BAYER 160 120 60.0 JeVois PassThrough
+##BAYER 88 72 120.0 BAYER 88 72 120.0 JeVois PassThrough
+#
+##RGB565 1280 1024 15.0 RGB565 1280 1024 15.0 JeVois PassThrough
+##RGB565 640 480 30.0 RGB565 640 480 30.0 JeVois PassThrough
+##RGB565 320 240 60.0 RGB565 320 240 60.0 JeVois PassThrough
+##RGB565 176 144 120.0 RGB565 176 144 120.0 JeVois PassThrough
+##RGB565 160 120 60.0 RGB565 160 120 60.0 JeVois PassThrough
+##RGB565 88 72 120.0 RGB565 88 72 120.0 JeVois PassThrough
+#
+##YUYV 1280 1024 7.5 YUYV 1280 1024 7.5 JeVois PassThrough
+## Bahar and Michael uncommented the following line.  2/27/2019
+##YUYV 640 480 30.0 YUYV 640 480 30.0 JeVois PassThrough
+# BAYER looked good.
+#MJPG 640 480 30.0 BAYER 640 480 30.0 JeVois Convert *
+MJPG 640 480 30.0 RGB565 640 480 30.0 JeVois Convert *
+##YUYV 640 480 30.0 YUYV 640 480 30.0 JeVois SaveVideo
+##YUYV 640 480 19.6 YUYV 640 480 19.6 JeVois PassThrough
+##YUYV 640 480 12.0 YUYV 640 480 12.0 JeVois PassThrough
+##YUYV 640 480 8.3 YUYV 640 480 8.3 JeVois PassThrough
+##YUYV 640 480 7.5 YUYV 640 480 7.5 JeVois PassThrough
+##YUYV 640 480 5.5 YUYV 640 480 5.5 JeVois PassThrough
+#
+#YUYV 320 240 60.0 YUYV 320 240 60.0 JeVois SaveVideo
+##YUYV 320 240 30.0 YUYV 320 240 30.0 JeVois SaveVideo
+##YUYV 320 240 15.0 YUYV 320 240 15.0 JeVois SaveVideo
+#
+##YUYV 160 120 60.0 YUYV 160 120 60.0 JeVois SaveVideo
+##YUYV 160 120 30.0 YUYV 160 120 30.0 JeVois PassThrough
+#
+##YUYV 352 288 60.0 YUYV 352 288 60.0 JeVois SaveVideo
+##YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PassThrough
+#
+#YUYV 176 144 120.0 YUYV 176 144 120.0 JeVois SaveVideo
+##YUYV 176 144 60.0 YUYV 176 144 60.0 JeVois PassThrough
+##YUYV 176 144 30.0 YUYV 176 144 30.0 JeVois PassThrough
+#
+##YUYV 88 72 120.0 YUYV 88 72 120.0 JeVois SaveVideo
+##YUYV 88 72 60.0 YUYV 88 72 60.0 JeVois PassThrough
+##YUYV 88 72 30.0 YUYV 88 72 30.0 JeVois PassThrough
+#
+#####################################################################################################
+#### Save video to disk, no preview over USB
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 60.0 JeVois SaveVideo
+#NONE 0 0 0 YUYV 320 240 30.0 JeVois SaveVideo
+#NONE 0 0 0 YUYV 176 144 120.0 JeVois SaveVideo
+#
+#####################################################################################################
+#### Demo: Saliency + gist + face detection + object recognition
+#####################################################################################################
+#
+#YUYV 640 312 50.0 YUYV 320 240 50.0 JeVois DemoSalGistFaceObj
+#
+#####################################################################################################
+#### Demo: JeVois intro movie, then Saliency + gist + face detection + object recognition
+#####################################################################################################
+#
+#YUYV 640 360 30.0 YUYV 320 240 30.0 JeVois JeVoisIntro
+#YUYV 640 480 30.0 YUYV 320 240 30.0 JeVois JeVoisIntro
+#
+#####################################################################################################
+#### Demo: Saliency and gist
+#####################################################################################################
+#
+##YUYV 176 90 120.0 YUYV 88 72 120.0 JeVois DemoSaliency
+##YUYV 320 150 60.0 YUYV 160 120 60.0 JeVois DemoSaliency
+##YUYV 352 180 120.0 YUYV 176 144 120.0 JeVois DemoSaliency
+##YUYV 352 180 100.0 YUYV 176 144 100.0 JeVois DemoSaliency
+## Michael and Bahar removed the ' *' from the end of the following line. 2/27/2019
+#YUYV 640 300 60.0 YUYV 320 240 60.0 JeVois DemoSaliency
+##YUYV 704 360 30.0 YUYV 352 288 30.0 JeVois DemoSaliency
+##YUYV 1280 600 15.0 YUYV 640 480 15.0 JeVois DemoSaliency
+#
+#####################################################################################################
+#### Production: Saliency and gist
+#####################################################################################################
+#
+## saliency + feature maps + gist
+##GREY 120 25 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency + feature maps
+##GREY 120 15 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency + gist
+##GREY 20 73 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency only
+##GREY 20 15 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## gist only
+##GREY 72 16 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+#####################################################################################################
+#### Demo: Background subtraction
+#####################################################################################################
+#
+##YUYV 640 240 15.0 YUYV 320 240 15.0 JeVois DemoBackgroundSubtract
+#YUYV 320 120 30.0 YUYV 160 120 30.0 JeVois DemoBackgroundSubtract
+#
+#####################################################################################################
+#### Demo: QR-code and barcode detection and decoding
+#####################################################################################################
+#
+##YUYV 640 526 15.0 YUYV 640 480 15.0 JeVois DemoQRcode
+#YUYV 320 286 30.0 YUYV 320 240 30.0 JeVois DemoQRcode
+##NONE 0 0 0 YUYV 640 480 15.0 JeVois DemoQRcode
+##NONE 0 0 0 YUYV 320 240 30.0 JeVois DemoQRcode
+#
+#####################################################################################################
+#### Road following using vanishing point
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 30.0 JeVois RoadNavigation
+##NONE 0 0 0 YUYV 176 144 120.0 JeVois RoadNavigation
+#YUYV 320 256 30.0 YUYV 320 240 30.0 JeVois RoadNavigation
+##YUYV 176 160 120.0 YUYV 176 144 120.0 JeVois RoadNavigation
+#
+#####################################################################################################
+#### Demo of ARM-Neon SIMD image processing
+#####################################################################################################
+#
+#YUYV 960 240 30.0 YUYV 320 240 30.0 JeVois DemoNeon
+#
+#####################################################################################################
+#### Dense SIFT using VLfeat library
+#####################################################################################################
+#
+## very slow, min keypoint step is 17
+##YUYV 448 240 5.0 YUYV 320 240 5.0 JeVois DenseSift
+#
+## slow too, min keypoint step is 11
+##YUYV 288 120 5.0 YUYV 160 120 5.0 JeVois DenseSift
+#
+## raw keypoints only, assuming step=11, binsize=8
+#GREY 128 117 5.0 YUYV 160 120 5.0 JeVois DenseSift
+#
+#####################################################################################################
+#### Salient regions
+#####################################################################################################
+#
+#YUYV 64 192 25.0 YUYV 320 240 25.0 JeVois SalientRegions
+##YUYV 100 400 10.0 YUYV 640 480 10.0 JeVois SalientRegions
+#
+#####################################################################################################
+#### Superpixel image segmentation/clustering
+#####################################################################################################
+#
+#GREY 320 240 30.0 YUYV 320 240 30.0 JeVois SuperPixelSeg
+#
+#####################################################################################################
+#### Eye tracking using the openEyes toolkit
+#####################################################################################################
+#
+##GREY 640 480 30.0 YUYV 640 480 30.0 JeVois DemoEyeTracker
+##GREY 320 240 60.0 YUYV 320 240 60.0 JeVois DemoEyeTracker
+#GREY 176 144 120.0 YUYV 176 144 120.0 JeVois DemoEyeTracker
+#
+#####################################################################################################
+#### Demo: ArUco augmented-reality markers detection and decoding
+#####################################################################################################
+#
+#NONE 0 0 0.0 YUYV 320 240 30.0 JeVois DemoArUco
+#YUYV 320 260 30.0 YUYV 320 240 30.0 JeVois DemoArUco
+#YUYV 640 500 20.0 YUYV 640 480 20.0 JeVois DemoArUco
+#
+#####################################################################################################
+#### Edge detection using Canny
+#####################################################################################################
+#
+#GREY 640 480 29.0 YUYV 640 480 29.0 JeVois EdgeDetection
+#GREY 320 240 59.0 YUYV 320 240 59.0 JeVois EdgeDetection
+##GREY 176 144 119.0 YUYV 176 144 119.0 JeVois EdgeDetection
+#
+#####################################################################################################
+#### Edge detection using 4 Canny filters in parallel, with different settings
+#####################################################################################################
+#
+#GREY 320 960 45.0 YUYV 320 240 45.0 JeVois EdgeDetectionX4
+#
+#####################################################################################################
+#### Color-based object tracker
+#####################################################################################################
+#
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois ObjectTracker
+#YUYV 320 254 60.0 YUYV 320 240 60.0 JeVois ObjectTracker
+#
+#####################################################################################################
+#### GPU color image processing demo
+#####################################################################################################
+#
+##RGB565 320 240 22.0 YUYV 320 240 22.0 JeVois DemoGPU
+#
+#####################################################################################################
+#### Combo CPU multithreaded saliency/gist + 4x GPU grayscale image processing demo
+#####################################################################################################
+#
+#GREY 160 495 60.0 YUYV 160 120 60.0 JeVois DemoCPUGPU
+#
+#####################################################################################################
+#### Fast optical flow computation
+#####################################################################################################
+#
+#GREY 176 288 100 YUYV 176 144 100 JeVois OpticalFlow
+#
+#####################################################################################################
+#### Object detection using SURF keypoints
+#####################################################################################################
+#
+#YUYV 320 252 30.0 YUYV 320 240 30.0 JeVois ObjectDetect
+#
+#####################################################################################################
+#### Salient region detection and identification using SURF keypoints
+#####################################################################################################
+#
+#YUYV 320 288 30.0 YUYV 320 240 30.0 JeVois SaliencySURF
+#
+#####################################################################################################
+#### CPU + GPU + NEON burn test
+#####################################################################################################
+#
+##YUYV 640 300 10.0 YUYV 320 240 10.0 JeVois BurnTest
+#
+#####################################################################################################
+#### Python tests
+#####################################################################################################
+#
+#YUYV 640 480 15.0 YUYV 640 480 15.0 JeVois PythonTest
+#GREY 640 480 20.0 YUYV 640 480 20.0 JeVois PythonOpenCV
+#YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PythonSandbox
+#
+#####################################################################################################
+#### Image color filtering
+#####################################################################################################
+#
+#YUYV 640 240 30.0 YUYV 320 240 30.0 JeVois ColorFiltering
+#
+#####################################################################################################
+#### Dice counting tutorial
+#####################################################################################################
+#
+#YUYV 640 480 7.5 YUYV 640 480 7.5 JeVois DiceCounter
+#
+#####################################################################################################
+#### Augmented reality markers with ARtoolkit
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 60.0 JeVois DemoARtoolkit
+#NONE 0 0 0 YUYV 640 480 30.0 JeVois DemoARtoolkit
+#NONE 0 0 0 YUYV 1280 1024 15.0 JeVois DemoARtoolkit
+#YUYV 320 258 60.0 YUYV 320 240 60.0 JeVois DemoARtoolkit
+##YUYV 640 498 30.0 YUYV 640 480 30.0 JeVois DemoARtoolkit
+#
+#####################################################################################################
+#### Augmented reality markers with ARtoolkit, ArUco, and QR-Code
+#####################################################################################################
+#
+##YUYV 320 306 50.0 YUYV 320 240 50.0 JeVois MarkersCombo
+#YUYV 640 546 20.0 YUYV 640 480 20.0 JeVois MarkersCombo
+#
+#####################################################################################################
+#### Detect objects in scenes using darknet YOLO deep neural network
+#####################################################################################################
+#
+#YUYV 1280 480 15.0 YUYV 640 480 15.0 JeVois DarknetYOLO
+#
+#####################################################################################################
+#### Detect objects in scenes using darknet deep neural network
+#####################################################################################################
+#
+#YUYV 544 240 15.0 YUYV 320 240 15.0 JeVois DarknetSingle
+##YUYV 448 240 15.0 YUYV 320 240 15.0 JeVois DarknetSingle
+##YUYV 864 480 15.0 YUYV 640 480 15.0 JeVois DarknetSingle
+##YUYV 1088 480 15.0 YUYV 640 480 15.0 JeVois DarknetSingle
+#
+#####################################################################################################
+#### Detect salient objects in scenes using saliency + darknet deep neural network
+#####################################################################################################
+#
+##YUYV 460 240 15.0 YUYV 320 240 15.0 JeVois DarknetSaliency # not for mac (width not multiple of 16)
+##YUYV 560 240 15.0 YUYV 320 240 15.0 JeVois DarknetSaliency
+#YUYV 880 480 10.0 YUYV 640 480 10.0 JeVois DarknetSaliency
+#
+#####################################################################################################
+#### FIRST robotics object detection example in C++
+#####################################################################################################
+#
+##YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
+##YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
+#YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
+#YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
+#NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
+#
+#####################################################################################################
+#### FIRST robotics object detection example in Python
+#####################################################################################################
+#
+#YUYV 640 252 60.0 YUYV 320 240 60.0 JeVois FirstPython
+##YUYV 320 252 60.0 YUYV 320 240 60.0 JeVois FirstPython
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstPython
+#NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstPython
+#
+#####################################################################################################
+#### Object detection using SURF and 6D pose estimation
+#####################################################################################################
+#
+#YUYV 320 262 15.0 YUYV 320 240 15.0 JeVois PythonObject6D
+##YUYV 640 502 10.0 YUYV 640 480 10.0 JeVois PythonObject6D
+#NONE 0 0 0.0 YUYV 320 240 15.0 JeVois PythonObject6D
+#
+#####################################################################################################
+#### Edge detection using 4 Canny filters in parallel, with different settings, example python parallel
+#####################################################################################################
+#
+## Disabled by default because Python multiprocessing is very buggy. Note that enabling this mapping may also
+## render your JeVois camera undetectable by OSX hosts.
+#
+##YUYV 1280 240 30.0 YUYV 320 240 30.0 JeVois PythonParallel
+#
+#####################################################################################################
+#### Detect objects in scenes using tensorflow deep neural network
+#####################################################################################################
+#
+##YUYV 560 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
+#YUYV 464 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
+##YUYV 880 480 15.0 YUYV 640 480 15.0 JeVois TensorFlowSingle
+##YUYV 1104 480 15.0 YUYV 640 480 15.0 JeVois TensorFlowSingle
+#
+#####################################################################################################
+#### Detect salient objects in scenes using saliency + tensorflow deep neural network
+#####################################################################################################
+#
+##YUYV 448 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+#YUYV 512 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+##YUYV 544 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+#
+#####################################################################################################
+#### Detect objects in scenes using tensorflow deep neural network, easy version
+#####################################################################################################
+#
+#YUYV 320 308 30.0 YUYV 320 240 30.0 JeVois TensorFlowEasy
+##YUYV 640 548 30.0 YUYV 640 480 30.0 JeVois TensorFlowEasy
+#YUYV 1280 1092 7.0 YUYV 1280 1024 7.0 JeVois TensorFlowEasy
+#
+#####################################################################################################
+#### ArUco augmented-reality markers detection and decoding + color blob detection
+#####################################################################################################
+#
+#YUYV 320 266 30.0 YUYV 320 240 30.0 JeVois ArUcoBlob
+#
+#####################################################################################################
+#### Detect and identify objects in scenes using OpenCV DNN detection framework
+#####################################################################################################
+#
+#YUYV 640 502 20.0 YUYV 640 480 20.0 JeVois PyDetectionDNN
+#YUYV 640 498 15.0 YUYV 640 480 15.0 JeVois DetectionDNN
+#
+#####################################################################################################
+#### Simple demo of the ICM-20948 IMU attached to the AR0135 global shutter sensor
+#####################################################################################################
+#
+#YUYV 640 512 40.0 YUYV 640 480 40.0 JeVois DemoIMU
+#
+#####################################################################################################
+#### Object classification using OpenCV DNN in Python
+#####################################################################################################
+#
+#YUYV 320 264 30.0 YUYV 320 240 30.0 JeVois PyClassificationDNN
+#
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+## Modules provided by jevoisextra
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#
+#YUYV 320 264 15.0 YUYV 320 240 15.0 JeVois FaceDetector
+#
diff --git a/y2019/image_streamer/vision.conf b/y2019/image_streamer/vision.conf
new file mode 100644
index 0000000..df789dd
--- /dev/null
+++ b/y2019/image_streamer/vision.conf
@@ -0,0 +1,5 @@
+[program:vision]
+command=/root/image_streamer --single_camera=true --camera0_exposure=600 --roborio_ip=10.9.71.2
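+# deploy.sh rewrites the 10.9.71.2 above to the roboRIO address for the target network.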
+redirect_stderr=false
+autostart=true
+autorestart=true
diff --git a/y2019/jevois/teensy.cc b/y2019/jevois/teensy.cc
index 9ccc7d4..45a35b0 100644
--- a/y2019/jevois/teensy.cc
+++ b/y2019/jevois/teensy.cc
@@ -932,38 +932,38 @@
   PORTD_PCR6 = PORT_PCR_MUX(1);
 
   // These go to CAM1.
-  // UART0_RX (peripheral) is UART1_RX (schematic).
+  // UART0_RX (peripheral) is UART1_RX (schematic) is UART1_TX_RAW (label TX).
   PORTA_PCR15 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART0_TX (peripheral) is UART1_TX (schematic).
+  // UART0_TX (peripheral) is UART1_TX (schematic) is UART1_RX_RAW (label RX).
   PORTA_PCR14 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM0.
-  // UART1_RX (peripheral) is UART0_RX (schematic).
+  // UART1_RX (peripheral) is UART0_RX (schematic) is UART0_TX_RAW (label TX).
   PORTC_PCR3 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                0 /* !PS to pull down */;
-  // UART1_TX (peripheral) is UART0_TX (schematic).
+  // UART1_TX (peripheral) is UART0_TX (schematic) is UART0_RX_RAW (label RX).
   PORTC_PCR4 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM2.
-  // UART2_RX
+  // UART2_RX is UART2_TX_RAW (label TX).
   PORTD_PCR2 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                0 /* !PS to pull down */;
-  // UART2_TX
+  // UART2_TX is UART2_RX_RAW (label RX).
   PORTD_PCR3 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM3.
-  // UART3_RX
+  // UART3_RX is UART3_TX_RAW (label TX).
   PORTB_PCR10 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART3_TX
+  // UART3_TX is UART3_RX_RAW (label RX).
   PORTB_PCR11 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM4.
-  // UART4_RX
+  // UART4_RX is UART4_TX_RAW (label TX).
   PORTE_PCR25 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART4_TX
+  // UART4_TX is UART4_RX_RAW (label RX).
   PORTE_PCR24 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   Uarts uarts;
diff --git a/y2019/joystick_reader.cc b/y2019/joystick_reader.cc
index 26a8ce1..b08d6b6 100644
--- a/y2019/joystick_reader.cc
+++ b/y2019/joystick_reader.cc
@@ -11,28 +11,38 @@
 #include "aos/input/joystick_input.h"
 #include "aos/logging/logging.h"
 #include "aos/logging/logging.h"
+#include "aos/network/team_number.h"
 #include "aos/util/log_interval.h"
+#include "aos/vision/events/udp.h"
+#include "external/com_google_protobuf/src/google/protobuf/stubs/stringprintf.h"
 #include "frc971/autonomous/auto.q.h"
 #include "frc971/autonomous/base_autonomous_actor.h"
 #include "frc971/control_loops/drivetrain/drivetrain.q.h"
+#include "frc971/control_loops/drivetrain/localizer.q.h"
 
 #include "y2019/control_loops/drivetrain/drivetrain_base.h"
 #include "y2019/control_loops/superstructure/superstructure.q.h"
 #include "y2019/status_light.q.h"
+#include "y2019/vision.pb.h"
 
 using ::y2019::control_loops::superstructure::superstructure_queue;
+using ::frc971::control_loops::drivetrain::localizer_control;
 using ::aos::input::driver_station::ButtonLocation;
 using ::aos::input::driver_station::ControlBit;
 using ::aos::input::driver_station::JoystickAxis;
 using ::aos::input::driver_station::POVLocation;
+using ::aos::events::ProtoTXUdpSocket;
 
 namespace y2019 {
 namespace input {
 namespace joysticks {
 
+using google::protobuf::StringPrintf;
+
 const ButtonLocation kSuctionBall(3, 13);
 const ButtonLocation kSuctionHatch(3, 12);
 const ButtonLocation kDeployStilt(3, 8);
+const ButtonLocation kHalfStilt(3, 6);
 const ButtonLocation kFallOver(3, 9);
 
 struct ElevatorWristPosition {
@@ -63,6 +73,13 @@
 const ButtonLocation kPanelHPIntakeBackward(5, 5);
 
 const ButtonLocation kRelease(2, 4);
+const ButtonLocation kResetLocalizer(4, 3);
+
+const ButtonLocation kAutoPanel(3, 10);
+const ButtonLocation kAutoPanelIntermediate(4, 6);
+
+const ElevatorWristPosition kAutoPanelPos{0.0, -M_PI / 2.0};
+const ElevatorWristPosition kAutoPanelIntermediatePos{0.34, -M_PI / 2.0};
 
 const ElevatorWristPosition kStowPos{0.36, 0.0};
 
@@ -104,10 +121,13 @@
       : ::aos::input::ActionJoystickInput(
             event_loop,
             ::y2019::control_loops::drivetrain::GetDrivetrainConfig()) {
+    const uint16_t team = ::aos::network::GetTeamNumber();
     superstructure_queue.goal.FetchLatest();
     if (superstructure_queue.goal.get()) {
       grab_piece_ = superstructure_queue.goal->suction.grab_piece;
     }
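+    // The ODROID video streamer is at 10.TE.AM.179 (e.g. 10.9.71.179 for team
+    // 971); build that address from the team number.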
+    video_tx_.reset(new ProtoTXUdpSocket<VisionControl>(
+        StringPrintf("10.%d.%d.179", team / 100, team % 100), 5000));
   }
 
   void HandleTeleop(const ::aos::input::driver_station::Data &data) {
@@ -121,6 +141,16 @@
 
     auto new_superstructure_goal = superstructure_queue.goal.MakeMessage();
 
+    if (data.PosEdge(kResetLocalizer)) {
+      auto localizer_resetter = localizer_control.MakeMessage();
+      localizer_resetter->x = 0.4;
+      localizer_resetter->y = 3.4;
+      localizer_resetter->theta = 0.0;
+      if (!localizer_resetter.Send()) {
+        LOG(ERROR, "Failed to reset localizer.\n");
+      }
+    }
+
     if (data.IsPressed(kSuctionBall)) {
       grab_piece_ = true;
     } else if (data.IsPressed(kSuctionHatch)) {
@@ -158,8 +188,12 @@
       } else {
         new_superstructure_goal->stilts.profile_params.max_acceleration = 2.0;
       }
+    } else if (data.IsPressed(kHalfStilt)) {
+      new_superstructure_goal->stilts.unsafe_goal = 0.345;
+      new_superstructure_goal->stilts.profile_params.max_velocity = 0.65;
+      new_superstructure_goal->stilts.profile_params.max_acceleration = 0.75;
     } else {
-      new_superstructure_goal->stilts.unsafe_goal = 0.01;
+      new_superstructure_goal->stilts.unsafe_goal = 0.005;
       new_superstructure_goal->stilts.profile_params.max_velocity = 0.25;
       new_superstructure_goal->stilts.profile_params.max_acceleration = 2.0;
     }
@@ -170,6 +204,12 @@
       stilts_was_above_ = false;
     }
 
+    if (data.IsPressed(kAutoPanel)) {
+      elevator_wrist_pos_ = kAutoPanelPos;
+    } else if (data.IsPressed(kAutoPanelIntermediate)) {
+      elevator_wrist_pos_ = kAutoPanelIntermediatePos;
+    }
+
     if (switch_ball_) {
       if (superstructure_queue.status->has_piece) {
         new_superstructure_goal->wrist.profile_params.max_acceleration = 20;
@@ -263,6 +303,8 @@
       new_superstructure_goal->suction.gamepiece_mode = 1;
     }
 
+    vision_control_.set_flip_image(elevator_wrist_pos_.wrist < 0);
+
     new_superstructure_goal->suction.grab_piece = grab_piece_;
 
     new_superstructure_goal->elevator.unsafe_goal =
@@ -273,6 +315,8 @@
     if (!new_superstructure_goal.Send()) {
       LOG(ERROR, "Sending superstructure goal failed.\n");
     }
+
+    video_tx_->Send(vision_control_);
   }
 
  private:
@@ -282,6 +326,9 @@
 
   bool switch_ball_ = false;
   bool stilts_was_above_ = false;
+
+  VisionControl vision_control_;
+  ::std::unique_ptr<ProtoTXUdpSocket<VisionControl>> video_tx_;
 };
 
 }  // namespace joysticks
diff --git a/y2019/vision.proto b/y2019/vision.proto
new file mode 100644
index 0000000..6a78805
--- /dev/null
+++ b/y2019/vision.proto
@@ -0,0 +1,13 @@
+syntax = "proto2";
+
+package y2019;
+
+message VisionControl {
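+  // When true, stream camera1 (the "high" camera) to the driver station;
+  // otherwise stream camera0.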
+  optional bool high_video = 1;
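+  // When true, mirror the streamed image across both axes.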
+  optional bool flip_image = 2;
+}
+
+message VisionStatus {
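+  // Cumulative per-camera frame counts reported by image_streamer.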
+  optional uint32 high_frame_count = 1;
+  optional uint32 low_frame_count = 2;
+}