Merge "Set all the LED pins to GPIO mode"
diff --git a/aos/input/drivetrain_input.cc b/aos/input/drivetrain_input.cc
index 67ee9f3..02957a8 100644
--- a/aos/input/drivetrain_input.cc
+++ b/aos/input/drivetrain_input.cc
@@ -23,8 +23,6 @@
 
 void DrivetrainInputReader::HandleDrivetrain(
     const ::aos::input::driver_station::Data &data) {
-  bool is_control_loop_driving = false;
-
   const auto wheel_and_throttle = GetWheelAndThrottle(data);
   const double wheel = wheel_and_throttle.wheel;
   const double wheel_velocity = wheel_and_throttle.wheel_velocity;
@@ -39,7 +37,32 @@
     robot_velocity_ = drivetrain_queue.status->robot_speed;
   }
 
-  if (data.PosEdge(turn1_) || data.PosEdge(turn2_)) {
+  bool is_control_loop_driving = false;
+  bool is_line_following = false;
+
+  if (data.IsPressed(turn1_)) {
+    switch (turn1_use_) {
+      case TurnButtonUse::kControlLoopDriving:
+        is_control_loop_driving = true;
+        break;
+      case TurnButtonUse::kLineFollow:
+        is_line_following = true;
+        break;
+    }
+  }
+
+  if (data.IsPressed(turn2_)) {
+    switch (turn2_use_) {
+      case TurnButtonUse::kControlLoopDriving:
+        is_control_loop_driving = true;
+        break;
+      case TurnButtonUse::kLineFollow:
+        is_line_following = true;
+        break;
+    }
+  }
+
+  if (is_control_loop_driving) {
     if (drivetrain_queue.status.get()) {
       left_goal_ = drivetrain_queue.status->estimated_left_position;
       right_goal_ = drivetrain_queue.status->estimated_right_position;
@@ -49,9 +72,6 @@
       left_goal_ - wheel * wheel_multiplier_ + throttle * 0.3;
   const double current_right_goal =
       right_goal_ + wheel * wheel_multiplier_ + throttle * 0.3;
-  if (data.IsPressed(turn1_) || data.IsPressed(turn2_)) {
-    is_control_loop_driving = true;
-  }
   auto new_drivetrain_goal = drivetrain_queue.goal.MakeMessage();
   new_drivetrain_goal->wheel = wheel;
   new_drivetrain_goal->wheel_velocity = wheel_velocity;
@@ -61,7 +81,8 @@
   new_drivetrain_goal->throttle_torque = throttle_torque;
   new_drivetrain_goal->highgear = high_gear;
   new_drivetrain_goal->quickturn = data.IsPressed(quick_turn_);
-  new_drivetrain_goal->controller_type = is_control_loop_driving ? 1 : 0;
+  new_drivetrain_goal->controller_type =
+      is_line_following ? 3 : (is_control_loop_driving ? 1 : 0);
   new_drivetrain_goal->left_goal = current_left_goal;
   new_drivetrain_goal->right_goal = current_right_goal;
 
@@ -192,15 +213,17 @@
   const ButtonLocation kTurn1(1, 7);
   const ButtonLocation kTurn2(1, 11);
   std::unique_ptr<SteeringWheelDrivetrainInputReader> result(
-      new SteeringWheelDrivetrainInputReader(kSteeringWheel, kDriveThrottle,
-                                             kQuickTurn, kTurn1, kTurn2));
+      new SteeringWheelDrivetrainInputReader(
+          kSteeringWheel, kDriveThrottle, kQuickTurn, kTurn1,
+          TurnButtonUse::kControlLoopDriving, kTurn2,
+          TurnButtonUse::kControlLoopDriving));
   result.get()->set_default_high_gear(default_high_gear);
 
   return result;
 }
 
 std::unique_ptr<PistolDrivetrainInputReader> PistolDrivetrainInputReader::Make(
-    bool default_high_gear) {
+    bool default_high_gear, TopButtonUse top_button_use) {
   // Pistol Grip controller
   const JoystickAxis kTriggerHigh(1, 1), kTriggerLow(1, 4),
       kTriggerVelocityHigh(1, 2), kTriggerVelocityLow(1, 5),
@@ -211,12 +234,23 @@
       kWheelTorqueLow(2, 6);
 
   const ButtonLocation kQuickTurn(1, 3);
-  const ButtonLocation kShiftHigh(1, 1);
-  const ButtonLocation kShiftLow(1, 2);
 
-  // Nop
-  const ButtonLocation kTurn1(1, 9);
-  const ButtonLocation kTurn2(1, 10);
+  const ButtonLocation TopButton(1, 1);
+  const ButtonLocation SecondButton(1, 2);
+  // Non-existent button for nops.
+  const ButtonLocation DummyButton(1, 10);
+
+  // TODO(james): Make a copy assignment operator for ButtonLocation so we don't
+  // have to shoehorn in these ternary operators.
+  const ButtonLocation kTurn1 =
+      (top_button_use == TopButtonUse::kLineFollow) ? TopButton : DummyButton;
+  // Turn2 does nothing on the pistol grip in either mode.
+  const ButtonLocation kTurn2 = DummyButton;
+  const ButtonLocation kShiftHigh =
+      (top_button_use == TopButtonUse::kShift) ? TopButton : DummyButton;
+  const ButtonLocation kShiftLow =
+      (top_button_use == TopButtonUse::kShift) ? SecondButton : DummyButton;
+
   std::unique_ptr<PistolDrivetrainInputReader> result(
       new PistolDrivetrainInputReader(
           kWheelHigh, kWheelLow, kTriggerVelocityHigh, kTriggerVelocityLow,
@@ -239,7 +273,9 @@
 
   std::unique_ptr<XboxDrivetrainInputReader> result(
       new XboxDrivetrainInputReader(kSteeringWheel, kDriveThrottle, kQuickTurn,
-                                    kTurn1, kTurn2));
+                                    kTurn1, TurnButtonUse::kControlLoopDriving,
+                                    kTurn2,
+                                    TurnButtonUse::kControlLoopDriving));
   return result;
 }
 ::std::unique_ptr<DrivetrainInputReader> DrivetrainInputReader::Make(
@@ -256,8 +292,11 @@
           SteeringWheelDrivetrainInputReader::Make(dt_config.default_high_gear);
       break;
     case InputType::kPistol:
-      drivetrain_input_reader =
-          PistolDrivetrainInputReader::Make(dt_config.default_high_gear);
+      drivetrain_input_reader = PistolDrivetrainInputReader::Make(
+          dt_config.default_high_gear,
+          dt_config.pistol_grip_shift_enables_line_follow
+              ? PistolDrivetrainInputReader::TopButtonUse::kLineFollow
+              : PistolDrivetrainInputReader::TopButtonUse::kShift);
       break;
     case InputType::kXbox:
       drivetrain_input_reader = XboxDrivetrainInputReader::Make();
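
The net effect of the drivetrain_input.cc changes above is that controller_type is now a three-way value: 0 for open-loop teleop, 1 for control loop driving, and 3 for line following, with each of the two turn buttons independently configurable. A minimal standalone sketch of that dispatch, with the driver-station plumbing reduced to plain bools (the real code reads these via data.IsPressed()):

```cpp
#include <cstdint>

enum class TurnButtonUse { kControlLoopDriving, kLineFollow };

// Folds one button's state into the two mode flags, exactly as the switch
// statements in HandleDrivetrain() do for turn1_ and turn2_.
void ApplyButton(bool pressed, TurnButtonUse use,
                 bool *is_control_loop_driving, bool *is_line_following) {
  if (!pressed) return;
  switch (use) {
    case TurnButtonUse::kControlLoopDriving:
      *is_control_loop_driving = true;
      break;
    case TurnButtonUse::kLineFollow:
      *is_line_following = true;
      break;
  }
}

int32_t ControllerType(bool turn1_pressed, TurnButtonUse turn1_use,
                       bool turn2_pressed, TurnButtonUse turn2_use) {
  bool is_control_loop_driving = false;
  bool is_line_following = false;
  ApplyButton(turn1_pressed, turn1_use, &is_control_loop_driving,
              &is_line_following);
  ApplyButton(turn2_pressed, turn2_use, &is_control_loop_driving,
              &is_line_following);
  // Line following (3) takes priority over control loop driving (1).
  return is_line_following ? 3 : (is_control_loop_driving ? 1 : 0);
}
```
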
diff --git a/aos/input/drivetrain_input.h b/aos/input/drivetrain_input.h
index 5274cfa..80046ee 100644
--- a/aos/input/drivetrain_input.h
+++ b/aos/input/drivetrain_input.h
@@ -35,17 +35,28 @@
 // joystick types.
 class DrivetrainInputReader {
  public:
+  // What to use the turn1/2 buttons for.
+  enum class TurnButtonUse {
+    // Use the button to enable control loop driving.
+    kControlLoopDriving,
+    // Use the button to set line following mode.
+    kLineFollow,
+  };
   // Inputs driver station button and joystick locations
   DrivetrainInputReader(driver_station::JoystickAxis wheel,
                         driver_station::JoystickAxis throttle,
                         driver_station::ButtonLocation quick_turn,
                         driver_station::ButtonLocation turn1,
-                        driver_station::ButtonLocation turn2)
+                        TurnButtonUse turn1_use,
+                        driver_station::ButtonLocation turn2,
+                        TurnButtonUse turn2_use)
       : wheel_(wheel),
         throttle_(throttle),
         quick_turn_(quick_turn),
         turn1_(turn1),
-        turn2_(turn2) {}
+        turn1_use_(turn1_use),
+        turn2_(turn2),
+        turn2_use_(turn2_use) {}
 
   virtual ~DrivetrainInputReader() = default;
 
@@ -78,8 +89,12 @@
   const driver_station::JoystickAxis wheel_;
   const driver_station::JoystickAxis throttle_;
   const driver_station::ButtonLocation quick_turn_;
+  // Button for enabling control loop driving.
   const driver_station::ButtonLocation turn1_;
+  const TurnButtonUse turn1_use_;
+  // Button for enabling line following.
   const driver_station::ButtonLocation turn2_;
+  const TurnButtonUse turn2_use_;
 
   // Structure containing the (potentially adjusted) steering and throttle
   // values from the joysticks.
@@ -134,9 +149,18 @@
  public:
   using DrivetrainInputReader::DrivetrainInputReader;
 
+  // What to use the top two buttons for on the pistol grip.
+  enum class TopButtonUse {
+    // Normal shifting.
+    kShift,
+    // Line following (currently just uses top button).
+    kLineFollow,
+  };
+
   // Creates a DrivetrainInputReader with the corresponding joystick ports and
   // axis for the (cheap) pistol grip controller.
-  static std::unique_ptr<PistolDrivetrainInputReader> Make(bool default_high_gear);
+  static std::unique_ptr<PistolDrivetrainInputReader> Make(
+      bool default_high_gear, TopButtonUse top_button_use);
 
  private:
   PistolDrivetrainInputReader(
@@ -158,7 +182,8 @@
       driver_station::ButtonLocation turn1,
       driver_station::ButtonLocation turn2)
       : DrivetrainInputReader(wheel_high, throttle_high, quick_turn, turn1,
-                              turn2),
+                              TurnButtonUse::kLineFollow, turn2,
+                              TurnButtonUse::kLineFollow),
         wheel_low_(wheel_low),
         wheel_velocity_high_(wheel_velocity_high),
         wheel_velocity_low_(wheel_velocity_low),
diff --git a/aos/vision/blob/BUILD b/aos/vision/blob/BUILD
index 69a2cc5..81afb93 100644
--- a/aos/vision/blob/BUILD
+++ b/aos/vision/blob/BUILD
@@ -37,9 +37,28 @@
 
 cc_library(
     name = "threshold",
-    hdrs = ["threshold.h"],
+    srcs = [
+        "threshold.cc",
+    ],
+    hdrs = [
+        "threshold.h",
+    ],
     deps = [
         ":range_image",
+        "//aos/logging",
+        "//aos/vision/image:image_types",
+    ],
+)
+
+cc_test(
+    name = "threshold_test",
+    srcs = [
+        "threshold_test.cc",
+    ],
+    deps = [
+        ":range_image",
+        ":threshold",
+        "//aos/testing:googletest",
         "//aos/vision/image:image_types",
     ],
 )
diff --git a/aos/vision/blob/range_image.cc b/aos/vision/blob/range_image.cc
index c01a919..81a8c48 100644
--- a/aos/vision/blob/range_image.cc
+++ b/aos/vision/blob/range_image.cc
@@ -5,18 +5,10 @@
 
 namespace aos {
 namespace vision {
+namespace {
 
-void DrawRangeImage(const RangeImage &rimg, ImagePtr outbuf, PixelRef color) {
-  for (int i = 0; i < (int)rimg.ranges().size(); ++i) {
-    int y = rimg.min_y() + i;
-    for (ImageRange rng : rimg.ranges()[i]) {
-      for (int x = rng.st; x < rng.ed; ++x) {
-        outbuf.get_px(x, y) = color;
-      }
-    }
-  }
-}
-
+// Merge sort of multiple range images into a single range image.
+// They must not overlap.
 RangeImage MergeRangeImage(const BlobList &blobl) {
   if (blobl.size() == 1) return blobl[0];
 
@@ -48,6 +40,8 @@
   }
 }
 
+}  // namespace
+
 std::string ShortDebugPrint(const BlobList &blobl) {
   RangeImage rimg = MergeRangeImage(blobl);
   std::string out;
@@ -88,6 +82,10 @@
   }
 }
 
+void PrintTo(const ImageRange &range, std::ostream *os) {
+  *os << "{" << range.st << ", " << range.ed << "}";
+}
+
 void RangeImage::Flip(int image_width, int image_height) {
   std::reverse(ranges_.begin(), ranges_.end());
   for (std::vector<ImageRange> &range : ranges_) {
@@ -102,6 +100,31 @@
   min_y_ = image_height - static_cast<int>(ranges_.size()) - min_y_;
 }
 
+void PrintTo(const RangeImage &range, std::ostream *os) {
+  *os << "{min_y=" << range.min_y() << ", ranges={";
+  bool first_row = true;
+  for (const auto &row : range) {
+    if (first_row) {
+      first_row = false;
+    } else {
+      *os << ", ";
+    }
+    *os << "{";
+    bool first_value = true;
+    for (const auto &value : row) {
+      if (first_value) {
+        first_value = false;
+      } else {
+        *os << ", ";
+      }
+      *os << "{" << value.st << ", " << value.ed << "}";
+    }
+    *os << "}";
+  }
+  *os << "}}";
+}
+
 int RangeImage::npixels() {
   if (npixelsc_ > 0) {
     return npixelsc_;
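
The two PrintTo overloads added here exist for googletest: when an EXPECT_EQ on ImageRange or RangeImage values fails, gtest locates PrintTo via argument-dependent lookup in aos::vision and prints the structured form instead of raw bytes. A small hedged sketch (assuming the two-argument RangeImage constructor that the threshold code uses):

```cpp
#include "aos/vision/blob/range_image.h"
#include "gtest/gtest.h"

namespace aos {
namespace vision {

TEST(RangeImagePrintTo, FailureMessagesAreReadable) {
  const RangeImage a(0, {{ImageRange(1, 3)}});
  const RangeImage b(1, {{ImageRange(1, 3)}});
  // If this assertion ever failed, gtest would format both sides through
  // PrintTo, e.g. "{min_y=0, ranges={{{1, 3}}}}".
  EXPECT_NE(a, b);
}

}  // namespace vision
}  // namespace aos
```
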
diff --git a/aos/vision/blob/range_image.h b/aos/vision/blob/range_image.h
index 3647890..a735442 100644
--- a/aos/vision/blob/range_image.h
+++ b/aos/vision/blob/range_image.h
@@ -21,8 +21,14 @@
   int calc_width() const { return ed - st; }
 
   bool operator<(const ImageRange &o) const { return st < o.st; }
+  bool operator==(const ImageRange &other) const {
+    return st == other.st && ed == other.ed;
+  }
+  bool operator!=(const ImageRange &other) const { return !(*this == other); }
 };
 
+void PrintTo(const ImageRange &range, std::ostream *os);
+
 // Image in pre-thresholded run-length encoded format.
 class RangeImage {
  public:
@@ -31,9 +37,22 @@
   explicit RangeImage(int l) { ranges_.reserve(l); }
   RangeImage() {}
 
+  bool operator==(const RangeImage &other) const {
+    if (min_y_ != other.min_y_) { return false; }
+    if (ranges_ != other.ranges_) { return false; }
+    return true;
+  }
+  bool operator!=(const RangeImage &other) const { return !(*this == other); }
+
   int size() const { return ranges_.size(); }
 
+  // Returns the total number of included pixels.
   int npixels();
+  // Calculates the total number of included pixels.
+  //
+  // TODO(Brian): Present a nicer API than the current duality between this and
+  // npixels(), which is annoying because npixels() has to modify the cached
+  // data so it can't be const.
   int calc_area() const;
 
   void Flip(ImageFormat fmt) { Flip(fmt.w, fmt.h); }
@@ -59,22 +78,19 @@
   // minimum index in y where the blob starts
   int min_y_ = 0;
 
-  // ranges are always sorted in y then x order
+  // Each vector<ImageRange> represents all the matched ranges in a given row.
+  // Each ImageRange within that row represents a run of pixels which matches.
   std::vector<std::vector<ImageRange>> ranges_;
 
   // Cached pixel count.
   int npixelsc_ = -1;
 };
 
+void PrintTo(const RangeImage &range, std::ostream *os);
+
 typedef std::vector<RangeImage> BlobList;
 typedef std::vector<const RangeImage *> BlobLRef;
 
-void DrawRangeImage(const RangeImage &rimg, ImagePtr outbuf, PixelRef color);
-
-// Merge sort of multiple range images into a single range image.
-// They must not overlap.
-RangeImage MergeRangeImage(const BlobList &blobl);
-
 // Debug print range image as ranges.
 std::string ShortDebugPrint(const BlobList &blobl);
 // Debug print range image as ### for the ranges.
diff --git a/aos/vision/blob/threshold.cc b/aos/vision/blob/threshold.cc
new file mode 100644
index 0000000..74809d1
--- /dev/null
+++ b/aos/vision/blob/threshold.cc
@@ -0,0 +1,233 @@
+#include "aos/vision/blob/threshold.h"
+
+#include "aos/logging/logging.h"
+
+namespace aos {
+namespace vision {
+
+// Expands to a unique value for each combination of values for 5 bools.
+#define MASH(v0, v1, v2, v3, v4)                                  \
+  ((uint8_t(v0) << 4) | (uint8_t(v1) << 3) | (uint8_t(v2) << 2) | \
+   (uint8_t(v3) << 1) | (uint8_t(v4)))
+
+// At a high level, the algorithm is the same as the slow thresholding, except
+// it operates in 4-pixel chunks. The handling for each of these chunks is
+// manually flattened (via codegen) into a 32-case switch statement: 2^4 cases
+// for each of the 4 pixels being in or out, doubled by whether the chunk
+// starts inside a range or not.
+RangeImage FastYuyvYThreshold(ImageFormat fmt, const char *data,
+                              uint8_t value) {
+  CHECK_EQ(0, fmt.w % 4);
+  std::vector<std::vector<ImageRange>> result;
+  result.reserve(fmt.h);
+
+  // Iterate through each row.
+  for (int y = 0; y < fmt.h; ++y) {
+    // The start of the data for the current row.
+    const char *const current_row = fmt.w * y * 2 + data;
+    bool in_range = false;
+    int current_range_start = -1;
+    std::vector<ImageRange> current_row_ranges;
+    // Iterate through each 4-pixel chunk
+    for (int x = 0; x < fmt.w / 4; ++x) {
+      // The per-channel (YUYV) values in the current chunk.
+      uint8_t chunk_channels[8];
+      memcpy(&chunk_channels[0], current_row + x * 4 * 2, 8);
+      const uint8_t pattern =
+          MASH(in_range, chunk_channels[0] > value, chunk_channels[2] > value,
+               chunk_channels[4] > value, chunk_channels[6] > value);
+      switch (pattern) {
+        // clang-format off
+/*
+# Ruby code to generate the below code:
+32.times do |v|
+        puts "case MASH(#{[v[4], v[3], v[2], v[1], v[0]].join(", ")}):"
+        in_range = v[4]
+        current_range_start = "current_range_start"
+        4.times do |i|
+                if v[3 - i] != in_range
+                        if (in_range == 1)
+                                puts "  current_row_ranges.emplace_back(ImageRange(#{current_range_start}, x * 4 + #{i}));"
+                        else
+                                current_range_start = "x * 4 + #{i}"
+                        end
+                        in_range = v[3 - i]
+                end
+        end
+        if (current_range_start != "current_range_start")
+                puts "  current_range_start = #{current_range_start};"
+        end
+        if (in_range != v[4])
+                puts "  in_range = #{["false", "true"][v[0]]};"
+        end
+        puts "  break;"
+end
+*/
+        // clang-format on
+        case MASH(0, 0, 0, 0, 0):
+          break;
+        case MASH(0, 0, 0, 0, 1):
+          current_range_start = x * 4 + 3;
+          in_range = true;
+          break;
+        case MASH(0, 0, 0, 1, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
+          current_range_start = x * 4 + 2;
+          break;
+        case MASH(0, 0, 0, 1, 1):
+          current_range_start = x * 4 + 2;
+          in_range = true;
+          break;
+        case MASH(0, 0, 1, 0, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
+          current_range_start = x * 4 + 1;
+          break;
+        case MASH(0, 0, 1, 0, 1):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
+          current_range_start = x * 4 + 3;
+          in_range = true;
+          break;
+        case MASH(0, 0, 1, 1, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 3));
+          current_range_start = x * 4 + 1;
+          break;
+        case MASH(0, 0, 1, 1, 1):
+          current_range_start = x * 4 + 1;
+          in_range = true;
+          break;
+        case MASH(0, 1, 0, 0, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
+          current_range_start = x * 4 + 0;
+          break;
+        case MASH(0, 1, 0, 0, 1):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
+          current_range_start = x * 4 + 3;
+          in_range = true;
+          break;
+        case MASH(0, 1, 0, 1, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
+          current_range_start = x * 4 + 2;
+          break;
+        case MASH(0, 1, 0, 1, 1):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
+          current_range_start = x * 4 + 2;
+          in_range = true;
+          break;
+        case MASH(0, 1, 1, 0, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 2));
+          current_range_start = x * 4 + 0;
+          break;
+        case MASH(0, 1, 1, 0, 1):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 2));
+          current_range_start = x * 4 + 3;
+          in_range = true;
+          break;
+        case MASH(0, 1, 1, 1, 0):
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 0, x * 4 + 3));
+          current_range_start = x * 4 + 0;
+          break;
+        case MASH(0, 1, 1, 1, 1):
+          current_range_start = x * 4 + 0;
+          in_range = true;
+          break;
+        case MASH(1, 0, 0, 0, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          in_range = false;
+          break;
+        case MASH(1, 0, 0, 0, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_range_start = x * 4 + 3;
+          break;
+        case MASH(1, 0, 0, 1, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
+          current_range_start = x * 4 + 2;
+          in_range = false;
+          break;
+        case MASH(1, 0, 0, 1, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_range_start = x * 4 + 2;
+          break;
+        case MASH(1, 0, 1, 0, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
+          current_range_start = x * 4 + 1;
+          in_range = false;
+          break;
+        case MASH(1, 0, 1, 0, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
+          current_range_start = x * 4 + 3;
+          break;
+        case MASH(1, 0, 1, 1, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 1, x * 4 + 3));
+          current_range_start = x * 4 + 1;
+          in_range = false;
+          break;
+        case MASH(1, 0, 1, 1, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 0));
+          current_range_start = x * 4 + 1;
+          break;
+        case MASH(1, 1, 0, 0, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 1));
+          in_range = false;
+          break;
+        case MASH(1, 1, 0, 0, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 1));
+          current_range_start = x * 4 + 3;
+          break;
+        case MASH(1, 1, 0, 1, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 1));
+          current_row_ranges.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
+          current_range_start = x * 4 + 2;
+          in_range = false;
+          break;
+        case MASH(1, 1, 0, 1, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 1));
+          current_range_start = x * 4 + 2;
+          break;
+        case MASH(1, 1, 1, 0, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 2));
+          in_range = false;
+          break;
+        case MASH(1, 1, 1, 0, 1):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 2));
+          current_range_start = x * 4 + 3;
+          break;
+        case MASH(1, 1, 1, 1, 0):
+          current_row_ranges.emplace_back(
+              ImageRange(current_range_start, x * 4 + 3));
+          in_range = false;
+          break;
+        case MASH(1, 1, 1, 1, 1):
+          break;
+      }
+    }
+    if (in_range) {
+      current_row_ranges.emplace_back(ImageRange(current_range_start, fmt.w));
+    }
+    result.push_back(current_row_ranges);
+  }
+  return RangeImage(0, std::move(result));
+}
+
+#undef MASH
+
+}  // namespace vision
+}  // namespace aos
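
For reference, a standalone sketch of how the dispatch value is formed: the high bit records whether the chunk started inside a range, and the low four bits are the threshold results for the chunk's four Y samples (YUYV stores Y at even byte offsets, which is why the code samples bytes 0, 2, 4, and 6):

```cpp
#include <cstdint>

constexpr uint8_t Mash(bool v0, bool v1, bool v2, bool v3, bool v4) {
  return (uint8_t(v0) << 4) | (uint8_t(v1) << 3) | (uint8_t(v2) << 2) |
         (uint8_t(v3) << 1) | uint8_t(v4);
}

// One 4-pixel YUYV chunk: the Y samples are bytes 0, 2, 4, and 6.
constexpr uint8_t kChunk[8] = {200, 10, 50, 20, 200, 30, 200, 40};

// Entering this chunk outside of a range, the Y comparisons give 1, 0, 1, 1,
// so the switch takes case MASH(0, 1, 0, 1, 1), i.e. pattern 0b01011 == 11.
static_assert(Mash(false, kChunk[0] > 127, kChunk[2] > 127, kChunk[4] > 127,
                   kChunk[6] > 127) == 11,
              "pattern selection example");
```
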
diff --git a/aos/vision/blob/threshold.h b/aos/vision/blob/threshold.h
index eef5b20..9891722 100644
--- a/aos/vision/blob/threshold.h
+++ b/aos/vision/blob/threshold.h
@@ -1,56 +1,86 @@
-#ifndef _AOS_VIISON_BLOB_THRESHOLD_H_
-#define _AOS_VIISON_BLOB_THRESHOLD_H_
+#ifndef AOS_VISION_BLOB_THRESHOLD_H_
+#define AOS_VISION_BLOB_THRESHOLD_H_
 
 #include "aos/vision/blob/range_image.h"
 #include "aos/vision/image/image_types.h"
 
 namespace aos {
 namespace vision {
+namespace threshold_internal {
 
-// ThresholdFn should be a lambda.
-template <typename ThresholdFn>
-RangeImage DoThreshold(ImageFormat fmt, ThresholdFn &&fn) {
-  std::vector<std::vector<ImageRange>> ranges;
-  ranges.reserve(fmt.h);
+// Performs thresholding in a given region using a function which determines
+// whether a given point is in or out of the region.
+//
+// fn must return a bool when called with two integers (x, y).
+template <typename PointTestFn>
+RangeImage ThresholdPointsWithFunction(ImageFormat fmt, PointTestFn &&fn) {
+  static_assert(
+      std::is_convertible<PointTestFn, std::function<bool(int, int)>>::value,
+      "Invalid threshold function");
+  std::vector<std::vector<ImageRange>> result;
+  result.reserve(fmt.h);
+  // Iterate through each row.
   for (int y = 0; y < fmt.h; ++y) {
-    bool p_score = false;
-    int pstart = -1;
-    std::vector<ImageRange> rngs;
+    // Whether we're currently in a range.
+    bool in_range = false;
+    int current_range_start = -1;
+    std::vector<ImageRange> current_row_ranges;
+    // Iterate through each pixel.
     for (int x = 0; x < fmt.w; ++x) {
-      if (fn(x, y) != p_score) {
-        if (p_score) {
-          rngs.emplace_back(ImageRange(pstart, x));
+      if (fn(x, y) != in_range) {
+        if (in_range) {
+          current_row_ranges.emplace_back(ImageRange(current_range_start, x));
         } else {
-          pstart = x;
+          current_range_start = x;
         }
-        p_score = !p_score;
+        in_range = !in_range;
       }
     }
-    if (p_score) {
-      rngs.emplace_back(ImageRange(pstart, fmt.w));
+    if (in_range) {
+      current_row_ranges.emplace_back(ImageRange(current_range_start, fmt.w));
     }
-    ranges.push_back(rngs);
+    result.push_back(current_row_ranges);
   }
-  return RangeImage(0, std::move(ranges));
+  return RangeImage(0, std::move(result));
 }
 
-// ThresholdFn should be a lambda.
+}  // namespace threshold_internal
+
+// Thresholds an image using a function which determines whether a given pixel
+// value is in or out of the region.
+//
+// fn must return a bool when called with a PixelRef.
 template <typename ThresholdFn>
-RangeImage DoThreshold(const ImagePtr &img, ThresholdFn &&fn) {
-  return DoThreshold(img.fmt(),
-                     [&](int x, int y) { return fn(img.get_px(x, y)); });
+RangeImage ThresholdImageWithFunction(const ImagePtr &img, ThresholdFn &&fn) {
+  static_assert(
+      std::is_convertible<ThresholdFn, std::function<bool(PixelRef)>>::value,
+      "Invalid threshold function");
+  return threshold_internal::ThresholdPointsWithFunction(
+      img.fmt(), [&](int x, int y) { return fn(img.get_px(x, y)); });
 }
 
-// YUYV image types:
-inline RangeImage DoThresholdYUYV(ImageFormat fmt, const char *data,
-                                  uint8_t value) {
-  return DoThreshold(fmt, [&](int x, int y) {
-    uint8_t v = data[y * fmt.w * 2 + x * 2];
-    return v > value;
-  });
+// Thresholds an image in YUYV format, selecting pixels with a Y (luma) greater
+// than value.
+//
+// This is implemented via a simple function that pulls out the Y values and
+// compares each of them. It mostly exists for tests to compare against
+// FastYuyvYThreshold, because it's obviously correct.
+inline RangeImage SlowYuyvYThreshold(ImageFormat fmt, const char *data,
+                                     uint8_t value) {
+  return threshold_internal::ThresholdPointsWithFunction(
+      fmt, [&](int x, int y) {
+        uint8_t v = data[x * 2 + y * fmt.w * 2];
+        return v > value;
+      });
 }
 
+// Thresholds an image in YUYV format, selecting pixels with a Y (luma) greater
+// than value. The width must be a multiple of 4.
+//
+// This is implemented via some tricky bit shuffling that goes fast.
+RangeImage FastYuyvYThreshold(ImageFormat fmt, const char *data, uint8_t value);
+
 }  // namespace vision
 }  // namespace aos
 
-#endif  //  _AOS_VIISON_BLOB_THRESHOLD_H_
+#endif  //  AOS_VISION_BLOB_THRESHOLD_H_
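
A short usage sketch: both entry points take the same (format, data, value) arguments and must agree whenever the width is a multiple of 4, which is exactly what threshold_test.cc below verifies at scale.

```cpp
#include <cassert>
#include <vector>

#include "aos/vision/blob/threshold.h"

aos::vision::RangeImage ThresholdDemo() {
  aos::vision::ImageFormat format;
  format.w = 8;  // Must be a multiple of 4 for the fast path.
  format.h = 2;
  // An all-black YUYV image with one bright Y sample at (0, 0).
  std::vector<char> yuyv(format.w * format.h * 2, 0);
  yuyv[0] = static_cast<char>(200);
  const auto slow = aos::vision::SlowYuyvYThreshold(format, yuyv.data(), 127);
  const auto fast = aos::vision::FastYuyvYThreshold(format, yuyv.data(), 127);
  assert(slow == fast);  // Expect a single range {0, 1} in row 0.
  return fast;
}
```
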
diff --git a/aos/vision/blob/threshold_test.cc b/aos/vision/blob/threshold_test.cc
new file mode 100644
index 0000000..108da35
--- /dev/null
+++ b/aos/vision/blob/threshold_test.cc
@@ -0,0 +1,193 @@
+#include "aos/vision/blob/threshold.h"
+
+#include <random>
+#include <vector>
+
+#include "aos/vision/blob/range_image.h"
+#include "aos/vision/image/image_types.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace vision {
+namespace testing {
+
+class YuyvYThresholdTest : public ::testing::Test {
+ public:
+  std::vector<char> RandomImage(ImageFormat format) {
+    std::vector<char> result;
+    result.resize(format.w * format.h * 2);
+    // Note: char is not a valid IntType for uniform_int_distribution, so
+    // draw ints covering char's range and narrow when storing.
+    std::uniform_int_distribution<int> distribution(
+        std::numeric_limits<char>::min(), std::numeric_limits<char>::max());
+    for (size_t i = 0; i < result.size(); ++i) {
+      result[i] = static_cast<char>(distribution(generator_));
+    }
+    return result;
+  }
+
+ private:
+  std::minstd_rand generator_;
+};
+
+// Verifies that a simple image is thresholded correctly.
+//
+// Specifically, we want to get this result from the thresholding:
+//  --+-----
+//  +------+
+//  -++++++-
+//  +++-++++
+//  --------
+//  ++++-+++
+//  ++++++++
+//  +-+-+--+
+TEST_F(YuyvYThresholdTest, SimpleImage) {
+  ImageFormat format;
+  format.w = 8;
+  format.h = 8;
+
+  std::vector<std::vector<ImageRange>> expected_ranges;
+  std::vector<char> image;
+  image.resize(8 * 8 * 2);
+  //  --+-----
+  image[0 * 2 + 0 * 16] = 0;
+  image[1 * 2 + 0 * 16] = 0;
+  image[2 * 2 + 0 * 16] = 128;
+  image[3 * 2 + 0 * 16] = 127;
+  image[4 * 2 + 0 * 16] = 0;
+  image[5 * 2 + 0 * 16] = 0;
+  image[6 * 2 + 0 * 16] = 0;
+  image[7 * 2 + 0 * 16] = 0;
+  expected_ranges.push_back({{{2, 3}}});
+  //  +------+
+  image[0 * 2 + 1 * 16] = 128;
+  image[1 * 2 + 1 * 16] = 0;
+  image[2 * 2 + 1 * 16] = 0;
+  image[3 * 2 + 1 * 16] = 10;
+  image[4 * 2 + 1 * 16] = 30;
+  image[5 * 2 + 1 * 16] = 50;
+  image[6 * 2 + 1 * 16] = 70;
+  image[7 * 2 + 1 * 16] = 255;
+  expected_ranges.push_back({{{0, 1}, {7, 8}}});
+  //  -++++++-
+  image[0 * 2 + 2 * 16] = 73;
+  image[1 * 2 + 2 * 16] = 246;
+  image[2 * 2 + 2 * 16] = 247;
+  image[3 * 2 + 2 * 16] = 248;
+  image[4 * 2 + 2 * 16] = 249;
+  image[5 * 2 + 2 * 16] = 250;
+  image[6 * 2 + 2 * 16] = 250;
+  image[7 * 2 + 2 * 16] = 45;
+  expected_ranges.push_back({{{1, 7}}});
+  //  +++-++++
+  image[0 * 2 + 3 * 16] = 128;
+  image[1 * 2 + 3 * 16] = 134;
+  image[2 * 2 + 3 * 16] = 250;
+  image[3 * 2 + 3 * 16] = 0;
+  image[4 * 2 + 3 * 16] = 230;
+  image[5 * 2 + 3 * 16] = 230;
+  image[6 * 2 + 3 * 16] = 230;
+  image[7 * 2 + 3 * 16] = 210;
+  expected_ranges.push_back({{{0, 3}, {4, 8}}});
+  //  --------
+  image[0 * 2 + 4 * 16] = 7;
+  image[1 * 2 + 4 * 16] = 120;
+  image[2 * 2 + 4 * 16] = 127;
+  image[3 * 2 + 4 * 16] = 0;
+  image[4 * 2 + 4 * 16] = 50;
+  image[5 * 2 + 4 * 16] = 80;
+  image[6 * 2 + 4 * 16] = 110;
+  image[7 * 2 + 4 * 16] = 25;
+  expected_ranges.push_back({{}});
+  //  ++++-+++
+  image[0 * 2 + 5 * 16] = 140;
+  image[1 * 2 + 5 * 16] = 140;
+  image[2 * 2 + 5 * 16] = 140;
+  image[3 * 2 + 5 * 16] = 140;
+  image[4 * 2 + 5 * 16] = 0;
+  image[5 * 2 + 5 * 16] = 140;
+  image[6 * 2 + 5 * 16] = 140;
+  image[7 * 2 + 5 * 16] = 140;
+  expected_ranges.push_back({{{0, 4}, {5, 8}}});
+  //  ++++++++
+  image[0 * 2 + 6 * 16] = 128;
+  image[1 * 2 + 6 * 16] = 128;
+  image[2 * 2 + 6 * 16] = 128;
+  image[3 * 2 + 6 * 16] = 128;
+  image[4 * 2 + 6 * 16] = 128;
+  image[5 * 2 + 6 * 16] = 128;
+  image[6 * 2 + 6 * 16] = 128;
+  image[7 * 2 + 6 * 16] = 128;
+  expected_ranges.push_back({{{0, 8}}});
+  //  +-+-+--+
+  image[0 * 2 + 7 * 16] = 200;
+  image[1 * 2 + 7 * 16] = 0;
+  image[2 * 2 + 7 * 16] = 200;
+  image[3 * 2 + 7 * 16] = 0;
+  image[4 * 2 + 7 * 16] = 200;
+  image[5 * 2 + 7 * 16] = 0;
+  image[6 * 2 + 7 * 16] = 0;
+  image[7 * 2 + 7 * 16] = 200;
+  expected_ranges.push_back({{{0, 1}, {2, 3}, {4, 5}, {7, 8}}});
+  const RangeImage expected_result(0, std::move(expected_ranges));
+
+  const auto slow_result = SlowYuyvYThreshold(format, image.data(), 127);
+  ASSERT_EQ(expected_result, slow_result);
+  const auto fast_result = FastYuyvYThreshold(format, image.data(), 127);
+  ASSERT_EQ(expected_result, fast_result);
+}
+
+// Verifies that a couple of completely random images match.
+TEST_F(YuyvYThresholdTest, Random) {
+  for (int i = 0; i < 10; ++i) {
+    ImageFormat small_format;
+    small_format.w = 16;
+    small_format.h = 16;
+    const auto small_image = RandomImage(small_format);
+    const auto slow_result =
+        SlowYuyvYThreshold(small_format, small_image.data(), 127);
+    const auto fast_result =
+        FastYuyvYThreshold(small_format, small_image.data(), 127);
+    ASSERT_EQ(slow_result, fast_result);
+  }
+  for (int i = 0; i < 10; ++i) {
+    ImageFormat large_format;
+    large_format.w = 1024;
+    large_format.h = 512;
+    const auto large_image = RandomImage(large_format);
+    const auto slow_result =
+        SlowYuyvYThreshold(large_format, large_image.data(), 127);
+    const auto fast_result =
+        FastYuyvYThreshold(large_format, large_image.data(), 127);
+    ASSERT_EQ(slow_result, fast_result);
+  }
+}
+
+// Verifies that changing the U and V values doesn't affect the result.
+TEST_F(YuyvYThresholdTest, UVIgnored) {
+  ImageFormat format;
+  format.w = 32;
+  format.h = 20;
+  const auto baseline_image = RandomImage(format);
+  const auto baseline_result =
+      SlowYuyvYThreshold(format, baseline_image.data(), 127);
+  for (int i = 0; i < 5; ++i) {
+    auto tweaked_image = RandomImage(format);
+    for (int y = 0; y < format.h; ++y) {
+      for (int x = 0; x < format.w; ++x) {
+        tweaked_image[x * 2 + y * format.w * 2] =
+            baseline_image[x * 2 + y * format.w * 2];
+      }
+    }
+
+    const auto slow_result =
+        SlowYuyvYThreshold(format, tweaked_image.data(), 127);
+    ASSERT_EQ(baseline_result, slow_result);
+    const auto fast_result =
+        FastYuyvYThreshold(format, tweaked_image.data(), 127);
+    ASSERT_EQ(baseline_result, fast_result);
+  }
+}
+
+}  // namespace testing
+}  // namespace vision
+}  // namespace aos
diff --git a/aos/vision/debug/overlay.h b/aos/vision/debug/overlay.h
index 8eb8a23..c668e17 100644
--- a/aos/vision/debug/overlay.h
+++ b/aos/vision/debug/overlay.h
@@ -129,6 +129,11 @@
     lines_.emplace_back(std::pair<Segment<2>, PixelRef>(seg, newColor));
   }
 
+  void DrawCross(::Eigen::Vector2f center, int width,
+                 aos::vision::PixelRef color) {
+    DrawCross(aos::vision::Vector<2>(center.x(), center.y()), width, color);
+  }
+
   void DrawCross(aos::vision::Vector<2> center, int width,
                  aos::vision::PixelRef color) {
     using namespace aos::vision;
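
A hedged usage sketch of the new overload: callers that already have an ::Eigen::Vector2f (for example a position straight out of the localizer) can pass it through directly instead of converting to aos::vision::Vector<2> by hand. The OverlayT template parameter and the {r, g, b} field order of PixelRef are assumptions here:

```cpp
#include "Eigen/Dense"
#include "aos/vision/image/image_types.h"

// OverlayT is assumed to expose the DrawCross overloads shown above.
template <typename OverlayT>
void MarkTarget(OverlayT *overlay, const ::Eigen::Vector2f &target) {
  // Forwards to the aos::vision::Vector<2> version internally.
  overlay->DrawCross(target, /*width=*/5, aos::vision::PixelRef{255, 0, 0});
}
```
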
diff --git a/aos/vision/math/vector.h b/aos/vision/math/vector.h
index 642c63f..5e7520f 100644
--- a/aos/vision/math/vector.h
+++ b/aos/vision/math/vector.h
@@ -91,6 +91,7 @@
   double Mag() const { return data_.norm(); }
 
   // Get underlying data structure
+  // TODO(austin): Everyone uses column not row vectors.
   ::Eigen::Matrix<double, 1, size> GetData() const { return data_; }
 
   // Set underlying data structure
diff --git a/aos/vision/tools/jpeg_vision_test.cc b/aos/vision/tools/jpeg_vision_test.cc
index 072b57b..806fd80 100644
--- a/aos/vision/tools/jpeg_vision_test.cc
+++ b/aos/vision/tools/jpeg_vision_test.cc
@@ -74,7 +74,7 @@
     prev_data_ = data.to_string();
 
     // Threshold the image with the given lambda.
-    RangeImage rimg = DoThreshold(img_ptr, [](PixelRef &px) {
+    RangeImage rimg = ThresholdImageWithFunction(img_ptr, [](PixelRef &px) {
       if (px.g > 88) {
         uint8_t min = std::min(px.b, px.r);
         uint8_t max = std::max(px.b, px.r);
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index a4d2ca6..d3ed728 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -46,6 +46,7 @@
     deps = [
         ":drivetrain_config",
         "//aos/containers:priority_queue",
+        "//aos/util:math",
         "//frc971/control_loops:c2d",
         "//frc971/control_loops:runge_kutta",
         "//third_party/eigen",
diff --git a/frc971/control_loops/drivetrain/drivetrain.q b/frc971/control_loops/drivetrain/drivetrain.q
index 66e9c32..74be414 100644
--- a/frc971/control_loops/drivetrain/drivetrain.q
+++ b/frc971/control_loops/drivetrain/drivetrain.q
@@ -57,6 +57,18 @@
   float right_velocity;
 };
 
+// For logging state of the line follower.
+struct LineFollowLogging {
+  // Whether we are currently freezing target choice.
+  bool frozen;
+  // Whether we currently have a target.
+  bool have_target;
+  // Absolute position of the current goal.
+  float x;
+  float y;
+  float theta;
+};
+
 queue_group DrivetrainQueue {
   implements aos.control_loops.ControlLoop;
 
@@ -184,6 +196,7 @@
     GearLogging gear_logging;
     CIMLogging cim_logging;
     TrajectoryLogging trajectory_logging;
+    LineFollowLogging line_follow_logging;
   };
 
   queue Goal goal;
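
A hedged sketch of consuming the new status fields from C++; the generated header path and the drivetrain_queue instance name are assumptions, and the drivetrain test below reads the same fields through my_drivetrain_queue_:

```cpp
#include <cstdio>

#include "frc971/control_loops/drivetrain/drivetrain.q.h"

bool PrintLineFollowTarget() {
  ::frc971::control_loops::drivetrain_queue.status.FetchLatest();
  const auto *status = ::frc971::control_loops::drivetrain_queue.status.get();
  if (status == nullptr || !status->line_follow_logging.have_target) {
    return false;
  }
  const auto &logging = status->line_follow_logging;
  printf("line follow target (%f, %f) theta %f%s\n", logging.x, logging.y,
         logging.theta, logging.frozen ? " [frozen]" : "");
  return true;
}
```

(x, y, and theta are floats; they promote to double through printf's varargs.)
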
diff --git a/frc971/control_loops/drivetrain/drivetrain_config.h b/frc971/control_loops/drivetrain/drivetrain_config.h
index 0f57bf9..53c2315 100644
--- a/frc971/control_loops/drivetrain/drivetrain_config.h
+++ b/frc971/control_loops/drivetrain/drivetrain_config.h
@@ -92,6 +92,9 @@
 
   Scalar wheel_multiplier;
 
+  // Whether the shift button on the pistol grip enables line following mode.
+  bool pistol_grip_shift_enables_line_follow = false;
+
   // Converts the robot state to a linear distance position, velocity.
   static Eigen::Matrix<Scalar, 2, 1> LeftRightToLinear(
       const Eigen::Matrix<Scalar, 7, 1> &left_right) {
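
Since the new field defaults to false, existing robots keep the old shift behavior; a year-specific config opts in by flipping the flag before the config reaches DrivetrainInputReader::Make(). A hedged sketch (helper name assumed):

```cpp
#include "frc971/control_loops/drivetrain/drivetrain_config.h"

// Returns a copy of some existing config with the pistol grip's top button
// repurposed for line following.
::frc971::control_loops::drivetrain::DrivetrainConfig<double>
WithLineFollowButton(
    ::frc971::control_loops::drivetrain::DrivetrainConfig<double> config) {
  config.pistol_grip_shift_enables_line_follow = true;
  return config;
}
```
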
diff --git a/frc971/control_loops/drivetrain/drivetrain_lib_test.cc b/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
index 5216916..b00ab3f 100644
--- a/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
+++ b/frc971/control_loops/drivetrain/drivetrain_lib_test.cc
@@ -732,6 +732,15 @@
   goal.Send();
 
   RunForTime(chrono::seconds(5));
+
+  my_drivetrain_queue_.status.FetchLatest();
+  EXPECT_TRUE(my_drivetrain_queue_.status->line_follow_logging.frozen);
+  EXPECT_TRUE(my_drivetrain_queue_.status->line_follow_logging.have_target);
+  EXPECT_EQ(1.0, my_drivetrain_queue_.status->line_follow_logging.x);
+  EXPECT_EQ(1.0, my_drivetrain_queue_.status->line_follow_logging.y);
+  EXPECT_FLOAT_EQ(M_PI_4,
+                  my_drivetrain_queue_.status->line_follow_logging.theta);
+
   // Should have run off the end of the target, running along the y=x line.
   EXPECT_LT(1.0, drivetrain_motor_plant_.GetPosition().x());
   EXPECT_NEAR(drivetrain_motor_plant_.GetPosition().x(),
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf.h b/frc971/control_loops/drivetrain/hybrid_ekf.h
index 82f409c..119386a 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf.h
+++ b/frc971/control_loops/drivetrain/hybrid_ekf.h
@@ -4,6 +4,7 @@
 #include <chrono>
 
 #include "aos/containers/priority_queue.h"
+#include "aos/util/math.h"
 #include "frc971/control_loops/c2d.h"
 #include "frc971/control_loops/runge_kutta.h"
 #include "Eigen/Dense"
@@ -48,11 +49,8 @@
     kLeftVelocity = 4,
     kRightEncoder = 5,
     kRightVelocity = 6,
-    kLeftVoltageError = 7,
-    kRightVoltageError = 8 ,
-    kAngularError = 9,
   };
-  static constexpr int kNStates = 10;
+  static constexpr int kNStates = 7;
   static constexpr int kNInputs = 2;
   // Number of previous samples to save.
   static constexpr int kSaveSamples = 50;
@@ -70,19 +68,11 @@
   // variable-size measurement updates.
   typedef Eigen::Matrix<Scalar, kNOutputs, 1> Output;
   typedef Eigen::Matrix<Scalar, kNStates, kNStates> StateSquare;
-  // State is [x_position, y_position, theta, Kalman States], where
-  // Kalman States are the states from the standard drivetrain Kalman Filter,
-  // which is: [left encoder, left ground vel, right encoder, right ground vel,
-  // left voltage error, right voltage error, angular_error], where:
-  // left/right encoder should correspond directly to encoder readings
-  // left/right velocities are the velocity of the left/right sides over the
+  // State is [x_position, y_position, theta, left encoder, left ground vel,
+  // right encoder, right ground vel]. left/right encoder should correspond
+  // directly to encoder readings. left/right velocities are the velocity of
+  // the left/right sides over the
   //   ground (i.e., corrected for angular_error).
-  // voltage errors are the difference between commanded and effective voltage,
-  //   used to estimate consistent modelling errors (e.g., friction).
-  // angular error is the difference between the angular velocity as estimated
-  //   by the encoders vs. estimated by the gyro, such as might be caused by
-  //   wheels on one side of the drivetrain being too small or one side's
-  //   wheels slipping more than the other.
   typedef Eigen::Matrix<Scalar, kNStates, 1> State;
 
   // Constructs a HybridEkf for a particular drivetrain.
@@ -406,9 +396,7 @@
 
   // Encoder derivatives
   A_continuous_(kLeftEncoder, kLeftVelocity) = 1.0;
-  A_continuous_(kLeftEncoder, kAngularError) = 1.0;
   A_continuous_(kRightEncoder, kRightVelocity) = 1.0;
-  A_continuous_(kRightEncoder, kAngularError) = -1.0;
 
   // Pull velocity derivatives from velocity matrices.
   // Note that this looks really awkward (doesn't use
@@ -425,21 +413,22 @@
   B_continuous_.setZero();
   B_continuous_.row(kLeftVelocity) = vel_coefs.B_continuous.row(0);
   B_continuous_.row(kRightVelocity) = vel_coefs.B_continuous.row(1);
-  A_continuous_.template block<kNStates, kNInputs>(0, 7) = B_continuous_;
 
   Q_continuous_.setZero();
   // TODO(james): Improve estimates of process noise--e.g., X/Y noise can
   // probably be reduced when we are stopped because you rarely jump randomly.
   // Or maybe it's more appropriate to scale wheelspeed noise with wheelspeed,
   // since the wheels aren't likely to slip much stopped.
-  Q_continuous_(kX, kX) = 0.005;
-  Q_continuous_(kY, kY) = 0.005;
-  Q_continuous_(kTheta, kTheta) = 0.001;
-  Q_continuous_.template block<7, 7>(3, 3) =
-      dt_config_.make_kf_drivetrain_loop().observer().coefficients().Q;
+  Q_continuous_(kX, kX) = 0.01;
+  Q_continuous_(kY, kY) = 0.01;
+  Q_continuous_(kTheta, kTheta) = 0.0002;
+  Q_continuous_(kLeftEncoder, kLeftEncoder) = ::std::pow(0.03, 2.0);
+  Q_continuous_(kRightEncoder, kRightEncoder) = ::std::pow(0.03, 2.0);
+  Q_continuous_(kLeftVelocity, kLeftVelocity) = ::std::pow(0.1, 2.0);
+  Q_continuous_(kRightVelocity, kRightVelocity) = ::std::pow(0.1, 2.0);
 
   P_.setZero();
-  P_.diagonal() << 0.1, 0.1, 0.01, 0.02, 0.01, 0.02, 0.01, 1, 1, 0.03;
+  P_.diagonal() << 0.1, 0.1, 0.01, 0.02, 0.01, 0.02, 0.01;
 
   H_encoders_and_gyro_.setZero();
   // Encoders are stored directly in the state matrix, so are a minor
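
With the voltage-error and angular-error entries gone, the state is a plain 7-vector in the StateIdx order. A standalone sketch of building one in the new layout, mirroring the (State() << ...).finished() expressions in the updated tests below:

```cpp
#include "Eigen/Dense"

// [x, y, theta, left encoder, left velocity, right encoder, right velocity].
Eigen::Matrix<double, 7, 1> MakeState(double x, double y, double theta,
                                      double left_encoder,
                                      double left_velocity,
                                      double right_encoder,
                                      double right_velocity) {
  return (Eigen::Matrix<double, 7, 1>() << x, y, theta, left_encoder,
          left_velocity, right_encoder, right_velocity)
      .finished();
}
```
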
diff --git a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
index 27119b1..1702ec4 100644
--- a/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
+++ b/frc971/control_loops/drivetrain/hybrid_ekf_test.cc
@@ -52,22 +52,16 @@
     EXPECT_EQ(Xdot_ekf(StateIdx::kX, 0), ctheta * (left_vel + right_vel) / 2.0);
     EXPECT_EQ(Xdot_ekf(StateIdx::kY, 0), stheta * (left_vel + right_vel) / 2.0);
     EXPECT_EQ(Xdot_ekf(StateIdx::kTheta, 0), (right_vel - left_vel) / diameter);
-    EXPECT_EQ(Xdot_ekf(StateIdx::kLeftEncoder, 0),
-              left_vel + X(StateIdx::kAngularError, 0));
-    EXPECT_EQ(Xdot_ekf(StateIdx::kRightEncoder, 0),
-              right_vel - X(StateIdx::kAngularError, 0));
+    EXPECT_EQ(Xdot_ekf(StateIdx::kLeftEncoder, 0), left_vel);
+    EXPECT_EQ(Xdot_ekf(StateIdx::kRightEncoder, 0), right_vel);
 
     Eigen::Matrix<double, 2, 1> vel_x(X(StateIdx::kLeftVelocity, 0),
                                       X(StateIdx::kRightVelocity, 0));
     Eigen::Matrix<double, 2, 1> expected_vel_X =
         velocity_plant_coefs_.A_continuous * vel_x +
-        velocity_plant_coefs_.B_continuous *
-            (U + X.middleRows<2>(StateIdx::kLeftVoltageError));
+        velocity_plant_coefs_.B_continuous * U;
     EXPECT_EQ(Xdot_ekf(StateIdx::kLeftVelocity, 0), expected_vel_X(0, 0));
     EXPECT_EQ(Xdot_ekf(StateIdx::kRightVelocity, 0), expected_vel_X(1, 0));
-
-    // Dynamics don't expect error terms to change:
-    EXPECT_EQ(0.0, Xdot_ekf.bottomRows<3>().squaredNorm());
   }
   State DiffEq(const State &X, const Input &U) {
     return ekf_.DiffEq(X, U);
@@ -93,18 +87,14 @@
   CheckDiffEq(State::Zero(), Input::Zero());
   CheckDiffEq(State::Zero(), {-5.0, 5.0});
   CheckDiffEq(State::Zero(), {12.0, -3.0});
-  CheckDiffEq((State() << 100.0, 200.0, M_PI, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, M_PI, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
-  CheckDiffEq((State() << 100.0, 200.0, 2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, 2.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
-  CheckDiffEq((State() << 100.0, 200.0, -2.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, -2.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
   // And check that a theta outside of [-M_PI, M_PI] works.
-  CheckDiffEq((State() << 100.0, 200.0, 200.0, 1.234, 0.5, 1.2, 0.6, 3.0, -4.0,
-               0.3).finished(),
+  CheckDiffEq((State() << 100.0, 200.0, 200.0, 1.234, 0.5, 1.2, 0.6).finished(),
               {5.0, 6.0});
 }
 
@@ -112,7 +102,7 @@
 // with zero change in time, the state should approach the estimation.
 TEST_F(HybridEkfTest, ZeroTimeCorrect) {
   HybridEkf<>::Output Z(0.5, 0.5, 1);
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setIdentity();
   auto h = [H](const State &X, const Input &) { return H * X; };
   auto dhdx = [H](const State &) { return H; };
@@ -140,7 +130,7 @@
   HybridEkf<>::Output Z(0, 0, 0);
   // Use true_X to track what we think the true robot state is.
   State true_X = ekf_.X_hat();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h = [H](const State &X, const Input &) { return H * X; };
   auto dhdx = [H](const State &) { return H; };
@@ -171,9 +161,6 @@
   EXPECT_NEAR(ekf_.X_hat(StateIdx::kLeftVelocity) * 0.8,
               ekf_.X_hat(StateIdx::kRightVelocity),
               ekf_.X_hat(StateIdx::kLeftVelocity) * 0.1);
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kLeftVoltageError));
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kRightVoltageError));
-  EXPECT_EQ(0.0, ekf_.X_hat(StateIdx::kAngularError));
   const double ending_p_norm = ekf_.P().norm();
   // Due to lack of corrections, noise should've increased.
   EXPECT_GT(ending_p_norm, starting_p_norm * 1.10);
@@ -193,7 +180,7 @@
 TEST_P(HybridEkfOldCorrectionsTest, CreateOldCorrection) {
   HybridEkf<>::Output Z;
   Z.setZero();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h_zero = [H](const State &X, const Input &) { return H * X; };
   auto dhdx_zero = [H](const State &) { return H; };
@@ -231,7 +218,7 @@
   expected_X_hat(0, 0) = Z(0, 0);
   expected_X_hat(1, 0) = Z(1, 0) + modeled_X_hat(0, 0);
   expected_X_hat(2, 0) = Z(2, 0);
-  EXPECT_LT((expected_X_hat.topRows<7>() - ekf_.X_hat().topRows<7>()).norm(),
+  EXPECT_LT((expected_X_hat - ekf_.X_hat()).norm(),
            1e-3)
       << "X_hat: " << ekf_.X_hat() << " expected " << expected_X_hat;
   // The covariance after the predictions but before the corrections should
@@ -249,7 +236,7 @@
 TEST_F(HybridEkfTest, DiscardTooOldCorrection) {
   HybridEkf<>::Output Z;
   Z.setZero();
-  Eigen::Matrix<double, 3, 10> H;
+  Eigen::Matrix<double, 3, 7> H;
   H.setZero();
   auto h_zero = [H](const State &X, const Input &) { return H * X; };
   auto dhdx_zero = [H](const State &) { return H; };
@@ -304,11 +291,11 @@
 }
 
 // Tests that encoder updates cause everything to converge properly in the
-// presence of voltage error.
+// presence of an initial velocity error.
 TEST_F(HybridEkfTest, PerfectEncoderUpdatesWithVoltageError) {
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kLeftVoltageError, 0) = 2.0;
-  true_X(StateIdx::kRightVoltageError, 0) = 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.2;
+  true_X(StateIdx::kRightVelocity, 0) = 0.2;
   Input U(5.0, 5.0);
   for (int ii = 0; ii < 1000; ++ii) {
     true_X = Update(true_X, U);
@@ -328,11 +315,11 @@
 
 // Tests encoder/gyro updates when we have some errors in our estimate.
 TEST_F(HybridEkfTest, PerfectEncoderUpdateConverges) {
-  // In order to simulate modelling errors, we add an angular_error and start
-  // the encoder values slightly off.
+  // In order to simulate modelling errors, we start the encoder values slightly
+  // off.
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kAngularError, 0) = 1.0;
   true_X(StateIdx::kLeftEncoder, 0) += 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.1;
   true_X(StateIdx::kRightEncoder, 0) -= 2.0;
   // After enough time, everything should converge to near-perfect (if there
   // were any errors in the original absolute state (x/y/theta) state, then we
@@ -350,7 +337,7 @@
                                    dt_config_.robot_radius / 2.0,
                                U, t0_ + (ii + 1) * dt_config_.dt);
   }
-  EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 1e-5)
+  EXPECT_NEAR((true_X - ekf_.X_hat()).norm(), 0.0, 1e-4)
       << "Expected non-x/y estimates to converge to correct. "
          "Estimated X_hat:\n"
       << ekf_.X_hat() << "\ntrue X:\n"
@@ -359,11 +346,11 @@
 
 // Tests encoder/gyro updates in a realistic-ish scenario with noise:
 TEST_F(HybridEkfTest, RealisticEncoderUpdateConverges) {
-  // In order to simulate modelling errors, we add an angular_error and start
-  // the encoder values slightly off.
+  // In order to simulate modelling errors, we start the encoder values slightly
+  // off.
   State true_X = ekf_.X_hat();
-  true_X(StateIdx::kAngularError, 0) = 1.0;
   true_X(StateIdx::kLeftEncoder, 0) += 2.0;
+  true_X(StateIdx::kLeftVelocity, 0) = 0.1;
   true_X(StateIdx::kRightEncoder, 0) -= 2.0;
   Input U(10.0, 5.0);
   for (int ii = 0; ii < 100; ++ii) {
@@ -377,7 +364,7 @@
         U, t0_ + (ii + 1) * dt_config_.dt);
   }
   EXPECT_NEAR(
-      (true_X.bottomRows<9>() - ekf_.X_hat().bottomRows<9>()).squaredNorm(),
+      (true_X.bottomRows<6>() - ekf_.X_hat().bottomRows<6>()).squaredNorm(),
       0.0, 2e-3)
       << "Expected non-x/y estimates to converge to correct. "
          "Estimated X_hat:\n" << ekf_.X_hat() << "\ntrue X:\n" << true_X;
@@ -411,7 +398,7 @@
   // Check that we die when only one of h and dhdx are provided:
   EXPECT_DEATH(ekf_.Correct({1, 2, 3}, &U, {}, {},
                             [](const State &) {
-                              return Eigen::Matrix<double, 3, 10>::Zero();
+                              return Eigen::Matrix<double, 3, 7>::Zero();
                             },
                             {}, t0_ + ::std::chrono::seconds(1)),
                "make_h");
diff --git a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
index c4ca5ef..b5417af 100644
--- a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
+++ b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
@@ -182,7 +182,8 @@
   // Because we assume the target selector may have some internal state (e.g.,
   // not confirming a target until some time has passed), we should call
   // UpdateSelection every time.
-  bool new_target = target_selector_->UpdateSelection(abs_state);
+  bool new_target =
+      target_selector_->UpdateSelection(abs_state, goal_velocity_);
   if (freeze_target_) {
     // When freezing the target, only make changes if we didn't have a good
     // target before.
@@ -233,6 +234,15 @@
   U_ *= (maxU > kMaxVoltage) ? kMaxVoltage / maxU : 1.0;
 }
 
+void LineFollowDrivetrain::PopulateStatus(
+    ::frc971::control_loops::DrivetrainQueue::Status *status) const {
+  status->line_follow_logging.frozen = freeze_target_;
+  status->line_follow_logging.have_target = have_target_;
+  status->line_follow_logging.x = target_pose_.abs_pos().x();
+  status->line_follow_logging.y = target_pose_.abs_pos().y();
+  status->line_follow_logging.theta = target_pose_.abs_theta();
+}
+
 }  // namespace drivetrain
 }  // namespace control_loops
 }  // namespace frc971
diff --git a/frc971/control_loops/drivetrain/line_follow_drivetrain.h b/frc971/control_loops/drivetrain/line_follow_drivetrain.h
index 83fba20..de9ff5a 100644
--- a/frc971/control_loops/drivetrain/line_follow_drivetrain.h
+++ b/frc971/control_loops/drivetrain/line_follow_drivetrain.h
@@ -41,9 +41,8 @@
   // over.
   bool SetOutput(
       ::frc971::control_loops::DrivetrainQueue::Output *output);
-  // TODO(james): Populate.
   void PopulateStatus(
-      ::frc971::control_loops::DrivetrainQueue::Status * /*status*/) const {}
+      ::frc971::control_loops::DrivetrainQueue::Status *status) const;
 
  private:
   // Nominal max voltage.
diff --git a/frc971/control_loops/drivetrain/localizer.h b/frc971/control_loops/drivetrain/localizer.h
index af07089..f0685dd 100644
--- a/frc971/control_loops/drivetrain/localizer.h
+++ b/frc971/control_loops/drivetrain/localizer.h
@@ -16,9 +16,12 @@
   // Take the state as [x, y, theta, left_vel, right_vel]
   // If unable to determine what target to go for, returns false. If a viable
   // target is selected, then returns true and sets target_pose.
+  // command_speed is the goal speed of the current drivetrain, generally
+  // generated from the throttle and meant to signify driver intent.
   // TODO(james): Some implementations may also want a drivetrain goal so that
   // driver intent can be divined more directly.
-  virtual bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state) = 0;
+  virtual bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                               double command_speed) = 0;
   // Gets the current target pose. Should only be called if UpdateSelection has
   // returned true.
   virtual TypedPose<double> TargetPose() const = 0;
@@ -59,7 +62,7 @@
 // manually set the target selector state.
 class TrivialTargetSelector : public TargetSelectorInterface {
  public:
-  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &) override {
+  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &, double) override {
     return has_target_;
   }
   TypedPose<double> TargetPose() const override { return pose_; }
@@ -95,9 +98,12 @@
   }
 
   void ResetPosition(double x, double y, double theta) override {
+    const double left_encoder = ekf_.X_hat(StateIdx::kLeftEncoder);
+    const double right_encoder = ekf_.X_hat(StateIdx::kRightEncoder);
     ekf_.ResetInitialState(
         ::aos::monotonic_clock::now(),
-        (Ekf::State() << x, y, theta, 0, 0, 0, 0, 0, 0, 0).finished(),
+        (Ekf::State() << x, y, theta, left_encoder, 0, right_encoder, 0)
+            .finished(),
         ekf_.P());
   };
 
@@ -110,12 +116,8 @@
   double right_velocity() const override {
     return ekf_.X_hat(StateIdx::kRightVelocity);
   }
-  double left_voltage_error() const override {
-    return ekf_.X_hat(StateIdx::kLeftVoltageError);
-  }
-  double right_voltage_error() const override {
-    return ekf_.X_hat(StateIdx::kRightVoltageError);
-  }
+  double left_voltage_error() const override { return 0.0; }
+  double right_voltage_error() const override { return 0.0; }
 
   TrivialTargetSelector *target_selector() override {
     return &target_selector_;
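
A hedged sketch of a selector that actually uses the new command_speed argument, treating it as driver intent: only confirm a target while meaningful speed is being commanded, deferring to a wrapped TrivialTargetSelector for the pose. This assumes UpdateSelection and TargetPose are the only pure-virtual members of TargetSelectorInterface, with namespaces inferred from the file's location:

```cpp
#include <cmath>

#include "frc971/control_loops/drivetrain/localizer.h"

namespace frc971 {
namespace control_loops {
namespace drivetrain {

class SpeedGatedTargetSelector : public TargetSelectorInterface {
 public:
  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
                       double command_speed) override {
    // Only latch a target while the driver is commanding meaningful motion;
    // a stationary robot keeps whatever it had.
    return ::std::abs(command_speed) > 0.1 &&
           selector_.UpdateSelection(state, command_speed);
  }
  TypedPose<double> TargetPose() const override {
    return selector_.TargetPose();
  }
  TrivialTargetSelector *mutable_selector() { return &selector_; }

 private:
  TrivialTargetSelector selector_;
};

}  // namespace drivetrain
}  // namespace control_loops
}  // namespace frc971
```
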
diff --git a/third_party/cimg/BUILD b/third_party/cimg/BUILD
new file mode 100644
index 0000000..0dfc79b
--- /dev/null
+++ b/third_party/cimg/BUILD
@@ -0,0 +1,13 @@
+licenses(["notice"])
+
+cc_library(
+    name = "CImg",
+    hdrs = glob([
+        "CImg.h",
+        "plugins/*.h",
+    ]),
+    visibility = ["//visibility:public"],
+    deps = [
+        "//third_party/libjpeg",
+    ],
+)
diff --git a/third_party/cimg/CImg.h b/third_party/cimg/CImg.h
index 20f1fc6..fc52e13 100644
--- a/third_party/cimg/CImg.h
+++ b/third_party/cimg/CImg.h
@@ -436,7 +436,7 @@
 // (see methods 'CImg<T>::{load,save}_jpeg()').
 #ifdef cimg_use_jpeg
 extern "C" {
-#include "jpeglib.h"
+#include "third_party/libjpeg/jpeglib.h"
 #include "setjmp.h"
 }
 #endif
diff --git a/y2016/vision/target_sender.cc b/y2016/vision/target_sender.cc
index d9208ec..3e29085 100644
--- a/y2016/vision/target_sender.cc
+++ b/y2016/vision/target_sender.cc
@@ -98,8 +98,8 @@
     DecodeJpeg(data, &image_);
     auto fmt = image_.fmt();
 
-    RangeImage rimg =
-        DoThreshold(image_.get(), [](PixelRef &px) { return (px.g > 88); });
+    RangeImage rimg = ThresholdImageWithFunction(
+        image_.get(), [](PixelRef px) { return (px.g > 88); });
 
     // flip the right image as this camera is mount backward
     if (camera_index_ == 0) {
diff --git a/y2017/vision/target_finder.cc b/y2017/vision/target_finder.cc
index a6b049c..91e801e 100644
--- a/y2017/vision/target_finder.cc
+++ b/y2017/vision/target_finder.cc
@@ -103,17 +103,18 @@
 }
 
 aos::vision::RangeImage TargetFinder::Threshold(aos::vision::ImagePtr image) {
-  return aos::vision::DoThreshold(image, [&](aos::vision::PixelRef &px) {
-    if (px.g > 88) {
-      uint8_t min = std::min(px.b, px.r);
-      uint8_t max = std::max(px.b, px.r);
-      if (min >= px.g || max >= px.g) return false;
-      uint8_t a = px.g - min;
-      uint8_t b = px.g - max;
-      return (a > 10 && b > 10);
-    }
-    return false;
-  });
+  return aos::vision::ThresholdImageWithFunction(
+      image, [&](aos::vision::PixelRef px) {
+        if (px.g > 88) {
+          uint8_t min = std::min(px.b, px.r);
+          uint8_t max = std::max(px.b, px.r);
+          if (min >= px.g || max >= px.g) return false;
+          uint8_t a = px.g - min;
+          uint8_t b = px.g - max;
+          return (a > 10 && b > 10);
+        }
+        return false;
+      });
 }
 
 void TargetFinder::PreFilter(BlobList &imgs) {
diff --git a/y2019/BUILD b/y2019/BUILD
index 4f11c36..276f09c 100644
--- a/y2019/BUILD
+++ b/y2019/BUILD
@@ -1,5 +1,6 @@
 load("//frc971:downloader.bzl", "robot_downloader")
 load("//aos/build:queues.bzl", "queue_library")
+load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
 
 robot_downloader(
     start_binaries = [
@@ -113,6 +114,7 @@
     ],
     deps = [
         ":status_light",
+        ":vision_proto",
         "//aos:init",
         "//aos/actions:action_lib",
         "//aos/input:action_joystick_input",
@@ -127,8 +129,10 @@
         "//frc971/autonomous:auto_queue",
         "//frc971/autonomous:base_autonomous_actor",
         "//frc971/control_loops/drivetrain:drivetrain_queue",
+        "//frc971/control_loops/drivetrain:localizer_queue",
         "//y2019/control_loops/drivetrain:drivetrain_base",
         "//y2019/control_loops/superstructure:superstructure_queue",
+        "@com_google_protobuf//:protobuf",
     ],
 )
 
@@ -140,6 +144,12 @@
     visibility = ["//visibility:public"],
 )
 
+cc_proto_library(
+    name = "vision_proto",
+    srcs = ["vision.proto"],
+    visibility = ["//visibility:public"],
+)
+
 py_library(
     name = "python_init",
     srcs = ["__init__.py"],
diff --git a/y2019/constants.cc b/y2019/constants.cc
index 0a9a429..285cb21 100644
--- a/y2019/constants.cc
+++ b/y2019/constants.cc
@@ -82,7 +82,7 @@
   elevator_params->zeroing_voltage = 3.0;
   elevator_params->operating_voltage = 12.0;
   elevator_params->zeroing_profile_params = {0.1, 1.0};
-  elevator_params->default_profile_params = {4.0, 16.0};
+  elevator_params->default_profile_params = {4.0, 13.0};
   elevator_params->range = Values::kElevatorRange();
   elevator_params->make_integral_loop =
       &control_loops::superstructure::elevator::MakeIntegralElevatorLoop;
@@ -143,9 +143,9 @@
   stilts_params->zeroing_constants.allowable_encoder_error = 0.9;
 
   r->camera_noise_parameters = {.max_viewable_distance = 10.0,
-                                .heading_noise = 0.02,
-                                .nominal_distance_noise = 0.06,
-                                .nominal_skew_noise = 0.1,
+                                .heading_noise = 0.2,
+                                .nominal_distance_noise = 0.3,
+                                .nominal_skew_noise = 0.35,
                                 .nominal_height_noise = 0.01};
 
   // Deliberately make FOV a bit large so that we are overly conservative in
@@ -201,6 +201,8 @@
 
       stilts_params->zeroing_constants.measured_absolute_position = 0.043580;
       stilts->potentiometer_offset = -0.093820 + 0.0124 - 0.008334 + 0.004507;
+
+      FillCameraPoses(vision::PracticeBotTeensyId(), &r->cameras);
       break;
 
     case kCodingRobotTeamNumber:
@@ -306,8 +308,8 @@
   constexpr double kHpSlotY = InchToMeters((26 * 12 + 10.5) / 2.0 - 25.9);
   constexpr double kHpSlotTheta = M_PI;
 
-  constexpr double kNormalZ = 0.80;
-  constexpr double kPortZ = 0.99;
+  constexpr double kNormalZ = 0.85;
+  constexpr double kPortZ = 1.04;
 
   const Pose far_side_cargo_bay({kFarSideCargoBayX, kSideCargoBayY, kNormalZ},
                                 kSideCargoBayTheta);
diff --git a/y2019/control_loops/drivetrain/BUILD b/y2019/control_loops/drivetrain/BUILD
index de52215..5a4dd40 100644
--- a/y2019/control_loops/drivetrain/BUILD
+++ b/y2019/control_loops/drivetrain/BUILD
@@ -121,12 +121,34 @@
 )
 
 cc_library(
+    name = "target_selector",
+    srcs = ["target_selector.cc"],
+    hdrs = ["target_selector.h"],
+    deps = [
+        ":camera",
+        "//frc971/control_loops:pose",
+        "//frc971/control_loops/drivetrain:localizer",
+        "//y2019:constants",
+    ],
+)
+
+cc_test(
+    name = "target_selector_test",
+    srcs = ["target_selector_test.cc"],
+    deps = [
+        ":target_selector",
+        "//aos/testing:googletest",
+    ],
+)
+
+cc_library(
     name = "event_loop_localizer",
     srcs = ["event_loop_localizer.cc"],
     hdrs = ["event_loop_localizer.h"],
     deps = [
         ":camera_queue",
         ":localizer",
+        ":target_selector",
         "//frc971/control_loops/drivetrain:localizer",
         "//y2019:constants",
     ],
diff --git a/y2019/control_loops/drivetrain/camera.h b/y2019/control_loops/drivetrain/camera.h
index 2ab4b4f..d59ac21 100644
--- a/y2019/control_loops/drivetrain/camera.h
+++ b/y2019/control_loops/drivetrain/camera.h
@@ -213,14 +213,18 @@
     // This number is unitless and if greater than 1, implies that the target is
     // visible to the camera and if less than 1 implies it is too small to be
     // registered on the camera.
-    Scalar apparent_width =
-        ::std::max<Scalar>(0.0, ::std::cos(view->reading.skew) *
-                                    noise_parameters_.max_viewable_distance /
-                                    view->reading.distance);
+    const Scalar cosskew = ::std::cos(view->reading.skew);
+    Scalar apparent_width = ::std::max<Scalar>(
+        0.0, cosskew * noise_parameters_.max_viewable_distance /
+                 view->reading.distance);
+    // If we got wildly invalid distance or skew measurements, then set a very
+    // small apparent width.
+    if (view->reading.distance < 0 || cosskew < 0) {
+      apparent_width = 0.01;
+    }
     // As both a sanity check and for the sake of numerical stability, manually
-    // set apparent_width to something "very small" if the distance is too
-    // close.
-    if (view->reading.distance < 0.01) {
+    // set apparent_width to something "very small" if it is near zero.
+    if (apparent_width < 0.01) {
       apparent_width = 0.01;
     }
 
diff --git a/y2019/control_loops/drivetrain/camera_test.cc b/y2019/control_loops/drivetrain/camera_test.cc
index ce1a2a4..d0a4b85 100644
--- a/y2019/control_loops/drivetrain/camera_test.cc
+++ b/y2019/control_loops/drivetrain/camera_test.cc
@@ -154,10 +154,11 @@
   EXPECT_EQ(1u, camera_.target_views().size());
 }
 
+using Reading = TestCamera::TargetView::Reading;
+
 // Checks that reading noises have the expected characteristics (mostly, going
 // up linearly with distance):
 TEST_F(CameraTest, DistanceNoiseTest) {
-  using Reading = TestCamera::TargetView::Reading;
   const Reading normal_noise = camera_.target_views()[0].noise;
   // Get twice as close:
   base_pose_.mutable_pos()->y() /= 2.0;
@@ -169,6 +170,40 @@
   EXPECT_EQ(normal_noise.heading, closer_noise.heading);
 }
 
+class CameraViewParamTest : public CameraTest,
+                            public ::testing::WithParamInterface<Reading> {};
+
+// Checks that invalid or absurd measurements result in large but finite noises
+// and non-visible targets.
+TEST_P(CameraViewParamTest, InvalidReading) {
+  TestCamera::TargetView view;
+  view.reading = GetParam();
+  bool visible = true;
+  camera_.PopulateNoise(&view, &visible);
+  // Target should not be visible
+  EXPECT_FALSE(visible);
+  // We should end up with finite but very large noises when things are invalid.
+  EXPECT_TRUE(::std::isfinite(view.noise.heading));
+  EXPECT_TRUE(::std::isfinite(view.noise.distance));
+  EXPECT_TRUE(::std::isfinite(view.noise.skew));
+  EXPECT_TRUE(::std::isfinite(view.noise.height));
+  // Don't check heading noise because it is always constant.
+  EXPECT_LT(10, view.noise.distance);
+  EXPECT_LT(10, view.noise.skew);
+  EXPECT_LT(5, view.noise.height);
+}
+
+INSTANTIATE_TEST_CASE_P(
+    InvalidMeasurements, CameraViewParamTest,
+    ::testing::Values(
+        // heading, distance, height, skew
+        Reading({100.0, -10.0, -10.0, -3.0}), Reading({0.0, 1.0, 0.0, -3.0}),
+        Reading({0.0, 1.0, 0.0, 3.0}), Reading({0.0, 1.0, 0.0, 9.0}),
+        Reading({0.0, ::std::numeric_limits<double>::quiet_NaN(), 0.0, 0.0}),
+        Reading({0.0, ::std::numeric_limits<double>::infinity(), 0.0, 0.0}),
+        Reading({0.0, 1.0, 0.0, ::std::numeric_limits<double>::infinity()}),
+        Reading({0.0, 1.0, 0.0, ::std::numeric_limits<double>::quiet_NaN()})));
+
 }  // namespace testing
 }  // namespace control_loops
 }  // namespace y2019
diff --git a/y2019/control_loops/drivetrain/drivetrain_base.cc b/y2019/control_loops/drivetrain/drivetrain_base.cc
index 02bd805..1dfe99a 100644
--- a/y2019/control_loops/drivetrain/drivetrain_base.cc
+++ b/y2019/control_loops/drivetrain/drivetrain_base.cc
@@ -28,20 +28,31 @@
       ::frc971::control_loops::drivetrain::GyroType::IMU_Z_GYRO,
       ::frc971::control_loops::drivetrain::IMUType::IMU_X,
 
-      drivetrain::MakeDrivetrainLoop, drivetrain::MakeVelocityDrivetrainLoop,
+      drivetrain::MakeDrivetrainLoop,
+      drivetrain::MakeVelocityDrivetrainLoop,
       drivetrain::MakeKFDrivetrainLoop,
       drivetrain::MakeHybridVelocityDrivetrainLoop,
 
       chrono::duration_cast<chrono::nanoseconds>(
           chrono::duration<double>(drivetrain::kDt)),
-      drivetrain::kRobotRadius, drivetrain::kWheelRadius, drivetrain::kV,
+      drivetrain::kRobotRadius,
+      drivetrain::kWheelRadius,
+      drivetrain::kV,
 
-      drivetrain::kHighGearRatio, drivetrain::kLowGearRatio, drivetrain::kJ,
-      drivetrain::kMass, kThreeStateDriveShifter, kThreeStateDriveShifter,
-      true /* default_high_gear */, 0 /* down_offset if using constants use
-                                   constants::GetValues().down_error */,
-      0.7 /* wheel_non_linearity */, 1.2 /* quickturn_wheel_multiplier */,
+      drivetrain::kHighGearRatio,
+      drivetrain::kLowGearRatio,
+      drivetrain::kJ,
+      drivetrain::kMass,
+      kThreeStateDriveShifter,
+      kThreeStateDriveShifter,
+      true /* default_high_gear */,
+      0 /* down_offset; if using constants, use
+           constants::GetValues().down_error */,
+      0.7 /* wheel_non_linearity */,
+      1.2 /* quickturn_wheel_multiplier */,
       1.2 /* wheel_multiplier */,
+      true /* pistol_grip_shift_enables_line_follow */,
   };
 
   return kDrivetrainConfig;
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.cc b/y2019/control_loops/drivetrain/event_loop_localizer.cc
index 981701a..dad667f 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.cc
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.cc
@@ -33,9 +33,9 @@
       localizer_(dt_config, &robot_pose_) {
   localizer_.ResetInitialState(::aos::monotonic_clock::now(),
                                Localizer::State::Zero(), localizer_.P());
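+  // Seed the estimate with an arbitrary but plausible on-field pose until a
+  // real reset arrives.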
+  ResetPosition(0.5, 3.4, 0.0);
   frame_fetcher_ = event_loop_->MakeFetcher<CameraFrame>(
       ".y2019.control_loops.drivetrain.camera_frames");
-  target_selector_.set_has_target(false);
 }
 
 void EventLoopLocalizer::Reset(const Localizer::State &state) {
@@ -58,6 +58,16 @@
 void EventLoopLocalizer::HandleFrame(const CameraFrame &frame) {
   // We need to construct TargetView's and pass them to the localizer:
   ::aos::SizedArray<TargetView, kMaxTargetsPerFrame> views;
+  // Note: num_targets and camera are unsigned integers and so don't need to be
+  // checked for < 0.
+  if (frame.num_targets > kMaxTargetsPerFrame) {
+    LOG(ERROR, "Got bad num_targets %u\n", frame.num_targets);
+    return;
+  }
+  if (frame.camera >= cameras_.size()) {
+    LOG(ERROR, "Got bad camera number %u\n", frame.camera);
+    return;
+  }
   for (int ii = 0; ii < frame.num_targets; ++ii) {
     TargetView view;
     view.reading.heading = frame.targets[ii].heading;
diff --git a/y2019/control_loops/drivetrain/event_loop_localizer.h b/y2019/control_loops/drivetrain/event_loop_localizer.h
index 773410f..9207ec1 100644
--- a/y2019/control_loops/drivetrain/event_loop_localizer.h
+++ b/y2019/control_loops/drivetrain/event_loop_localizer.h
@@ -3,8 +3,9 @@
 
 #include "frc971/control_loops/drivetrain/localizer.h"
 #include "y2019/constants.h"
-#include "y2019/control_loops/drivetrain/localizer.h"
 #include "y2019/control_loops/drivetrain/camera.q.h"
+#include "y2019/control_loops/drivetrain/localizer.h"
+#include "y2019/control_loops/drivetrain/target_selector.h"
 
 namespace y2019 {
 namespace control_loops {
@@ -34,7 +35,16 @@
 
   void Reset(const Localizer::State &state);
   void ResetPosition(double x, double y, double theta) override {
-    Reset((Localizer::State() << x, y, theta, 0, 0, 0, 0, 0, 0, 0).finished());
+    // When we reset the state, we want to keep the encoder positions intact, so
+    // we copy from the original state and reset everything else.
+    Localizer::State new_state = localizer_.X_hat();
+    new_state.x() = x;
+    new_state.y() = y;
+    new_state(2, 0) = theta;
+    // Velocity terms.
+    new_state(4, 0) = 0.0;
+    new_state(6, 0) = 0.0;
+    Reset(new_state);
   }
 
   void Update(const ::Eigen::Matrix<double, 2, 1> &U,
@@ -54,15 +64,10 @@
   double right_velocity() const override {
     return localizer_.X_hat(StateIdx::kRightVelocity);
   }
-  double left_voltage_error() const override {
-    return localizer_.X_hat(StateIdx::kLeftVoltageError);
-  }
-  double right_voltage_error() const override {
-    return localizer_.X_hat(StateIdx::kRightVoltageError);
-  }
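+  // The 7-state localizer no longer estimates voltage errors, so report
+  // zero.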
+  double left_voltage_error() const override { return 0.0; }
+  double right_voltage_error() const override { return 0.0; }
 
-  ::frc971::control_loops::drivetrain::TrivialTargetSelector *target_selector()
-      override {
+  TargetSelector *target_selector() override {
     return &target_selector_;
   }
 
@@ -79,7 +84,7 @@
   Pose robot_pose_;
   const ::std::array<Camera, constants::Values::kNumCameras> cameras_;
   Localizer localizer_;
-  ::frc971::control_loops::drivetrain::TrivialTargetSelector target_selector_;
+  TargetSelector target_selector_;
 };
 
 // Constructs the cameras based on the constants in the //y2019/constants.*
diff --git a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
index 3dde98d..e086e36 100644
--- a/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
+++ b/y2019/control_loops/drivetrain/localized_drivetrain_test.cc
@@ -67,7 +67,7 @@
   void SetStartingPosition(const Eigen::Matrix<double, 3, 1> &xytheta) {
     *drivetrain_motor_plant_.mutable_state() << xytheta.x(), xytheta.y(),
         xytheta(2, 0), 0.0, 0.0;
-    Eigen::Matrix<double, 10, 1> localizer_state;
+    Eigen::Matrix<double, 7, 1> localizer_state;
     localizer_state.setZero();
     localizer_state.block<3, 1>(0, 0) = xytheta;
     localizer_.Reset(localizer_state);
@@ -135,10 +135,18 @@
            ++jj) {
         EventLoopLocalizer::TargetView view = target_views[jj];
         ++frame.num_targets;
-        frame.targets[jj].heading = view.reading.heading;
-        frame.targets[jj].distance = view.reading.distance;
-        frame.targets[jj].skew = view.reading.skew;
-        frame.targets[jj].height = view.reading.height;
+        const float nan = ::std::numeric_limits<float>::quiet_NaN();
+        if (send_bad_frames_) {
+          frame.targets[jj].heading = nan;
+          frame.targets[jj].distance = nan;
+          frame.targets[jj].skew = nan;
+          frame.targets[jj].height = nan;
+        } else {
+          frame.targets[jj].heading = view.reading.heading;
+          frame.targets[jj].distance = view.reading.distance;
+          frame.targets[jj].skew = view.reading.skew;
+          frame.targets[jj].height = view.reading.height;
+        }
       }
       camera_delay_queue_.emplace(monotonic_clock::now(), frame);
     }
@@ -183,9 +191,11 @@
       camera_delay_queue_;
 
   void set_enable_cameras(bool enable) { enable_cameras_ = enable; }
+  void set_bad_frames(bool enable) { send_bad_frames_ = enable; }
 
  private:
   bool enable_cameras_ = false;
+  bool send_bad_frames_ = false;
 };
 
 // Tests that no camera updates, combined with a perfect model, result in no
@@ -202,6 +212,20 @@
   VerifyEstimatorAccurate(1e-10);
 }
 
+// Bad camera updates (NaNs) should have no effect.
+TEST_F(LocalizedDrivetrainTest, BadCameraUpdate) {
+  set_enable_cameras(true);
+  set_bad_frames(true);
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(1)
+      .left_goal(-1.0)
+      .right_goal(1.0)
+      .Send();
+  RunForTime(chrono::seconds(3));
+  VerifyNearGoal();
+  VerifyEstimatorAccurate(1e-10);
+}
+
 // Tests that camera updates with a perfect model result in no errors.
 TEST_F(LocalizedDrivetrainTest, PerfectCameraUpdate) {
   set_enable_cameras(true);
@@ -272,7 +296,33 @@
       .Send();
   RunForTime(chrono::seconds(3));
   VerifyNearGoal();
-  VerifyEstimatorAccurate(1e-5);
+  VerifyEstimatorAccurate(1e-4);
+}
+
+namespace {
+EventLoopLocalizer::Pose HPSlotLeft() {
+  return constants::Field().targets()[7].pose();
+}
+}  // namespace
+
+// Tests that using the line following drivetrain and just driving straight
+// forward from roughly the right spot gets us to the HP slot.
+TEST_F(LocalizedDrivetrainTest, LineFollowToHPSlot) {
+  set_enable_cameras(false);
+  SetStartingPosition({4, 3, M_PI});
+  my_drivetrain_queue_.goal.MakeWithBuilder()
+      .controller_type(3)
+      .throttle(0.9)
+      .Send();
+  RunForTime(chrono::seconds(10));
+
+  VerifyEstimatorAccurate(1e-8);
+  // Because we aren't modulating the throttle, we don't try to hit the target
+  // exactly. Instead, just run slightly past it:
+  EXPECT_LT(::std::abs(::aos::math::DiffAngle(
+                M_PI, drivetrain_motor_plant_.state()(2, 0))),
+            1e-5);
+  EXPECT_GT(HPSlotLeft().abs_pos().x(), drivetrain_motor_plant_.state().x());
+  EXPECT_NEAR(HPSlotLeft().abs_pos().y(),
+              drivetrain_motor_plant_.state().y(), 1e-4);
 }
 
 }  // namespace testing
diff --git a/y2019/control_loops/drivetrain/localizer.h b/y2019/control_loops/drivetrain/localizer.h
index 095c8a9..b717837 100644
--- a/y2019/control_loops/drivetrain/localizer.h
+++ b/y2019/control_loops/drivetrain/localizer.h
@@ -53,6 +53,11 @@
       return;
     }
 
+    if (!SanitizeTargets(targets)) {
+      LOG(ERROR, "Throwing out targets due to in insane values.\n");
+      return;
+    }
+
     if (t > HybridEkf::latest_t()) {
       LOG(ERROR,
           "target observations must be older than most recent encoder/gyro "
@@ -97,11 +102,36 @@
   // TODO(james): Tune
   static constexpr Scalar kRejectionScore = 1.0;
 
+  // Checks that the incoming targets make some sense, mostly to prevent NaNs
+  // and the like from propagating.
+  bool SanitizeTargets(
+      const ::aos::SizedArray<TargetView, max_targets_per_frame> &targets) {
+    for (const TargetView &view : targets) {
+      const typename TargetView::Reading reading = view.reading;
+      if (!(::std::isfinite(reading.heading) &&
+            ::std::isfinite(reading.distance) &&
+            ::std::isfinite(reading.skew) && ::std::isfinite(reading.height))) {
+        LOG(ERROR, "Got non-finite values in target.\n");
+        return false;
+      }
+      if (reading.distance < 0) {
+        LOG(ERROR, "Got negative distance.\n");
+        return false;
+      }
+      if (::std::abs(::aos::math::NormalizeAngle(reading.skew)) > M_PI_2) {
+        LOG(ERROR, "Got skew > pi / 2.\n");
+        return false;
+      }
+    }
+    return true;
+  }
+
   // Computes the measurement (z) and noise covariance (R) matrices for a given
   // TargetView.
   void TargetViewToMatrices(const TargetView &view, Output *z,
                             Eigen::Matrix<Scalar, kNOutputs, kNOutputs> *R) {
-    *z << view.reading.heading, view.reading.distance, view.reading.skew;
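+    // Normalize the skew so that wrapped measurements don't show up as
+    // enormous innovations in the filter.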
+    *z << view.reading.heading, view.reading.distance,
+        ::aos::math::NormalizeAngle(view.reading.skew);
     // TODO(james): R should account as well for our confidence in the target
     // matching. However, handling that properly requires thinking a lot more
     // about the probabilities.
@@ -207,7 +237,7 @@
         // Compute the ckalman update for this step:
         const TargetView &view = camera_views[jj];
         const Eigen::Matrix<Scalar, kNOutputs, kNStates> H =
-            HMatrix(*view.target, target_view);
+            HMatrix(*view.target, camera.pose());
         const Eigen::Matrix<Scalar, kNStates, kNOutputs> PH = P * H.transpose();
         const Eigen::Matrix<Scalar, kNOutputs, kNOutputs> S = H * PH + R;
         // Note: The inverse here should be very cheap so long as kNOutputs = 3.
@@ -273,7 +303,11 @@
       ::std::array<int, max_targets_per_frame> best_frames =
           MatchFrames(scores, best_matches, target_views.size());
       for (size_t ii = 0; ii < target_views.size(); ++ii) {
-        int view_idx = best_frames[ii];
+        const size_t view_idx = best_frames[ii];
+        // Unmatched views are marked -1, which wraps to a huge unsigned
+        // value, so the upper-bound check catches them as well.
+        if (view_idx >= camera_views.size()) {
+          LOG(ERROR, "Somehow, the view scorer failed.\n");
+          continue;
+        }
         const Eigen::Matrix<Scalar, kNOutputs, kNStates> best_H =
             all_H_matrices[ii][view_idx];
         const TargetView best_view = camera_views[view_idx];
@@ -320,11 +354,11 @@
   }
 
   Eigen::Matrix<Scalar, kNOutputs, kNStates> HMatrix(
-      const Target &target, const TargetView &target_view) {
+      const Target &target, const Pose &camera_pose) {
     // To calculate dheading/d{x,y,theta}:
     // heading = arctan2(target_pos - camera_pos) - camera_theta
     Eigen::Matrix<Scalar, 3, 1> target_pos = target.pose().abs_pos();
-    Eigen::Matrix<Scalar, 3, 1> camera_pos = target_view.camera_pose.abs_pos();
+    Eigen::Matrix<Scalar, 3, 1> camera_pos = camera_pose.abs_pos();
     Scalar diffx = target_pos.x() - camera_pos.x();
     Scalar diffy = target_pos.y() - camera_pos.y();
     Scalar norm2 = diffx * diffx + diffy * diffy;
@@ -364,6 +398,7 @@
           best_matches,
       int n_views) {
     ::std::array<int, max_targets_per_frame> best_set;
+    best_set.fill(-1);
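+    // Use -1 to mark "no view matched" so unfilled slots are detectable.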
     Scalar best_score;
     // We start out without having "used" any views/targets:
     ::aos::SizedArray<bool, max_targets_per_frame> used_views;
diff --git a/y2019/control_loops/drivetrain/localizer_test.cc b/y2019/control_loops/drivetrain/localizer_test.cc
index 3b09e64..f062234 100644
--- a/y2019/control_loops/drivetrain/localizer_test.cc
+++ b/y2019/control_loops/drivetrain/localizer_test.cc
@@ -327,6 +327,8 @@
   ::std::normal_distribution<> normal_;
 };
 
+using ::std::chrono::milliseconds;
+
 // Tests that, when we attempt to follow a spline and use the localizer to
 // perform the state estimation, we end up roughly where we should and that
 // the localizer has a valid position estimate.
@@ -340,15 +342,20 @@
   // we just trigger all the cameras at once, rather than offsetting them or
   // anything.
   const int camera_period = 5; // cycles
-  // The amount of time to delay the camera images from when they are taken.
-  const ::std::chrono::milliseconds camera_latency(50);
+  // The amount of time to delay the camera images from when they are taken,
+  // one latency per frame queue.
+  const ::std::array<milliseconds, 4> camera_latencies{
+      {milliseconds(45), milliseconds(50), milliseconds(55),
+       milliseconds(100)}};
 
-  // A queue of camera frames so that we can add a time delay to the data
-  // coming from the cameras.
-  ::std::queue<::std::tuple<
-      ::aos::monotonic_clock::time_point, const TestCamera *,
-      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>>>
-      camera_queue;
+  // A set of camera frame queues, one per latency, so that we can exercise
+  // several different camera-to-localizer delays at once.
+  ::std::array<
+      ::std::queue<::std::tuple<
+          ::aos::monotonic_clock::time_point, const TestCamera *,
+          ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>>>,
+      4>
+      camera_queues;
 
   // Start time, for debugging.
   const auto begin = ::std::chrono::steady_clock::now();
@@ -392,7 +399,7 @@
     U(1, 0) = ::std::max(::std::min(U(1, 0), 12.0), -12.0);
 
     state = ::frc971::control_loops::RungeKuttaU(
-        [this](const ::Eigen::Matrix<double, 10, 1> &X,
+        [this](const ::Eigen::Matrix<double, 7, 1> &X,
                const ::Eigen::Matrix<double, 2, 1> &U) { return DiffEq(X, U); },
         state, U,
         ::std::chrono::duration_cast<::std::chrono::duration<double>>(
@@ -411,7 +418,7 @@
                            ::std::pow(state(StateIdx::kRightVelocity, 0), 2)) /
                    3.0);
       TestLocalizer::State disturbance;
-      disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0;
+      disturbance << 0.02, 0.02, 0.001, 0.03, 0.02, 0.0, 0.0;
       disturbance *= disturbance_scale;
       state += disturbance;
     }
@@ -431,34 +438,37 @@
                                      right_enc + Normal(encoder_noise_),
                                      gyro + Normal(gyro_noise_), U, t);
 
-    // Clear out the camera frames that we are ready to pass to the localizer.
-    while (!camera_queue.empty() &&
-           ::std::get<0>(camera_queue.front()) < t - camera_latency) {
-      const auto tuple = camera_queue.front();
-      camera_queue.pop();
-      ::aos::monotonic_clock::time_point t_obs = ::std::get<0>(tuple);
-      const TestCamera *camera = ::std::get<1>(tuple);
-      ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame> views =
-          ::std::get<2>(tuple);
-      localizer_.UpdateTargets(*camera, views, t_obs);
-    }
+    for (size_t cam_idx = 0; cam_idx < camera_queues.size(); ++cam_idx) {
+      auto &camera_queue = camera_queues[cam_idx];
+      // Clear out the camera frames that we are ready to pass to the localizer.
+      while (!camera_queue.empty() && ::std::get<0>(camera_queue.front()) <
+                                          t - camera_latencies[cam_idx]) {
+        const auto tuple = camera_queue.front();
+        camera_queue.pop();
+        ::aos::monotonic_clock::time_point t_obs = ::std::get<0>(tuple);
+        const TestCamera *camera = ::std::get<1>(tuple);
+        ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame> views =
+            ::std::get<2>(tuple);
+        localizer_.UpdateTargets(*camera, views, t_obs);
+      }
 
-    // Actually take all the images and store them in the queue.
-    if (i % camera_period == 0) {
-      for (size_t jj = 0; jj < true_cameras_.size(); ++jj) {
-        const auto target_views = true_cameras_[jj].target_views();
-        ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>
-            pass_views;
-        for (size_t ii = 0;
-             ii < ::std::min(kNumTargetsPerFrame, target_views.size()); ++ii) {
-          TestCamera::TargetView view = target_views[ii];
-          Noisify(&view);
-          pass_views.push_back(view);
+      // Actually take all the images and store them in the queue.
+      if (i % camera_period == 0) {
+        for (size_t jj = 0; jj < true_cameras_.size(); ++jj) {
+          const auto target_views = true_cameras_[jj].target_views();
+          ::aos::SizedArray<TestCamera::TargetView, kNumTargetsPerFrame>
+              pass_views;
+          for (size_t ii = 0;
+               ii < ::std::min(kNumTargetsPerFrame, target_views.size());
+               ++ii) {
+            TestCamera::TargetView view = target_views[ii];
+            Noisify(&view);
+            pass_views.push_back(view);
+          }
+          camera_queue.emplace(t, &robot_cameras_[jj], pass_views);
         }
-        camera_queue.emplace(t, &robot_cameras_[jj], pass_views);
       }
     }
-
   }
 
   const auto end = ::std::chrono::steady_clock::now();
@@ -488,11 +498,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -504,11 +512,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -519,11 +525,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/false,
@@ -534,26 +538,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0, 0.0)
-                .finished(),
-            /*noisify=*/false,
-            /*disturb=*/false,
-            /*estimate_tolerance=*/1e-4,
-            /*goal_tolerance=*/2e-2,
-        }),
-        // Repeats perfect scenario, but add voltage + angular errors:
-        LocalizerTestParams({
-            /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
-            /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
-             0.5, 0.02)
-                .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0, 0.0)
+            (TestLocalizer::State() << 0.1, -5.1, -0.01, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/false,
@@ -564,11 +551,9 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/false,
             /*disturb=*/true,
@@ -579,16 +564,14 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.0, 3.0, 3.0, 0.0, 1.0, 1.0}},
             /*control_pts_y=*/{{-5.0, -5.0, 2.0, 2.0, 2.0, 3.0}},
-            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.1, -5.1, 0.03, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/true,
             /*estimate_tolerance=*/0.15,
-            /*goal_tolerance=*/0.5,
+            /*goal_tolerance=*/0.8,
         }),
         // Try another spline, just in case the one I was using is special for
         // some reason; this path will also go straight up to a target.
@@ -596,15 +579,13 @@
         LocalizerTestParams({
             /*control_pts_x=*/{{0.5, 3.5, 4.0, 8.0, 11.0, 10.2}},
             /*control_pts_y=*/{{1.0, 1.0, -3.0, -2.0, -3.5, -3.65}},
-            (TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.6, 1.01, 0.01, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
-            (TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-             0.0, 0.0)
+            (TestLocalizer::State() << 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                 .finished(),
             /*noisify=*/true,
             /*disturb=*/false,
-            /*estimate_tolerance=*/0.1,
+            /*estimate_tolerance=*/0.15,
             /*goal_tolerance=*/0.5,
         })));
 
diff --git a/y2019/control_loops/drivetrain/target_selector.cc b/y2019/control_loops/drivetrain/target_selector.cc
new file mode 100644
index 0000000..449ce64
--- /dev/null
+++ b/y2019/control_loops/drivetrain/target_selector.cc
@@ -0,0 +1,48 @@
+#include "y2019/control_loops/drivetrain/target_selector.h"
+
+namespace y2019 {
+namespace control_loops {
+
+constexpr double TargetSelector::kFakeFov;
+
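+// Both viewers are centered on robot_pose_; the back viewer is rotated by
+// M_PI so that it looks out the rear of the robot.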
+TargetSelector::TargetSelector()
+    : front_viewer_({&robot_pose_, {0.0, 0.0, 0.0}, 0.0}, kFakeFov, fake_noise_,
+                    constants::Field().targets(), {}),
+      back_viewer_({&robot_pose_, {0.0, 0.0, 0.0}, M_PI}, kFakeFov, fake_noise_,
+                   constants::Field().targets(), {}) {}
+
+bool TargetSelector::UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                                     double command_speed) {
+  if (::std::abs(command_speed) < kMinDecisionSpeed) {
+    return false;
+  }
+  *robot_pose_.mutable_pos() << state.x(), state.y(), 0.0;
+  robot_pose_.set_theta(state(2, 0));
+  ::aos::SizedArray<FakeCamera::TargetView,
+                    y2019::constants::Field::kNumTargets>
+      target_views;
+  if (command_speed > 0) {
+    target_views = front_viewer_.target_views();
+  } else {
+    target_views = back_viewer_.target_views();
+  }
+
+  if (target_views.empty()) {
+    // We can't see any targets...
+    return false;
+  }
+
+  // Choose the target that has the smallest distance noise (currently, this
+  // means the largest target in the camera view).
+  double smallest_target_noise = ::std::numeric_limits<double>::infinity();
+  for (const auto &view : target_views) {
+    if (view.noise.distance < smallest_target_noise) {
+      target_pose_ = view.target->pose();
+      smallest_target_noise = view.noise.distance;
+    }
+    }
+  }
+  return true;
+}
+
+}  // namespace control_loops
+}  // namespace y2019
diff --git a/y2019/control_loops/drivetrain/target_selector.h b/y2019/control_loops/drivetrain/target_selector.h
new file mode 100644
index 0000000..965d7cc
--- /dev/null
+++ b/y2019/control_loops/drivetrain/target_selector.h
@@ -0,0 +1,59 @@
+#ifndef Y2019_CONTROL_LOOPS_DRIVETRAIN_TARGET_SELECTOR_H_
+#define Y2019_CONTROL_LOOPS_DRIVETRAIN_TARGET_SELECTOR_H_
+
+#include "frc971/control_loops/pose.h"
+#include "frc971/control_loops/drivetrain/localizer.h"
+#include "y2019/constants.h"
+#include "y2019/control_loops/drivetrain/camera.h"
+
+namespace y2019 {
+namespace control_loops {
+
+// A class to identify which target the driver is currently driving towards so
+// that we can guide them into the target.
+// The current high-level algorithm is to:
+// (a) Determine which direction (forwards vs. backwards) the driver is
+// driving.
+// (b) Find the largest target in the X degree field-of-view to the front/back
+// of the robot, where X depends on the range of angles at which we expect the
+// driver to typically approach a target.
+// (c) Assume that said largest target is the target that the driver wants to
+// drive to.
+class TargetSelector
+    : public ::frc971::control_loops::drivetrain::TargetSelectorInterface {
+ public:
+  typedef ::frc971::control_loops::TypedPose<double> Pose;
+  // For the virtual camera that we use to identify targets, ignore all
+  // obstacles and just assume that we have perfect field of view.
+  typedef TypedCamera<y2019::constants::Field::kNumTargets,
+                      /*num_obstacles=*/0, double> FakeCamera;
+
+  TargetSelector();
+
+  bool UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
+                       double command_speed) override;
+  Pose TargetPose() const override { return target_pose_; }
+
+ private:
+  static constexpr double kFakeFov = M_PI * 0.7;
+  // Minimum longitudinal speed at which the robot must be moving for us to
+  // make a selection.
+  static constexpr double kMinDecisionSpeed = 0.7;  // m/s
+  Pose robot_pose_;
+  Pose target_pose_;
+  // For the noise of our fake cameras, we only really care about the max
+  // distance, which will be the maximum distance we let ourselves guide in
+  // from. The distance noise is set so that we can use the camera's estimate of
+  // the relative size of the targets.
+  FakeCamera::NoiseParameters fake_noise_ = {.max_viewable_distance = 5 /*m*/,
+                                             .heading_noise = 0,
+                                             .nominal_distance_noise = 1,
+                                             .nominal_skew_noise = 0,
+                                             .nominal_height_noise = 0};
+  FakeCamera front_viewer_;
+  FakeCamera back_viewer_;
+};
+
+}  // namespace control_loops
+}  // namespace y2019
+
+#endif  // Y2019_CONTROL_LOOPS_DRIVETRAIN_TARGET_SELECTOR_H_
diff --git a/y2019/control_loops/drivetrain/target_selector_test.cc b/y2019/control_loops/drivetrain/target_selector_test.cc
new file mode 100644
index 0000000..4b440c2
--- /dev/null
+++ b/y2019/control_loops/drivetrain/target_selector_test.cc
@@ -0,0 +1,99 @@
+#include "y2019/control_loops/drivetrain/target_selector.h"
+
+#include "gtest/gtest.h"
+
+namespace y2019 {
+namespace control_loops {
+namespace testing {
+
+typedef ::frc971::control_loops::TypedPose<double> Pose;
+typedef ::Eigen::Matrix<double, 5, 1> State;
+
+namespace {
+// Accessors to get some useful particular targets on the field:
+Pose HPSlotLeft() { return constants::Field().targets()[7].pose(); }
+Pose CargoNearLeft() { return constants::Field().targets()[2].pose(); }
+Pose RocketPortalLeft() { return constants::Field().targets()[4].pose(); }
+}  // namespace
+
+// Tests the target selector with:
+// (1) The [x, y, theta, left_vel, right_vel] state to test at.
+// (2) The current driver commanded speed.
+// (3) Whether we expect to see a target.
+// (4) If (3) is true, the pose we expect to get back.
+struct TestParams {
+  State state;
+  double command_speed;
+  bool expect_target;
+  Pose expected_pose;
+};
+class TargetSelectorParamTest : public ::testing::TestWithParam<TestParams> {};
+
+TEST_P(TargetSelectorParamTest, ExpectReturn) {
+  TargetSelector selector;
+  bool expect_target = GetParam().expect_target;
+  const State state = GetParam().state;
+  ASSERT_EQ(expect_target,
+            selector.UpdateSelection(state, GetParam().command_speed))
+      << "We expected a return of " << expect_target << " at state "
+      << state.transpose();
+  if (expect_target) {
+    const Pose expected_pose = GetParam().expected_pose;
+    const Pose actual_pose = selector.TargetPose();
+    const ::Eigen::Vector3d expected_pos = expected_pose.abs_pos();
+    const ::Eigen::Vector3d actual_pos = actual_pose.abs_pos();
+    const double expected_angle = expected_pose.abs_theta();
+    const double actual_angle = actual_pose.abs_theta();
+    EXPECT_EQ(expected_pos, actual_pos)
+        << "Expected the pose to be at " << expected_pos.transpose()
+        << " but got " << actual_pos.transpose() << " with the robot at "
+        << state.transpose();
+    EXPECT_EQ(expected_angle, actual_angle);
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    TargetSelectorTest, TargetSelectorParamTest,
+    ::testing::Values(
+        // When we are far away from anything, we should not register any
+        // targets:
+        TestParams{
+            (State() << 0.0, 0.0, 0.0, 1.0, 1.0).finished(), 1.0, false, {}},
+        // Aim for a human-player spot; at low speeds we should not register
+        // anything.
+        TestParams{(State() << 4.0, 2.0, M_PI, 0.05, 0.05).finished(),
+                   0.05,
+                   false,
+                   {}},
+        TestParams{(State() << 4.0, 2.0, M_PI, -0.05, -0.05).finished(),
+                   -0.05,
+                   false,
+                   {}},
+        TestParams{(State() << 4.0, 2.0, M_PI, 0.5, 0.5).finished(), 1.0, true,
+                   HPSlotLeft()},
+        // Put ourselves between the rocket and cargo ship; we should see the
+        // portal driving one direction and the near cargo ship port the other.
+        // We also command a speed opposite the current direction of motion and
+        // confirm that that behaves as expected.
+        TestParams{(State() << 6.0, 2.0, -M_PI_2, -0.5, -0.5).finished(), 1.0,
+                   true, CargoNearLeft()},
+        TestParams{(State() << 6.0, 2.0, M_PI_2, 0.5, 0.5).finished(), -1.0,
+                   true, CargoNearLeft()},
+        TestParams{(State() << 6.0, 2.0, -M_PI_2, 0.5, 0.5).finished(), -1.0,
+                   true, RocketPortalLeft()},
+        TestParams{(State() << 6.0, 2.0, M_PI_2, -0.5, -0.5).finished(), 1.0,
+                   true, RocketPortalLeft()},
+        // And we shouldn't see anything while spinning in place:
+        TestParams{(State() << 6.0, 2.0, M_PI_2, -0.5, 0.5).finished(),
+                   0.0,
+                   false,
+                   {}},
+        // Drive backwards off the field--we should not see anything.
+        TestParams{(State() << -0.1, 0.0, 0.0, -0.5, -0.5).finished(),
+                   -1.0,
+                   false,
+                   {}}));
+
+}  // namespace testing
+}  // namespace control_loops
+}  // namespace y2019
diff --git a/y2019/image_streamer/BUILD b/y2019/image_streamer/BUILD
new file mode 100644
index 0000000..4885da0
--- /dev/null
+++ b/y2019/image_streamer/BUILD
@@ -0,0 +1,35 @@
+package(default_visibility = ["//visibility:public"])
+
+cc_binary(
+    name = "image_streamer",
+    srcs = ["image_streamer.cc"],
+    deps = [
+        ":flip_image",
+        "//aos/logging",
+        "//aos/logging:implementations",
+        "//aos/vision/blob:codec",
+        "//aos/vision/events:epoll_events",
+        "//aos/vision/events:socket_types",
+        "//aos/vision/events:udp",
+        "//aos/vision/image:image_stream",
+        "//aos/vision/image:reader",
+        "//y2019:vision_proto",
+        "@com_github_gflags_gflags//:gflags",
+    ],
+)
+
+cc_library(
+    name = "flip_image",
+    srcs = ["flip_image.cc"],
+    hdrs = ["flip_image.h"],
+    copts = [
+        "-Wno-format-nonliteral",
+        "-Wno-cast-align",
+        "-Wno-cast-qual",
+        "-Wno-error=type-limits",
+    ],
+    deps = [
+        "//third_party/cimg:CImg",
+        "//third_party/libjpeg",
+    ],
+)
diff --git a/y2019/image_streamer/README_ODROID_setup.txt b/y2019/image_streamer/README_ODROID_setup.txt
new file mode 100644
index 0000000..16f7287
--- /dev/null
+++ b/y2019/image_streamer/README_ODROID_setup.txt
@@ -0,0 +1,120 @@
+# To build and deploy the image_streamer code to the ODROID, run
+#    ./deploy.sh 10.9.71.179
+# This will also configure and copy a supervisor configuration
+# file, vision.conf, to /etc/supervisor/conf.d.
+
+# While the code can work with two cameras, as of March 4, 2019,
+# the robot only has one driver camera on it.  To use
+# two cameras, set --single_camera to false.  Use the
+# --camera0_exposure and --camera1_exposure flags to set the camera exposures.
+
+# To view a camera on a Debian laptop use:
+mplayer -tv device=/dev/video0 tv://
+# or if the video is not on video0 try
+mplayer -tv device=/dev/video4 tv://
+# Show available camera formats.
+# This came from https://superuser.com/questions/494575/ffmpeg-open-webcam-using-yuyv-but-i-want-mjpeg
+ffmpeg -f video4linux2 -list_formats all -i /dev/video2
+
+# Jevois notes:
+# To mount a jevois disk on a debian system, use y2019/vision/jevois-cmd
+cd y2019/vision/
+sudo ./jevois-cmd help
+sudo ./jevois-cmd usbsd  # This will mount the disk.
+# If needed, install python-serial to run jevois-cmd
+sudo apt-get install python-serial
+
+# Austin debugged the camera startup problems with supervisorctl on the ODROID.
+# Here is a copy of how he did it.  The problem was the logging
+# was not starting properly.  Austin added a flag to image_streamer
+# so that it would not start logging by default.
+
+root@odroid:~# supervisorctl
+exposure_loop                    RUNNING   pid 625, uptime 0:09:09
+vision                           FATAL     Exited too quickly (process log may have details)
+supervisor> tail -1000 vision stderr
+SURE_ABSOLUTE from 297 to 300
+image_streamer(656)(00018): DEBUG   at 0000000021.963290s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(656)(00019): DEBUG   at 0000000021.963585s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(656)(00020): INFO    at 0000000021.965102s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(656)(00021): INFO    at 0000000021.967263s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(656)(00022): DEBUG   at 0000000021.969020s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(656)(00023): DEBUG   at 0000000021.969275s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+supervisor> restart vision
+vision: ERROR (not running)
+vision: ERROR (spawn error)
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:31
+vision                           STARTING
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:33
+vision                           BACKOFF   Exited too quickly (process log may have details)
+supervisor> status
+exposure_loop                    RUNNING   pid 625, uptime 0:09:34
+vision                           BACKOFF   Exited too quickly (process log may have details)
+supervisor> tail -1000 vision stderr
+SURE_ABSOLUTE from 297 to 300
+image_streamer(864)(00018): DEBUG   at 0000000590.870582s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(864)(00019): DEBUG   at 0000000590.870856s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(864)(00020): INFO    at 0000000590.872400s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(864)(00021): INFO    at 0000000590.874543s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(864)(00022): DEBUG   at 0000000590.876289s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(864)(00023): DEBUG   at 0000000590.876547s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+supervisor> tail -1000 vision stdout
+SURE_ABSOLUTE from 297 to 300
+image_streamer(864)(00018): DEBUG   at 0000000590.870582s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(864)(00019): DEBUG   at 0000000590.870856s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(864)(00020): INFO    at 0000000590.872400s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(864)(00021): INFO    at 0000000590.874543s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(864)(00022): DEBUG   at 0000000590.876289s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(864)(00023): DEBUG   at 0000000590.876547s: aos/vision/image/reader.cc: 301: Start: done with first queue
+image_streamer: y2019/vision/image_streamer.cc:60: BlobLog::BlobLog(const char *, const char *): Assertion `ofst_.is_open()' failed.
+
+root@odroid:~# /root/image_streamer --single_camera=true --camera0_exposure=600
+image_streamer(890)(00000): INFO    at 0000000667.025668s: y2019/vision/image_streamer.cc: 298: main: In main.
+image_streamer(890)(00001): INFO    at 0000000667.025722s: y2019/vision/image_streamer.cc: 300: main: before setting up tcp_server.
+image_streamer(890)(00002): INFO    at 0000000667.025819s: aos/vision/events/tcp_server.cc: 76: SocketBindListenOnPort: connected to port: 80 on fd: 3
+image_streamer(890)(00003): INFO    at 0000000667.025844s: y2019/vision/image_streamer.cc: 302: main: after setting up tcp_server.
+image_streamer(890)(00004): INFO    at 0000000667.025872s: y2019/vision/image_streamer.cc: 303: main: In main.
+image_streamer(890)(00005): INFO    at 0000000667.025896s: y2019/vision/image_streamer.cc: 305: main: In main.
+image_streamer(890)(00006): INFO    at 0000000667.025913s: y2019/vision/image_streamer.cc: 307: main: In main.
+image_streamer(890)(00007): INFO    at 0000000667.025933s: y2019/vision/image_streamer.cc: 309: main: In main.
+image_streamer(890)(00008): INFO    at 0000000667.025952s: y2019/vision/image_streamer.cc: 311: main: In main.
+image_streamer(890)(00009): INFO    at 0000000667.025968s: y2019/vision/image_streamer.cc: 314: main: In main.
+image_streamer(890)(00010): INFO    at 0000000667.025985s: y2019/vision/image_streamer.cc: 317: main: In main.
+image_streamer(890)(00011): INFO    at 0000000667.026001s: y2019/vision/image_streamer.cc: 319: main: In main.
+image_streamer(890)(00012): INFO    at 0000000667.026017s: y2019/vision/image_streamer.cc: 322: main: In main.
+Before setting up udp socket 5001
+image_streamer(890)(00013): INFO    at 0000000667.026090s: y2019/vision/image_streamer.cc: 324: main: In main.
+image_streamer(890)(00014): INFO    at 0000000667.026142s: y2019/vision/image_streamer.cc: 328: main: In main.
+image_streamer(890)(00015): WARNING at 0000000667.026220s: aos/vision/image/reader.cc: 217: Init: xioctl VIDIOC_S_CROP due to 25 (Inappropriate ioctl for device)
+image_streamer(890)(00016): DEBUG   at 0000000667.026646s: aos/vision/image/reader.cc: 162: SetCameraControl: Camera control V4L2_CID_EXPOSURE_AUTO was already 1
+image_streamer(890)(00017): DEBUG   at 0000000667.027819s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_EXPOSURE_ABSOLUTE from 300 to 600
+image_streamer(890)(00018): DEBUG   at 0000000667.027887s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_BRIGHTNESS from -3 to -40
+image_streamer(890)(00019): DEBUG   at 0000000667.027956s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_GAIN from 16 to 0
+image_streamer(890)(00020): INFO    at 0000000667.029905s: aos/vision/image/reader.cc: 277: Init: framerate ended up at 1/30
+image_streamer(890)(00021): INFO    at 0000000667.031824s: aos/vision/image/reader.cc: 59: Reader: Bat Vision Successfully Initialized.
+image_streamer(890)(00022): DEBUG   at 0000000667.033340s: aos/vision/image/reader.cc: 292: Start: queueing buffers for the first time
+image_streamer(890)(00023): DEBUG   at 0000000667.033369s: aos/vision/image/reader.cc: 301: Start: done with first queue
+Logging to file (./logging/blob_record_38.dat)
+Running Camera
+image_streamer(890)(00024): DEBUG   at 0000000667.236660s: aos/vision/image/reader.cc: 162: SetCameraControl: Camera control V4L2_CID_EXPOSURE_AUTO was already 1
+image_streamer(890)(00025): DEBUG   at 0000000667.238058s: aos/vision/image/reader.cc: 175: SetCameraControl: Set camera control V4L2_CID_EXPOSURE_ABSOLUTE from 15 to 600
+image_streamer(890)(00026): INFO    at 0000000667.238178s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29161
+image_streamer(890)(00027): INFO    at 0000000667.238205s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00028): INFO    at 0000000667.252786s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29521
+image_streamer(890)(00029): INFO    at 0000000667.252809s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00030): INFO    at 0000000667.272826s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29559
+image_streamer(890)(00031): INFO    at 0000000667.272848s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00032): INFO    at 0000000667.316659s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 29640
+image_streamer(890)(00033): INFO    at 0000000667.316680s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00034): INFO    at 0000000667.377320s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65435
+image_streamer(890)(00035): INFO    at 0000000667.377346s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00036): INFO    at 0000000667.412857s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65651
+image_streamer(890)(00037): INFO    at 0000000667.412945s: y2019/vision/image_streamer.cc: 335: operator(): Got a frame cam0
+image_streamer(890)(00038): INFO    at 0000000667.444955s: y2019/vision/image_streamer.cc: 278: ProcessImage: Data has length 65648
+
diff --git a/y2019/image_streamer/deploy.sh b/y2019/image_streamer/deploy.sh
new file mode 100755
index 0000000..584d09e
--- /dev/null
+++ b/y2019/image_streamer/deploy.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+set -e
+
+echo ""
+echo "USAGE: $0 ODROID_ip_address"
+echo "Example: $0 10.9.71.179"
+echo "Example: $0 10.99.71.179"
+echo ""
+
+if [ $# != 1 ]
+  then
+    echo "Illegal number of parameters"
+    exit
+fi
+
+if [[ $1 == -*[hH]* ]]
+  then
+    exit
+fi
+
+# Get the script directory (from https://devhints.io/bash)
+DIR="${0%/*}"
+
+# Move into the script directory
+cd "${DIR}"
+echo "# Working in `pwd`"
+
+ODROID_IP_ADDRESS=$1
+ODROID="root@${ODROID_IP_ADDRESS}"
+# Get the IP address of the roboRIO from the ODROID IP address
+# This is needed to properly configure supervisorctl on the ODROID
+# for image_streamer to communicate with the roboRIO.
+ROBORIO=`echo ${ODROID_IP_ADDRESS} | sed 's/\.[0-9]*$/.2/'`
+
+echo "# Using ODORID  ${ODROID}"
+echo "# Using roboRIO ${ROBORIO}"
+
+# This builds the ODROID image_streamer code.
+echo -e "\n# Building image_streamer"
+(
+set -x
+bazel build -c opt //y2019/image_streamer:image_streamer --cpu=armhf-debian
+)
+
+echo -e "\n# Copy files to ODROID"
+(
+set -x
+rsync -av --progress ../../bazel-bin/y2019/image_streamer/image_streamer "${ODROID}":.
+rsync -av --progress README_ODROID_setup.txt "${ODROID}":.
+rsync -av --progress vision.conf "${ODROID}":/etc/supervisor/conf.d/
+)
+
+echo "# Make sure supervisorctl has the correct IP address."
+(
+set -x
+ssh "${ODROID}" sed -i -e "'/image_streamer/ s/10.9.71.2/${ROBORIO}/'" /etc/supervisor/conf.d/vision.conf
+
+ssh "${ODROID}" sync
+)
+
+echo -e "\nCan't restart image_streamer with supervisorctl because the USB devices don't come up reliably..." >&2
+echo "Restart the ODROID now" >&2
diff --git a/y2019/image_streamer/flip_image.cc b/y2019/image_streamer/flip_image.cc
new file mode 100644
index 0000000..6ff00ed
--- /dev/null
+++ b/y2019/image_streamer/flip_image.cc
@@ -0,0 +1,15 @@
+#include "flip_image.h"
+
+#define cimg_display 0
+#define cimg_use_jpeg
+#define cimg_plugin "plugins/jpeg_buffer.h"
+#include "third_party/cimg/CImg.h"
+
+void flip_image(const char *input, const int input_size, JOCTET *buffer,
+                unsigned int *buffer_size) {
+  ::cimg_library::CImg<unsigned char> image;
+  image.load_jpeg_buffer((JOCTET *)(input), input_size);
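+  // Mirroring across both the x and y axes rotates the image 180 degrees.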
+  image.mirror("xy");
+
+  image.save_jpeg_buffer(buffer, *buffer_size, 80);
+}
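A minimal caller sketch for flip_image(), mirroring how image_streamer.cc (below) uses it: the output buffer is caller-owned, and *buffer_size carries the capacity in and the encoded size out. The function name here is illustrative.

    #include <array>

    #include "y2019/image_streamer/flip_image.h"

    void FlipExample(const char *jpeg_in, int jpeg_in_size) {
      // Fixed-size output buffer, matching the 100000-byte one in
      // image_streamer.cc.
      std::array<JOCTET, 100000> out;
      unsigned int out_size = out.size();  // in: capacity; out: bytes written
      flip_image(jpeg_in, jpeg_in_size, &out[0], &out_size);
      // out[0 .. out_size) now holds the 180-degree-rotated JPEG.
    }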
diff --git a/y2019/image_streamer/flip_image.h b/y2019/image_streamer/flip_image.h
new file mode 100644
index 0000000..6a59e96
--- /dev/null
+++ b/y2019/image_streamer/flip_image.h
@@ -0,0 +1,12 @@
+#ifndef Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
+#define Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include "third_party/libjpeg/jerror.h"
+#include "third_party/libjpeg/jpeglib.h"
+
+void flip_image(const char *input, const int input_size, JOCTET *buffer,
+                unsigned int *buffer_size);
+
+#endif  // Y2019_IMAGE_STREAMER_FLIP_IMAGE_H_
diff --git a/y2019/image_streamer/image_streamer.cc b/y2019/image_streamer/image_streamer.cc
new file mode 100644
index 0000000..ed581e2
--- /dev/null
+++ b/y2019/image_streamer/image_streamer.cc
@@ -0,0 +1,391 @@
+#include "aos/vision/image/image_stream.h"
+
+#include <sys/stat.h>
+#include <deque>
+#include <fstream>
+#include <string>
+
+#include "aos/logging/implementations.h"
+#include "aos/logging/logging.h"
+#include "aos/vision/blob/codec.h"
+#include "aos/vision/events/socket_types.h"
+#include "aos/vision/events/udp.h"
+#include "aos/vision/image/reader.h"
+#include "gflags/gflags.h"
+#include "y2019/image_streamer/flip_image.h"
+#include "y2019/vision.pb.h"
+
+using ::aos::events::DataSocket;
+using ::aos::events::RXUdpSocket;
+using ::aos::events::TCPServer;
+using ::aos::vision::DataRef;
+using ::aos::vision::Int32Codec;
+using ::aos::monotonic_clock;
+using ::y2019::VisionControl;
+
+DEFINE_string(roborio_ip, "10.9.71.2", "RoboRIO IP Address");
+DEFINE_string(log, "",
+              "If non-empty, log images to the specified prefix with the image "
+              "index appended to the filename");
+DEFINE_bool(single_camera, true, "If true, only use video0");
+DEFINE_int32(camera0_exposure, 600, "Exposure for video0");
+DEFINE_int32(camera1_exposure, 600, "Exposure for video1");
+
+aos::vision::DataRef mjpg_header =
+    "HTTP/1.0 200 OK\r\n"
+    "Server: YourServerName\r\n"
+    "Connection: close\r\n"
+    "Max-Age: 0\r\n"
+    "Expires: 0\r\n"
+    "Cache-Control: no-cache, private\r\n"
+    "Pragma: no-cache\r\n"
+    "Content-Type: multipart/x-mixed-replace; "
+    "boundary=--boundary\r\n\r\n";
+
+struct Frame {
+  std::string data;
+};
+
+inline bool FileExist(const std::string &name) {
+  struct stat buffer;
+  return (stat(name.c_str(), &buffer) == 0);
+}
+
+class BlobLog {
+ public:
+  explicit BlobLog(const char *prefix, const char *extension) {
+    int index = 0;
+    while (true) {
+      std::string file = prefix + std::to_string(index) + extension;
+      if (FileExist(file)) {
+        index++;
+      } else {
+        printf("Logging to file (%s)\n", file.c_str());
+        ofst_.open(file);
+        assert(ofst_.is_open());
+        break;
+      }
+    }
+  }
+
+  ~BlobLog() { ofst_.close(); }
+
+  void WriteLogEntry(DataRef data) { ofst_.write(&data[0], data.size()); }
+
+ private:
+  std::ofstream ofst_;
+};
+
+class UdpClient : public ::aos::events::EpollEvent {
+ public:
+  UdpClient(int port, ::std::function<void(void *, size_t)> callback)
+      : ::aos::events::EpollEvent(RXUdpSocket::SocketBindListenOnPort(port)),
+        callback_(callback) {}
+
+ private:
+  ::std::function<void(void *, size_t)> callback_;
+
+  void ReadEvent() override {
+    char data[1024];
+    size_t received_data_size = Recv(data, sizeof(data));
+    callback_(data, received_data_size);
+  }
+
+  size_t Recv(void *data, int size) {
+    return PCHECK(recv(fd(), static_cast<char *>(data), size, 0));
+  }
+};
+
+// TODO(aschuh & michael) Pull this out.
+template <typename PB>
+class ProtoUdpClient : public UdpClient {
+ public:
+  ProtoUdpClient(int port, ::std::function<void(const PB &)> proto_callback)
+      : UdpClient(port, ::std::bind(&ProtoUdpClient::ReadData, this,
+                                    ::std::placeholders::_1,
+                                    ::std::placeholders::_2)),
+        proto_callback_(proto_callback) {}
+
+ private:
+  ::std::function<void(const PB &)> proto_callback_;
+
+  void ReadData(void *data, size_t size) {
+    PB pb;
+    // TODO(Brian): Do something useful if parsing fails.
+    pb.ParseFromArray(data, size);
+    proto_callback_(pb);
+  }
+};
+
+class MjpegDataSocket : public aos::events::SocketConnection {
+ public:
+  MjpegDataSocket(aos::events::TCPServerBase *server, int fd)
+      : aos::events::SocketConnection(server, fd) {
+    SetEvents(EPOLLOUT | EPOLLET);
+  }
+
+  ~MjpegDataSocket() { printf("Closed connection on descriptor %d\n", fd()); }
+
+  void DirectEvent(uint32_t events) override {
+    if (events & EPOLLOUT) {
+      NewDataToSend();
+      events &= ~EPOLLOUT;
+    }
+    // Other end hung up.  Ditch the connection.
+    if (events & EPOLLHUP) {
+      CloseConnection();
+      events &= ~EPOLLHUP;
+      return;
+    }
+    if (events) {
+      aos::events::EpollEvent::DirectEvent(events);
+    }
+  }
+
+  void ReadEvent() override {
+    ssize_t count;
+    char buf[512];
+    while (true) {
+      // Always read everything so epoll won't return immediately.
+      count = read(fd(), &buf, sizeof buf);
+      if (count <= 0) {
+        if (errno != EAGAIN) {
+          CloseConnection();
+          return;
+        }
+        break;
+      } else if (!ready_to_receive_) {
+        // This 4 should match "\r\n\r\n".length();
+        if (match_i_ >= 4) {
+          printf("reading after last match\n");
+          continue;
+        }
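+        // Incrementally match the "\r\n\r\n" that terminates the client's
+        // HTTP request header; match_i_ counts the bytes matched so far.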
+        for (char c : aos::vision::DataRef(&buf[0], count)) {
+          if (c == "\r\n\r\n"[match_i_]) {
+            ++match_i_;
+            if (match_i_ >= 4) {
+              if (!ready_to_receive_) {
+                ready_to_receive_ = true;
+                RasterHeader();
+              }
+            }
+          } else if (match_i_ != 0) {
+            // A mismatch resets the matcher; a '\r' may start a new match.
+            match_i_ = 0;
+            if (c == '\r') match_i_ = 1;
+          }
+        }
+      }
+    }
+  }
+
+  void RasterHeader() {
+    output_buffer_.push_back(mjpg_header);
+    NewDataToSend();
+  }
+
+  void RasterFrame(std::shared_ptr<Frame> frame) {
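+    // Skip this frame if the previous one is still queued to send or the
+    // client has not finished its HTTP request yet.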
+    if (!output_buffer_.empty() || !ready_to_receive_) return;
+    sending_frame_ = frame;
+    aos::vision::DataRef data = frame->data;
+
+    size_t n_written = snprintf(data_header_tmp_, sizeof(data_header_tmp_),
+                                "--boundary\r\n"
+                                "Content-type: image/jpg\r\n"
+                                "Content-Length: %zu\r\n\r\n",
+                                data.size());
+    // snprintf returns the length it would have written, so any value >=
+    // the buffer size means the header was truncated.  This should never
+    // happen because the buffer is generously sized.
+    if (n_written >= sizeof(data_header_tmp_)) {
+      fprintf(stderr, "wrong sized buffer\n");
+      exit(-1);
+    }
+    LOG(INFO, "Frame size in bytes: data.size() = %zu\n", data.size());
+    output_buffer_.push_back(aos::vision::DataRef(data_header_tmp_, n_written));
+    output_buffer_.push_back(data);
+    output_buffer_.push_back("\r\n\r\n");
+    NewDataToSend();
+  }
+
+  void NewFrame(std::shared_ptr<Frame> frame) { RasterFrame(std::move(frame)); }
+
+  void NewDataToSend() {
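+    // With edge-triggered epoll (EPOLLET), keep writing until the socket
+    // would block; the next EPOLLOUT resumes from the remaining buffer.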
+    while (!output_buffer_.empty()) {
+      auto &data = *output_buffer_.begin();
+
+      while (!data.empty()) {
+        int len = send(fd(), data.data(), data.size(), MSG_NOSIGNAL);
+        if (len == -1) {
+          if (errno == EAGAIN) {
+            // The next EPOLLOUT event will pick this up.
+            return;
+          } else {
+            CloseConnection();
+            return;
+          }
+        } else {
+          data.remove_prefix(len);
+        }
+      }
+      output_buffer_.pop_front();
+    }
+  }
+
+ private:
+  char data_header_tmp_[512];
+  std::shared_ptr<Frame> sending_frame_;
+  std::deque<aos::vision::DataRef> output_buffer_;
+
+  bool ready_to_receive_ = false;
+  void CloseConnection() {
+    loop()->Delete(this);
+    close(fd());
+    delete this;
+  }
+  size_t match_i_ = 0;
+};
+
+class CameraStream : public ::aos::vision::ImageStreamEvent {
+ public:
+  CameraStream(::aos::vision::CameraParams params, const ::std::string &fname,
+               TCPServer<MjpegDataSocket> *tcp_server, bool log,
+               ::std::function<void()> frame_callback)
+      : ImageStreamEvent(fname, params),
+        tcp_server_(tcp_server),
+        frame_callback_(frame_callback) {
+    if (log) {
+      log_.reset(new BlobLog(FLAGS_log.c_str(), ".dat"));
+    }
+  }
+
+  void set_active(bool active) { active_ = active; }
+
+  void set_flip(bool flip) { flip_ = flip; }
+
+  bool active() const { return active_; }
+
+  void ProcessImage(DataRef data,
+                    monotonic_clock::time_point /*monotonic_now*/) {
+    ++sampling;
+    // Log one frame out of every 20.
+    if (sampling == 20) {
+      int tmp_size = data.size() + sizeof(int32_t);
+      char *buf;
+      std::string log_record;
+      log_record.resize(tmp_size, 0);
+      {
+        buf = Int32Codec::Write(&log_record[0], tmp_size);
+        data.copy(buf, data.size());
+      }
+      if (log_) {
+        log_->WriteLogEntry(log_record);
+      }
+      sampling = 0;
+    }
+
+    std::string image_out;
+
+    if (flip_) {
+      unsigned int out_size = image_buffer_out_.size();
+      flip_image(data.data(), data.size(), &image_buffer_out_[0], &out_size);
+      image_out.assign(&image_buffer_out_[0], &image_buffer_out_[out_size]);
+    } else {
+      image_out = std::string(data);
+    }
+
+    if (active_) {
+      auto frame = std::make_shared<Frame>(Frame{image_out});
+      tcp_server_->Broadcast(
+          [frame](MjpegDataSocket *event) { event->NewFrame(frame); });
+    }
+    frame_callback_();
+  }
+
+ private:
+  int sampling = 0;
+  TCPServer<MjpegDataSocket> *tcp_server_;
+  ::std::unique_ptr<BlobLog> log_;
+  ::std::function<void()> frame_callback_;
+  bool active_ = false;
+  bool flip_ = false;
+  std::array<JOCTET, 100000> image_buffer_out_;
+};
+
+int main(int argc, char **argv) {
+  gflags::ParseCommandLineFlags(&argc, &argv, false);
+  ::aos::logging::Init();
+  ::aos::logging::AddImplementation(
+      new ::aos::logging::StreamLogImplementation(stderr));
+  TCPServer<MjpegDataSocket> tcp_server_(80);
+  aos::vision::CameraParams params0;
+  params0.set_exposure(FLAGS_camera0_exposure);
+  params0.set_brightness(-40);
+  params0.set_width(320);
+  // params0.set_fps(10);
+  params0.set_height(240);
+
+  aos::vision::CameraParams params1 = params0;
+  params1.set_exposure(FLAGS_camera1_exposure);
+
+  ::y2019::VisionStatus vision_status;
+  LOG(INFO,
+      "The UDP socket should be on port 5001 to 10.9.71.2 for "
+      "the competition robot.\n");
+  LOG(INFO, "Starting UDP socket on port 5001 to %s\n",
+      FLAGS_roborio_ip.c_str());
+  ::aos::events::ProtoTXUdpSocket<::y2019::VisionStatus> status_socket(
+      FLAGS_roborio_ip.c_str(), 5001);
+
+  ::std::unique_ptr<CameraStream> camera1;
+  ::std::unique_ptr<CameraStream> camera0(new CameraStream(
+      params0, "/dev/video0", &tcp_server_, !FLAGS_log.empty(),
+      [&camera0, &status_socket, &vision_status]() {
+        vision_status.set_low_frame_count(vision_status.low_frame_count() + 1);
+        LOG(INFO, "Got a frame cam0\n");
+        if (camera0->active()) {
+          status_socket.Send(vision_status);
+        }
+      }));
+  if (!FLAGS_single_camera) {
+    camera1.reset(new CameraStream(
+        params1, "/dev/video1", &tcp_server_, false,
+        [&camera1, &status_socket, &vision_status]() {
+          vision_status.set_high_frame_count(vision_status.high_frame_count() +
+                                             1);
+          LOG(INFO, "Got a frame cam1\n");
+          if (camera1->active()) {
+            status_socket.Send(vision_status);
+          }
+        }));
+  }
+
+  ProtoUdpClient<VisionControl> udp_client(
+      5000, [&camera0, &camera1](const VisionControl &vision_control) {
+        bool cam0_active = false;
+        camera0->set_flip(vision_control.flip_image());
+        if (camera1) {
+          camera1->set_flip(vision_control.flip_image());
+          cam0_active = !vision_control.high_video();
+          camera0->set_active(!vision_control.high_video());
+          camera1->set_active(vision_control.high_video());
+        } else {
+          cam0_active = true;
+          camera0->set_active(true);
+        }
+        LOG(INFO, "Got control packet, cam%d active\n", cam0_active ? 0 : 1);
+      });
+
+  // Default to camera0
+  camera0->set_active(true);
+  if (camera1) {
+    camera1->set_active(false);
+  }
+
+  aos::events::EpollLoop loop;
+  loop.Add(&tcp_server_);
+  loop.Add(camera0.get());
+  if (camera1) {
+    loop.Add(camera1.get());
+  }
+  loop.Add(&udp_client);
+
+  printf("Running Camera\n");
+  loop.Run();
+}
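For reference, the stream this serves is plain HTTP multipart: mjpg_header is sent once per connection, then RasterFrame wraps every JPEG in a boundary part with a Content-Length header. On the wire a connection looks roughly like this (headers abridged; the frame length is taken from the log excerpt above, and the JPEG bytes are elided):

    HTTP/1.0 200 OK
    Connection: close
    Content-Type: multipart/x-mixed-replace; boundary=--boundary

    --boundary
    Content-type: image/jpg
    Content-Length: 29161

    <29161 bytes of JPEG data>

    --boundary
    Content-type: image/jpg
    ...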
diff --git a/y2019/image_streamer/videomappings.cfg b/y2019/image_streamer/videomappings.cfg
new file mode 100644
index 0000000..0c6c91b
--- /dev/null
+++ b/y2019/image_streamer/videomappings.cfg
@@ -0,0 +1,544 @@
+############ 971 Spartan Robotics
+#
+# This JeVois configuration file works with the 2019 image_streamer program
+# on the ODROID that is used to send images back to the driver station.  Each uncommented
+# line in this file is a camera configuration.  The JeVois camera can have
+# many configurations.  I think it can have as many as 50.  The default configuration
+# has a '*' at the end of the line.  To avoid confusion and make it clear which
+# configuration to use, only one configuration is active in this file.
+#
+# This file resides on the JeVois in /JEVOIS/config/videomappings.cfg
+# It can be edited by mounting the microSD card on a computer and editing it.
+# It can also be edited by mounting the JeVois disk under Debian.  This
+# is done by running
+#    sudo y2019/vision/tools/jevois-cmd usbsd
+# This mounts the disk in /media/${USER}/JEVOIS
+#
+# Michael, Bahar, and Jay.  March 1, 2019.
+#
+#
+######################################################################################################################
+#
+# JeVois Smart Embedded Machine Vision Toolkit - Copyright (C) 2016 by Laurent Itti, the University of Southern
+# California (USC), and iLab at USC. See http://iLab.usc.edu and http://jevois.org for information about this project.
+#
+# This file is part of the JeVois Smart Embedded Machine Vision Toolkit.  This program is free software; you can
+# redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software
+# Foundation, version 2.  This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+# License for more details.  You should have received a copy of the GNU General Public License along with this program;
+# if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# Contact information: Laurent Itti - 3641 Watt Way, HNB-07A - Los Angeles, CA 90089-2520 - USA.
+# Tel: +1 213 740 3527 - itti@pollux.usc.edu - http://iLab.usc.edu - http://jevois.org
+######################################################################################################################
+#
+# JeVois smart camera operation modes and video mappings
+#
+# Format is: <USBmode> <USBwidth> <USBheight> <USBfps> <CAMmode> <CAMwidth> <CAMheight> <CAMfps> <Vendor> <Module> [*]
+#
+# CamMode can be only one of: YUYV, BAYER, RGB565
+# USBmode can be only one of: YUYV, GREY, MJPG, BAYER, RGB565, BGR24, NONE
+
+# USB to camera mode mappings (when USBmode is not NONE) are organized according to output format, which should be
+# unique (no two entries in this file should have same USBmode/USBwidth/USBheight/USBfps). Indeed, these modes can only
+# be selected by the host computer's video grabbing software, and they are selected by picking an output format in that
+# software. These modes cannot be chosen by the JeVois system itself. For these modes, the Module's process(inframe,
+# outframe) function will be called on every frame. Beware that Macs will not accept modes for which USBwidth is not a
+# multiple of 16.
+
+# Camera-only modes (when USBmode is NONE) mode mappings have no video output over USB, and are selected by interacting
+# with the JeVois hardware over serial ports. When USBmode is NONE, USBwidth, USBHeight, and USBfps are ignored and
+# should be set to 0 here. For these modes, the Module's process(inframe) function will be called on every frame. These
+# modes are usually the ones you would use when interfacing the JeVois camera to an Arduino or similar system that
+# cannot stream video over USB and will just receive data from the JeVois camera over a serial port.
+
+# The optional * at the end of one line indicates the format that should be the default one announced by the device to
+# the USB host. This is the one that most webcam programs will select by default when you start them. Note that the
+# guvcview program on linux seems to ignore this and to instead select the last mode you had selected the last time you
+# used the camera. This * cannot be on a mapping that has NONE USBmode. There should be only one * in the whole file.
+
+# Model JeVois-A33 camera sensor supported resolutions and frame rates:
+#
+#  SXGA (1280 x 1024): up to 15 fps
+#   VGA ( 640 x  480): up to 30 fps
+#   CIF ( 352 x  288): up to 60 fps
+#  QVGA ( 320 x  240): up to 60 fps
+#  QCIF ( 176 x  144): up to 120 fps
+# QQVGA ( 160 x  120): up to 60 fps
+# QQCIF (  88 x   72): up to 120 fps
+
+# Frame rates can be set to any value from 0.1fps to the maximum supported for the selected resolution. This is very
+# useful to avoid dropping frames. For example if you have an algorithm that runs at 26.3fps after all possible
+# optimizations, you can set the camera (and usb) frame rate to 26.3 and you will not drop frames (unless your algorithm
+# momentarily performs slower, hence adding a small margin may be a good idea, e.g., select 26.1fps camera and usb
+# rates). This is better than setting the frame rate to 30.0 as this would mean that every so often you would miss the
+# next camera frame and then have to wait for the next one to be captured. If your algorithm really runs at 26.3fps but
+# you specify 30.0fps camera frame rate, then the frames will actually end up being pumped to USB at only 15.0fps (i.e.,
+# by the time you finish processing the current frame, you have missed the next one from the camera, and you need to
+# wait for the following one).
+
+# Note on USB transfer rate: the maximum actual pixel data transfer rate is 3070*8000 = 23.9 Mbytes/s (which is 3kb/USB
+# microframe, max "high bandwidth" setting). Although USB 2.0 has a maximum theoretical rate of 480 Mbit/s, this
+# includes protocol overhead and not all of the bandwidth is available for isochronous (real-time stream) transfers,
+# which we use.  This means that SXGA YUYV (2 bytes/pixel) can only transfer at a max rate of ~9.3 fps over the USB
+# link, although the camera can grab SXGA YUYV at 15 fps. SXGA in Bayer can achieve 15 fps transfer over USB since it
+# only uses 1 byte/pixel.
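+# Worked out, using the figures above: one SXGA YUYV frame is 1280 x 1024
+# pixels x 2 bytes/pixel = 2,621,440 bytes, and the link carries 3070 bytes
+# x 8000 microframes/s = 24,560,000 bytes/s, so at most
+# 24,560,000 / 2,621,440 ~= 9.4 frames/s fit through USB (the ~9.3 fps above).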
+
+# To test various video formats on a Linux host, the best is usually to use guvcview. However, this has two issues: 1)
+# it adds some formats which we do not natively support, like RGB3, YU12, YV12, etc, probably by doing some pixel
+# conversion internally over the actual supported modes; 2) support for RGB565 seems brittle, guvcview often crashes
+# when changing resolutions in RGB565 (called RGBP for RGB Packed).
+#
+# Hence, you may want to also try ffplay from the ffmpeg project, which can display all our supported modes and will
+# reject a mode if it does not exactly match what the hardware supports. Example:
+#
+#   ffplay /dev/video0 -pixel_format yuyv422 -video_size 640x480
+#
+# The pixel_format values are: 'yuyv422' for YUYV, 'gray' for GRAY, 'rgb565' for RGB565, 'mjpeg' for MJPG, 'bgr24' for
+# BGR24, and 'bayer_rggb8' for BAYER. You can run 'ffplay -pix_fmts' to see the list of pixel formats that ffplay
+# supports.
+#
+# Here is another example where we record the output of JeVois to a file:
+#
+#   ffmpeg -f v4l2 -pixel_format rgb565 -video_size 320x240 -framerate 22 -i /dev/video0 output.mp4
+
+# On Mac OSX, we recommend using the CamTwist app, as described in the JeVois documentation. You can also use ffplay for
+# OSX: Download the pre-compiled ffplay binary from the ffmpeg web site, and then run:
+#
+#   ~/bin/ffplay -f avfoundation -i "JeVois" -video_size 640x300 -framerate 60 -pixel_format yuyv422
+#
+# (assuming you saved ffplay into your ~/bin/ directory).
+
+# Mac compatibility notes: The JeVois smart camera is correctly detected on Macs and works with PhotoBooth as long as:
+# 1) you have a mapping that outputs YUYV 640x480 (this is the one that PhotoBooth will select (at least on recent OSX
+# like El Capitan, etc); beware that it will also flip the image horizontally); 2) you have no active (not
+# commented-out) mapping with BAYER, RGB565, or BGR24 output. If you have any un-commented mapping with BAYER, RGB565,
+# or BGR24 in your videomappings.cfg, your JeVois smart camera will still be detected by your Mac, PhotoBooth will start
+# and try to use the camera, but it will only display a black screen. Our guess is that this is a bug in the Mac camera
+# driver. It is ok to have additional mappings with YUYV output, as well as mappings with MJPG or GREY output.
+
+#####################################################################################################
+#### Pass-through and simple pixel format conversion modes:
+#####################################################################################################
+#
+##YUYV 1280 960 15.0 BAYER 1280 960 15.0 JeVois Convert
+##YUYV 1280 720 15.0 BAYER 1280 720 15.0 JeVois Convert
+##YUYV 640 480 30.0 BAYER 640 480 30.0 JeVois Convert
+##YUYV 640 360 30.0 BAYER 640 360 30.0 JeVois Convert
+##YUYV 320 240 60.0 BAYER 320 240 60.0 JeVois Convert
+##YUYV 320 180 60.0 BAYER 320 180 60.0 JeVois Convert
+##YUYV 160 120 60.0 BAYER 160 120 60.0 JeVois Convert
+##YUYV 160 90 60.0 BAYER 160 90 60.0 JeVois Convert
+#
+##BAYER 1280 960 15.0 BAYER 1280 960 15.0 JeVois PassThrough
+##BAYER 1280 720 15.0 BAYER 1280 720 15.0 JeVois PassThrough
+##BAYER 640 480 30.0 BAYER 640 480 30.0 JeVois PassThrough
+##BAYER 640 360 30.0 BAYER 640 360 30.0 JeVois PassThrough
+##BAYER 320 240 60.0 BAYER 320 240 60.0 JeVois PassThrough
+##BAYER 320 180 60.0 BAYER 320 180 60.0 JeVois PassThrough
+##BAYER 160 120 60.0 BAYER 160 120 60.0 JeVois PassThrough
+##BAYER 160 90 60.0 BAYER 160 90 60.0 JeVois PassThrough
+#
+##BAYER 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##BGR24 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##GREY 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+##RGB565 640 480 26.8 YUYV 640 480 26.8 JeVois Convert
+#
+##MJPG 640 480 20.0 YUYV 640 480 20.0 JeVois Convert
+##MJPG 352 288 60.0 BAYER 352 288 60.0 JeVois Convert
+##MJPG 320 240 30.0 RGB565 320 240 30.0 JeVois Convert
+##MJPG 320 240 15.0 YUYV 320 240 15.0 JeVois Convert
+##MJPG 320 240 60.0 RGB565 320 240 60.0 JeVois Convert
+##MJPG 176 144 120.0 BAYER 176 144 120.0 JeVois Convert
+##MJPG 160 120 60.0 YUYV 160 120 60.0 JeVois Convert
+##MJPG 88 72 120.0 RGB565 88 72 120.0 JeVois Convert
+#
+##BAYER 1280 1024 15.0 BAYER 1280 1024 15.0 JeVois PassThrough
+##BAYER 640 480 30.0 BAYER 640 480 30.0 JeVois PassThrough
+##BAYER 352 288 60.0 BAYER 352 288 60.0 JeVois PassThrough
+##BAYER 320 240 60.0 BAYER 320 240 60.0 JeVois PassThrough
+##BAYER 176 144 120.0 BAYER 176 144 120.0 JeVois PassThrough
+##BAYER 160 120 60.0 BAYER 160 120 60.0 JeVois PassThrough
+##BAYER 88 72 120.0 BAYER 88 72 120.0 JeVois PassThrough
+#
+##RGB565 1280 1024 15.0 RGB565 1280 1024 15.0 JeVois PassThrough
+##RGB565 640 480 30.0 RGB565 640 480 30.0 JeVois PassThrough
+##RGB565 320 240 60.0 RGB565 320 240 60.0 JeVois PassThrough
+##RGB565 176 144 120.0 RGB565 176 144 120.0 JeVois PassThrough
+##RGB565 160 120 60.0 RGB565 160 120 60.0 JeVois PassThrough
+##RGB565 88 72 120.0 RGB565 88 72 120.0 JeVois PassThrough
+#
+##YUYV 1280 1024 7.5 YUYV 1280 1024 7.5 JeVois PassThrough
+## Bahar and Michael uncommented the following line.  2/27/2019
+##YUYV 640 480 30.0 YUYV 640 480 30.0 JeVois PassThrough
+# BAYER looked good.
+#MJPG 640 480 30.0 BAYER 640 480 30.0 JeVois Convert *
+MJPG 640 480 30.0 RGB565 640 480 30.0 JeVois Convert *
+##YUYV 640 480 30.0 YUYV 640 480 30.0 JeVois SaveVideo
+##YUYV 640 480 19.6 YUYV 640 480 19.6 JeVois PassThrough
+##YUYV 640 480 12.0 YUYV 640 480 12.0 JeVois PassThrough
+##YUYV 640 480 8.3 YUYV 640 480 8.3 JeVois PassThrough
+##YUYV 640 480 7.5 YUYV 640 480 7.5 JeVois PassThrough
+##YUYV 640 480 5.5 YUYV 640 480 5.5 JeVois PassThrough
+#
+#YUYV 320 240 60.0 YUYV 320 240 60.0 JeVois SaveVideo
+##YUYV 320 240 30.0 YUYV 320 240 30.0 JeVois SaveVideo
+##YUYV 320 240 15.0 YUYV 320 240 15.0 JeVois SaveVideo
+#
+##YUYV 160 120 60.0 YUYV 160 120 60.0 JeVois SaveVideo
+##YUYV 160 120 30.0 YUYV 160 120 30.0 JeVois PassThrough
+#
+##YUYV 352 288 60.0 YUYV 352 288 60.0 JeVois SaveVideo
+##YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PassThrough
+#
+#YUYV 176 144 120.0 YUYV 176 144 120.0 JeVois SaveVideo
+##YUYV 176 144 60.0 YUYV 176 144 60.0 JeVois PassThrough
+##YUYV 176 144 30.0 YUYV 176 144 30.0 JeVois PassThrough
+#
+##YUYV 88 72 120.0 YUYV 88 72 120.0 JeVois SaveVideo
+##YUYV 88 72 60.0 YUYV 88 72 60.0 JeVois PassThrough
+##YUYV 88 72 30.0 YUYV 88 72 30.0 JeVois PassThrough
+#
+#####################################################################################################
+#### Save video to disk, no preview over USB
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 60.0 JeVois SaveVideo
+#NONE 0 0 0 YUYV 320 240 30.0 JeVois SaveVideo
+#NONE 0 0 0 YUYV 176 144 120.0 JeVois SaveVideo
+#
+#####################################################################################################
+#### Demo: Saliency + gist + face detection + object recognition
+#####################################################################################################
+#
+#YUYV 640 312 50.0 YUYV 320 240 50.0 JeVois DemoSalGistFaceObj
+#
+#####################################################################################################
+#### Demo: JeVois intro movie, then Saliency + gist + face detection + object recognition
+#####################################################################################################
+#
+#YUYV 640 360 30.0 YUYV 320 240 30.0 JeVois JeVoisIntro
+#YUYV 640 480 30.0 YUYV 320 240 30.0 JeVois JeVoisIntro
+#
+#####################################################################################################
+#### Demo: Saliency and gist
+#####################################################################################################
+#
+##YUYV 176 90 120.0 YUYV 88 72 120.0 JeVois DemoSaliency
+##YUYV 320 150 60.0 YUYV 160 120 60.0 JeVois DemoSaliency
+##YUYV 352 180 120.0 YUYV 176 144 120.0 JeVois DemoSaliency
+##YUYV 352 180 100.0 YUYV 176 144 100.0 JeVois DemoSaliency
+## Michael and Bahar removed the ' *' from the end of the following line. 2/27/2019
+#YUYV 640 300 60.0 YUYV 320 240 60.0 JeVois DemoSaliency
+##YUYV 704 360 30.0 YUYV 352 288 30.0 JeVois DemoSaliency
+##YUYV 1280 600 15.0 YUYV 640 480 15.0 JeVois DemoSaliency
+#
+#####################################################################################################
+#### Production: Saliency and gist
+#####################################################################################################
+#
+## saliency + feature maps + gist
+##GREY 120 25 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency + feature maps
+##GREY 120 15 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency + gist
+##GREY 20 73 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## saliency only
+##GREY 20 15 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+## gist only
+##GREY 72 16 60.0 YUYV 320 240 60.0 JeVois SaliencyGist
+#
+#####################################################################################################
+#### Demo: Background subtraction
+#####################################################################################################
+#
+##YUYV 640 240 15.0 YUYV 320 240 15.0 JeVois DemoBackgroundSubtract
+#YUYV 320 120 30.0 YUYV 160 120 30.0 JeVois DemoBackgroundSubtract
+#
+#####################################################################################################
+#### Demo: QR-code and barcode detection and decoding
+#####################################################################################################
+#
+##YUYV 640 526 15.0 YUYV 640 480 15.0 JeVois DemoQRcode
+#YUYV 320 286 30.0 YUYV 320 240 30.0 JeVois DemoQRcode
+##NONE 0 0 0 YUYV 640 480 15.0 JeVois DemoQRcode
+##NONE 0 0 0 YUYV 320 240 30.0 JeVois DemoQRcode
+#
+#####################################################################################################
+#### Road following using vanishing point
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 30.0 JeVois RoadNavigation
+##NONE 0 0 0 YUYV 176 144 120.0 JeVois RoadNavigation
+#YUYV 320 256 30.0 YUYV 320 240 30.0 JeVois RoadNavigation
+##YUYV 176 160 120.0 YUYV 176 144 120.0 JeVois RoadNavigation
+#
+#####################################################################################################
+#### Demo of ARM-Neon SIMD image processing
+#####################################################################################################
+#
+#YUYV 960 240 30.0 YUYV 320 240 30.0 JeVois DemoNeon
+#
+#####################################################################################################
+#### Dense SIFT using VLfeat library
+#####################################################################################################
+#
+## very slow, min keypoint step is 17
+##YUYV 448 240 5.0 YUYV 320 240 5.0 JeVois DenseSift
+#
+## slow too, min keypoint step is 11
+##YUYV 288 120 5.0 YUYV 160 120 5.0 JeVois DenseSift
+#
+## raw keypoints only, assuming step=11, binsize=8
+#GREY 128 117 5.0 YUYV 160 120 5.0 JeVois DenseSift
+#
+#####################################################################################################
+#### Salient regions
+#####################################################################################################
+#
+#YUYV 64 192 25.0 YUYV 320 240 25.0 JeVois SalientRegions
+##YUYV 100 400 10.0 YUYV 640 480 10.0 JeVois SalientRegions
+#
+#####################################################################################################
+#### Superpixel image segmentation/clustering
+#####################################################################################################
+#
+#GREY 320 240 30.0 YUYV 320 240 30.0 JeVois SuperPixelSeg
+#
+#####################################################################################################
+#### Eye tracking using the openEyes toolkit
+#####################################################################################################
+#
+##GREY 640 480 30.0 YUYV 640 480 30.0 JeVois DemoEyeTracker
+##GREY 320 240 60.0 YUYV 320 240 60.0 JeVois DemoEyeTracker
+#GREY 176 144 120.0 YUYV 176 144 120.0 JeVois DemoEyeTracker
+#
+#####################################################################################################
+#### Demo: ArUco augmented-reality markers detection and decoding
+#####################################################################################################
+#
+#NONE 0 0 0.0 YUYV 320 240 30.0 JeVois DemoArUco
+#YUYV 320 260 30.0 YUYV 320 240 30.0 JeVois DemoArUco
+#YUYV 640 500 20.0 YUYV 640 480 20.0 JeVois DemoArUco
+#
+#####################################################################################################
+#### Edge detection using Canny
+#####################################################################################################
+#
+#GREY 640 480 29.0 YUYV 640 480 29.0 JeVois EdgeDetection
+#GREY 320 240 59.0 YUYV 320 240 59.0 JeVois EdgeDetection
+##GREY 176 144 119.0 YUYV 176 144 119.0 JeVois EdgeDetection
+#
+#####################################################################################################
+#### Edge detection using 4 Canny filters in parallel, with different settings
+#####################################################################################################
+#
+#GREY 320 960 45.0 YUYV 320 240 45.0 JeVois EdgeDetectionX4
+#
+#####################################################################################################
+#### Color-based object tracker
+#####################################################################################################
+#
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois ObjectTracker
+#YUYV 320 254 60.0 YUYV 320 240 60.0 JeVois ObjectTracker
+#
+#####################################################################################################
+#### GPU color image processing demo
+#####################################################################################################
+#
+##RGB565 320 240 22.0 YUYV 320 240 22.0 JeVois DemoGPU
+#
+#####################################################################################################
+#### Combo CPU multithreaded saliency/gist + 4x GPU grayscale image processing demo
+#####################################################################################################
+#
+#GREY 160 495 60.0 YUYV 160 120 60.0 JeVois DemoCPUGPU
+#
+#####################################################################################################
+#### Fast optical flow computation
+#####################################################################################################
+#
+#GREY 176 288 100 YUYV 176 144 100 JeVois OpticalFlow
+#
+#####################################################################################################
+#### Object detection using SURF keypoints
+#####################################################################################################
+#
+#YUYV 320 252 30.0 YUYV 320 240 30.0 JeVois ObjectDetect
+#
+#####################################################################################################
+#### Salient region detection and identification using SURF keypoints
+#####################################################################################################
+#
+#YUYV 320 288 30.0 YUYV 320 240 30.0 JeVois SaliencySURF
+#
+#####################################################################################################
+#### CPU + GPU + NEON burn test
+#####################################################################################################
+#
+##YUYV 640 300 10.0 YUYV 320 240 10.0 JeVois BurnTest
+#
+#####################################################################################################
+#### Python tests
+#####################################################################################################
+#
+#YUYV 640 480 15.0 YUYV 640 480 15.0 JeVois PythonTest
+#GREY 640 480 20.0 YUYV 640 480 20.0 JeVois PythonOpenCV
+#YUYV 352 288 30.0 YUYV 352 288 30.0 JeVois PythonSandbox
+#
+#####################################################################################################
+#### Image color filtering
+#####################################################################################################
+#
+#YUYV 640 240 30.0 YUYV 320 240 30.0 JeVois ColorFiltering
+#
+#####################################################################################################
+#### Dice counting tutorial
+#####################################################################################################
+#
+#YUYV 640 480 7.5 YUYV 640 480 7.5 JeVois DiceCounter
+#
+#####################################################################################################
+#### Augmented reality markers with ARtoolkit
+#####################################################################################################
+#
+#NONE 0 0 0 YUYV 320 240 60.0 JeVois DemoARtoolkit
+#NONE 0 0 0 YUYV 640 480 30.0 JeVois DemoARtoolkit
+#NONE 0 0 0 YUYV 1280 1024 15.0 JeVois DemoARtoolkit
+#YUYV 320 258 60.0 YUYV 320 240 60.0 JeVois DemoARtoolkit
+##YUYV 640 498 30.0 YUYV 640 480 30.0 JeVois DemoARtoolkit
+#
+#####################################################################################################
+#### Augmented reality markers with ARtoolkit, ArUco, and QR-Code
+#####################################################################################################
+#
+##YUYV 320 306 50.0 YUYV 320 240 50.0 JeVois MarkersCombo
+#YUYV 640 546 20.0 YUYV 640 480 20.0 JeVois MarkersCombo
+#
+#####################################################################################################
+#### Detect objects in scenes using darknet YOLO deep neural network
+#####################################################################################################
+#
+#YUYV 1280 480 15.0 YUYV 640 480 15.0 JeVois DarknetYOLO
+#
+#####################################################################################################
+#### Detect objects in scenes using darknet deep neural network
+#####################################################################################################
+#
+#YUYV 544 240 15.0 YUYV 320 240 15.0 JeVois DarknetSingle
+##YUYV 448 240 15.0 YUYV 320 240 15.0 JeVois DarknetSingle
+##YUYV 864 480 15.0 YUYV 640 480 15.0 JeVois DarknetSingle
+##YUYV 1088 480 15.0 YUYV 640 480 15.0 JeVois DarknetSingle
+#
+#####################################################################################################
+#### Detect salient objects in scenes using saliency + darknet deep neural network
+#####################################################################################################
+#
+##YUYV 460 240 15.0 YUYV 320 240 15.0 JeVois DarknetSaliency # not for mac (width not multiple of 16)
+##YUYV 560 240 15.0 YUYV 320 240 15.0 JeVois DarknetSaliency
+#YUYV 880 480 10.0 YUYV 640 480 10.0 JeVois DarknetSaliency
+#
+#####################################################################################################
+#### FIRST robotics object detection example in C++
+#####################################################################################################
+#
+##YUYV 352 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
+##YUYV 176 194 120.0 YUYV 176 144 120.0 JeVois FirstVision
+#YUYV 640 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
+#YUYV 320 290 60.0 YUYV 320 240 60.0 JeVois FirstVision
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstVision
+#NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstVision
+#
+#####################################################################################################
+#### FIRST robotics object detection example in Python
+#####################################################################################################
+#
+#YUYV 640 252 60.0 YUYV 320 240 60.0 JeVois FirstPython
+##YUYV 320 252 60.0 YUYV 320 240 60.0 JeVois FirstPython
+#NONE 0 0 0.0 YUYV 320 240 60.0 JeVois FirstPython
+#NONE 0 0 0.0 YUYV 176 144 120.0 JeVois FirstPython
+#
+#####################################################################################################
+#### Object detection using SURF and 6D pose estimation
+#####################################################################################################
+#
+#YUYV 320 262 15.0 YUYV 320 240 15.0 JeVois PythonObject6D
+##YUYV 640 502 10.0 YUYV 640 480 10.0 JeVois PythonObject6D
+#NONE 0 0 0.0 YUYV 320 240 15.0 JeVois PythonObject6D
+#
+#####################################################################################################
+#### Edge detection using 4 Canny filters in parallel, with different settings, example python parallel
+#####################################################################################################
+#
+## Disabled by default because Python multiprocessing is very buggy. Note that enabling this mapping may also
+## render your JeVois camera undetectable by OSX hosts.
+#
+##YUYV 1280 240 30.0 YUYV 320 240 30.0 JeVois PythonParallel
+#
+#####################################################################################################
+#### Detect objects in scenes using tensorflow deep neural network
+#####################################################################################################
+#
+##YUYV 560 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
+#YUYV 464 240 15.0 YUYV 320 240 15.0 JeVois TensorFlowSingle
+##YUYV 880 480 15.0 YUYV 640 480 15.0 JeVois TensorFlowSingle
+##YUYV 1104 480 15.0 YUYV 640 480 15.0 JeVois TensorFlowSingle
+#
+#####################################################################################################
+#### Detect salient objects in scenes using saliency + tensorflow deep neural network
+#####################################################################################################
+#
+##YUYV 448 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+#YUYV 512 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+##YUYV 544 240 30.0 YUYV 320 240 30.0 JeVois TensorFlowSaliency
+#
+#####################################################################################################
+#### Detect objects in scenes using tensorflow deep neural network, easy version
+#####################################################################################################
+#
+#YUYV 320 308 30.0 YUYV 320 240 30.0 JeVois TensorFlowEasy
+##YUYV 640 548 30.0 YUYV 640 480 30.0 JeVois TensorFlowEasy
+#YUYV 1280 1092 7.0 YUYV 1280 1024 7.0 JeVois TensorFlowEasy
+#
+#####################################################################################################
+#### ArUco augmented-reality markers detection and decoding + color blob detection
+#####################################################################################################
+#
+#YUYV 320 266 30.0 YUYV 320 240 30.0 JeVois ArUcoBlob
+#
+#####################################################################################################
+#### Detect and identify objects in scenes using OpenCV DNN detection framework
+#####################################################################################################
+#
+#YUYV 640 502 20.0 YUYV 640 480 20.0 JeVois PyDetectionDNN
+#YUYV 640 498 15.0 YUYV 640 480 15.0 JeVois DetectionDNN
+#
+#####################################################################################################
+#### Simple demo of the ICM-20948 IMU attached to the AR0135 global shutter sensor
+#####################################################################################################
+#
+#YUYV 640 512 40.0 YUYV 640 480 40.0 JeVois DemoIMU
+#
+#####################################################################################################
+#### Object classification using OpenCV DNN in Python
+#####################################################################################################
+#
+#YUYV 320 264 30.0 YUYV 320 240 30.0 JeVois PyClassificationDNN
+#
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+## Modules provided by jevoisextra
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#####################################################################################################
+#
+#YUYV 320 264 15.0 YUYV 320 240 15.0 JeVois FaceDetector
+#
diff --git a/y2019/image_streamer/vision.conf b/y2019/image_streamer/vision.conf
new file mode 100644
index 0000000..df789dd
--- /dev/null
+++ b/y2019/image_streamer/vision.conf
@@ -0,0 +1,5 @@
+[program:vision]
+command=/root/image_streamer --single_camera=true --camera0_exposure=600 --roborio_ip=10.9.71.2
+redirect_stderr=false
+autostart=true
+autorestart=true
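This is the supervisor entry that deploy.sh installs; its sed step rewrites the --roborio_ip flag for the target robot. For the 10.99.71.179 example address from deploy.sh, the installed line would become:

    command=/root/image_streamer --single_camera=true --camera0_exposure=600 --roborio_ip=10.99.71.2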
diff --git a/y2019/jevois/teensy.cc b/y2019/jevois/teensy.cc
index 2998f01..570892f 100644
--- a/y2019/jevois/teensy.cc
+++ b/y2019/jevois/teensy.cc
@@ -935,38 +935,38 @@
   PORTD_PCR6 = PORT_PCR_MUX(1);
 
   // These go to CAM1.
-  // UART0_RX (peripheral) is UART1_RX (schematic).
+  // UART0_RX (peripheral) is UART1_RX (schematic) is UART1_TX_RAW (label TX).
   PORTA_PCR15 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART0_TX (peripheral) is UART1_TX (schematic).
+  // UART0_TX (peripheral) is UART1_TX (schematic) is UART1_RX_RAW (label RX).
   PORTA_PCR14 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM0.
-  // UART1_RX (peripheral) is UART0_RX (schematic).
+  // UART1_RX (peripheral) is UART0_RX (schematic) is UART0_TX_RAW (label TX).
   PORTC_PCR3 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                0 /* !PS to pull down */;
-  // UART1_TX (peripheral) is UART0_TX (schematic).
+  // UART1_TX (peripheral) is UART0_TX (schematic) is UART0_RX_RAW (label RX).
   PORTC_PCR4 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM2.
-  // UART2_RX
+  // UART2_RX is UART2_TX_RAW (label TX).
   PORTD_PCR2 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                0 /* !PS to pull down */;
-  // UART2_TX
+  // UART2_TX is UART2_RX_RAW (label RX).
   PORTD_PCR3 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM3.
-  // UART3_RX
+  // UART3_RX is UART3_TX_RAW (label TX).
   PORTB_PCR10 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART3_TX
+  // UART3_TX is UART3_RX_RAW (label RX).
   PORTB_PCR11 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   // These go to CAM4.
-  // UART4_RX
+  // UART4_RX is UART4_TX_RAW (label TX).
   PORTE_PCR25 = PORT_PCR_DSE | PORT_PCR_MUX(3) | PORT_PCR_PE /* Do a pull */ |
                 0 /* !PS to pull down */;
-  // UART4_TX
+  // UART4_TX is UART4_RX_RAW (label RX).
   PORTE_PCR24 = PORT_PCR_DSE | PORT_PCR_MUX(3);
 
   Uarts uarts;
diff --git a/y2019/joystick_reader.cc b/y2019/joystick_reader.cc
index 26a8ce1..c4a1caf 100644
--- a/y2019/joystick_reader.cc
+++ b/y2019/joystick_reader.cc
@@ -2,6 +2,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <chrono>
 
 #include "aos/actions/actions.h"
 #include "aos/init.h"
@@ -11,28 +12,38 @@
 #include "aos/input/joystick_input.h"
 #include "aos/logging/logging.h"
 #include "aos/logging/logging.h"
+#include "aos/network/team_number.h"
 #include "aos/util/log_interval.h"
+#include "aos/vision/events/udp.h"
+#include "external/com_google_protobuf/src/google/protobuf/stubs/stringprintf.h"
 #include "frc971/autonomous/auto.q.h"
 #include "frc971/autonomous/base_autonomous_actor.h"
 #include "frc971/control_loops/drivetrain/drivetrain.q.h"
+#include "frc971/control_loops/drivetrain/localizer.q.h"
 
 #include "y2019/control_loops/drivetrain/drivetrain_base.h"
 #include "y2019/control_loops/superstructure/superstructure.q.h"
 #include "y2019/status_light.q.h"
+#include "y2019/vision.pb.h"
 
 using ::y2019::control_loops::superstructure::superstructure_queue;
+using ::frc971::control_loops::drivetrain::localizer_control;
 using ::aos::input::driver_station::ButtonLocation;
 using ::aos::input::driver_station::ControlBit;
 using ::aos::input::driver_station::JoystickAxis;
 using ::aos::input::driver_station::POVLocation;
+using ::aos::events::ProtoTXUdpSocket;
 
 namespace y2019 {
 namespace input {
 namespace joysticks {
 
+using google::protobuf::StringPrintf;
+
 const ButtonLocation kSuctionBall(3, 13);
 const ButtonLocation kSuctionHatch(3, 12);
 const ButtonLocation kDeployStilt(3, 8);
+const ButtonLocation kHalfStilt(3, 6);
 const ButtonLocation kFallOver(3, 9);
 
 struct ElevatorWristPosition {
@@ -63,6 +74,13 @@
 const ButtonLocation kPanelHPIntakeBackward(5, 5);
 
 const ButtonLocation kRelease(2, 4);
+const ButtonLocation kResetLocalizer(4, 3);
+
+const ButtonLocation kAutoPanel(3, 10);
+const ButtonLocation kAutoPanelIntermediate(4, 6);
+
+const ElevatorWristPosition kAutoPanelPos{0.0, -M_PI / 2.0};
+const ElevatorWristPosition kAutoPanelIntermediatePos{0.34, -M_PI / 2.0};
 
 const ElevatorWristPosition kStowPos{0.36, 0.0};
 
@@ -104,10 +122,13 @@
       : ::aos::input::ActionJoystickInput(
             event_loop,
             ::y2019::control_loops::drivetrain::GetDrivetrainConfig()) {
+    const uint16_t team = ::aos::network::GetTeamNumber();
     superstructure_queue.goal.FetchLatest();
     if (superstructure_queue.goal.get()) {
       grab_piece_ = superstructure_queue.goal->suction.grab_piece;
     }
+    video_tx_.reset(new ProtoTXUdpSocket<VisionControl>(
+        StringPrintf("10.%d.%d.179", team / 100, team % 100), 5000));
   }
 
   void HandleTeleop(const ::aos::input::driver_station::Data &data) {
@@ -121,6 +142,16 @@
 
     auto new_superstructure_goal = superstructure_queue.goal.MakeMessage();
 
+    if (data.PosEdge(kResetLocalizer)) {
+      auto localizer_resetter = localizer_control.MakeMessage();
+      localizer_resetter->x = 0.4;
+      localizer_resetter->y = 3.4;
+      localizer_resetter->theta = 0.0;
+      if (!localizer_resetter.Send()) {
+        LOG(ERROR, "Failed to reset localizer.\n");
+      }
+    }
+
     if (data.IsPressed(kSuctionBall)) {
       grab_piece_ = true;
     } else if (data.IsPressed(kSuctionHatch)) {
@@ -158,8 +189,12 @@
       } else {
         new_superstructure_goal->stilts.profile_params.max_acceleration = 2.0;
       }
+    } else if (data.IsPressed(kHalfStilt)) {
+      new_superstructure_goal->stilts.unsafe_goal = 0.345;
+      new_superstructure_goal->stilts.profile_params.max_velocity = 0.65;
+      new_superstructure_goal->stilts.profile_params.max_acceleration = 0.75;
     } else {
-      new_superstructure_goal->stilts.unsafe_goal = 0.01;
+      new_superstructure_goal->stilts.unsafe_goal = 0.005;
       new_superstructure_goal->stilts.profile_params.max_velocity = 0.25;
       new_superstructure_goal->stilts.profile_params.max_acceleration = 2.0;
     }
@@ -170,6 +205,12 @@
       stilts_was_above_ = false;
     }
 
+    if (data.IsPressed(kAutoPanel)) {
+      elevator_wrist_pos_ = kAutoPanelPos;
+    } else if (data.IsPressed(kAutoPanelIntermediate)) {
+      elevator_wrist_pos_ = kAutoPanelIntermediatePos;
+    }
+
     if (switch_ball_) {
       if (superstructure_queue.status->has_piece) {
         new_superstructure_goal->wrist.profile_params.max_acceleration = 20;
@@ -263,6 +304,8 @@
       new_superstructure_goal->suction.gamepiece_mode = 1;
     }
 
+    vision_control_.set_flip_image(elevator_wrist_pos_.wrist < 0);
+
     new_superstructure_goal->suction.grab_piece = grab_piece_;
 
     new_superstructure_goal->elevator.unsafe_goal =
@@ -273,6 +316,12 @@
     if (!new_superstructure_goal.Send()) {
       LOG(ERROR, "Sending superstructure goal failed.\n");
     }
+
+    auto time_now = ::aos::monotonic_clock::now();
+    if (time_now > last_vision_control_ + ::std::chrono::milliseconds(50)) {
+      video_tx_->Send(vision_control_);
+      last_vision_control_ = time_now;
+    }
   }
 
  private:
@@ -282,6 +331,11 @@
 
   bool switch_ball_ = false;
   bool stilts_was_above_ = false;
+
+  VisionControl vision_control_;
+  ::std::unique_ptr<ProtoTXUdpSocket<VisionControl>> video_tx_;
+  ::aos::monotonic_clock::time_point last_vision_control_ =
+      ::aos::monotonic_clock::time_point::min();
 };
 
 }  // namespace joysticks
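Two details worth noting in the reader above: the ODROID address is derived from the team number (FRC 10.TE.AM addressing), and VisionControl packets are throttled to one per 50 ms (20 Hz). A self-contained sketch of the address derivation, with an illustrative helper name:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Illustrative helper: team 971 -> "10.9.71.179" (the ODROID), matching
    // StringPrintf("10.%d.%d.179", team / 100, team % 100) above.
    std::string OdroidAddress(uint16_t team) {
      char buf[16];
      snprintf(buf, sizeof(buf), "10.%d.%d.179", team / 100, team % 100);
      return std::string(buf);
    }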
diff --git a/y2019/vision.proto b/y2019/vision.proto
new file mode 100644
index 0000000..6a78805
--- /dev/null
+++ b/y2019/vision.proto
@@ -0,0 +1,13 @@
+syntax = "proto2";
+
+package y2019;
+
+message VisionControl {
+  optional bool high_video = 1;
+  optional bool flip_image = 2;
+}
+
+message VisionStatus {
+  optional uint32 high_frame_count = 1;
+  optional uint32 low_frame_count = 2;
+}
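These proto2 messages are the payloads of the two UDP links (control on port 5000, status on port 5001). A minimal sketch of how they round-trip, using only standard protobuf APIs; the function name is illustrative and the AOS socket plumbing is omitted:

    #include <string>

    #include "y2019/vision.pb.h"

    void RoundTripExample() {
      ::y2019::VisionControl control;
      control.set_high_video(false);  // stream camera0, the "low" camera
      control.set_flip_image(true);

      std::string wire;
      control.SerializeToString(&wire);  // bytes sent to UDP port 5000

      ::y2019::VisionControl parsed;
      parsed.ParseFromString(wire);  // what ProtoUdpClient does on receipt
    }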
diff --git a/y2019/vision/BUILD b/y2019/vision/BUILD
index 90c7dff..5cfc52b 100644
--- a/y2019/vision/BUILD
+++ b/y2019/vision/BUILD
@@ -74,7 +74,6 @@
 gtk_dependent_cc_binary(
     name = "debug_viewer",
     srcs = ["debug_viewer.cc"],
-    copts = ["-Wno-unused-variable"],
     restricted_to = VISION_TARGETS,
     deps = [
         ":target_finder",
@@ -83,6 +82,7 @@
         "//aos/vision/blob:transpose",
         "//aos/vision/debug:debug_framework",
         "//aos/vision/math:vector",
+        "@com_github_gflags_gflags//:gflags",
     ],
 )
 
diff --git a/y2019/vision/constants.cc b/y2019/vision/constants.cc
index 0fa8eda..310faf6 100644
--- a/y2019/vision/constants.cc
+++ b/y2019/vision/constants.cc
@@ -7,12 +7,12 @@
 
 CameraCalibration camera_1 = {
     {
-        -0.874694 / 180.0 * M_PI, 338.619, 2.14651 / 180.0 * M_PI,
+        -0.873939 / 180.0 * M_PI, 338.976, 2.44587 / 180.0 * M_PI,
     },
     {
-        {{-5.44063 * kInchesToMeters, 2.83405 * kInchesToMeters,
-          33.0386 * kInchesToMeters}},
-        181.723 / 180.0 * M_PI,
+        {{-5.46283 * kInchesToMeters, 2.98951 * kInchesToMeters,
+          33.0848 * kInchesToMeters}},
+        181.951 / 180.0 * M_PI,
     },
     {
         1,
@@ -61,12 +61,12 @@
 
 CameraCalibration camera_6 = {
     {
-        -1.17595 / 180.0 * M_PI, 346.997, 0.987547 / 180.0 * M_PI,
+        -1.15844 / 180.0 * M_PI, 348.161, 1.16894 / 180.0 * M_PI,
     },
     {
-        {{4.88124 * kInchesToMeters, 2.15528 * kInchesToMeters,
-          33.1686 * kInchesToMeters}},
-        -12.0018 / 180.0 * M_PI,
+        {{4.73183 * kInchesToMeters, 2.0984 * kInchesToMeters,
+          33.2023 * kInchesToMeters}},
+        -11.8598 / 180.0 * M_PI,
     },
     {
         6,
@@ -79,12 +79,12 @@
 
 CameraCalibration camera_7 = {
     {
-        -2.30729 / 180.0 * M_PI, 339.894, 1.16684 / 180.0 * M_PI,
+        -2.24098 / 180.0 * M_PI, 339.231, 1.15487 / 180.0 * M_PI,
     },
     {
-        {{3.62399 * kInchesToMeters, 3.94792 * kInchesToMeters,
-          33.3196 * kInchesToMeters}},
-        18.5828 / 180.0 * M_PI,
+        {{3.50224 * kInchesToMeters, 3.95441 * kInchesToMeters,
+          33.3469 * kInchesToMeters}},
+        18.6782 / 180.0 * M_PI,
     },
     {
         7,
@@ -97,12 +97,12 @@
 
 CameraCalibration camera_8 = {
     {
-        37.0966 / 180.0 * M_PI, 339.997, 0.265968 / 180.0 * M_PI,
+        37.1859 / 180.0 * M_PI, 339.517, 0.0405714 / 180.0 * M_PI,
     },
     {
-        {{3.53674 * kInchesToMeters, 5.25891 * kInchesToMeters,
-          12.6869 * kInchesToMeters}},
-        92.4773 / 180.0 * M_PI,
+        {{3.57002 * kInchesToMeters, 5.26966 * kInchesToMeters,
+          12.6807 * kInchesToMeters}},
+        92.6787 / 180.0 * M_PI,
     },
     {
         8,
@@ -115,12 +115,12 @@
 
 CameraCalibration camera_9 = {
     {
-        35.3461 / 180.0 * M_PI, 337.599, 3.34351 / 180.0 * M_PI,
+        35.4154 / 180.0 * M_PI, 337.471, 3.30546 / 180.0 * M_PI,
     },
     {
-        {{4.24216 * kInchesToMeters, -2.97032 * kInchesToMeters,
-          11.323 * kInchesToMeters}},
-        -93.3026 / 180.0 * M_PI,
+        {{4.25679 * kInchesToMeters, -2.93066 * kInchesToMeters,
+          11.3228 * kInchesToMeters}},
+        -93.219 / 180.0 * M_PI,
     },
     {
         9,
@@ -133,12 +133,12 @@
 
 CameraCalibration camera_10 = {
     {
-        -0.165199 / 180.0 * M_PI, 340.666, 0.596842 / 180.0 * M_PI,
+        -0.190556 / 180.0 * M_PI, 345.022, 0.468494 / 180.0 * M_PI,
     },
     {
-        {{-5.23103 * kInchesToMeters, 2.96098 * kInchesToMeters,
-          33.2867 * kInchesToMeters}},
-        182.121 / 180.0 * M_PI,
+        {{-4.83005 * kInchesToMeters, 2.95565 * kInchesToMeters,
+          33.3624 * kInchesToMeters}},
+        182.204 / 180.0 * M_PI,
     },
     {
         10,
@@ -151,12 +151,12 @@
 
 CameraCalibration camera_14 = {
     {
-        -0.0729684 / 180.0 * M_PI, 343.569, 0.685893 / 180.0 * M_PI,
+        0.108434 / 180.0 * M_PI, 338.756, 0.606249 / 180.0 * M_PI,
     },
     {
-        {{5.53867 * kInchesToMeters, 2.08897 * kInchesToMeters,
-          33.1766 * kInchesToMeters}},
-        -12.4121 / 180.0 * M_PI,
+        {{5.90372 * kInchesToMeters, 2.08009 * kInchesToMeters,
+          33.1342 * kInchesToMeters}},
+        -12.4624 / 180.0 * M_PI,
     },
     {
         14,
@@ -169,12 +169,12 @@
 
 CameraCalibration camera_15 = {
     {
-        -0.715538 / 180.0 * M_PI, 336.509, 1.54169 / 180.0 * M_PI,
+        -0.855459 / 180.0 * M_PI, 348.799, 1.4559 / 180.0 * M_PI,
     },
     {
-        {{4.57935 * kInchesToMeters, 4.16624 * kInchesToMeters,
-          33.433 * kInchesToMeters}},
-        20.9856 / 180.0 * M_PI,
+        {{3.15291 * kInchesToMeters, 4.16556 * kInchesToMeters,
+          33.5924 * kInchesToMeters}},
+        20.3884 / 180.0 * M_PI,
     },
     {
         15,
@@ -205,12 +205,12 @@
 
 CameraCalibration camera_17 = {
     {
-        34.7631 / 180.0 * M_PI, 337.946, 2.48943 / 180.0 * M_PI,
+        34.8231 / 180.0 * M_PI, 338.051, 2.43035 / 180.0 * M_PI,
     },
     {
-        {{3.15808 * kInchesToMeters, -2.5734 * kInchesToMeters,
-          11.8507 * kInchesToMeters}},
-        -92.1953 / 180.0 * M_PI,
+        {{3.17222 * kInchesToMeters, -2.49752 * kInchesToMeters,
+          11.8333 * kInchesToMeters}},
+        -92.1018 / 180.0 * M_PI,
     },
     {
         17,
@@ -223,12 +223,12 @@
 
 CameraCalibration camera_18 = {
     {
-        33.9292 / 180.0 * M_PI, 338.44, -1.71889 / 180.0 * M_PI,
+        33.9761 / 180.0 * M_PI, 338.017, -2.32243 / 180.0 * M_PI,
     },
     {
-        {{3.82945 * kInchesToMeters, 5.51444 * kInchesToMeters,
-          12.3803 * kInchesToMeters}},
-        96.0112 / 180.0 * M_PI,
+        {{3.95182 * kInchesToMeters, 5.50479 * kInchesToMeters,
+          12.3506 * kInchesToMeters}},
+        96.4141 / 180.0 * M_PI,
     },
     {
         18,
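
All of the calibration literals above follow one convention: angles are stored in degrees and converted with / 180.0 * M_PI, and positions are stored in inches and scaled by kInchesToMeters. A minimal sketch of that convention as explicit helpers (hypothetical names, not part of this change; kInchesToMeters is assumed to be the usual 0.0254):

#include <cmath>

// Hypothetical helpers mirroring the conversions used in constants.cc.
constexpr double kInchesToMeters = 0.0254;

constexpr double DegreesToRadians(double degrees) {
  return degrees / 180.0 * M_PI;
}

constexpr double InchesToMeters(double inches) {
  return inches * kInchesToMeters;
}

For example, camera_1's updated yaw above is DegreesToRadians(181.951) and its x offset is InchesToMeters(-5.46283).
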
diff --git a/y2019/vision/debug_viewer.cc b/y2019/vision/debug_viewer.cc
index 35f2af3..3c174b4 100644
--- a/y2019/vision/debug_viewer.cc
+++ b/y2019/vision/debug_viewer.cc
@@ -8,6 +8,7 @@
 #include "aos/vision/blob/transpose.h"
 #include "aos/vision/debug/debug_framework.h"
 #include "aos/vision/math/vector.h"
+#include "gflags/gflags.h"
 
 using aos::vision::ImageRange;
 using aos::vision::ImageFormat;
@@ -18,6 +19,8 @@
 using aos::vision::Segment;
 using aos::vision::PixelRef;
 
+DEFINE_int32(camera, 10, "The camera to use the intrinsics for");
+
 namespace y2019 {
 namespace vision {
 
@@ -53,15 +56,18 @@
 
 class FilterHarness : public aos::vision::FilterHarness {
  public:
+  FilterHarness() {
+    *(target_finder_.mutable_intrinsics()) =
+        GetCamera(FLAGS_camera)->intrinsics;
+  }
   aos::vision::RangeImage Threshold(aos::vision::ImagePtr image) override {
-    return finder_.Threshold(image);
+    return target_finder_.Threshold(image);
   }
 
   void InstallViewer(aos::vision::BlobStreamViewer *viewer) override {
     viewer_ = viewer;
     viewer_->SetScale(2.0);
     overlays_.push_back(&overlay_);
-    overlays_.push_back(finder_.GetOverlay());
+    overlays_.push_back(target_finder_.GetOverlay());
     viewer_->view()->SetOverlays(&overlays_);
   }
 
@@ -87,25 +93,26 @@
     }
 
     // Remove bad blobs.
-    finder_.PreFilter(&imgs);
+    target_finder_.PreFilter(&imgs);
 
     // Find polygons from blobs.
-    std::vector<std::vector<Segment<2>>> raw_polys;
+    ::std::vector<Polygon> raw_polys;
     for (const RangeImage &blob : imgs) {
       // Convert blobs to contours in the corrected space.
-      ContourNode* contour = finder_.GetContour(blob);
+      ContourNode *contour = target_finder_.GetContour(blob);
       if (draw_contours_) {
         DrawContour(contour, {255, 0, 0});
       }
-      finder_.UnWarpContour(contour);
+      ::std::vector<::Eigen::Vector2f> unwarped_contour =
+          target_finder_.UnWarpContour(contour);
       if (draw_contours_) {
-        DrawContour(contour, {0, 0, 255});
+        DrawContour(unwarped_contour, {0, 0, 255});
       }
 
       // Process to polygons.
-      std::vector<Segment<2>> polygon =
-          finder_.FillPolygon(contour, draw_raw_poly_);
-      if (polygon.empty()) {
+      const Polygon polygon = target_finder_.FindPolygon(
+          ::std::move(unwarped_contour), draw_raw_poly_);
+      if (polygon.segments.empty()) {
         if (!draw_contours_) {
           DrawBlob(blob, {255, 0, 0});
         }
@@ -115,14 +122,17 @@
           DrawBlob(blob, {0, 0, 255});
         }
         if (draw_raw_poly_) {
-          std::vector<PixelRef> colors = GetNColors(polygon.size());
+          std::vector<PixelRef> colors = GetNColors(polygon.segments.size());
           std::vector<Vector<2>> corners;
-          for (size_t i = 0; i < 4; ++i) {
-            corners.push_back(polygon[i].Intersect(polygon[(i + 1) % 4]));
+          for (size_t i = 0; i < polygon.segments.size(); ++i) {
+            corners.push_back(polygon.segments[i].Intersect(
+                polygon.segments[(i + 1) % polygon.segments.size()]));
           }
 
-          for (size_t i = 0; i < 4; ++i) {
-            overlay_.AddLine(corners[i], corners[(i + 1) % 4], colors[i]);
+          for (size_t i = 0; i < polygon.segments.size(); ++i) {
+            overlay_.AddLine(corners[i],
+                             corners[(i + 1) % polygon.segments.size()],
+                             colors[i]);
           }
         }
       }
@@ -130,16 +140,17 @@
 
     // Calculate each component side of a possible target.
     std::vector<TargetComponent> target_component_list =
-        finder_.FillTargetComponentList(raw_polys);
+        target_finder_.FillTargetComponentList(raw_polys, draw_components_);
     if (draw_components_) {
-      for (const TargetComponent &comp : target_component_list) {
-        DrawComponent(comp, {0, 255, 255}, {0, 255, 255}, {255, 0, 0},
+      for (const TargetComponent &component : target_component_list) {
+        DrawComponent(component, {0, 255, 255}, {0, 255, 255}, {255, 0, 0},
                       {0, 0, 255});
+        overlay_.DrawCross(component.bottom_point, 4, {128, 0, 255});
       }
     }
 
     // Put the components together into targets.
-    std::vector<Target> target_list = finder_.FindTargetsFromComponents(
+    std::vector<Target> target_list = target_finder_.FindTargetsFromComponents(
         target_component_list, draw_raw_target_);
     if (draw_raw_target_) {
       for (const Target &target : target_list) {
@@ -150,12 +161,18 @@
     // Use the solver to generate an intermediate version of our results.
     std::vector<IntermediateResult> results;
     for (const Target &target : target_list) {
-      results.emplace_back(finder_.ProcessTargetToResult(target, draw_raw_IR_));
-      if (draw_raw_IR_) DrawResult(results.back(), {255, 128, 0});
+      results.emplace_back(
+          target_finder_.ProcessTargetToResult(target, draw_raw_IR_));
+      if (draw_raw_IR_) {
+        IntermediateResult updatable_result = results.back();
+        target_finder_.MaybePickAndUpdateResult(&updatable_result,
+                                                draw_raw_IR_);
+        DrawResult(updatable_result, {255, 128, 0});
+      }
     }
 
     // Check that our current results match possible solutions.
-    results = finder_.FilterResults(results, 0);
+    results = target_finder_.FilterResults(results, 0, draw_results_);
     if (draw_results_) {
       for (const IntermediateResult &res : results) {
         DrawTarget(res, {0, 255, 0});
@@ -215,6 +232,18 @@
     }
   }
 
+  void DrawContour(const ::std::vector<::Eigen::Vector2f> &contour,
+                   PixelRef color) {
+    if (viewer_) {
+      for (size_t i = 0; i < contour.size(); ++i) {
+        Vector<2> a(contour[i].x(), contour[i].y());
+        Vector<2> b(contour[(i + 1) % contour.size()].x(),
+                    contour[(i + 1) % contour.size()].y());
+        overlay_.AddLine(a, b, color);
+      }
+    }
+  }
+
   void DrawComponent(const TargetComponent &comp, PixelRef top_color,
                      PixelRef bot_color, PixelRef in_color,
                      PixelRef out_color) {
@@ -239,15 +268,15 @@
   }
 
   void DrawResult(const IntermediateResult &result, PixelRef color) {
-    Target target =
-        Project(finder_.GetTemplateTarget(), intrinsics(), result.extrinsics);
+    Target target = Project(target_finder_.GetTemplateTarget(), intrinsics(),
+                            result.extrinsics);
     DrawComponent(target.left, color, color, color, color);
     DrawComponent(target.right, color, color, color, color);
   }
 
   void DrawTarget(const IntermediateResult &result, PixelRef color) {
-    Target target =
-        Project(finder_.GetTemplateTarget(), intrinsics(), result.extrinsics);
+    Target target = Project(target_finder_.GetTemplateTarget(), intrinsics(),
+                            result.extrinsics);
     Segment<2> leftAx((target.left.top + target.left.inside) * 0.5,
                       (target.left.bottom + target.left.outside) * 0.5);
     leftAx.Set(leftAx.A() * 0.9 + leftAx.B() * 0.1,
@@ -278,11 +307,13 @@
     overlay_.AddLine(p3 + leftAx.B(), p3 + rightAx.B(), {0, 255, 0});
   }
 
-  const IntrinsicParams &intrinsics() const { return finder_.intrinsics(); }
+  const IntrinsicParams &intrinsics() const {
+    return target_finder_.intrinsics();
+  }
 
  private:
   // implementation of the filter pipeline.
-  TargetFinder finder_;
+  TargetFinder target_finder_;
   aos::vision::BlobStreamViewer *viewer_ = nullptr;
   aos::vision::PixelLinesOverlay overlay_;
   std::vector<aos::vision::OverlayBase *> overlays_;
@@ -301,6 +332,8 @@
 }  // namespace y2019
 
 int main(int argc, char **argv) {
+  ::gflags::ParseCommandLineFlags(&argc, &argv, true);
+
   y2019::vision::FilterHarness filter_harness;
   aos::vision::DebugFrameworkMain(argc, argv, &filter_harness,
                                   aos::vision::CameraParams());
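
One caveat with the new FilterHarness constructor: GetCamera(FLAGS_camera) presumably returns a pointer (null for a camera id with no calibration entry), and the result is dereferenced unchecked, so a bad --camera value would crash before the viewer comes up. A defensive sketch under that assumption (the null-for-unknown-id behavior is assumed here, not established by this change):

  FilterHarness() {
    // Assumed: GetCamera() returns nullptr for ids with no calibration entry.
    const CameraCalibration *calibration = GetCamera(FLAGS_camera);
    if (calibration == nullptr) {
      fprintf(stderr, "No calibration found for camera %d\n",
              static_cast<int>(FLAGS_camera));
      abort();
    }
    *(target_finder_.mutable_intrinsics()) = calibration->intrinsics;
  }
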
diff --git a/y2019/vision/global_calibration.cc b/y2019/vision/global_calibration.cc
index d21e184..304f4cb 100644
--- a/y2019/vision/global_calibration.cc
+++ b/y2019/vision/global_calibration.cc
@@ -153,26 +153,27 @@
 
     const ::aos::vision::ImageFormat fmt{640, 480};
     ::aos::vision::BlobList imgs =
-        ::aos::vision::FindBlobs(aos::vision::DoThresholdYUYV(
+        ::aos::vision::FindBlobs(aos::vision::SlowYuyvYThreshold(
             fmt, frame.data.data(), TargetFinder::GetThresholdValue()));
     target_finder.PreFilter(&imgs);
 
     constexpr bool verbose = false;
-    ::std::vector<std::vector<Segment<2>>> raw_polys;
+    ::std::vector<Polygon> raw_polys;
     for (const RangeImage &blob : imgs) {
       // Convert blobs to contours in the corrected space.
       ContourNode *contour = target_finder.GetContour(blob);
-      target_finder.UnWarpContour(contour);
-      const ::std::vector<Segment<2>> polygon =
-          target_finder.FillPolygon(contour, verbose);
-      if (!polygon.empty()) {
+      ::std::vector<::Eigen::Vector2f> unwarped_contour =
+          target_finder.UnWarpContour(contour);
+      const Polygon polygon =
+          target_finder.FindPolygon(::std::move(unwarped_contour), verbose);
+      if (!polygon.segments.empty()) {
         raw_polys.push_back(polygon);
       }
     }
 
     // Calculate each component side of a possible target.
     const ::std::vector<TargetComponent> target_component_list =
-        target_finder.FillTargetComponentList(raw_polys);
+        target_finder.FillTargetComponentList(raw_polys, verbose);
 
     // Put the components together into targets.
     const ::std::vector<Target> target_list =
diff --git a/y2019/vision/target_finder.cc b/y2019/vision/target_finder.cc
index 45678eb..a56d82c 100644
--- a/y2019/vision/target_finder.cc
+++ b/y2019/vision/target_finder.cc
@@ -11,13 +11,14 @@
 
 aos::vision::RangeImage TargetFinder::Threshold(aos::vision::ImagePtr image) {
   const uint8_t threshold_value = GetThresholdValue();
-  return aos::vision::DoThreshold(image, [&](aos::vision::PixelRef &px) {
-    if (px.g > threshold_value && px.b > threshold_value &&
-        px.r > threshold_value) {
-      return true;
-    }
-    return false;
-  });
+  return aos::vision::ThresholdImageWithFunction(
+      image, [&](aos::vision::PixelRef px) {
+        if (px.g > threshold_value && px.b > threshold_value &&
+            px.r > threshold_value) {
+          return true;
+        }
+        return false;
+      });
 }
 
 // Filter blobs on size.
@@ -33,7 +34,7 @@
       imgs->end());
 }
 
-ContourNode* TargetFinder::GetContour(const RangeImage &blob) {
+ContourNode *TargetFinder::GetContour(const RangeImage &blob) {
   alloc_.reset();
   return RangeImgToContour(blob, &alloc_);
 }
@@ -41,6 +42,10 @@
 // TODO(ben): These values will be moved into the constants.h file.
 namespace {
 
+::Eigen::Vector2f AosVectorToEigenVector(Vector<2> in) {
+  return ::Eigen::Vector2f(in.x(), in.y());
+}
+
 constexpr double f_x = 481.4957;
 constexpr double c_x = 341.215;
 constexpr double f_y = 484.314;
@@ -59,155 +64,209 @@
 
 }
 
-Point UnWarpPoint(const Point &point, int iterations) {
+::Eigen::Vector2f UnWarpPoint(const Point point) {
   const double x0 = ((double)point.x - c_x) / f_x;
   const double y0 = ((double)point.y - c_y) / f_y;
   double x = x0;
   double y = y0;
   for (int i = 0; i < iterations; i++) {
     const double r_sqr = x * x + y * y;
-    const double coeff =
-        1.0 + r_sqr * (k_1 + k_2 * r_sqr * (1.0 + k_3 * r_sqr));
+    const double coeff = 1.0 + r_sqr * (k_1 + r_sqr * (k_2 + r_sqr * (k_3)));
     x = x0 / coeff;
     y = y0 / coeff;
   }
-  double nx = x * f_x_prime + c_x_prime;
-  double ny = y * f_y_prime + c_y_prime;
-  Point p = {static_cast<int>(nx), static_cast<int>(ny)};
-  return p;
+  const double nx = x * f_x_prime + c_x_prime;
+  const double ny = y * f_y_prime + c_y_prime;
+  return ::Eigen::Vector2f(nx, ny);
 }
 
-void TargetFinder::UnWarpContour(ContourNode *start) const {
+::std::vector<::Eigen::Vector2f> TargetFinder::UnWarpContour(
+    ContourNode *start) const {
+  ::std::vector<::Eigen::Vector2f> result;
   ContourNode *node = start;
   while (node->next != start) {
-    node->set_point(UnWarpPoint(node->pt, iterations));
+    result.push_back(UnWarpPoint(node->pt));
     node = node->next;
   }
-  node->set_point(UnWarpPoint(node->pt, iterations));
+  result.push_back(UnWarpPoint(node->pt));
+  return result;
 }
 
 // TODO: Try hierarchical merge for this.
 // Convert blobs into polygons.
-std::vector<aos::vision::Segment<2>> TargetFinder::FillPolygon(
-    ContourNode* start, bool verbose) {
+Polygon TargetFinder::FindPolygon(::std::vector<::Eigen::Vector2f> &&contour,
+                                  bool verbose) {
   if (verbose) printf("Process Polygon.\n");
 
-  struct Pt {
-    float x;
-    float y;
-  };
-  std::vector<Pt> points;
+  ::std::vector<::Eigen::Vector2f> slopes;
 
   // Collect all slopes from the contour.
-  Point previous_point = start->pt;
-  for (ContourNode *node = start; node->next != start;) {
-    node = node->next;
+  ::Eigen::Vector2f previous_point = contour[0];
+  for (size_t i = 0; i < contour.size(); ++i) {
+    ::Eigen::Vector2f next_point = contour[(i + 1) % contour.size()];
 
-    Point current_point = node->pt;
+    slopes.push_back(next_point - previous_point);
 
-    points.push_back({static_cast<float>(current_point.x - previous_point.x),
-                      static_cast<float>(current_point.y - previous_point.y)});
-
-    previous_point = current_point;
+    previous_point = next_point;
   }
 
-  const int num_points = points.size();
-  auto get_pt = [&points, num_points](int i) {
-    return points[(i + num_points * 2) % num_points];
+  const int num_points = slopes.size();
+  auto get_pt = [&slopes, num_points](int i) {
+    return slopes[(i + num_points * 2) % num_points];
   };
 
-  std::vector<Pt> filtered_points = points;
+  // Bigger objects should be filtered more aggressively.  Filter roughly in
+  // proportion to the perimeter of the object.
+  const int range = slopes.size() / 50;
+  if (verbose) printf("Corner range: %d.\n", range);
+
+  ::std::vector<::Eigen::Vector2f> filtered_slopes = slopes;
   // Three box filters maketh a Gaussian?
   // Run a Gaussian filter over the slopes 3 times.  That'll get us pretty
   // close to running a true Gaussian over it.
   for (int k = 0; k < 3; ++k) {
-    const int window_size = 2;
-    for (size_t i = 0; i < points.size(); ++i) {
-      Pt a{0.0, 0.0};
+    const int window_size = ::std::max(2, range);
+    for (size_t i = 0; i < slopes.size(); ++i) {
+      ::Eigen::Vector2f a = ::Eigen::Vector2f::Zero();
       for (int j = -window_size; j <= window_size; ++j) {
-        Pt p = get_pt(j + i);
-        a.x += p.x;
-        a.y += p.y;
+        ::Eigen::Vector2f p = get_pt(j + i);
+        a += p;
       }
-      a.x /= (window_size * 2 + 1);
-      a.y /= (window_size * 2 + 1);
+      a /= (window_size * 2 + 1);
 
-      const float scale = 1.0 + (i / float(points.size() * 10));
-      a.x *= scale;
-      a.y *= scale;
-      filtered_points[i] = a;
+      filtered_slopes[i] = a;
     }
-    points = filtered_points;
+    slopes = filtered_slopes;
+  }
+  if (verbose) printf("Point count: %zu.\n", slopes.size());
+
+  ::std::vector<float> corner_metric(slopes.size(), 0.0);
+
+  for (size_t i = 0; i < slopes.size(); ++i) {
+    const ::Eigen::Vector2f a = get_pt(i - ::std::max(3, range));
+    const ::Eigen::Vector2f b = get_pt(i + ::std::max(3, range));
+    corner_metric[i] = (a - b).squaredNorm();
   }
 
-  // Heuristic which says if a particular slope is part of a corner.
-  auto is_corner = [&](size_t i) {
-    const Pt a = get_pt(i - 3);
-    const Pt b = get_pt(i + 3);
-    const double dx = (a.x - b.x);
-    const double dy = (a.y - b.y);
-    return dx * dx + dy * dy > 0.25;
-  };
-
-  bool prev_v = is_corner(-1);
+  // We want to find the N highest peaks.
+  // Clever algorithm: Find the highest point.  Then, walk forwards and
+  // backwards to find the next valley in each direction which is more than x%
+  // below the peak.
+  // We want to ignore those points in the future, so mark them with -1.
+  // Repeat until we've found the N highest peaks.
 
   // Find all centers of corners.
-  // Because they round, multiple points may be a corner.
-  std::vector<size_t> edges;
-  size_t kBad = points.size() + 10;
-  size_t prev_up = kBad;
-  size_t wrapped_n = prev_up;
+  // Because they round, multiple slopes may be a corner.
+  ::std::vector<size_t> edges;
 
-  for (size_t i = 0; i < points.size(); ++i) {
-    bool v = is_corner(i);
-    if (prev_v && !v) {
-      if (prev_up == kBad) {
-        wrapped_n = i;
-      } else {
-        edges.push_back((prev_up + i - 1) / 2);
+  constexpr float peak_acceptance_ratio = 0.16;
+  constexpr float valley_ratio = 0.75;
+
+  float highest_peak_value = 0.0;
+
+  // N highest peaks.
+  while (edges.size() < 5) {
+    const ::std::vector<float>::iterator max_element =
+        ::std::max_element(corner_metric.begin(), corner_metric.end());
+    const size_t highest_index =
+        ::std::distance(corner_metric.begin(), max_element);
+    const float max_value = *max_element;
+    if (edges.size() == 0) {
+      highest_peak_value = max_value;
+    }
+    if (max_value < highest_peak_value * peak_acceptance_ratio &&
+        edges.size() == 4) {
+      if (verbose)
+        printf("Rejecting index: %zu, %f (%f %%)\n", highest_index, max_value,
+               max_value / highest_peak_value);
+      break;
+    }
+    const float valley_value = max_value * valley_ratio;
+
+    if (verbose)
+      printf("Highest index: %zu, %f (%f %%)\n", highest_index, max_value,
+             max_value / highest_peak_value);
+
+    bool foothill = false;
+    {
+      float min_value = max_value;
+      size_t fwd_index = (highest_index + 1) % corner_metric.size();
+      while (true) {
+        const float current_value = corner_metric[fwd_index];
+
+        if (current_value == -1.0) {
+          if (min_value >= valley_value) {
+            if (verbose) printf("Foothill\n");
+            foothill = true;
+          }
+          break;
+        }
+
+        min_value = ::std::min(current_value, min_value);
+
+        if (min_value < valley_value && current_value > min_value) {
+          break;
+        }
+        // Kill!!!
+        corner_metric[fwd_index] = -1.0;
+
+        fwd_index = (fwd_index + 1) % corner_metric.size();
       }
     }
-    if (v && !prev_v) {
-      prev_up = i;
+
+    {
+      float min_value = max_value;
+      size_t rev_index =
+          (highest_index - 1 + corner_metric.size()) % corner_metric.size();
+      while (true) {
+        const float current_value = corner_metric[rev_index];
+
+        if (current_value == -1.0) {
+          if (min_value >= valley_value) {
+            if (verbose) printf("Foothill\n");
+            foothill = true;
+          }
+          break;
+        }
+
+        min_value = ::std::min(current_value, min_value);
+
+        if (min_value < valley_value && current_value > min_value) {
+          break;
+        }
+        // Kill!!!
+        corner_metric[rev_index] = -1.0;
+
+        rev_index =
+            (rev_index - 1 + corner_metric.size()) % corner_metric.size();
+      }
     }
-    prev_v = v;
+
+    *max_element = -1.0;
+    if (!foothill) {
+      edges.push_back(highest_index);
+    }
   }
 
-  if (wrapped_n != kBad) {
-    edges.push_back(((prev_up + points.size() + wrapped_n - 1) / 2) % points.size());
-  }
+  ::std::sort(edges.begin(), edges.end());
 
   if (verbose) printf("Edge Count (%zu).\n", edges.size());
 
-  // Get all CountourNodes from the contour.
-  using aos::vision::PixelRef;
-  std::vector<ContourNode *> segments;
-  {
-    std::vector<ContourNode *> segments_all;
-
-    for (ContourNode *node = start; node->next != start;) {
-      node = node->next;
-      segments_all.push_back(node);
-    }
-    for (size_t i : edges) {
-      segments.push_back(segments_all[i]);
-    }
-  }
-  if (verbose) printf("Segment Count (%zu).\n", segments.size());
-
   // Run best-fits over each line segment.
-  std::vector<Segment<2>> seg_list;
-  if (segments.size() == 4) {
-    for (size_t i = 0; i < segments.size(); ++i) {
-      ContourNode *segment_end = segments[(i + 1) % segments.size()];
-      ContourNode *segment_start = segments[i];
+  Polygon polygon;
+  if (edges.size() >= 3) {
+    for (size_t i = 0; i < edges.size(); ++i) {
+      // Include the corners in both line fits.
+      const size_t segment_start_index = edges[i];
+      const size_t segment_end_index =
+          (edges[(i + 1) % edges.size()] + 1) % contour.size();
       float mx = 0.0;
       float my = 0.0;
       int n = 0;
-      for (ContourNode *node = segment_start; node != segment_end;
-           node = node->next) {
-        mx += node->pt.x;
-        my += node->pt.y;
+      for (size_t j = segment_start_index; j != segment_end_index;
+           (j = (j + 1) % contour.size())) {
+        mx += contour[j].x();
+        my += contour[j].y();
         ++n;
         // (x - [x] / N) ** 2 = [x * x] - 2 * [x] * [x] / N + [x] * [x] / N / N;
       }
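
Two notes on the hunk above. First, the distortion fix in UnWarpPoint: the old coefficient expanded to 1 + k_1*r^2 + k_2*r^4 + k_2*k_3*r^6, i.e. the k_3 term was accidentally scaled by k_2; the new Horner form 1.0 + r_sqr * (k_1 + r_sqr * (k_2 + r_sqr * k_3)) is the intended 1 + k_1*r^2 + k_2*r^4 + k_3*r^6.

Second, the corner search: repeatedly take the global maximum of the corner metric, walk outwards in both directions marking everything down to the surrounding valleys as consumed (-1.0), and keep the peak only if the walk actually descended (otherwise it is a foothill of an already-consumed peak). A simplified sketch of the same greedy idea, minus the wrap-around and foothill handling of the real code (PickPeaks is a hypothetical name, not part of this change):

#include <algorithm>
#include <cstddef>
#include <vector>

// Greedy peak picking: repeatedly take the global maximum of metric, then
// consume its neighborhood down past valley_ratio * peak so that it cannot
// be found again.
std::vector<size_t> PickPeaks(std::vector<float> metric, size_t max_peaks,
                              float valley_ratio) {
  std::vector<size_t> peaks;
  while (peaks.size() < max_peaks) {
    const auto max_it = std::max_element(metric.begin(), metric.end());
    if (max_it == metric.end() || *max_it <= 0.0f) {
      break;
    }
    const size_t peak = std::distance(metric.begin(), max_it);
    const float valley_value = *max_it * valley_ratio;
    peaks.push_back(peak);
    // Walk forwards, consuming samples until we drop below the valley.
    for (size_t i = peak; i < metric.size() && metric[i] >= valley_value;
         ++i) {
      metric[i] = -1.0f;
    }
    // Walk backwards from the sample before the peak.
    for (size_t i = peak; i-- > 0 && metric[i] >= valley_value;) {
      metric[i] = -1.0f;
    }
  }
  return peaks;
}

With valley_ratio = 0.75 this consumes each peak's mountain the same way the loops above do on non-circular data.
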
@@ -217,10 +276,10 @@
       float xx = 0.0;
       float xy = 0.0;
       float yy = 0.0;
-      for (ContourNode *node = segment_start; node != segment_end;
-           node = node->next) {
-        const float x = node->pt.x - mx;
-        const float y = node->pt.y - my;
+      for (size_t j = segment_start_index; j != segment_end_index;
+           (j = (j + 1) % contour.size())) {
+        const float x = contour[j].x() - mx;
+        const float y = contour[j].y() - my;
         xx += x * x;
         xy += x * y;
         yy += y * y;
@@ -241,7 +300,7 @@
         x /= norm;
         y /= norm;
 
-        seg_list.push_back(
+        polygon.segments.push_back(
             Segment<2>(Vector<2>(mx, my), Vector<2>(mx + x, my + y)));
       }
 
@@ -257,23 +316,25 @@
       */
     }
   }
-  if (verbose) printf("Poly Count (%zu).\n", seg_list.size());
-  return seg_list;
+  if (verbose) printf("Poly Count (%zu).\n", polygon.segments.size());
+  polygon.contour = ::std::move(contour);
+  return polygon;
 }
 
 // Convert segments into target components (left or right)
-std::vector<TargetComponent> TargetFinder::FillTargetComponentList(
-    const std::vector<std::vector<Segment<2>>> &seg_list) {
-  std::vector<TargetComponent> list;
+::std::vector<TargetComponent> TargetFinder::FillTargetComponentList(
+    const ::std::vector<Polygon> &seg_list, bool verbose) {
+  ::std::vector<TargetComponent> list;
   TargetComponent new_target;
-  for (const std::vector<Segment<2>> &poly : seg_list) {
+  for (const Polygon &polygon : seg_list) {
     // Reject mis-sized polygons for now. Maybe rectify them here in the future.
-    if (poly.size() != 4) {
+    if (polygon.segments.size() != 4) {
       continue;
     }
-    std::vector<Vector<2>> corners;
+    ::std::vector<Vector<2>> corners;
     for (size_t i = 0; i < 4; ++i) {
-      Vector<2> corner = poly[i].Intersect(poly[(i + 1) % 4]);
+      Vector<2> corner =
+          polygon.segments[i].Intersect(polygon.segments[(i + 1) % 4]);
       if (::std::isnan(corner.x()) || ::std::isnan(corner.y())) {
         break;
       }
@@ -285,7 +346,7 @@
 
     // Select the closest two points. Short side of the rectangle.
     double min_dist = -1;
-    std::pair<size_t, size_t> closest;
+    ::std::pair<size_t, size_t> closest;
     for (size_t i = 0; i < 4; ++i) {
       size_t next = (i + 1) % 4;
       double nd = corners[i].SquaredDistanceTo(corners[next]);
@@ -371,8 +432,34 @@
       }
     }
 
+    // Take the vector which points from the bottom to the top of the target
+    // along the outside edge.
+    const ::Eigen::Vector2f outer_edge_vector =
+        AosVectorToEigenVector(new_target.top - new_target.outside);
+    // Now, dot each point in the perimeter with this vector.  The one with
+    // the smallest component will be the one closest to the bottom along this
+    // direction vector.
+    ::Eigen::Vector2f smallest_point = polygon.contour[0];
+    float smallest_value = outer_edge_vector.transpose() * smallest_point;
+    for (const ::Eigen::Vector2f point : polygon.contour) {
+      const float current_value = outer_edge_vector.transpose() * point;
+      if (current_value < smallest_value) {
+        smallest_value = current_value;
+        smallest_point = point;
+      }
+    }
+
+    // Save off the lowest point along the outer edge.
+    new_target.bottom_point = smallest_point;
+    if (verbose) {
+      printf("Lowest point in the blob is (%f, %f)\n", smallest_point.x(),
+             smallest_point.y());
+    }
+
     // This piece of the target should be ready now.
     list.emplace_back(new_target);
+
+    if (verbose) printf("Happy with a target\n");
   }
 
   return list;
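
The bottom-point search in the hunk above is the classic "extreme point along a direction" primitive: dot every contour point with the outer-edge vector and keep the minimum. A standalone sketch of that primitive (ExtremePointAlong is a hypothetical name; Eigen's dot() is equivalent to the transpose-multiply used above):

#include <vector>

#include <Eigen/Dense>

// Returns the point of contour with the smallest component along direction,
// i.e. the extreme point of the contour opposite the direction vector.
// Assumes contour is non-empty.
Eigen::Vector2f ExtremePointAlong(const std::vector<Eigen::Vector2f> &contour,
                                  const Eigen::Vector2f &direction) {
  Eigen::Vector2f best = contour[0];
  float best_value = direction.dot(best);
  for (const Eigen::Vector2f &point : contour) {
    const float value = direction.dot(point);
    if (value < best_value) {
      best_value = value;
      best = point;
    }
  }
  return best;
}
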
@@ -395,16 +482,6 @@
       Target new_target;
       const TargetComponent &b = component_list[j];
 
-      // Reject targets that are too far off vertically.
-      Vector<2> a_center = a.major_axis.Center();
-      if (a_center.y() > b.bottom.y() || a_center.y() < b.top.y()) {
-        continue;
-      }
-      Vector<2> b_center = b.major_axis.Center();
-      if (b_center.y() > a.bottom.y() || b_center.y() < a.top.y()) {
-        continue;
-      }
-
       if (a.is_right && !b.is_right) {
         if (a.top.x() > b.top.x()) {
           new_target.right = a;
@@ -417,6 +494,9 @@
           new_target.left = a;
           target_valid = true;
         }
+      } else if (verbose) {
+        printf("Found same side components: %s.\n",
+               a.is_right ? "right" : "left");
       }
       if (target_valid) {
         target_list.emplace_back(new_target);
@@ -427,12 +507,42 @@
   return target_list;
 }
 
+bool TargetFinder::MaybePickAndUpdateResult(IntermediateResult *result,
+                                            bool verbose) {
+  // Based on a linear regression between error and distance to target.
+  // Closer targets can have a higher error because they are bigger.
+  const double acceptable_error =
+      std::max(2 * (21 - 12 * result->extrinsics.z), 50.0);
+  if (result->solver_error < acceptable_error) {
+    if (verbose) {
+      printf("Using an 8 point solve: %f < %f \n", result->solver_error,
+             acceptable_error);
+    }
+    return true;
+  } else if (result->backup_solver_error < acceptable_error) {
+    if (verbose) {
+      printf("Using a 4 point solve: %f < %f \n", result->backup_solver_error,
+             acceptable_error);
+    }
+    result->extrinsics = result->backup_extrinsics;
+    result->solver_error = result->backup_solver_error;
+    return true;
+  } else if (verbose) {
+    printf("Rejecting a target with errors: (%f, %f) > %f \n",
+           result->solver_error, result->backup_solver_error, acceptable_error);
+  }
+  return false;
+}
+
 std::vector<IntermediateResult> TargetFinder::FilterResults(
-    const std::vector<IntermediateResult> &results, uint64_t print_rate) {
+    const std::vector<IntermediateResult> &results, uint64_t print_rate,
+    bool verbose) {
   std::vector<IntermediateResult> filtered;
   for (const IntermediateResult &res : results) {
-    if (res.solver_error < 75.0) {
-      filtered.emplace_back(res);
+    IntermediateResult updatable_result = res;
+    if (MaybePickAndUpdateResult(&updatable_result, verbose)) {
+      filtered.emplace_back(updatable_result);
     }
   }
   frame_count_++;
diff --git a/y2019/vision/target_finder.h b/y2019/vision/target_finder.h
index 1ba79ba..c7de67a 100644
--- a/y2019/vision/target_finder.h
+++ b/y2019/vision/target_finder.h
@@ -18,6 +18,11 @@
 using aos::vision::Vector;
 using aos::vision::ContourNode;
 
+struct Polygon {
+  ::std::vector<aos::vision::Segment<2>> segments;
+  ::std::vector<::Eigen::Vector2f> contour;
+};
+
 class TargetFinder {
  public:
   TargetFinder();
@@ -30,16 +35,15 @@
   // filter out obvious or deranged blobs.
   void PreFilter(BlobList *imgs);
 
-  ContourNode* GetContour(const RangeImage &blob);
-  void UnWarpContour(ContourNode* start) const;
+  ContourNode *GetContour(const RangeImage &blob);
+  ::std::vector<::Eigen::Vector2f> UnWarpContour(ContourNode *start) const;
 
   // Turn a blob into a polygon.
-  std::vector<aos::vision::Segment<2>> FillPolygon(ContourNode *start,
-                                                   bool verbose);
+  Polygon FindPolygon(::std::vector<::Eigen::Vector2f> &&contour, bool verbose);
 
   // Turn a bloblist into components of a target.
   std::vector<TargetComponent> FillTargetComponentList(
-      const std::vector<std::vector<aos::vision::Segment<2>>> &seg_list);
+      const ::std::vector<Polygon> &seg_list, bool verbose);
 
   // Piece the components together into a target.
   std::vector<Target> FindTargetsFromComponents(
@@ -48,8 +52,13 @@
   // Given a target solve for the transformation of the template target.
   IntermediateResult ProcessTargetToResult(const Target &target, bool verbose);
 
+  // Returns true if a target is good, and false otherwise.  Picks the 4 vs 8
+  // point solution depending on which one looks right.
+  bool MaybePickAndUpdateResult(IntermediateResult *result, bool verbose);
+
   std::vector<IntermediateResult> FilterResults(
-      const std::vector<IntermediateResult> &results, uint64_t print_rate);
+      const std::vector<IntermediateResult> &results, uint64_t print_rate,
+      bool verbose);
 
   // Get the local overlay for debug if we are doing that.
   aos::vision::PixelLinesOverlay *GetOverlay() { return &overlay_; }
diff --git a/y2019/vision/target_geometry.cc b/y2019/vision/target_geometry.cc
index d4b8a54..6df1a27 100644
--- a/y2019/vision/target_geometry.cc
+++ b/y2019/vision/target_geometry.cc
@@ -46,9 +46,11 @@
 }
 
 std::array<Vector<2>, 8> Target::ToPointList() const {
-  return std::array<Vector<2>, 8>{{right.top, right.inside, right.bottom,
-                                   right.outside, left.top, left.inside,
-                                   left.bottom, left.outside}};
+  // Note: the odd-indexed points are fit with the line solver in the 4 point
+  // solution, while the even-indexed points are fit with the point matcher.
+  return std::array<Vector<2>, 8>{{right.top, right.outside, right.inside,
+                                   right.bottom, left.top, left.outside,
+                                   left.inside, left.bottom}};
 }
 
 Vector<2> Project(Vector<2> pt, const IntrinsicParams &intrinsics,
@@ -141,8 +143,8 @@
 }
 
 // Used at runtime on a single image given camera parameters.
-struct RuntimeCostFunctor {
-  RuntimeCostFunctor(Vector<2> result, Vector<2> template_pt,
-                     IntrinsicParams intrinsics)
+struct PointCostFunctor {
+  PointCostFunctor(Vector<2> result, Vector<2> template_pt,
+                   IntrinsicParams intrinsics)
       : result(result), template_pt(template_pt), intrinsics(intrinsics) {}
 
@@ -159,13 +161,81 @@
   IntrinsicParams intrinsics;
 };
 
+// Find the distance from a lower target point to the 'vertical' line it should
+// be on.
+struct LineCostFunctor {
+  LineCostFunctor(Vector<2> result, Segment<2> template_seg,
+                  IntrinsicParams intrinsics)
+      : result(result), template_seg(template_seg), intrinsics(intrinsics) {}
+
+  bool operator()(const double *const x, double *residual) const {
+    const auto extrinsics = ExtrinsicParams::get(x);
+    const Vector<2> p1 = Project(template_seg.A(), intrinsics, extrinsics);
+    const Vector<2> p2 = Project(template_seg.B(), intrinsics, extrinsics);
+    // distance from line (P1, P2) to point result
+    double dx = p2.x() - p1.x();
+    double dy = p2.y() - p1.y();
+    double denom = p2.DistanceTo(p1);
+    residual[0] = ::std::abs(dy * result.x() - dx * result.y() +
+                             p2.x() * p1.y() - p2.y() * p1.x()) /
+                  denom;
+    return true;
+  }
+
+  Vector<2> result;
+  Segment<2> template_seg;
+  IntrinsicParams intrinsics;
+};
+
+// Find the distance that the bottom point is outside the target and penalize
+// that linearly.
+class BottomPointCostFunctor {
+ public:
+  BottomPointCostFunctor(::Eigen::Vector2f bottom_point,
+                         Segment<2> template_seg, IntrinsicParams intrinsics)
+      : bottom_point_(bottom_point.x(), bottom_point.y()),
+        template_seg_(template_seg),
+        intrinsics_(intrinsics) {}
+
+  bool operator()(const double *const x, double *residual) const {
+    const ExtrinsicParams extrinsics = ExtrinsicParams::get(x);
+    const Vector<2> p1 = Project(template_seg_.A(), intrinsics_, extrinsics);
+    const Vector<2> p2 = Project(template_seg_.B(), intrinsics_, extrinsics);
+
+    // Construct a vector pointed perpendicular to the line.  This vector is
+    // pointed down out the bottom of the target.
+    ::Eigen::Vector2d down_axis(-(p1.y() - p2.y()), p1.x() - p2.x());
+    down_axis.normalize();
+
+    // Positive means out.
+    const double component =
+        down_axis.transpose() * (bottom_point_ - p1.GetData().transpose());
+
+    if (component > 0) {
+      residual[0] = component * 1.0;
+    } else {
+      residual[0] = 0.0;
+    }
+    return true;
+  }
+
+ private:
+  ::Eigen::Vector2d bottom_point_;
+  Segment<2> template_seg_;
+
+  IntrinsicParams intrinsics_;
+};
+
 IntermediateResult TargetFinder::ProcessTargetToResult(const Target &target,
                                                        bool verbose) {
   // Memory for the ceres solver.
-  double params[ExtrinsicParams::kNumParams];
-  default_extrinsics_.set(&params[0]);
+  double params_8point[ExtrinsicParams::kNumParams];
+  default_extrinsics_.set(&params_8point[0]);
+  double params_4point[ExtrinsicParams::kNumParams];
+  default_extrinsics_.set(&params_4point[0]);
 
-  Problem problem;
+  Problem problem_8point;
+  Problem problem_4point;
 
   ::std::array<aos::vision::Vector<2>, 8> target_value = target.ToPointList();
   ::std::array<aos::vision::Vector<2>, 8> template_value =
@@ -175,30 +245,87 @@
     aos::vision::Vector<2> a = template_value[i];
     aos::vision::Vector<2> b = target_value[i];
 
-    problem.AddResidualBlock(
-        new NumericDiffCostFunction<RuntimeCostFunctor, CENTRAL, 2, 4>(
-            new RuntimeCostFunctor(b, a, intrinsics_)),
-        NULL, &params[0]);
+    if (i % 2 == 1) {
+      aos::vision::Vector<2> a2 = template_value[i - 1];
+      aos::vision::Segment<2> line = Segment<2>(a, a2);
+
+      problem_4point.AddResidualBlock(
+          new NumericDiffCostFunction<LineCostFunctor, CENTRAL, 1, 4>(
+              new LineCostFunctor(b, line, intrinsics_)),
+          NULL, &params_4point[0]);
+    } else {
+      problem_4point.AddResidualBlock(
+          new NumericDiffCostFunction<PointCostFunctor, CENTRAL, 2, 4>(
+              new PointCostFunctor(b, a, intrinsics_)),
+          NULL, &params_4point[0]);
+    }
+
+    problem_8point.AddResidualBlock(
+        new NumericDiffCostFunction<PointCostFunctor, CENTRAL, 2, 4>(
+            new PointCostFunctor(b, a, intrinsics_)),
+        NULL, &params_8point[0]);
   }
 
   Solver::Options options;
   options.minimizer_progress_to_stdout = false;
-  Solver::Summary summary;
-  Solve(options, &problem, &summary);
+  Solver::Summary summary_8point;
+  Solve(options, &problem_8point, &summary_8point);
+
+  // So, let's sneak up on it.  Start by warm-starting the 4 point problem with
+  // where we got on the 8 point solution.
+  ExtrinsicParams::get(&params_8point[0]).set(&params_4point[0]);
+  // Then solve without the bottom constraint.
+  Solver::Summary summary_4point1;
+  Solve(options, &problem_4point, &summary_4point1);
+
+  // Now, add a large cost for the bottom point being below the bottom line.
+  problem_4point.AddResidualBlock(
+      new NumericDiffCostFunction<BottomPointCostFunctor, CENTRAL, 1, 4>(
+          new BottomPointCostFunctor(target.left.bottom_point,
+                                     Segment<2>(target_template_.left.outside,
+                                                target_template_.left.bottom),
+                                     intrinsics_)),
+      NULL, &params_4point[0]);
+  // Make sure to point the segment the other way so when we do a -pi/2 rotation
+  // on the line, it points down in target space.
+  problem_4point.AddResidualBlock(
+      new NumericDiffCostFunction<BottomPointCostFunctor, CENTRAL, 1, 4>(
+          new BottomPointCostFunctor(target.right.bottom_point,
+                                     Segment<2>(target_template_.right.bottom,
+                                                target_template_.right.outside),
+                                     intrinsics_)),
+      NULL, &params_4point[0]);
+
+  // And then re-solve.
+  Solver::Summary summary_4point2;
+  Solve(options, &problem_4point, &summary_4point2);
 
   IntermediateResult IR;
-  IR.extrinsics = ExtrinsicParams::get(&params[0]);
-  IR.solver_error = summary.final_cost;
+  IR.extrinsics = ExtrinsicParams::get(&params_8point[0]);
+  IR.solver_error = summary_8point.final_cost;
+  IR.backup_extrinsics = ExtrinsicParams::get(&params_4point[0]);
+  IR.backup_solver_error = summary_4point2.final_cost;
 
   if (verbose) {
-    std::cout << summary.BriefReport() << "\n";
+    std::cout << "rup = " << intrinsics_.mount_angle * 180 / M_PI << ";\n";
+    std::cout << "fl = " << intrinsics_.focal_length << ";\n";
+    std::cout << "8 points:\n";
+    std::cout << summary_8point.BriefReport() << "\n";
+    std::cout << "error = " << summary_8point.final_cost << ";\n";
     std::cout << "y = " << IR.extrinsics.y / kInchesToMeters << ";\n";
     std::cout << "z = " << IR.extrinsics.z / kInchesToMeters << ";\n";
     std::cout << "r1 = " << IR.extrinsics.r1 * 180 / M_PI << ";\n";
     std::cout << "r2 = " << IR.extrinsics.r2 * 180 / M_PI << ";\n";
-    std::cout << "rup = " << intrinsics_.mount_angle * 180 / M_PI << ";\n";
-    std::cout << "fl = " << intrinsics_.focal_length << ";\n";
-    std::cout << "error = " << summary.final_cost << ";\n";
+    std::cout << "4 points:\n";
+    std::cout << summary_4point1.BriefReport() << "\n";
+    std::cout << "error = " << summary_4point1.final_cost << ";\n\n";
+    std::cout << "4 points:\n";
+    std::cout << summary_4point2.BriefReport() << "\n";
+    std::cout << "error = " << summary_4point2.final_cost << ";\n\n";
+    std::cout << "y = " << IR.backup_extrinsics.y / kInchesToMeters << ";\n";
+    std::cout << "z = " << IR.backup_extrinsics.z / kInchesToMeters << ";\n";
+    std::cout << "r1 = " << IR.backup_extrinsics.r1 * 180 / M_PI << ";\n";
+    std::cout << "r2 = " << IR.backup_extrinsics.r2 * 180 / M_PI << ";\n";
   }
   return IR;
 }
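
The residual in LineCostFunctor above is the standard distance from a point to the infinite line through two projected template points. A standalone sketch for sanity-checking the expression:

#include <cmath>

// Distance from point (qx, qy) to the infinite line through (x1, y1) and
// (x2, y2).  This mirrors the residual computed in LineCostFunctor.
double PointToLineDistance(double x1, double y1, double x2, double y2,
                           double qx, double qy) {
  const double dx = x2 - x1;
  const double dy = y2 - y1;
  return std::abs(dy * qx - dx * qy + x2 * y1 - y2 * x1) / std::hypot(dx, dy);
}

Example: the line through (0, 0) and (1, 1) is 1/sqrt(2) from the point (1, 0), so PointToLineDistance(0, 0, 1, 1, 1, 0) returns 0.7071....
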
diff --git a/y2019/vision/target_sender.cc b/y2019/vision/target_sender.cc
index f322aeb..3a6fbfe 100644
--- a/y2019/vision/target_sender.cc
+++ b/y2019/vision/target_sender.cc
@@ -64,213 +64,6 @@
 using aos::vision::RangeImage;
 using aos::vision::ImageFormat;
 
-#define MASH(v0, v1, v2, v3, v4)                                  \
-  ((uint8_t(v0) << 4) | (uint8_t(v1) << 3) | (uint8_t(v2) << 2) | \
-   (uint8_t(v3) << 1) | (uint8_t(v4)))
-
-// YUYV image types:
-inline RangeImage DoThresholdYUYV(ImageFormat fmt, const char *data,
-                                  uint8_t value) {
-  std::vector<std::vector<ImageRange>> ranges;
-  ranges.reserve(fmt.h);
-  for (int y = 0; y < fmt.h; ++y) {
-    const char *row = fmt.w * y * 2 + data;
-    bool p_score = false;
-    int pstart = -1;
-    std::vector<ImageRange> rngs;
-    for (int x = 0; x < fmt.w / 4; ++x) {
-      uint8_t v[8];
-      memcpy(&v[0], row + x * 4 * 2, 8);
-      uint8_t pattern =
-          MASH(p_score, v[0] > value, v[2] > value, v[4] > value, v[6] > value);
-      switch (pattern) {
-        /*
-# Ruby code to generate the below code:
-32.times do |v|
-        puts "case MASH(#{[v[4], v[3], v[2], v[1], v[0]].join(", ")}):"
-        p_score = v[4]
-        pstart = "pstart"
-        4.times do |i|
-                if v[3 - i] != p_score
-                        if (p_score == 1)
-                                puts "  rngs.emplace_back(ImageRange(#{pstart},
-x * 4 + #{i}));"
-                        else
-                                pstart = "x * 4 + #{i}"
-                        end
-                        p_score = v[3 - i]
-                end
-        end
-        if (pstart != "pstart")
-                puts "  pstart = #{pstart};"
-        end
-        if (p_score != v[4])
-                puts "  p_score = #{["false", "true"][v[0]]};"
-        end
-        puts "  break;"
-end
-*/
-        case MASH(0, 0, 0, 0, 0):
-          break;
-        case MASH(0, 0, 0, 0, 1):
-          pstart = x * 4 + 3;
-          p_score = true;
-          break;
-        case MASH(0, 0, 0, 1, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
-          pstart = x * 4 + 2;
-          break;
-        case MASH(0, 0, 0, 1, 1):
-          pstart = x * 4 + 2;
-          p_score = true;
-          break;
-        case MASH(0, 0, 1, 0, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
-          pstart = x * 4 + 1;
-          break;
-        case MASH(0, 0, 1, 0, 1):
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
-          pstart = x * 4 + 3;
-          p_score = true;
-          break;
-        case MASH(0, 0, 1, 1, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 3));
-          pstart = x * 4 + 1;
-          break;
-        case MASH(0, 0, 1, 1, 1):
-          pstart = x * 4 + 1;
-          p_score = true;
-          break;
-        case MASH(0, 1, 0, 0, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
-          pstart = x * 4 + 0;
-          break;
-        case MASH(0, 1, 0, 0, 1):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
-          pstart = x * 4 + 3;
-          p_score = true;
-          break;
-        case MASH(0, 1, 0, 1, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
-          rngs.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
-          pstart = x * 4 + 2;
-          break;
-        case MASH(0, 1, 0, 1, 1):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 1));
-          pstart = x * 4 + 2;
-          p_score = true;
-          break;
-        case MASH(0, 1, 1, 0, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 2));
-          pstart = x * 4 + 0;
-          break;
-        case MASH(0, 1, 1, 0, 1):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 2));
-          pstart = x * 4 + 3;
-          p_score = true;
-          break;
-        case MASH(0, 1, 1, 1, 0):
-          rngs.emplace_back(ImageRange(x * 4 + 0, x * 4 + 3));
-          pstart = x * 4 + 0;
-          break;
-        case MASH(0, 1, 1, 1, 1):
-          pstart = x * 4 + 0;
-          p_score = true;
-          break;
-        case MASH(1, 0, 0, 0, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          p_score = false;
-          break;
-        case MASH(1, 0, 0, 0, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          pstart = x * 4 + 3;
-          break;
-        case MASH(1, 0, 0, 1, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          rngs.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
-          pstart = x * 4 + 2;
-          p_score = false;
-          break;
-        case MASH(1, 0, 0, 1, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          pstart = x * 4 + 2;
-          break;
-        case MASH(1, 0, 1, 0, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
-          pstart = x * 4 + 1;
-          p_score = false;
-          break;
-        case MASH(1, 0, 1, 0, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 2));
-          pstart = x * 4 + 3;
-          break;
-        case MASH(1, 0, 1, 1, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          rngs.emplace_back(ImageRange(x * 4 + 1, x * 4 + 3));
-          pstart = x * 4 + 1;
-          p_score = false;
-          break;
-        case MASH(1, 0, 1, 1, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 0));
-          pstart = x * 4 + 1;
-          break;
-        case MASH(1, 1, 0, 0, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 1));
-          p_score = false;
-          break;
-        case MASH(1, 1, 0, 0, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 1));
-          pstart = x * 4 + 3;
-          break;
-        case MASH(1, 1, 0, 1, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 1));
-          rngs.emplace_back(ImageRange(x * 4 + 2, x * 4 + 3));
-          pstart = x * 4 + 2;
-          p_score = false;
-          break;
-        case MASH(1, 1, 0, 1, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 1));
-          pstart = x * 4 + 2;
-          break;
-        case MASH(1, 1, 1, 0, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 2));
-          p_score = false;
-          break;
-        case MASH(1, 1, 1, 0, 1):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 2));
-          pstart = x * 4 + 3;
-          break;
-        case MASH(1, 1, 1, 1, 0):
-          rngs.emplace_back(ImageRange(pstart, x * 4 + 3));
-          p_score = false;
-          break;
-        case MASH(1, 1, 1, 1, 1):
-          break;
-      }
-
-      for (int i = 0; i < 4; ++i) {
-        if ((v[i * 2] > value) != p_score) {
-          if (p_score) {
-            rngs.emplace_back(ImageRange(pstart, x * 4 + i));
-          } else {
-            pstart = x * 4 + i;
-          }
-          p_score = !p_score;
-        }
-      }
-    }
-    if (p_score) {
-      rngs.emplace_back(ImageRange(pstart, fmt.w));
-    }
-    ranges.push_back(rngs);
-  }
-  return RangeImage(0, std::move(ranges));
-}
-
-#undef MASH
-
 int main(int argc, char **argv) {
   (void)argc;
   (void)argv;
@@ -301,43 +94,44 @@
   camera0->set_on_frame([&](DataRef data,
                             monotonic_clock::time_point monotonic_now) {
     aos::vision::ImageFormat fmt{640, 480};
-    // Use threshold from aos::vision. This will run at 15 FPS.
-    aos::vision::BlobList imgs =
-        aos::vision::FindBlobs(aos::vision::DoThresholdYUYV(fmt, data.data(), 120));
+    aos::vision::BlobList imgs = aos::vision::FindBlobs(
+        aos::vision::SlowYuyvYThreshold(fmt, data.data(), 120));
     finder_.PreFilter(&imgs);
     LOG(INFO, "Blobs: (%zu).\n", imgs.size());
 
-    bool verbose = false;
-    std::vector<std::vector<Segment<2>>> raw_polys;
+    constexpr bool verbose = false;
+    ::std::vector<Polygon> raw_polys;
     for (const RangeImage &blob : imgs) {
       // Convert blobs to contours in the corrected space.
       ContourNode* contour = finder_.GetContour(blob);
-      finder_.UnWarpContour(contour);
-      std::vector<Segment<2>> polygon = finder_.FillPolygon(contour, verbose);
-      if (!polygon.empty()) {
+      ::std::vector<::Eigen::Vector2f> unwarped_contour =
+          finder_.UnWarpContour(contour);
+      const Polygon polygon =
+          finder_.FindPolygon(::std::move(unwarped_contour), verbose);
+      if (!polygon.segments.empty()) {
         raw_polys.push_back(polygon);
       }
     }
     LOG(INFO, "Polygons: (%zu).\n", raw_polys.size());
 
     // Calculate each component side of a possible target.
-    std::vector<TargetComponent> target_component_list =
-        finder_.FillTargetComponentList(raw_polys);
+    ::std::vector<TargetComponent> target_component_list =
+        finder_.FillTargetComponentList(raw_polys, verbose);
     LOG(INFO, "Components: (%zu).\n", target_component_list.size());
 
     // Put the components together into targets.
-    std::vector<Target> target_list =
+    ::std::vector<Target> target_list =
         finder_.FindTargetsFromComponents(target_component_list, verbose);
     LOG(INFO, "Potential Target: (%zu).\n", target_list.size());
 
     // Use the solver to generate an intermediate version of our results.
-    std::vector<IntermediateResult> results;
+    ::std::vector<IntermediateResult> results;
     for (const Target &target : target_list) {
       results.emplace_back(finder_.ProcessTargetToResult(target, verbose));
     }
     LOG(INFO, "Raw Results: (%zu).\n", results.size());
 
-    results = finder_.FilterResults(results, 30);
+    results = finder_.FilterResults(results, 30, verbose);
     LOG(INFO, "Results: (%zu).\n", results.size());
 
     // TODO: Select top 3 (randomly?)
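
For reference, the deleted DoThresholdYUYV was a hand-unrolled fast path over four pixels at a time; aos::vision::SlowYuyvYThreshold presumably computes the same thing a pixel at a time. A naive sketch of that operation, using the same types and conventions as the deleted code (an illustration of the idea under that assumption, not the actual aos implementation; it relies on the using-declarations already in target_sender.cc):

#include <cstdint>
#include <utility>
#include <vector>

// Naive YUYV luma threshold.  In YUYV the Y (luma) byte for pixel x of a row
// sits at byte offset x * 2.  Emit an ImageRange for every run of bright
// pixels, matching the half-open [start, end) convention of the deleted code.
inline RangeImage NaiveYuyvYThreshold(ImageFormat fmt, const char *data,
                                      uint8_t value) {
  std::vector<std::vector<ImageRange>> ranges;
  ranges.reserve(fmt.h);
  for (int y = 0; y < fmt.h; ++y) {
    const uint8_t *row =
        reinterpret_cast<const uint8_t *>(data) + fmt.w * y * 2;
    std::vector<ImageRange> rngs;
    int start = -1;
    for (int x = 0; x < fmt.w; ++x) {
      const bool bright = row[x * 2] > value;
      if (bright && start < 0) {
        start = x;
      } else if (!bright && start >= 0) {
        rngs.emplace_back(ImageRange(start, x));
        start = -1;
      }
    }
    if (start >= 0) {
      rngs.emplace_back(ImageRange(start, fmt.w));
    }
    ranges.push_back(std::move(rngs));
  }
  return RangeImage(0, std::move(ranges));
}
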
diff --git a/y2019/vision/target_types.h b/y2019/vision/target_types.h
index 3174b90..8ee1f4c 100644
--- a/y2019/vision/target_types.h
+++ b/y2019/vision/target_types.h
@@ -25,12 +25,20 @@
     }
   }
   bool is_right;
-  aos::vision::Vector<2> top;
-  aos::vision::Vector<2> inside;
-  aos::vision::Vector<2> outside;
-  aos::vision::Vector<2> bottom;
+  // The point which is the upper outside point on this side of the target pair.
+  ::aos::vision::Vector<2> top;
+  // The point which is the upper inside point on this side of the target pair.
+  ::aos::vision::Vector<2> inside;
+  // The point which is the outer bottom point on this side of the target pair.
+  ::aos::vision::Vector<2> outside;
+  // The point which is the inner bottom point on this side of the target pair.
+  ::aos::vision::Vector<2> bottom;
 
   aos::vision::Segment<2> major_axis;
+
+  // The point which is the "lowest" along the outer edge.  This point is
+  // useful for making sure clipped targets are "big enough" to cover all the
+  // pixels.
+  ::Eigen::Vector2f bottom_point;
 };
 
 // Convert back to screen space for final result.
@@ -92,6 +100,11 @@
 
   // Error from solver calculations.
   double solver_error;
+
+  // Extrinsics and error from the more relaxed 4 point problem.
+  ExtrinsicParams backup_extrinsics;
+
+  double backup_solver_error;
 };
 
 // Final formatting ready for output on the wire.