Merge "Make the superstructure splines faster and more gentle"
diff --git a/WORKSPACE b/WORKSPACE
index 698c066..f411682 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -769,9 +769,9 @@
     deps = ["@//third_party/allwpilib/wpimath"],
 )
 """,
-    sha256 = "decff0a28fa4a167696cc2e1122b6a5acd2fef01d3bfd356d8cad25bb487a191",
+    sha256 = "340a9c8e726e2eb365b7a40a722df05fe7c7072c5c4a617fa0218eb6d074ad9f",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.10/api-cpp-23.0.10-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.11/api-cpp-23.0.11-headers.zip",
     ],
 )
 
@@ -793,9 +793,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "00aea02c583d109056e2716e73b7d70e84d5c56a6daebd1dc9f4612c430894f8",
+    sha256 = "11f392bebfe54f512be9ef59809e1a10c4497e0ce92970645f054e7e04fe7ef6",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.10/api-cpp-23.0.10-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.11/api-cpp-23.0.11-linuxathena.zip",
     ],
 )
 
@@ -808,9 +808,9 @@
     hdrs = glob(['ctre/**/*.h', 'ctre/**/*.hpp']),
 )
 """,
-    sha256 = "3d503df97b711c150c0b50238f644c528e55d5b82418c8e3970c79faa14b749c",
+    sha256 = "7585e1bd9e581dd745e7f040ab521b966b40a04d05bc7fa82d6dafe2fb65764e",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.10/tools-23.0.10-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.11/tools-23.0.11-headers.zip",
     ],
 )
 
@@ -832,9 +832,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "4ada1ed9e11c208da9e8a8e8a648a0fe426e6717121ebc2f1392ae3ddc7f2b8c",
+    sha256 = "b1daadfe782c43ed32c2e1a3956998f9604a3fc9282ef866fd8dc1482f3b8cc9",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.10/tools-23.0.10-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.11/tools-23.0.11-linuxathena.zip",
     ],
 )
 
diff --git a/frc971/can_logger/BUILD b/frc971/can_logger/BUILD
new file mode 100644
index 0000000..a3b002a
--- /dev/null
+++ b/frc971/can_logger/BUILD
@@ -0,0 +1,39 @@
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+
+cc_binary(
+    name = "can_logger",
+    srcs = [
+        "can_logger_main.cc",
+    ],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":can_logger_lib",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//aos/time",
+        "@com_github_google_glog//:glog",
+    ],
+)
+
+cc_library(
+    name = "can_logger_lib",
+    srcs = [
+        "can_logger.cc",
+        "can_logger.h",
+    ],
+    deps = [
+        ":can_logging_fbs",
+        "//aos/events:event_loop",
+        "//aos/scoped:scoped_fd",
+        "@com_github_google_glog//:glog",
+    ],
+)
+
+flatbuffer_cc_library(
+    name = "can_logging_fbs",
+    srcs = [
+        "can_logging.fbs",
+    ],
+    gen_reflections = 1,
+    visibility = ["//visibility:public"],
+)
diff --git a/frc971/can_logger/can_logger.cc b/frc971/can_logger/can_logger.cc
new file mode 100644
index 0000000..6b4258a
--- /dev/null
+++ b/frc971/can_logger/can_logger.cc
@@ -0,0 +1,88 @@
+#include "frc971/can_logger/can_logger.h"
+
+namespace frc971 {
+namespace can_logger {
+
+CanLogger::CanLogger(aos::EventLoop *event_loop,
+                     std::string_view interface_name)
+    : fd_(socket(PF_CAN, SOCK_RAW | SOCK_NONBLOCK, CAN_RAW)),
+      frames_sender_(event_loop->MakeSender<CanFrame>("/can")) {
+  struct ifreq ifr = {};
+  interface_name.copy(ifr.ifr_name, IFNAMSIZ - 1);
+  PCHECK(ioctl(fd_.get(), SIOCGIFINDEX, &ifr) == 0)
+      << "Failed to get index for interface " << interface_name;
+
+  int enable_canfd = true;
+  PCHECK(setsockopt(fd_.get(), SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable_canfd,
+                    sizeof(enable_canfd)) == 0)
+      << "Failed to enable CAN FD";
+
+  struct sockaddr_can addr = {};
+  addr.can_family = AF_CAN;
+  addr.can_ifindex = ifr.ifr_ifindex;
+
+  PCHECK(bind(fd_.get(), reinterpret_cast<struct sockaddr *>(&addr),
+              sizeof(addr)) == 0)
+      << "Failed to bind socket to interface " << interface_name;
+
+  int receive_buffer_size;
+  socklen_t opt_size = sizeof(receive_buffer_size);
+  PCHECK(getsockopt(fd_.get(), SOL_SOCKET, SO_RCVBUF, &receive_buffer_size,
+                    &opt_size) == 0);
+  CHECK_EQ(opt_size, sizeof(receive_buffer_size));
+  VLOG(0) << "CAN receive buffer is " << receive_buffer_size << " bytes";
+
+  aos::TimerHandler *timer_handler = event_loop->AddTimer([this]() { Poll(); });
+  timer_handler->set_name("CAN logging loop");
+  timer_handler->Setup(event_loop->monotonic_now(), kPollPeriod);
+}
+
+void CanLogger::Poll() {
+  VLOG(2) << "Polling";
+  int frames_read = 0;
+  while (ReadFrame()) {
+    frames_read++;
+  }
+  VLOG(1) << "Read " << frames_read << " frames to end of buffer";
+}
+
+bool CanLogger::ReadFrame() {
+  errno = 0;
+  struct canfd_frame frame;
+  ssize_t bytes_read = read(fd_.get(), &frame, sizeof(struct canfd_frame));
+
+  if (bytes_read < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+    // There are no more frames sitting in the receive buffer.
+    return false;
+  }
+
+  VLOG(2) << "Read " << bytes_read << " bytes";
+  PCHECK(bytes_read > 0);
+  PCHECK(bytes_read == static_cast<ssize_t>(CAN_MTU) ||
+         bytes_read == static_cast<ssize_t>(CANFD_MTU))
+      << "Incomplete can frame";
+
+  struct timeval tv;
+  PCHECK(ioctl(fd_.get(), SIOCGSTAMP, &tv) == 0)
+      << "Failed to get timestamp of CAN frame";
+
+  aos::Sender<CanFrame>::Builder builder = frames_sender_.MakeBuilder();
+
+  auto frame_data = builder.fbb()->CreateVector<uint8_t>(frame.data, frame.len);
+
+  CanFrame::Builder can_frame_builder = builder.MakeBuilder<CanFrame>();
+  can_frame_builder.add_can_id(frame.can_id);
+  can_frame_builder.add_data(frame_data);
+  can_frame_builder.add_monotonic_timestamp_ns(
+      static_cast<std::chrono::nanoseconds>(
+          std::chrono::seconds(tv.tv_sec) +
+          std::chrono::microseconds(tv.tv_usec))
+          .count());
+
+  builder.CheckOk(builder.Send(can_frame_builder.Finish()));
+
+  return true;
+}
+
+}  // namespace can_logger
+}  // namespace frc971
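
The EAGAIN-based drain above is the standard nonblocking SocketCAN pattern. For reference, a minimal standalone sketch of the same flow (not part of this change; "can0" and classic, non-FD frames are assumptions for brevity):

```cpp
#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
  // SOCK_NONBLOCK makes read() fail with EAGAIN/EWOULDBLOCK once the
  // receive buffer is drained, instead of blocking.
  int fd = socket(PF_CAN, SOCK_RAW | SOCK_NONBLOCK, CAN_RAW);
  if (fd < 0) return 1;

  struct ifreq ifr = {};
  strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
  if (ioctl(fd, SIOCGIFINDEX, &ifr) != 0) return 1;

  struct sockaddr_can addr = {};
  addr.can_family = AF_CAN;
  addr.can_ifindex = ifr.ifr_ifindex;
  if (bind(fd, reinterpret_cast<struct sockaddr *>(&addr), sizeof(addr)) != 0) {
    return 1;
  }

  // Drain everything queued, the same loop structure as Poll()/ReadFrame().
  struct can_frame frame;
  while (true) {
    ssize_t n = read(fd, &frame, sizeof(frame));
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) break;  // empty
    if (n != static_cast<ssize_t>(sizeof(frame))) return 1;  // short read
    std::printf("id=0x%x len=%d\n", frame.can_id, frame.can_dlc);
  }
  close(fd);
  return 0;
}
```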
diff --git a/frc971/can_logger/can_logger.h b/frc971/can_logger/can_logger.h
new file mode 100644
index 0000000..cf37841
--- /dev/null
+++ b/frc971/can_logger/can_logger.h
@@ -0,0 +1,50 @@
+#ifndef FRC971_CAN_LOGGER_CAN_LOGGER_H_
+#define FRC971_CAN_LOGGER_CAN_LOGGER_H_
+
+#include <linux/can.h>
+#include <linux/can/raw.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+
+#include <chrono>
+
+#include "aos/events/event_loop.h"
+#include "aos/realtime.h"
+#include "aos/scoped/scoped_fd.h"
+#include "frc971/can_logger/can_logging_generated.h"
+
+namespace frc971 {
+namespace can_logger {
+
+// This class listens to all the traffic on a SocketCAN interface and sends it
+// on the aos event loop so it can be logged with the aos logging
+// infrastructure.
+class CanLogger {
+ public:
+  static constexpr std::chrono::milliseconds kPollPeriod =
+      std::chrono::milliseconds(100);
+
+  CanLogger(aos::EventLoop *event_loop,
+            std::string_view interface_name = "can0");
+
+  CanLogger(const CanLogger &) = delete;
+  CanLogger &operator=(const CanLogger &) = delete;
+
+ private:
+  void Poll();
+
+  // Reads a CAN frame from the socket and sends it on the event loop.
+  // Returns true if successful and false if the receive buffer is empty.
+  bool ReadFrame();
+
+  aos::ScopedFD fd_;
+  aos::Sender<CanFrame> frames_sender_;
+};
+
+}  // namespace can_logger
+}  // namespace frc971
+
+#endif  // FRC971_CAN_LOGGER_CAN_LOGGER_H_
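
Why kPollPeriod = 100 ms and the SO_RCVBUF log line matter: every frame that arrives between polls has to fit in the socket's receive buffer. A back-of-envelope check, assuming worst-case traffic near the 6000 Hz configured for the /can channel below (the default-buffer figure is a typical Linux value, not something this change sets):

```cpp
#include <linux/can.h>

// ~6000 frames/sec at a 100 ms poll period means ~600 frames per poll.
constexpr int kFramesPerSecond = 6000;  // assumed worst case
constexpr int kPollsPerSecond = 10;     // kPollPeriod = 100 ms
constexpr int kFramesPerPoll = kFramesPerSecond / kPollsPerSecond;
// A canfd_frame is 72 bytes, so ~43 KB per poll; typical Linux defaults for
// SO_RCVBUF (~208 KB) leave comfortable headroom, which is what the VLOG of
// the buffer size lets you confirm on the target.
constexpr int kBytesPerPoll =
    kFramesPerPoll * static_cast<int>(sizeof(canfd_frame));
static_assert(kBytesPerPoll < 208 * 1024, "fits in a typical default buffer");
```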
diff --git a/frc971/can_logger/can_logger_main.cc b/frc971/can_logger/can_logger_main.cc
new file mode 100644
index 0000000..42c7162
--- /dev/null
+++ b/frc971/can_logger/can_logger_main.cc
@@ -0,0 +1,18 @@
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "frc971/can_logger/can_logger.h"
+
+int main(int argc, char **argv) {
+  ::aos::InitGoogle(&argc, &argv);
+
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig("aos_config.json");
+
+  ::aos::ShmEventLoop event_loop(&config.message());
+
+  frc971::can_logger::CanLogger can_logger(&event_loop, "can0");
+
+  event_loop.Run();
+
+  return 0;
+}
diff --git a/frc971/can_logger/can_logging.fbs b/frc971/can_logger/can_logging.fbs
new file mode 100644
index 0000000..d6ec8b9
--- /dev/null
+++ b/frc971/can_logger/can_logging.fbs
@@ -0,0 +1,13 @@
+namespace frc971.can_logger;
+
+// A message to represent a single CAN or CAN FD frame.
+table CanFrame {
+  // CAN id + flags
+  can_id:uint32 (id: 0);
+  // The body of the CAN frame, up to 64 bytes long.
+  data:[ubyte] (id: 1);
+  // The hardware timestamp of when the frame came in.
+  monotonic_timestamp_ns:uint64 (id: 2);
+}
+
+root_type CanFrame;
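
A hedged sketch of a consumer of this message, assuming the usual aos::EventLoop Watcher API used elsewhere in this codebase (not part of this change):

```cpp
#include "aos/events/shm_event_loop.h"
#include "aos/init.h"
#include "frc971/can_logger/can_logging_generated.h"
#include "glog/logging.h"

int main(int argc, char **argv) {
  aos::InitGoogle(&argc, &argv);
  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
      aos::configuration::ReadConfig("aos_config.json");
  aos::ShmEventLoop event_loop(&config.message());
  // Log every frame the can_logger publishes on /can.
  event_loop.MakeWatcher(
      "/can", [](const frc971::can_logger::CanFrame &frame) {
        LOG(INFO) << "id 0x" << std::hex << frame.can_id() << std::dec
                  << " len "
                  << (frame.data() != nullptr ? frame.data()->size() : 0u)
                  << " t_ns " << frame.monotonic_timestamp_ns();
      });
  event_loop.Run();
  return 0;
}
```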
diff --git a/frc971/vision/target_map.fbs b/frc971/vision/target_map.fbs
index de79744..10f48cd 100644
--- a/frc971/vision/target_map.fbs
+++ b/frc971/vision/target_map.fbs
@@ -48,6 +48,15 @@
   // NOTE: not filtered by aprilrobotics.cc so that we can log
   // more detections.
   distortion_factor:double (id: 5);
+
+  // Ratio of the pose_error of the best estimation to
+  // that of the second-best estimation.
+  // Only filled out if this pose represents a live detection.
+  // This should be significantly less than 1;
+  // otherwise, this pose may be a wrong solution.
+  // NOTE: not filtered by aprilrobotics.cc so that we can log
+  // more detections.
+  pose_error_ratio:double (id: 6);
 }
 
 // Map of all target poses on a field.
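
How the new field is meant to be read, with made-up numbers: planar tag pose estimation can return two candidate solutions, and a ratio near 1.0 means they were nearly indistinguishable, so the chosen pose is suspect.

```cpp
// All values hypothetical; only the 0.4 cutoff comes from this change
// (FLAGS_max_pose_error_ratio in localizer.cc and target_mapping.cc).
constexpr double kBestPoseError = 2.0e-8;
constexpr double kSecondaryPoseError = 8.0e-8;
constexpr double kPoseErrorRatio = kBestPoseError / kSecondaryPoseError;
// 0.25 < 0.4, so this detection would be kept; a ratio near 1.0 would be
// rejected as ambiguous.
static_assert(kPoseErrorRatio < 0.4, "accepted under the default threshold");
```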
diff --git a/frc971/vision/visualize_robot.cc b/frc971/vision/visualize_robot.cc
index eb2da9a..55932ef 100644
--- a/frc971/vision/visualize_robot.cc
+++ b/frc971/vision/visualize_robot.cc
@@ -1,11 +1,12 @@
 #include "frc971/vision/visualize_robot.h"
-#include "glog/logging.h"
 
 #include <opencv2/calib3d.hpp>
 #include <opencv2/core/eigen.hpp>
 #include <opencv2/highgui/highgui.hpp>
 #include <opencv2/imgproc.hpp>
 
+#include "glog/logging.h"
+
 namespace frc971 {
 namespace vision {
 
@@ -42,15 +43,17 @@
   return projected_point;
 }
 
-void VisualizeRobot::DrawLine(Eigen::Vector3d start3d, Eigen::Vector3d end3d) {
+void VisualizeRobot::DrawLine(Eigen::Vector3d start3d, Eigen::Vector3d end3d,
+                              cv::Scalar color) {
   cv::Point start2d = ProjectPoint(start3d);
   cv::Point end2d = ProjectPoint(end3d);
 
-  cv::line(image_, start2d, end2d, cv::Scalar(0, 200, 0));
+  cv::line(image_, start2d, end2d, color);
 }
 
 void VisualizeRobot::DrawFrameAxes(Eigen::Affine3d H_world_target,
-                                   std::string label, double axis_scale) {
+                                   std::string label, cv::Scalar label_color,
+                                   double axis_scale) {
   // Map origin to display from global (world) frame to camera frame
   Eigen::Affine3d H_viewpoint_target =
       H_world_viewpoint_.inverse() * H_world_target;
@@ -79,13 +82,13 @@
     label_coord[0] = label_coord[0] / label_coord[2] + 5;
     label_coord[1] = label_coord[1] / label_coord[2];
     cv::putText(image_, label, cv::Point(label_coord[0], label_coord[1]),
-                cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(0, 0, 255));
+                cv::FONT_HERSHEY_PLAIN, 1.0, label_color);
   }
 }
 
 void VisualizeRobot::DrawRobotOutline(Eigen::Affine3d H_world_robot,
-                                      std::string label) {
-  DrawFrameAxes(H_world_robot, label);
+                                      std::string label, cv::Scalar color) {
+  DrawFrameAxes(H_world_robot, label, color);
   const double kBotHalfWidth = 0.75 / 2.0;
   const double kBotHalfLength = 1.0 / 2.0;
   // Compute coordinates for front/rear and right/left corners
@@ -98,10 +101,10 @@
   Eigen::Vector3d rr_corner =
       H_world_robot * Eigen::Vector3d(-kBotHalfLength, kBotHalfWidth, 0);
 
-  DrawLine(fr_corner, fl_corner);
-  DrawLine(fl_corner, rl_corner);
-  DrawLine(rl_corner, rr_corner);
-  DrawLine(rr_corner, fr_corner);
+  DrawLine(fr_corner, fl_corner, color);
+  DrawLine(fl_corner, rl_corner, color);
+  DrawLine(rl_corner, rr_corner, color);
+  DrawLine(rr_corner, fr_corner, color);
 }
 
 }  // namespace vision
diff --git a/frc971/vision/visualize_robot.h b/frc971/vision/visualize_robot.h
index f5e75af..cd8b4d0 100644
--- a/frc971/vision/visualize_robot.h
+++ b/frc971/vision/visualize_robot.h
@@ -24,6 +24,14 @@
   // Set image on which to draw
   void SetImage(cv::Mat image) { image_ = image; }
 
+  // Sets image to all black.
+  // Uses default_size if no image has been set yet, else image_.size().
+  void ClearImage(cv::Size default_size = cv::Size(1280, 720)) {
+    auto image_size = (image_.data == nullptr ? default_size : image_.size());
+    cv::Mat black_image_mat = cv::Mat::zeros(image_size, CV_8UC3);
+    SetImage(black_image_mat);
+  }
+
   // Set the viewpoint of the camera relative to a global origin
   void SetViewpoint(Eigen::Affine3d view_origin) {
     H_world_viewpoint_ = view_origin;
@@ -49,16 +57,19 @@
   cv::Point ProjectPoint(Eigen::Vector3d point3d);
 
   // Draw a line connecting two 3D points
-  void DrawLine(Eigen::Vector3d start, Eigen::Vector3d end);
+  void DrawLine(Eigen::Vector3d start, Eigen::Vector3d end,
+                cv::Scalar color = cv::Scalar(0, 200, 0));
 
   // Draw coordinate frame for a target frame relative to the world frame
   // Axes are drawn (x,y,z) -> (red, green, blue)
   void DrawFrameAxes(Eigen::Affine3d H_world_target, std::string label = "",
+                     cv::Scalar label_color = cv::Scalar(0, 0, 255),
                      double axis_scale = 0.25);
 
   // TODO<Jim>: Also implement DrawBoardOutline?  Maybe one function w/
   // parameters?
-  void DrawRobotOutline(Eigen::Affine3d H_world_robot, std::string label = "");
+  void DrawRobotOutline(Eigen::Affine3d H_world_robot, std::string label = "",
+                        cv::Scalar color = cv::Scalar(0, 200, 0));
 
   Eigen::Affine3d H_world_viewpoint_;  // Where to view the world from
   cv::Mat image_;                      // Image to draw on
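
A hypothetical usage of the new color parameters (colors are BGR, matching OpenCV's cv::Scalar convention; the poses are stand-ins for real ones):

```cpp
#include <opencv2/highgui/highgui.hpp>

#include "frc971/vision/visualize_robot.h"

void Demo() {
  frc971::vision::VisualizeRobot vis_robot;
  vis_robot.ClearImage();  // black 1280x720 canvas by default
  vis_robot.SetDefaultViewpoint(1000, 500.0);

  // Hypothetical poses for illustration only.
  Eigen::Affine3d H_world_robot = Eigen::Affine3d::Identity();
  Eigen::Affine3d H_world_target =
      Eigen::Affine3d(Eigen::Translation3d(1.0, 0.5, 0.0));

  vis_robot.DrawRobotOutline(H_world_robot, "pi2", cv::Scalar(255, 255, 0));
  vis_robot.DrawFrameAxes(H_world_target, "tag 7", cv::Scalar(0, 255, 255));
  cv::imshow("View", vis_robot.image_);
  cv::waitKey(0);
}
```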
diff --git a/y2023/BUILD b/y2023/BUILD
index 095b856..08713b7 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -37,6 +37,7 @@
         "//y2023/autonomous:binaries",
         ":joystick_reader",
         ":wpilib_interface",
+        "//frc971/can_logger",
         "//aos/network:message_bridge_client",
         "//aos/network:message_bridge_server",
         "//y2023/control_loops/drivetrain:drivetrain",
@@ -211,6 +212,7 @@
         "//y2023/control_loops/superstructure:superstructure_output_fbs",
         "//y2023/control_loops/superstructure:superstructure_position_fbs",
         "//y2023/control_loops/superstructure:superstructure_status_fbs",
+        "//frc971/can_logger:can_logging_fbs",
     ],
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
diff --git a/y2023/constants/971.json b/y2023/constants/971.json
index 2f91d1b..9f783fa 100644
--- a/y2023/constants/971.json
+++ b/y2023/constants/971.json
@@ -4,7 +4,7 @@
       "calibration": {% include 'y2023/vision/calib_files/calibration_pi-971-1_cam-23-05_ext_2023-03-05.json' %}
     },
     {
-      "calibration": {% include 'y2023/vision/calib_files/calibration_pi-971-2_cam-23-06_ext_2023-02-22.json' %}
+      "calibration": {% include 'y2023/vision/calib_files/calibration_pi-971-2_cam-23-06_ext_2023-03-25.json' %}
     },
     {
       "calibration": {% include 'y2023/vision/calib_files/calibration_pi-971-3_cam-23-07_ext_2023-02-22.json' %}
diff --git a/y2023/localizer/localizer.cc b/y2023/localizer/localizer.cc
index 4bb07e2..efffca3 100644
--- a/y2023/localizer/localizer.cc
+++ b/y2023/localizer/localizer.cc
@@ -9,6 +9,9 @@
 
 DEFINE_double(max_pose_error, 1e-6,
               "Throw out target poses with a higher pose error than this");
+DEFINE_double(
+    max_pose_error_ratio, 0.4,
+    "Throw out target poses with a higher pose error ratio than this");
 DEFINE_double(distortion_noise_scalar, 1.0,
               "Scale the target pose distortion factor by this when computing "
               "the noise.");
@@ -337,6 +340,11 @@
             << target.pose_error();
     return RejectImage(camera_index, RejectionReason::HIGH_POSE_ERROR,
                        &builder);
+  } else if (target.pose_error_ratio() > FLAGS_max_pose_error_ratio) {
+    VLOG(1) << "Rejecting target due to high pose error ratio "
+            << target.pose_error_ratio();
+    return RejectImage(camera_index, RejectionReason::HIGH_POSE_ERROR_RATIO,
+                       &builder);
   }
 
   double theta_at_capture = state_at_capture.value()(StateIdx::kTheta);
diff --git a/y2023/localizer/localizer_test.cc b/y2023/localizer/localizer_test.cc
index 7248b33..e8b7d56 100644
--- a/y2023/localizer/localizer_test.cc
+++ b/y2023/localizer/localizer_test.cc
@@ -155,6 +155,7 @@
             target_builder.add_position(position_offset);
             target_builder.add_orientation(quat_offset);
             target_builder.add_pose_error(pose_error_);
+            target_builder.add_pose_error_ratio(pose_error_ratio_);
             auto target_offset = target_builder.Finish();
 
             auto targets_offset = builder.fbb()->CreateVector({target_offset});
@@ -277,6 +278,7 @@
 
   uint64_t send_target_id_ = kTargetId;
   double pose_error_ = 1e-7;
+  double pose_error_ratio_ = 0.1;
   double implied_yaw_error_ = 0.0;
 
   gflags::FlagSaver flag_saver_;
@@ -507,4 +509,27 @@
       status_fetcher_->statistics()->Get(0)->total_candidates());
 }
 
+// Tests that we correctly reject a detection with a high pose error ratio.
+TEST_F(LocalizerTest, HighPoseErrorRatio) {
+  output_voltages_ << 0.0, 0.0;
+  send_targets_ = true;
+  // Send the smallest pose error ratio that will be rejected.
+  constexpr double kEps = 1e-9;
+  pose_error_ratio_ = 0.4 + kEps;
+
+  event_loop_factory_.RunFor(std::chrono::seconds(4));
+  CHECK(status_fetcher_.Fetch());
+  ASSERT_TRUE(status_fetcher_->has_statistics());
+  ASSERT_EQ(4u /* number of cameras */, status_fetcher_->statistics()->size());
+  ASSERT_EQ(0, status_fetcher_->statistics()->Get(0)->total_accepted());
+  ASSERT_LT(10, status_fetcher_->statistics()->Get(0)->total_candidates());
+  ASSERT_EQ(
+      status_fetcher_->statistics()
+          ->Get(0)
+          ->rejection_reasons()
+          ->Get(static_cast<size_t>(RejectionReason::HIGH_POSE_ERROR_RATIO))
+          ->count(),
+      status_fetcher_->statistics()->Get(0)->total_candidates());
+}
+
 }  // namespace y2023::localizer::testing
diff --git a/y2023/localizer/status.fbs b/y2023/localizer/status.fbs
index 0ea779f..854603e 100644
--- a/y2023/localizer/status.fbs
+++ b/y2023/localizer/status.fbs
@@ -24,6 +24,8 @@
   HIGH_DISTANCE_TO_TARGET = 6,
   // The robot was travelling too fast; we don't trust the target.
   ROBOT_TOO_FAST = 7,
+  // Pose estimation error ratio was higher than that of any normal detection.
+  HIGH_POSE_ERROR_RATIO = 8,
 }
 
 table RejectionCount {
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index cf2244a..45ab7db 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -1,14 +1,14 @@
 #include "y2023/vision/aprilrobotics.h"
 
-#include "y2023/vision/vision_util.h"
-
 #include <opencv2/highgui.hpp>
 
+#include "y2023/vision/vision_util.h"
+
 DEFINE_bool(
     debug, false,
     "If true, dump a ton of debug and crash on the first valid detection.");
 
-DEFINE_double(min_decision_margin, 75.0,
+DEFINE_double(min_decision_margin, 50.0,
               "Minimum decision margin (confidence) for an apriltag detection");
 DEFINE_int32(pixel_border, 10,
              "Size of image border within which to reject detected corners");
@@ -17,6 +17,8 @@
     "Maximum expected value for unscaled distortion factors. Will scale "
     "distortion factors so that this value (and a higher distortion) maps to "
     "1.0.");
+DEFINE_uint64(pose_estimation_iterations, 50,
+              "Number of iterations for apriltag pose estimation.");
 
 namespace y2023 {
 namespace vision {
@@ -24,9 +26,12 @@
 namespace chrono = std::chrono;
 
 AprilRoboticsDetector::AprilRoboticsDetector(aos::EventLoop *event_loop,
-                                             std::string_view channel_name)
+                                             std::string_view channel_name,
+                                             bool flip_image)
     : calibration_data_(event_loop),
       image_size_(0, 0),
+      flip_image_(flip_image),
+      node_name_(event_loop->node()->name()->string_view()),
       ftrace_(),
       image_callback_(event_loop, channel_name,
                       [&](cv::Mat image_color_mat,
@@ -94,8 +99,11 @@
 
   auto builder = target_map_sender_.MakeBuilder();
   std::vector<flatbuffers::Offset<frc971::vision::TargetPoseFbs>> target_poses;
-  for (const auto &detection : result.detections) {
-    target_poses.emplace_back(BuildTargetPose(detection, builder.fbb()));
+  for (auto &detection : result.detections) {
+    auto *fbb = builder.fbb();
+    auto pose = BuildTargetPose(detection, fbb);
+    DestroyPose(&detection.pose);
+    target_poses.emplace_back(pose);
   }
   const auto target_poses_offset = builder.fbb()->CreateVector(target_poses);
   auto target_map_builder = builder.MakeBuilder<frc971::vision::TargetMap>();
@@ -123,7 +131,7 @@
   return frc971::vision::CreateTargetPoseFbs(
       *fbb, detection.det.id, position_offset, orientation_offset,
       detection.det.decision_margin, detection.pose_error,
-      detection.distortion_factor);
+      detection.distortion_factor, detection.pose_error_ratio);
 }
 
 void AprilRoboticsDetector::UndistortDetection(
@@ -179,6 +187,11 @@
   return corner_points;
 }
 
+void AprilRoboticsDetector::DestroyPose(apriltag_pose_t *pose) const {
+  matd_destroy(pose->R);
+  matd_destroy(pose->t);
+}
+
 AprilRoboticsDetector::DetectionResult AprilRoboticsDetector::DetectTags(
     cv::Mat image, aos::monotonic_clock::time_point eof) {
   cv::Mat color_image;
@@ -252,8 +265,13 @@
       const aos::monotonic_clock::time_point before_pose_estimation =
           aos::monotonic_clock::now();
 
-      apriltag_pose_t pose;
-      double pose_error = estimate_tag_pose(&info, &pose);
+      apriltag_pose_t pose_1;
+      apriltag_pose_t pose_2;
+      double pose_error_1;
+      double pose_error_2;
+      estimate_tag_pose_orthogonal_iteration(&info, &pose_error_1, &pose_1,
+                                             &pose_error_2, &pose_2,
+                                             FLAGS_pose_estimation_iterations);
 
       const aos::monotonic_clock::time_point after_pose_estimation =
           aos::monotonic_clock::now();
@@ -262,7 +280,8 @@
                                           before_pose_estimation)
                      .count()
               << " seconds for pose estimation";
-      VLOG(1) << "Pose err: " << pose_error;
+      VLOG(1) << "Pose err 1: " << pose_error_1;
+      VLOG(1) << "Pose err 2: " << pose_error_2;
 
       // Send undistorted corner points in pink
       std::vector<cv::Point2f> corner_points = MakeCornerVector(det);
@@ -273,10 +292,29 @@
       double distortion_factor =
           ComputeDistortionFactor(orig_corner_points, corner_points);
 
+      // Pose estimation gives us two candidate poses;
+      // choose the one with the lower estimation error.
+      bool use_pose_1 = (pose_error_1 < pose_error_2);
+      auto best_pose = (use_pose_1 ? pose_1 : pose_2);
+      auto secondary_pose = (use_pose_1 ? pose_2 : pose_1);
+      double best_pose_error = (use_pose_1 ? pose_error_1 : pose_error_2);
+      double secondary_pose_error = (use_pose_1 ? pose_error_2 : pose_error_1);
+
+      CHECK_NE(best_pose_error, std::numeric_limits<double>::infinity())
+          << "Got no valid pose estimations, this should not be possible.";
+      double pose_error_ratio = best_pose_error / secondary_pose_error;
+
+      // Destroy the secondary pose if we got one.
+      if (secondary_pose_error != std::numeric_limits<double>::infinity()) {
+        DestroyPose(&secondary_pose);
+      }
+
       results.emplace_back(Detection{.det = *det,
-                                     .pose = pose,
-                                     .pose_error = pose_error,
-                                     .distortion_factor = distortion_factor});
+                                     .pose = best_pose,
+                                     .pose_error = best_pose_error,
+                                     .distortion_factor = distortion_factor,
+                                     .pose_error_ratio = pose_error_ratio});
+
       if (FLAGS_visualize) {
         // Draw raw (distorted) corner points in green
         cv::line(color_image, orig_corner_points[0], orig_corner_points[1],
@@ -309,9 +347,11 @@
   if (FLAGS_visualize) {
     // Display the result
     // Rotate by 180 degrees to make it upright
-    // TDOO<Jim>: Make this a flag, since we don't want it for box of pis
-    cv::rotate(color_image, color_image, 1);
-    cv::imshow("AprilRoboticsDetector Image", color_image);
+    if (flip_image_) {
+      cv::rotate(color_image, color_image, 1);
+    }
+    cv::imshow(absl::StrCat("AprilRoboticsDetector Image ", node_name_),
+               color_image);
   }
 
   const auto corners_offset = builder.fbb()->CreateVector(foxglove_corners);
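
The ownership contract behind the new DestroyPose(): estimate_tag_pose_orthogonal_iteration fills in up to two candidate poses whose R and t are heap-allocated matd_t's, and reports infinity for a missing second solution. A condensed sketch of that contract and the ratio computation, under those assumptions (the include path is also an assumption):

```cpp
#include <algorithm>
#include <limits>

#include "apriltag_pose.h"  // upstream apriltag header; path is an assumption

double PoseErrorRatio(apriltag_detection_info_t *info) {
  apriltag_pose_t pose_1, pose_2;
  double err_1, err_2;
  estimate_tag_pose_orthogonal_iteration(info, &err_1, &pose_1, &err_2,
                                         &pose_2, /*nIters=*/50);
  // With a single solution, the larger error is infinity and the ratio is
  // 0.0, i.e. maximally unambiguous, matching the logic above.
  const double ratio = std::min(err_1, err_2) / std::max(err_1, err_2);
  // R and t are heap-allocated; free whichever candidates were populated.
  if (err_1 != std::numeric_limits<double>::infinity()) {
    matd_destroy(pose_1.R);
    matd_destroy(pose_1.t);
  }
  if (err_2 != std::numeric_limits<double>::infinity()) {
    matd_destroy(pose_2.R);
    matd_destroy(pose_2.t);
  }
  return ratio;
}
```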
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index 7caa848..01e3138 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -30,6 +30,7 @@
     apriltag_pose_t pose;
     double pose_error;
     double distortion_factor;
+    double pose_error_ratio;
   };
 
   struct DetectionResult {
@@ -38,11 +39,15 @@
   };
 
   AprilRoboticsDetector(aos::EventLoop *event_loop,
-                        std::string_view channel_name);
+                        std::string_view channel_name, bool flip_image = true);
   ~AprilRoboticsDetector();
 
   void SetWorkerpoolAffinities();
 
+  // Deletes the heap-allocated rotation and translation matrices in the
+  // given pose.
+  void DestroyPose(apriltag_pose_t *pose) const;
+
   // Undistorts the april tag corners using the camera calibration
   void UndistortDetection(apriltag_detection_t *det) const;
 
@@ -78,6 +83,8 @@
   std::optional<cv::Mat> extrinsics_;
   cv::Mat dist_coeffs_;
   cv::Size image_size_;
+  bool flip_image_;
+  std::string_view node_name_;
 
   aos::Ftrace ftrace_;
 
diff --git a/y2023/vision/calib_files/calibration_pi-971-2_cam-23-06_ext_2023-03-25.json b/y2023/vision/calib_files/calibration_pi-971-2_cam-23-06_ext_2023-03-25.json
new file mode 100644
index 0000000..d8b5227
--- /dev/null
+++ b/y2023/vision/calib_files/calibration_pi-971-2_cam-23-06_ext_2023-03-25.json
@@ -0,0 +1 @@
+{ "node_name": "pi2", "team_number": 971, "intrinsics": [ 893.242981, 0.0, 639.796692, 0.0, 892.498718, 354.109344, 0.0, 0.0, 1.0 ], "fixed_extrinsics": { "data": [ 0.858536,    0.224804,    0.460847,    0.198133, 0.473832, -0.00434901,   -0.880604,   -0.221657, -0.195959,    0.974394,   -0.110253,    0.593406, 0.0, 0.0, 0.0, 1.0 ] }, "dist_coeffs": [ -0.451751, 0.252422, 0.000531, 0.000079, -0.079369 ], "calibration_timestamp": 1358501526409252911, "camera_id": "23-06" }
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index a110794..e9ef662 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -35,6 +35,15 @@
 DEFINE_string(pi, "pi1", "Pi name to generate mcap log for; defaults to pi1.");
 DEFINE_double(max_pose_error, 1e-6,
               "Throw out target poses with a higher pose error than this");
+DEFINE_double(
+    max_pose_error_ratio, 0.4,
+    "Throw out target poses with a higher pose error ratio than this");
+DEFINE_uint64(wait_key, 0,
+              "Time in ms to wait between images, if no click (0 to wait "
+              "indefinitely until click).");
+DEFINE_uint64(skip_to, 1,
+              "Start at combined image of this number (1 is the first image)");
+DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
 
 namespace y2023 {
 namespace vision {
@@ -43,6 +52,7 @@
 using frc971::vision::PoseUtils;
 using frc971::vision::TargetMap;
 using frc971::vision::TargetMapper;
+using frc971::vision::VisualizeRobot;
 namespace calibration = frc971::vision::calibration;
 
 // Change reference frame from camera to robot
@@ -53,8 +63,14 @@
   return H_robot_target;
 }
 
-frc971::vision::VisualizeRobot vis_robot_;
 const int kImageWidth = 1000;
+// Map from pi node name to color for drawing
+const std::map<std::string, cv::Scalar> kPiColors = {
+    {"pi1", cv::Scalar(255, 0, 255)},
+    {"pi2", cv::Scalar(255, 255, 0)},
+    {"pi3", cv::Scalar(0, 255, 255)},
+    {"pi4", cv::Scalar(255, 165, 0)},
+};
 
 // Add detected apriltag poses relative to the robot to
 // timestamped_target_detections
@@ -63,12 +79,25 @@
                      std::vector<DataAdapter::TimestampedDetection>
                          *timestamped_target_detections,
                      Eigen::Affine3d extrinsics, std::string node_name,
-                     frc971::vision::TargetMapper mapper) {
+                     frc971::vision::TargetMapper target_loc_mapper,
+                     std::set<std::string> *drawn_nodes,
+                     VisualizeRobot *vis_robot, size_t *display_count) {
+  bool drew = false;
+  std::stringstream label;
+  label << node_name << " - ";
+
   for (const auto *target_pose_fbs : *map.target_poses()) {
     // Skip detections with high pose errors
     if (target_pose_fbs->pose_error() > FLAGS_max_pose_error) {
-      LOG(INFO) << " Skipping tag " << target_pose_fbs->id()
-                << " due to pose error of " << target_pose_fbs->pose_error();
+      VLOG(1) << " Skipping tag " << target_pose_fbs->id()
+              << " due to pose error of " << target_pose_fbs->pose_error();
+      continue;
+    }
+    // Skip detections with high pose error ratios
+    if (target_pose_fbs->pose_error_ratio() > FLAGS_max_pose_error_ratio) {
+      VLOG(1) << " Skipping tag " << target_pose_fbs->id()
+              << " due to pose error ratio of "
+              << target_pose_fbs->pose_error_ratio();
       continue;
     }
 
@@ -97,26 +126,58 @@
             .id = static_cast<TargetMapper::TargetId>(target_pose.id)});
 
     if (FLAGS_visualize) {
+      // If we've already drawn in the current image,
+      // display it before clearing and adding the new poses.
+      if (drawn_nodes->count(node_name) != 0) {
+        (*display_count)++;
+        cv::putText(vis_robot->image_,
+                    "Poses #" + std::to_string(*display_count),
+                    cv::Point(600, 10), cv::FONT_HERSHEY_PLAIN, 1.0,
+                    cv::Scalar(255, 255, 255));
+
+        if (*display_count >= FLAGS_skip_to) {
+          cv::imshow("View", vis_robot->image_);
+          cv::waitKey(FLAGS_wait_key);
+        } else {
+          VLOG(1) << "At poses #" << std::to_string(*display_count);
+        }
+        vis_robot->ClearImage();
+        drawn_nodes->clear();
+      }
+
       Eigen::Affine3d H_world_target = PoseUtils::Pose3dToAffine3d(
-          mapper.GetTargetPoseById(target_pose_fbs->id())->pose);
+          target_loc_mapper.GetTargetPoseById(target_pose_fbs->id())->pose);
       Eigen::Affine3d H_world_robot = H_world_target * H_robot_target.inverse();
-      Eigen::Affine3d H_world_camera =
-          H_world_target * H_camera_target.inverse();
-      LOG(INFO) << node_name << ", " << target_pose_fbs->id()
-                << ", t = " << pi_distributed_time
-                << ", pose_error = " << target_pose_fbs->pose_error()
-                << ", robot_pos (x,y,z) + "
-                << H_world_robot.translation().transpose();
-      vis_robot_.DrawRobotOutline(
-          H_world_robot, node_name + "-" +
-                             std::to_string(target_pose_fbs->id()) + "  " +
-                             std::to_string(target_pose_fbs->pose_error() /
-                                            FLAGS_max_pose_error));
-      vis_robot_.DrawFrameAxes(H_world_camera, node_name);
-      vis_robot_.DrawFrameAxes(H_world_target,
-                               std::to_string(target_pose_fbs->id()));
+      VLOG(2) << node_name << ", " << target_pose_fbs->id()
+              << ", t = " << pi_distributed_time
+              << ", pose_error = " << target_pose_fbs->pose_error()
+              << ", pose_error_ratio = " << target_pose_fbs->pose_error_ratio()
+              << ", robot_pos (x,y,z) + "
+              << H_world_robot.translation().transpose();
+
+      label << "id " << target_pose_fbs->id() << ": err "
+            << (target_pose_fbs->pose_error() / FLAGS_max_pose_error)
+            << " ratio " << target_pose_fbs->pose_error_ratio() << " ";
+
+      vis_robot->DrawRobotOutline(H_world_robot,
+                                  std::to_string(target_pose_fbs->id()),
+                                  kPiColors.at(node_name));
+
+      vis_robot->DrawFrameAxes(H_world_target,
+                               std::to_string(target_pose_fbs->id()),
+                               kPiColors.at(node_name));
+      drew = true;
     }
   }
+  if (drew) {
+    size_t pi_number =
+        static_cast<size_t>(node_name[node_name.size() - 1] - '0');
+    cv::putText(vis_robot->image_, label.str(),
+                cv::Point(10, 10 + 20 * pi_number), cv::FONT_HERSHEY_PLAIN, 1.0,
+                kPiColors.at(node_name));
+
+    drawn_nodes->emplace(node_name);
+  }
 }
 
 // Get images from pi and pass apriltag positions to HandleAprilTags()
@@ -125,10 +186,15 @@
     aos::logger::LogReader *reader,
     std::vector<DataAdapter::TimestampedDetection>
         *timestamped_target_detections,
-    std::vector<std::unique_ptr<AprilRoboticsDetector>> *detectors) {
+    std::vector<std::unique_ptr<AprilRoboticsDetector>> *detectors,
+    std::set<std::string> *drawn_nodes, VisualizeRobot *vis_robot,
+    size_t *display_count) {
   static constexpr std::string_view kImageChannel = "/camera";
+  // For the robots, we need to flip the image since the cameras are rolled by
+  // 180 degrees.
+  bool flip_image = (FLAGS_team_number != 7971);
   auto detector_ptr = std::make_unique<AprilRoboticsDetector>(
-      detection_event_loop, kImageChannel);
+      detection_event_loop, kImageChannel, flip_image);
   // Get the camera extrinsics
   cv::Mat extrinsics_cv = detector_ptr->extrinsics().value();
   Eigen::Matrix4d extrinsics_matrix;
@@ -150,14 +216,8 @@
 
     std::string node_name = detection_event_loop->node()->name()->str();
     HandleAprilTags(map, pi_distributed_time, timestamped_target_detections,
-                    extrinsics, node_name, target_loc_mapper);
-    if (FLAGS_visualize) {
-      cv::imshow("View", vis_robot_.image_);
-      cv::waitKey();
-      cv::Mat image_mat =
-          cv::Mat::zeros(cv::Size(kImageWidth, kImageWidth), CV_8UC3);
-      vis_robot_.SetImage(image_mat);
-    }
+                    extrinsics, node_name, target_loc_mapper, drawn_nodes,
+                    vis_robot, display_count);
   });
 }
 
@@ -176,8 +236,9 @@
   aos::logger::LogReader reader(
       aos::logger::SortParts(unsorted_logfiles),
       config.has_value() ? &config->message() : nullptr);
-  // Send new april tag poses. This allows us to take a log of images, then play
-  // with the april detection code and see those changes take effect in mapping
+  // Send new april tag poses. This allows us to take a log of images, then
+  // play with the april detection code and see those changes take effect in
+  // mapping
   constexpr size_t kNumPis = 4;
   for (size_t i = 1; i <= kNumPis; i++) {
     reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
@@ -196,6 +257,9 @@
                           FLAGS_constants_path);
 
   std::vector<std::unique_ptr<AprilRoboticsDetector>> detectors;
+  VisualizeRobot vis_robot;
+  std::set<std::string> drawn_nodes;
+  size_t display_count = 0;
 
   const aos::Node *pi1 =
       aos::configuration::GetNode(reader.configuration(), "pi1");
@@ -204,7 +268,8 @@
   std::unique_ptr<aos::EventLoop> pi1_mapping_event_loop =
       reader.event_loop_factory()->MakeEventLoop("pi1_mapping", pi1);
   HandlePiCaptures(pi1_detection_event_loop.get(), pi1_mapping_event_loop.get(),
-                   &reader, &timestamped_target_detections, &detectors);
+                   &reader, &timestamped_target_detections, &detectors,
+                   &drawn_nodes, &vis_robot, &display_count);
 
   const aos::Node *pi2 =
       aos::configuration::GetNode(reader.configuration(), "pi2");
@@ -213,7 +278,8 @@
   std::unique_ptr<aos::EventLoop> pi2_mapping_event_loop =
       reader.event_loop_factory()->MakeEventLoop("pi2_mapping", pi2);
   HandlePiCaptures(pi2_detection_event_loop.get(), pi2_mapping_event_loop.get(),
-                   &reader, &timestamped_target_detections, &detectors);
+                   &reader, &timestamped_target_detections, &detectors,
+                   &drawn_nodes, &vis_robot, &display_count);
 
   const aos::Node *pi3 =
       aos::configuration::GetNode(reader.configuration(), "pi3");
@@ -222,7 +288,8 @@
   std::unique_ptr<aos::EventLoop> pi3_mapping_event_loop =
       reader.event_loop_factory()->MakeEventLoop("pi3_mapping", pi3);
   HandlePiCaptures(pi3_detection_event_loop.get(), pi3_mapping_event_loop.get(),
-                   &reader, &timestamped_target_detections, &detectors);
+                   &reader, &timestamped_target_detections, &detectors,
+                   &drawn_nodes, &vis_robot, &display_count);
 
   const aos::Node *pi4 =
       aos::configuration::GetNode(reader.configuration(), "pi4");
@@ -231,7 +298,8 @@
   std::unique_ptr<aos::EventLoop> pi4_mapping_event_loop =
       reader.event_loop_factory()->MakeEventLoop("pi4_mapping", pi4);
   HandlePiCaptures(pi4_detection_event_loop.get(), pi4_mapping_event_loop.get(),
-                   &reader, &timestamped_target_detections, &detectors);
+                   &reader, &timestamped_target_detections, &detectors,
+                   &drawn_nodes, &vis_robot, &display_count);
 
   std::unique_ptr<aos::EventLoop> mcap_event_loop;
   std::unique_ptr<aos::McapLogger> relogger;
@@ -252,20 +320,20 @@
   }
 
   if (FLAGS_visualize) {
-    cv::Mat image_mat =
-        cv::Mat::zeros(cv::Size(kImageWidth, kImageWidth), CV_8UC3);
-    vis_robot_.SetImage(image_mat);
+    vis_robot.ClearImage();
     const double kFocalLength = 500.0;
-    vis_robot_.SetDefaultViewpoint(kImageWidth, kFocalLength);
+    vis_robot.SetDefaultViewpoint(kImageWidth, kFocalLength);
   }
 
   reader.event_loop_factory()->Run();
 
-  auto target_constraints =
-      DataAdapter::MatchTargetDetections(timestamped_target_detections);
+  if (FLAGS_solve) {
+    auto target_constraints =
+        DataAdapter::MatchTargetDetections(timestamped_target_detections);
 
-  frc971::vision::TargetMapper mapper(FLAGS_json_path, target_constraints);
-  mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
+    frc971::vision::TargetMapper mapper(FLAGS_json_path, target_constraints);
+    mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
+  }
 
   // Clean up all the pointers
   for (auto &detector_ptr : detectors) {
diff --git a/y2023/wpilib_interface.cc b/y2023/wpilib_interface.cc
index cd52cfd..2f41c91 100644
--- a/y2023/wpilib_interface.cc
+++ b/y2023/wpilib_interface.cc
@@ -114,16 +114,12 @@
 }  // namespace
 
 static constexpr int kCANFalconCount = 6;
-static constexpr int kCANSignalsCount = 4;
-static constexpr int kRollerSignalsCount = 4;
 static constexpr units::frequency::hertz_t kCANUpdateFreqHz = 200_Hz;
 
 class Falcon {
  public:
   Falcon(int device_id, std::string canbus,
-         aos::SizedArray<ctre::phoenixpro::BaseStatusSignalValue *,
-                         kCANFalconCount * kCANSignalsCount +
-                             kRollerSignalsCount> *signals)
+         std::vector<ctre::phoenixpro::BaseStatusSignalValue *> *signals)
       : talon_(device_id, canbus),
         device_id_(device_id),
         device_temp_(talon_.GetDeviceTemp()),
@@ -134,9 +130,7 @@
     // device temp is not timesynced so don't add it to the list of signals
     device_temp_.SetUpdateFrequency(kCANUpdateFreqHz);
 
-    CHECK_EQ(kCANSignalsCount, 4);
     CHECK_NOTNULL(signals);
-    CHECK_LE(signals->size() + 4u, signals->capacity());
 
     supply_voltage_.SetUpdateFrequency(kCANUpdateFreqHz);
     signals->push_back(&supply_voltage_);
@@ -279,8 +273,11 @@
 
 class CANSensorReader {
  public:
-  CANSensorReader(aos::EventLoop *event_loop)
+  CANSensorReader(
+      aos::EventLoop *event_loop,
+      std::vector<ctre::phoenixpro::BaseStatusSignalValue *> signals_registry)
       : event_loop_(event_loop),
+        signals_(signals_registry.begin(), signals_registry.end()),
         can_position_sender_(
             event_loop->MakeSender<drivetrain::CANPosition>("/drivetrain")),
         roller_falcon_data_(std::nullopt) {
@@ -294,12 +291,6 @@
     });
   }
 
-  aos::SizedArray<ctre::phoenixpro::BaseStatusSignalValue *,
-                  kCANFalconCount * kCANSignalsCount + kRollerSignalsCount>
-      *get_signals_registry() {
-    return &signals_;
-  };
-
   void set_falcons(std::shared_ptr<Falcon> right_front,
                    std::shared_ptr<Falcon> right_back,
                    std::shared_ptr<Falcon> right_under,
@@ -323,16 +314,8 @@
 
  private:
   void Loop() {
-    CHECK_EQ(signals_.size(), 28u);
     ctre::phoenix::StatusCode status =
-        ctre::phoenixpro::BaseStatusSignalValue::WaitForAll(
-            2000_ms, {signals_[0],  signals_[1],  signals_[2],  signals_[3],
-                      signals_[4],  signals_[5],  signals_[6],  signals_[7],
-                      signals_[8],  signals_[9],  signals_[10], signals_[11],
-                      signals_[12], signals_[13], signals_[14], signals_[15],
-                      signals_[16], signals_[17], signals_[18], signals_[19],
-                      signals_[20], signals_[21], signals_[22], signals_[23],
-                      signals_[24], signals_[25], signals_[26], signals_[27]});
+        ctre::phoenixpro::BaseStatusSignalValue::WaitForAll(2000_ms, signals_);
 
     if (!status.IsOK()) {
       AOS_LOG(ERROR, "Failed to read signals from falcons: %s: %s",
@@ -383,9 +366,7 @@
 
   aos::EventLoop *event_loop_;
 
-  aos::SizedArray<ctre::phoenixpro::BaseStatusSignalValue *,
-                  kCANFalconCount * kCANSignalsCount + kRollerSignalsCount>
-      signals_;
+  const std::vector<ctre::phoenixpro::BaseStatusSignalValue *> signals_;
   aos::Sender<drivetrain::CANPosition> can_position_sender_;
 
   std::shared_ptr<Falcon> right_front_, right_back_, right_under_, left_front_,
@@ -934,25 +915,27 @@
     std::shared_ptr<frc::DigitalOutput> superstructure_reading =
         make_unique<frc::DigitalOutput>(25);
 
+    std::vector<ctre::phoenixpro::BaseStatusSignalValue *> signals_registry;
+    std::shared_ptr<Falcon> right_front =
+        std::make_shared<Falcon>(1, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> right_back =
+        std::make_shared<Falcon>(2, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> right_under =
+        std::make_shared<Falcon>(3, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> left_front =
+        std::make_shared<Falcon>(4, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> left_back =
+        std::make_shared<Falcon>(5, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> left_under =
+        std::make_shared<Falcon>(6, "Drivetrain Bus", &signals_registry);
+    std::shared_ptr<Falcon> roller =
+        std::make_shared<Falcon>(13, "Drivetrain Bus", &signals_registry);
+
     // Thread 3.
     ::aos::ShmEventLoop can_sensor_reader_event_loop(&config.message());
     can_sensor_reader_event_loop.set_name("CANSensorReader");
-    CANSensorReader can_sensor_reader(&can_sensor_reader_event_loop);
-
-    std::shared_ptr<Falcon> right_front = std::make_shared<Falcon>(
-        1, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> right_back = std::make_shared<Falcon>(
-        2, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> right_under = std::make_shared<Falcon>(
-        3, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> left_front = std::make_shared<Falcon>(
-        4, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> left_back = std::make_shared<Falcon>(
-        5, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> left_under = std::make_shared<Falcon>(
-        6, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
-    std::shared_ptr<Falcon> roller = std::make_shared<Falcon>(
-        13, "Drivetrain Bus", can_sensor_reader.get_signals_registry());
+    CANSensorReader can_sensor_reader(&can_sensor_reader_event_loop,
+                                      std::move(signals_registry));
 
     can_sensor_reader.set_falcons(right_front, right_back, right_under,
                                   left_front, left_back, left_under, roller);
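
The wpilib_interface change above swaps a compile-time-sized aos::SizedArray (which forced the kCANSignalsCount bookkeeping and the hand-unrolled 28-element WaitForAll call) for a std::vector built before CANSensorReader is constructed. A toy distillation of that registration pattern, with placeholder types standing in for the Phoenix Pro ones:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

struct Signal {};  // stands in for ctre::phoenixpro::BaseStatusSignalValue

class Device {  // stands in for Falcon
 public:
  explicit Device(std::vector<Signal *> *registry) {
    registry->push_back(&signal_);  // each device self-registers its signals
  }

 private:
  Signal signal_;
};

class Reader {  // stands in for CANSensorReader
 public:
  explicit Reader(std::vector<Signal *> registry)
      : signals_(std::move(registry)) {}
  std::size_t size() const { return signals_.size(); }

 private:
  // No capacity constant needed; the registry arrives fully built.
  const std::vector<Signal *> signals_;
};

int main() {
  std::vector<Signal *> registry;
  Device a(&registry), b(&registry);
  Reader reader(std::move(registry));
  return reader.size() == 2 ? 0 : 1;
}
```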
diff --git a/y2023/y2023_roborio.json b/y2023/y2023_roborio.json
index cf3a14e..452a570 100644
--- a/y2023/y2023_roborio.json
+++ b/y2023/y2023_roborio.json
@@ -268,6 +268,14 @@
       "max_size": 448
     },
     {
+      "name": "/can",
+      "type": "frc971.can_logger.CanFrame",
+      "source_node": "roborio",
+      "frequency": 6000,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
       "name": "/drivetrain",
       "type": "y2023.control_loops.drivetrain.CANPosition",
       "source_node": "roborio",
@@ -566,6 +574,13 @@
       "nodes": [
         "roborio"
       ]
+    },
+    {
+      "name": "can_logger",
+      "executable_name": "can_logger",
+      "nodes": [
+        "roborio"
+      ]
     }
   ],
   "maps": [