Update constants and vision in y2024_swerve

Copy most of the constants from y2024, changing references from y2024 to
y2024_swerve.
Set specific constant values from the constants.cc file in common.jinja2
and constants.fbs.
Copy over vision from y2024, changing the files to use y2024_swerve.

Change-Id: I3f27667e838e1a454ee1d21a5d4e9cee79565054
Signed-off-by: Yash Maheshwari <yashmahe2018@gmail.com>
diff --git a/y2024_swerve/vision/BUILD b/y2024_swerve/vision/BUILD
new file mode 100644
index 0000000..2951130
--- /dev/null
+++ b/y2024_swerve/vision/BUILD
@@ -0,0 +1,86 @@
+filegroup(
+    name = "image_streamer_start",
+    srcs = ["image_streamer_start.sh"],
+    visibility = ["//visibility:public"],
+)
+
+cc_binary(
+    name = "target_mapping",
+    srcs = [
+        "target_mapping.cc",
+        "vision_util.cc",
+        "vision_util.h",
+    ],
+    data = [
+        "//y2024_swerve:aos_config",
+        "//y2024_swerve/constants:constants.json",
+        "//y2024_swerve/vision:maps",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//y2024_swerve:__subpackages__"],
+    deps = [
+        "//aos:init",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:log_reader",
+        "//aos/util:mcap_logger",
+        "//frc971/constants:constants_sender_lib",
+        "//frc971/control_loops:pose",
+        "//frc971/vision:calibration_fbs",
+        "//frc971/vision:charuco_lib",
+        "//frc971/vision:target_mapper",
+        "//frc971/vision:vision_util_lib",
+        "//frc971/vision:visualize_robot",
+        "//third_party:opencv",
+        "//y2024_swerve/constants:constants_fbs",
+        "//y2024_swerve/constants:simulated_constants_sender",
+    ],
+)
+
+cc_binary(
+    name = "apriltag_detector",
+    srcs = [
+        "apriltag_detector.cc",
+        "vision_util.cc",
+        "vision_util.h",
+    ],
+    features = ["cuda"],
+    target_compatible_with = ["@platforms//cpu:arm64"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//aos:configuration",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//frc971/orin:gpu_apriltag_lib",
+        "//third_party:cudart",
+        "//third_party/apriltag",
+        "//y2024_swerve/constants:constants_fbs",
+        "@com_github_gflags_gflags//:gflags",
+        "@com_github_google_glog//:glog",
+        "@com_github_nvidia_cccl//:cccl",
+        "@com_github_nvidia_cuco//:cuco",
+    ],
+)
+
+cc_binary(
+    name = "viewer",
+    srcs = [
+        "viewer.cc",
+        "vision_util.cc",
+        "vision_util.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = [
+        "//y2024_swerve:__subpackages__",
+    ],
+    deps = [
+        "//aos:init",
+        "//aos:json_to_flatbuffer",
+        "//aos/events:shm_event_loop",
+        "//frc971/constants:constants_sender_lib",
+        "//frc971/vision:vision_fbs",
+        "//frc971/vision:vision_util_lib",
+        "//third_party:opencv",
+        "//y2024_swerve/constants:constants_fbs",
+        "@com_google_absl//absl/strings",
+    ],
+)
diff --git a/y2024_swerve/vision/README.md b/y2024_swerve/vision/README.md
new file mode 100644
index 0000000..c81581f
--- /dev/null
+++ b/y2024_swerve/vision/README.md
@@ -0,0 +1,25 @@
+How to use the extrinsic calibration tooling for camera-to-camera calibration
+
+This all assumes you have cameras that have been intrinsically
+calibrated, and that pi1 has a valid extrinsic calibration (from robot
+origin / IMU to the pi1 camera).
+
+By default, it will compute the extrinsics for each of the other cameras
+(pi2, pi3, pi4) relative to the robot origin (IMU origin).
+
+Steps:
+* Place two Aruco Diamond markers about 1 meter apart
+  (center-to-center) at a height so that they will be in view of the
+  cameras when the robot is about 1-2m away
+* Start with the robot in a position where both markers are fully in
+  view of one camera
+* Enable logging for all cameras
+* Rotate the robot in place through a full circle, so that each camera
+  sees both tags, and at times just one tag
+* Stop the logging and copy the files to your laptop
+* Run the calibration code on the resulting files, e.g.,
+  * `bazel run -c opt //y2023/vision:calibrate_multi_cameras -- /data/PATH_TO_LOGS --team_number 971 --target_type charuco_diamond`
+  * Can add "--visualize" flag to see the camera views and marker detections
+  * I've sometimes found it necessary to add the "--skip_missing_forwarding_entries" flag, as guided by the logging messages
+* From the output, copy the calculated ("Updated") extrinsic into the
+  corresponding calibration file for the right robot and camera in the
+  calib_files directory
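+
+Putting the optional flags together, a full invocation might look like this
+(example only; adjust the log path and team number for your setup):
+
+`bazel run -c opt //y2023/vision:calibrate_multi_cameras -- /data/PATH_TO_LOGS --team_number 971 --target_type charuco_diamond --visualize --skip_missing_forwarding_entries`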
diff --git a/y2024_swerve/vision/apriltag_detector.cc b/y2024_swerve/vision/apriltag_detector.cc
new file mode 100644
index 0000000..4d4025e
--- /dev/null
+++ b/y2024_swerve/vision/apriltag_detector.cc
@@ -0,0 +1,52 @@
+#include <string>
+
+#include "aos/init.h"
+#include "frc971/orin/gpu_apriltag.h"
+#include "y2024_swerve/constants/constants_generated.h"
+#include "y2024_swerve/vision/vision_util.h"
+
+DEFINE_string(channel, "/camera", "Channel name");
+DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
+
+void GpuApriltagDetector() {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  frc971::constants::WaitForConstants<y2024_swerve::Constants>(
+      &config.message());
+
+  aos::ShmEventLoop event_loop(&config.message());
+
+  const frc971::constants::ConstantsFetcher<y2024_swerve::Constants>
+      calibration_data(&event_loop);
+
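+  // Channel names are expected to be exactly 8 characters, of the form
+  // "/cameraN"; the trailing digit selects which camera calibration to use.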
+  CHECK(FLAGS_channel.length() == 8);
+  int camera_id = std::stoi(FLAGS_channel.substr(7, 1));
+  const frc971::vision::calibration::CameraCalibration *calibration =
+      y2024_swerve::vision::FindCameraCalibration(
+          calibration_data.constants(),
+          event_loop.node()->name()->string_view(), camera_id);
+
+  frc971::apriltag::ApriltagDetector detector(&event_loop, FLAGS_channel,
+                                              calibration);
+
+  // TODO(austin): Figure out our core pinning strategy.
+  // event_loop.SetRuntimeAffinity(aos::MakeCpusetFromCpus({5}));
+
+  LOG(INFO) << "Setting scheduler priority";
+  struct sched_param param;
+  param.sched_priority = 21;
+  PCHECK(sched_setscheduler(0, SCHED_FIFO, &param) == 0);
+
+  LOG(INFO) << "Running event loop";
+  // TODO(austin): Pre-warm it...
+  event_loop.Run();
+}
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  GpuApriltagDetector();
+
+  return 0;
+}
diff --git a/y2024_swerve/vision/image_streamer_start.sh b/y2024_swerve/vision/image_streamer_start.sh
new file mode 100755
index 0000000..48d9da7
--- /dev/null
+++ b/y2024_swerve/vision/image_streamer_start.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Some configurations to avoid dropping frames
+# 640x480@30fps, 400x300@60fps.
+# Bitrate 500000-1500000
+WIDTH=640
+HEIGHT=480
+BITRATE=1500000
+LISTEN_ON="/camera/downsized"
+# Don't interfere with field webpage
+STREAMING_PORT=1181
+
+# Handle weirdness with openssl and gstreamer
+export OPENSSL_CONF=""
+
+# Enable for verbose logging
+#export GST_DEBUG=4
+
+export LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu/gstreamer-1.0
+
+exec ./image_streamer --width=$WIDTH --height=$HEIGHT --bitrate=$BITRATE --listen_on=$LISTEN_ON --config=aos_config.json --streaming_port=$STREAMING_PORT
+
diff --git a/y2024_swerve/vision/maps/BUILD b/y2024_swerve/vision/maps/BUILD
new file mode 100644
index 0000000..38191a4
--- /dev/null
+++ b/y2024_swerve/vision/maps/BUILD
@@ -0,0 +1,7 @@
+filegroup(
+    name = "maps",
+    srcs = glob([
+        "*.json",
+    ]),
+    visibility = ["//visibility:public"],
+)
diff --git a/y2024_swerve/vision/maps/target_map.json b/y2024_swerve/vision/maps/target_map.json
new file mode 100644
index 0000000..2a8dfef
--- /dev/null
+++ b/y2024_swerve/vision/maps/target_map.json
@@ -0,0 +1,236 @@
+{
+/* Targets have positive Z axis pointing into the board, positive X to the right
+   when looking at the board, and positive Y is down when looking at the board.
+   This means that you will get an identity rotation from the camera to target
+   frame when the target is upright, flat, and centered in the camera's view.
+
+   The global frame has its origin at the center of the field, positive X points
+   at the red driver's station, and positive Z points straight up.
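+
+   As a worked example (using the usual convention that each quaternion is
+   the target's orientation in the global frame): (w, x, y, z) =
+   (0.5, -0.5, 0.5, -0.5), used for tags 3 and 4, rotates the target's +Z
+   axis (into the board) onto the field's +X axis, so those tags face in
+   -X, back into the field from the red wall.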
+   */
+    "target_poses": [
+        {
+            "id": 1,
+            "position": {
+                "x": 6.809,
+                "y": -3.860,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": 0.6830127,
+                "z": -0.6830127
+            }
+        },
+        {
+            "id": 2,
+            "position": {
+                "x": 7.915,
+                "y": -3.223,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": 0.6830127,
+                "z": -0.6830127
+            }
+        },
+        {
+            "id": 3,
+            "position": {
+                "x": 8.309,
+                "y": 0.877,
+                "z": 1.456
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": 0.5,
+                "z": -0.5
+            }
+        },
+        {
+            "id": 4,
+            "position": {
+                "x": 8.309,
+                "y": 1.442,
+                "z": 1.456
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": 0.5,
+                "z": -0.5
+            }
+        },
+        {
+            "id": 5,
+            "position": {
+                "x": 6.428,
+                "y": 4.099,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.7071068,
+                "x": -0.7071068,
+                "y": 0.0,
+                "z": 0.0
+            }
+        },
+        {
+            "id": 6,
+            "position": {
+                "x": -6.430,
+                "y": 4.099,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.7071068,
+                "x": -0.7071068,
+                "y": 0.0,
+                "z": 0.0
+            }
+        },
+        {
+            "id": 7,
+            "position": {
+                "x": -8.309,
+                "y": 1.442,
+                "z": 1.474
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": -0.5,
+                "z": 0.5
+            }
+        },
+        {
+            "id": 8,
+            "position": {
+                "x": -8.309,
+                "y": 0.877,
+                "z": 1.474
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": -0.5,
+                "z": 0.5
+            }
+        },
+        {
+            "id": 9,
+            "position": {
+                "x": -7.915,
+                "y": -3.223,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": -0.6830127,
+                "z": 0.6830127
+            }
+        },
+        {
+            "id": 10,
+            "position": {
+                "x": -6.809,
+                "y": -3.860,
+                "z": 1.361
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": -0.6830127,
+                "z": 0.6830127
+            }
+        },
+        {
+            "id": 11,
+            "position": {
+                "x": 3.629,
+                "y": -0.393,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.6830127,
+                "x": -0.6830127,
+                "y": -0.1830127,
+                "z": 0.1830127
+            }
+        },
+        {
+            "id": 12,
+            "position": {
+                "x": 3.630,
+                "y": 0.392,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": -0.6830127,
+                "z": 0.6830127
+            }
+        },
+        {
+            "id": 13,
+            "position": {
+                "x": 2.949,
+                "y": -0.000,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": 0.5,
+                "z": -0.5
+            }
+        },
+        {
+            "id": 14,
+            "position": {
+                "x": -2.949,
+                "y": -0.000,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.5,
+                "x": -0.5,
+                "y": -0.5,
+                "z": 0.5
+            }
+        },
+        {
+            "id": 15,
+            "position": {
+                "x": -3.629,
+                "y": 0.393,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.1830127,
+                "x": -0.1830127,
+                "y": 0.6830127,
+                "z": -0.6830127
+            }
+        },
+        {
+            "id": 16,
+            "position": {
+                "x": -3.630,
+                "y": -0.392,
+                "z": 1.326
+            },
+            "orientation": {
+                "w": 0.6830127,
+                "x": -0.6830127,
+                "y": 0.1830127,
+                "z": -0.1830127
+            }
+        }
+    ]
+}
diff --git a/y2024_swerve/vision/rename_calibration_file.sh b/y2024_swerve/vision/rename_calibration_file.sh
new file mode 100755
index 0000000..dcdf2f2
--- /dev/null
+++ b/y2024_swerve/vision/rename_calibration_file.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+# Helper script to rename the camera calibration file when moving to new robot
+
+# grep isn't happy with set
+# set -e
+
+
+usage_and_exit () {
+    echo
+    echo "Usage:"
+    echo "$0 ORIG_FILENAME NEW_TEAM_NUMBER NEW_ORIN_NUMBER NEW_CAMERA_NUMBER"
+    echo
+    exit 2
+}
+
+if [[ $# -ne 4 ]]; then
+    echo "ERROR: Requires 4 parameters"
+    usage_and_exit
+fi
+
+ORIG_FILENAME=$1
+NEW_TEAM_NUMBER=$2
+NEW_ORIN_NUMBER=$3
+NEW_CAMERA_NUMBER=$4
+
+if [[ ! -f ${ORIG_FILENAME} ]]; then
+    echo "${ORIG_FILENAME} does not exist"
+    usage_and_exit
+fi
+
+check_971=`echo "${NEW_TEAM_NUMBER}" | grep "971"`
+if [[ ${check_971} == "" ]]; then
+    echo "NEW_TEAM_NUMBER (${NEW_TEAM_NUMBER}) does not contain '971'"
+    usage_and_exit
+fi
+
+if [[ ${NEW_ORIN_NUMBER} != 1 && ${NEW_ORIN_NUMBER} != 2 ]]; then
+    echo "NEW_ORIN_NUMBER (${NEW_ORIN_NUMBER}) must be either 1 or 2"
+    usage_and_exit
+fi
+
+if [[ ${NEW_CAMERA_NUMBER} != 0 && ${NEW_CAMERA_NUMBER} != 1 ]]; then
+    echo "NEW_CAMERA_NUMBER (${NEW_CAMERA_NUMBER}) must be either 0 or 1"
+    usage_and_exit
+fi
+
+# Extract parts of the filename, based on just the basename
+# This assumes filenames of the form:
+# calibration_orin-971-1-0_cam-24-01_2024-02-07_20-11-35.566609408.json
+IFS='_' read -r -a name_parts <<< `basename "${ORIG_FILENAME}"`
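+
+# For the example filename above, name_parts would be:
+# [0]="calibration", [1]="orin-971-1-0", [2]="cam-24-01",
+# [3]="2024-02-07", [4]="20-11-35.566609408.json"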
+
+echo "For ${ORIG_FILENAME}:"
+for element in "${name_parts[@]}"
+do
+    echo "$element"
+done
+
+# Rename file based on this new info (be sure to handle paths properly)
+NEW_FILENAME=`dirname ${ORIG_FILENAME}`"/${name_parts[0]}_orin-${NEW_TEAM_NUMBER}-${NEW_ORIN_NUMBER}-${NEW_CAMERA_NUMBER}_${name_parts[2]}_${name_parts[3]}_${name_parts[4]}"
+
+echo
+echo "For camera id: ${name_parts[2]}"
+echo "Renaming from:"
+echo "${ORIG_FILENAME} to: "
+echo "${NEW_FILENAME}"
+echo
+echo "and changing from "
+echo "${name_parts[1]} to: "
+echo "orin-${NEW_TEAM_NUMBER}-${NEW_ORIN_NUMBER}-${NEW_CAMERA_NUMBER}"
+echo
+
+mv ${ORIG_FILENAME} ${NEW_FILENAME}
+
+
+echo "REPLACING ORIN_NUMBER"
+sed -i s/orin[0-9]/orin${NEW_ORIN_NUMBER}/ ${NEW_FILENAME}
+
+echo "Replacing TEAM NUMBER"
+sed -i s/\"team_number\"\:\ [1-9]*\,/\"team_number\"\:\ ${NEW_TEAM_NUMBER},/ ${NEW_FILENAME}
+
+echo "REPLACING CAMERA_NUMBER"
+sed -i s/\"camera_number\"\:\ [0-9]/\"camera_number\"\:\ ${NEW_CAMERA_NUMBER}/ ${NEW_FILENAME}
diff --git a/y2024_swerve/vision/target_mapping.cc b/y2024_swerve/vision/target_mapping.cc
new file mode 100644
index 0000000..c540654
--- /dev/null
+++ b/y2024_swerve/vision/target_mapping.cc
@@ -0,0 +1,485 @@
+#include <string>
+
+#include "Eigen/Dense"
+#include "opencv2/aruco.hpp"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/core/eigen.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+
+#include "aos/configuration.h"
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "aos/util/mcap_logger.h"
+#include "frc971/constants/constants_sender_lib.h"
+#include "frc971/control_loops/pose.h"
+#include "frc971/vision/calibration_generated.h"
+#include "frc971/vision/charuco_lib.h"
+#include "frc971/vision/target_mapper.h"
+#include "frc971/vision/vision_generated.h"
+#include "frc971/vision/vision_util_lib.h"
+#include "frc971/vision/visualize_robot.h"
+#include "y2024_swerve/constants/simulated_constants_sender.h"
+#include "y2024_swerve/vision/vision_util.h"
+
+DEFINE_string(config, "",
+              "If set, override the log's config file with this one.");
+DEFINE_string(constants_path, "y2024_swerve/constants/constants.json",
+              "Path to the constant file");
+DEFINE_string(dump_constraints_to, "/tmp/mapping_constraints.txt",
+              "Write the target constraints to this path");
+DEFINE_string(dump_stats_to, "/tmp/mapping_stats.txt",
+              "Write the mapping stats to this path");
+DEFINE_string(field_name, "crescendo",
+              "Field name, for the output json filename and flatbuffer field");
+DEFINE_string(json_path, "y2024_swerve/vision/maps/target_map.json",
+              "Specify path for json with initial pose guesses.");
+DEFINE_double(max_pose_error, 1e-6,
+              "Throw out target poses with a higher pose error than this");
+DEFINE_double(
+    max_pose_error_ratio, 0.4,
+    "Throw out target poses with a higher pose error ratio than this");
+DEFINE_string(mcap_output_path, "", "Log to output.");
+DEFINE_string(output_dir, "y2024_swerve/vision/maps",
+              "Directory to write solved target map to");
+DEFINE_double(pause_on_distance, 2.0,
+              "Pause if two consecutive implied robot positions differ by more "
+              "than this many meters");
+DEFINE_string(orin, "orin1",
+              "Orin name to generate mcap log for; defaults to orin1.");
+DEFINE_uint64(skip_to, 1,
+              "Start at combined image of this number (1 is the first image)");
+DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
+DEFINE_bool(split_field, false,
+            "Whether to break solve into two sides of field");
+DEFINE_int32(team_number, 0,
+             "Required: Use the calibration for a node with this team number");
+DEFINE_uint64(wait_key, 1,
+              "Time in ms to wait between images, if no click (0 to wait "
+              "indefinitely until click).");
+
+DECLARE_int32(frozen_target_id);
+DECLARE_int32(min_target_id);
+DECLARE_int32(max_target_id);
+DECLARE_bool(visualize_solver);
+
+namespace y2024_swerve::vision {
+using frc971::vision::DataAdapter;
+using frc971::vision::ImageCallback;
+using frc971::vision::PoseUtils;
+using frc971::vision::TargetMap;
+using frc971::vision::TargetMapper;
+using frc971::vision::VisualizeRobot;
+namespace calibration = frc971::vision::calibration;
+
+// Class to handle reading target poses from a replayed log,
+// displaying various debug info, and passing the poses to
+// frc971::vision::TargetMapper for field mapping.
+class TargetMapperReplay {
+ public:
+  TargetMapperReplay(aos::logger::LogReader *reader);
+
+  // Solves for the target poses with the accumulated detections if FLAGS_solve.
+  void MaybeSolve();
+
+ private:
+  static constexpr int kImageWidth = 1280;
+
+  // Contains fixed target poses without solving, for use with visualization
+  static const TargetMapper kFixedTargetMapper;
+
+  // Map of TargetId to alliance "color" for splitting field
+  static std::map<uint, std::string> kIdAllianceMap;
+
+  // Change reference frame from camera to robot
+  static Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camera_target,
+                                                Eigen::Affine3d extrinsics);
+
+  // Adds april tag detections into the detection list, and handles
+  // visualization
+  void HandleAprilTags(const TargetMap &map,
+                       aos::distributed_clock::time_point node_distributed_time,
+                       std::string camera_name, Eigen::Affine3d extrinsics);
+  // Gets images from the given pi and passes apriltag positions to
+  // HandleAprilTags()
+  void HandleNodeCaptures(
+      aos::EventLoop *mapping_event_loop,
+      frc971::constants::ConstantsFetcher<y2024_swerve::Constants>
+          *constants_fetcher,
+      int camera_number);
+
+  aos::logger::LogReader *reader_;
+  // April tag detections from all pis
+  std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections_;
+
+  VisualizeRobot vis_robot_;
+  // Set of camera names which are currently drawn on the display
+  std::set<std::string> drawn_cameras_;
+  // Number of frames displayed
+  size_t display_count_;
+  // Last time we drew onto the display image.
+  // This is different from when we actually call imshow() to update
+  // the display window
+  aos::distributed_clock::time_point last_draw_time_;
+
+  Eigen::Affine3d last_H_world_robot_;
+  // Maximum distance between consecutive T_world_robot's in one display frame,
+  // used to determine if we need to pause for the user to see this frame
+  // clearly
+  double max_delta_T_world_robot_;
+  double ignore_count_ = 0;
+
+  std::vector<std::unique_ptr<aos::EventLoop>> mapping_event_loops_;
+
+  std::unique_ptr<aos::EventLoop> mcap_event_loop_;
+  std::unique_ptr<aos::McapLogger> relogger_;
+};
+
+std::vector<CameraNode> node_list(y2024_swerve::vision::CreateNodeList());
+
+std::map<std::string, int> camera_ordering_map(
+    y2024_swerve::vision::CreateOrderingMap(node_list));
+
+std::map<uint, std::string> TargetMapperReplay::kIdAllianceMap = {
+    {1, "red"},  {2, "red"},   {3, "red"},   {4, "red"},
+    {5, "red"},  {6, "blue"},  {7, "blue"},  {8, "blue"},
+    {9, "blue"}, {10, "blue"}, {11, "red"},  {12, "red"},
+    {13, "red"}, {14, "blue"}, {15, "blue"}, {16, "blue"}};
+
+const auto TargetMapperReplay::kFixedTargetMapper =
+    TargetMapper(FLAGS_json_path, ceres::examples::VectorOfConstraints{});
+
+Eigen::Affine3d TargetMapperReplay::CameraToRobotDetection(
+    Eigen::Affine3d H_camera_target, Eigen::Affine3d extrinsics) {
+  const Eigen::Affine3d H_robot_camera = extrinsics;
+  const Eigen::Affine3d H_robot_target = H_robot_camera * H_camera_target;
+  return H_robot_target;
+}
+
+TargetMapperReplay::TargetMapperReplay(aos::logger::LogReader *reader)
+    : reader_(reader),
+      timestamped_target_detections_(),
+      vis_robot_(cv::Size(1280, 1000)),
+      drawn_cameras_(),
+      display_count_(0),
+      last_draw_time_(aos::distributed_clock::min_time),
+      last_H_world_robot_(Eigen::Matrix4d::Identity()),
+      max_delta_T_world_robot_(0.0) {
+  reader_->RemapLoggedChannel("/orin1/constants", "y2024_swerve.Constants");
+  reader_->RemapLoggedChannel("/imu/constants", "y2024_swerve.Constants");
+  // If it's Box of Orins, don't remap roborio constants
+  reader_->MaybeRemapLoggedChannel<Constants>("/roborio/constants");
+  reader_->Register();
+
+  SendSimulationConstants(reader_->event_loop_factory(), FLAGS_team_number,
+                          FLAGS_constants_path);
+
+  if (FLAGS_visualize_solver) {
+    vis_robot_.ClearImage();
+    // Set focal length to zoomed in, to view extrinsics
+    const double kFocalLength = 1500.0;
+    vis_robot_.SetDefaultViewpoint(kImageWidth, kFocalLength);
+  }
+
+  for (const CameraNode &camera_node : node_list) {
+    const aos::Node *node = aos::configuration::GetNode(
+        reader_->configuration(), camera_node.node_name.c_str());
+
+    mapping_event_loops_.emplace_back(
+        reader_->event_loop_factory()->MakeEventLoop(
+            camera_node.node_name + "mapping", node));
+
+    frc971::constants::ConstantsFetcher<y2024_swerve::Constants>
+        constants_fetcher(
+            mapping_event_loops_[mapping_event_loops_.size() - 1].get());
+    HandleNodeCaptures(
+        mapping_event_loops_[mapping_event_loops_.size() - 1].get(),
+        &constants_fetcher, camera_node.camera_number);
+
+    if (FLAGS_visualize_solver) {
+      // Show the extrinsics calibration to start, for reference to confirm
+      const auto *calibration = FindCameraCalibration(
+          constants_fetcher.constants(),
+          mapping_event_loops_.back()->node()->name()->string_view(),
+          camera_node.camera_number);
+      cv::Mat extrinsics_cv =
+          frc971::vision::CameraExtrinsics(calibration).value();
+      Eigen::Matrix4d extrinsics_matrix;
+      cv::cv2eigen(extrinsics_cv, extrinsics_matrix);
+      const auto extrinsics = Eigen::Affine3d(extrinsics_matrix);
+
+      vis_robot_.DrawRobotOutline(extrinsics, camera_node.camera_name(),
+                                  kOrinColors.at(camera_node.camera_name()));
+    }
+  }
+
+  if (FLAGS_visualize_solver) {
+    cv::imshow("Extrinsics", vis_robot_.image_);
+    cv::waitKey(0);
+    vis_robot_.ClearImage();
+    // Reset focal length to more zoomed out view for field
+    const double kFocalLength = 500.0;
+    vis_robot_.SetDefaultViewpoint(kImageWidth, kFocalLength);
+  }
+}
+
+// Add detected apriltag poses relative to the robot to
+// timestamped_target_detections
+void TargetMapperReplay::HandleAprilTags(
+    const TargetMap &map,
+    aos::distributed_clock::time_point node_distributed_time,
+    std::string camera_name, Eigen::Affine3d extrinsics) {
+  bool drew = false;
+  std::stringstream label;
+  label << camera_name << " - ";
+
+  if (map.target_poses()->size() == 0) {
+    VLOG(2) << "Got 0 AprilTags for camera " << camera_name;
+    return;
+  }
+
+  for (const auto *target_pose_fbs : *map.target_poses()) {
+    // Skip detections with invalid ids
+    if (static_cast<TargetMapper::TargetId>(target_pose_fbs->id()) <
+            FLAGS_min_target_id ||
+        static_cast<TargetMapper::TargetId>(target_pose_fbs->id()) >
+            FLAGS_max_target_id) {
+      VLOG(1) << "Skipping tag with invalid id of " << target_pose_fbs->id();
+      continue;
+    }
+
+    // Skip detections with high pose errors
+    if (target_pose_fbs->pose_error() > FLAGS_max_pose_error) {
+      VLOG(1) << "Skipping tag " << target_pose_fbs->id()
+              << " due to pose error of " << target_pose_fbs->pose_error();
+      continue;
+    }
+    // Skip detections with high pose error ratios
+    if (target_pose_fbs->pose_error_ratio() > FLAGS_max_pose_error_ratio) {
+      VLOG(1) << "Skipping tag " << target_pose_fbs->id()
+              << " due to pose error ratio of "
+              << target_pose_fbs->pose_error_ratio();
+      continue;
+    }
+
+    const TargetMapper::TargetPose target_pose =
+        PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+
+    Eigen::Affine3d H_camera_target =
+        Eigen::Translation3d(target_pose.pose.p) * target_pose.pose.q;
+    Eigen::Affine3d H_robot_target =
+        CameraToRobotDetection(H_camera_target, extrinsics);
+
+    ceres::examples::Pose3d target_pose_camera =
+        PoseUtils::Affine3dToPose3d(H_camera_target);
+    double distance_from_camera = target_pose_camera.p.norm();
+    double distortion_factor = target_pose_fbs->distortion_factor();
+
+    double distance_threshold = 5.0;
+    if (distance_from_camera > distance_threshold) {
+      ignore_count_++;
+      LOG(INFO) << "Ignored " << ignore_count_ << " AprilTags with distance "
+                << distance_from_camera << " > " << distance_threshold;
+      continue;
+    }
+
+    CHECK(map.has_monotonic_timestamp_ns())
+        << "Need detection timestamps for mapping";
+
+    // Detection is usable, so store it
+    timestamped_target_detections_.emplace_back(
+        DataAdapter::TimestampedDetection{
+            .time = node_distributed_time,
+            .H_robot_target = H_robot_target,
+            .distance_from_camera = distance_from_camera,
+            .distortion_factor = distortion_factor,
+            .id = static_cast<TargetMapper::TargetId>(target_pose.id)});
+
+    if (FLAGS_visualize_solver) {
+      // If we've already drawn this camera_name in the current image,
+      // display the image before clearing and adding the new poses
+      if (drawn_cameras_.count(camera_name) != 0) {
+        display_count_++;
+        cv::putText(vis_robot_.image_,
+                    "Poses #" + std::to_string(display_count_),
+                    cv::Point(600, 10), cv::FONT_HERSHEY_PLAIN, 1.0,
+                    cv::Scalar(255, 255, 255));
+
+        if (display_count_ >= FLAGS_skip_to) {
+          VLOG(1) << "Showing image for camera " << camera_name
+                  << " since we've drawn it already";
+          cv::imshow("View", vis_robot_.image_);
+          // Pause if delta_T is too large, but only after first image (to make
+          // sure the delta's are correct)
+          if (max_delta_T_world_robot_ > FLAGS_pause_on_distance &&
+              display_count_ > 1) {
+            LOG(INFO) << "Pausing since the delta between robot estimates is "
+                      << max_delta_T_world_robot_ << " which is > threshold of "
+                      << FLAGS_pause_on_distance;
+            cv::waitKey(0);
+          } else {
+            cv::waitKey(FLAGS_wait_key);
+          }
+          max_delta_T_world_robot_ = 0.0;
+        } else {
+          VLOG(2) << "At poses #" << std::to_string(display_count_);
+        }
+        vis_robot_.ClearImage();
+        drawn_cameras_.clear();
+      }
+
+      Eigen::Affine3d H_world_target = PoseUtils::Pose3dToAffine3d(
+          kFixedTargetMapper.GetTargetPoseById(target_pose_fbs->id())->pose);
+      Eigen::Affine3d H_world_robot = H_world_target * H_robot_target.inverse();
+      VLOG(2) << camera_name << ", id " << target_pose_fbs->id()
+              << ", t = " << node_distributed_time
+              << ", pose_error = " << target_pose_fbs->pose_error()
+              << ", pose_error_ratio = " << target_pose_fbs->pose_error_ratio()
+              << ", robot_pos (x,y,z) = "
+              << H_world_robot.translation().transpose();
+
+      label << "id " << target_pose_fbs->id()
+            << ": err (% of max): " << target_pose_fbs->pose_error() << " ("
+            << (target_pose_fbs->pose_error() / FLAGS_max_pose_error)
+            << ") err_ratio: " << target_pose_fbs->pose_error_ratio() << " ";
+
+      vis_robot_.DrawRobotOutline(H_world_robot, camera_name,
+                                  kOrinColors.at(camera_name));
+      vis_robot_.DrawFrameAxes(H_world_target,
+                               std::to_string(target_pose_fbs->id()),
+                               kOrinColors.at(camera_name));
+
+      double delta_T_world_robot =
+          (H_world_robot.translation() - last_H_world_robot_.translation())
+              .norm();
+      max_delta_T_world_robot_ =
+          std::max(delta_T_world_robot, max_delta_T_world_robot_);
+
+      VLOG(1) << "Drew in info for camera " << camera_name << " and target #"
+              << target_pose_fbs->id();
+      drew = true;
+      last_draw_time_ = node_distributed_time;
+      last_H_world_robot_ = H_world_robot;
+    }
+  }
+  if (FLAGS_visualize_solver) {
+    if (drew) {
+      // Collect all the labels from a given camera, and add the text
+      // TODO: Need to fix this one
+      int position_number = camera_ordering_map[camera_name];
+      cv::putText(vis_robot_.image_, label.str(),
+                  cv::Point(10, 30 + 20 * position_number),
+                  cv::FONT_HERSHEY_PLAIN, 1.0, kOrinColors.at(camera_name));
+      drawn_cameras_.emplace(camera_name);
+    } else if (node_distributed_time - last_draw_time_ >
+                   std::chrono::milliseconds(30) &&
+               display_count_ >= FLAGS_skip_to) {
+      // TODO: Check on 30ms value-- does this make sense?
+      double delta_t = (node_distributed_time - last_draw_time_).count() / 1e6;
+      VLOG(1) << "Last result was " << delta_t << "ms ago";
+      cv::putText(vis_robot_.image_, "No detections in last 30ms",
+                  cv::Point(10, 0), cv::FONT_HERSHEY_PLAIN, 1.0,
+                  kOrinColors.at(camera_name));
+      // Display and clear the image if we haven't draw in a while
+      VLOG(1) << "Displaying image due to time lapse";
+      cv::imshow("View", vis_robot_.image_);
+      cv::waitKey(FLAGS_wait_key);
+      max_delta_T_world_robot_ = 0.0;
+      drawn_cameras_.clear();
+    }
+  }
+}
+
+void TargetMapperReplay::HandleNodeCaptures(
+    aos::EventLoop *mapping_event_loop,
+    frc971::constants::ConstantsFetcher<y2024_swerve::Constants>
+        *constants_fetcher,
+    int camera_number) {
+  // Get the camera extrinsics
+  std::string node_name =
+      std::string(mapping_event_loop->node()->name()->string_view());
+  const auto *calibration = FindCameraCalibration(
+      constants_fetcher->constants(), node_name, camera_number);
+  cv::Mat extrinsics_cv = frc971::vision::CameraExtrinsics(calibration).value();
+  Eigen::Matrix4d extrinsics_matrix;
+  cv::cv2eigen(extrinsics_cv, extrinsics_matrix);
+  const auto extrinsics = Eigen::Affine3d(extrinsics_matrix);
+  std::string camera_name = absl::StrFormat(
+      "/%s/camera%d", mapping_event_loop->node()->name()->str(), camera_number);
+
+  mapping_event_loop->MakeWatcher(
+      camera_name.c_str(), [this, mapping_event_loop, extrinsics,
+                            camera_name](const TargetMap &map) {
+        aos::distributed_clock::time_point node_distributed_time =
+            reader_->event_loop_factory()
+                ->GetNodeEventLoopFactory(mapping_event_loop->node())
+                ->ToDistributedClock(aos::monotonic_clock::time_point(
+                    aos::monotonic_clock::duration(
+                        map.monotonic_timestamp_ns())));
+
+        HandleAprilTags(map, node_distributed_time, camera_name, extrinsics);
+      });
+}
+
+void TargetMapperReplay::MaybeSolve() {
+  if (FLAGS_solve) {
+    auto target_constraints =
+        DataAdapter::MatchTargetDetections(timestamped_target_detections_);
+
+    if (FLAGS_split_field) {
+      // Remove constraints between the two sides of the field - these are
+      // basically garbage because of how far the camera is. We will use seeding
+      // below to connect the two sides
+      target_constraints.erase(
+          std::remove_if(
+              target_constraints.begin(), target_constraints.end(),
+              [](const auto &constraint) {
+                return (
+                    kIdAllianceMap[static_cast<uint>(constraint.id_begin)] !=
+                    kIdAllianceMap[static_cast<uint>(constraint.id_end)]);
+              }),
+          target_constraints.end());
+    }
+
+    LOG(INFO) << "Solving for locations of tags with "
+              << target_constraints.size() << " constraints";
+    TargetMapper mapper(FLAGS_json_path, target_constraints);
+    mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
+
+    if (!FLAGS_dump_constraints_to.empty()) {
+      mapper.DumpConstraints(FLAGS_dump_constraints_to);
+    }
+    if (!FLAGS_dump_stats_to.empty()) {
+      mapper.DumpStats(FLAGS_dump_stats_to);
+    }
+    mapper.PrintDiffs();
+  }
+}
+
+void MappingMain(int argc, char *argv[]) {
+  std::optional<aos::FlatbufferDetachedBuffer<aos::Configuration>> config =
+      (FLAGS_config.empty()
+           ? std::nullopt
+           : std::make_optional(aos::configuration::ReadConfig(FLAGS_config)));
+
+  // Open logfiles
+  aos::logger::LogReader reader(
+      aos::logger::SortParts(aos::logger::FindLogs(argc, argv)),
+      config.has_value() ? &config->message() : nullptr);
+
+  TargetMapperReplay mapper_replay(&reader);
+  reader.event_loop_factory()->Run();
+  mapper_replay.MaybeSolve();
+}
+
+}  // namespace y2024_swerve::vision
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  y2024_swerve::vision::MappingMain(argc, argv);
+}
diff --git a/y2024_swerve/vision/viewer.cc b/y2024_swerve/vision/viewer.cc
new file mode 100644
index 0000000..143868d
--- /dev/null
+++ b/y2024_swerve/vision/viewer.cc
@@ -0,0 +1,122 @@
+#include "absl/strings/match.h"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/time/time.h"
+#include "frc971/constants/constants_sender_lib.h"
+#include "frc971/vision/vision_generated.h"
+#include "frc971/vision/vision_util_lib.h"
+#include "y2024_swerve/vision/vision_util.h"
+
+DEFINE_string(capture, "",
+              "If set, capture a single image and save it to this filename.");
+DEFINE_string(channel, "/camera", "Channel name for the image.");
+DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
+DEFINE_int32(rate, 100, "Time in milliseconds to wait between images");
+DEFINE_double(scale, 1.0, "Scale factor for images being displayed");
+
+namespace y2024_swerve::vision {
+namespace {
+
+using frc971::vision::CameraImage;
+
+bool DisplayLoop(const cv::Mat intrinsics, const cv::Mat dist_coeffs,
+                 aos::Fetcher<CameraImage> *image_fetcher) {
+  const CameraImage *image;
+
+  // Read next image
+  if (!image_fetcher->Fetch()) {
+    VLOG(2) << "Couldn't fetch next image";
+    return true;
+  }
+  image = image_fetcher->get();
+  CHECK(image != nullptr) << "Couldn't read image";
+
+  // Create color image: wrap the raw YUYV data from the camera, then
+  // convert it to BGR for display.
+  cv::Mat image_color_mat(cv::Size(image->cols(), image->rows()), CV_8UC2,
+                          (void *)image->data()->data());
+  cv::Mat bgr_image(cv::Size(image->cols(), image->rows()), CV_8UC3);
+  cv::cvtColor(image_color_mat, bgr_image, cv::COLOR_YUV2BGR_YUYV);
+
+  if (!FLAGS_capture.empty()) {
+    if (absl::EndsWith(FLAGS_capture, ".bfbs")) {
+      aos::WriteFlatbufferToFile(FLAGS_capture,
+                                 image_fetcher->CopyFlatBuffer());
+    } else {
+      cv::imwrite(FLAGS_capture, bgr_image);
+    }
+
+    return false;
+  }
+
+  cv::Mat undistorted_image;
+  cv::undistort(bgr_image, undistorted_image, intrinsics, dist_coeffs);
+  if (FLAGS_scale != 1.0) {
+    cv::resize(undistorted_image, undistorted_image, cv::Size(), FLAGS_scale,
+               FLAGS_scale);
+  }
+  cv::imshow("Display", undistorted_image);
+
+  int keystroke = cv::waitKey(1);
+  if ((keystroke & 0xFF) == static_cast<int>('c')) {
+    // Convert again, to get clean image
+    cv::cvtColor(image_color_mat, bgr_image, cv::COLOR_YUV2BGR_YUYV);
+    std::stringstream name;
+    name << "capture-" << aos::realtime_clock::now() << ".png";
+    cv::imwrite(name.str(), bgr_image);
+    LOG(INFO) << "Saved image file: " << name.str();
+  } else if ((keystroke & 0xFF) == static_cast<int>('q')) {
+    return false;
+  }
+  return true;
+}
+
+void ViewerMain() {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  frc971::constants::WaitForConstants<y2024_swerve::Constants>(
+      &config.message());
+
+  aos::ShmEventLoop event_loop(&config.message());
+
+  frc971::constants::ConstantsFetcher<y2024_swerve::Constants>
+      constants_fetcher(&event_loop);
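+  // As in apriltag_detector.cc, the channel name is expected to be exactly
+  // 8 characters ("/cameraN"), with the trailing digit as the camera id.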
+  CHECK(FLAGS_channel.length() == 8);
+  int camera_id = std::stoi(FLAGS_channel.substr(7, 1));
+  const auto *calibration_data = FindCameraCalibration(
+      constants_fetcher.constants(), event_loop.node()->name()->string_view(),
+      camera_id);
+  const cv::Mat intrinsics = frc971::vision::CameraIntrinsics(calibration_data);
+  const cv::Mat dist_coeffs =
+      frc971::vision::CameraDistCoeffs(calibration_data);
+
+  aos::Fetcher<CameraImage> image_fetcher =
+      event_loop.MakeFetcher<CameraImage>(FLAGS_channel);
+
+  // Run the display loop
+  event_loop.AddPhasedLoop(
+      [&](int) {
+        if (!DisplayLoop(intrinsics, dist_coeffs, &image_fetcher)) {
+          LOG(INFO) << "Calling event_loop Exit";
+          event_loop.Exit();
+        }
+      },
+      ::std::chrono::milliseconds(FLAGS_rate));
+
+  event_loop.Run();
+
+  image_fetcher = aos::Fetcher<CameraImage>();
+}
+
+}  // namespace
+}  // namespace y2024_swerve::vision
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  y2024_swerve::vision::ViewerMain();
+}
diff --git a/y2024_swerve/vision/vision_util.cc b/y2024_swerve/vision/vision_util.cc
new file mode 100644
index 0000000..e08bfaa
--- /dev/null
+++ b/y2024_swerve/vision/vision_util.cc
@@ -0,0 +1,48 @@
+#include "y2024_swerve/vision/vision_util.h"
+
+#include "glog/logging.h"
+
+namespace y2024_swerve::vision {
+
+// Store a list of ordered cameras as you progress around the robot/box of orins
+std::vector<CameraNode> CreateNodeList() {
+  std::vector<CameraNode> list;
+
+  list.push_back({.node_name = "imu", .camera_number = 0});
+  list.push_back({.node_name = "imu", .camera_number = 1});
+  list.push_back({.node_name = "orin1", .camera_number = 1});
+  list.push_back({.node_name = "orin1", .camera_number = 0});
+
+  return list;
+}
+
+// From the node_list, create a numbering scheme from 0 to 3
+std::map<std::string, int> CreateOrderingMap(
+    std::vector<CameraNode> &node_list) {
+  std::map<std::string, int> map;
+
+  for (uint i = 0; i < node_list.size(); i++) {
+    map.insert({node_list.at(i).camera_name(), i});
+  }
+
+  return map;
+}
+
+const frc971::vision::calibration::CameraCalibration *FindCameraCalibration(
+    const y2024_swerve::Constants &calibration_data, std::string_view node_name,
+    int camera_number) {
+  CHECK(calibration_data.robot()->has_cameras());
+  for (const y2024_swerve::CameraConfiguration *candidate :
+       *calibration_data.robot()->cameras()) {
+    CHECK(candidate->has_calibration());
+    if (candidate->calibration()->node_name()->string_view() != node_name ||
+        candidate->calibration()->camera_number() != camera_number) {
+      continue;
+    }
+    return candidate->calibration();
+  }
+  LOG(FATAL) << ": Failed to find camera calibration for " << node_name
+             << " and camera number " << camera_number;
+}
+
+}  // namespace y2024_swerve::vision
diff --git a/y2024_swerve/vision/vision_util.h b/y2024_swerve/vision/vision_util.h
new file mode 100644
index 0000000..6fed3c8
--- /dev/null
+++ b/y2024_swerve/vision/vision_util.h
@@ -0,0 +1,41 @@
+#ifndef Y2024_SWERVE_VISION_VISION_UTIL_H_
+#define Y2024_SWERVE_VISION_VISION_UTIL_H_
+#include <map>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "opencv2/imgproc.hpp"
+
+#include "y2024_swerve/constants/constants_generated.h"
+
+namespace y2024_swerve::vision {
+
+// Generate unique colors for each camera
+const auto kOrinColors = std::map<std::string, cv::Scalar>{
+    {"/orin1/camera0", cv::Scalar(255, 0, 255)},
+    {"/orin1/camera1", cv::Scalar(255, 255, 0)},
+    {"/imu/camera0", cv::Scalar(0, 255, 255)},
+    {"/imu/camera1", cv::Scalar(255, 165, 0)},
+};
+
+// Structure to store node name (e.g., orin1, imu), number, and a usable string
+struct CameraNode {
+  std::string node_name;
+  int camera_number;
+
+  std::string camera_name() const {
+    return "/" + node_name + "/camera" + std::to_string(camera_number);
+  }
+};
+
+std::vector<CameraNode> CreateNodeList();
+
+std::map<std::string, int> CreateOrderingMap(
+    std::vector<CameraNode> &node_list);
+
+const frc971::vision::calibration::CameraCalibration *FindCameraCalibration(
+    const y2024_swerve::Constants &calibration_data, std::string_view node_name,
+    int camera_number);
+
+}  // namespace y2024_swerve::vision
+
+#endif  // Y2024_SWERVE_VISION_VISION_UTIL_H_