Add pose estimation error to TargetMap

Signed-off-by: milind-u <milind.upadhyay@gmail.com>
Change-Id: I86104646bedd32124b85f6717b5445ff7e1d6136
diff --git a/frc971/vision/target_map.fbs b/frc971/vision/target_map.fbs
index b3fd6e9..7017e7f 100644
--- a/frc971/vision/target_map.fbs
+++ b/frc971/vision/target_map.fbs
@@ -31,6 +31,14 @@
   // FLAGS_min_decision_margin in aprilrobotics.cc are already filtered
   // out before sending.
   confidence:double (id: 3);
+
+  // Object-space error of the tag from pose estimation.
+  // Only filled out if this pose represents a live detection.
+  // Tags that are fully visible usually have a pose_error < 1e-6,
+  // and a higher error could imply that part of the tag is occluded.
+  // NOTE: not filtered by aprilrobotics.cc so that we can log
+  // more detections.
+  pose_error:double (id: 4);
 }
 
 // Map of all target poses on a field.
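
Not part of the patch, but a minimal sketch of how a downstream consumer could use the new field, assuming the usual flatc-generated accessors for TargetPoseFbs (the header path, helper name, and the 1e-5 cutoff are illustrative, not from this change):

#include "frc971/vision/target_map_generated.h"

// Hypothetical consumer-side filter. Fully visible tags are expected to land
// well below 1e-6 per the schema comment, so 1e-5 is a loose illustrative cutoff.
bool IsLikelyUnoccluded(const frc971::vision::TargetPoseFbs &target_pose) {
  // pose_error is only populated for live detections; a default value coming
  // from a field map should not be read as "perfect".
  return target_pose.pose_error() < 1e-5;
}
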
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index c2e063a..891490d 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -20,12 +20,13 @@
                                              std::string_view channel_name)
     : calibration_data_(event_loop),
       ftrace_(),
-      image_callback_(event_loop, channel_name,
-                      [&](cv::Mat image_color_mat,
-                          const aos::monotonic_clock::time_point eof) {
-                        HandleImage(image_color_mat, eof);
-                      },
-                      chrono::milliseconds(5)),
+      image_callback_(
+          event_loop, channel_name,
+          [&](cv::Mat image_color_mat,
+              const aos::monotonic_clock::time_point eof) {
+            HandleImage(image_color_mat, eof);
+          },
+          chrono::milliseconds(5)),
       target_map_sender_(
           event_loop->MakeSender<frc971::vision::TargetMap>("/camera")),
       image_annotations_sender_(
@@ -80,13 +81,12 @@
 
 void AprilRoboticsDetector::HandleImage(cv::Mat image_grayscale,
                                         aos::monotonic_clock::time_point eof) {
-  std::vector<std::pair<apriltag_detection_t, apriltag_pose_t>> detections =
-      DetectTags(image_grayscale, eof);
+  std::vector<Detection> detections = DetectTags(image_grayscale, eof);
 
   auto builder = target_map_sender_.MakeBuilder();
   std::vector<flatbuffers::Offset<frc971::vision::TargetPoseFbs>> target_poses;
-  for (const auto &[detection, pose] : detections) {
-    target_poses.emplace_back(BuildTargetPose(pose, detection, builder.fbb()));
+  for (const auto &detection : detections) {
+    target_poses.emplace_back(BuildTargetPose(detection, builder.fbb()));
   }
   const auto target_poses_offset = builder.fbb()->CreateVector(target_poses);
   auto target_map_builder = builder.MakeBuilder<frc971::vision::TargetMap>();
@@ -96,22 +96,23 @@
 }
 
 flatbuffers::Offset<frc971::vision::TargetPoseFbs>
-AprilRoboticsDetector::BuildTargetPose(const apriltag_pose_t &pose,
-                                       const apriltag_detection_t &det,
+AprilRoboticsDetector::BuildTargetPose(const Detection &detection,
                                        flatbuffers::FlatBufferBuilder *fbb) {
   const auto T =
-      Eigen::Translation3d(pose.t->data[0], pose.t->data[1], pose.t->data[2]);
+      Eigen::Translation3d(detection.pose.t->data[0], detection.pose.t->data[1],
+                           detection.pose.t->data[2]);
   const auto position_offset =
       frc971::vision::CreatePosition(*fbb, T.x(), T.y(), T.z());
 
   // Aprilrobotics stores the rotation matrix in row-major order
   const auto orientation = Eigen::Quaterniond(
-      Eigen::Matrix<double, 3, 3, Eigen::RowMajor>(pose.R->data));
+      Eigen::Matrix<double, 3, 3, Eigen::RowMajor>(detection.pose.R->data));
   const auto orientation_offset = frc971::vision::CreateQuaternion(
       *fbb, orientation.w(), orientation.x(), orientation.y(), orientation.z());
 
   return frc971::vision::CreateTargetPoseFbs(
-      *fbb, det.id, position_offset, orientation_offset, det.decision_margin);
+      *fbb, detection.det.id, position_offset, orientation_offset,
+      detection.det.decision_margin, detection.pose_error);
 }
 
 void AprilRoboticsDetector::UndistortDetection(
@@ -136,9 +137,8 @@
   }
 }
 
-std::vector<std::pair<apriltag_detection_t, apriltag_pose_t>>
-AprilRoboticsDetector::DetectTags(cv::Mat image,
-                                  aos::monotonic_clock::time_point eof) {
+std::vector<AprilRoboticsDetector::Detection> AprilRoboticsDetector::DetectTags(
+    cv::Mat image, aos::monotonic_clock::time_point eof) {
   const aos::monotonic_clock::time_point start_time =
       aos::monotonic_clock::now();
 
@@ -155,7 +155,7 @@
   zarray_t *detections = apriltag_detector_detect(tag_detector_, &im);
   ftrace_.FormatMessage("Done detecting\n");
 
-  std::vector<std::pair<apriltag_detection_t, apriltag_pose_t>> results;
+  std::vector<Detection> results;
 
   std::vector<std::vector<cv::Point2f>> orig_corners_vector;
   std::vector<std::vector<cv::Point2f>> corners_vector;
@@ -206,7 +206,7 @@
 
       VLOG(1) << "err: " << err;
 
-      results.emplace_back(*det, pose);
+      results.emplace_back(Detection{*det, pose, err});
 
       const aos::monotonic_clock::time_point after_pose_estimation =
           aos::monotonic_clock::now();
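
For context, the err stored into Detection above is the object-space error reported by the AprilRobotics pose estimator; a rough sketch of that step, assuming the stock estimate_tag_pose() API from apriltag_pose.h (the include path, tag size, and intrinsics below are placeholders, not values from this diff):

#include "apriltag_pose.h"

// Sketch only: runs the AprilRobotics pose estimator on one detection and
// returns the object-space error that ends up in Detection::pose_error.
apriltag_pose_t EstimatePoseSketch(apriltag_detection_t *det,
                                   double *object_space_error) {
  apriltag_detection_info_t info;
  info.det = det;
  info.tagsize = 0.1524;  // Placeholder tag edge length in meters.
  info.fx = 600.0;        // Placeholder camera intrinsics (pixels).
  info.fy = 600.0;
  info.cx = 320.0;
  info.cy = 240.0;

  apriltag_pose_t pose;
  *object_space_error = estimate_tag_pose(&info, &pose);
  return pose;
}
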
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index 1e5b2e8..a16986e 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -23,6 +23,13 @@
 
 class AprilRoboticsDetector {
  public:
+  // An AprilRobotics tag detection, together with its estimated pose and
+  // the object-space error from pose estimation.
+  struct Detection {
+    apriltag_detection_t det;
+    apriltag_pose_t pose;
+    double pose_error;
+  };
+
   AprilRoboticsDetector(aos::EventLoop *event_loop,
                         std::string_view channel_name);
   ~AprilRoboticsDetector();
@@ -32,8 +39,8 @@
   // Undistorts the april tag corners using the camera calibration
   void UndistortDetection(apriltag_detection_t *det) const;
 
-  std::vector<std::pair<apriltag_detection_t, apriltag_pose_t>> DetectTags(
-      cv::Mat image, aos::monotonic_clock::time_point eof);
+  std::vector<Detection> DetectTags(cv::Mat image,
+                                    aos::monotonic_clock::time_point eof);
 
   const std::optional<cv::Mat> extrinsics() const { return extrinsics_; }
   const cv::Mat intrinsics() const { return intrinsics_; }
@@ -43,8 +50,7 @@
   void HandleImage(cv::Mat image, aos::monotonic_clock::time_point eof);
 
   flatbuffers::Offset<frc971::vision::TargetPoseFbs> BuildTargetPose(
-      const apriltag_pose_t &pose, const apriltag_detection_t &det,
-      flatbuffers::FlatBufferBuilder *fbb);
+      const Detection &detection, flatbuffers::FlatBufferBuilder *fbb);
 
   apriltag_family_t *tag_family_;
   apriltag_detector_t *tag_detector_;