Add pose error ratio to target poses

This lets us filter out estimates whose secondary pose estimate
was almost as good as the best one. Such ambiguous estimates
should be thrown out, because they have been causing one tag's
implied robot position to be completely different from the others.

Adding this filter to target_mapping prevents jumping implied
robot positions, at least at the charge station's distance and
closer. It also makes all of our HP-based estimates accurate.
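
The filter itself reduces to one comparison: the ratio of the best
pose's object-space error to the secondary pose's error, rejecting
the detection when that ratio exceeds a threshold. Below is a
minimal standalone sketch of the check (the KeepDetection helper is
illustrative only, not part of this patch; its 0.4 default mirrors
the new max_pose_error_ratio flag):

  #include <algorithm>
  #include <iostream>
  #include <limits>

  // Returns true if a detection passes the ambiguity filter.
  // error_1 and error_2 are the object-space errors of the two
  // pose hypotheses; an infinite error means the estimator
  // produced no second solution.
  bool KeepDetection(double error_1, double error_2,
                     double max_ratio = 0.4) {
    const double best = std::min(error_1, error_2);
    const double secondary = std::max(error_1, error_2);
    // best / secondary lies in [0, 1]; values near 1 mean both
    // hypotheses fit the tag corners almost equally well, so the
    // detection is ambiguous and gets dropped.
    return (best / secondary) <= max_ratio;
  }

  int main() {
    std::cout << KeepDetection(1e-8, 1e-6) << "\n";    // 1: clear winner
    std::cout << KeepDetection(1e-8, 1.5e-8) << "\n";  // 0: ambiguous
    std::cout << KeepDetection(
                     1e-8, std::numeric_limits<double>::infinity())
              << "\n";  // 1: only one valid pose
  }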

Signed-off-by: milind-u <milind.upadhyay@gmail.com>
Change-Id: I11e42a4886b8401eeb6f57cf8e547ce3a8ff90b7
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index 9fd846d..b543dc7 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -17,6 +17,8 @@
     "Maximum expected value for unscaled distortion factors. Will scale "
     "distortion factors so that this value (and a higher distortion) maps to "
     "1.0.");
+DEFINE_uint64(pose_estimation_iterations, 50,
+              "Number of iterations for apriltag pose estimation.");
 
 namespace y2023 {
 namespace vision {
@@ -98,8 +100,11 @@
 
   auto builder = target_map_sender_.MakeBuilder();
   std::vector<flatbuffers::Offset<frc971::vision::TargetPoseFbs>> target_poses;
-  for (const auto &detection : result.detections) {
-    target_poses.emplace_back(BuildTargetPose(detection, builder.fbb()));
+  for (auto &detection : result.detections) {
+    auto *fbb = builder.fbb();
+    auto pose = BuildTargetPose(detection, fbb);
+    DestroyPose(&detection.pose);
+    target_poses.emplace_back(pose);
   }
   const auto target_poses_offset = builder.fbb()->CreateVector(target_poses);
   auto target_map_builder = builder.MakeBuilder<frc971::vision::TargetMap>();
@@ -127,7 +132,7 @@
   return frc971::vision::CreateTargetPoseFbs(
       *fbb, detection.det.id, position_offset, orientation_offset,
       detection.det.decision_margin, detection.pose_error,
-      detection.distortion_factor);
+      detection.distortion_factor, detection.pose_error_ratio);
 }
 
 void AprilRoboticsDetector::UndistortDetection(
@@ -183,6 +188,11 @@
   return corner_points;
 }
 
+void AprilRoboticsDetector::DestroyPose(apriltag_pose_t *pose) const {
+  matd_destroy(pose->R);
+  matd_destroy(pose->t);
+}
+
 AprilRoboticsDetector::DetectionResult AprilRoboticsDetector::DetectTags(
     cv::Mat image, aos::monotonic_clock::time_point eof) {
   cv::Mat color_image;
@@ -256,8 +266,13 @@
       const aos::monotonic_clock::time_point before_pose_estimation =
           aos::monotonic_clock::now();
 
-      apriltag_pose_t pose;
-      double pose_error = estimate_tag_pose(&info, &pose);
+      apriltag_pose_t pose_1;
+      apriltag_pose_t pose_2;
+      double pose_error_1;
+      double pose_error_2;
+      estimate_tag_pose_orthogonal_iteration(&info, &pose_error_1, &pose_1,
+                                             &pose_error_2, &pose_2,
+                                             FLAGS_pose_estimation_iterations);
 
       const aos::monotonic_clock::time_point after_pose_estimation =
           aos::monotonic_clock::now();
@@ -266,7 +281,8 @@
                                           before_pose_estimation)
                      .count()
               << " seconds for pose estimation";
-      VLOG(1) << "Pose err: " << pose_error;
+      VLOG(1) << "Pose err 1: " << pose_error_1;
+      VLOG(1) << "Pose err 2: " << pose_error_2;
 
       // Send undistorted corner points in pink
       std::vector<cv::Point2f> corner_points = MakeCornerVector(det);
@@ -277,10 +293,29 @@
       double distortion_factor =
           ComputeDistortionFactor(orig_corner_points, corner_points);
 
+      // The estimator returns two pose hypotheses; keep the one with the
+      // lower object-space error, and use the other to compute the ratio.
+      bool use_pose_1 = (pose_error_1 < pose_error_2);
+      auto best_pose = (use_pose_1 ? pose_1 : pose_2);
+      auto secondary_pose = (use_pose_1 ? pose_2 : pose_1);
+      double best_pose_error = (use_pose_1 ? pose_error_1 : pose_error_2);
+      double secondary_pose_error = (use_pose_1 ? pose_error_2 : pose_error_1);
+
+      CHECK_NE(best_pose_error, std::numeric_limits<double>::infinity())
+          << "Got no valid pose estimations, this should not be possible.";
+      double pose_error_ratio = best_pose_error / secondary_pose_error;
+
+      // Destroy the secondary pose if we got one
+      if (secondary_pose_error != std::numeric_limits<double>::infinity()) {
+        DestroyPose(&secondary_pose);
+      }
+
       results.emplace_back(Detection{.det = *det,
-                                     .pose = pose,
-                                     .pose_error = pose_error,
-                                     .distortion_factor = distortion_factor});
+                                     .pose = best_pose,
+                                     .pose_error = best_pose_error,
+                                     .distortion_factor = distortion_factor,
+                                     .pose_error_ratio = pose_error_ratio});
+
       if (FLAGS_visualize) {
         // Draw raw (distorted) corner points in green
         cv::line(color_image, orig_corner_points[0], orig_corner_points[1],
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index 66d82a1..01e3138 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -30,6 +30,7 @@
     apriltag_pose_t pose;
     double pose_error;
     double distortion_factor;
+    double pose_error_ratio;
   };
 
   struct DetectionResult {
@@ -43,6 +44,10 @@
 
   void SetWorkerpoolAffinities();
 
+  // Frees the heap-allocated rotation and translation matrices in the
+  // given pose
+  void DestroyPose(apriltag_pose_t *pose) const;
+
   // Undistorts the april tag corners using the camera calibration
   void UndistortDetection(apriltag_detection_t *det) const;
 
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index d618f77..e9ef662 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -35,9 +35,14 @@
 DEFINE_string(pi, "pi1", "Pi name to generate mcap log for; defaults to pi1.");
 DEFINE_double(max_pose_error, 1e-6,
               "Throw out target poses with a higher pose error than this");
+DEFINE_double(
+    max_pose_error_ratio, 0.4,
+    "Throw out target poses with a higher pose error ratio than this");
 DEFINE_uint64(wait_key, 0,
               "Time in ms to wait between images, if no click (0 to wait "
               "indefinitely until click).");
+DEFINE_uint64(skip_to, 1,
+              "Start at combined image of this number (1 is the first image)");
 DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
 
 namespace y2023 {
@@ -84,8 +89,15 @@
   for (const auto *target_pose_fbs : *map.target_poses()) {
     // Skip detections with high pose errors
     if (target_pose_fbs->pose_error() > FLAGS_max_pose_error) {
-      LOG(INFO) << " Skipping tag " << target_pose_fbs->id()
-                << " due to pose error of " << target_pose_fbs->pose_error();
+      VLOG(1) << " Skipping tag " << target_pose_fbs->id()
+              << " due to pose error of " << target_pose_fbs->pose_error();
+      continue;
+    }
+    // Skip detections with high pose error ratios
+    if (target_pose_fbs->pose_error_ratio() > FLAGS_max_pose_error_ratio) {
+      VLOG(1) << " Skipping tag " << target_pose_fbs->id()
+              << " due to pose error ratio of "
+              << target_pose_fbs->pose_error_ratio();
       continue;
     }
 
@@ -123,8 +135,12 @@
                     cv::Point(600, 10), cv::FONT_HERSHEY_PLAIN, 1.0,
                     cv::Scalar(255, 255, 255));
 
-        cv::imshow("View", vis_robot->image_);
-        cv::waitKey(FLAGS_wait_key);
+        if (*display_count >= FLAGS_skip_to) {
+          cv::imshow("View", vis_robot->image_);
+          cv::waitKey(FLAGS_wait_key);
+        } else {
+          VLOG(1) << "At poses #" << std::to_string(*display_count);
+        }
         vis_robot->ClearImage();
         drawn_nodes->clear();
       }
@@ -132,14 +148,16 @@
       Eigen::Affine3d H_world_target = PoseUtils::Pose3dToAffine3d(
           target_loc_mapper.GetTargetPoseById(target_pose_fbs->id())->pose);
       Eigen::Affine3d H_world_robot = H_world_target * H_robot_target.inverse();
-      LOG(INFO) << node_name << ", " << target_pose_fbs->id()
-                << ", t = " << pi_distributed_time
-                << ", pose_error = " << target_pose_fbs->pose_error()
-                << ", robot_pos (x,y,z) + "
-                << H_world_robot.translation().transpose();
+      VLOG(2) << node_name << ", " << target_pose_fbs->id()
+              << ", t = " << pi_distributed_time
+              << ", pose_error = " << target_pose_fbs->pose_error()
+              << ", pose_error_ratio = " << target_pose_fbs->pose_error_ratio()
+              << ", robot_pos (x,y,z) + "
+              << H_world_robot.translation().transpose();
 
       label << "id " << target_pose_fbs->id() << ": err "
-            << (target_pose_fbs->pose_error() / FLAGS_max_pose_error) << " ";
+            << (target_pose_fbs->pose_error() / FLAGS_max_pose_error)
+            << " ratio " << target_pose_fbs->pose_error_ratio() << " ";
 
       vis_robot->DrawRobotOutline(H_world_robot,
                                   std::to_string(target_pose_fbs->id()),