Add distortion factor to TargetMap
Tells us how much each detection was affected by distortion so we can
filter on it in the localizer.
Signed-off-by: milind-u <milind.upadhyay@gmail.com>
Change-Id: I3d1ffd3cddad9ec1949c1378558f71a48c534f03
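
The localizer-side filtering mentioned above is not part of this change. As a rough illustration of the intent only, a consumer of TargetMap could gate detections on the new distortion_factor field; the struct, threshold, and function names below are hypothetical, not real code from this repo:

#include <cstdio>

struct TargetPose {  // Stand-in for the TargetPoseFbs fields used here.
  int id;
  double pose_error;
  double distortion_factor;
};

// Skip detections whose corners were shifted too much by lens distortion,
// since their pose estimates are less trustworthy.
bool UseDetection(const TargetPose &pose, double max_distortion_factor) {
  return pose.distortion_factor <= max_distortion_factor;
}

int main() {
  TargetPose pose{.id = 7, .pose_error = 1e-6, .distortion_factor = 2e-6};
  std::printf("use: %d\n", UseDetection(pose, /*max_distortion_factor=*/1e-5));
}
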
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index 891490d..e694b5e 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -19,6 +19,7 @@
AprilRoboticsDetector::AprilRoboticsDetector(aos::EventLoop *event_loop,
std::string_view channel_name)
: calibration_data_(event_loop),
+ image_size_(0, 0),
ftrace_(),
image_callback_(
event_loop, channel_name,
@@ -81,6 +82,8 @@
void AprilRoboticsDetector::HandleImage(cv::Mat image_grayscale,
aos::monotonic_clock::time_point eof) {
+ image_size_ = image_grayscale.size();
+
std::vector<Detection> detections = DetectTags(image_grayscale, eof);
auto builder = target_map_sender_.MakeBuilder();
@@ -112,7 +115,8 @@
return frc971::vision::CreateTargetPoseFbs(
*fbb, detection.det.id, position_offset, orientation_offset,
- detection.det.decision_margin, detection.pose_error);
+ detection.det.decision_margin, detection.pose_error,
+ detection.distortion_factor);
}
void AprilRoboticsDetector::UndistortDetection(
@@ -137,6 +141,25 @@
}
}
+double AprilRoboticsDetector::ComputeDistortionFactor(
+ const std::vector<cv::Point2f> &orig_corners,
+ const std::vector<cv::Point2f> &corners) {
+ CHECK_EQ(orig_corners.size(), 4ul);
+ CHECK_EQ(corners.size(), 4ul);
+
+ double avg_distance = 0.0;
+ for (size_t i = 0; i < corners.size(); i++) {
+ avg_distance += cv::norm(orig_corners[i] - corners[i]);
+ }
+ avg_distance /= corners.size();
+
+ // Normalize avg_distance by dividing by the image area (total pixel count)
+ double distortion_factor =
+ avg_distance /
+ static_cast<double>(image_size_.width * image_size_.height);
+ return distortion_factor;
+}
+
std::vector<AprilRoboticsDetector::Detection> AprilRoboticsDetector::DetectTags(
cv::Mat image, aos::monotonic_clock::time_point eof) {
const aos::monotonic_clock::time_point start_time =
@@ -177,8 +200,6 @@
VLOG(1) << "Found tag number " << det->id << " hamming: " << det->hamming
<< " margin: " << det->decision_margin;
- const aos::monotonic_clock::time_point before_pose_estimation =
- aos::monotonic_clock::now();
// First create an apriltag_detection_info_t struct using your known
// parameters.
apriltag_detection_info_t info;
@@ -201,21 +222,20 @@
UndistortDetection(det);
+ const aos::monotonic_clock::time_point before_pose_estimation =
+ aos::monotonic_clock::now();
+
apriltag_pose_t pose;
- double err = estimate_tag_pose(&info, &pose);
-
- VLOG(1) << "err: " << err;
-
- results.emplace_back(Detection{*det, pose, err});
+ double pose_error = estimate_tag_pose(&info, &pose);
const aos::monotonic_clock::time_point after_pose_estimation =
aos::monotonic_clock::now();
-
VLOG(1) << "Took "
<< chrono::duration<double>(after_pose_estimation -
before_pose_estimation)
.count()
<< " seconds for pose estimation";
+ VLOG(1) << "Pose err: " << pose_error;
std::vector<cv::Point2f> corner_points;
corner_points.emplace_back(det->p[0][0], det->p[0][1]);
@@ -224,6 +244,14 @@
corner_points.emplace_back(det->p[3][0], det->p[3][1]);
corners_vector.emplace_back(corner_points);
+
+ double distortion_factor =
+ ComputeDistortionFactor(orig_corner_points, corner_points);
+
+ results.emplace_back(Detection{.det = *det,
+ .pose = pose,
+ .pose_error = pose_error,
+ .distortion_factor = distortion_factor});
}
}
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index a16986e..fd371c7 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -28,6 +28,7 @@
apriltag_detection_t det;
apriltag_pose_t pose;
double pose_error;
+ double distortion_factor;
};
AprilRoboticsDetector(aos::EventLoop *event_loop,
@@ -52,6 +53,12 @@
flatbuffers::Offset<frc971::vision::TargetPoseFbs> BuildTargetPose(
const Detection &detection, flatbuffers::FlatBufferBuilder *fbb);
+ // Computes the distortion effect on this detection by taking the average
+ // delta between orig_corners (distorted corners) and corners (undistorted
+ // corners), normalized by the image area.
+ double ComputeDistortionFactor(const std::vector<cv::Point2f> &orig_corners,
+ const std::vector<cv::Point2f> &corners);
+
apriltag_family_t *tag_family_;
apriltag_detector_t *tag_detector_;
@@ -61,6 +68,7 @@
cv::Mat projection_matrix_;
std::optional<cv::Mat> extrinsics_;
cv::Mat dist_coeffs_;
+ cv::Size image_size_;
aos::Ftrace ftrace_;
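
For a sense of scale, here is a standalone sketch of the same computation ComputeDistortionFactor performs, with made-up corner coordinates: an average corner shift of about 2 px in a 1280x720 image yields a factor on the order of 1e-6, because the normalization divides by the pixel count. The helper name, main(), and sample values are illustrative only:

#include <opencv2/core.hpp>

#include <iostream>
#include <vector>

// Mirrors ComputeDistortionFactor: average displacement between the raw
// (distorted) corners and the undistorted corners, divided by the image area.
double DistortionFactorSketch(const std::vector<cv::Point2f> &orig_corners,
                              const std::vector<cv::Point2f> &corners,
                              cv::Size image_size) {
  double avg_distance = 0.0;
  for (size_t i = 0; i < corners.size(); i++) {
    avg_distance += cv::norm(orig_corners[i] - corners[i]);
  }
  avg_distance /= corners.size();
  return avg_distance /
         static_cast<double>(image_size.width * image_size.height);
}

int main() {
  // Hypothetical detection whose corners each moved roughly 2 px when
  // undistorted.
  std::vector<cv::Point2f> orig = {
      {100, 100}, {180, 102}, {182, 178}, {98, 176}};
  std::vector<cv::Point2f> undistorted = {
      {102, 101}, {178, 103}, {180, 176}, {100, 175}};
  std::cout << DistortionFactorSketch(orig, undistorted, cv::Size(1280, 720))
            << "\n";  // prints ~2.6e-06
}
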