Allow visualizing detections from multiple pis
That way, we can see how well the pose estimates from the different
pis agree on where the robot is.
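
Rough usage sketch for the new flags (the bazel target name and log
path here are assumptions):

  bazel run //y2023/vision:target_mapping -- --visualize \
      --wait_key=500 --solve=false /path/to/logfile

Each pi's detections are drawn in its own color, so overlapping robot
outlines indicate pose estimates that agree.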
Signed-off-by: milind-u <milind.upadhyay@gmail.com>
Change-Id: Ibc5dbe86f5b836ad77b356fa74bc752cc652afab
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index cf2244a..9fd846d 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -1,9 +1,9 @@
#include "y2023/vision/aprilrobotics.h"
-#include "y2023/vision/vision_util.h"
-
#include <opencv2/highgui.hpp>
+#include "y2023/vision/vision_util.h"
+
DEFINE_bool(
debug, false,
"If true, dump a ton of debug and crash on the first valid detection.");
@@ -24,16 +24,20 @@
namespace chrono = std::chrono;
AprilRoboticsDetector::AprilRoboticsDetector(aos::EventLoop *event_loop,
- std::string_view channel_name)
+ std::string_view channel_name,
+ bool flip_image)
: calibration_data_(event_loop),
image_size_(0, 0),
+ flip_image_(flip_image),
+ node_name_(event_loop->node()->name()->string_view()),
ftrace_(),
- image_callback_(event_loop, channel_name,
- [&](cv::Mat image_color_mat,
- const aos::monotonic_clock::time_point eof) {
- HandleImage(image_color_mat, eof);
- },
- chrono::milliseconds(5)),
+ image_callback_(
+ event_loop, channel_name,
+ [&](cv::Mat image_color_mat,
+ const aos::monotonic_clock::time_point eof) {
+ HandleImage(image_color_mat, eof);
+ },
+ chrono::milliseconds(5)),
target_map_sender_(
event_loop->MakeSender<frc971::vision::TargetMap>("/camera")),
image_annotations_sender_(
@@ -309,9 +313,11 @@
if (FLAGS_visualize) {
// Display the result
// Rotate by 180 degrees to make it upright
- // TDOO<Jim>: Make this a flag, since we don't want it for box of pis
- cv::rotate(color_image, color_image, 1);
- cv::imshow("AprilRoboticsDetector Image", color_image);
+ if (flip_image_) {
+ cv::rotate(color_image, color_image, 1);
+ }
+ cv::imshow(absl::StrCat("AprilRoboticsDetector Image ", node_name_),
+ color_image);
}
const auto corners_offset = builder.fbb()->CreateVector(foxglove_corners);
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index 7caa848..66d82a1 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -38,7 +38,7 @@
};
AprilRoboticsDetector(aos::EventLoop *event_loop,
- std::string_view channel_name);
+ std::string_view channel_name, bool flip_image = true);
~AprilRoboticsDetector();
void SetWorkerpoolAffinities();
@@ -78,6 +78,8 @@
std::optional<cv::Mat> extrinsics_;
cv::Mat dist_coeffs_;
cv::Size image_size_;
+ bool flip_image_;
+ std::string_view node_name_;
aos::Ftrace ftrace_;
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index a110794..d618f77 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -35,6 +35,10 @@
DEFINE_string(pi, "pi1", "Pi name to generate mcap log for; defaults to pi1.");
DEFINE_double(max_pose_error, 1e-6,
"Throw out target poses with a higher pose error than this");
+DEFINE_uint64(wait_key, 0,
+ "Time in ms to wait between images, if no click (0 to wait "
+ "indefinitely until click).");
+DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
namespace y2023 {
namespace vision {
@@ -43,6 +47,7 @@
using frc971::vision::PoseUtils;
using frc971::vision::TargetMap;
using frc971::vision::TargetMapper;
+using frc971::vision::VisualizeRobot;
namespace calibration = frc971::vision::calibration;
// Change reference frame from camera to robot
@@ -53,8 +58,14 @@
return H_robot_target;
}
-frc971::vision::VisualizeRobot vis_robot_;
const int kImageWidth = 1000;
+// Map from pi node name to color for drawing
+const std::map<std::string, cv::Scalar> kPiColors = {
+ {"pi1", cv::Scalar(255, 0, 255)},
+ {"pi2", cv::Scalar(255, 255, 0)},
+ {"pi3", cv::Scalar(0, 255, 255)},
+ {"pi4", cv::Scalar(255, 165, 0)},
+};
// Add detected apriltag poses relative to the robot to
// timestamped_target_detections
@@ -63,7 +74,13 @@
std::vector<DataAdapter::TimestampedDetection>
*timestamped_target_detections,
Eigen::Affine3d extrinsics, std::string node_name,
- frc971::vision::TargetMapper mapper) {
+ frc971::vision::TargetMapper target_loc_mapper,
+ std::set<std::string> *drawn_nodes,
+ VisualizeRobot *vis_robot, size_t *display_count) {
+ bool drew = false;
+ std::stringstream label;
+ label << node_name << " - ";
+
for (const auto *target_pose_fbs : *map.target_poses()) {
// Skip detections with high pose errors
if (target_pose_fbs->pose_error() > FLAGS_max_pose_error) {
@@ -97,26 +114,52 @@
.id = static_cast<TargetMapper::TargetId>(target_pose.id)});
if (FLAGS_visualize) {
+ // If we've already drawn in the current image,
+ // display it before clearing and adding the new poses
+ if (drawn_nodes->count(node_name) != 0) {
+ (*display_count)++;
+ cv::putText(vis_robot->image_,
+ "Poses #" + std::to_string(*display_count),
+ cv::Point(600, 10), cv::FONT_HERSHEY_PLAIN, 1.0,
+ cv::Scalar(255, 255, 255));
+
+ cv::imshow("View", vis_robot->image_);
+ cv::waitKey(FLAGS_wait_key);
+ vis_robot->ClearImage();
+ drawn_nodes->clear();
+ }
+
Eigen::Affine3d H_world_target = PoseUtils::Pose3dToAffine3d(
- mapper.GetTargetPoseById(target_pose_fbs->id())->pose);
+ target_loc_mapper.GetTargetPoseById(target_pose_fbs->id())->pose);
Eigen::Affine3d H_world_robot = H_world_target * H_robot_target.inverse();
- Eigen::Affine3d H_world_camera =
- H_world_target * H_camera_target.inverse();
LOG(INFO) << node_name << ", " << target_pose_fbs->id()
<< ", t = " << pi_distributed_time
<< ", pose_error = " << target_pose_fbs->pose_error()
<< ", robot_pos (x,y,z) + "
<< H_world_robot.translation().transpose();
- vis_robot_.DrawRobotOutline(
- H_world_robot, node_name + "-" +
- std::to_string(target_pose_fbs->id()) + " " +
- std::to_string(target_pose_fbs->pose_error() /
- FLAGS_max_pose_error));
- vis_robot_.DrawFrameAxes(H_world_camera, node_name);
- vis_robot_.DrawFrameAxes(H_world_target,
- std::to_string(target_pose_fbs->id()));
+
+ label << "id " << target_pose_fbs->id() << ": err "
+ << (target_pose_fbs->pose_error() / FLAGS_max_pose_error) << " ";
+
+ vis_robot->DrawRobotOutline(H_world_robot,
+ std::to_string(target_pose_fbs->id()),
+ kPiColors.at(node_name));
+
+ vis_robot->DrawFrameAxes(H_world_target,
+ std::to_string(target_pose_fbs->id()),
+ kPiColors.at(node_name));
+ drew = true;
}
}
+ if (drew) {
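+ // The node name ends in the pi number ("piN"), which selects this
+ // pi's label row in the image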
+ size_t pi_number =
+ static_cast<size_t>(node_name[node_name.size() - 1] - '0');
+ cv::putText(vis_robot->image_, label.str(),
+ cv::Point(10, 10 + 20 * pi_number), cv::FONT_HERSHEY_PLAIN, 1.0,
+ kPiColors.at(node_name));
+
+ drawn_nodes->emplace(node_name);
+ }
}
// Get images from pi and pass apriltag positions to HandleAprilTags()
@@ -125,10 +168,15 @@
aos::logger::LogReader *reader,
std::vector<DataAdapter::TimestampedDetection>
*timestamped_target_detections,
- std::vector<std::unique_ptr<AprilRoboticsDetector>> *detectors) {
+ std::vector<std::unique_ptr<AprilRoboticsDetector>> *detectors,
+ std::set<std::string> *drawn_nodes, VisualizeRobot *vis_robot,
+ size_t *display_count) {
static constexpr std::string_view kImageChannel = "/camera";
+ // For the robots, we need to flip the image since the cameras are rolled by
+ // 180 degrees
+ bool flip_image = (FLAGS_team_number != 7971);
auto detector_ptr = std::make_unique<AprilRoboticsDetector>(
- detection_event_loop, kImageChannel);
+ detection_event_loop, kImageChannel, flip_image);
// Get the camera extrinsics
cv::Mat extrinsics_cv = detector_ptr->extrinsics().value();
Eigen::Matrix4d extrinsics_matrix;
@@ -150,14 +198,8 @@
std::string node_name = detection_event_loop->node()->name()->str();
HandleAprilTags(map, pi_distributed_time, timestamped_target_detections,
- extrinsics, node_name, target_loc_mapper);
- if (FLAGS_visualize) {
- cv::imshow("View", vis_robot_.image_);
- cv::waitKey();
- cv::Mat image_mat =
- cv::Mat::zeros(cv::Size(kImageWidth, kImageWidth), CV_8UC3);
- vis_robot_.SetImage(image_mat);
- }
+ extrinsics, node_name, target_loc_mapper, drawn_nodes,
+ vis_robot, display_count);
});
}
@@ -176,8 +218,9 @@
aos::logger::LogReader reader(
aos::logger::SortParts(unsorted_logfiles),
config.has_value() ? &config->message() : nullptr);
- // Send new april tag poses. This allows us to take a log of images, then play
- // with the april detection code and see those changes take effect in mapping
+ // Send new april tag poses. This allows us to take a log of images, then
+ // play with the april detection code and see those changes take effect in
+ // mapping
constexpr size_t kNumPis = 4;
for (size_t i = 1; i <= kNumPis; i++) {
reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
@@ -196,6 +239,9 @@
FLAGS_constants_path);
std::vector<std::unique_ptr<AprilRoboticsDetector>> detectors;
+ VisualizeRobot vis_robot;
+ std::set<std::string> drawn_nodes;
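+ // Running count of displayed pose sets, shared across all pis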
+ size_t display_count = 0;
const aos::Node *pi1 =
aos::configuration::GetNode(reader.configuration(), "pi1");
@@ -204,7 +250,8 @@
std::unique_ptr<aos::EventLoop> pi1_mapping_event_loop =
reader.event_loop_factory()->MakeEventLoop("pi1_mapping", pi1);
HandlePiCaptures(pi1_detection_event_loop.get(), pi1_mapping_event_loop.get(),
- &reader, &timestamped_target_detections, &detectors);
+ &reader, &timestamped_target_detections, &detectors,
+ &drawn_nodes, &vis_robot, &display_count);
const aos::Node *pi2 =
aos::configuration::GetNode(reader.configuration(), "pi2");
@@ -213,7 +260,8 @@
std::unique_ptr<aos::EventLoop> pi2_mapping_event_loop =
reader.event_loop_factory()->MakeEventLoop("pi2_mapping", pi2);
HandlePiCaptures(pi2_detection_event_loop.get(), pi2_mapping_event_loop.get(),
- &reader, &timestamped_target_detections, &detectors);
+ &reader, &timestamped_target_detections, &detectors,
+ &drawn_nodes, &vis_robot, &display_count);
const aos::Node *pi3 =
aos::configuration::GetNode(reader.configuration(), "pi3");
@@ -222,7 +270,8 @@
std::unique_ptr<aos::EventLoop> pi3_mapping_event_loop =
reader.event_loop_factory()->MakeEventLoop("pi3_mapping", pi3);
HandlePiCaptures(pi3_detection_event_loop.get(), pi3_mapping_event_loop.get(),
- &reader, &timestamped_target_detections, &detectors);
+ &reader, &timestamped_target_detections, &detectors,
+ &drawn_nodes, &vis_robot, &display_count);
const aos::Node *pi4 =
aos::configuration::GetNode(reader.configuration(), "pi4");
@@ -231,7 +280,8 @@
std::unique_ptr<aos::EventLoop> pi4_mapping_event_loop =
reader.event_loop_factory()->MakeEventLoop("pi4_mapping", pi4);
HandlePiCaptures(pi4_detection_event_loop.get(), pi4_mapping_event_loop.get(),
- &reader, &timestamped_target_detections, &detectors);
+ &reader, &timestamped_target_detections, &detectors,
+ &drawn_nodes, &vis_robot, &display_count);
std::unique_ptr<aos::EventLoop> mcap_event_loop;
std::unique_ptr<aos::McapLogger> relogger;
@@ -252,20 +302,20 @@
}
if (FLAGS_visualize) {
- cv::Mat image_mat =
- cv::Mat::zeros(cv::Size(kImageWidth, kImageWidth), CV_8UC3);
- vis_robot_.SetImage(image_mat);
+ vis_robot.ClearImage();
const double kFocalLength = 500.0;
- vis_robot_.SetDefaultViewpoint(kImageWidth, kFocalLength);
+ vis_robot.SetDefaultViewpoint(kImageWidth, kFocalLength);
}
reader.event_loop_factory()->Run();
- auto target_constraints =
- DataAdapter::MatchTargetDetections(timestamped_target_detections);
+ if (FLAGS_solve) {
+ auto target_constraints =
+ DataAdapter::MatchTargetDetections(timestamped_target_detections);
- frc971::vision::TargetMapper mapper(FLAGS_json_path, target_constraints);
- mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
+ frc971::vision::TargetMapper mapper(FLAGS_json_path, target_constraints);
+ mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
+ }
// Clean up all the pointers
for (auto &detector_ptr : detectors) {