Adding multi-camera extrinsic calibration
Uses a pair of Aruco Diamonds to build extrinsics between neighboring cameras
Small refactor of charuco_lib to allow construction without requiring event_loop
Change max_pose_error to 1e-7 based on data from the SFR match
Also loosen decision_margin to 50 based on the same data
Small cleanup and typo fixes in target_mapping
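
A minimal sketch of the new event-loop-free flow (the locals calibration,
rgb_image, eof, and now stand in for values the caller already has):

    // Build the extractor directly from the camera calibration.
    frc971::vision::TargetType target_type =
        frc971::vision::TargetTypeFromString("charuco_diamond");
    frc971::vision::CharucoExtractor extractor(calibration, target_type);

    // Outputs filled in by ProcessImage.
    std::vector<cv::Vec4i> result_ids;
    std::vector<std::vector<cv::Point2f>> result_corners;
    bool valid = false;
    std::vector<Eigen::Vector3d> rvecs_eigen;
    std::vector<Eigen::Vector3d> tvecs_eigen;

    // eof is the image capture time; now is only used to log the image age.
    extractor.ProcessImage(rgb_image, eof, now, result_ids, result_corners,
                           valid, rvecs_eigen, tvecs_eigen);
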
Change-Id: If3b902612cb99e4f6e8a20291561fac766e6bacc
Signed-off-by: Jim Ostrowski <yimmy13@gmail.com>
diff --git a/frc971/vision/charuco_lib.cc b/frc971/vision/charuco_lib.cc
index 82cc6b3..b8edcae 100644
--- a/frc971/vision/charuco_lib.cc
+++ b/frc971/vision/charuco_lib.cc
@@ -25,8 +25,10 @@
DEFINE_uint32(
min_charucos, 10,
"The mininum number of aruco targets in charuco board required to match.");
-DEFINE_uint32(min_id, 12, "Minimum valid charuco id");
-DEFINE_uint32(max_id, 15, "Minimum valid charuco id");
+DEFINE_uint32(min_id, 0, "Minimum valid charuco id");
+DEFINE_uint32(max_diamonds, 0,
+ "Maximum number of diamonds to see. Set to 0 for no limit");
+DEFINE_uint32(max_id, 15, "Maximum valid charuco id");
DEFINE_bool(visualize, false, "Whether to visualize the resulting data.");
DEFINE_bool(
draw_axes, false,
@@ -280,9 +282,21 @@
}
CharucoExtractor::CharucoExtractor(
+ const calibration::CameraCalibration *calibration,
+ const TargetType target_type)
+ : event_loop_(nullptr),
+ target_type_(target_type),
+ calibration_(CHECK_NOTNULL(calibration)) {
+ VLOG(2) << "Configuring CharucoExtractor without event_loop";
+ SetupTargetData();
+ VLOG(2) << "Camera matrix:\n" << calibration_.CameraIntrinsics();
+ VLOG(2) << "Distortion Coefficients:\n" << calibration_.CameraDistCoeffs();
+}
+
+CharucoExtractor::CharucoExtractor(
aos::EventLoop *event_loop,
- const calibration::CameraCalibration *calibration, TargetType target_type,
- std::string_view image_channel,
+ const calibration::CameraCalibration *calibration,
+ const TargetType target_type, std::string_view image_channel,
std::function<void(cv::Mat, monotonic_clock::time_point,
std::vector<cv::Vec4i>,
std::vector<std::vector<cv::Point2f>>, bool,
@@ -303,9 +317,34 @@
void CharucoExtractor::HandleImage(cv::Mat rgb_image,
const monotonic_clock::time_point eof) {
+ // Set up the variables we'll use in the callback function
+ bool valid = false;
+ // ids and corners for final, refined board / marker detections
+ // Using Vec4i type since it supports Charuco Diamonds
+ // And overloading it using 1st int in Vec4i for other target types
+ std::vector<cv::Vec4i> result_ids;
+ std::vector<std::vector<cv::Point2f>> result_corners;
+
+ // Return a list of poses; for Charuco Board there will be just one
+ std::vector<Eigen::Vector3d> rvecs_eigen;
+ std::vector<Eigen::Vector3d> tvecs_eigen;
+ ProcessImage(rgb_image, eof, event_loop_->monotonic_now(), result_ids,
+ result_corners, valid, rvecs_eigen, tvecs_eigen);
+
+ handle_charuco_(rgb_image, eof, result_ids, result_corners, valid,
+ rvecs_eigen, tvecs_eigen);
+}
+
+void CharucoExtractor::ProcessImage(
+ cv::Mat rgb_image, const monotonic_clock::time_point eof,
+ const monotonic_clock::time_point current_time,
+ std::vector<cv::Vec4i> &result_ids,
+ std::vector<std::vector<cv::Point2f>> &result_corners, bool &valid,
+ std::vector<Eigen::Vector3d> &rvecs_eigen,
+ std::vector<Eigen::Vector3d> &tvecs_eigen) {
const double age_double =
- std::chrono::duration_cast<std::chrono::duration<double>>(
- event_loop_->monotonic_now() - eof)
+ std::chrono::duration_cast<std::chrono::duration<double>>(current_time -
+ eof)
.count();
// Have found this useful if there is blurry / noisy images
@@ -318,22 +357,10 @@
cv::cvtColor(thresh, rgb_image, cv::COLOR_GRAY2RGB);
}
- // Set up the variables we'll use in the callback function
- bool valid = false;
- // Return a list of poses; for Charuco Board there will be just one
- std::vector<Eigen::Vector3d> rvecs_eigen;
- std::vector<Eigen::Vector3d> tvecs_eigen;
-
// ids and corners for initial aruco marker detections
std::vector<int> marker_ids;
std::vector<std::vector<cv::Point2f>> marker_corners;
- // ids and corners for final, refined board / marker detections
- // Using Vec4i type since it supports Charuco Diamonds
- // And overloading it using 1st int in Vec4i for others target types
- std::vector<cv::Vec4i> result_ids;
- std::vector<std::vector<cv::Point2f>> result_corners;
-
// Do initial marker detection; this is the same for all target types
cv::aruco::detectMarkers(rgb_image, dictionary_, marker_corners, marker_ids);
cv::aruco::drawDetectedMarkers(rgb_image, marker_corners, marker_ids);
@@ -429,11 +456,14 @@
square_length_ / marker_length_,
diamond_corners, diamond_ids);
- // Check that we have exactly one charuco diamond. For calibration, we
- // can constrain things so that this is the case
- if (diamond_ids.size() == 1) {
- // TODO<Jim>: Could probably make this check more general than requiring
- // range of ids
+ // Check that we have an acceptable number of diamonds detected.
+ // Should be at least one, and no more than FLAGS_max_diamonds.
+ // Different calibration routines will require different values for this
+ if (diamond_ids.size() > 0 &&
+ (FLAGS_max_diamonds == 0 ||
+ diamond_ids.size() <= FLAGS_max_diamonds)) {
+ // TODO<Jim>: Could probably make this check more general than
+ // requiring range of ids
bool all_valid_ids = true;
for (uint i = 0; i < 4; i++) {
uint id = diamond_ids[0][i];
@@ -446,14 +476,15 @@
cv::aruco::drawDetectedDiamonds(rgb_image, diamond_corners,
diamond_ids);
- // estimate pose for diamonds doesn't return valid, so marking true
+ // estimate pose for diamonds doesn't return valid, so marking
+ // true
valid = true;
std::vector<cv::Vec3d> rvecs, tvecs;
cv::aruco::estimatePoseSingleMarkers(
diamond_corners, square_length_, calibration_.CameraIntrinsics(),
calibration_.CameraDistCoeffs(), rvecs, tvecs);
- DrawTargetPoses(rgb_image, rvecs, tvecs);
+ DrawTargetPoses(rgb_image, rvecs, tvecs);
PackPoseResults(rvecs, tvecs, &rvecs_eigen, &tvecs_eigen);
result_ids = diamond_ids;
result_corners = diamond_corners;
@@ -463,12 +494,12 @@
} else {
if (diamond_ids.size() == 0) {
// OK to not see any markers sometimes
- VLOG(2)
- << "Found aruco markers, but no valid charuco diamond targets";
+ VLOG(2) << "Found aruco markers, but no valid charuco diamond "
+ "targets";
} else {
- // But should never detect multiple
- LOG(FATAL) << "Found multiple charuco diamond markers. Should only "
- "be one";
+ VLOG(2) << "Found too many number of diamond markers, which likely "
+ "means false positives were detected: "
+ << diamond_ids.size() << " > " << FLAGS_max_diamonds;
}
}
} else {
@@ -476,9 +507,6 @@
<< static_cast<uint8_t>(target_type_);
}
}
-
- handle_charuco_(rgb_image, eof, result_ids, result_corners, valid,
- rvecs_eigen, tvecs_eigen);
}
flatbuffers::Offset<foxglove::ImageAnnotations> BuildAnnotations(
diff --git a/frc971/vision/charuco_lib.h b/frc971/vision/charuco_lib.h
index 8af7b8c..2f274bb 100644
--- a/frc971/vision/charuco_lib.h
+++ b/frc971/vision/charuco_lib.h
@@ -93,6 +93,11 @@
// extracted from it.
class CharucoExtractor {
public:
+ // Constructor that doesn't require an event_loop, so we can call
+ // ProcessImage directly and get the results back
+ CharucoExtractor(const calibration::CameraCalibration *calibration,
+ const TargetType target_type);
+
// The callback takes the following arguments:
// cv::Mat -> image with overlays drawn on it.
// monotonic_clock::time_point -> Time on this node when this image was
@@ -117,7 +122,16 @@
std::vector<Eigen::Vector3d>)> &&handle_charuco_fn);
// Handles the image by detecting the charuco board in it.
- void HandleImage(cv::Mat rgb_image, aos::monotonic_clock::time_point eof);
+ void HandleImage(cv::Mat rgb_image,
+ const aos::monotonic_clock::time_point eof);
+
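+ // Detects the target in the image without needing an event loop; used by
+ // HandleImage and callable directly (e.g., during log replay).
+ // current_time is only used to compute the image age for logging. On a
+ // successful detection, sets valid and fills result_ids / result_corners
+ // with the refined detections and rvecs_eigen / tvecs_eigen with the
+ // estimated pose(s).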
+ void ProcessImage(cv::Mat rgb_image,
+ const aos::monotonic_clock::time_point eof,
+ const aos::monotonic_clock::time_point current_time,
+ std::vector<cv::Vec4i> &result_ids,
+ std::vector<std::vector<cv::Point2f>> &result_corners,
+ bool &valid, std::vector<Eigen::Vector3d> &rvecs_eigen,
+ std::vector<Eigen::Vector3d> &tvecs_eigen);
// Returns the aruco dictionary in use.
cv::Ptr<cv::aruco::Dictionary> dictionary() const { return dictionary_; }
@@ -140,8 +154,8 @@
void DrawTargetPoses(cv::Mat rgb_image, std::vector<cv::Vec3d> rvecs,
std::vector<cv::Vec3d> tvecs);
- // Helper function to convert rotation (rvecs) and translation (tvecs) vectors
- // into Eigen vectors and store in corresponding vectors
+ // Helper function to convert rotation (rvecs) and translation (tvecs)
+ // vectors into Eigen vectors and store in corresponding vectors
void PackPoseResults(std::vector<cv::Vec3d> &rvecs,
std::vector<cv::Vec3d> &tvecs,
std::vector<Eigen::Vector3d> *rvecs_eigen,
@@ -184,7 +198,8 @@
const foxglove::PointsAnnotationType line_type =
foxglove::PointsAnnotationType::POINTS);
-// Creates a PointsAnnotation to build up ImageAnnotations with different types
+// Creates a PointsAnnotation to build up ImageAnnotations with different
+// types
flatbuffers::Offset<foxglove::PointsAnnotation> BuildPointsAnnotation(
flatbuffers::FlatBufferBuilder *fbb,
const aos::monotonic_clock::time_point monotonic_now,
diff --git a/frc971/vision/target_mapper.cc b/frc971/vision/target_mapper.cc
index c4af163..8077f16 100644
--- a/frc971/vision/target_mapper.cc
+++ b/frc971/vision/target_mapper.cc
@@ -303,6 +303,9 @@
ceres::LocalParameterization *quaternion_local_parameterization =
new ceres::EigenQuaternionParameterization;
+ int min_constraint_id = std::numeric_limits<int>::max();
+ int max_constraint_id = std::numeric_limits<int>::min();
+
for (ceres::examples::VectorOfConstraints::const_iterator constraints_iter =
constraints.begin();
constraints_iter != constraints.end(); ++constraints_iter) {
@@ -325,6 +328,15 @@
<< "Should have counted constraints for " << id_pair.first << "->"
<< id_pair.second;
+ VLOG(1) << "Adding constraint pair: " << id_pair.first << " and "
+ << id_pair.second;
+ // Store min & max id's; assumes first id < second id
+ if (id_pair.first < min_constraint_id) {
+ min_constraint_id = id_pair.first;
+ }
+ if (id_pair.second > max_constraint_id) {
+ max_constraint_id = id_pair.second;
+ }
// Normalize constraint cost by occurances
size_t constraint_count = constraint_counts_[id_pair];
// Scale all costs so the total cost comes out to more reasonable numbers
@@ -358,6 +370,12 @@
// setting one of the poses as constant so the optimizer cannot change it.
CHECK_NE(poses->count(FLAGS_frozen_target_id), 0ul)
<< "Got no poses for frozen target id " << FLAGS_frozen_target_id;
+ CHECK_GE(FLAGS_frozen_target_id, min_constraint_id)
+ << "target to freeze index " << FLAGS_frozen_target_id
+ << " must be in range of constraints, > " << min_constraint_id;
+ CHECK_LE(FLAGS_frozen_target_id, max_constraint_id)
+ << "target to freeze index " << FLAGS_frozen_target_id
+ << " must be in range of constraints, < " << max_constraint_id;
ceres::examples::MapOfPoses::iterator pose_start_iter =
poses->find(FLAGS_frozen_target_id);
CHECK(pose_start_iter != poses->end()) << "There are no poses.";
@@ -479,8 +497,8 @@
PoseUtils::Pose3dToAffine3d(target_poses_.at(id_start)).inverse() *
PoseUtils::Pose3dToAffine3d(target_poses_.at(id_end));
auto constraint = PoseUtils::Affine3dToPose3d(H_start_end);
- LOG(INFO) << id_start << "->" << id_end << ": " << constraint.p.norm()
- << " meters";
+ VLOG(1) << id_start << "->" << id_end << ": " << constraint.p.norm()
+ << " meters";
}
}
}
diff --git a/y2023/vision/BUILD b/y2023/vision/BUILD
index d5168c1..ef300e3 100644
--- a/y2023/vision/BUILD
+++ b/y2023/vision/BUILD
@@ -83,10 +83,10 @@
"//frc971/vision:calibration_fbs",
"//frc971/vision:charuco_lib",
"//frc971/vision:target_mapper",
+ "//frc971/vision:visualize_robot",
"//third_party:opencv",
"//y2023/constants:constants_fbs",
"//y2023/constants:simulated_constants_sender",
- "@org_tuxfamily_eigen//:eigen",
],
)
@@ -203,6 +203,36 @@
)
cc_binary(
+ name = "calibrate_multi_cameras",
+ srcs = [
+ "calibrate_multi_cameras.cc",
+ ],
+ data = [
+ "//y2023:aos_config",
+ "//y2023/constants:constants.json",
+ "//y2023/vision:maps",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//y2023:__subpackages__"],
+ deps = [
+ ":aprilrobotics_lib",
+ "//aos:init",
+ "//aos/events:simulated_event_loop",
+ "//aos/events/logging:log_reader",
+ "//aos/util:mcap_logger",
+ "//frc971/constants:constants_sender_lib",
+ "//frc971/control_loops:pose",
+ "//frc971/vision:calibration_fbs",
+ "//frc971/vision:charuco_lib",
+ "//frc971/vision:target_mapper",
+ "//third_party:opencv",
+ "//y2023/constants:constants_fbs",
+ "//y2023/constants:simulated_constants_sender",
+ "@org_tuxfamily_eigen//:eigen",
+ ],
+)
+
+cc_binary(
name = "game_pieces_detector",
srcs = [
"game_pieces_main.cc",
diff --git a/y2023/vision/README.md b/y2023/vision/README.md
new file mode 100644
index 0000000..55d217f
--- /dev/null
+++ b/y2023/vision/README.md
@@ -0,0 +1,25 @@
+How to use the extrinsic calibration for camera-to-camera calibration
+
+This all assumes you have cameras that have been intrinsically
+calibrated, and that pi1 has a valid extrinsic calibration (from robot
+origin / IMU to the pi1 camera).
+
+By default, it will compute the extrinsics for each of the other cameras (pi2,
+pi3, pi4) relative to the robot origin (IMU origin).
+
+Steps:
+* Place two Aruco Diamond markers about 1 meter apart
+ (center-to-center) at a height so that they will be in view of the
+ cameras when the robot is about 1-2m away
+* Start with the robot in a position where both markers are fully in
+  view of one camera
+* Enable logging for all cameras
+* Rotate the robot in place through a full circle, so that each camera sees both tags, and at times just one tag.
+* Stop the logging and copy the files to your laptop
+* Run the calibration code on the resulting files, e.g.,
+ * `bazel run -c opt //y2023/vision:calibrate_multi_cameras -- /data/PATH_TO_LOGS --team_number 971 --target_type charuco_diamond`
+ * Can add the `--visualize` flag to see the camera views and marker detections
+ * I've sometimes found it necessary to add the `--skip_missing_forwarding_entries` flag, as guided by the logging messages
+* From the output, copy the calculated ("Updated") extrinsic into the
+ corresponding calibration file for the right robot and camera in the
+ calib_files directory
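+  * The lines to copy are logged as `--> Updated Extrinsic for pi2 is`
+    (and similarly for pi3 and pi4), each followed by the 4x4 homogeneous
+    transform matrix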
\ No newline at end of file
diff --git a/y2023/vision/calibrate_multi_cameras.cc b/y2023/vision/calibrate_multi_cameras.cc
new file mode 100644
index 0000000..aaff033
--- /dev/null
+++ b/y2023/vision/calibrate_multi_cameras.cc
@@ -0,0 +1,456 @@
+#include <numeric>
+
+#include "aos/configuration.h"
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "aos/util/mcap_logger.h"
+#include "frc971/control_loops/pose.h"
+#include "frc971/control_loops/quaternion_utils.h"
+#include "frc971/vision/calibration_generated.h"
+#include "frc971/vision/charuco_lib.h"
+#include "frc971/vision/target_mapper.h"
+#include "frc971/vision/visualize_robot.h"
+// clang-format off
+// OpenCV eigen files must be included after Eigen includes
+#include "opencv2/aruco.hpp"
+#include "opencv2/calib3d.hpp"
+#include "opencv2/core/eigen.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+// clang-format on
+#include "y2023/constants/simulated_constants_sender.h"
+#include "y2023/vision/aprilrobotics.h"
+#include "y2023/vision/vision_util.h"
+
+DEFINE_string(config, "",
+ "If set, override the log's config file with this one.");
+DEFINE_string(constants_path, "y2023/constants/constants.json",
+ "Path to the constant file");
+DEFINE_string(target_type, "charuco_diamond",
+ "Type of target being used [aruco, charuco, charuco_diamond]");
+DEFINE_int32(team_number, 0,
+ "Required: Use the calibration for a node with this team number");
+DEFINE_uint64(
+ wait_key, 1,
+ "Time in ms to wait between images (0 to wait indefinitely until click)");
+
+// Calibrate extrinsic relationship between cameras using two targets
+// seen jointly between cameras. Uses two types of information: 1)
+// when a single camera sees two targets, we estimate the pose between
+// targets, and 2) when two separate cameras each see a target, we can
+// use the pose between targets to estimate the pose between cameras.
+
+// We then create the extrinsics for the robot by starting with the
+// given extrinsic for camera 1 (between imu/robot and camera frames)
+// and then map each subsequent camera based on the data collected and
+// the extrinsic poses computed here.
+
+// TODO<Jim>: Should export a new extrinsic file for each camera
+// TODO<Jim>: Not currently using estimate from pi1->pi4-- should do full
+// estimation, and probably also include camera->imu extrinsics from all
+// cameras, not just pi1
+// TODO<Jim>: Should add ability to do this with apriltags, since they're on the
+// field
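+
+// As a sketch of the geometry used below (names follow the code): if camera A
+// sees target 0 while camera B sees target 1 at nearly the same time, and we
+// have an estimate of H_target0_target1 (from frames where one camera saw
+// both boards), then
+//   H_cameraB_target0 = H_cameraB_target1 * H_target0_target1.inverse()
+//   H_cameraA_cameraB = H_cameraA_target0 * H_cameraB_target0.inverse()
+// and the updated extrinsics chain from the fixed pi1 extrinsic as
+//   H_robot_cameraB = H_robot_cameraA * H_cameraA_cameraB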
+
+namespace y2023 {
+namespace vision {
+using frc971::vision::DataAdapter;
+using frc971::vision::ImageCallback;
+using frc971::vision::PoseUtils;
+using frc971::vision::TargetMap;
+using frc971::vision::TargetMapper;
+namespace calibration = frc971::vision::calibration;
+
+static constexpr double kImagePeriodMs =
+ 1.0 / 30.0 * 1000.0; // Image capture period in ms
+
+// Change reference frame from camera to robot
+Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camera_target,
+ Eigen::Affine3d extrinsics) {
+ const Eigen::Affine3d H_robot_camera = extrinsics;
+ const Eigen::Affine3d H_robot_target = H_robot_camera * H_camera_target;
+ return H_robot_target;
+}
+
+struct TimestampedPiDetection {
+ aos::distributed_clock::time_point time;
+ // Pose of target relative to the camera
+ Eigen::Affine3d H_camera_target;
+ // name of pi
+ std::string pi_name;
+ int board_id;
+};
+
+TimestampedPiDetection last_observation;
+std::vector<std::pair<TimestampedPiDetection, TimestampedPiDetection>>
+ detection_list;
+std::vector<TimestampedPiDetection> two_board_extrinsics_list;
+
+// TODO<jim>: Implement variance calcs
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<Eigen::Vector3d> &translation_list,
+ std::vector<Eigen::Vector4d> &rotation_list,
+ Eigen::Vector3d *translation_variance = nullptr,
+ Eigen::Vector3d *rotation_variance = nullptr) {
+ Eigen::Vector3d avg_translation =
+ std::accumulate(translation_list.begin(), translation_list.end(),
+ Eigen::Vector3d{0, 0, 0}) /
+ translation_list.size();
+
+ // TODO<Jim>: Use QuaternionMean from quaternion_utils.cc (but this
+ // requires a fixed number of quaternions to be averaged)
+ Eigen::Vector4d avg_rotation =
+ std::accumulate(rotation_list.begin(), rotation_list.end(),
+ Eigen::Vector4d{0, 0, 0, 0}) /
+ rotation_list.size();
+ // Normalize, so it's a valid quaternion
+ avg_rotation = avg_rotation / avg_rotation.norm();
+ Eigen::Quaterniond avg_rotation_q{avg_rotation[0], avg_rotation[1],
+ avg_rotation[2], avg_rotation[3]};
+ Eigen::Translation3d translation(avg_translation);
+ Eigen::Affine3d return_pose = translation * avg_rotation_q;
+ if (translation_variance != nullptr) {
+ *translation_variance = Eigen::Vector3d();
+ }
+ if (rotation_variance != nullptr) {
+ LOG(INFO) << *rotation_variance;
+ }
+
+ return return_pose;
+}
+
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<Eigen::Affine3d> &pose_list,
+ Eigen::Vector3d *translation_variance = nullptr,
+ Eigen::Vector3d *rotation_variance = nullptr) {
+ std::vector<Eigen::Vector3d> translation_list;
+ std::vector<Eigen::Vector4d> rotation_list;
+
+ for (Eigen::Affine3d pose : pose_list) {
+ translation_list.push_back(pose.translation());
+ Eigen::Quaterniond quat(pose.rotation().matrix());
+ rotation_list.push_back(
+ Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
+ }
+
+ return ComputeAveragePose(translation_list, rotation_list,
+ translation_variance, rotation_variance);
+}
+
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<TimestampedPiDetection> &pose_list,
+ Eigen::Vector3d *translation_variance = nullptr,
+ Eigen::Vector3d *rotation_variance = nullptr) {
+ std::vector<Eigen::Vector3d> translation_list;
+ std::vector<Eigen::Vector4d> rotation_list;
+
+ for (TimestampedPiDetection pose : pose_list) {
+ translation_list.push_back(pose.H_camera_target.translation());
+ Eigen::Quaterniond quat(pose.H_camera_target.rotation().matrix());
+ rotation_list.push_back(
+ Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
+ }
+ return ComputeAveragePose(translation_list, rotation_list,
+ translation_variance, rotation_variance);
+}
+
+void ProcessImage(aos::EventLoop *event_loop, cv::Mat rgb_image,
+ const aos::monotonic_clock::time_point eof,
+ aos::distributed_clock::time_point distributed_eof,
+ frc971::vision::CharucoExtractor &charuco_extractor,
+ std::string pi_name) {
+ std::vector<cv::Vec4i> charuco_ids;
+ std::vector<std::vector<cv::Point2f>> charuco_corners;
+ bool valid = false;
+ std::vector<Eigen::Vector3d> rvecs_eigen;
+ std::vector<Eigen::Vector3d> tvecs_eigen;
+ charuco_extractor.ProcessImage(rgb_image, eof, event_loop->monotonic_now(),
+ charuco_ids, charuco_corners, valid,
+ rvecs_eigen, tvecs_eigen);
+
+ if (valid) {
+ if (tvecs_eigen.size() == 2) {
+ VLOG(2) << "Saw two boards in same view from " << pi_name;
+ // Handle when we see two boards at once
+ std::vector<TimestampedPiDetection> detections;
+ for (uint i = 0; i < tvecs_eigen.size(); i++) {
+ Eigen::Quaternion<double> rotation(
+ frc971::controls::ToQuaternionFromRotationVector(rvecs_eigen[i]));
+ Eigen::Translation3d translation(tvecs_eigen[i]);
+ Eigen::Affine3d H_camera_board = translation * rotation;
+ TimestampedPiDetection new_observation{
+ .time = distributed_eof,
+ .H_camera_target = H_camera_board,
+ .pi_name = pi_name,
+ .board_id = charuco_ids[i][0]};
+ detections.emplace_back(new_observation);
+ }
+ Eigen::Affine3d H_boardA_boardB =
+ detections[0].H_camera_target.inverse() *
+ detections[1].H_camera_target;
+
+ TimestampedPiDetection board_extrinsic{.time = distributed_eof,
+ .H_camera_target = H_boardA_boardB,
+ .pi_name = pi_name,
+ .board_id = charuco_ids[0][0]};
+
+ // Make sure we've got them in the right order (sorted by id)
+ if (detections[0].board_id < detections[1].board_id) {
+ two_board_extrinsics_list.push_back(board_extrinsic);
+ } else {
+ // Flip them around
+ board_extrinsic.H_camera_target =
+ board_extrinsic.H_camera_target.inverse();
+ board_extrinsic.board_id = charuco_ids[1][0];
+ two_board_extrinsics_list.push_back(board_extrinsic);
+ }
+ } else {
+ VLOG(1) << "Saw single board in camera " << pi_name;
+ Eigen::Quaternion<double> rotation(
+ frc971::controls::ToQuaternionFromRotationVector(rvecs_eigen[0]));
+ Eigen::Translation3d translation(tvecs_eigen[0]);
+ Eigen::Affine3d H_camera_board = translation * rotation;
+ TimestampedPiDetection new_observation{.time = distributed_eof,
+ .H_camera_target = H_camera_board,
+ .pi_name = pi_name,
+ .board_id = charuco_ids[0][0]};
+
+ VLOG(2) << "Checking versus last result from " << last_observation.pi_name
+ << " at time " << last_observation.time << " with delta time = "
+ << std::abs((distributed_eof - last_observation.time).count());
+ // Only pair up two observations if they're near enough to each other
+ // in time, i.e., within +/- half of kImagePeriodMs (+/-16.67ms for
+ // 30 Hz capture). This should make sure we're always pairing the
+ // closest images between cameras without missing too many possible
+ // pairs
+ // TODO<Jim>: Should also check that (rotational) velocity of the robot is
+ // small
+ if (std::abs((distributed_eof - last_observation.time).count()) <
+ static_cast<int>(kImagePeriodMs / 2.0 * 1000000)) {
+ Eigen::Affine3d H_camera1_board = last_observation.H_camera_target;
+ Eigen::Affine3d H_camera1_camera2 =
+ H_camera1_board * H_camera_board.inverse();
+ VLOG(1) << "Storing observation between " << last_observation.pi_name
+ << ", target " << last_observation.board_id << " and "
+ << new_observation.pi_name << ", target "
+ << new_observation.board_id << " is "
+ << H_camera1_camera2.matrix();
+ // Sort by pi numbering
+ if (last_observation.pi_name < new_observation.pi_name) {
+ detection_list.push_back(
+ std::pair(last_observation, new_observation));
+ } else if (last_observation.pi_name > new_observation.pi_name) {
+ detection_list.push_back(
+ std::pair(new_observation, last_observation));
+ } else {
+ LOG(WARNING) << "Got 2 observations in a row from same pi. Probably "
+ "not too much of an issue???";
+ }
+ } else {
+ VLOG(2) << "Storing observation for " << pi_name << " at time "
+ << distributed_eof;
+ last_observation = new_observation;
+ }
+ }
+ }
+ if (FLAGS_visualize) {
+ std::string image_name = pi_name + " Image";
+ cv::Mat rgb_small;
+ cv::resize(rgb_image, rgb_small, cv::Size(), 0.5, 0.5);
+ cv::imshow(image_name, rgb_small);
+ cv::waitKey(FLAGS_wait_key);
+ }
+}
+
+void ExtrinsicsMain(int argc, char *argv[]) {
+ std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections;
+
+ std::optional<aos::FlatbufferDetachedBuffer<aos::Configuration>> config =
+ (FLAGS_config.empty()
+ ? std::nullopt
+ : std::make_optional(aos::configuration::ReadConfig(FLAGS_config)));
+
+ // open logfiles
+ aos::logger::LogReader reader(
+ aos::logger::SortParts(aos::logger::FindLogs(argc, argv)),
+ config.has_value() ? &config->message() : nullptr);
+
+ constexpr size_t kNumPis = 4;
+ for (size_t i = 1; i <= kNumPis; i++) {
+ reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
+ "frc971.vision.TargetMap");
+ reader.RemapLoggedChannel(absl::StrFormat("/pi%u/camera", i),
+ "foxglove.ImageAnnotations");
+ reader.RemapLoggedChannel(absl::StrFormat("/pi%u/constants", i),
+ "y2023.Constants");
+ }
+ reader.RemapLoggedChannel("/imu/constants", "y2023.Constants");
+ reader.Register();
+
+ SendSimulationConstants(reader.event_loop_factory(), FLAGS_team_number,
+ FLAGS_constants_path);
+
+ LOG(INFO) << "Using target type " << FLAGS_target_type;
+ std::vector<std::string> node_list;
+ node_list.push_back("pi1");
+ node_list.push_back("pi2");
+ node_list.push_back("pi3");
+ node_list.push_back("pi4");
+
+ std::vector<std::unique_ptr<aos::EventLoop>> detection_event_loops;
+ std::vector<frc971::vision::CharucoExtractor *> charuco_extractors;
+ std::vector<frc971::vision::ImageCallback *> image_callbacks;
+ std::vector<Eigen::Affine3d> default_extrinsics;
+
+ for (uint i = 0; i < node_list.size(); i++) {
+ std::string node = node_list[i];
+ const aos::Node *pi =
+ aos::configuration::GetNode(reader.configuration(), node.c_str());
+
+ detection_event_loops.emplace_back(
+ reader.event_loop_factory()->MakeEventLoop(
+ (node + "_detection").c_str(), pi));
+
+ frc971::constants::ConstantsFetcher<y2023::Constants> constants_fetcher(
+ detection_event_loops.back().get());
+ aos::FlatbufferDetachedBuffer<calibration::CameraCalibration> intrinsics =
+ aos::RecursiveCopyFlatBuffer(y2023::vision::FindCameraCalibration(
+ constants_fetcher.constants(), node));
+
+ const calibration::CameraCalibration *calibration =
+ FindCameraCalibration(constants_fetcher.constants(), node);
+
+ frc971::vision::TargetType target_type =
+ frc971::vision::TargetTypeFromString(FLAGS_target_type);
+ frc971::vision::CharucoExtractor *charuco_ext =
+ new frc971::vision::CharucoExtractor(calibration, target_type);
+ charuco_extractors.emplace_back(charuco_ext);
+
+ cv::Mat extrinsics_cv = CameraExtrinsics(calibration).value();
+ Eigen::Matrix4d extrinsics_matrix;
+ cv::cv2eigen(extrinsics_cv, extrinsics_matrix);
+ const auto ext_H_robot_pi = Eigen::Affine3d(extrinsics_matrix);
+ default_extrinsics.emplace_back(ext_H_robot_pi);
+
+ LOG(INFO) << "Got extrinsics for " << node << " as\n"
+ << default_extrinsics.back().matrix();
+
+ frc971::vision::ImageCallback *image_callback =
+ new frc971::vision::ImageCallback(
+ detection_event_loops[i].get(), "/" + node_list[i] + "/camera",
+ [&reader, &charuco_extractors, &detection_event_loops, &node_list,
+ i](cv::Mat rgb_image, const aos::monotonic_clock::time_point eof) {
+ aos::distributed_clock::time_point pi_distributed_time =
+ reader.event_loop_factory()
+ ->GetNodeEventLoopFactory(
+ detection_event_loops[i].get()->node())
+ ->ToDistributedClock(eof);
+ ProcessImage(detection_event_loops[i].get(), rgb_image, eof,
+ pi_distributed_time, *charuco_extractors[i],
+ node_list[i]);
+ });
+
+ image_callbacks.emplace_back(image_callback);
+ }
+
+ const auto ext_H_robot_piA = default_extrinsics[0];
+ const auto ext_H_robot_piB = default_extrinsics[1];
+
+ reader.event_loop_factory()->Run();
+
+ for (auto node : node_list) {
+ std::vector<TimestampedPiDetection> pose_list;
+ for (auto ext : two_board_extrinsics_list) {
+ if (ext.pi_name == node) {
+ pose_list.push_back(ext);
+ }
+ }
+ Eigen::Affine3d avg_pose = ComputeAveragePose(pose_list);
+ VLOG(1) << "Estimate from " << node << " with " << pose_list.size()
+ << " observations is:\n"
+ << avg_pose.matrix();
+ }
+ Eigen::Affine3d avg_pose = ComputeAveragePose(two_board_extrinsics_list);
+ LOG(INFO) << "Estimate of two board pose using all nodes with "
+ << two_board_extrinsics_list.size() << " observations is:\n"
+ << avg_pose.matrix() << "\n";
+
+ LOG(INFO) << "Got " << detection_list.size() << " extrinsic observations";
+ Eigen::Affine3d H_target0_target1 = avg_pose;
+ int base_target_id = detection_list[0].first.board_id;
+ std::vector<Eigen::Affine3d> H_cameraA_cameraB_list;
+ std::vector<Eigen::Affine3d> updated_extrinsics;
+ // Use the first node's extrinsics as our base, and fix from there
+ updated_extrinsics.push_back(default_extrinsics[0]);
+ LOG(INFO) << "Default extrinsic for node " << node_list[0] << " is "
+ << default_extrinsics[0].matrix();
+ for (uint i = 0; i < node_list.size() - 1; i++) {
+ H_cameraA_cameraB_list.clear();
+ for (auto [poseA, poseB] : detection_list) {
+ if ((poseA.pi_name == node_list[i]) &&
+ (poseB.pi_name == node_list[i + 1])) {
+ Eigen::Affine3d H_cameraA_target0 = poseA.H_camera_target;
+ // If this pose isn't referenced to target0, correct that
+ if (poseA.board_id != base_target_id) {
+ H_cameraA_target0 = H_cameraA_target0 * H_target0_target1.inverse();
+ }
+
+ Eigen::Affine3d H_cameraB_target0 = poseB.H_camera_target;
+ if (poseB.board_id != base_target_id) {
+ H_cameraB_target0 = H_cameraB_target0 * H_target0_target1.inverse();
+ }
+ Eigen::Affine3d H_cameraA_cameraB =
+ H_cameraA_target0 * H_cameraB_target0.inverse();
+ H_cameraA_cameraB_list.push_back(H_cameraA_cameraB);
+ }
+ }
+ CHECK(H_cameraA_cameraB_list.size() > 0)
+ << "Failed with zero poses for node " << node_list[i];
+ if (H_cameraA_cameraB_list.size() > 0) {
+ Eigen::Affine3d avg_H_cameraA_cameraB =
+ ComputeAveragePose(H_cameraA_cameraB_list);
+ LOG(INFO) << "From " << node_list[i] << " to " << node_list[i + 1]
+ << " found " << H_cameraA_cameraB_list.size()
+ << " observations, and the average pose is:\n"
+ << avg_H_cameraA_cameraB.matrix();
+ Eigen::Affine3d default_H_cameraA_camera_B =
+ default_extrinsics[i].inverse() * default_extrinsics[i + 1];
+ LOG(INFO) << "Compare this to that from default values:\n"
+ << default_H_cameraA_camera_B.matrix();
+ // Next extrinsic is just previous one * avg_delta_pose
+ Eigen::Affine3d next_extrinsic =
+ updated_extrinsics.back() * avg_H_cameraA_cameraB;
+ LOG(INFO)
+ << "Difference between averaged and default delta poses has |T| = "
+ << (avg_H_cameraA_cameraB * default_H_cameraA_camera_B.inverse())
+ .translation()
+ .norm()
+ << " and |R| = "
+ << Eigen::AngleAxisd(
+ (avg_H_cameraA_cameraB * default_H_cameraA_camera_B.inverse())
+ .rotation())
+ .angle();
+ updated_extrinsics.push_back(next_extrinsic);
+ LOG(INFO) << "Default Extrinsic for " << node_list[i + 1] << " is \n"
+ << default_extrinsics[i + 1].matrix();
+ LOG(INFO) << "--> Updated Extrinsic for " << node_list[i + 1] << " is \n"
+ << next_extrinsic.matrix();
+ }
+ }
+
+ // Cleanup
+ for (uint i = 0; i < image_callbacks.size(); i++) {
+ delete charuco_extractors[i];
+ delete image_callbacks[i];
+ }
+}
+} // namespace vision
+} // namespace y2023
+
+int main(int argc, char **argv) {
+ aos::InitGoogle(&argc, &argv);
+ y2023::vision::ExtrinsicsMain(argc, argv);
+}
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index 2ca6f66..eec545e 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -20,38 +20,39 @@
#include "y2023/vision/aprilrobotics.h"
#include "y2023/vision/vision_util.h"
-DEFINE_string(json_path, "y2023/vision/maps/target_map.json",
- "Specify path for json with initial pose guesses.");
DEFINE_string(config, "",
"If set, override the log's config file with this one.");
DEFINE_string(constants_path, "y2023/constants/constants.json",
"Path to the constant file");
-DEFINE_string(output_dir, "y2023/vision/maps",
- "Directory to write solved target map to");
+DEFINE_string(dump_constraints_to, "/tmp/mapping_constraints.txt",
+ "Write the target constraints to this path");
+DEFINE_string(dump_stats_to, "/tmp/mapping_stats.txt",
+ "Write the mapping stats to this path");
DEFINE_string(field_name, "charged_up",
"Field name, for the output json filename and flatbuffer field");
-DEFINE_int32(team_number, 0,
- "Required: Use the calibration for a node with this team number");
-DEFINE_string(mcap_output_path, "/tmp/log.mcap", "Log to output.");
-DEFINE_string(pi, "pi1", "Pi name to generate mcap log for; defaults to pi1.");
+DEFINE_string(json_path, "y2023/vision/maps/target_map.json",
+ "Specify path for json with initial pose guesses.");
DEFINE_double(max_pose_error, 1e-6,
"Throw out target poses with a higher pose error than this");
DEFINE_double(
max_pose_error_ratio, 0.4,
"Throw out target poses with a higher pose error ratio than this");
-DEFINE_uint64(wait_key, 0,
- "Time in ms to wait between images, if no click (0 to wait "
- "indefinitely until click).");
+DEFINE_string(mcap_output_path, "", "Log to output.");
+DEFINE_string(output_dir, "y2023/vision/maps",
+ "Directory to write solved target map to");
DEFINE_double(pause_on_distance, 1.0,
"Pause if two consecutive implied robot positions differ by more "
"than this many meters");
+DEFINE_string(pi, "pi1", "Pi name to generate mcap log for; defaults to pi1.");
DEFINE_uint64(skip_to, 1,
"Start at combined image of this number (1 is the first image)");
DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
-DEFINE_string(dump_constraints_to, "/tmp/mapping_constraints.txt",
- "Write the target constraints to this path");
-DEFINE_string(dump_stats_to, "/tmp/mapping_stats.txt",
- "Write the target constraints to this path");
+DEFINE_int32(team_number, 0,
+ "Required: Use the calibration for a node with this team number");
+DEFINE_uint64(wait_key, 1,
+ "Time in ms to wait between images, if no click (0 to wait "
+ "indefinitely until click).");
+
DECLARE_int32(frozen_target_id);
DECLARE_int32(min_target_id);
DECLARE_int32(max_target_id);
@@ -307,7 +308,6 @@
vis_robot_.DrawRobotOutline(H_world_robot,
std::to_string(target_pose_fbs->id()),
kPiColors.at(node_name));
-
vis_robot_.DrawFrameAxes(H_world_target,
std::to_string(target_pose_fbs->id()),
kPiColors.at(node_name));
@@ -385,6 +385,7 @@
}),
target_constraints.end());
+ LOG(INFO) << "Solving for locations of tags";
TargetMapper mapper(FLAGS_json_path, target_constraints);
mapper.Solve(FLAGS_field_name, FLAGS_output_dir);