Changes to camera extrinsic calibration to improve accuracy
Adds outlier rejection.
Does proper rotation (quaternion) averaging.
When a camera sees two targets at once, stores that two-board
observation, and also stores a single-target observation to match with
other cameras. This leads to many more good connections.
Removes dead code, and refactors a couple of pieces, e.g., writing the extrinsics file.
Also refactors some of the utilities to use quaternion averaging from utils,
and moves other utility functions to vision_util_lib.
Change-Id: I918ce9c937d80717daa6659abbb139006506d4cc
Signed-off-by: Jim Ostrowski <yimmy13@gmail.com>
diff --git a/frc971/control_loops/quaternion_utils.h b/frc971/control_loops/quaternion_utils.h
index 3e40e3f4..c2c854c 100644
--- a/frc971/control_loops/quaternion_utils.h
+++ b/frc971/control_loops/quaternion_utils.h
@@ -8,19 +8,17 @@
namespace frc971::controls {
-// Function to compute the quaternion average of a bunch of quaternions. Each
-// column in the input matrix is a quaternion (optionally scaled by it's
-// weight).
-template <int SM>
-inline Eigen::Matrix<double, 4, 1> QuaternionMean(
- Eigen::Matrix<double, 4, SM> input) {
+// Helper function to extract the mean quaternion from A*A^T of a quaternion
+// list. This allows us to support multiple formats of the input quaternion
+// list.
+inline Eigen::Matrix<double, 4, 1> ExtractQuaternionMean(
+ Eigen::Matrix<double, 4, 4> input) {
// Algorithm to compute the average of a bunch of quaternions:
- // http://www.acsu.buffalo.edu/~johnc/ave_quat07.pdf
-
- Eigen::Matrix<double, 4, 4> m = input * input.transpose();
+ // http://www.acsu.buffalo.edu/~johnc/ave_quat07.pdf
+ // See also:
+ // https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20070017872.pdf
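+  // The mean is the eigenvector of the input matrix M = A*A^T = sum_i q_i *
+  // q_i^T associated with its largest eigenvalue (Markley et al., "Averaging
+  // Quaternions")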
Eigen::EigenSolver<Eigen::Matrix<double, 4, 4>> solver;
- solver.compute(m);
+ solver.compute(input);
Eigen::EigenSolver<Eigen::Matrix<double, 4, 4>>::EigenvectorsType
eigenvectors = solver.eigenvectors();
@@ -47,6 +45,37 @@
return eigenvectors.col(max_index).real().normalized();
}
+// Function to compute the quaternion average of a bunch of quaternions. Each
+// column in the 4xN input matrix is a quaternion (optionally scaled by its
+// weight).
+template <int N>
+inline Eigen::Matrix<double, 4, 1> QuaternionMean(
+ Eigen::Matrix<double, 4, N> quaternions) {
+ Eigen::Matrix<double, 4, 4> m = quaternions * quaternions.transpose();
+
+ return ExtractQuaternionMean(m);
+}
+
+// Function to compute the quaternion average of a bunch of quaternions.
+// This allows for passing in a variable-size list (vector) of quaternions.
+
+// For reference (since I've been bitten by it):
+// Eigen::Quaternion stores and prints coefficients as [x, y, z, w]
+// and initializes using a Vector4d in this order, BUT
+// initializes with scalars as Eigen::Quaternion{w, x, y, z}
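+// For example (hypothetical component values x, y, z, w):
+//   Eigen::Quaterniond q1(Eigen::Vector4d(x, y, z, w));  // coefficient order
+//   Eigen::Quaterniond q2(w, x, y, z);  // scalar order; q1 and q2 are equal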
+inline Eigen::Vector4d QuaternionMean(
+ std::vector<Eigen::Vector4d> quaternion_list) {
+ CHECK(quaternion_list.size() != 0)
+ << "Must have at least one quaternion to compute an average!";
+
+ Eigen::Matrix<double, 4, 4> m = Eigen::Matrix4d::Zero();
+ for (Eigen::Vector4d quaternion : quaternion_list) {
+ m += quaternion * quaternion.transpose();
+ }
+
+ return Eigen::Vector4d(ExtractQuaternionMean(m));
+}
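+
+// Minimal usage sketch of the vector overload (values hypothetical):
+//   std::vector<Eigen::Vector4d> qs = {Eigen::Vector4d(0, 0, 0, 1),
+//                                      Eigen::Vector4d(0, 0, 0, 1)};
+//   Eigen::Vector4d mean = QuaternionMean(qs);
+// mean is +/-[0, 0, 0, 1]; the sign ambiguity is inherent, since q and -q
+// represent the same rotation.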
+
// Converts from a quaternion to a rotation vector, where the rotation vector's
// direction represents the axis to rotate around and its magnitude represents
// the number of radians to rotate.
@@ -74,7 +103,7 @@
inline Eigen::Vector4d QuaternionDerivative(Eigen::Vector3d omega,
const Eigen::Vector4d &q_matrix) {
// See https://www.ashwinnarayan.com/post/how-to-integrate-quaternions/ for
- // another resource on quaternion integration and derivitives.
+ // another resource on quaternion integration and derivatives.
Eigen::Quaternion<double> q(q_matrix);
Eigen::Quaternion<double> omega_q;
diff --git a/frc971/control_loops/quaternion_utils_test.cc b/frc971/control_loops/quaternion_utils_test.cc
index 2eaeaa0..63f4507 100644
--- a/frc971/control_loops/quaternion_utils_test.cc
+++ b/frc971/control_loops/quaternion_utils_test.cc
@@ -17,6 +17,7 @@
// return the original quaternion.
TEST(DownEstimatorTest, QuaternionMean) {
Eigen::Matrix<double, 4, 7> vectors;
+ std::vector<Eigen::Vector4d> quaternion_list;
vectors.col(0) << 0, 0, 0, 1;
for (int i = 0; i < 3; ++i) {
Eigen::Matrix<double, 4, 1> perturbation;
@@ -29,6 +30,7 @@
for (int i = 0; i < 7; ++i) {
vectors.col(i).normalize();
+ quaternion_list.push_back(Eigen::Vector4d(vectors.col(i)));
}
Eigen::Matrix<double, 4, 1> mean = QuaternionMean(vectors);
@@ -36,6 +38,15 @@
for (int i = 0; i < 4; ++i) {
EXPECT_NEAR(mean(i, 0), vectors(i, 0), 0.001) << ": Failed on index " << i;
}
+
+ // Test version of code that passes in a vector of quaternions
+ mean = QuaternionMean(quaternion_list);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_NEAR(mean(i, 0), quaternion_list[0](i, 0), 0.001)
+ << ": Failed on index " << i << " with mean = " << mean
+ << " and quat_list = " << quaternion_list[0];
+ }
}
// Tests that ToRotationVectorFromQuaternion works for a 0 rotation.
diff --git a/frc971/vision/BUILD b/frc971/vision/BUILD
index 48a8fb1..dc40bab 100644
--- a/frc971/vision/BUILD
+++ b/frc971/vision/BUILD
@@ -238,6 +238,7 @@
":target_map_fbs",
"//aos/events:simulated_event_loop",
"//frc971/control_loops:control_loop",
+ "//frc971/vision:vision_util_lib",
"//frc971/vision:visualize_robot",
"//frc971/vision/ceres:pose_graph_3d_lib",
"//third_party:opencv",
@@ -437,11 +438,16 @@
hdrs = ["vision_util_lib.h"],
visibility = ["//visibility:public"],
deps = [
+ "//aos/util:math",
+ "//frc971/control_loops:quaternion_utils",
"//frc971/vision:calibration_fbs",
+ "//frc971/vision/ceres:pose_graph_3d_lib",
"//third_party:opencv",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings:str_format",
+ "@com_google_ceres_solver//:ceres",
+ "@org_tuxfamily_eigen//:eigen",
],
)
diff --git a/frc971/vision/target_mapper.cc b/frc971/vision/target_mapper.cc
index 36f0a29..99b67d2 100644
--- a/frc971/vision/target_mapper.cc
+++ b/frc971/vision/target_mapper.cc
@@ -5,6 +5,7 @@
#include "frc971/control_loops/control_loop.h"
#include "frc971/vision/ceres/pose_graph_3d_error_term.h"
#include "frc971/vision/geometry.h"
+#include "frc971/vision/vision_util_lib.h"
ABSL_FLAG(uint64_t, max_num_iterations, 100,
"Maximum number of iterations for the ceres solver");
@@ -28,89 +29,6 @@
"Whether to do a final fit of the solved map to the original map");
namespace frc971::vision {
-Eigen::Affine3d PoseUtils::Pose3dToAffine3d(
- const ceres::examples::Pose3d &pose3d) {
- Eigen::Affine3d H_world_pose =
- Eigen::Translation3d(pose3d.p(0), pose3d.p(1), pose3d.p(2)) * pose3d.q;
- return H_world_pose;
-}
-
-ceres::examples::Pose3d PoseUtils::Affine3dToPose3d(const Eigen::Affine3d &H) {
- return ceres::examples::Pose3d{.p = H.translation(),
- .q = Eigen::Quaterniond(H.rotation())};
-}
-
-ceres::examples::Pose3d PoseUtils::ComputeRelativePose(
- const ceres::examples::Pose3d &pose_1,
- const ceres::examples::Pose3d &pose_2) {
- Eigen::Affine3d H_world_1 = Pose3dToAffine3d(pose_1);
- Eigen::Affine3d H_world_2 = Pose3dToAffine3d(pose_2);
-
- // Get the location of 2 in the 1 frame
- Eigen::Affine3d H_1_2 = H_world_1.inverse() * H_world_2;
- return Affine3dToPose3d(H_1_2);
-}
-
-ceres::examples::Pose3d PoseUtils::ComputeOffsetPose(
- const ceres::examples::Pose3d &pose_1,
- const ceres::examples::Pose3d &pose_2_relative) {
- auto H_world_1 = Pose3dToAffine3d(pose_1);
- auto H_1_2 = Pose3dToAffine3d(pose_2_relative);
- auto H_world_2 = H_world_1 * H_1_2;
-
- return Affine3dToPose3d(H_world_2);
-}
-
-Eigen::Quaterniond PoseUtils::EulerAnglesToQuaternion(
- const Eigen::Vector3d &rpy) {
- Eigen::AngleAxisd roll_angle(rpy.x(), Eigen::Vector3d::UnitX());
- Eigen::AngleAxisd pitch_angle(rpy.y(), Eigen::Vector3d::UnitY());
- Eigen::AngleAxisd yaw_angle(rpy.z(), Eigen::Vector3d::UnitZ());
-
- return yaw_angle * pitch_angle * roll_angle;
-}
-
-Eigen::Vector3d PoseUtils::QuaternionToEulerAngles(
- const Eigen::Quaterniond &q) {
- return RotationMatrixToEulerAngles(q.toRotationMatrix());
-}
-
-Eigen::Vector3d PoseUtils::RotationMatrixToEulerAngles(
- const Eigen::Matrix3d &R) {
- double roll = aos::math::NormalizeAngle(std::atan2(R(2, 1), R(2, 2)));
- double pitch = aos::math::NormalizeAngle(-std::asin(R(2, 0)));
- double yaw = aos::math::NormalizeAngle(std::atan2(R(1, 0), R(0, 0)));
-
- return Eigen::Vector3d(roll, pitch, yaw);
-}
-
-flatbuffers::Offset<TargetPoseFbs> PoseUtils::TargetPoseToFbs(
- const TargetMapper::TargetPose &target_pose,
- flatbuffers::FlatBufferBuilder *fbb) {
- const auto position_offset =
- CreatePosition(*fbb, target_pose.pose.p(0), target_pose.pose.p(1),
- target_pose.pose.p(2));
- const auto orientation_offset =
- CreateQuaternion(*fbb, target_pose.pose.q.w(), target_pose.pose.q.x(),
- target_pose.pose.q.y(), target_pose.pose.q.z());
- return CreateTargetPoseFbs(*fbb, target_pose.id, position_offset,
- orientation_offset);
-}
-
-TargetMapper::TargetPose PoseUtils::TargetPoseFromFbs(
- const TargetPoseFbs &target_pose_fbs) {
- return {.id = static_cast<TargetMapper::TargetId>(target_pose_fbs.id()),
- .pose = ceres::examples::Pose3d{
- Eigen::Vector3d(target_pose_fbs.position()->x(),
- target_pose_fbs.position()->y(),
- target_pose_fbs.position()->z()),
- Eigen::Quaterniond(target_pose_fbs.orientation()->w(),
- target_pose_fbs.orientation()->x(),
- target_pose_fbs.orientation()->y(),
- target_pose_fbs.orientation()->z())
- .normalized()}};
-}
-
ceres::examples::VectorOfConstraints DataAdapter::MatchTargetDetections(
const std::vector<DataAdapter::TimestampedDetection>
        &timestamped_target_detections,
@@ -245,7 +163,7 @@
aos::JsonFileToFlatbuffer<TargetMap>(target_poses_path);
for (const auto *target_pose_fbs : *target_map.message().target_poses()) {
ideal_target_poses_[target_pose_fbs->id()] =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs).pose;
+ TargetPoseFromFbs(*target_pose_fbs).pose;
}
target_poses_ = ideal_target_poses_;
CountConstraints();
@@ -263,6 +181,33 @@
CountConstraints();
}
+flatbuffers::Offset<TargetPoseFbs> TargetMapper::TargetPoseToFbs(
+ const TargetMapper::TargetPose &target_pose,
+ flatbuffers::FlatBufferBuilder *fbb) {
+ const auto position_offset =
+ CreatePosition(*fbb, target_pose.pose.p(0), target_pose.pose.p(1),
+ target_pose.pose.p(2));
+ const auto orientation_offset =
+ CreateQuaternion(*fbb, target_pose.pose.q.w(), target_pose.pose.q.x(),
+ target_pose.pose.q.y(), target_pose.pose.q.z());
+ return CreateTargetPoseFbs(*fbb, target_pose.id, position_offset,
+ orientation_offset);
+}
+
+TargetMapper::TargetPose TargetMapper::TargetPoseFromFbs(
+ const TargetPoseFbs &target_pose_fbs) {
+ return {.id = static_cast<TargetMapper::TargetId>(target_pose_fbs.id()),
+ .pose = ceres::examples::Pose3d{
+ Eigen::Vector3d(target_pose_fbs.position()->x(),
+ target_pose_fbs.position()->y(),
+ target_pose_fbs.position()->z()),
+ Eigen::Quaterniond(target_pose_fbs.orientation()->w(),
+ target_pose_fbs.orientation()->x(),
+ target_pose_fbs.orientation()->y(),
+ target_pose_fbs.orientation()->z())
+ .normalized()}};
+}
+
namespace {
std::pair<TargetMapper::TargetId, TargetMapper::TargetId> MakeIdPair(
const ceres::examples::Constraint3d &constraint) {
@@ -580,7 +525,7 @@
std::vector<flatbuffers::Offset<TargetPoseFbs>> target_poses_fbs;
for (const auto &[id, pose] : target_poses_) {
target_poses_fbs.emplace_back(
- PoseUtils::TargetPoseToFbs(TargetPose{.id = id, .pose = pose}, &fbb));
+ TargetPoseToFbs(TargetPose{.id = id, .pose = pose}, &fbb));
}
const auto field_name_offset = fbb.CreateString(field_name);
diff --git a/frc971/vision/target_mapper.h b/frc971/vision/target_mapper.h
index cae1c69..aa34006 100644
--- a/frc971/vision/target_mapper.h
+++ b/frc971/vision/target_mapper.h
@@ -9,8 +9,11 @@
#include "aos/events/simulated_event_loop.h"
#include "frc971/vision/ceres/types.h"
#include "frc971/vision/target_map_generated.h"
+#include "frc971/vision/vision_util_lib.h"
#include "frc971/vision/visualize_robot.h"
+ABSL_DECLARE_FLAG(double, outlier_std_devs);
+
namespace frc971::vision {
// Estimates positions of vision targets (ex. April Tags) using
@@ -46,6 +49,15 @@
// Prints target poses into a TargetMap flatbuffer json
std::string MapToJson(std::string_view field_name) const;
+ // Builds a TargetPoseFbs from a TargetPose
+ static flatbuffers::Offset<TargetPoseFbs> TargetPoseToFbs(
+ const TargetMapper::TargetPose &target_pose,
+ flatbuffers::FlatBufferBuilder *fbb);
+
+ // Converts a TargetPoseFbs to a TargetPose
+ static TargetMapper::TargetPose TargetPoseFromFbs(
+ const TargetPoseFbs &target_pose_fbs);
+
static std::optional<TargetPose> GetTargetPoseById(
std::vector<TargetPose> target_poses, TargetId target_id);
@@ -134,43 +146,6 @@
Stats stats_with_outliers_;
};
-// Utility functions for dealing with ceres::examples::Pose3d structs
-class PoseUtils {
- public:
- // Embeds a 3d pose into an affine transformation
- static Eigen::Affine3d Pose3dToAffine3d(
- const ceres::examples::Pose3d &pose3d);
- // Inverse of above function
- static ceres::examples::Pose3d Affine3dToPose3d(const Eigen::Affine3d &H);
-
- // Computes pose_2 relative to pose_1. This is equivalent to (pose_1^-1 *
- // pose_2)
- static ceres::examples::Pose3d ComputeRelativePose(
- const ceres::examples::Pose3d &pose_1,
- const ceres::examples::Pose3d &pose_2);
-
- // Computes pose_2 given a pose_1 and pose_2 relative to pose_1. This is
- // equivalent to (pose_1 * pose_2_relative)
- static ceres::examples::Pose3d ComputeOffsetPose(
- const ceres::examples::Pose3d &pose_1,
- const ceres::examples::Pose3d &pose_2_relative);
-
- // Converts a rotation with roll, pitch, and yaw into a quaternion
- static Eigen::Quaterniond EulerAnglesToQuaternion(const Eigen::Vector3d &rpy);
- // Inverse of above function
- static Eigen::Vector3d QuaternionToEulerAngles(const Eigen::Quaterniond &q);
- // Converts a 3d rotation matrix into a rotation with roll, pitch, and yaw
- static Eigen::Vector3d RotationMatrixToEulerAngles(const Eigen::Matrix3d &R);
-
- // Builds a TargetPoseFbs from a TargetPose
- static flatbuffers::Offset<TargetPoseFbs> TargetPoseToFbs(
- const TargetMapper::TargetPose &target_pose,
- flatbuffers::FlatBufferBuilder *fbb);
- // Converts a TargetPoseFbs to a TargetPose
- static TargetMapper::TargetPose TargetPoseFromFbs(
- const TargetPoseFbs &target_pose_fbs);
-};
-
// Transforms robot position and target detection data into target constraints
// to be used for mapping.
class DataAdapter {
diff --git a/frc971/vision/target_mapper_test.cc b/frc971/vision/target_mapper_test.cc
index 4e9db6f..62f969c 100644
--- a/frc971/vision/target_mapper_test.cc
+++ b/frc971/vision/target_mapper_test.cc
@@ -12,11 +12,13 @@
#include "aos/testing/path.h"
#include "aos/testing/random_seed.h"
#include "aos/util/math.h"
+#include "vision_util_lib.h"
ABSL_DECLARE_FLAG(int32_t, min_target_id);
ABSL_DECLARE_FLAG(int32_t, max_target_id);
namespace frc971::vision {
+using frc971::vision::PoseUtils;
namespace {
constexpr double kToleranceMeters = 0.05;
@@ -151,44 +153,6 @@
} // namespace
-TEST(PoseUtilsTest, EulerAnglesAndQuaternionConversions) {
- // Make sure that the conversions are consistent back and forth.
- // These angles shouldn't get changed to a different, equivalent roll pitch
- // yaw.
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, M_PI);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, -M_PI);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, M_PI_2);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, -M_PI_2);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, 0.0);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, M_PI_4, 0.0);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, -M_PI_4, 0.0);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, -M_PI_4, M_PI_4);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(M_PI_4, -M_PI_4, M_PI_4);
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(-M_PI_2, -M_PI_4, M_PI_4);
-
- // Now, do a sweep of roll, pitch, and yaws in the normalized
- // range.
- // - roll: (-pi/2, pi/2)
- // - pitch: (-pi/2, pi/2)
- // - yaw: [-pi, pi)
- constexpr double kThetaMaxRoll = M_PI_2 - kToleranceRadians;
- constexpr double kThetaMaxPitch = M_PI_2 - kToleranceRadians;
- constexpr double kThetaMaxYaw = M_PI;
- constexpr double kDeltaTheta = M_PI / 16;
-
- for (double roll = -kThetaMaxRoll; roll < kThetaMaxRoll;
- roll += kDeltaTheta) {
- for (double pitch = -kThetaMaxPitch; pitch < kThetaMaxPitch;
- pitch += kDeltaTheta) {
- for (double yaw = -kThetaMaxYaw; yaw < kThetaMaxYaw; yaw += kDeltaTheta) {
- SCOPED_TRACE(
- absl::StrFormat("roll: %f, pitch: %f, yaw: %f", roll, pitch, yaw));
- EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(roll, pitch, yaw);
- }
- }
- }
-}
-
TEST(DataAdapterTest, ComputeConfidence) {
// Check the confidence matrices. Don't check the actual values
// in case the constants change, just check that the confidence of contraints
@@ -500,7 +464,7 @@
ceres::examples::MapOfPoses target_poses;
for (const auto *target_pose_fbs : *target_map_fbs.message().target_poses()) {
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
actual_target_poses.emplace_back(target_pose);
target_poses[target_pose.id] = target_pose.pose;
}
diff --git a/frc971/vision/vision_util_lib.cc b/frc971/vision/vision_util_lib.cc
index 089f3ac..944fa76 100644
--- a/frc971/vision/vision_util_lib.cc
+++ b/frc971/vision/vision_util_lib.cc
@@ -1,9 +1,14 @@
#include "frc971/vision/vision_util_lib.h"
+#include <numeric>
+
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
+#include "aos/util/math.h"
+#include "frc971/control_loops/quaternion_utils.h"
+
namespace frc971::vision {
std::optional<cv::Mat> CameraExtrinsics(
@@ -75,4 +80,147 @@
return calibration_filename;
}
+Eigen::Affine3d PoseUtils::Pose3dToAffine3d(
+ const ceres::examples::Pose3d &pose3d) {
+ Eigen::Affine3d H_world_pose =
+ Eigen::Translation3d(pose3d.p(0), pose3d.p(1), pose3d.p(2)) * pose3d.q;
+ return H_world_pose;
+}
+
+ceres::examples::Pose3d PoseUtils::Affine3dToPose3d(const Eigen::Affine3d &H) {
+ return ceres::examples::Pose3d{.p = H.translation(),
+ .q = Eigen::Quaterniond(H.rotation())};
+}
+
+ceres::examples::Pose3d PoseUtils::ComputeRelativePose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2) {
+ Eigen::Affine3d H_world_1 = Pose3dToAffine3d(pose_1);
+ Eigen::Affine3d H_world_2 = Pose3dToAffine3d(pose_2);
+
+ // Get the location of 2 in the 1 frame
+ Eigen::Affine3d H_1_2 = H_world_1.inverse() * H_world_2;
+ return Affine3dToPose3d(H_1_2);
+}
+
+ceres::examples::Pose3d PoseUtils::ComputeOffsetPose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2_relative) {
+ auto H_world_1 = Pose3dToAffine3d(pose_1);
+ auto H_1_2 = Pose3dToAffine3d(pose_2_relative);
+ auto H_world_2 = H_world_1 * H_1_2;
+
+ return Affine3dToPose3d(H_world_2);
+}
+
+Eigen::Quaterniond PoseUtils::EulerAnglesToQuaternion(
+ const Eigen::Vector3d &rpy) {
+ Eigen::AngleAxisd roll_angle(rpy.x(), Eigen::Vector3d::UnitX());
+ Eigen::AngleAxisd pitch_angle(rpy.y(), Eigen::Vector3d::UnitY());
+ Eigen::AngleAxisd yaw_angle(rpy.z(), Eigen::Vector3d::UnitZ());
+
+ return yaw_angle * pitch_angle * roll_angle;
+}
+
+Eigen::Vector3d PoseUtils::QuaternionToEulerAngles(
+ const Eigen::Quaterniond &q) {
+ return RotationMatrixToEulerAngles(q.toRotationMatrix());
+}
+
+Eigen::Vector3d PoseUtils::RotationMatrixToEulerAngles(
+ const Eigen::Matrix3d &R) {
+ double roll = aos::math::NormalizeAngle(std::atan2(R(2, 1), R(2, 2)));
+ double pitch = aos::math::NormalizeAngle(-std::asin(R(2, 0)));
+ double yaw = aos::math::NormalizeAngle(std::atan2(R(1, 0), R(0, 0)));
+
+ return Eigen::Vector3d(roll, pitch, yaw);
+}
+
+// Compute the average pose given a list of translations (as
+// Eigen::Vector3d's) and rotations (as Eigen::Vector4d quaternions),
+// returned as an Eigen::Affine3d pose.
+// Also, compute the variance of each of these lists of vectors.
+
+// NOTE: variance for rotations can get dicey, so we've cheated a
+// little by doing two things:
+// 1) Computing variance relative to the mean, so that we're doing
+// this on small angles and don't have to deal with wrapping at
+// 180/360 degrees
+// 2) Returning the variance in Euler angles, since I'm not sure of a
+// better way to represent variance for rotations. (Maybe log of
+// rotations, in the Lie algebra?)
+
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<Eigen::Vector3d> &translation_list,
+ std::vector<Eigen::Vector4d> &rotation_list,
+ Eigen::Vector3d *translation_variance, Eigen::Vector3d *rotation_variance) {
+ Eigen::Vector3d avg_translation =
+ std::accumulate(translation_list.begin(), translation_list.end(),
+ Eigen::Vector3d{0, 0, 0}) /
+ translation_list.size();
+
+ Eigen::Quaterniond avg_rotation_q(
+ frc971::controls::QuaternionMean(rotation_list));
+ Eigen::Affine3d average_pose =
+ Eigen::Translation3d(avg_translation) * avg_rotation_q;
+
+ CHECK_EQ(translation_list.size(), rotation_list.size());
+ if (translation_variance != nullptr) {
+ CHECK(rotation_variance != nullptr);
+ Eigen::Vector3d translation_variance_sum(0.0, 0.0, 0.0);
+ Eigen::Vector3d rotation_variance_sum(0.0, 0.0, 0.0);
+ for (uint i = 0; i < translation_list.size(); i++) {
+ Eigen::Quaterniond rotation_q(rotation_list[i]);
+ Eigen::Affine3d pose =
+ Eigen::Translation3d(translation_list[i]) * rotation_q;
+ Eigen::Affine3d delta_pose = average_pose * pose.inverse();
+ translation_variance_sum =
+ translation_variance_sum +
+ Eigen::Vector3d(delta_pose.translation().array().square());
+ rotation_variance_sum =
+ rotation_variance_sum +
+ Eigen::Vector3d(PoseUtils::RotationMatrixToEulerAngles(
+ delta_pose.rotation().matrix())
+ .array()
+ .square());
+ }
+ // Compute the variance on the translations (in m)
+ if (translation_variance != nullptr) {
+ CHECK(translation_list.size() > 1)
+ << "Have to have at least two translations to compute variance";
+ *translation_variance =
+ translation_variance_sum / translation_list.size();
+ }
+
+    // Compute the variance on the rotations (in Euler angles, radians),
+ // referenced to the mean, to remove issues with Euler angles by
+ // keeping them near zero
+ if (rotation_variance != nullptr) {
+ CHECK(rotation_list.size() > 1)
+ << "Have to have at least two rotations to compute variance";
+ *rotation_variance = rotation_variance_sum / rotation_list.size();
+ }
+ }
+ return average_pose;
+}
+
+// Helper function to compute average pose when supplied with list
+// of Eigen::Affine3d's
+Eigen::Affine3d ComputeAveragePose(std::vector<Eigen::Affine3d> &pose_list,
+ Eigen::Vector3d *translation_variance,
+ Eigen::Vector3d *rotation_variance) {
+ std::vector<Eigen::Vector3d> translation_list;
+ std::vector<Eigen::Vector4d> rotation_list;
+
+ for (Eigen::Affine3d pose : pose_list) {
+ translation_list.push_back(pose.translation());
+ Eigen::Quaterniond quat(pose.rotation().matrix());
+ rotation_list.push_back(
+ Eigen::Vector4d(quat.x(), quat.y(), quat.z(), quat.w()));
+ }
+
+ return ComputeAveragePose(translation_list, rotation_list,
+ translation_variance, rotation_variance);
+}
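+
+// Minimal usage sketch (poses hypothetical); requires >= 2 poses if the
+// variance outputs are requested:
+//   std::vector<Eigen::Affine3d> poses = {pose_a, pose_b};
+//   Eigen::Vector3d t_var, r_var;
+//   Eigen::Affine3d avg = ComputeAveragePose(poses, &t_var, &r_var);
+// t_var is in m^2; r_var is in rad^2, as Euler angles relative to the mean.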
+
} // namespace frc971::vision
diff --git a/frc971/vision/vision_util_lib.h b/frc971/vision/vision_util_lib.h
index a26af3b..ee621dd 100644
--- a/frc971/vision/vision_util_lib.h
+++ b/frc971/vision/vision_util_lib.h
@@ -3,9 +3,13 @@
#include <optional>
#include <string_view>
+#include "Eigen/Dense"
+#include "Eigen/Geometry"
+#include "Eigen/SVD"
#include "opencv2/imgproc.hpp"
#include "frc971/vision/calibration_generated.h"
+#include "frc971/vision/ceres/types.h"
// Extract the CameraExtrinsics from a CameraCalibration struct
namespace frc971::vision {
@@ -31,6 +35,55 @@
int camera_number, std::string camera_id,
std::string timestamp);
+// Utility functions for dealing with ceres::examples::Pose3d structs
+class PoseUtils {
+ public:
+ // Embeds a 3d pose into an affine transformation
+ static Eigen::Affine3d Pose3dToAffine3d(
+ const ceres::examples::Pose3d &pose3d);
+ // Inverse of above function
+ static ceres::examples::Pose3d Affine3dToPose3d(const Eigen::Affine3d &H);
+
+ // Computes pose_2 relative to pose_1. This is equivalent to (pose_1^-1 *
+ // pose_2)
+ static ceres::examples::Pose3d ComputeRelativePose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2);
+
+ // Computes pose_2 given a pose_1 and pose_2 relative to pose_1. This is
+ // equivalent to (pose_1 * pose_2_relative)
+ static ceres::examples::Pose3d ComputeOffsetPose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2_relative);
+
+ // Converts a rotation with roll, pitch, and yaw into a quaternion
+ static Eigen::Quaterniond EulerAnglesToQuaternion(const Eigen::Vector3d &rpy);
+ // Inverse of above function
+ static Eigen::Vector3d QuaternionToEulerAngles(const Eigen::Quaterniond &q);
+ // Converts a 3d rotation matrix into a rotation with roll, pitch, and yaw
+ static Eigen::Vector3d RotationMatrixToEulerAngles(const Eigen::Matrix3d &R);
+};
+
+// Compute the average of a set of quaternions
+Eigen::Vector4d QuaternionAverage(std::vector<Eigen::Vector4d> quaternions);
+
+// Compute the average pose given a list of translations (as
+// Eigen::Vector3d's) and rotations (as Eigen::Vector4d quaternions),
+// returned as an Eigen::Affine3d pose.
+// Also, compute the variance of each of these lists of vectors.
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<Eigen::Vector3d> &translation_list,
+ std::vector<Eigen::Vector4d> &rotation_list,
+ Eigen::Vector3d *translation_variance = nullptr,
+ Eigen::Vector3d *rotation_variance = nullptr);
+
+// Helper function to compute average pose when supplied with list
+// of Eigen::Affine3d's
+Eigen::Affine3d ComputeAveragePose(
+ std::vector<Eigen::Affine3d> &pose_list,
+ Eigen::Vector3d *translation_variance = nullptr,
+ Eigen::Vector3d *rotation_variance = nullptr);
+
} // namespace frc971::vision
#endif // FRC971_VISION_VISION_UTIL_LIB_H_
diff --git a/frc971/vision/vision_util_lib_test.cc b/frc971/vision/vision_util_lib_test.cc
index ff9c0a3..bdf8258 100644
--- a/frc971/vision/vision_util_lib_test.cc
+++ b/frc971/vision/vision_util_lib_test.cc
@@ -1,7 +1,10 @@
#include "frc971/vision/vision_util_lib.h"
+#include "absl/strings/str_format.h"
#include "gtest/gtest.h"
+#include "aos/util/math.h"
+
namespace frc971::vision {
// For now, just testing extracting camera number from channel name
TEST(VisionUtilsTest, CameraNumberFromChannel) {
@@ -12,4 +15,114 @@
ASSERT_EQ(CameraNumberFromChannel("/orin1/camera1").value(), 1);
ASSERT_EQ(CameraNumberFromChannel("/orin1"), std::nullopt);
}
+
+namespace {
+constexpr double kToleranceRadians = 0.05;
+// Conversions between Euler angles and quaternions result in slightly-off
+// doubles
+constexpr double kOrientationEqTolerance = 1e-10;
+} // namespace
+
+// Angles normalized by aos::math::NormalizeAngle()
+#define EXPECT_NORMALIZED_ANGLES_NEAR(theta1, theta2, tolerance) \
+ { \
+ double delta = std::abs(aos::math::DiffAngle(theta1, theta2)); \
+ /* Have to check delta - 2pi for the case that one angle is very */ \
+ /* close to -pi, and the other is very close to +pi */ \
+ EXPECT_TRUE(delta < tolerance || std::abs(aos::math::DiffAngle( \
+ delta, 2.0 * M_PI)) < tolerance); \
+ }
+
+#define EXPECT_POSE_NEAR(pose1, pose2) \
+ { \
+ for (size_t i = 0; i < 3; i++) { \
+ EXPECT_NEAR(pose1.p(i), pose2.p(i), kToleranceMeters); \
+ } \
+ auto rpy_1 = PoseUtils::QuaternionToEulerAngles(pose1.q); \
+ auto rpy_2 = PoseUtils::QuaternionToEulerAngles(pose2.q); \
+ for (size_t i = 0; i < 3; i++) { \
+ SCOPED_TRACE(absl::StrFormat("rpy_1(%d) = %f, rpy_2(%d) = %f", i, \
+ rpy_1(i), i, rpy_2(i))); \
+ EXPECT_NORMALIZED_ANGLES_NEAR(rpy_1(i), rpy_2(i), kToleranceRadians) \
+ } \
+ }
+
+#define EXPECT_POSE_EQ(pose1, pose2) \
+ EXPECT_EQ(pose1.p, pose2.p); \
+ EXPECT_EQ(pose1.q, pose2.q);
+
+#define EXPECT_QUATERNION_NEAR(q1, q2) \
+ EXPECT_NEAR(q1.x(), q2.x(), kOrientationEqTolerance) << q1 << " != " << q2; \
+ EXPECT_NEAR(q1.y(), q2.y(), kOrientationEqTolerance) << q1 << " != " << q2; \
+ EXPECT_NEAR(q1.z(), q2.z(), kOrientationEqTolerance) << q1 << " != " << q2; \
+ EXPECT_NEAR(q1.w(), q2.w(), kOrientationEqTolerance) << q1 << " != " << q2;
+
+// Expects same roll, pitch, and yaw values (not equivalent rotation)
+#define EXPECT_RPY_EQ(rpy_1, rpy_2) \
+ { \
+ for (size_t i = 0; i < 3; i++) { \
+ SCOPED_TRACE(absl::StrFormat("rpy_1(%d) = %f, rpy_2(%d) = %f", i, \
+ rpy_1(i), i, rpy_2(i))); \
+ EXPECT_NORMALIZED_ANGLES_NEAR(rpy_1(i), rpy_2(i), \
+ kOrientationEqTolerance) \
+ } \
+ }
+
+#define EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(roll, pitch, yaw) \
+ { \
+ auto rpy = Eigen::Vector3d(roll, pitch, yaw); \
+ auto converted_rpy = PoseUtils::QuaternionToEulerAngles( \
+ PoseUtils::EulerAnglesToQuaternion(rpy)); \
+ EXPECT_RPY_EQ(converted_rpy, rpy); \
+ }
+
+// Both confidence matrices should have the same dimensions and be square
+#define EXPECT_CONFIDENCE_GT(confidence1, confidence2) \
+ { \
+ ASSERT_EQ(confidence1.rows(), confidence2.rows()); \
+ ASSERT_EQ(confidence1.rows(), confidence1.cols()); \
+ ASSERT_EQ(confidence2.rows(), confidence2.cols()); \
+ for (size_t i = 0; i < confidence1.rows(); i++) { \
+ EXPECT_GT(confidence1(i, i), confidence2(i, i)); \
+ } \
+ }
+
+TEST(PoseUtilsTest, EulerAnglesAndQuaternionConversions) {
+ // Make sure that the conversions are consistent back and forth.
+ // These angles shouldn't get changed to a different, equivalent roll pitch
+ // yaw.
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, M_PI);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, -M_PI);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, M_PI_2);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, -M_PI_2);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, 0.0, 0.0);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, M_PI_4, 0.0);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, -M_PI_4, 0.0);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(0.0, -M_PI_4, M_PI_4);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(M_PI_4, -M_PI_4, M_PI_4);
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(-M_PI_2, -M_PI_4, M_PI_4);
+
+ // Now, do a sweep of roll, pitch, and yaws in the normalized
+ // range.
+ // - roll: (-pi/2, pi/2)
+ // - pitch: (-pi/2, pi/2)
+ // - yaw: [-pi, pi)
+ constexpr double kThetaMaxRoll = M_PI_2 - kToleranceRadians;
+ constexpr double kThetaMaxPitch = M_PI_2 - kToleranceRadians;
+ constexpr double kThetaMaxYaw = M_PI;
+ constexpr double kDeltaTheta = M_PI / 16;
+
+ for (double roll = -kThetaMaxRoll; roll < kThetaMaxRoll;
+ roll += kDeltaTheta) {
+ for (double pitch = -kThetaMaxPitch; pitch < kThetaMaxPitch;
+ pitch += kDeltaTheta) {
+ for (double yaw = -kThetaMaxYaw; yaw < kThetaMaxYaw; yaw += kDeltaTheta) {
+ SCOPED_TRACE(
+ absl::StrFormat("roll: %f, pitch: %f, yaw: %f", roll, pitch, yaw));
+ EXPECT_EULER_ANGLES_QUATERNION_BACK_AND_FORTH_EQ(roll, pitch, yaw);
+ }
+ }
+ }
+}
+
} // namespace frc971::vision
diff --git a/y2023/vision/april_detection_test.cc b/y2023/vision/april_detection_test.cc
index 9f4f8b7..7e50b09 100644
--- a/y2023/vision/april_detection_test.cc
+++ b/y2023/vision/april_detection_test.cc
@@ -11,6 +11,7 @@
#include "frc971/constants/constants_sender_lib.h"
#include "frc971/vision/target_mapper.h"
#include "frc971/vision/vision_generated.h"
+#include "frc971/vision/vision_util_lib.h"
#include "y2023/constants/constants_generated.h"
#include "y2023/constants/constants_list_generated.h"
#include "y2023/vision/aprilrobotics.h"
@@ -61,7 +62,7 @@
ASSERT_EQ(april_pose_fetcher_->target_poses()->size(), 1);
frc971::vision::TargetMapper::TargetPose target_pose =
- frc971::vision::PoseUtils::TargetPoseFromFbs(
+ frc971::vision::TargetMapper::TargetPoseFromFbs(
*april_pose_fetcher_->target_poses()->Get(0));
ASSERT_EQ(april_pose_fetcher_->target_poses()->Get(0)->id(), 8);
diff --git a/y2023/vision/calibrate_multi_cameras.cc b/y2023/vision/calibrate_multi_cameras.cc
index 8032c764..5c29797 100644
--- a/y2023/vision/calibrate_multi_cameras.cc
+++ b/y2023/vision/calibrate_multi_cameras.cc
@@ -13,6 +13,7 @@
#include "frc971/vision/charuco_lib.h"
#include "frc971/vision/extrinsics_calibration.h"
#include "frc971/vision/target_mapper.h"
+#include "frc971/vision/vision_util_lib.h"
#include "frc971/vision/visualize_robot.h"
// clang-format off
// OpenCV eigen files must be included after Eigen includes
@@ -362,7 +363,7 @@
}
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
target_poses.emplace_back(target_pose);
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index f33a83a..325ad90 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -15,6 +15,7 @@
#include "frc971/control_loops/pose.h"
#include "frc971/vision/calibration_generated.h"
#include "frc971/vision/target_mapper.h"
+#include "frc971/vision/vision_util_lib.h"
#include "frc971/vision/visualize_robot.h"
#include "y2023/constants/simulated_constants_sender.h"
#include "y2023/vision/aprilrobotics.h"
@@ -239,7 +240,7 @@
}
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
Eigen::Affine3d H_camera_target =
Eigen::Translation3d(target_pose.pose.p) * target_pose.pose.q;
diff --git a/y2024/vision/calibrate_multi_cameras.cc b/y2024/vision/calibrate_multi_cameras.cc
index 7a9f412..b0fa33b 100644
--- a/y2024/vision/calibrate_multi_cameras.cc
+++ b/y2024/vision/calibrate_multi_cameras.cc
@@ -40,6 +40,9 @@
"Throw out target poses with a higher pose error than this");
ABSL_FLAG(double, max_pose_error_ratio, 0.4,
"Throw out target poses with a higher pose error ratio than this");
+ABSL_FLAG(bool, robot, false,
+ "If true we're calibrating extrinsics for the robot, use the "
+ "correct node path for the robot.");
ABSL_FLAG(std::string, output_folder, "/tmp",
"Directory in which to store the updated calibration files");
ABSL_FLAG(std::string, target_type, "charuco_diamond",
@@ -49,12 +52,10 @@
ABSL_FLAG(
uint64_t, wait_key, 1,
"Time in ms to wait between images (0 to wait indefinitely until click)");
-ABSL_FLAG(bool, robot, false,
- "If true we're calibrating extrinsics for the robot, use the "
- "correct node path for the robot.");
ABSL_DECLARE_FLAG(int32_t, min_target_id);
ABSL_DECLARE_FLAG(int32_t, max_target_id);
+ABSL_DECLARE_FLAG(double, outlier_std_devs);
// Calibrate extrinsic relationship between cameras using two targets
// seen jointly between cameras. Uses two types of information: 1)
@@ -72,8 +73,6 @@
// from all cameras, not just first camera
namespace y2024::vision {
-using frc971::vision::DataAdapter;
-using frc971::vision::ImageCallback;
using frc971::vision::PoseUtils;
using frc971::vision::TargetMap;
using frc971::vision::TargetMapper;
@@ -83,8 +82,8 @@
static constexpr double kImagePeriodMs =
1.0 / 60.0 * 1000.0; // Image capture period in ms
+// Set up our camera naming data
std::vector<CameraNode> node_list(y2024::vision::CreateNodeList());
-
std::map<std::string, int> ordering_map(
y2024::vision::CreateOrderingMap(node_list));
@@ -111,57 +110,8 @@
std::vector<TimestampedCameraDetection> two_board_extrinsics_list;
VisualizeRobot vis_robot_;
-// TODO<jim>: Implement variance calcs
-Eigen::Affine3d ComputeAveragePose(
- std::vector<Eigen::Vector3d> &translation_list,
- std::vector<Eigen::Vector4d> &rotation_list,
- Eigen::Vector3d *translation_variance = nullptr,
- Eigen::Vector3d *rotation_variance = nullptr) {
- Eigen::Vector3d avg_translation =
- std::accumulate(translation_list.begin(), translation_list.end(),
- Eigen::Vector3d{0, 0, 0}) /
- translation_list.size();
-
- // TODO<Jim>: Use QuaternionMean from quaternion_utils.cc (but this
- // requires a fixed number of quaternions to be averaged
- Eigen::Vector4d avg_rotation =
- std::accumulate(rotation_list.begin(), rotation_list.end(),
- Eigen::Vector4d{0, 0, 0, 0}) /
- rotation_list.size();
- // Normalize, so it's a valid quaternion
- avg_rotation = avg_rotation / avg_rotation.norm();
- Eigen::Quaterniond avg_rotation_q{avg_rotation[0], avg_rotation[1],
- avg_rotation[2], avg_rotation[3]};
- Eigen::Translation3d translation(avg_translation);
- Eigen::Affine3d return_pose = translation * avg_rotation_q;
- if (translation_variance != nullptr) {
- *translation_variance = Eigen::Vector3d();
- }
- if (rotation_variance != nullptr) {
- *rotation_variance = Eigen::Vector3d();
- }
-
- return return_pose;
-}
-
-Eigen::Affine3d ComputeAveragePose(
- std::vector<Eigen::Affine3d> &pose_list,
- Eigen::Vector3d *translation_variance = nullptr,
- Eigen::Vector3d *rotation_variance = nullptr) {
- std::vector<Eigen::Vector3d> translation_list;
- std::vector<Eigen::Vector4d> rotation_list;
-
- for (Eigen::Affine3d pose : pose_list) {
- translation_list.push_back(pose.translation());
- Eigen::Quaterniond quat(pose.rotation().matrix());
- rotation_list.push_back(
- Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
- }
-
- return ComputeAveragePose(translation_list, rotation_list,
- translation_variance, rotation_variance);
-}
-
+// Helper function to compute average pose when supplied with list
+// of TimestampedCameraDetection's
Eigen::Affine3d ComputeAveragePose(
std::vector<TimestampedCameraDetection> &pose_list,
Eigen::Vector3d *translation_variance = nullptr,
@@ -172,18 +122,95 @@
for (TimestampedCameraDetection pose : pose_list) {
translation_list.push_back(pose.H_camera_target.translation());
Eigen::Quaterniond quat(pose.H_camera_target.rotation().matrix());
+    // NOTE: Eigen initializes as (x, y, z, w) from Vector4d's
rotation_list.push_back(
- Eigen::Vector4d(quat.w(), quat.x(), quat.y(), quat.z()));
+ Eigen::Vector4d(quat.x(), quat.y(), quat.z(), quat.w()));
}
- return ComputeAveragePose(translation_list, rotation_list,
- translation_variance, rotation_variance);
+ return frc971::vision::ComputeAveragePose(
+ translation_list, rotation_list, translation_variance, rotation_variance);
}
+// Do outlier rejection. Given a list of poses, compute the
+// mean and standard deviation, and throw out those more than
+// FLAGS_outlier_std_devs standard deviations away from the mean.
+// Repeat for the desired number of iterations or until we don't throw
+// out any more outliers.
+void RemoveOutliers(std::vector<TimestampedCameraDetection> &pose_list,
+ int num_iterations = 1) {
+ for (int i = 1; i <= num_iterations; i++) {
+ Eigen::Vector3d translation_variance, rotation_variance;
+ Eigen::Affine3d avg_pose = ComputeAveragePose(
+ pose_list, &translation_variance, &rotation_variance);
+
+ size_t original_size = pose_list.size();
+ // Create a lambda to identify the outliers to be removed
+ auto remove_lambda = [translation_variance, rotation_variance,
+ avg_pose](const TimestampedCameraDetection &pose) {
+ Eigen::Affine3d H_delta = avg_pose * pose.H_camera_target.inverse();
+ // Compute the z-score for each dimension, and use the max to
+ // decide on outliers. This is similar to the Mahalanobis
+ // distance (scale by inverse of the covariance matrix), but we're
+ // treating each dimension independently
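+      // (i.e., per-axis z_i = |delta_i| / sigma_i; the max z_i is compared
+      // against FLAGS_outlier_std_devs below)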
+ Eigen::Matrix3d translation_sigma = translation_variance.asDiagonal();
+ Eigen::Matrix3d rotation_sigma = rotation_variance.asDiagonal();
+ Eigen::Vector3d delta_rotation =
+ PoseUtils::RotationMatrixToEulerAngles(H_delta.rotation().matrix());
+ double max_translation_z_score = std::sqrt(
+ H_delta.translation()
+ .cwiseProduct(translation_sigma.inverse() * H_delta.translation())
+ .maxCoeff());
+ double max_rotation_z_score = std::sqrt(static_cast<double>(
+ (delta_rotation.array() *
+ (rotation_sigma.inverse() * delta_rotation).array())
+ .maxCoeff()));
+ double z_score = std::max(max_translation_z_score, max_rotation_z_score);
+ // Remove observations that vary significantly from the mean
+ if (z_score > absl::GetFlag(FLAGS_outlier_std_devs)) {
+ VLOG(1) << "Removing outlier with z_score " << z_score
+ << " relative to std dev = "
+ << absl::GetFlag(FLAGS_outlier_std_devs);
+ return true;
+ }
+ return false;
+ };
+ pose_list.erase(
+ std::remove_if(pose_list.begin(), pose_list.end(), remove_lambda),
+ pose_list.end());
+
+ VLOG(1) << "Iteration #" << i << ": removed "
+ << (original_size - pose_list.size())
+ << " outlier constraints out of " << original_size
+ << " total\nStd Dev's are: "
+ << translation_variance.array().sqrt().transpose() << "m and "
+ << rotation_variance.array().sqrt().transpose() * 180.0 / M_PI
+ << "deg";
+ if (original_size - pose_list.size() == 0) {
+ VLOG(1) << "At step " << i
+ << ", ending outlier rejection early due to convergence at "
+ << pose_list.size() << " elements.\nStd Dev's are: "
+ << translation_variance.array().sqrt().transpose() << "m and "
+ << rotation_variance.array().sqrt().transpose() * 180.0 / M_PI
+ << "deg";
+ break;
+ }
+ }
+}
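+
+// RemoveOutliers is applied below to both the two-board observation list
+// and each camera-to-camera pose list, e.g.
+// RemoveOutliers(two_board_extrinsics_list);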
+
+static std::map<std::string, aos::distributed_clock::time_point>
+ last_eofs_debug;
+int display_count = 1;
+
+// Take in list of poses from a camera observation and add to running list
+// One of two options:
+// 1) We see two boards in one view-- store this to get an estimate of
+// the offset between the two boards
+// 2) We see just one board-- save this and try to pair it with a previous
+// observation from another camera
void HandlePoses(cv::Mat rgb_image,
std::vector<TargetMapper::TargetPose> target_poses,
aos::distributed_clock::time_point distributed_eof,
std::string camera_name) {
- // This is used to transform points for visualization
+ // This (H_world_board) is used to transform points for visualization
// Assumes targets are aligned with x->right, y->up, z->out of board
Eigen::Affine3d H_world_board;
H_world_board = Eigen::Translation3d::Identity() *
@@ -229,6 +256,15 @@
// Store this observation of the transform between two boards
two_board_extrinsics_list.push_back(boardA_boardB);
+ // Also, store one of the observations, so we can potentially
+ // match against the next single-target observation that comes in
+ TimestampedCameraDetection new_observation{
+ .time = distributed_eof,
+ .H_camera_target = H_camera_boardA,
+ .camera_name = camera_name,
+ .board_id = target_poses[from_index].id};
+ last_observation = new_observation;
+
if (absl::GetFlag(FLAGS_visualize)) {
vis_robot_.DrawFrameAxes(
H_world_board,
@@ -252,8 +288,9 @@
.board_id = target_poses[0].id};
// Only take two observations if they're within half an image cycle of
- // each other (i.e., as close in time as possible) And, if two consecutive
- // observations are from the same camera, just replace with the newest one
+ // each other (i.e., as close in time as possible). And, if two
+ // consecutive observations are from the same camera, just replace with
+ // the newest one
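+    // (The time delta is in distributed_clock ticks, assumed here to be
+    // nanoseconds, hence the 1e6 factor converting kImagePeriodMs to ns)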
if ((new_observation.camera_name != last_observation.camera_name) &&
(std::abs((distributed_eof - last_observation.time).count()) <
kImagePeriodMs / 2.0 * 1000000.0)) {
@@ -271,44 +308,51 @@
detection_list.push_back(new_pair);
// This bit is just for visualization and checking purposes-- use the
- // last two-board observation to figure out the current estimate
- // between the two cameras
+ // first two-board observation to figure out the current estimate
+ // between the two cameras (this could be incorrect, but it keeps it
+ // constant)
if (absl::GetFlag(FLAGS_visualize) &&
two_board_extrinsics_list.size() > 0) {
draw_vis = true;
- TimestampedCameraDetection &last_two_board_ext =
- two_board_extrinsics_list.back();
- Eigen::Affine3d &H_boardA_boardB = last_two_board_ext.H_camera_target;
- int boardA_boardB_id = last_two_board_ext.board_id;
+ TimestampedCameraDetection &first_two_board_ext =
+ two_board_extrinsics_list.front();
+ Eigen::Affine3d &H_boardA_boardB = first_two_board_ext.H_camera_target;
+ int boardA_id = first_two_board_ext.board_id;
TimestampedCameraDetection camera1_boardA = new_pair.first;
- TimestampedCameraDetection camera2_boardB = new_pair.second;
- // If camera1_boardA doesn't point to the first target, then swap
- // these two
- if (camera1_boardA.board_id != boardA_boardB_id) {
- camera1_boardA = new_pair.second;
- camera2_boardB = new_pair.first;
+ TimestampedCameraDetection camera2_boardA = new_pair.second;
+ Eigen::Affine3d H_camera1_boardA = camera1_boardA.H_camera_target;
+ Eigen::Affine3d H_camera2_boardA = camera2_boardA.H_camera_target;
+ // If camera1_boardA doesn't currently point to boardA, then fix it
+ if (camera1_boardA.board_id != boardA_id) {
+ H_camera1_boardA = H_camera1_boardA * H_boardA_boardB.inverse();
+ }
+ // If camera2_boardA doesn't currently point to boardA, then fix it
+ if (camera2_boardA.board_id != boardA_id) {
+ H_camera2_boardA = H_camera2_boardA * H_boardA_boardB.inverse();
}
VLOG(1) << "Camera " << camera1_boardA.camera_name << " seeing board "
<< camera1_boardA.board_id << " and camera "
- << camera2_boardB.camera_name << " seeing board "
- << camera2_boardB.board_id;
+ << camera2_boardA.camera_name << " seeing board "
+ << camera2_boardA.board_id;
- vis_robot_.DrawRobotOutline(
- H_world_board * camera1_boardA.H_camera_target.inverse(),
- camera1_boardA.camera_name,
- kOrinColors.at(camera1_boardA.camera_name));
- vis_robot_.DrawRobotOutline(
- H_world_board * H_boardA_boardB *
- camera2_boardB.H_camera_target.inverse(),
- camera2_boardB.camera_name,
- kOrinColors.at(camera2_boardB.camera_name));
+          // Draw the two poses of the cameras, and the locations of the
+          // boards. We use "Board A" as the origin (with everything relative
+          // to H_world_board)
+ vis_robot_.DrawRobotOutline(H_world_board * H_camera1_boardA.inverse(),
+ camera1_boardA.camera_name,
+ kOrinColors.at(camera1_boardA.camera_name));
+ vis_robot_.DrawRobotOutline(H_world_board * H_camera2_boardA.inverse(),
+ camera2_boardA.camera_name,
+ kOrinColors.at(camera2_boardA.camera_name));
vis_robot_.DrawFrameAxes(
H_world_board,
- std::string("Board ") + std::to_string(last_two_board_ext.board_id),
+ std::string("Board ") +
+ std::to_string(first_two_board_ext.board_id),
cv::Scalar(0, 255, 0));
vis_robot_.DrawFrameAxes(H_world_board * H_boardA_boardB, "Board B",
cv::Scalar(255, 0, 0));
+
VLOG(1) << "Storing observation between " << new_pair.first.camera_name
<< ", target " << new_pair.first.board_id << " and "
<< new_pair.second.camera_name << ", target "
@@ -341,6 +385,10 @@
}
if (draw_vis) {
+ cv::putText(vis_robot_.image_,
+ "Display #" + std::to_string(display_count++),
+ cv::Point(600, 10), cv::FONT_HERSHEY_PLAIN, 1.0,
+ cv::Scalar(255, 255, 255));
cv::imshow("Overhead View", vis_robot_.image_);
cv::waitKey(absl::GetFlag(FLAGS_wait_key));
vis_robot_.ClearImage();
@@ -355,6 +403,14 @@
// Create empty RGB image in this case
cv::Mat rgb_image;
std::vector<TargetMapper::TargetPose> target_poses;
+ VLOG(1) << ": Diff since last image from " << camera_name << " is "
+ << (distributed_eof - last_eofs_debug.at(camera_name)).count() /
+ 1000000.0
+ << "ms";
+
+ if (last_eofs_debug.find(camera_name) == last_eofs_debug.end()) {
+ last_eofs_debug[camera_name] = distributed_eof;
+ }
for (const auto *target_pose_fbs : *map.target_poses()) {
// Skip detections with invalid ids
@@ -384,7 +440,7 @@
}
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
target_poses.emplace_back(target_pose);
@@ -393,7 +449,13 @@
VLOG(1) << camera_name << " saw target " << target_pose.id
<< " from TargetMap at timestamp " << distributed_eof
<< " with pose = " << H_camera_target.matrix();
+ LOG(INFO) << "pose info for target " << target_pose_fbs->id()
+ << ": \nconfidence: " << target_pose_fbs->confidence()
+ << ", pose_error: " << target_pose_fbs->pose_error()
+ << ", pose_error_ratio: " << target_pose_fbs->pose_error_ratio()
+ << ", dist_factor: " << target_pose_fbs->distortion_factor();
}
+ last_eofs_debug[camera_name] = distributed_eof;
HandlePoses(rgb_image, target_poses, distributed_eof, camera_name);
}
@@ -432,12 +494,59 @@
HandlePoses(rgb_image, target_poses, distributed_eof, camera_name);
}
+void WriteExtrinsicFile(Eigen::Affine3d extrinsic, CameraNode camera_node,
+ const calibration::CameraCalibration *original_cal) {
+ // Write out this extrinsic to a file
+ flatbuffers::FlatBufferBuilder fbb;
+ flatbuffers::Offset<flatbuffers::Vector<float>> data_offset =
+ fbb.CreateVector(frc971::vision::MatrixToVector(extrinsic.matrix()));
+ calibration::TransformationMatrix::Builder matrix_builder(fbb);
+ matrix_builder.add_data(data_offset);
+ flatbuffers::Offset<calibration::TransformationMatrix>
+ extrinsic_calibration_offset = matrix_builder.Finish();
+
+ calibration::CameraCalibration::Builder calibration_builder(fbb);
+ calibration_builder.add_fixed_extrinsics(extrinsic_calibration_offset);
+ const aos::realtime_clock::time_point realtime_now =
+ aos::realtime_clock::now();
+ calibration_builder.add_calibration_timestamp(
+ realtime_now.time_since_epoch().count());
+ fbb.Finish(calibration_builder.Finish());
+ aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+ solved_extrinsics = fbb.Release();
+
+ aos::FlatbufferDetachedBuffer<frc971::vision::calibration::CameraCalibration>
+ cal_copy = aos::RecursiveCopyFlatBuffer(original_cal);
+ cal_copy.mutable_message()->clear_fixed_extrinsics();
+ cal_copy.mutable_message()->clear_calibration_timestamp();
+ aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+ merged_calibration = aos::MergeFlatBuffers(&cal_copy.message(),
+ &solved_extrinsics.message());
+
+ std::stringstream time_ss;
+ time_ss << realtime_now;
+
+ const std::string calibration_filename = frc971::vision::CalibrationFilename(
+ absl::GetFlag(FLAGS_output_folder), camera_node.node_name,
+ absl::GetFlag(FLAGS_team_number), camera_node.camera_number,
+ cal_copy.message().camera_id()->data(), time_ss.str());
+ LOG(INFO) << calibration_filename << " -> "
+ << aos::FlatbufferToJson(merged_calibration, {.multi_line = true});
+
+ aos::util::WriteStringToFileOrDie(
+ calibration_filename,
+ aos::FlatbufferToJson(merged_calibration, {.multi_line = true}));
+}
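+
+// Called below once per solved camera, e.g.:
+//   WriteExtrinsicFile(next_extrinsic, node_list[i + 1],
+//                      calibration_list[i + 1]);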
+
void ExtrinsicsMain(int argc, char *argv[]) {
vis_robot_ = VisualizeRobot(cv::Size(1000, 1000));
vis_robot_.ClearImage();
const double kFocalLength = 1000.0;
const int kImageWidth = 1000;
vis_robot_.SetDefaultViewpoint(kImageWidth, kFocalLength);
+ LOG(INFO) << "COPYTHIS, count, camera_name, target_id, timestamp, mag_T, "
+ "mag_R_deg, "
+ "confidence, pose_error, pose_error_ratio, distortion_factor";
std::optional<aos::FlatbufferDetachedBuffer<aos::Configuration>> config =
(absl::GetFlag(FLAGS_config).empty()
@@ -467,7 +576,6 @@
std::vector<std::unique_ptr<aos::EventLoop>> detection_event_loops;
std::vector<frc971::vision::CharucoExtractor *> charuco_extractors;
- std::vector<frc971::vision::ImageCallback *> image_callbacks;
std::vector<Eigen::Affine3d> default_extrinsics;
for (const CameraNode &camera_node : node_list) {
@@ -514,17 +622,50 @@
});
VLOG(1) << "Created watcher for using the detection event loop for "
<< camera_node.camera_name();
+
+ // Display images, if they exist
+ std::string camera_name = camera_node.camera_name();
+ detection_event_loops.back()->MakeWatcher(
+ camera_name.c_str(),
+ [camera_name](const frc971::vision::CameraImage &image) {
+ cv::Mat image_color_mat(cv::Size(image.cols(), image.rows()), CV_8UC2,
+ (void *)image.data()->data());
+ cv::Mat bgr_image(cv::Size(image.cols(), image.rows()), CV_8UC3);
+ cv::cvtColor(image_color_mat, bgr_image, cv::COLOR_YUV2BGR_YUYV);
+ cv::resize(bgr_image, bgr_image, cv::Size(500, 500));
+ cv::imshow(camera_name.c_str(), bgr_image);
+ cv::waitKey(1);
+ });
}
reader.event_loop_factory()->Run();
- // Do quick check to see what averaged two-board pose for
- // each camera is individually
CHECK_GT(two_board_extrinsics_list.size(), 0u)
<< "Must have at least one view of both boards";
int base_target_id = two_board_extrinsics_list[0].board_id;
VLOG(1) << "Base id for two_board_extrinsics_list is " << base_target_id;
+ int pre_outlier_num = two_board_extrinsics_list.size();
+ RemoveOutliers(two_board_extrinsics_list);
+
+ LOG(INFO) << "Started with " << pre_outlier_num
+ << " observations. After OUTLIER rejection, "
+ << two_board_extrinsics_list.size() << " observations remaining";
+ Eigen::Affine3d H_boardA_boardB_avg =
+ ComputeAveragePose(two_board_extrinsics_list);
+ LOG(INFO) << "Estimate of two board pose using all nodes with "
+ << two_board_extrinsics_list.size() << " observations is:\n"
+ << H_boardA_boardB_avg.matrix() << "\nOr translation of "
+ << H_boardA_boardB_avg.translation().transpose()
+ << " (m) and rotation (r,p,y) of "
+ << PoseUtils::RotationMatrixToEulerAngles(
+ H_boardA_boardB_avg.rotation().matrix())
+ .transpose() *
+ 180.0 / M_PI
+ << " (deg)";
+
+ // Do quick check to see what averaged two-board pose for
+ // each camera is individually, and compare with overall average
for (auto camera_node : node_list) {
std::vector<TimestampedCameraDetection> pose_list;
for (auto ext : two_board_extrinsics_list) {
@@ -534,32 +675,74 @@
pose_list.push_back(ext);
}
}
- Eigen::Affine3d avg_pose_from_camera = ComputeAveragePose(pose_list);
- VLOG(1) << "Estimate from " << camera_node.camera_name() << " with "
- << pose_list.size() << " observations is:\n"
- << avg_pose_from_camera.matrix();
+ CHECK(pose_list.size() > 0)
+ << "Didn't get any two_board extrinsics for camera "
+ << camera_node.camera_name();
+ Eigen::Vector3d translation_variance, rotation_variance;
+ Eigen::Affine3d avg_pose_from_camera = ComputeAveragePose(
+ pose_list, &translation_variance, &rotation_variance);
+
+ Eigen::Vector3d translation_std_dev = translation_variance.array().sqrt();
+ LOG(INFO) << camera_node.camera_name() << " has average pose from "
+ << pose_list.size() << " views of two targets of \n"
+ << avg_pose_from_camera.matrix()
+ << "\nTranslation standard deviation is "
+ << translation_std_dev.transpose();
+ double stdev_norm = translation_std_dev.norm();
+ double threshold = 0.03; // 3 cm threshold on translation variation
+ if (stdev_norm > threshold) {
+ LOG(INFO) << "WARNING: |STD_DEV| is " << stdev_norm * 100 << " > "
+ << threshold * 100 << " cm!!!!\nStd dev vector (in m) is "
+ << translation_std_dev.transpose();
+ }
+
+ Eigen::Vector3d rotation_std_dev = rotation_variance.array().sqrt();
+ LOG(INFO) << camera_node.camera_name()
+ << " with rotational standard deviation of: "
+ << rotation_std_dev.transpose() << " (radians)";
+ double rot_stdev_norm = rotation_std_dev.norm();
+ double rot_threshold = 3 * M_PI / 180.0; // Warn if more than 3 degrees
+ if (rot_stdev_norm > rot_threshold) {
+ LOG(INFO) << "WARNING: ROTATIONAL STD DEV is "
+ << rot_stdev_norm * 180.0 / M_PI << " > "
+ << rot_threshold * 180.0 / M_PI
+ << " degrees!!!!\nStd dev vector (in deg) is "
+ << (rotation_std_dev * 180.0 / M_PI).transpose();
+ }
+    // Check if a particular camera deviates significantly from the overall
+    // average. Any of these factors could indicate a problem with that camera
+ Eigen::Affine3d delta_from_overall =
+ H_boardA_boardB_avg * avg_pose_from_camera.inverse();
+ LOG(INFO) << camera_node.camera_name()
+ << " had estimate different from pooled average of\n"
+ << "|dT| = " << delta_from_overall.translation().norm()
+ << "m and |dR| = "
+ << (PoseUtils::RotationMatrixToEulerAngles(
+ delta_from_overall.rotation().matrix()) *
+ 180.0 / M_PI)
+ .norm()
+ << " deg";
}
- Eigen::Affine3d H_boardA_boardB_avg =
- ComputeAveragePose(two_board_extrinsics_list);
- // TODO: Should probably do some outlier rejection
- VLOG(1) << "Estimate of two board pose using all nodes with "
- << two_board_extrinsics_list.size() << " observations is:\n"
- << H_boardA_boardB_avg.matrix() << "\n";
// Next, compute the relative camera poses
LOG(INFO) << "Got " << detection_list.size() << " extrinsic observations";
- std::vector<Eigen::Affine3d> H_camera1_camera2_list;
+ std::vector<TimestampedCameraDetection> H_camera1_camera2_list;
std::vector<Eigen::Affine3d> updated_extrinsics;
// Use the first node's extrinsics as our base, and fix from there
updated_extrinsics.push_back(default_extrinsics[0]);
LOG(INFO) << "Default extrinsic for camera " << node_list.at(0).camera_name()
- << " is " << default_extrinsics[0].matrix();
+ << " is\n"
+ << default_extrinsics[0].matrix();
for (uint i = 0; i < node_list.size() - 1; i++) {
H_camera1_camera2_list.clear();
// Go through the list, and find successive pairs of
- // cameras We'll be calculating and writing the second
- // of the pair (the i+1'th camera)
+ // cameras. We'll be calculating and writing the second
+ // of the pair's (the i+1'th camera) extrinsics
for (auto [pose1, pose2] : detection_list) {
+ // Note that this assumes our poses are ordered by the node_list
+ CHECK(!((pose1.camera_name == node_list.at(i + 1).camera_name()) &&
+ (pose2.camera_name == node_list.at(i).camera_name())))
+ << "The camera ordering on our detections is incorrect!";
if ((pose1.camera_name == node_list.at(i).camera_name()) &&
(pose2.camera_name == node_list.at(i + 1).camera_name())) {
Eigen::Affine3d H_camera1_boardA = pose1.H_camera_target;
@@ -584,7 +767,12 @@
// Compute camera1->camera2 map
Eigen::Affine3d H_camera1_camera2 =
H_camera1_boardA * H_camera2_boardA.inverse();
- H_camera1_camera2_list.push_back(H_camera1_camera2);
+ // Set the list up as TimestampedCameraDetection's to use the outlier
+ // removal function
+ TimestampedCameraDetection camera1_camera2{
+ .H_camera_target = H_camera1_camera2,
+ .camera_name = node_list.at(i + 1).camera_name()};
+ H_camera1_camera2_list.push_back(camera1_camera2);
VLOG(1) << "Map from camera " << pose1.camera_name << " and tag "
<< pose1.board_id << " with observation: \n"
<< pose1.H_camera_target.matrix() << "\n to camera "
@@ -619,6 +807,17 @@
<< H_camera1_camera2_list.size()
<< " observations, and the average pose is:\n"
<< H_camera1_camera2_avg.matrix();
+
+ RemoveOutliers(H_camera1_camera2_list);
+
+ H_camera1_camera2_avg = ComputeAveragePose(H_camera1_camera2_list);
+ LOG(INFO) << "After outlier rejection, from "
+ << node_list.at(i).camera_name() << " to "
+ << node_list.at(i + 1).camera_name() << " found "
+ << H_camera1_camera2_list.size()
+ << " observations, and the average pose is:\n"
+ << H_camera1_camera2_avg.matrix();
+
Eigen::Affine3d H_camera1_camera2_default =
default_extrinsics[i].inverse() * default_extrinsics[i + 1];
LOG(INFO) << "Compare this to that from default values:\n"
@@ -634,6 +833,7 @@
<< Eigen::AngleAxisd(H_camera1_camera2_diff.rotation()).angle() *
180.0 / M_PI
<< " degrees)";
+
// Next extrinsic is just previous one * avg_delta_pose
Eigen::Affine3d next_extrinsic =
updated_extrinsics.back() * H_camera1_camera2_avg;
@@ -645,51 +845,8 @@
<< node_list.at(i + 1).camera_name() << " is \n"
<< next_extrinsic.matrix();
- // Write out this extrinsic to a file
- flatbuffers::FlatBufferBuilder fbb;
- flatbuffers::Offset<flatbuffers::Vector<float>> data_offset =
- fbb.CreateVector(
- frc971::vision::MatrixToVector(next_extrinsic.matrix()));
- calibration::TransformationMatrix::Builder matrix_builder(fbb);
- matrix_builder.add_data(data_offset);
- flatbuffers::Offset<calibration::TransformationMatrix>
- extrinsic_calibration_offset = matrix_builder.Finish();
-
- calibration::CameraCalibration::Builder calibration_builder(fbb);
- calibration_builder.add_fixed_extrinsics(extrinsic_calibration_offset);
- const aos::realtime_clock::time_point realtime_now =
- aos::realtime_clock::now();
- calibration_builder.add_calibration_timestamp(
- realtime_now.time_since_epoch().count());
- fbb.Finish(calibration_builder.Finish());
- aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
- solved_extrinsics = fbb.Release();
-
- aos::FlatbufferDetachedBuffer<
- frc971::vision::calibration::CameraCalibration>
- cal_copy = aos::RecursiveCopyFlatBuffer(calibration_list[i + 1]);
- cal_copy.mutable_message()->clear_fixed_extrinsics();
- cal_copy.mutable_message()->clear_calibration_timestamp();
- aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
- merged_calibration = aos::MergeFlatBuffers(
- &cal_copy.message(), &solved_extrinsics.message());
-
- std::stringstream time_ss;
- time_ss << realtime_now;
-
- CameraNode &camera_node = node_list[i + 1];
- const std::string calibration_filename =
- frc971::vision::CalibrationFilename(
- absl::GetFlag(FLAGS_output_folder), camera_node.node_name,
- absl::GetFlag(FLAGS_team_number), camera_node.camera_number,
- cal_copy.message().camera_id()->data(), time_ss.str());
- LOG(INFO) << calibration_filename << " -> "
- << aos::FlatbufferToJson(merged_calibration,
- {.multi_line = true});
-
- aos::util::WriteStringToFileOrDie(
- calibration_filename,
- aos::FlatbufferToJson(merged_calibration, {.multi_line = true}));
+ WriteExtrinsicFile(next_extrinsic, node_list[i + 1],
+ calibration_list[i + 1]);
if (absl::GetFlag(FLAGS_visualize)) {
// Draw each of the updated extrinsic camera locations
@@ -712,9 +869,8 @@
}
// Cleanup
- for (uint i = 0; i < image_callbacks.size(); i++) {
+ for (uint i = 0; i < charuco_extractors.size(); i++) {
delete charuco_extractors[i];
- delete image_callbacks[i];
}
}
} // namespace y2024::vision
diff --git a/y2024/vision/target_mapping.cc b/y2024/vision/target_mapping.cc
index f5fa34c..d9b067a 100644
--- a/y2024/vision/target_mapping.cc
+++ b/y2024/vision/target_mapping.cc
@@ -266,7 +266,7 @@
}
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
Eigen::Affine3d H_camera_target =
Eigen::Translation3d(target_pose.pose.p) * target_pose.pose.q;
diff --git a/y2024_swerve/vision/target_mapping.cc b/y2024_swerve/vision/target_mapping.cc
index b8b1f62..dd33f11 100644
--- a/y2024_swerve/vision/target_mapping.cc
+++ b/y2024_swerve/vision/target_mapping.cc
@@ -268,7 +268,7 @@
}
const TargetMapper::TargetPose target_pose =
- PoseUtils::TargetPoseFromFbs(*target_pose_fbs);
+ TargetMapper::TargetPoseFromFbs(*target_pose_fbs);
Eigen::Affine3d H_camera_target =
Eigen::Translation3d(target_pose.pose.p) * target_pose.pose.q;