Scale vision uncertainty by distance for mapping
Uncertainty should scale with distance, since detections get worse farther
out. Also, when we eventually do live localization based on the AprilTags,
we'll need to weight them by a distance-based uncertainty if we see more
than one at the same time.
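
Concretely, this change scales the base vision noise by the squared
distance from camera to target before inverting to get the information
matrix. A minimal sketch of that math (mirroring ComputeConfidence in the
diff below; constants are the ones from this change):

    #include <cmath>
    #include <Eigen/Dense>

    // Covariance contributed by the two vision measurements of a
    // constraint (one at the start, one at the end), each scaled by the
    // square of its distance from the camera.
    Eigen::Matrix3d VisionCovariance(double distance_start,
                                     double distance_end) {
      Eigen::Matrix3d Q_vision = Eigen::Matrix3d::Zero();
      Q_vision(0, 0) = std::pow(0.045, 2);  // x
      Q_vision(1, 1) = std::pow(0.045, 2);  // y
      Q_vision(2, 2) = std::pow(0.02, 2);   // theta
      return Q_vision * std::pow(distance_start, 2) +
             Q_vision * std::pow(distance_end, 2);
    }

The confidence (information) used by the mapper is the inverse of the full
covariance P, which also includes the existing time-based odometry noise.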
Open to suggestions for a more principled distance-based uncertainty.
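
For the eventual live-localization weighting, one hypothetical option is
inverse-variance (information) fusion of simultaneous detections; a sketch
only, with names that are illustrative and not part of this change:

    #include <Eigen/Dense>

    // Fuse two simultaneous tag-based pose estimates (x, y, theta) by
    // weighting each with its information matrix (inverse covariance).
    // Closer tags have less noise, hence more information and more weight.
    Eigen::Vector3d FusePoses(const Eigen::Vector3d &pose_a,
                              const Eigen::Matrix3d &information_a,
                              const Eigen::Vector3d &pose_b,
                              const Eigen::Matrix3d &information_b) {
      return (information_a + information_b).inverse() *
             (information_a * pose_a + information_b * pose_b);
    }

(In practice theta would need angle wrapping handled before averaging.)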
Signed-off-by: Milind Upadhyay <milind.upadhyay@gmail.com>
Change-Id: I2e5b4e9e5de493a7dfb2bcaac739ab62f2ca65a2
diff --git a/frc971/vision/target_mapper.cc b/frc971/vision/target_mapper.cc
index ab4ee6d..9222039 100644
--- a/frc971/vision/target_mapper.cc
+++ b/frc971/vision/target_mapper.cc
@@ -149,7 +149,9 @@
auto robot_pose = interpolated_poses[it->time];
auto robot_delta_pose =
PoseUtils::ComputeRelativePose(last_robot_pose, robot_pose);
- auto confidence = ComputeConfidence(last_detection.time, it->time);
+ auto confidence = ComputeConfidence(last_detection.time, it->time,
+ last_detection.distance_from_camera,
+ it->distance_from_camera);
target_constraints.emplace_back(ComputeTargetConstraint(
last_detection, PoseUtils::Pose2dToAffine3d(robot_delta_pose), *it,
@@ -188,9 +190,9 @@
continue;
}
- // TODO(milind): better way to compute confidence since these detections are
- // likely very close in time together
- auto confidence = ComputeConfidence(last_detection.time, it->time);
+ auto confidence = ComputeConfidence(last_detection.time, it->time,
+ last_detection.distance_from_camera,
+ it->distance_from_camera);
target_constraints.emplace_back(
ComputeTargetConstraint(last_detection, *it, confidence));
}
@@ -200,7 +202,8 @@
Eigen::Matrix3d DataAdapter::ComputeConfidence(
aos::distributed_clock::time_point start,
- aos::distributed_clock::time_point end) {
+ aos::distributed_clock::time_point end, double distance_from_camera_start,
+ double distance_from_camera_end) {
constexpr size_t kX = 0;
constexpr size_t kY = 1;
constexpr size_t kTheta = 2;
@@ -221,14 +224,17 @@
}
{
- // Noise for vision-based target localizations
+ // Noise for vision-based target localizations. Multiplying this matrix
+ // by the square of the distance from camera to target gives the
+ // uncertainty of that measurement.
Eigen::Matrix3d Q_vision = Eigen::Matrix3d::Zero();
- Q_vision(kX, kX) = std::pow(0.09, 2);
- Q_vision(kY, kY) = std::pow(0.09, 2);
+ Q_vision(kX, kX) = std::pow(0.045, 2);
+ Q_vision(kY, kY) = std::pow(0.045, 2);
Q_vision(kTheta, kTheta) = std::pow(0.02, 2);
// Add uncertainty for the 2 vision measurements (1 at start and 1 at end)
- P += 2.0 * Q_vision;
+ P += Q_vision * std::pow(distance_from_camera_start, 2);
+ P += Q_vision * std::pow(distance_from_camera_end, 2);
}
return P.inverse();
diff --git a/frc971/vision/target_mapper.h b/frc971/vision/target_mapper.h
index 16a8343..edf2260 100644
--- a/frc971/vision/target_mapper.h
+++ b/frc971/vision/target_mapper.h
@@ -101,6 +101,9 @@
aos::distributed_clock::time_point time;
// Pose of target relative to robot
Eigen::Affine3d H_robot_target;
+ // Horizontal distance from camera to target, used for confidence
+ // calculation
+ double distance_from_camera;
TargetMapper::TargetId id;
};
@@ -131,7 +134,8 @@
// this matrix the "information"
static Eigen::Matrix3d ComputeConfidence(
aos::distributed_clock::time_point start,
- aos::distributed_clock::time_point end);
+ aos::distributed_clock::time_point end, double distance_from_camera_start,
+ double distance_from_camera_end);
private:
static ceres::examples::Pose2d InterpolatePose(
diff --git a/frc971/vision/target_mapper_test.cc b/frc971/vision/target_mapper_test.cc
index 6b48c81..ab06c5b 100644
--- a/frc971/vision/target_mapper_test.cc
+++ b/frc971/vision/target_mapper_test.cc
@@ -69,6 +69,16 @@
return ceres::examples::Pose2d{x, y, yaw_radians};
}
+// Assumes the camera and robot origins are the same
+DataAdapter::TimestampedDetection MakeTimestampedDetection(
+ aos::distributed_clock::time_point time, Eigen::Affine3d H_robot_target,
+ TargetMapper::TargetId id) {
+ auto target_pose = PoseUtils::Affine3dToPose2d(H_robot_target);
+ return DataAdapter::TimestampedDetection{
+ time, H_robot_target,
+ std::sqrt(std::pow(target_pose.x, 2) + std::pow(target_pose.y, 2)), id};
+}
+
bool TargetIsInView(TargetMapper::TargetPose target_detection) {
// And check if it is within the fov of the robot /
// camera, assuming camera is pointing in the
@@ -94,6 +104,64 @@
} // namespace
+TEST(DataAdapterTest, ComputeConfidence) {
+ // Check the confidence matrices. Don't check the actual values in case
+ // the constants change; just check that the confidence of constraints
+ // decreases as the time period or distance from camera increases.
+ {
+ // Vary time period
+ constexpr double kDistanceStart = 0.5;
+ constexpr double kDistanceEnd = 2.0;
+
+ Eigen::Matrix3d last_confidence = Eigen::Matrix3d::Zero();
+ for (size_t dt = 0; dt < 15; dt++) {
+ Eigen::Matrix3d confidence = DataAdapter::ComputeConfidence(
+ TimeInMs(0), TimeInMs(dt), kDistanceStart, kDistanceEnd);
+
+ if (dt != 0) {
+ // Confidence only decreases every 5ms (control loop period)
+ if (dt % 5 == 0) {
+ EXPECT_CONFIDENCE_GT(last_confidence, confidence);
+ } else {
+ EXPECT_EQ(last_confidence, confidence);
+ }
+ }
+ last_confidence = confidence;
+ }
+ }
+
+ {
+ // Vary distance at start
+ constexpr int kDt = 3;
+ constexpr double kDistanceEnd = 1.5;
+ Eigen::Matrix3d last_confidence = Eigen::Matrix3d::Zero();
+ for (double distance_start = 0.0; distance_start < 3.0;
+ distance_start += 0.5) {
+ Eigen::Matrix3d confidence = DataAdapter::ComputeConfidence(
+ TimeInMs(0), TimeInMs(kDt), distance_start, kDistanceEnd);
+ if (distance_start != 0.0) {
+ EXPECT_CONFIDENCE_GT(last_confidence, confidence);
+ }
+ last_confidence = confidence;
+ }
+ }
+
+ {
+ // Vary distance at end
+ constexpr int kDt = 2;
+ constexpr double kDistanceStart = 2.5;
+ Eigen::Matrix3d last_confidence = Eigen::Matrix3d::Zero();
+ for (double distance_end = 0.0; distance_end < 3.0; distance_end += 0.5) {
+ Eigen::Matrix3d confidence = DataAdapter::ComputeConfidence(
+ TimeInMs(0), TimeInMs(kDt), kDistanceStart, distance_end);
+ if (distance_end != 0.0) {
+ EXPECT_CONFIDENCE_GT(last_confidence, confidence);
+ }
+ last_confidence = confidence;
+ }
+ }
+}
+
TEST(DataAdapterTest, MatchTargetDetections) {
std::vector<DataAdapter::TimestampedPose> timestamped_robot_poses = {
{TimeInMs(0), ceres::examples::Pose2d{1.0, 2.0, 0.0}},
@@ -107,22 +175,22 @@
std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections =
{{TimeInMs(5),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 0},
+ 1.0, 0},
{TimeInMs(9),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 1},
+ 1.0, 1},
{TimeInMs(9),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 2},
+ 1.0, 2},
{TimeInMs(15),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 0},
+ 1.0, 0},
{TimeInMs(16),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 2},
+ 1.0, 2},
{TimeInMs(27),
PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, 0.0}),
- 1}};
+ 1.0, 1}};
auto [target_constraints, robot_delta_poses] =
DataAdapter::MatchTargetDetections(timestamped_robot_poses,
timestamped_target_detections);
@@ -166,55 +234,30 @@
PoseUtils::ComputeOffsetPose(computed_robot_pose, robot_delta_poses[4]);
EXPECT_POSE_IN_RANGE(computed_robot_pose, timestamped_robot_poses[5].pose,
timestamped_robot_poses[6].pose);
-
- // Check the confidence matrices. Don't check the actual values
- // in case the constants change, just check the confidence of contraints
- // relative to each other, as constraints over longer time periods should have
- // lower confidence.
- const auto confidence_0ms =
- DataAdapter::ComputeConfidence(TimeInMs(0), TimeInMs(0));
- const auto confidence_1ms =
- DataAdapter::ComputeConfidence(TimeInMs(0), TimeInMs(1));
- const auto confidence_4ms =
- DataAdapter::ComputeConfidence(TimeInMs(0), TimeInMs(4));
- const auto confidence_6ms =
- DataAdapter::ComputeConfidence(TimeInMs(0), TimeInMs(6));
- const auto confidence_11ms =
- DataAdapter::ComputeConfidence(TimeInMs(0), TimeInMs(11));
-
- // Check relative magnitude of different confidences.
- // Confidences for 0-5ms, 5-10ms, and 10-15ms periods are equal
- // because they fit within one control loop iteration.
- EXPECT_EQ(confidence_0ms, confidence_1ms);
- EXPECT_EQ(confidence_1ms, confidence_4ms);
- EXPECT_CONFIDENCE_GT(confidence_4ms, confidence_6ms);
- EXPECT_CONFIDENCE_GT(confidence_6ms, confidence_11ms);
-
- // Check that confidences (information) of actual constraints are correct
- EXPECT_EQ(target_constraints[0].information, confidence_4ms);
- EXPECT_EQ(target_constraints[1].information, confidence_0ms);
- EXPECT_EQ(target_constraints[2].information, confidence_6ms);
- EXPECT_EQ(target_constraints[3].information, confidence_1ms);
- EXPECT_EQ(target_constraints[4].information, confidence_11ms);
}
TEST(DataAdapterTest, MatchTargetDetectionsWithoutRobotPosition) {
std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections =
- {{TimeInMs(5),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -5.0, 0.0}),
- 2},
- {TimeInMs(6),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -4.0, M_PI}),
- 0},
- {TimeInMs(10),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{3.0, -3.0, M_PI}),
- 1},
- {TimeInMs(13),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{4.0, -7.0, M_PI_2}),
- 2},
- {TimeInMs(14),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{4.0, -4.0, M_PI_2}),
- 2}};
+ {MakeTimestampedDetection(
+ TimeInMs(5),
+ PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{5.0, -5.0, 0.0}),
+ 2),
+ MakeTimestampedDetection(TimeInMs(6),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{5.0, -4.0, M_PI}),
+ 0),
+ MakeTimestampedDetection(TimeInMs(10),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{3.0, -3.0, M_PI}),
+ 1),
+ MakeTimestampedDetection(TimeInMs(13),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{4.0, -7.0, M_PI_2}),
+ 2),
+ MakeTimestampedDetection(TimeInMs(14),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{4.0, -4.0, M_PI_2}),
+ 2)};
constexpr auto kMaxDt = std::chrono::milliseconds(3);
auto target_constraints =
@@ -252,12 +295,14 @@
{TimeInMs(10), ceres::examples::Pose2d{-1.0, 0.0, 0.0}},
{TimeInMs(15), ceres::examples::Pose2d{-1.0, 0.0, 0.0}}};
std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections =
- {{TimeInMs(5),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{3.0, 0.0, M_PI}),
- 0},
- {TimeInMs(10),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{-4.0, 0.0, 0.0}),
- 1}};
+ {MakeTimestampedDetection(
+ TimeInMs(5),
+ PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{3.0, 0.0, M_PI}),
+ 0),
+ MakeTimestampedDetection(
+ TimeInMs(10),
+ PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{-4.0, 0.0, 0.0}),
+ 1)};
auto target_constraints =
DataAdapter::MatchTargetDetections(timestamped_robot_poses,
timestamped_target_detections)
@@ -282,16 +327,19 @@
{TimeInMs(15), ceres::examples::Pose2d{4.0, 0.0, 0.0}},
{TimeInMs(20), ceres::examples::Pose2d{-1.0, 0.0, 0.0}}};
std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections =
- {{TimeInMs(5),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{6.0, 0.0, M_PI}),
- 0},
- {TimeInMs(10),
- PoseUtils::Pose2dToAffine3d(
- ceres::examples::Pose2d{-8.0, 0.0, -M_PI_2}),
- 1},
- {TimeInMs(15),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{1.0, 0.0, M_PI}),
- 0}};
+ {MakeTimestampedDetection(
+ TimeInMs(5),
+ PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{6.0, 0.0, M_PI}),
+ 0),
+ MakeTimestampedDetection(
+ TimeInMs(10),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{-8.0, 0.0, -M_PI_2}),
+ 1),
+ MakeTimestampedDetection(
+ TimeInMs(15),
+ PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{1.0, 0.0, M_PI}),
+ 0)};
auto target_constraints =
DataAdapter::MatchTargetDetections(timestamped_robot_poses,
timestamped_target_detections)
@@ -315,13 +363,15 @@
{TimeInMs(10), ceres::examples::Pose2d{-1.0, 0.0, 0.0}},
{TimeInMs(15), ceres::examples::Pose2d{-1.01, -0.01, 0.004}}};
std::vector<DataAdapter::TimestampedDetection> timestamped_target_detections =
- {{TimeInMs(5),
- PoseUtils::Pose2dToAffine3d(
- ceres::examples::Pose2d{3.01, 0.001, M_PI - 0.001}),
- 0},
- {TimeInMs(10),
- PoseUtils::Pose2dToAffine3d(ceres::examples::Pose2d{-4.01, 0.0, 0.0}),
- 1}};
+ {MakeTimestampedDetection(
+ TimeInMs(5),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{3.01, 0.001, M_PI - 0.001}),
+ 0),
+ MakeTimestampedDetection(TimeInMs(10),
+ PoseUtils::Pose2dToAffine3d(
+ ceres::examples::Pose2d{-4.01, 0.0, 0.0}),
+ 1)};
auto target_constraints =
DataAdapter::MatchTargetDetections(timestamped_robot_poses,
@@ -396,12 +446,9 @@
aos::distributed_clock::time_point(std::chrono::milliseconds(t));
timestamped_robot_poses.emplace_back(DataAdapter::TimestampedPose{
.time = time_point, .pose = robot_pose});
- timestamped_target_detections.emplace_back(
- DataAdapter::TimestampedDetection{
- .time = time_point,
- .H_robot_target =
- PoseUtils::Pose2dToAffine3d(target_detection.pose),
- .id = target_detection.id});
+ timestamped_target_detections.emplace_back(MakeTimestampedDetection(
+ time_point, PoseUtils::Pose2dToAffine3d(target_detection.pose),
+ target_detection.id));
}
}
}
diff --git a/y2022/vision/target_mapping.cc b/y2022/vision/target_mapping.cc
index 112696e..090ac60 100644
--- a/y2022/vision/target_mapping.cc
+++ b/y2022/vision/target_mapping.cc
@@ -50,21 +50,13 @@
}
// Change reference frame from camera to robot
-Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camcv_target,
+Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camrob_target,
Eigen::Affine3d fixed_extrinsics,
Eigen::Affine3d turret_extrinsics,
double turret_position) {
- // With X, Y, Z being robot axes and x, y, z being camera axes,
- // x = -Y, y = -Z, z = X
- const Eigen::Affine3d H_camcv_camrob =
- Eigen::Affine3d((Eigen::Matrix4d() << 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, -1.0,
- 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
- .finished());
-
const Eigen::Affine3d H_robot_camrob =
CameraTransform(fixed_extrinsics, turret_extrinsics, turret_position);
- const Eigen::Affine3d H_robot_target =
- H_robot_camrob * H_camcv_camrob.inverse() * H_camcv_target;
+ const Eigen::Affine3d H_robot_target = H_robot_camrob * H_camrob_target;
return H_robot_target;
}
@@ -94,15 +86,29 @@
Eigen::AngleAxisd r_angle = Eigen::AngleAxisd(
rvecs_eigen[tag].norm(), rvecs_eigen[tag] / rvecs_eigen[tag].norm());
CHECK(rvecs_eigen[tag].norm() != 0) << "rvecs norm = 0; divide by 0";
- Eigen::Affine3d H_camcv_target = T_camera_target * r_angle;
+ Eigen::Affine3d H_camcv_target = T_camera_target * r_angle;
+ // With X, Y, Z being robot axes and x, y, z being camera axes,
+ // x = -Y, y = -Z, z = X
+ static const Eigen::Affine3d H_camcv_camrob =
+ Eigen::Affine3d((Eigen::Matrix4d() << 0.0, -1.0, 0.0, 0.0, 0.0, 0.0,
+ -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
+ .finished());
+ Eigen::Affine3d H_camrob_target = H_camcv_camrob.inverse() * H_camcv_target;
Eigen::Affine3d H_robot_target = CameraToRobotDetection(
- H_camcv_target, fixed_extrinsics, turret_extrinsics, turret_position);
+ H_camrob_target, fixed_extrinsics, turret_extrinsics, turret_position);
+
+ ceres::examples::Pose2d target_pose_camera =
+ PoseUtils::Affine3dToPose2d(H_camrob_target);
+ double distance_from_camera = std::sqrt(std::pow(target_pose_camera.x, 2) +
+ std::pow(target_pose_camera.y, 2));
timestamped_target_detections->emplace_back(
- DataAdapter::TimestampedDetection{.time = pi_distributed_time,
- .H_robot_target = H_robot_target,
- .id = april_ids[tag][0]});
+ DataAdapter::TimestampedDetection{
+ .time = pi_distributed_time,
+ .H_robot_target = H_robot_target,
+ .distance_from_camera = distance_from_camera,
+ .id = april_ids[tag][0]});
}
}