Scale vision uncertainty by distance for mapping
Uncertainty should be based on distance since we get worse detections
farther out. Also, when we eventually do live localization based on the
AprilTags, we'll need to weight them by a distance-based uncertainty if we
see more than one at the same time.
Open to suggestions for a more principled distance-based uncertainty model.
Signed-off-by: Milind Upadhyay <milind.upadhyay@gmail.com>
Change-Id: I2e5b4e9e5de493a7dfb2bcaac739ab62f2ca65a2
diff --git a/y2022/vision/target_mapping.cc b/y2022/vision/target_mapping.cc
index 112696e..090ac60 100644
--- a/y2022/vision/target_mapping.cc
+++ b/y2022/vision/target_mapping.cc
@@ -50,21 +50,13 @@
}
// Change reference frame from camera to robot
-Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camcv_target,
+Eigen::Affine3d CameraToRobotDetection(Eigen::Affine3d H_camrob_target,
Eigen::Affine3d fixed_extrinsics,
Eigen::Affine3d turret_extrinsics,
double turret_position) {
- // With X, Y, Z being robot axes and x, y, z being camera axes,
- // x = -Y, y = -Z, z = X
- const Eigen::Affine3d H_camcv_camrob =
- Eigen::Affine3d((Eigen::Matrix4d() << 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, -1.0,
- 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
- .finished());
-
const Eigen::Affine3d H_robot_camrob =
CameraTransform(fixed_extrinsics, turret_extrinsics, turret_position);
- const Eigen::Affine3d H_robot_target =
- H_robot_camrob * H_camcv_camrob.inverse() * H_camcv_target;
+ const Eigen::Affine3d H_robot_target = H_robot_camrob * H_camrob_target;
return H_robot_target;
}
@@ -94,15 +86,29 @@
Eigen::AngleAxisd r_angle = Eigen::AngleAxisd(
rvecs_eigen[tag].norm(), rvecs_eigen[tag] / rvecs_eigen[tag].norm());
CHECK(rvecs_eigen[tag].norm() != 0) << "rvecs norm = 0; divide by 0";
- Eigen::Affine3d H_camcv_target = T_camera_target * r_angle;
+ Eigen::Affine3d H_camcv_target = T_camera_target * r_angle;
+ // With X, Y, Z being robot axes and x, y, z being camera axes,
+ // x = -Y, y = -Z, z = X
+ static const Eigen::Affine3d H_camcv_camrob =
+ Eigen::Affine3d((Eigen::Matrix4d() << 0.0, -1.0, 0.0, 0.0, 0.0, 0.0,
+ -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
+ .finished());
+ Eigen::Affine3d H_camrob_target = H_camcv_camrob.inverse() * H_camcv_target;
Eigen::Affine3d H_robot_target = CameraToRobotDetection(
- H_camcv_target, fixed_extrinsics, turret_extrinsics, turret_position);
+ H_camrob_target, fixed_extrinsics, turret_extrinsics, turret_position);
+
+ ceres::examples::Pose2d target_pose_camera =
+ PoseUtils::Affine3dToPose2d(H_camrob_target);
+ double distance_from_camera = std::sqrt(std::pow(target_pose_camera.x, 2) +
+ std::pow(target_pose_camera.y, 2));
timestamped_target_detections->emplace_back(
- DataAdapter::TimestampedDetection{.time = pi_distributed_time,
- .H_robot_target = H_robot_target,
- .id = april_ids[tag][0]});
+ DataAdapter::TimestampedDetection{
+ .time = pi_distributed_time,
+ .H_robot_target = H_robot_target,
+ .distance_from_camera = distance_from_camera,
+ .id = april_ids[tag][0]});
}
}