Seed target map with fixed constraints
That way, we can take blue side logs without having to change the fixed
ids. This also provides a base for the solver.
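
Two details worth calling out:

ComputeConfidence now applies the distortion scaling to the distance
before squaring, so each detection contributes

    Q_vision * std::pow(distance * (1 + distortion_noise_scalar * distortion), 2)

to the covariance instead of

    Q_vision * std::pow(distance, 2) * (1 + distortion_noise_scalar * distortion)

As a purely illustrative example (numbers made up): with distance = 2,
distortion_noise_scalar = 2, and distortion = 0.5, the old expression
added 8 * Q_vision while the new one adds 16 * Q_vision, so heavily
distorted detections get downweighted more strongly.

The seeding adds one low-noise constraint from the frozen target to
every other target, taken straight from the fixed map. Roughly (a
simplified sketch of the loop added in target_mapping.cc below; the
real code builds full TimestampedDetection structs for both ends):

    for (TargetMapper::TargetId id = kMinTargetId; id <= kMaxTargetId; id++) {
      if (id == static_cast<TargetMapper::TargetId>(FLAGS_frozen_target_id)) {
        continue;
      }
      // Both "detections" are just fixed-map poses with
      // kSeedDistanceFromCamera = 1.0 and zero distortion, so the
      // resulting confidence is high and keeps the solver anchored.
      target_constraints.emplace_back(DataAdapter::ComputeTargetConstraint(
          frozen_detection_seed, detection_seed,
          DataAdapter::ComputeConfidence(frozen_detection_seed,
                                         detection_seed)));
    }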
Signed-off-by: milind-u <milind.upadhyay@gmail.com>
Change-Id: Ib022f351723230cb96162a2c96530d20c695fc9b
diff --git a/frc971/vision/target_mapper.cc b/frc971/vision/target_mapper.cc
index ddeba10..3eba919 100644
--- a/frc971/vision/target_mapper.cc
+++ b/frc971/vision/target_mapper.cc
@@ -174,12 +174,14 @@
Q_vision(kOrientation3, kOrientation3) = std::pow(0.02, 2);
// Add uncertainty for the 2 vision measurements (1 at start and 1 at end)
- P += Q_vision * std::pow(detection_start.distance_from_camera, 2) *
- (1.0 +
- FLAGS_distortion_noise_scalar * detection_start.distortion_factor);
- P +=
- Q_vision * std::pow(detection_end.distance_from_camera, 2) *
- (1.0 + FLAGS_distortion_noise_scalar * detection_end.distortion_factor);
+ P += Q_vision * std::pow(detection_start.distance_from_camera *
+ (1.0 + FLAGS_distortion_noise_scalar *
+ detection_start.distortion_factor),
+ 2);
+ P += Q_vision * std::pow(detection_end.distance_from_camera *
+ (1.0 + FLAGS_distortion_noise_scalar *
+ detection_end.distortion_factor),
+ 2);
}
return P.inverse();
@@ -364,8 +366,10 @@
{.multi_line = true});
}
+} // namespace frc971::vision
+
std::ostream &operator<<(std::ostream &os, ceres::examples::Pose3d pose) {
- auto rpy = PoseUtils::QuaternionToEulerAngles(pose.q);
+ auto rpy = frc971::vision::PoseUtils::QuaternionToEulerAngles(pose.q);
os << absl::StrFormat(
"{x: %.3f, y: %.3f, z: %.3f, roll: %.3f, pitch: "
"%.3f, yaw: %.3f}",
@@ -380,5 +384,3 @@
<< constraint.t_be << "}";
return os;
}
-
-} // namespace frc971::vision
diff --git a/frc971/vision/target_mapper.h b/frc971/vision/target_mapper.h
index f5d1aef..c782992 100644
--- a/frc971/vision/target_mapper.h
+++ b/frc971/vision/target_mapper.h
@@ -136,7 +136,6 @@
const TimestampedDetection &detection_start,
const TimestampedDetection &detection_end);
- private:
// Computes the constraint between the start and end pose of the targets: the
// relative pose between the start and end target locations in the frame of
// the start target.
@@ -146,10 +145,10 @@
const TargetMapper::ConfidenceMatrix &confidence);
};
+} // namespace frc971::vision
+
std::ostream &operator<<(std::ostream &os, ceres::examples::Pose3d pose);
std::ostream &operator<<(std::ostream &os,
ceres::examples::Constraint3d constraint);
-} // namespace frc971::vision
-
#endif // FRC971_VISION_TARGET_MAPPER_H_
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index 45ab7db..f2e2592 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -33,12 +33,13 @@
flip_image_(flip_image),
node_name_(event_loop->node()->name()->string_view()),
ftrace_(),
- image_callback_(event_loop, channel_name,
- [&](cv::Mat image_color_mat,
- const aos::monotonic_clock::time_point eof) {
- HandleImage(image_color_mat, eof);
- },
- chrono::milliseconds(5)),
+ image_callback_(
+ event_loop, channel_name,
+ [&](cv::Mat image_color_mat,
+ const aos::monotonic_clock::time_point eof) {
+ HandleImage(image_color_mat, eof);
+ },
+ chrono::milliseconds(5)),
target_map_sender_(
event_loop->MakeSender<frc971::vision::TargetMap>("/camera")),
image_annotations_sender_(
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index 3f5355a..ba5f5d9 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -47,6 +47,9 @@
DEFINE_uint64(skip_to, 1,
"Start at combined image of this number (1 is the first image)");
DEFINE_bool(solve, true, "Whether to solve for the field's target map.");
+DEFINE_string(dump_constraints_to, "/tmp/constraints.txt",
+ "Write the target constraints to this path");
+DECLARE_uint64(frozen_target_id);
namespace y2023 {
namespace vision {
@@ -58,6 +61,9 @@
using frc971::vision::VisualizeRobot;
namespace calibration = frc971::vision::calibration;
+constexpr TargetMapper::TargetId kMinTargetId = 1;
+constexpr TargetMapper::TargetId kMaxTargetId = 8;
+
// Class to handle reading target poses from a replayed log,
// displaying various debug info, and passing the poses to
// frc971::vision::TargetMapper for field mapping.
@@ -222,6 +228,14 @@
label << node_name << " - ";
for (const auto *target_pose_fbs : *map.target_poses()) {
+ // Skip detections with invalid ids
+ if (target_pose_fbs->id() < kMinTargetId ||
+ target_pose_fbs->id() > kMaxTargetId) {
+ LOG(WARNING) << "Skipping tag with invalid id of "
+ << target_pose_fbs->id();
+ continue;
+ }
+
// Skip detections with high pose errors
if (target_pose_fbs->pose_error() > FLAGS_max_pose_error) {
VLOG(1) << " Skipping tag " << target_pose_fbs->id()
@@ -368,6 +382,63 @@
auto target_constraints =
DataAdapter::MatchTargetDetections(timestamped_target_detections_);
+ // Remove constraints between the two sides of the field - these are
+ // basically garbage because of how far the camera is. We will use seeding
+ // below to connect the two sides
+ target_constraints.erase(
+ std::remove_if(target_constraints.begin(), target_constraints.end(),
+ [](const auto &constraint) {
+ constexpr TargetMapper::TargetId kMaxRedId = 4;
+ TargetMapper::TargetId min_id =
+ std::min(constraint.id_begin, constraint.id_end);
+ TargetMapper::TargetId max_id =
+ std::max(constraint.id_begin, constraint.id_end);
+ return (min_id <= kMaxRedId && max_id > kMaxRedId);
+ }),
+ target_constraints.end());
+
+ if (!FLAGS_dump_constraints_to.empty()) {
+ std::ofstream fout(FLAGS_dump_constraints_to);
+ for (const auto &constraint : target_constraints) {
+ fout << constraint << std::endl;
+ }
+ fout.flush();
+ fout.close();
+ }
+
+ // Give seed constraints with a higher confidence to ground the solver.
+ // This "distance from camera" controls the noise of the seed measurement
+ constexpr double kSeedDistanceFromCamera = 1.0;
+
+ constexpr double kSeedDistortionFactor = 0.0;
+ const DataAdapter::TimestampedDetection frozen_detection_seed = {
+ .time = aos::distributed_clock::min_time,
+ .H_robot_target = PoseUtils::Pose3dToAffine3d(
+ kFixedTargetMapper.GetTargetPoseById(FLAGS_frozen_target_id)
+ .value()
+ .pose),
+ .distance_from_camera = kSeedDistanceFromCamera,
+ .distortion_factor = kSeedDistortionFactor,
+ .id = kMinTargetId};
+
+ for (TargetMapper::TargetId id = kMinTargetId; id <= kMaxTargetId; id++) {
+ if (id == static_cast<TargetMapper::TargetId>(FLAGS_frozen_target_id)) {
+ continue;
+ }
+
+ const DataAdapter::TimestampedDetection detection_seed = {
+ .time = aos::distributed_clock::min_time,
+ .H_robot_target = PoseUtils::Pose3dToAffine3d(
+ kFixedTargetMapper.GetTargetPoseById(id).value().pose),
+ .distance_from_camera = kSeedDistanceFromCamera,
+ .distortion_factor = kSeedDistortionFactor,
+ .id = id};
+ target_constraints.emplace_back(DataAdapter::ComputeTargetConstraint(
+ frozen_detection_seed, detection_seed,
+ DataAdapter::ComputeConfidence(frozen_detection_seed,
+ detection_seed)));
+ }
+
TargetMapper mapper(FLAGS_json_path, target_constraints);
mapper.Solve(FLAGS_field_name, FLAGS_output_dir);
}