Solve mapping problem in 3d
Solving for z, pitch, and roll will help us get better estimates for
the 2d pose that we actually care about.
Also, since we are only using the box of pis for mapping, I deleted the
functionality for mapping with robot position so that I didn't have to
support it in 3d.
Change-Id: I87e99a148c4953e2e67b0b0e9b07aa9abe1cd158
Signed-off-by: milind-u <milind.upadhyay@gmail.com>
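
For illustration, a minimal sketch of what the new Euler-angle helpers in
PoseUtils could look like with Eigen, assuming a ZYX (yaw-pitch-roll)
rotation order; the actual implementation lives in target_mapper.cc and may
differ:

    #include "Eigen/Geometry"

    // Roll, pitch, yaw about X, Y, Z, composed in ZYX order (assumed convention).
    Eigen::Quaterniond EulerAnglesToQuaternion(const Eigen::Vector3d &rpy) {
      return Eigen::AngleAxisd(rpy.z(), Eigen::Vector3d::UnitZ()) *
             Eigen::AngleAxisd(rpy.y(), Eigen::Vector3d::UnitY()) *
             Eigen::AngleAxisd(rpy.x(), Eigen::Vector3d::UnitX());
    }

    Eigen::Vector3d QuaternionToEulerAngles(const Eigen::Quaterniond &q) {
      // eulerAngles(2, 1, 0) returns (yaw, pitch, roll); reorder to (roll, pitch, yaw).
      const Eigen::Vector3d ypr = q.toRotationMatrix().eulerAngles(2, 1, 0);
      return Eigen::Vector3d(ypr.z(), ypr.y(), ypr.x());
    }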
diff --git a/frc971/vision/target_mapper.h b/frc971/vision/target_mapper.h
index edf2260..7e2c323 100644
--- a/frc971/vision/target_mapper.h
+++ b/frc971/vision/target_mapper.h
@@ -4,6 +4,7 @@
#include <unordered_map>
#include "aos/events/simulated_event_loop.h"
+#include "ceres/ceres.h"
#include "frc971/vision/ceres/types.h"
#include "frc971/vision/target_map_generated.h"
@@ -16,12 +17,11 @@
class TargetMapper {
public:
using TargetId = int;
+ using ConfidenceMatrix = Eigen::Matrix<double, 6, 6>;
struct TargetPose {
TargetId id;
- // TOOD(milind): switch everything to 3d once we're more confident in 2d
- // solving
- ceres::examples::Pose2d pose;
+ ceres::examples::Pose3d pose;
};
// target_poses_path is the path to a TargetMap json with initial guesses for
@@ -29,11 +29,11 @@
// target_constraints are the deltas between consecutive target detections,
// and are usually prepared by the DataAdapter class below.
TargetMapper(std::string_view target_poses_path,
- std::vector<ceres::examples::Constraint2d> target_constraints);
+ const ceres::examples::VectorOfConstraints &target_constraints);
// Alternate constructor for tests.
// Takes in the actual initial guesses instead of a file containing them
- TargetMapper(std::map<TargetId, ceres::examples::Pose2d> target_poses,
- std::vector<ceres::examples::Constraint2d> target_constraints);
+ TargetMapper(const ceres::examples::MapOfPoses &target_poses,
+ const ceres::examples::VectorOfConstraints &target_constraints);
// Solves for the target map. If output_dir is set, the map will be saved to
// output_dir/field_name.json
@@ -46,56 +46,55 @@
static std::optional<TargetPose> GetTargetPoseById(
std::vector<TargetPose> target_poses, TargetId target_id);
- std::map<TargetId, ceres::examples::Pose2d> target_poses() {
- return target_poses_;
- }
+ ceres::examples::MapOfPoses target_poses() { return target_poses_; }
private:
// Constructs the nonlinear least squares optimization problem from the
// pose graph constraints.
void BuildOptimizationProblem(
- std::map<TargetId, ceres::examples::Pose2d> *target_poses,
- const std::vector<ceres::examples::Constraint2d> &constraints,
- ceres::Problem *problem);
+ const ceres::examples::VectorOfConstraints &constraints,
+ ceres::examples::MapOfPoses *poses, ceres::Problem *problem);
// Returns true if the solve was successful.
bool SolveOptimizationProblem(ceres::Problem *problem);
- std::map<TargetId, ceres::examples::Pose2d> target_poses_;
- std::vector<ceres::examples::Constraint2d> target_constraints_;
+ ceres::examples::MapOfPoses target_poses_;
+ ceres::examples::VectorOfConstraints target_constraints_;
};
-// Utility functions for dealing with ceres::examples::Pose2d structs
+// Utility functions for dealing with ceres::examples::Pose3d structs
class PoseUtils {
public:
- // Embeds a 2d pose into a 3d affine transformation to be used in 3d
- // computation
- static Eigen::Affine3d Pose2dToAffine3d(ceres::examples::Pose2d pose2d);
- // Assumes only x and y translation, and only z rotation (yaw)
- static ceres::examples::Pose2d Affine3dToPose2d(Eigen::Affine3d H);
+ // Embeds a 3d pose into an affine transformation
+ static Eigen::Affine3d Pose3dToAffine3d(
+ const ceres::examples::Pose3d &pose3d);
+ // Inverse of above function
+ static ceres::examples::Pose3d Affine3dToPose3d(const Eigen::Affine3d &H);
// Computes pose_2 relative to pose_1. This is equivalent to (pose_1^-1 *
// pose_2)
- static ceres::examples::Pose2d ComputeRelativePose(
- ceres::examples::Pose2d pose_1, ceres::examples::Pose2d pose_2);
+ static ceres::examples::Pose3d ComputeRelativePose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2);
// Computes pose_2 given a pose_1 and pose_2 relative to pose_1. This is
// equivalent to (pose_1 * pose_2_relative)
- static ceres::examples::Pose2d ComputeOffsetPose(
- ceres::examples::Pose2d pose_1, ceres::examples::Pose2d pose_2_relative);
+ static ceres::examples::Pose3d ComputeOffsetPose(
+ const ceres::examples::Pose3d &pose_1,
+ const ceres::examples::Pose3d &pose_2_relative);
+
+ // Converts a rotation with roll, pitch, and yaw into a quaternion
+ static Eigen::Quaterniond EulerAnglesToQuaternion(const Eigen::Vector3d &rpy);
+ // Inverse of above function
+ static Eigen::Vector3d QuaternionToEulerAngles(const Eigen::Quaterniond &q);
+ // Converts a 3d rotation matrix into a rotation with roll, pitch, and yaw
+ static Eigen::Vector3d RotationMatrixToEulerAngles(const Eigen::Matrix3d &R);
};
// Transforms robot position and target detection data into target constraints
-// to be used for mapping. Interpolates continous-time data, converting it to
-// discrete detection time steps.
+// to be used for mapping.
class DataAdapter {
public:
- // Pairs pose with a time point
- struct TimestampedPose {
- aos::distributed_clock::time_point time;
- ceres::examples::Pose2d pose;
- };
-
// Pairs target detection with a time point
struct TimestampedDetection {
aos::distributed_clock::time_point time;
@@ -107,58 +106,38 @@
TargetMapper::TargetId id;
};
- // Pairs consecutive target detections into constraints, and interpolates
- // robot poses based on time points to compute motion between detections. This
- // prepares data to be used by TargetMapper. Also returns vector of delta
- // robot poses corresponding to each constraint, to be used for testing.
- //
- // Assumes both inputs are in chronological order.
- static std::pair<std::vector<ceres::examples::Constraint2d>,
- std::vector<ceres::examples::Pose2d>>
- MatchTargetDetections(
- const std::vector<TimestampedPose> &timestamped_robot_poses,
- const std::vector<TimestampedDetection> &timestamped_target_detections);
-
// Pairs consecutive target detections that are not too far apart in time into
// constraints. Meant to be used on a system without a position measurement.
// Assumes timestamped_target_detections is in chronological order.
// max_dt is the maximum time between two target detections to match them up.
// If too much time passes, the recording device (box of pis) could have moved
// too much
- static std::vector<ceres::examples::Constraint2d> MatchTargetDetections(
+ static ceres::examples::VectorOfConstraints MatchTargetDetections(
const std::vector<TimestampedDetection> &timestamped_target_detections,
aos::distributed_clock::duration max_dt = std::chrono::milliseconds(1));
// Computes the inverse of the covariance matrix for the robot movement
// between two target detections over the given time period. Ceres calls
// this matrix the "information"
- static Eigen::Matrix3d ComputeConfidence(
+ static TargetMapper::ConfidenceMatrix ComputeConfidence(
aos::distributed_clock::time_point start,
aos::distributed_clock::time_point end, double distance_from_camera_start,
double distance_from_camera_end);
private:
- static ceres::examples::Pose2d InterpolatePose(
- const TimestampedPose &pose_start, const TimestampedPose &pose_end,
- aos::distributed_clock::time_point time);
-
// Computes the constraint between the start and end pose of the targets: the
// relative pose between the start and end target locations in the frame of
- // the start target. Takes into account the robot motion in the time between
- // the two detections.
- static ceres::examples::Constraint2d ComputeTargetConstraint(
- const TimestampedDetection &target_detection_start,
- const Eigen::Affine3d &H_robotstart_robotend,
- const TimestampedDetection &target_detection_end,
- const Eigen::Matrix3d &confidence);
- // Same as above function, but assumes no robot motion between the two
- // detections
- static ceres::examples::Constraint2d ComputeTargetConstraint(
+ // the start target.
+ static ceres::examples::Constraint3d ComputeTargetConstraint(
const TimestampedDetection &target_detection_start,
const TimestampedDetection &target_detection_end,
- const Eigen::Matrix3d &confidence);
+ const TargetMapper::ConfidenceMatrix &confidence);
};
+std::ostream &operator<<(std::ostream &os, ceres::examples::Pose3d pose);
+std::ostream &operator<<(std::ostream &os,
+ ceres::examples::Constraint3d constraint);
+
} // namespace frc971::vision
#endif // FRC971_VISION_TARGET_MAPPER_H_
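
For reference, a rough usage sketch of the 3d mapping flow using only the
declarations above. The SolveMap wrapper, the json path, and the detection
source are hypothetical, and the exact Solve() signature is outside this
hunk, so it is only referenced in a comment:

    #include <chrono>
    #include <vector>

    #include "frc971/vision/target_mapper.h"

    // Hypothetical wrapper: detections would come from the target detection
    // pipeline running on the box of pis.
    void SolveMap(
        const std::vector<frc971::vision::DataAdapter::TimestampedDetection>
            &timestamped_target_detections) {
      // Pair consecutive detections that are close in time into 3d constraints.
      ceres::examples::VectorOfConstraints constraints =
          frc971::vision::DataAdapter::MatchTargetDetections(
              timestamped_target_detections, std::chrono::milliseconds(1));

      // Seed the optimization with initial guesses from a TargetMap json
      // (hypothetical path) and build the mapper.
      frc971::vision::TargetMapper mapper("path/to/target_map.json", constraints);

      // mapper.Solve(...) would then run the nonlinear least squares solve;
      // solved poses are available afterwards through mapper.target_poses().
    }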