Add solver for AprilTag mapping

Takes in robot locations (at the control loop frequency) and less frequent
localizations from AprilTag detections. Builds a graph of constraints between
consecutive target detections and solves for the optimal target locations
on the field.
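
Rough usage sketch (the MapTargets() driver and the numeric initial
guesses below are illustrative):

  #include "frc971/vision/target_mapper.h"

  namespace frc971::vision {

  // Solves for target locations given logged robot poses and detections.
  std::map<TargetMapper::TargetId, ceres::examples::Pose2d> MapTargets(
      const std::vector<DataAdapter::TimestampedPose> &robot_poses,
      const std::vector<DataAdapter::TimestampedDetection> &detections) {
    // Pair consecutive detections into constraints, interpolating the
    // robot's motion between the detection times.
    const auto [target_constraints, robot_delta_poses] =
        DataAdapter::MatchTargetDetections(robot_poses, detections);
    (void)robot_delta_poses;  // Only needed for testing.

    // Rough guesses (x, y, yaw) of where each target sits on the field.
    std::map<TargetMapper::TargetId, ceres::examples::Pose2d> target_poses{
        {1, {0.0, 0.0, 0.0}}, {2, {5.0, 2.0, 1.57}}};

    TargetMapper mapper(target_poses, target_constraints);
    mapper.Solve();
    return mapper.target_poses();
  }

  }  // namespace frc971::vision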

Change-Id: I1256f5914b7656d9e14735f9d994572688c3aaa8
Signed-off-by: Milind Upadhyay <milind.upadhyay@gmail.com>
diff --git a/frc971/vision/target_mapper.h b/frc971/vision/target_mapper.h
new file mode 100644
index 0000000..55d3c7e
--- /dev/null
+++ b/frc971/vision/target_mapper.h
@@ -0,0 +1,148 @@
+#ifndef FRC971_VISION_TARGET_MAPPER_H_
+#define FRC971_VISION_TARGET_MAPPER_H_
+
+#include <map>
+#include <optional>
+#include <utility>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "aos/events/simulated_event_loop.h"
+#include "ceres/ceres.h"
+#include "frc971/vision/ceres/types.h"
+
+namespace frc971::vision {
+
+// Estimates the positions of vision targets (e.g. AprilTags) using target
+// detections relative to a robot (which were computed using the robot's
+// position at the time of each detection). Solves a SLAM problem to estimate
+// the target locations, using the deltas between consecutive target detections.
+class TargetMapper {
+ public:
+  using TargetId = int;
+
+  struct TargetPose {
+    TargetId id;
+    ceres::examples::Pose2d pose;
+  };
+
+  // target_poses are initial guesses for the actual locations of the targets on
+  // the field.
+  // target_constraints are the deltas between consecutive target detections,
+  // and are usually prepared by the DataAdapter class below.
+  TargetMapper(std::map<TargetId, ceres::examples::Pose2d> target_poses,
+               std::vector<ceres::examples::Constraint2d> target_constraints);
+
+  void Solve();
+
+  static std::optional<TargetPose> GetTargetPoseById(
+      std::vector<TargetPose> target_poses, TargetId target_id);
+
+  std::map<TargetId, ceres::examples::Pose2d> target_poses() {
+    return target_poses_;
+  }
+
+ private:
+  // Output the poses to std::cout with format: ID: x y yaw_radians.
+  static void OutputPoses(const std::map<int, ceres::examples::Pose2d> &poses);
+
+  // Constructs the nonlinear least squares optimization problem from the
+  // pose graph constraints.
+  void BuildOptimizationProblem(
+      std::map<TargetId, ceres::examples::Pose2d> *target_poses,
+      const std::vector<ceres::examples::Constraint2d> &constraints,
+      ceres::Problem *problem);
+
+  // Returns true if the solve was successful.
+  bool SolveOptimizationProblem(ceres::Problem *problem);
+
+  std::map<TargetId, ceres::examples::Pose2d> target_poses_;
+  std::vector<ceres::examples::Constraint2d> target_constraints_;
+};
+
+// Utility functions for dealing with ceres::examples::Pose2d structs.
+class PoseUtils {
+ public:
+  // Embeds a 2d pose into a 3d affine transformation to be used in 3d
+  // computation.
+  static Eigen::Affine3d Pose2dToAffine3d(ceres::examples::Pose2d pose2d);
+  // Assumes only x and y translation, and only z rotation (yaw).
+  static ceres::examples::Pose2d Affine3dToPose2d(Eigen::Affine3d H);
+
+  // Computes pose_2 relative to pose_1. This is equivalent to
+  // (pose_1^-1 * pose_2).
+  static ceres::examples::Pose2d ComputeRelativePose(
+      ceres::examples::Pose2d pose_1, ceres::examples::Pose2d pose_2);
+
+  // Computes pose_2 given pose_1 and pose_2 relative to pose_1. This is
+  // equivalent to (pose_1 * pose_2_relative).
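+  // For example, ComputeOffsetPose(pose_1, ComputeRelativePose(pose_1, pose_2))
+  // recovers pose_2.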
+  static ceres::examples::Pose2d ComputeOffsetPose(
+      ceres::examples::Pose2d pose_1, ceres::examples::Pose2d pose_2_relative);
+};
+
+// Transforms robot position and target detection data into target constraints
+// to be used for mapping. Interpolates continuous-time data, converting it to
+// discrete detection time steps.
+class DataAdapter {
+ public:
+  // Pairs pose with a time point
+  struct TimestampedPose {
+    aos::distributed_clock::time_point time;
+    ceres::examples::Pose2d pose;
+  };
+
+  // Pairs target detection with a time point
+  struct TimestampedDetection {
+    aos::distributed_clock::time_point time;
+    // Pose of target relative to robot
+    Eigen::Affine3d H_robot_target;
+    TargetMapper::TargetId id;
+  };
+
+  // Pairs consecutive target detections into constraints, and interpolates
+  // robot poses based on time points to compute motion between detections.
+  // This prepares data to be used by TargetMapper. Also returns a vector of
+  // delta robot poses corresponding to each constraint, to be used for testing.
+  //
+  // Assumes both inputs are in chronological order.
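+  //
+  // For example, detections at times t1 < t2 < t3 would produce constraints
+  // for (t1, t2) and (t2, t3), with the robot's motion between each pair of
+  // detection times interpolated from timestamped_robot_poses.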
+  static std::pair<std::vector<ceres::examples::Constraint2d>,
+                   std::vector<ceres::examples::Pose2d>>
+  MatchTargetDetections(
+      const std::vector<TimestampedPose> &timestamped_robot_poses,
+      const std::vector<TimestampedDetection> &timestamped_target_detections);
+
+  // Computes the inverse of the covariance matrix, assuming there was robot
+  // movement between target detections over the given time period. Ceres
+  // calls this matrix the "information".
+  static Eigen::Matrix3d ComputeConfidence(
+      aos::distributed_clock::time_point start,
+      aos::distributed_clock::time_point end);
+
+ private:
+  static ceres::examples::Pose2d InterpolatePose(
+      const TimestampedPose &pose_start, const TimestampedPose &pose_end,
+      aos::distributed_clock::time_point time);
+
+  // Computes the constraint between the start and end target detections: the
+  // relative pose between the start and end target locations, expressed in the
+  // frame of the start target. Takes into account the robot motion in the time
+  // between the two detections.
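+  // With H_robotstart_targetstart and H_robotend_targetend taken from the two
+  // detections, this is roughly H_robotstart_targetstart^-1 *
+  // H_robotstart_robotend * H_robotend_targetend, converted back to a 2d pose.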
+  static ceres::examples::Constraint2d ComputeTargetConstraint(
+      const TimestampedDetection &target_detection_start,
+      const Eigen::Affine3d &H_robotstart_robotend,
+      const TimestampedDetection &target_detection_end,
+      const Eigen::Matrix3d &confidence);
+};
+
+}  // namespace frc971::vision
+
+#endif  // FRC971_VISION_TARGET_MAPPER_H_