Use extrinsics to seed vision solver
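
Seed the solver's roll and pitch from the fixed camera extrinsics, and
constrain the solution to stay near those seeds instead of around
hardcoded angles. Also include the IMU height when seeding the camera
height, update pi1's camera definition now that it is no longer on the
turret, and make the viewer read intrinsics and extrinsics from the
calibration data instead of hardcoded values.

When reading local images, the viewer now picks a calibration by node
and team number, e.g. (image directory illustrative):

  viewer --png_dir=/tmp/images --calibration_node=pi1 \
      --calibration_team_number=971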

Signed-off-by: Milind Upadhyay <milind.upadhyay@gmail.com>
Change-Id: I674da5535fa3894468e66f11cbbc3e9d32d8cedf
diff --git a/y2022/vision/BUILD b/y2022/vision/BUILD
index a4e44f3..9e93343 100644
--- a/y2022/vision/BUILD
+++ b/y2022/vision/BUILD
@@ -185,6 +185,7 @@
         "//aos/time",
         "//frc971/control_loops:quaternion_utils",
         "//third_party:opencv",
+        "//y2022:constants",
         "@com_google_absl//absl/strings:str_format",
         "@com_google_ceres_solver//:ceres",
         "@org_tuxfamily_eigen//:eigen",
@@ -214,6 +215,7 @@
     visibility = ["//y2022:__subpackages__"],
     deps = [
         ":blob_detector_lib",
+        ":calibration_data",
         ":target_estimator_lib",
         "//aos:init",
         "//aos/events:shm_event_loop",
diff --git a/y2022/vision/camera_definition.py b/y2022/vision/camera_definition.py
index 21ae7f3..7ad2c0b 100644
--- a/y2022/vision/camera_definition.py
+++ b/y2022/vision/camera_definition.py
@@ -99,10 +99,8 @@
     T = np.array([0.0, 0.0, 0.0])
 
     if pi_number == "pi1":
-        # This is the turret camera
-        camera_pitch = -10.0 * np.pi / 180.0
-        is_turret = True
-        T = np.array([7.5 * 0.0254, -5.5 * 0.0254, 41.0 * 0.0254])
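+        # pi1 is no longer on the turret; it is now a fixed camera.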
+        camera_pitch = -35.0 * np.pi / 180.0
+        T = np.array([0.0, 0.0, 37.0 * 0.0254])
     elif pi_number == "pi2":
         T = np.array([4.5 * 0.0254, 3.75 * 0.0254, 26.0 * 0.0254])
     elif pi_number == "pi3":
diff --git a/y2022/vision/camera_reader.h b/y2022/vision/camera_reader.h
index 3b1eca6..edc3a1a 100644
--- a/y2022/vision/camera_reader.h
+++ b/y2022/vision/camera_reader.h
@@ -25,8 +25,6 @@
 using namespace frc971::vision;
 
 // TODO<jim>: Probably need to break out LED control to separate process
-// TODO<jim>: Need to add sync with camera to strobe lights
-
 class CameraReader {
  public:
   CameraReader(aos::ShmEventLoop *event_loop,
@@ -66,18 +64,20 @@
   void ReadImage();
 
   cv::Mat CameraIntrinsics() const {
-    const cv::Mat result(3, 3, CV_32F,
-                         const_cast<void *>(static_cast<const void *>(
-                             camera_calibration_->intrinsics()->data())));
+    cv::Mat result(3, 3, CV_32F,
+                   const_cast<void *>(static_cast<const void *>(
+                       camera_calibration_->intrinsics()->data())));
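+    // The intrinsics are stored as floats; convert to doubles for the
+    // solver.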
+    result.convertTo(result, CV_64F);
     CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
     return result;
   }
 
   cv::Mat CameraExtrinsics() const {
-    const cv::Mat result(
+    cv::Mat result(
         4, 4, CV_32F,
         const_cast<void *>(static_cast<const void *>(
             camera_calibration_->fixed_extrinsics()->data()->data())));
+    result.convertTo(result, CV_64F);
     CHECK_EQ(result.total(),
              camera_calibration_->fixed_extrinsics()->data()->size());
     return result;
diff --git a/y2022/vision/target_estimator.cc b/y2022/vision/target_estimator.cc
index c78a1c6..130759c 100644
--- a/y2022/vision/target_estimator.cc
+++ b/y2022/vision/target_estimator.cc
@@ -10,6 +10,7 @@
 #include "opencv2/features2d.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc.hpp"
+#include "y2022/constants.h"
 
 DEFINE_bool(freeze_roll, false, "If true, don't solve for roll");
 DEFINE_bool(freeze_pitch, false, "If true, don't solve for pitch");
@@ -103,8 +104,9 @@
       yaw_(M_PI),
       distance_(3.0),
       angle_to_camera_(0.0),
-      // TODO(milind): add IMU height
-      camera_height_(extrinsics.at<double>(2, 3)) {
+      // Seed the camera height from the extrinsics' z translation plus
+      // the IMU's height.
+      camera_height_(extrinsics.at<double>(2, 3) +
+                     constants::Values::kImuHeight()) {
   cv::cv2eigen(intrinsics, intrinsics_);
   cv::cv2eigen(extrinsics, extrinsics_);
 }
@@ -119,6 +121,13 @@
     problem->SetParameterUpperBound(param, 0, max);
   }
 }
+
+// With X, Y, Z being hub axes and x, y, z being camera axes,
+// x = -Y, y = -Z, z = X
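+// e.g., kHubToCameraAxes * (1, 0, 0)^T = (0, 0, 1)^T: the hub's +X axis
+// maps to the camera's +z (optical) axis.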
+const Eigen::Matrix3d kHubToCameraAxes =
+    (Eigen::Matrix3d() << 0.0, -1.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0)
+        .finished();
+
 }  // namespace
 
 void TargetEstimator::Solve(
@@ -177,18 +186,23 @@
   problem.AddResidualBlock(cost_function, nullptr, &roll_, &pitch_, &yaw_,
                            &distance_, &angle_to_camera_, &camera_height_);
 
-  // TODO(milind): seed values at localizer output, and constrain to be close to
-  // that.
+  // Estimate the camera's rotation from the fixed extrinsics, converted
+  // to hub axes.
+  const Eigen::Vector3d ypr_extrinsics =
+      (Eigen::Affine3d(extrinsics_).rotation() * kHubToCameraAxes)
+          .eulerAngles(2, 1, 0);
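+  // eulerAngles(2, 1, 0) returns (yaw, pitch, roll) for R = Rz * Ry * Rx,
+  // so z() below is roll and y() is pitch.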
+  // TODO(milind): seed with localizer output as well
+  const double roll_seed = ypr_extrinsics.z();
+  const double pitch_seed = ypr_extrinsics.y();
 
-  // Constrain the rotation, otherwise there can be multiple solutions.
-  // There shouldn't be too much roll or pitch
-  constexpr double kMaxRoll = 0.1;
-  SetBoundsOrFreeze(&roll_, FLAGS_freeze_roll, -kMaxRoll, kMaxRoll, &problem);
+  // Constrain the rotation to be near the seeded values, otherwise there
+  // can be multiple solutions. There shouldn't be too much roll or pitch.
+  constexpr double kMaxRollDelta = 0.1;
+  SetBoundsOrFreeze(&roll_, FLAGS_freeze_roll, roll_seed - kMaxRollDelta,
+                    roll_seed + kMaxRollDelta, &problem);
 
-  constexpr double kPitch = -35.0 * M_PI / 180.0;
   constexpr double kMaxPitchDelta = 0.15;
-  SetBoundsOrFreeze(&pitch_, FLAGS_freeze_pitch, kPitch - kMaxPitchDelta,
-                    kPitch + kMaxPitchDelta, &problem);
+  SetBoundsOrFreeze(&pitch_, FLAGS_freeze_pitch, pitch_seed - kMaxPitchDelta,
+                    pitch_seed + kMaxPitchDelta, &problem);
   // Constrain the yaw to where the target would be visible
   constexpr double kMaxYawDelta = M_PI / 4.0;
   SetBoundsOrFreeze(&yaw_, FLAGS_freeze_yaw, M_PI - kMaxYawDelta,
@@ -372,18 +386,12 @@
     Eigen::Transform<S, 3, Eigen::Affine> &H_hub_camera) const {
   using Vector3s = Eigen::Matrix<S, 3, 1>;
 
-  // With X, Y, Z being world axes and x, y, z being camera axes,
-  // x = Y, y = Z, z = -X
-  static const Eigen::Matrix3d kCameraAxisConversion =
-      (Eigen::Matrix3d() << 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0)
-          .finished();
   const Vector3s tape_point_hub_eigen =
       Vector3s(S(tape_point_hub.x), S(tape_point_hub.y), S(tape_point_hub.z));
   // Project the 3d tape point onto the image using the transformation and
   // intrinsics
   const Vector3s tape_point_proj =
-      intrinsics_ *
-      (kCameraAxisConversion * (H_hub_camera * tape_point_hub_eigen));
+      intrinsics_ * (kHubToCameraAxes * (H_hub_camera * tape_point_hub_eigen));
 
   // Normalize the projected point
   return {tape_point_proj.x() / tape_point_proj.z(),
diff --git a/y2022/vision/viewer.cc b/y2022/vision/viewer.cc
index feff592..dfcd49d 100644
--- a/y2022/vision/viewer.cc
+++ b/y2022/vision/viewer.cc
@@ -10,6 +10,7 @@
 #include "aos/time/time.h"
 #include "frc971/vision/vision_generated.h"
 #include "y2022/vision/blob_detector.h"
+#include "y2022/vision/calibration_data.h"
 #include "y2022/vision/target_estimate_generated.h"
 #include "y2022/vision/target_estimator.h"
 
@@ -18,6 +19,11 @@
 DEFINE_string(channel, "/camera", "Channel name for the image.");
 DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
 DEFINE_string(png_dir, "", "Path to a set of images to display.");
+DEFINE_string(calibration_node, "",
+              "If reading locally, use the calibration for this node");
+DEFINE_int32(
+    calibration_team_number, 971,
+    "If reading locally, use the calibration for a node with this team number");
 DEFINE_uint64(skip, 0,
               "Number of images to skip if doing local reading (png_dir set).");
 DEFINE_bool(show_features, true, "Show the blobs.");
@@ -171,18 +177,35 @@
   std::vector<cv::String> file_list;
   cv::glob(FLAGS_png_dir + "/*.png", file_list, false);
 
-  cv::Mat intrinsics = cv::Mat::zeros(cv::Size(3, 3), CV_64F);
-  intrinsics.at<double>(0, 0) = 391.63916;
-  intrinsics.at<double>(0, 1) = 0.0;
-  intrinsics.at<double>(0, 2) = 312.691162;
-  intrinsics.at<double>(1, 0) = 0.0;
-  intrinsics.at<double>(1, 1) = 391.535889;
-  intrinsics.at<double>(1, 2) = 267.138672;
-  intrinsics.at<double>(2, 0) = 0.0;
-  intrinsics.at<double>(2, 1) = 0.0;
-  intrinsics.at<double>(2, 2) = 1.0;
-  cv::Mat extrinsics = cv::Mat::zeros(cv::Size(4, 4), CV_64F);
-  extrinsics.at<double>(2, 3) = 0.9398;
+  const aos::FlatbufferSpan<calibration::CalibrationData> calibration_data(
+      CalibrationData());
+
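+  // Find the calibration matching the requested node and team number.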
+  const calibration::CameraCalibration *calibration = nullptr;
+  for (const calibration::CameraCalibration *candidate :
+       *calibration_data.message().camera_calibrations()) {
+    if ((candidate->node_name()->string_view() == FLAGS_calibration_node) &&
+        (candidate->team_number() == FLAGS_calibration_team_number)) {
+      calibration = candidate;
+      break;
+    }
+  }
+
+  CHECK(calibration) << "No calibration data found for node \""
+                     << FLAGS_calibration_node << "\" with team number "
+                     << FLAGS_calibration_team_number;
+
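+  // The calibration stores floats; convert to doubles, which is what
+  // TargetEstimator expects.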
+  const cv::Mat intrinsics_float(3, 3, CV_32F,
+                                 const_cast<void *>(static_cast<const void *>(
+                                     calibration->intrinsics()->data())));
+  cv::Mat intrinsics;
+  intrinsics_float.convertTo(intrinsics, CV_64F);
+
+  const cv::Mat extrinsics_float(
+      4, 4, CV_32F,
+      const_cast<void *>(static_cast<const void *>(
+          calibration->fixed_extrinsics()->data()->data())));
+  cv::Mat extrinsics;
+  extrinsics_float.convertTo(extrinsics, CV_64F);
 
   TargetEstimator estimator(intrinsics, extrinsics);