Fixing the data sent as camera_to_target and adding field_to_camera

I am pretty sure camera_to_target is the inverse of what gets returned by solvePnPRansac.

Adding field_to_camera to make it more convenient for Alex to show the robot pose estimate

Change-Id: I98d1ef64f8b142325e6f94b213885ae6868333ca
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 23986bf..67613a1 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -70,6 +70,7 @@
                             const cv::Mat &descriptors,
                             const std::vector<std::vector<cv::DMatch>> &matches,
                             const std::vector<cv::Mat> &camera_target_list,
+                            const std::vector<cv::Mat> &field_camera_list,
                             aos::Sender<sift::ImageMatchResult> *result_sender,
                             bool send_details);
 
@@ -170,6 +171,7 @@
     const cv::Mat &descriptors,
     const std::vector<std::vector<cv::DMatch>> &matches,
     const std::vector<cv::Mat> &camera_target_list,
+    const std::vector<cv::Mat> &field_camera_list,
     aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
   auto builder = result_sender->MakeBuilder();
   const auto camera_calibration_offset =
@@ -187,6 +189,7 @@
 
   std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
 
+  CHECK_EQ(camera_target_list.size(), field_camera_list.size());
   for (size_t i = 0; i < camera_target_list.size(); ++i) {
     cv::Mat camera_target = camera_target_list[i];
     CHECK(camera_target.isContinuous());
@@ -194,12 +197,21 @@
         reinterpret_cast<float *>(camera_target.data), camera_target.total());
     const flatbuffers::Offset<sift::TransformationMatrix> transform_offset =
         sift::CreateTransformationMatrix(*builder.fbb(), data_offset);
+
+    cv::Mat field_camera = field_camera_list[i];
+    CHECK(field_camera.isContinuous());
+    const auto fc_data_offset = builder.fbb()->CreateVector<float>(
+        reinterpret_cast<float *>(field_camera.data), field_camera.total());
+    const flatbuffers::Offset<sift::TransformationMatrix> fc_transform_offset =
+        sift::CreateTransformationMatrix(*builder.fbb(), fc_data_offset);
+
     const flatbuffers::Offset<sift::TransformationMatrix>
         field_to_target_offset =
             aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb());
 
     sift::CameraPose::Builder pose_builder(*builder.fbb());
     pose_builder.add_camera_to_target(transform_offset);
+    pose_builder.add_field_to_camera(fc_transform_offset);
     pose_builder.add_field_to_target(field_to_target_offset);
     camera_poses.emplace_back(pose_builder.Finish());
   }
@@ -278,6 +290,7 @@
   // The minimum number of matches in a training image for us to use it.
   static constexpr int kMinimumMatchCount = 10;
   std::vector<cv::Mat> camera_target_list;
+  std::vector<cv::Mat> field_camera_list;
 
   for (size_t i = 0; i < per_image_matches.size(); ++i) {
     const PerImageMatches &per_image = per_image_matches[i];
@@ -285,15 +298,49 @@
       continue;
     }
 
+    // Transformations (rotations and translations) for various
+    // coordinate frames.  R_XXXX_vec is the Rodrigues (angle-axis)
+    // representation of the 3x3 rotation R_XXXX.
+    // Transform from camera to target frame
     cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
+    // Transform from camera to field origin (global) reference frame
+    cv::Mat R_camera_field_vec, R_camera_field, T_camera_field;
+    // Inverse of camera-to-field -- defines the location of the camera in
+    // the global (field) reference frame
+    cv::Mat R_field_camera_vec, R_field_camera, T_field_camera;
+
     // Compute the pose of the camera (global origin relative to camera)
     cv::solvePnPRansac(per_image.training_points_3d, per_image.query_points,
                        CameraIntrinsics(), CameraDistCoeffs(),
-                       R_camera_target_vec, T_camera_target);
-    T_camera_target = T_camera_target.t();
-    // Convert Camera from angle-axis (3x1) to homogenous (3x3) representation
-    cv::Rodrigues(R_camera_target_vec, R_camera_target);
+                       R_camera_field_vec, T_camera_field);
 
+    // Get matrix version of R_camera_field
+    cv::Rodrigues(R_camera_field_vec, R_camera_field);
+
+    // Compute H_field_camera = H_camera_field^-1
+    R_field_camera = R_camera_field.t();
+    T_field_camera = -R_field_camera.dot(T_camera_field);
+    T_field_camera = T_field_camera.t();
+
+    // Extract the field_target transformation
+    const cv::Mat H_field_target(4, 4, CV_32F,
+                                 const_cast<void *>(static_cast<const void *>(
+                                     FieldToTarget(i)->data())));
+    const cv::Mat R_field_target =
+        H_field_target(cv::Range(0, 3), cv::Range(0, 3));
+    const cv::Mat T_field_target =
+        H_field_target(cv::Range(3, 4), cv::Range(0, 3));
+
+    // Use it to get the relative pose from camera to target
+    R_camera_target = R_camera_field.dot(R_field_target);
+    T_camera_target = R_camera_field.dot(T_field_target) + T_camera_field;
+
+    // Convert camera rotation from angle-axis (3x1) to homogeneous (3x3)
+    // representation and turn T into a 3x1 vector
+    cv::Rodrigues(R_camera_target_vec, R_camera_target);
+    T_camera_target = T_camera_target.t();
+
+    // Set H_camera_target
     {
       CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
       CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
@@ -302,15 +349,29 @@
       T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
       camera_target.at<float>(3, 3) = 1;
       CHECK(camera_target.isContinuous());
-      camera_target_list.push_back(camera_target);
+      camera_target_list.push_back(camera_target.clone());
+    }
+
+    // Set H_field_camera
+    {
+      CHECK_EQ(cv::Size(3, 3), R_field_camera.size());
+      CHECK_EQ(cv::Size(3, 1), T_field_camera.size());
+      cv::Mat field_camera = cv::Mat::zeros(4, 4, CV_32F);
+      R_field_camera.copyTo(field_camera(cv::Range(0, 3), cv::Range(0, 3)));
+      T_field_camera.copyTo(field_camera(cv::Range(3, 4), cv::Range(0, 3)));
+      field_camera.at<float>(3, 3) = 1;
+      CHECK(field_camera.isContinuous());
+      field_camera_list.push_back(field_camera.clone());
     }
   }
   // Now, send our two messages-- one large, with details for remote
   // debugging(features), and one smaller
   SendImageMatchResult(image, keypoints, descriptors, matches,
-                       camera_target_list, &detailed_result_sender_, true);
+                       camera_target_list, field_camera_list,
+                       &detailed_result_sender_, true);
   SendImageMatchResult(image, keypoints, descriptors, matches,
-                       camera_target_list, &result_sender_, false);
+                       camera_target_list, field_camera_list, &result_sender_,
+                       false);
 }
 
 void CameraReader::ReadImage() {