Break out Features from the flatbuffer data stream; add dist_coeffs; name cleanup

Passed through yapf, buildifier, clang-format

Send two separate messages: one with detailed features (for remote debugging), and a smaller one without.

Change-Id: I70b2bca2d647cd03e2bc538a9dee68ed8155355a
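
A minimal sketch of the consumer side of this change, assuming the standard AOS watcher API; the channel names are the ones added below, while the config path, namespaces, and logging are illustrative:

    // Sketch only: localizer-facing code watches the small message on
    // "/camera"; debugging tools watch "/camera/detailed", which also
    // carries the SIFT features.
    #include "aos/configuration.h"
    #include "aos/events/shm_event_loop.h"
    #include "aos/init.h"
    #include "glog/logging.h"
    #include "y2020/vision/sift/sift_generated.h"

    int main(int argc, char **argv) {
      aos::InitGoogle(&argc, &argv);
      aos::FlatbufferDetachedBuffer<aos::Configuration> config =
          aos::configuration::ReadConfig("config.json");
      aos::ShmEventLoop event_loop(&config.message());

      // Small message: image matches and poses, no feature payload.
      event_loop.MakeWatcher(
          "/camera", [](const frc971::vision::sift::ImageMatchResult &result) {
            if (result.camera_poses() != nullptr) {
              LOG(INFO) << "Got " << result.camera_poses()->size() << " poses";
            }
          });

      // Detailed message: additionally carries the per-image features.
      event_loop.MakeWatcher(
          "/camera/detailed",
          [](const frc971::vision::sift::ImageMatchResult &result) {
            if (result.features() != nullptr) {
              LOG(INFO) << result.features()->size() << " features";
            }
          });

      event_loop.Run();
    }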
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 6850a27..60276c9 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -16,6 +16,7 @@
     hdrs = [
         "v4l2_reader.h",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":vision_fbs",
         "//aos/events:event_loop",
@@ -23,7 +24,6 @@
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/base",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 cc_binary(
@@ -35,6 +35,7 @@
         "//tools:k8",
         "//tools:armhf-debian",
     ],
+    visibility = ["//y2020:__subpackages__"],
     deps = [
         ":v4l2_reader",
         ":vision_fbs",
@@ -43,12 +44,11 @@
         "//aos/events:shm_event_loop",
         "//aos/network:team_number",
         "//third_party:opencv",
-        "//y2020/vision/tools/python_code:sift_training_data",
         "//y2020/vision/sift:sift971",
         "//y2020/vision/sift:sift_fbs",
         "//y2020/vision/sift:sift_training_fbs",
+        "//y2020/vision/tools/python_code:sift_training_data",
     ],
-    visibility = ["//y2020:__subpackages__"],
 )
 
 flatbuffer_ts_library(
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index d9b5ea0..ed4082d 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -7,10 +7,10 @@
 #include "aos/init.h"
 #include "aos/network/team_number.h"
 
-#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2020/vision/sift/sift971.h"
 #include "y2020/vision/sift/sift_generated.h"
 #include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2020/vision/v4l2_reader.h"
 #include "y2020/vision/vision_generated.h"
 
@@ -31,6 +31,8 @@
         image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
         result_sender_(
             event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+        detailed_result_sender_(
+            event_loop->MakeSender<sift::ImageMatchResult>("/camera/detailed")),
         read_image_timer_(event_loop->AddTimer([this]() {
           ReadImage();
           read_image_timer_->Setup(event_loop_->monotonic_now());
@@ -63,6 +65,14 @@
                const std::vector<cv::KeyPoint> &keypoints,
                const cv::Mat &descriptors);
 
+  void SendImageMatchResult(const CameraImage &image,
+                            const std::vector<cv::KeyPoint> &keypoints,
+                            const cv::Mat &descriptors,
+                            const std::vector<std::vector<cv::DMatch>> &matches,
+                            const std::vector<cv::Mat> &camera_target_list,
+                            aos::Sender<sift::ImageMatchResult> *result_sender,
+                            bool send_details);
+
   // Returns the 3D location for the specified training feature.
   cv::Point3f Training3dPoint(int training_image_index,
                               int feature_index) const {
@@ -93,6 +103,14 @@
     return result;
   }
 
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   aos::EventLoop *const event_loop_;
   const sift::TrainingData *const training_data_;
   const sift::CameraCalibration *const camera_calibration_;
@@ -100,6 +118,7 @@
   cv::FlannBasedMatcher *const matcher_;
   aos::Sender<CameraImage> image_sender_;
   aos::Sender<sift::ImageMatchResult> result_sender_;
+  aos::Sender<sift::ImageMatchResult> detailed_result_sender_;
   // We schedule this immediately to read an image. Having it on a timer means
   // other things can run on the event loop in between.
   aos::TimerHandler *const read_image_timer_;
@@ -146,13 +165,58 @@
   }
 }
 
-void CameraReader::ProcessImage(const CameraImage &image) {
-  // Be ready to pack the results up and send them out. We can pack things into
-  // this memory as we go to allow reusing temporaries better.
-  auto builder = result_sender_.MakeBuilder();
+void CameraReader::SendImageMatchResult(
+    const CameraImage &image, const std::vector<cv::KeyPoint> &keypoints,
+    const cv::Mat &descriptors,
+    const std::vector<std::vector<cv::DMatch>> &matches,
+    const std::vector<cv::Mat> &camera_target_list,
+    aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
+  auto builder = result_sender->MakeBuilder();
   const auto camera_calibration_offset =
       aos::CopyFlatBuffer(camera_calibration_, builder.fbb());
 
+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+      features_offset;
+  if (send_details) {
+    features_offset = PackFeatures(builder.fbb(), keypoints, descriptors);
+  }
+
+  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
+
+  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
+
+  for (size_t i = 0; i < camera_target_list.size(); ++i) {
+    cv::Mat camera_target = camera_target_list[i];
+    CHECK(camera_target.isContinuous());
+    const auto data_offset = builder.fbb()->CreateVector<float>(
+        reinterpret_cast<float *>(camera_target.data), camera_target.total());
+    auto transform_offset =
+        sift::CreateTransformationMatrix(*builder.fbb(), data_offset);
+
+    sift::CameraPose::Builder pose_builder(*builder.fbb());
+    pose_builder.add_camera_to_target(transform_offset);
+    pose_builder.add_field_to_target(
+        aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
+    camera_poses.emplace_back(pose_builder.Finish());
+  }
+  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
+
+  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
+  result_builder.add_image_matches(image_matches_offset);
+  result_builder.add_camera_poses(camera_poses_offset);
+  if (send_details) {
+    result_builder.add_features(features_offset);
+  }
+  result_builder.add_image_monotonic_timestamp_ns(
+      image.monotonic_timestamp_ns());
+  result_builder.add_camera_calibration(camera_calibration_offset);
+
+  // TODO(Jim): Add the target point computed from the matches and mapped by
+  // the homography.
+  builder.Send(result_builder.Finish());
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
   // First, we need to extract the brightness information. This can't really be
   // fused into the beginning of the SIFT algorithm because the algorithm needs
   // to look at the base image directly. It also only takes 2ms on our images.
@@ -169,13 +233,10 @@
   std::vector<cv::KeyPoint> keypoints;
   cv::Mat descriptors;
   sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
-  const auto features_offset =
-      PackFeatures(builder.fbb(), keypoints, descriptors);
 
   // Then, match those features against our training data.
   std::vector<std::vector<cv::DMatch>> matches;
   matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
-  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
 
   struct PerImageMatches {
     std::vector<const std::vector<cv::DMatch> *> matches;
@@ -212,8 +273,8 @@
 
   // The minimum number of matches in a training image for us to use it.
   static constexpr int kMinimumMatchCount = 10;
+  std::vector<cv::Mat> camera_target_list;
 
-  std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
   for (size_t i = 0; i < per_image_matches.size(); ++i) {
     const PerImageMatches &per_image = per_image_matches[i];
     if (per_image.matches.size() < kMinimumMatchCount) {
@@ -223,12 +284,11 @@
     cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
     // Compute the pose of the camera (global origin relative to camera)
     cv::solvePnPRansac(per_image.training_points_3d, per_image.query_points,
-                       CameraIntrinsics(), cv::noArray(), R_camera_target_vec,
-                       T_camera_target);
+                       CameraIntrinsics(), CameraDistCoeffs(),
+                       R_camera_target_vec, T_camera_target);
     // Convert Camera from angle-axis (3x1) to homogenous (3x3) representation
     cv::Rodrigues(R_camera_target_vec, R_camera_target);
 
-    sift::CameraPose::Builder pose_builder(*builder.fbb());
     {
       CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
       CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
@@ -237,25 +297,15 @@
       T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
       camera_target.at<float>(3, 3) = 1;
       CHECK(camera_target.isContinuous());
-      const auto data_offset = builder.fbb()->CreateVector<float>(
-          reinterpret_cast<float *>(camera_target.data), camera_target.total());
-      pose_builder.add_camera_to_target(
-          sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
+      camera_target_list.push_back(camera_target);
     }
-    pose_builder.add_field_to_target(
-        aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
-    camera_poses.emplace_back(pose_builder.Finish());
   }
-  const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
-
-  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
-  result_builder.add_image_matches(image_matches_offset);
-  result_builder.add_camera_poses(camera_poses_offset);
-  result_builder.add_features(features_offset);
-  result_builder.add_image_monotonic_timestamp_ns(
-      image.monotonic_timestamp_ns());
-  result_builder.add_camera_calibration(camera_calibration_offset);
-  builder.Send(result_builder.Finish());
+  // Now, send our two messages: one large, with details (features) for
+  // remote debugging, and one smaller.
+  SendImageMatchResult(image, keypoints, descriptors, matches,
+                       camera_target_list, &detailed_result_sender_, true);
+  SendImageMatchResult(image, keypoints, descriptors, matches,
+                       camera_target_list, &result_sender_, false);
 }
 
 void CameraReader::ReadImage() {
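
CameraIntrinsics() and the new CameraDistCoeffs() both wrap flatbuffer-owned float data in a cv::Mat header without copying; the const_cast only satisfies the constructor, since OpenCV just reads the data. A standalone sketch of the same zero-copy pattern feeding cv::solvePnPRansac, with plain vectors standing in for the flatbuffer [float] fields:

    #include <vector>

    #include <opencv2/calib3d.hpp>
    #include <opencv2/core.hpp>

    int main() {
      // Stand-ins for the flatbuffer-owned [float] fields; the backing
      // storage must outlive the cv::Mat headers below.
      const std::vector<float> intrinsics = {810, 0, 320, 0, 810, 240, 0, 0, 1};
      const std::vector<float> dist_coeffs = {0, 0, 0, 0, 0};  // k1 k2 p1 p2 k3

      // This cv::Mat constructor does not copy; it records pointer and shape.
      const cv::Mat camera_matrix(
          3, 3, CV_32F,
          const_cast<void *>(static_cast<const void *>(intrinsics.data())));
      const cv::Mat dist(
          5, 1, CV_32F,
          const_cast<void *>(static_cast<const void *>(dist_coeffs.data())));

      // With real 3d<->2d correspondences, the distortion now participates in
      // the RANSAC pose solve instead of being skipped via cv::noArray().
      std::vector<cv::Point3f> training_points_3d;  // filled from matches
      std::vector<cv::Point2f> query_points;        // filled from matches
      cv::Mat rvec, tvec;
      if (training_points_3d.size() >= 4 &&
          training_points_3d.size() == query_points.size()) {
        cv::solvePnPRansac(training_points_3d, query_points, camera_matrix,
                           dist, rvec, tvec);
      }
      return 0;
    }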
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
index a6650fd..3fa33cf 100644
--- a/y2020/vision/sift/demo_sift_training.py
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -49,14 +49,14 @@
   TrainingImage.TrainingImageStart(fbb)
   TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
   # TODO(Brian): Fill out the transformation matrices.
-  training_image = TrainingImage.TrainingImageEnd(fbb)
+  training_image_offset = TrainingImage.TrainingImageEnd(fbb)
 
   TrainingData.TrainingDataStartImagesVector(fbb, 1)
-  fbb.PrependUOffsetTRelative(training_image)
-  images = fbb.EndVector(1)
+  fbb.PrependUOffsetTRelative(training_image_offset)
+  images_offset = fbb.EndVector(1)
 
   TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images)
+  TrainingData.TrainingDataAddImages(fbb, images_offset)
   fbb.Finish(TrainingData.TrainingDataEnd(fbb))
 
   bfbs = fbb.Output()
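
The *_offset renames above reflect how the flatbuffer builder works: each Create/End call returns an offset handle into the buffer, not an object, and every child must be fully serialized before its parent table is started. The same ordering constraint, sketched with the (assumed) generated C++ API for these tables:

    #include "flatbuffers/flatbuffers.h"
    #include "y2020/vision/sift/sift_training_generated.h"

    flatbuffers::Offset<frc971::vision::sift::TrainingData> BuildTrainingData(
        flatbuffers::FlatBufferBuilder *fbb) {
      // Child first: a (here empty) TrainingImage table...
      frc971::vision::sift::TrainingImageBuilder image_builder(*fbb);
      const auto training_image_offset = image_builder.Finish();

      // ...then the vector holding it...
      const auto images_offset = fbb->CreateVector(&training_image_offset, 1);

      // ...and only now the parent TrainingData table.
      frc971::vision::sift::TrainingDataBuilder data_builder(*fbb);
      data_builder.add_images(images_offset);
      return data_builder.Finish();
    }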
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 97c2b0a..8806957 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -60,8 +60,8 @@
 }
 
 table TransformationMatrix {
-  // The matrix data. This is a row-major 4x4 matrix.
-  // In other words, the bottom row is (0, 0, 0, 1).
+  // The matrix data for a row-major 4x4 homogeneous transformation matrix.
+  // This implies the bottom row is (0, 0, 0, 1).
   data:[float];
 }
 
@@ -97,6 +97,9 @@
   //   rotation around the Z axis by the turret angle
   //   turret_extrinsics
   turret_extrinsics:TransformationMatrix;
+
+  // The standard OpenCV 5-parameter distortion coefficients
+  // (k1, k2, p1, p2, k3).
+  dist_coeffs:[float];
 }
 
 // Contains the information the EKF wants from an image matched against a single
@@ -128,6 +131,7 @@
   // The matches from this image to each of the training images which matched.
   // Each member is against the same captured image.
   image_matches:[ImageMatch];
+
   // The transformations for this image for each of the training images which
   // matched.
   // TODO(Brian): Include some kind of covariance information for these.
@@ -141,6 +145,10 @@
 
   // Information about the camera which took this image.
   camera_calibration:CameraCalibration;
+
+  // The 2D image coordinate of the target location in the matched image.
+  target_point_x:float;
+  target_point_y:float;
 }
 
 root_type ImageMatchResult;
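
For reference, dist_coeffs uses OpenCV's (k1, k2, p1, p2, k3) ordering. In normalized camera coordinates (x, y) with r^2 = x^2 + y^2, the model applied by calls like solvePnPRansac is:

    \begin{aligned}
      x' &= x\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2 p_1 x y + p_2 (r^2 + 2 x^2) \\
      y' &= y\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + p_1 (r^2 + 2 y^2) + 2 p_2 x y
    \end{aligned}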
diff --git a/y2020/vision/sift/sift_training.fbs b/y2020/vision/sift/sift_training.fbs
index 7391e76..d4fa740 100644
--- a/y2020/vision/sift/sift_training.fbs
+++ b/y2020/vision/sift/sift_training.fbs
@@ -10,6 +10,10 @@
   // from the target to the field. See CameraPose in :sift_fbs for details of
   // the conventions of this.
   field_to_target:TransformationMatrix;
+
+  // The 2D image coordinate of the target location in the training image.
+  target_point_x:float;
+  target_point_y:float;
 }
 
 // Represents the information used to match incoming images against.
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index a130ed6..a932886 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -22,10 +22,10 @@
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
@@ -54,10 +54,10 @@
 py_binary(
     name = "load_sift_training_test",
     srcs = [
-        "camera_definition.py",
+        "camera_definition_test.py",
         "define_training_data.py",
         "load_sift_training.py",
-        "target_definition.py",
+        "target_definition_test.py",
         "train_and_match.py",
     ],
     args = [
@@ -71,10 +71,10 @@
     main = "load_sift_training.py",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index f6a3591..194ddd3 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -10,6 +10,7 @@
 
     pass
 
+
 class CameraExtrinsics:
     def __init__(self):
         self.R = []
@@ -24,7 +25,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -40,13 +40,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
index f8e17f8..65d1b68 100644
--- a/y2020/vision/tools/python_code/camera_definition_test.py
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -23,7 +23,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -39,13 +38,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
@@ -54,11 +51,11 @@
 
 camera_list = []
 
-for team_number in (971, 8971, 9971): 
+for team_number in (971, 8971, 9971):
     for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
         camera_base = copy.deepcopy(web_cam_params)
         camera_base.node_name = node_name
         camera_base.team_number = team_number
-        camera_base.camera_ext.T = np.asarray(np.float32([i+1, i+1, i+1]))
+        camera_base.camera_ext.T = np.asarray(
+            np.float32([i + 1, i + 1, i + 1]))
         camera_list.append(camera_base)
-
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 3feaa99..5b959cd 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -41,6 +41,8 @@
 class TrainingImage {
  public:
   cv::Mat features_;
+  float target_point_x_;
+  float target_point_y_;
   cv::Mat field_to_target_;
 };
 
@@ -66,7 +68,8 @@
 
     CopyTrainingFeatures();
     sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
-    camera_intrinsics_ = CameraIntrinsics();
+    camera_intrinsic_matrix_ = CameraIntrinsicMatrix();
+    camera_dist_coeffs_ = CameraDistCoeffs();
     camera_extrinsics_ = CameraExtrinsics();
   }
 
@@ -86,7 +89,7 @@
         ->field_to_target();
   }
 
-  cv::Mat CameraIntrinsics() const {
+  cv::Mat CameraIntrinsicMatrix() const {
     const cv::Mat result(3, 3, CV_32F,
                          const_cast<void *>(static_cast<const void *>(
                              sift_camera_calibration_->intrinsics()->data())));
@@ -94,6 +97,14 @@
     return result;
   }
 
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   cv::Mat CameraExtrinsics() const {
     const cv::Mat result(
         4, 4, CV_32F,
@@ -111,7 +122,8 @@
 
   // We'll just extract the one that matches our current module
   const sift::CameraCalibration *sift_camera_calibration_;
-  cv::Mat camera_intrinsics_;
+  cv::Mat camera_intrinsic_matrix_;
+  cv::Mat camera_dist_coeffs_;
   cv::Mat camera_extrinsics_;
 
   TrainingData training_data_;
@@ -127,7 +139,7 @@
   int train_image_index = 0;
   for (const sift::TrainingImage *training_image :
        *sift_training_data_->images()) {
-    TrainingImage training_image_data;
+    TrainingImage tmp_training_image_data;
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
     for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
@@ -149,10 +161,14 @@
         4, 4, CV_32F,
         const_cast<void *>(
             static_cast<const void *>(field_to_target_->data()->data())));
-    training_image_data.features_ = features;
-    training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.features_ = features;
+    tmp_training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.target_point_x_ =
+        training_image->target_point_x();
+    tmp_training_image_data.target_point_y_ =
+        training_image->target_point_y();
 
-    training_data_.images_.push_back(training_image_data);
+    training_data_.images_.push_back(tmp_training_image_data);
     train_image_index++;
   }
 }
@@ -224,13 +240,26 @@
       << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
       << field_to_targets_0;
 
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_x_, 10.);
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_y_, 20.);
+
   float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
   cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
-  cv::Mat intrinsic_diff = (intrinsic != camera_params.camera_intrinsics_);
+  cv::Mat intrinsic_diff =
+      (intrinsic != camera_params.camera_intrinsic_matrix_);
   bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
   ASSERT_TRUE(intrinsic_eq)
-      << "Mismatch on intrinsics: " << intrinsic << "\nvs.\n"
-      << camera_params.camera_intrinsics_;
+      << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsic_matrix_;
+
+  float dist_coeff_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeff = cv::Mat(5, 1, CV_32F, dist_coeff_mat);
+  cv::Mat dist_coeff_diff = (dist_coeff != camera_params.camera_dist_coeffs_);
+  bool dist_coeff_eq = (cv::countNonZero(dist_coeff_diff) == 0);
+  ASSERT_TRUE(dist_coeff_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeff
+      << "\nvs.\n"
+      << camera_params.camera_dist_coeffs_;
 
   float i_f = 3.0;
   float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
@@ -246,4 +275,3 @@
 }  // namespace
 }  // namespace vision
 }  // namespace frc971
-
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 65f3342..651efe2 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -107,22 +107,28 @@
             fbb)
 
         # Create the TrainingImage feature vector
-        TrainingImage.TrainingImageStartFeaturesVector(fbb,
-                                                       len(features_vector))
+        TrainingImage.TrainingImageStartFeaturesVector(
+            fbb, len(features_vector))
         for feature in reversed(features_vector):
             fbb.PrependUOffsetTRelative(feature)
         features_vector_offset = fbb.EndVector(len(features_vector))
 
-        # Create the TrainingImage
+        # Add the TrainingImage data
         TrainingImage.TrainingImageStart(fbb)
-        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
-        TrainingImage.TrainingImageAddFieldToTarget(fbb,
-                                                    transformation_mat_offset)
-
-        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(
+            fbb, transformation_mat_offset)
+        TrainingImage.TrainingImageAddTargetPointX(
+            fbb, target_data.target_point_2d[0][0][0])
+        TrainingImage.TrainingImageAddTargetPointY(
+            fbb, target_data.target_point_2d[0][0][1])
+        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
 
     # Create and add Training Data of all targets
     TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
     for training_image in reversed(images_vector):
         fbb.PrependUOffsetTRelative(training_image)
     images_vector_table = fbb.EndVector(len(images_vector))
@@ -155,6 +161,14 @@
             fbb.PrependFloat32(n)
         intrinsics_vector = fbb.EndVector(len(camera_int_list))
 
+        dist_coeff_list = camera_calib.camera_int.distortion_coeffs.ravel(
+        ).tolist()
+        CameraCalibration.CameraCalibrationStartDistCoeffsVector(
+            fbb, len(dist_coeff_list))
+        for n in reversed(dist_coeff_list):
+            fbb.PrependFloat32(n)
+        dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))
+
         node_name_offset = fbb.CreateString(camera_calib.node_name)
         CameraCalibration.CameraCalibrationStart(fbb)
         CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
@@ -162,6 +176,8 @@
             fbb, camera_calib.team_number)
         CameraCalibration.CameraCalibrationAddIntrinsics(
             fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddDistCoeffs(
+            fbb, dist_coeff_vector)
         CameraCalibration.CameraCalibrationAddFixedExtrinsics(
             fbb, fixed_extrinsics_vector)
         camera_calibration_vector.append(
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 1727dff..4c3f36b 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -181,7 +181,7 @@
     # These are manually captured by examining the images,
     # and entering the pixel values from the target center for each image.
     # These are currently only used for visualization of the target
-    ideal_power_port_red.target_point_2d = np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
+    ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(-1, 1, 2)  # train_power_port_red.png
 
     # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
@@ -222,7 +222,7 @@
     ideal_loading_bay_red.target_position = np.array([field_length/2.,
                                                      loading_bay_edge_y + loading_bay_width/2.,
                                                       loading_bay_height/2.])
-    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
     training_target_loading_bay_red = TargetData(
@@ -285,7 +285,7 @@
     ideal_power_port_blue.target_position = np.array([field_length/2.,
                                                      -power_port_edge_y - power_port_width/2.,
                                                       power_port_target_height])
-    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2),  # train_power_port_blue.png
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2)  # train_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
@@ -325,7 +325,7 @@
     ideal_loading_bay_blue.target_position = np.array([-field_length/2.,
                                                      -loading_bay_edge_y - loading_bay_width/2.,
                                                        loading_bay_height/2.])
-    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
     training_target_loading_bay_blue = TargetData(
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
index 5432766..18df1e9 100644
--- a/y2020/vision/tools/python_code/target_definition_test.py
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -25,5 +25,6 @@
 
     target_data_test_1.target_rotation = np.identity(3, np.double)
     target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_test_1.target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
     target_data_list.append(target_data_test_1)
     return target_data_list