Break out Features from the flatbuffer data stream; add dist_coeffs; name cleanup

Passed through yapf, buildifier, clang-format

Send two separate messages, one with detailed features, one without

Change-Id: I70b2bca2d647cd03e2bc538a9dee68ed8155355a
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index a130ed6..a932886 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -22,10 +22,10 @@
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
@@ -54,10 +54,10 @@
 py_binary(
     name = "load_sift_training_test",
     srcs = [
-        "camera_definition.py",
+        "camera_definition_test.py",
         "define_training_data.py",
         "load_sift_training.py",
-        "target_definition.py",
+        "target_definition_test.py",
         "train_and_match.py",
     ],
     args = [
@@ -71,10 +71,10 @@
     main = "load_sift_training.py",
     srcs_version = "PY2AND3",
     deps = [
+        "//external:python-glog",
         "//y2020/vision/sift:sift_fbs_python",
         "@bazel_tools//tools/python/runfiles",
         "@opencv_contrib_nonfree_amd64//:python_opencv",
-        "//external:python-glog",
     ],
 )
 
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index f6a3591..194ddd3 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -10,6 +10,7 @@
 
     pass
 
+
 class CameraExtrinsics:
     def __init__(self):
         self.R = []
@@ -24,7 +25,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -40,13 +40,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
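
The rotation above encodes robot (x, y, z) = opencv (z, -x, -y). As a sanity
check, here is a minimal numpy sketch (illustrative, not repo code), assuming
R maps OpenCV camera coordinates into the robot frame as the comment's
ordering suggests:

    import numpy as np

    R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])

    # robot x (forward) is opencv z (out of the lens)
    assert np.allclose(R @ [0., 0., 1.], [1., 0., 0.])
    # robot y is opencv -x
    assert np.allclose(R @ [-1., 0., 0.], [0., 1., 0.])
    # robot z is opencv -y
    assert np.allclose(R @ [0., -1., 0.], [0., 0., 1.])
    # R is a proper rotation: orthonormal with determinant +1
    assert np.allclose(R @ R.T, np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.)
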
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
index f8e17f8..65d1b68 100644
--- a/y2020/vision/tools/python_code/camera_definition_test.py
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -23,7 +23,6 @@
         self.team_number = -1
 
 
-
 ### CAMERA DEFINITIONS
 
 # Robot camera has:
@@ -39,13 +38,11 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5,1))
+web_cam_int.distortion_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.],
-                          [-1, 0,  0],
-                          [0, -1., 0]])
+web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
 web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
@@ -54,11 +51,11 @@
 
 camera_list = []
 
-for team_number in (971, 8971, 9971): 
+for team_number in (971, 8971, 9971):
     for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
         camera_base = copy.deepcopy(web_cam_params)
         camera_base.node_name = node_name
         camera_base.team_number = team_number
-        camera_base.camera_ext.T = np.asarray(np.float32([i+1, i+1, i+1]))
+        camera_base.camera_ext.T = np.asarray(
+            np.float32([i + 1, i + 1, i + 1]))
         camera_list.append(camera_base)
-
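
The copy.deepcopy in the loop above matters: CameraParameters holds nested
mutable objects (camera_int, camera_ext), so a shallow copy would alias one
CameraExtrinsics across all fifteen generated cameras. A stripped-down sketch
of the failure mode, using stand-in classes rather than the repo's:

    import copy
    import numpy as np

    class Ext:
        def __init__(self):
            self.T = np.zeros(3)

    class Params:
        def __init__(self):
            self.camera_ext = Ext()

    base = Params()
    shallow = copy.copy(base)
    shallow.camera_ext.T = np.float32([1., 1., 1.])
    print(base.camera_ext.T)  # [1. 1. 1.]: the shared Ext was mutated

    base = Params()
    deep = copy.deepcopy(base)
    deep.camera_ext.T = np.float32([2., 2., 2.])
    print(base.camera_ext.T)  # [0. 0. 0.]: deepcopy keeps it independent
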
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 3feaa99..5b959cd 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -41,6 +41,8 @@
 class TrainingImage {
  public:
   cv::Mat features_;
+  float target_point_x_;
+  float target_point_y_;
   cv::Mat field_to_target_;
 };
 
@@ -66,7 +68,8 @@
 
     CopyTrainingFeatures();
     sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
-    camera_intrinsics_ = CameraIntrinsics();
+    camera_intrinsic_matrix_ = CameraIntrinsicMatrix();
+    camera_dist_coeffs_ = CameraDistCoeffs();
     camera_extrinsics_ = CameraExtrinsics();
   }
 
@@ -86,7 +89,7 @@
         ->field_to_target();
   }
 
-  cv::Mat CameraIntrinsics() const {
+  cv::Mat CameraIntrinsicMatrix() const {
     const cv::Mat result(3, 3, CV_32F,
                          const_cast<void *>(static_cast<const void *>(
                              sift_camera_calibration_->intrinsics()->data())));
@@ -94,6 +97,14 @@
     return result;
   }
 
+  cv::Mat CameraDistCoeffs() const {
+    const cv::Mat result(5, 1, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->dist_coeffs()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->dist_coeffs()->size());
+    return result;
+  }
+
   cv::Mat CameraExtrinsics() const {
     const cv::Mat result(
         4, 4, CV_32F,
@@ -111,7 +122,8 @@
 
   // We'll just extract the one that matches our current module
   const sift::CameraCalibration *sift_camera_calibration_;
-  cv::Mat camera_intrinsics_;
+  cv::Mat camera_intrinsic_matrix_;
+  cv::Mat camera_dist_coeffs_;
   cv::Mat camera_extrinsics_;
 
   TrainingData training_data_;
@@ -127,7 +139,7 @@
   int train_image_index = 0;
   for (const sift::TrainingImage *training_image :
        *sift_training_data_->images()) {
-    TrainingImage training_image_data;
+    TrainingImage tmp_training_image_data;
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
     for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
@@ -149,10 +161,14 @@
         4, 4, CV_32F,
         const_cast<void *>(
             static_cast<const void *>(field_to_target_->data()->data())));
-    training_image_data.features_ = features;
-    training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.features_ = features;
+    tmp_training_image_data.field_to_target_ = field_to_target_mat;
+    tmp_training_image_data.target_point_x_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_x();
+    tmp_training_image_data.target_point_y_ =
+        sift_training_data_->images()->Get(train_image_index)->target_point_y();
 
-    training_data_.images_.push_back(training_image_data);
+    training_data_.images_.push_back(tmp_training_image_data);
     train_image_index++;
   }
 }
@@ -224,13 +240,26 @@
       << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
       << field_to_targets_0;
 
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_x_, 10.);
+  ASSERT_EQ(camera_params.training_data_.images_[0].target_point_y_, 20.);
+
   float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
   cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
-  cv::Mat intrinsic_diff = (intrinsic != camera_params.camera_intrinsics_);
+  cv::Mat intrinsic_diff =
+      (intrinsic != camera_params.camera_intrinsic_matrix_);
   bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
   ASSERT_TRUE(intrinsic_eq)
-      << "Mismatch on intrinsics: " << intrinsic << "\nvs.\n"
-      << camera_params.camera_intrinsics_;
+      << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsic_matrix_;
+
+  float dist_coeff_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeff = cv::Mat(5, 1, CV_32F, dist_coeff_mat);
+  cv::Mat dist_coeff_diff = (dist_coeff != camera_params.camera_dist_coeffs_);
+  bool dist_coeff_eq = (cv::countNonZero(dist_coeff_diff) == 0);
+  ASSERT_TRUE(dist_coeff_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeff
+      << "\nvs.\n"
+      << camera_params.camera_dist_coeffs_;
 
   float i_f = 3.0;
   float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
@@ -246,4 +275,3 @@
 }  // namespace
 }  // namespace vision
 }  // namespace frc971
-
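
CameraDistCoeffs() mirrors CameraIntrinsicMatrix(): it wraps the flatbuffer's
float vector in a 5x1 cv::Mat without copying, and the CHECK_EQ catches a
schema/shape mismatch before the view is used. A numpy sketch of the same
zero-copy idea (the raw bytes below merely stand in for the buffer behind
CameraCalibration.dist_coeffs()):

    import numpy as np

    # Five float32 distortion coefficients as they would sit in the buffer.
    raw = np.float32([0., 0., 0., 0., 0.]).tobytes()

    # np.frombuffer creates a view over the bytes, not a copy.
    dist_coeffs = np.frombuffer(raw, dtype=np.float32).reshape(5, 1)
    assert dist_coeffs.size == 5  # analogous to CHECK_EQ(result.total(), ...)
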
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 65f3342..651efe2 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -107,22 +107,28 @@
             fbb)
 
         # Create the TrainingImage feature vector
-        TrainingImage.TrainingImageStartFeaturesVector(fbb,
-                                                       len(features_vector))
+        TrainingImage.TrainingImageStartFeaturesVector(
+            fbb, len(features_vector))
         for feature in reversed(features_vector):
             fbb.PrependUOffsetTRelative(feature)
         features_vector_offset = fbb.EndVector(len(features_vector))
 
-        # Create the TrainingImage
+        # Add the TrainingImage data
         TrainingImage.TrainingImageStart(fbb)
-        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
-        TrainingImage.TrainingImageAddFieldToTarget(fbb,
-                                                    transformation_mat_offset)
-
-        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+        TrainingImage.TrainingImageAddFeatures(fbb,
+                                               features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(
+            fbb, transformation_mat_offset)
+        TrainingImage.TrainingImageAddTargetPointX(
+            fbb, target_data.target_point_2d[0][0][0])
+        TrainingImage.TrainingImageAddTargetPointY(
+            fbb, target_data.target_point_2d[0][0][1])
+        images_vector.append(
+            TrainingImage.TrainingImageEnd(fbb))
 
     # Create and add Training Data of all targets
-    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+    TrainingData.TrainingDataStartImagesVector(fbb,
+                                               len(images_vector))
     for training_image in reversed(images_vector):
         fbb.PrependUOffsetTRelative(training_image)
     images_vector_table = fbb.EndVector(len(images_vector))
@@ -155,6 +161,14 @@
             fbb.PrependFloat32(n)
         intrinsics_vector = fbb.EndVector(len(camera_int_list))
 
+        dist_coeff_list = camera_calib.camera_int.distortion_coeffs.ravel(
+        ).tolist()
+        CameraCalibration.CameraCalibrationStartDistCoeffsVector(
+            fbb, len(dist_coeff_list))
+        for n in reversed(dist_coeff_list):
+            fbb.PrependFloat32(n)
+        dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))
+
         node_name_offset = fbb.CreateString(camera_calib.node_name)
         CameraCalibration.CameraCalibrationStart(fbb)
         CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
@@ -162,6 +176,8 @@
             fbb, camera_calib.team_number)
         CameraCalibration.CameraCalibrationAddIntrinsics(
             fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddDistCoeffs(
+            fbb, dist_coeff_vector)
         CameraCalibration.CameraCalibrationAddFixedExtrinsics(
             fbb, fixed_extrinsics_vector)
         camera_calibration_vector.append(
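
The new dist_coeffs block follows the standard flatbuffers-python pattern for
scalar vectors: the builder writes back to front, so elements are prepended in
reversed order, and EndVector returns the offset that the generated
CameraCalibrationAddDistCoeffs() call records in the table. A self-contained
sketch of the same pattern with a raw builder (no generated code; StartVector
takes element size, count, and alignment):

    import flatbuffers

    fbb = flatbuffers.Builder(0)
    dist_coeff_list = [0., 0., 0., 0., 0.]

    fbb.StartVector(4, len(dist_coeff_list), 4)
    for n in reversed(dist_coeff_list):
        fbb.PrependFloat32(n)
    # The resulting offset is what gets stored in the parent table.
    dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))

Note that EndVector(len(...)) matches the flatbuffers runtime used here;
newer releases of the Python runtime take no argument.
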
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 1727dff..4c3f36b 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -181,7 +181,7 @@
     # These are manually captured by examining the images,
     # and entering the pixel values from the target center for each image.
     # These are currently only used for visualization of the target
-    ideal_power_port_red.target_point_2d = np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
+    ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(-1, 1, 2)  # train_power_port_red.png
 
     # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
@@ -222,7 +222,7 @@
     ideal_loading_bay_red.target_position = np.array([field_length/2.,
                                                      loading_bay_edge_y + loading_bay_width/2.,
                                                       loading_bay_height/2.])
-    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
     training_target_loading_bay_red = TargetData(
@@ -285,7 +285,7 @@
     ideal_power_port_blue.target_position = np.array([field_length/2.,
                                                      -power_port_edge_y - power_port_width/2.,
                                                       power_port_target_height])
-    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2),  # train_power_port_blue.png
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2)  # train_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
@@ -325,7 +325,7 @@
     ideal_loading_bay_blue.target_position = np.array([-field_length/2.,
                                                      -loading_bay_edge_y - loading_bay_width/2.,
                                                        loading_bay_height/2.])
-    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
     training_target_loading_bay_blue = TargetData(
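
Each target_point_2d fix above removes a stray trailing comma. In Python that
comma wraps the right-hand side in a 1-tuple instead of leaving it an ndarray,
so the new target_point_2d[0][0][0] lookups in load_sift_training.py would
have pulled out a whole array rather than the x coordinate. A short
demonstration:

    import numpy as np

    with_comma = np.float32([[570, 192]]).reshape(-1, 1, 2),  # note the comma
    fixed = np.float32([[570, 192]]).reshape(-1, 1, 2)

    print(type(with_comma))     # <class 'tuple'>
    print(with_comma[0][0][0])  # [570. 192.], an array, not a scalar
    print(fixed[0][0][0])       # 570.0, the x coordinate
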
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
index 5432766..18df1e9 100644
--- a/y2020/vision/tools/python_code/target_definition_test.py
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -25,5 +25,6 @@
 
     target_data_test_1.target_rotation = np.identity(3, np.double)
     target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_test_1.target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
     target_data_list.append(target_data_test_1)
     return target_data_list
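
This new test point is what the assertions added in camera_param_test.cc
expect: reshape(-1, 1, 2) stores the point in OpenCV's (N, 1, 2) point-list
layout, and load_sift_training.py serializes [0][0][0] and [0][0][1] as
target_point_x and target_point_y. A quick sketch of the indexing:

    import numpy as np

    target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
    assert target_point_2d.shape == (1, 1, 2)
    assert target_point_2d[0][0][0] == 10.  # becomes target_point_x
    assert target_point_2d[0][0][1] == 20.  # becomes target_point_y
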