Finish writing camera_definition and target_definition to the flatbuffer

Includes much more of target_definition and the ability to run it from the command line
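
The recurring flatbuffer-building pattern in load_sift_training.py (using the
generated Python API) is roughly the sketch below; the matrix values are
illustrative only:

    import flatbuffers
    import frc971.vision.sift.TransformationMatrix as TransformationMatrix

    fbb = flatbuffers.Builder(0)
    # Vectors are built back-to-front: Start...Vector, Prepend in reverse,
    # then EndVector returns the offset to attach to the table.
    matrix_data = [1., 0., 0., 0., 0., 1., 0., 0.,
                   0., 0., 1., 0., 0., 0., 0., 1.]
    TransformationMatrix.TransformationMatrixStartDataVector(
        fbb, len(matrix_data))
    for n in reversed(matrix_data):
        fbb.PrependFloat32(n)
    data_offset = fbb.EndVector(len(matrix_data))

    TransformationMatrix.TransformationMatrixStart(fbb)
    TransformationMatrix.TransformationMatrixAddData(fbb, data_offset)
    matrix_offset = TransformationMatrix.TransformationMatrixEnd(fbb)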

Adds a test of the camera and target definitions

Uses deepcopy to make sure each camera calibration is independent of the others
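
A minimal sketch of why the deepcopy matters (class trimmed down from
camera_definition.py for illustration):

    import copy

    class CameraParameters:
        def __init__(self):
            self.node_name = ""
            self.team_number = -1

    web_cam_params = CameraParameters()

    camera_list = []
    for team_number in (971, 8971, 9971):
        for node_name in ("pi-0", "pi-1", "pi-2", "pi-3", "pi-4"):
            # Without deepcopy every list entry would alias the same object,
            # so the last node_name/team_number written would show up in
            # every calibration.
            camera_base = copy.deepcopy(web_cam_params)
            camera_base.node_name = node_name
            camera_base.team_number = team_number
            camera_list.append(camera_base)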

Adds dependencies on the Python source files

Changes the argparse style of handling arguments
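
The command-line handling now uses attribute access on the parsed namespace
instead of vars(); roughly (flags as in target_definition.py):

    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--visualize",
                    help="Whether to visualize the results",
                    default=False, action='store_true')
    ap.add_argument("-n", "--no_bazel", help="Don't run using Bazel",
                    default=True, action='store_false')
    args = ap.parse_args()

    if args.visualize:
        VISUALIZE_KEYPOINTS = True
    if not args.no_bazel:
        USE_BAZEL = False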

Change-Id: I132e1ffd0ef9de710ae03916de21442ed1cb2cc8
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index fb7b113..ded680d 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -23,6 +23,7 @@
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/base",
     ],
+    visibility = ["//y2020:__subpackages__"],
 )
 
 cc_binary(
@@ -47,6 +48,7 @@
         "//y2020/vision/sift:sift_fbs",
         "//y2020/vision/sift:sift_training_fbs",
     ],
+    visibility = ["//y2020:__subpackages__"],
 )
 
 flatbuffer_ts_library(
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 06e4077..ca41905 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -128,7 +128,7 @@
 void CameraReader::CopyTrainingFeatures() {
   for (const sift::TrainingImage *training_image : *training_data_->images()) {
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
-    for (size_t i = 0; i <  training_image->features()->size(); ++i) {
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
 
       // We don't need this information right now, but make sure it's here to
@@ -157,8 +157,7 @@
   // fused into the beginning of the SIFT algorithm because the algorithm needs
   // to look at the base image directly. It also only takes 2ms on our images.
   // This is converting from YUYV to a grayscale image.
-  cv::Mat image_mat(
-      image.rows(), image.cols(), CV_8U);
+  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
   CHECK(image_mat.isContinuous());
   const int number_pixels = image.rows() * image.cols();
   for (int i = 0; i < number_pixels; ++i) {
@@ -228,17 +227,17 @@
 
     sift::CameraPose::Builder pose_builder(*builder.fbb());
     {
-    CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
-    CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
-    cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
-    R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
-    T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
-    camera_target.at<float>(3, 3) = 1;
-    CHECK(camera_target.isContinuous());
-    const auto data_offset = builder.fbb()->CreateVector<float>(
-        reinterpret_cast<float *>(camera_target.data), camera_target.total());
-    pose_builder.add_camera_to_target(
-        sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
+      CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
+      CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
+      cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
+      R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
+      T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
+      camera_target.at<float>(3, 3) = 1;
+      CHECK(camera_target.isContinuous());
+      const auto data_offset = builder.fbb()->CreateVector<float>(
+          reinterpret_cast<float *>(camera_target.data), camera_target.total());
+      pose_builder.add_camera_to_target(
+          sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
     }
     pose_builder.add_field_to_target(
         aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
@@ -355,7 +354,8 @@
 
   aos::ShmEventLoop event_loop(&config.message());
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
-  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader, &matcher);
+  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader,
+                             &matcher);
 
   event_loop.Run();
 }
@@ -364,7 +364,6 @@
 }  // namespace vision
 }  // namespace frc971
 
-
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
   frc971::vision::CameraReaderMain();
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index e2b00f8..d4a6f2a 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -28,6 +28,7 @@
         # TODO(Brian): Replace this with something more fine-grained from the
         # configuration fragment or something.
         "//tools/cpp:toolchain",
+        "@amd64_debian_sysroot//:sysroot_files",
     ],
     default_python_version = "PY3",
     main = "fast_gaussian_runner.py",
@@ -195,10 +196,12 @@
     ],
     namespace = "frc971.vision.sift",
     tables = [
+        "KeypointFieldLocation",
         "Feature",
         "Match",
         "ImageMatch",
         "TransformationMatrix",
+        "CameraCalibration",
         "CameraPose",
         "ImageMatchResult",
         "TrainingImage",
diff --git a/y2020/vision/sift/fast_gaussian_runner.py b/y2020/vision/sift/fast_gaussian_runner.py
index 9699fef..673c378 100755
--- a/y2020/vision/sift/fast_gaussian_runner.py
+++ b/y2020/vision/sift/fast_gaussian_runner.py
@@ -22,12 +22,13 @@
 
   commands = []
 
+  amd64_debian_sysroot = r.Rlocation('amd64_debian_sysroot/usr/lib/x86_64-linux-gnu/libc.so.6').rsplit('/', 4)[0]
   env = os.environ.copy()
   env['LD_LIBRARY_PATH'] = ':'.join([
-      'debian_amd64_sysroot/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/lib',
-      'debian_amd64_sysroot/usr/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/usr/lib',
+      amd64_debian_sysroot + '/lib/x86_64-linux-gnu',
+      amd64_debian_sysroot + '/lib',
+      amd64_debian_sysroot + '/usr/lib/x86_64-linux-gnu',
+      amd64_debian_sysroot + '/usr/lib',
   ])
 
   all_header = [
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 670b664..3d7bedb 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -2,27 +2,29 @@
 
 py_binary(
     name = "load_sift_training",
-    data = [
-        ":test_images/train_power_port_red.png",
-        ":test_images/train_power_port_red_webcam.png",
-        ":test_images/train_power_port_blue.png",
-        ":test_images/train_loading_bay_red.png",
-        ":test_images/train_loading_bay_blue.png",
-    ],
-    srcs = ["load_sift_training.py",
+    srcs = [
         "camera_definition.py",
         "define_training_data.py",
+        "load_sift_training.py",
         "target_definition.py",
         "train_and_match.py",
     ],
-    args = ["sift_training_data.h",
+    args = [
+        "sift_training_data.h",
+    ],
+    data = [
+        ":test_images/train_loading_bay_blue.png",
+        ":test_images/train_loading_bay_red.png",
+        ":test_images/train_power_port_blue.png",
+        ":test_images/train_power_port_red.png",
+        ":test_images/train_power_port_red_webcam.png",
     ],
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
         "//y2020/vision/sift:sift_fbs_python",
-        "@opencv_contrib_nonfree_amd64//:python_opencv",
         "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
     ],
 )
 
@@ -41,9 +43,76 @@
 )
 
 cc_library(
-    name = "sift_training",
+    name = "sift_training_data",
     hdrs = [
         "sift_training_data.h",
     ],
     visibility = ["//visibility:public"],
 )
+
+py_binary(
+    name = "load_sift_training_test",
+    srcs = [
+        "camera_definition.py",
+        "define_training_data.py",
+        "load_sift_training.py",
+        "target_definition.py",
+        "train_and_match.py",
+    ],
+    args = [
+        "sift_training_data_test.h",
+        "test",
+    ],
+    data = [
+        ":test_images/train_power_port_red.png",
+    ],
+    default_python_version = "PY3",
+    main = "load_sift_training.py",
+    srcs_version = "PY2AND3",
+    deps = [
+        "//y2020/vision/sift:sift_fbs_python",
+        "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+    ],
+)
+
+genrule(
+    name = "run_load_sift_training_test",
+    outs = [
+        "sift_training_data_test.h",
+    ],
+    cmd = " ".join([
+        "$(location :load_sift_training_test)",
+        "$(location sift_training_data_test.h) test",
+    ]),
+    tools = [
+        ":load_sift_training_test",
+    ],
+)
+
+cc_library(
+    name = "sift_training_data_test",
+    hdrs = [
+        "sift_training_data_test.h",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+cc_test(
+    name = "camera_param_test",
+    srcs = [
+        "camera_param_test.cc",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    deps = [
+        ":sift_training_data_test",
+        "//aos/testing:googletest",
+        "//third_party:opencv",
+        "//y2020/vision:vision_fbs",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
+    ],
+)
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 59f008f..ce25ea5 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -1,9 +1,6 @@
-import argparse
-import cv2
-import json
+import copy
 import math
 import numpy as np
-import time
 
 
 class CameraIntrinsics:
@@ -18,14 +15,14 @@
         self.R = []
         self.T = []
 
-    pass
 
 class CameraParameters:
     def __init__(self):
         self.camera_int = CameraIntrinsics()
         self.camera_ext = CameraExtrinsics()
+        self.node_name = ""
+        self.team_number = -1
 
-    pass
 
 
 ### CAMERA DEFINITIONS
@@ -50,8 +47,17 @@
 web_cam_ext.R = np.array([[0., 0., 1.],
                           [-1, 0,  0],
                           [0, -1., 0]])
-web_cam_ext.T = np.array([[0., 0., 0.]]).T
+web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
 web_cam_params.camera_int = web_cam_int
 web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
+for team_number in (971, 8971, 9971):
+    for node_name in ("pi-0", "pi-1", "pi-2", "pi-3", "pi-4"):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
+        camera_list.append(camera_base)
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
new file mode 100644
index 0000000..f8e17f8
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -0,0 +1,64 @@
+import copy
+import math
+import numpy as np
+
+
+class CameraIntrinsics:
+    def __init__(self):
+        self.camera_matrix = []
+        self.distortion_coeffs = []
+
+
+class CameraExtrinsics:
+    def __init__(self):
+        self.R = []
+        self.T = []
+
+
+class CameraParameters:
+    def __init__(self):
+        self.camera_int = CameraIntrinsics()
+        self.camera_ext = CameraExtrinsics()
+        self.node_name = ""
+        self.team_number = -1
+
+
+
+### CAMERA DEFINITIONS
+
+# Robot camera has:
+# FOV_H = 93.*math.pi()/180.
+# FOV_V = 70.*math.pi()/180.
+
+# Create fake camera (based on USB webcam params)
+fx = 810.
+fy = 810.
+cx = 320.
+cy = 240.
+
+# Define a web_cam
+web_cam_int = CameraIntrinsics()
+web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+web_cam_int.distortion_coeffs = np.zeros((5,1))
+
+web_cam_ext = CameraExtrinsics()
+# Camera rotation from robot x,y,z to opencv (z, -x, -y)
+web_cam_ext.R = np.array([[0., 0., 1.],
+                          [-1, 0,  0],
+                          [0, -1., 0]])
+web_cam_ext.T = np.array([0., 0., 0.])
+
+web_cam_params = CameraParameters()
+web_cam_params.camera_int = web_cam_int
+web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
+for team_number in (971, 8971, 9971):
+    for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
+        camera_base.camera_ext.T = np.asarray(np.float32([i+1, i+1, i+1]))
+        camera_list.append(camera_base)
+
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
new file mode 100644
index 0000000..3feaa99
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -0,0 +1,249 @@
+#include <unistd.h>
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#if 1
+#include "y2020/vision/tools/python_code/sift_training_data_test.h"
+#else
+// Using this include (and changing the BUILD file) allows testing on real data
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+#endif
+
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/vision_generated.h"
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+class Feature {
+ public:
+  std::vector<float> descriptor_;
+  /*	float x_;
+  float y_;
+  float size_;
+  float angle_;
+  float response_;
+  int octave_;
+  cv::Point3f keypoint_field_location_;*/
+};
+
+class CameraCalibration {
+ public:
+  cv::Mat intrinsics_;
+  cv::Mat fixed_extrinsics_;
+};
+
+class TrainingImage {
+ public:
+  cv::Mat features_;
+  cv::Mat field_to_target_;
+};
+
+class TrainingData {
+ public:
+  std::vector<TrainingImage> images_;
+  CameraCalibration camera_calibration_;
+};
+
+class CameraParamTest {
+ public:
+  CameraParamTest() {
+    const auto training_data_bfbs = SiftTrainingData();
+    sift_training_data_ =
+        flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+    {
+      flatbuffers::Verifier verifier(
+          reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+          training_data_bfbs.size());
+
+      CHECK(sift_training_data_->Verify(verifier));
+    }
+
+    CopyTrainingFeatures();
+    sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
+    camera_intrinsics_ = CameraIntrinsics();
+    camera_extrinsics_ = CameraExtrinsics();
+  }
+
+  // Copies the information from sift_training_data_ into matcher_.
+  void CopyTrainingFeatures();
+
+  const sift::CameraCalibration *FindCameraCalibration() const;
+
+  // Returns the 2D image location for the specified training feature.
+  cv::Point2f Training2dPoint(int training_image_index, int feature_index);
+  // Returns the 3D location for the specified training feature.
+  cv::Point3f Training3dPoint(int training_image_index, int feature_index);
+
+  const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
+    return sift_training_data_->images()
+        ->Get(training_image_index)
+        ->field_to_target();
+  }
+
+  cv::Mat CameraIntrinsics() const {
+    const cv::Mat result(3, 3, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->intrinsics()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->intrinsics()->size());
+    return result;
+  }
+
+  cv::Mat CameraExtrinsics() const {
+    const cv::Mat result(
+        4, 4, CV_32F,
+        const_cast<void *>(static_cast<const void *>(
+            sift_camera_calibration_->fixed_extrinsics()->data()->data())));
+    return result;
+  }
+
+  int number_training_images() const {
+    return sift_training_data_->images()->size();
+  }
+
+  const std::string node_name_ = "pi-3";
+  const int team_number_ = 971;
+
+  // We'll just extract the one that matches our current module
+  const sift::CameraCalibration *sift_camera_calibration_;
+  cv::Mat camera_intrinsics_;
+  cv::Mat camera_extrinsics_;
+
+  TrainingData training_data_;
+  const sift::TrainingData *sift_training_data_;
+  std::vector<sift::TransformationMatrix *> field_to_targets_;
+  std::vector<cv::Point2f> point_list_2d_;
+  std::vector<cv::Point3f> point_list_3d_;
+};
+
+// Copy in the keypoint descriptors, the 2d (image) location,
+// and the 3d (field) location
+void CameraParamTest::CopyTrainingFeatures() {
+  int train_image_index = 0;
+  for (const sift::TrainingImage *training_image :
+       *sift_training_data_->images()) {
+    TrainingImage training_image_data;
+    cv::Mat features(training_image->features()->size(), 128, CV_32F);
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
+      const sift::Feature *feature_table = training_image->features()->Get(i);
+      const flatbuffers::Vector<float> *const descriptor =
+          feature_table->descriptor();
+      CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
+      cv::Mat(1, descriptor->size(), CV_32F,
+              const_cast<void *>(static_cast<const void *>(descriptor->data())))
+          .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
+
+      cv::Point2f point_2d = Training2dPoint(train_image_index, i);
+      point_list_2d_.push_back(point_2d);
+      cv::Point3f point_3d = Training3dPoint(train_image_index, i);
+      point_list_3d_.push_back(point_3d);
+    }
+    const sift::TransformationMatrix *field_to_target_ =
+        FieldToTarget(train_image_index);
+    const cv::Mat field_to_target_mat(
+        4, 4, CV_32F,
+        const_cast<void *>(
+            static_cast<const void *>(field_to_target_->data()->data())));
+    training_image_data.features_ = features;
+    training_image_data.field_to_target_ = field_to_target_mat;
+
+    training_data_.images_.push_back(training_image_data);
+    train_image_index++;
+  }
+}
+
+const sift::CameraCalibration *CameraParamTest::FindCameraCalibration() const {
+  for (const sift::CameraCalibration *candidate :
+       *sift_training_data_->camera_calibrations()) {
+    if (candidate->node_name()->string_view() != node_name_) {
+      continue;
+    }
+    if (candidate->team_number() != team_number_) {
+      continue;
+    }
+    return candidate;
+  }
+  LOG(INFO) << ": Failed to find camera calibration for " << node_name_
+            << " on " << team_number_;
+  return NULL;
+}
+
+// Returns the 2D image location for the specified training feature.
+cv::Point2f CameraParamTest::Training2dPoint(int training_image_index,
+                                             int feature_index) {
+  const float x = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->x();
+  const float y = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->y();
+  return cv::Point2f(x, y);
+}
+
+// Returns the 3D location for the specified training feature.
+cv::Point3f CameraParamTest::Training3dPoint(int training_image_index,
+                                             int feature_index) {
+  const sift::KeypointFieldLocation *const location =
+      sift_training_data_->images()
+          ->Get(training_image_index)
+          ->features()
+          ->Get(feature_index)
+          ->field_location();
+  return cv::Point3f(location->x(), location->y(), location->z());
+}
+
+TEST(CameraParamTest, TargetDataTest) {
+  CameraParamTest camera_params;
+
+  ASSERT_EQ(camera_params.number_training_images(), 1);
+  ASSERT_EQ(camera_params.point_list_2d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_2d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_2d_[0].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_3d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_3d_[1].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_[2].z, 2);
+
+  float ftt_mat[16] = {1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 2, 0, 0, 0, 1};
+  cv::Mat field_to_targets_0 = cv::Mat(4, 4, CV_32F, ftt_mat);
+  cv::Mat ftt_diff =
+      (camera_params.training_data_.images_[0].field_to_target_ !=
+       field_to_targets_0);
+  bool ftt_eq = (cv::countNonZero(ftt_diff) == 0);
+  ASSERT_TRUE(ftt_eq)
+      << "Mismatch on field_to_target: "
+      << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
+      << field_to_targets_0;
+
+  float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
+  cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
+  cv::Mat intrinsic_diff = (intrinsic != camera_params.camera_intrinsics_);
+  bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
+  ASSERT_TRUE(intrinsic_eq)
+      << "Mismatch on intrinsics: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsics_;
+
+  float i_f = 3.0;
+  float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
+                             0, -1, 0, i_f, 0,  0, 0, 1};
+  cv::Mat extrinsic = cv::Mat(4, 4, CV_32F, extrinsic_mat);
+  cv::Mat extrinsic_diff = (extrinsic != camera_params.camera_extrinsics_);
+  bool extrinsic_eq = (cv::countNonZero(extrinsic_diff) == 0);
+  ASSERT_TRUE(extrinsic_eq)
+      << "Mismatch of extrinsic: " << extrinsic << "\nvs.\n"
+      << camera_params.camera_extrinsics_;
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 9fa4acf..fba847d 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -1,100 +1,216 @@
 #!/usr/bin/python3
 
 import cv2
+import numpy as np
 import sys
 import flatbuffers
-import target_definition
 
-import frc971.vision.sift.TrainingImage as TrainingImage
-import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.CameraCalibration as CameraCalibration
 import frc971.vision.sift.Feature as Feature
+import frc971.vision.sift.KeypointFieldLocation as KeypointFieldLocation
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TransformationMatrix as TransformationMatrix
+
+
+# Takes a 3x3 rotation matrix and a 3x1 translation vector, and outputs a
+# 16-element list (a 4x4 transform, row-major), suitable for writing to a flatbuffer
+def rot_and_trans_to_list(R, T):
+    output_list = []
+    for row in range(3):
+        for col in range(3):
+            output_list.append(R[row][col])
+        output_list.append(T[row])
+
+    output_list = output_list + [0., 0., 0., 1.]
+    return output_list
+
 
 def main():
 
-  output_path = sys.argv[1]
-  print("Writing file to ", output_path)
+    target_data_list = None
+    camera_calib_list = None
 
-  target_data_list = target_definition.compute_target_definition()
+    output_path = sys.argv[1]
 
-  fbb = flatbuffers.Builder(0)
+    if (len(sys.argv) > 2):
+        if sys.argv[2] == "test":
+            print("Loading test data")
+            import camera_definition_test
+            import target_definition_test
+            target_data_list = target_definition_test.compute_target_definition(
+            )
+            camera_calib_list = camera_definition_test.camera_list
+        else:
+            print("Unhandled arguments: '%s'" % sys.argv[2])
+            quit()
+    else:
+        print("Loading target configuration data")
+        import camera_definition
+        import target_definition
+        target_data_list = target_definition.compute_target_definition()
+        camera_calib_list = camera_definition.camera_list
 
-  images_vector = []
+    print("Writing file to ", output_path)
 
-  for target_data in target_data_list:
+    fbb = flatbuffers.Builder(0)
 
-    features_vector = []
+    images_vector = []
 
-    for keypoint, keypoint_3d, descriptor in zip(target_data.keypoint_list,
-                                                 target_data.keypoint_list_3d,
-                                                 target_data.descriptor_list):
+    # Iterate over all the training targets
+    for target_data in target_data_list:
 
-      Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
-      for n in reversed(descriptor):
-        fbb.PrependFloat32(n)
-      descriptor_vector = fbb.EndVector(len(descriptor))
+        features_vector = []
 
-      Feature.FeatureStart(fbb)
+        # Iterate over all the keypoints
+        for keypoint, keypoint_3d, descriptor in zip(
+                target_data.keypoint_list, target_data.keypoint_list_3d,
+                target_data.descriptor_list):
 
-      Feature.FeatureAddDescriptor(fbb, descriptor_vector)
-      Feature.FeatureAddX(fbb, keypoint.pt[0])
-      Feature.FeatureAddY(fbb, keypoint.pt[1])
-      Feature.FeatureAddSize(fbb, keypoint.size)
-      Feature.FeatureAddAngle(fbb, keypoint.angle)
-      Feature.FeatureAddResponse(fbb, keypoint.response)
-      Feature.FeatureAddOctave(fbb, keypoint.octave)
+            # Build the Descriptor vector
+            Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+            for n in reversed(descriptor):
+                fbb.PrependFloat32(n)
+            descriptor_vector = fbb.EndVector(len(descriptor))
 
-      features_vector.append(Feature.FeatureEnd(fbb))
+            # Add all the components to each Feature
+            Feature.FeatureStart(fbb)
+            Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+            Feature.FeatureAddX(fbb, keypoint.pt[0])
+            Feature.FeatureAddY(fbb, keypoint.pt[1])
+            Feature.FeatureAddSize(fbb, keypoint.size)
+            Feature.FeatureAddAngle(fbb, keypoint.angle)
+            Feature.FeatureAddResponse(fbb, keypoint.response)
+            Feature.FeatureAddOctave(fbb, keypoint.octave)
 
-      ## TODO: Write 3d vector here
+            keypoint_3d_location = KeypointFieldLocation.CreateKeypointFieldLocation(
+                fbb, keypoint_3d[0][0], keypoint_3d[0][1], keypoint_3d[0][2])
 
-    TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
-    for feature in reversed(features_vector):
-      fbb.PrependUOffsetTRelative(feature)
-    features_vector_table = fbb.EndVector(len(features_vector))
+            Feature.FeatureAddFieldLocation(fbb, keypoint_3d_location)
 
-    TrainingImage.TrainingImageStart(fbb)
-    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
-    # TODO(Brian): Fill out the transformation matrices.
-    images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+            features_vector.append(Feature.FeatureEnd(fbb))
 
-  TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
-  for training_image in reversed(images_vector):
-    fbb.PrependUOffsetTRelative(training_image)
-  images_vector_table = fbb.EndVector(len(images_vector))
+        # Create the field_to_target TransformationMatrix
+        field_to_target_list = rot_and_trans_to_list(
+            target_data.target_rotation, target_data.target_position)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(field_to_target_list))
+        for n in reversed(field_to_target_list):
+            fbb.PrependFloat32(n)
+        field_to_target_offset = fbb.EndVector(len(field_to_target_list))
 
-  TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images_vector_table)
-  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, field_to_target_offset)
+        transformation_mat_offset = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
 
-  bfbs = fbb.Output()
+        # Create the TrainingImage feature vector
+        TrainingImage.TrainingImageStartFeaturesVector(fbb,
+                                                       len(features_vector))
+        for feature in reversed(features_vector):
+            fbb.PrependUOffsetTRelative(feature)
+        features_vector_offset = fbb.EndVector(len(features_vector))
 
-  output_prefix = [
-      b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#include <string_view>',
-      b'namespace frc971 {',
-      b'namespace vision {',
-      b'inline std::string_view SiftTrainingData() {',
-  ]
-  output_suffix = [
-      b'  return std::string_view(kData, sizeof(kData));',
-      b'}',
-      b'}  // namespace vision',
-      b'}  // namespace frc971',
-      b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-  ]
+        # Create the TrainingImage
+        TrainingImage.TrainingImageStart(fbb)
+        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(fbb,
+                                                    transformation_mat_offset)
 
-  with open(output_path, 'wb') as output:
-    for line in output_prefix:
-      output.write(line)
-      output.write(b'\n')
-    output.write(b'alignas(64) static constexpr char kData[] = "')
-    for byte in fbb.Output():
-      output.write(b'\\x' + (b'%x' % byte).zfill(2))
-    output.write(b'";\n')
-    for line in output_suffix:
-      output.write(line)
-      output.write(b'\n')
+        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+
+    # Create and add Training Data of all targets
+    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+    for training_image in reversed(images_vector):
+        fbb.PrependUOffsetTRelative(training_image)
+    images_vector_table = fbb.EndVector(len(images_vector))
+
+    # Create camera calibration data
+    camera_calibration_vector = []
+    for camera_calib in camera_calib_list:
+        fixed_extrinsics_list = rot_and_trans_to_list(
+            camera_calib.camera_ext.R, camera_calib.camera_ext.T)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(fixed_extrinsics_list))
+        for n in reversed(fixed_extrinsics_list):
+            fbb.PrependFloat32(n)
+        fixed_extrinsics_data_offset = fbb.EndVector(
+            len(fixed_extrinsics_list))
+
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, fixed_extrinsics_data_offset)
+        fixed_extrinsics_vector = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
+
+        # TODO: Need to add in distortion coefficients here
+        # For now, just send camera parameter matrix (fx, fy, cx, cy)
+        camera_int_list = camera_calib.camera_int.camera_matrix.ravel().tolist(
+        )
+        CameraCalibration.CameraCalibrationStartIntrinsicsVector(
+            fbb, len(camera_int_list))
+        for n in reversed(camera_int_list):
+            fbb.PrependFloat32(n)
+        intrinsics_vector = fbb.EndVector(len(camera_int_list))
+
+        node_name_offset = fbb.CreateString(camera_calib.node_name)
+        CameraCalibration.CameraCalibrationStart(fbb)
+        CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
+        CameraCalibration.CameraCalibrationAddTeamNumber(
+            fbb, camera_calib.team_number)
+        CameraCalibration.CameraCalibrationAddIntrinsics(
+            fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddFixedExtrinsics(
+            fbb, fixed_extrinsics_vector)
+        camera_calibration_vector.append(
+            CameraCalibration.CameraCalibrationEnd(fbb))
+
+    TrainingData.TrainingDataStartCameraCalibrationsVector(
+        fbb, len(camera_calibration_vector))
+    for camera_calibration in reversed(camera_calibration_vector):
+        fbb.PrependUOffsetTRelative(camera_calibration)
+    camera_calibration_vector_table = fbb.EndVector(
+        len(camera_calibration_vector))
+
+    # Fill out TrainingData
+    TrainingData.TrainingDataStart(fbb)
+    TrainingData.TrainingDataAddImages(fbb, images_vector_table)
+    TrainingData.TrainingDataAddCameraCalibrations(
+        fbb, camera_calibration_vector_table)
+    fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+    bfbs = fbb.Output()
+
+    output_prefix = [
+        b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#include <string_view>',
+        b'namespace frc971 {',
+        b'namespace vision {',
+        b'inline std::string_view SiftTrainingData() {',
+    ]
+    output_suffix = [
+        b'  return std::string_view(kData, sizeof(kData));',
+        b'}',
+        b'}  // namespace vision',
+        b'}  // namespace frc971',
+        b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+    ]
+
+    # Write out the header file
+    with open(output_path, 'wb') as output:
+        for line in output_prefix:
+            output.write(line)
+            output.write(b'\n')
+        output.write(b'alignas(64) static constexpr char kData[] = "')
+        for byte in fbb.Output():
+            output.write(b'\\x' + (b'%x' % byte).zfill(2))
+        output.write(b'";\n')
+        for line in output_suffix:
+            output.write(line)
+            output.write(b'\n')
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 779eb0b..b467608 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -1,6 +1,5 @@
 import argparse
 import cv2
-import json
 import math
 import numpy as np
 
@@ -13,6 +12,7 @@
 USE_BAZEL = True
 VISUALIZE_KEYPOINTS = False
 
+
 def bazel_name_fix(filename):
     ret_name = filename
     if USE_BAZEL:
@@ -20,6 +20,7 @@
 
     return ret_name
 
+
 class TargetData:
     def __init__(self, filename):
         self.image_filename = filename
@@ -27,7 +28,8 @@
         if USE_BAZEL:
             from bazel_tools.tools.python.runfiles import runfiles
             r = runfiles.Create()
-            self.image_filename = r.Rlocation(bazel_name_fix(self.image_filename))
+            self.image_filename = r.Rlocation(
+                bazel_name_fix(self.image_filename))
         self.image = tam.load_images([self.image_filename])[0]
         self.polygon_list = []
         self.polygon_list_3d = []
@@ -35,8 +37,13 @@
         self.keypoint_list = []
         self.keypoint_list_3d = None  # numpy array of 3D points
         self.descriptor_list = []
+        self.target_rotation = None
+        self.target_position = None
 
-    def extract_features(self, feature_extractor):
+    def extract_features(self, feature_extractor=None):
+        if feature_extractor is None:
+            feature_extractor = tam.load_feature_extractor()
+
         kp_lists, desc_lists = tam.detect_and_compute(feature_extractor,
                                                       [self.image])
         self.keypoint_list = kp_lists[0]
@@ -73,6 +80,7 @@
 
         return point_list_3d
 
+
 def compute_target_definition():
     ############################################################
     # TARGET DEFINITIONS
@@ -94,7 +102,8 @@
     loading_bay_height = 0.94
 
     # Pick the target center location at halfway between top and bottom of the top panel
-    target_center_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
+    target_center_height = (
+        power_port_total_height + power_port_bottom_wing_height) / 2.
 
     # TODO: Still need to figure out what this angle actually is
     wing_angle = 20. * math.pi / 180.
@@ -109,17 +118,18 @@
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_red_main_panel_polygon_points_2d = [(451, 679), (451, 304),
-                                                   (100, 302), (451, 74),
-                                                   (689, 74), (689, 302),
-                                                   (689, 679)]
+                                                   (100, 302), (451, 74), (689,
+                                                                           74),
+                                                   (689, 302), (689, 679)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_main_panel_polygon_points_3d = [
-        (field_length, -power_port_center_y + power_port_width / 2., 0.),
         (field_length, -power_port_center_y + power_port_width / 2.,
+         0.), (field_length, -power_port_center_y + power_port_width / 2.,
+               power_port_bottom_wing_height),
+        (field_length,
+         -power_port_center_y + power_port_width / 2. + power_port_wing_width,
          power_port_bottom_wing_height),
-        (field_length, -power_port_center_y + power_port_width / 2.
-         + power_port_wing_width, power_port_bottom_wing_height),
         (field_length, -power_port_center_y + power_port_width / 2.,
          power_port_total_height),
         (field_length, -power_port_center_y - power_port_width / 2.,
@@ -136,24 +146,41 @@
         (field_length, -power_port_center_y - power_port_width / 2.,
          power_port_total_height),
         (field_length - power_port_wing_width * math.sin(wing_angle),
-         -power_port_center_y - power_port_width / 2.
-         - power_port_wing_width * math.cos(wing_angle),
+         -power_port_center_y - power_port_width / 2. -
+         power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
         (field_length, -power_port_center_y - power_port_width / 2.,
          power_port_bottom_wing_height)
     ]
 
     # Populate the red power port
-    ideal_power_port_red.polygon_list.append(power_port_red_main_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_main_panel_polygon_points_3d)
+    ideal_power_port_red.polygon_list.append(
+        power_port_red_main_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(
+        power_port_red_main_panel_polygon_points_3d)
 
-    ideal_power_port_red.polygon_list.append(power_port_red_wing_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_wing_panel_polygon_points_3d)
+    ideal_power_port_red.polygon_list.append(
+        power_port_red_wing_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(
+        power_port_red_wing_panel_polygon_points_3d)
+
+    # Define the pose of the target
+    # Location is on the ground, at the center of the target
+    # Orientation is with "x" pointing out of the field, and "z" up
+    # This way, if robot is facing target directly, the x-axes are aligned
+    # and the skew to target is zero
+    ideal_power_port_red.target_rotation = np.identity(3, np.double)
+    ideal_power_port_red.target_position = np.array(
+        [field_length, -power_port_center_y, 0.])
 
     # Add the ideal 3D target to our list
     ideal_target_list.append(ideal_power_port_red)
     # And add the training image we'll actually use to the training list
-    training_target_list.append(TargetData('test_images/train_power_port_red_webcam.png'))
+    training_target_power_port_red = TargetData(
+        'test_images/train_power_port_red_webcam.png')
+    training_target_power_port_red.target_rotation = ideal_power_port_red.target_rotation
+    training_target_power_port_red.target_position = ideal_power_port_red.target_position
+    training_target_list.append(training_target_power_port_red)
 
     ###
     ### Red Loading Bay
@@ -163,21 +190,33 @@
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651, 406)]
+    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651,
+                                                                          406)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_red_polygon_points_3d = [
         (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (field_length, loading_bay_edge_y + loading_bay_width,
+         loading_bay_height), (field_length, loading_bay_edge_y,
+                               loading_bay_height), (field_length,
+                                                     loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_red.polygon_list.append(loading_bay_red_polygon_points_2d)
-    ideal_loading_bay_red.polygon_list_3d.append(loading_bay_red_polygon_points_3d)
+    ideal_loading_bay_red.polygon_list.append(
+        loading_bay_red_polygon_points_2d)
+    ideal_loading_bay_red.polygon_list_3d.append(
+        loading_bay_red_polygon_points_3d)
+    # Location of target
+    ideal_loading_bay_red.target_rotation = np.identity(3, np.double)
+    ideal_loading_bay_red.target_position = np.array(
+        [field_length, -loading_bay_edge_y - loading_bay_width / 2., 0.])
 
     ideal_target_list.append(ideal_loading_bay_red)
-    training_target_list.append(TargetData('test_images/train_loading_bay_red.png'))
+    training_target_loading_bay_red = TargetData(
+        'test_images/train_loading_bay_red.png')
+    training_target_loading_bay_red.target_rotation = ideal_loading_bay_red.target_rotation
+    training_target_loading_bay_red.target_position = ideal_loading_bay_red.target_position
+    training_target_list.append(training_target_loading_bay_red)
 
     ###
     ### Blue Power Port
@@ -188,16 +227,17 @@
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_blue_main_panel_polygon_points_2d = [(438, 693), (438, 285),
-                                                    (93, 285), (440, 50),
-                                                    (692, 50), (692, 285),
-                                                    (692, 693)]
+                                                    (93, 285), (440, 50), (692,
+                                                                           50),
+                                                    (692, 285), (692, 693)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_main_panel_polygon_points_3d = [
-        (0., power_port_center_y - power_port_width / 2., 0.),
         (0., power_port_center_y - power_port_width / 2.,
-         power_port_bottom_wing_height),
-        (0., power_port_center_y - power_port_width / 2. - power_port_wing_width,
+         0.), (0., power_port_center_y - power_port_width / 2.,
+               power_port_bottom_wing_height),
+        (0.,
+         power_port_center_y - power_port_width / 2. - power_port_wing_width,
          power_port_bottom_wing_height),
         (0., power_port_center_y - power_port_width / 2.,
          power_port_total_height),
@@ -214,47 +254,82 @@
     power_port_blue_wing_panel_polygon_points_3d = [
         (0., power_port_center_y + power_port_width / 2.,
          power_port_total_height),
-        (power_port_wing_width * math.sin(wing_angle),
-         power_port_center_y - power_port_width / 2. +
-         power_port_wing_width * math.cos(wing_angle),
+        (power_port_wing_width * math.sin(wing_angle), power_port_center_y -
+         power_port_width / 2. + power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
         (0., power_port_center_y + power_port_width / 2.,
          power_port_bottom_wing_height)
     ]
 
     # Populate the blue power port
-    ideal_power_port_blue.polygon_list.append(power_port_blue_main_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_main_panel_polygon_points_3d)
+    ideal_power_port_blue.polygon_list.append(
+        power_port_blue_main_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(
+        power_port_blue_main_panel_polygon_points_3d)
 
-    ideal_power_port_blue.polygon_list.append(power_port_blue_wing_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_wing_panel_polygon_points_3d)
+    ideal_power_port_blue.polygon_list.append(
+        power_port_blue_wing_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(
+        power_port_blue_wing_panel_polygon_points_3d)
+
+    # Location of target.  Rotation is pointing in -x direction
+    ideal_power_port_blue.target_rotation = -np.identity(3, np.double)
+    ideal_power_port_blue.target_rotation[2][2] = 1.
+    ideal_power_port_blue.target_position = np.array(
+        [0., power_port_center_y, 0.])
 
     ideal_target_list.append(ideal_power_port_blue)
-    training_target_list.append(TargetData('test_images/train_power_port_blue.png'))
+    training_target_power_port_blue = TargetData(
+        'test_images/train_power_port_blue.png')
+    training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
+    training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
+    training_target_list.append(training_target_power_port_blue)
 
     ###
     ### Blue Loading Bay
     ###
 
-    ideal_loading_bay_blue = TargetData('test_images/train_loading_bay_blue.png')
+    ideal_loading_bay_blue = TargetData(
+        'test_images/train_loading_bay_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729, 434)]
+    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729,
+                                                                       434)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_blue_polygon_points_3d = [
+        (0., -loading_bay_edge_y - loading_bay_width, 0.),
+        (0., -loading_bay_edge_y - loading_bay_width,
+         loading_bay_height), (0., -loading_bay_edge_y,
+                               loading_bay_height), (0., -loading_bay_edge_y,
+                                                     0.)
+    ]
+    loading_bay_red_polygon_points_3d = [
         (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (field_length, loading_bay_edge_y + loading_bay_width,
+         loading_bay_height), (field_length, loading_bay_edge_y,
+                               loading_bay_height), (field_length,
+                                                     loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_blue.polygon_list.append(loading_bay_blue_polygon_points_2d)
-    ideal_loading_bay_blue.polygon_list_3d.append(loading_bay_blue_polygon_points_3d)
+    ideal_loading_bay_blue.polygon_list.append(
+        loading_bay_blue_polygon_points_2d)
+    ideal_loading_bay_blue.polygon_list_3d.append(
+        loading_bay_blue_polygon_points_3d)
+
+    # Location of target
+    ideal_loading_bay_blue.target_rotation = -np.identity(3, np.double)
+    ideal_loading_bay_blue.target_rotation[2][2] = 1.
+    ideal_loading_bay_blue.target_position = np.array(
+        [0., loading_bay_edge_y + loading_bay_width / 2., 0.])
 
     ideal_target_list.append(ideal_loading_bay_blue)
-    training_target_list.append(TargetData('test_images/train_loading_bay_blue.png'))
+    training_target_loading_bay_blue = TargetData(
+        'test_images/train_loading_bay_blue.png')
+    training_target_loading_bay_blue.target_rotation = ideal_loading_bay_blue.target_rotation
+    training_target_loading_bay_blue.target_position = ideal_loading_bay_blue.target_position
+    training_target_list.append(training_target_loading_bay_blue)
 
     # Create feature extractor
     feature_extractor = tam.load_feature_extractor()
@@ -263,7 +338,9 @@
     camera_params = camera_definition.web_cam_params
 
     for ideal_target in ideal_target_list:
-        print("\nPreparing target for image %s" % ideal_target.image_filename)
+        if not USE_BAZEL:
+            print("\nPreparing target for image %s" %
+                  ideal_target.image_filename)
         ideal_target.extract_features(feature_extractor)
         ideal_target.filter_keypoints_by_polygons()
         ideal_target.compute_reprojection_maps()
@@ -272,16 +349,21 @@
 
         if VISUALIZE_KEYPOINTS:
             for i in range(len(ideal_target.polygon_list)):
-                ideal_pts_tmp = np.asarray(ideal_target.polygon_list[i]).reshape(
-                    -1, 2)
+                ideal_pts_tmp = np.asarray(
+                    ideal_target.polygon_list[i]).reshape(-1, 2)
                 ideal_pts_3d_tmp = np.asarray(
                     ideal_target.polygon_list_3d[i]).reshape(-1, 3)
                 # We can only compute pose if we have at least 4 points
                 # Only matters for reprojection for visualization
                 # Keeping this code here, since it's helpful when testing
                 if (len(ideal_target.polygon_list[i]) >= 4):
-                    img_copy = dtd.draw_polygon(ideal_target.image.copy(), ideal_target.polygon_list[i], (0,255,0), True)
-                    dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+                    img_copy = dtd.draw_polygon(ideal_target.image.copy(),
+                                                ideal_target.polygon_list[i],
+                                                (0, 255, 0), True)
+                    dtd.visualize_reprojections(
+                        img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
+                        camera_params.camera_int.camera_matrix,
+                        camera_params.camera_int.distortion_coeffs)
 
             for polygon in ideal_target.polygon_list:
                 img_copy = ideal_target.image.copy()
@@ -307,7 +389,9 @@
     AUTO_PROJECTION = True
 
     if AUTO_PROJECTION:
-        print("\n\nAuto projection of training keypoints to 3D using ideal images")
+        print(
+            "\n\nAuto projection of training keypoints to 3D using ideal images"
+        )
         # Match the captured training image against the "ideal" training image
         # and use those matches to pin down the 3D locations of the keypoints
 
@@ -316,30 +400,35 @@
             training_target = training_target_list[target_ind]
             ideal_target = ideal_target_list[target_ind]
 
-            print("\nPreparing target for image %s" % training_target.image_filename)
+            if not USE_BAZEL:
+                print("\nPreparing target for image %s" %
+                      training_target.image_filename)
             # Extract keypoints and descriptors for model
             training_target.extract_features(feature_extractor)
 
             # Create matcher that we'll use to match with ideal
             matcher = tam.train_matcher([training_target.descriptor_list])
 
-            matches_list = tam.compute_matches(matcher,
-                                               [training_target.descriptor_list],
-                                               [ideal_target.descriptor_list])
+            matches_list = tam.compute_matches(
+                matcher, [training_target.descriptor_list],
+                [ideal_target.descriptor_list])
 
             homography_list, matches_mask_list = tam.compute_homographies(
                 [training_target.keypoint_list], [ideal_target.keypoint_list],
                 matches_list)
 
             for polygon in ideal_target.polygon_list:
-                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
+                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(
+                    -1, 1, 2)
                 H_inv = np.linalg.inv(homography_list[0])
                 # We use the ideal target's polygons to define the polygons on
                 # the training target
-                transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+                transformed_polygon = cv2.perspectiveTransform(
+                    ideal_pts_2d, H_inv)
                 training_target.polygon_list.append(transformed_polygon)
 
-            print("Started with %d keypoints" % len(training_target.keypoint_list))
+            print("Started with %d keypoints" % len(
+                training_target.keypoint_list))
 
             training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
                 training_target.keypoint_list, training_target.descriptor_list,
@@ -383,11 +472,11 @@
                 img_copy = training_target.image.copy()
                 for polygon in training_target.polygon_list:
                     pts = polygon.astype(int).reshape(-1, 2)
-                    img_copy = dtd.draw_polygon(img_copy, pts,
-                                             (255, 0, 0), True)
-                kp_tmp = np.asarray([
-                    (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
-                ]).reshape(-1, 2)
+                    img_copy = dtd.draw_polygon(img_copy, pts, (255, 0, 0),
+                                                True)
+                kp_tmp = np.asarray(
+                    [(kp.pt[0], kp.pt[1])
+                     for kp in training_target.keypoint_list]).reshape(-1, 2)
                 dtd.visualize_reprojections(
                     img_copy, kp_tmp, training_target.keypoint_list_3d,
                     camera_params.camera_int.camera_matrix,
@@ -396,19 +485,30 @@
     y2020_target_list = training_target_list
     return y2020_target_list
 
+
 if __name__ == '__main__':
     ap = argparse.ArgumentParser()
-    ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
-    ap.add_argument("--no_use_bazel", help="Don't run using Bazel", default=True, action='store_false')
-    args = vars(ap.parse_args())
+    ap.add_argument(
+        "-v",
+        "--visualize",
+        help="Whether to visualize the results",
+        default=False,
+        action='store_true')
+    ap.add_argument(
+        "-n",
+        "--no_bazel",
+        help="Don't run using Bazel",
+        default=True,
+        action='store_false')
 
-    VISUALIZE_KEYPOINTS = args["visualize"]
-    if args["visualize"]:
+    args = ap.parse_args()
+
+    if args.visualize:
         print("Visualizing results")
+        VISUALIZE_KEYPOINTS = True
 
-    USE_BAZEL = args["no_use_bazel"]
-    if args["no_use_bazel"]:
+    if not args.no_bazel:
         print("Running on command line (no Bazel)")
+        USE_BAZEL = False
 
     compute_target_definition()
-    pass
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
new file mode 100644
index 0000000..5432766
--- /dev/null
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -0,0 +1,29 @@
+import cv2
+import numpy as np
+
+import camera_definition_test
+import define_training_data as dtd
+import target_definition
+import train_and_match as tam
+
+
+def compute_target_definition():
+    target_data_list = []
+    target_data_test_1 = target_definition.TargetData(
+        'test_images/train_power_port_red.png')
+
+    target_data_test_1.extract_features()
+
+    target_data_test_1.keypoint_list[0].pt = (0., 1.)
+
+    kp_3d = []
+    for i in range(len(target_data_test_1.keypoint_list)):
+        kp_3d.append((i, i, i))
+
+    target_data_test_1.keypoint_list_3d = np.asarray(
+        np.float32(kp_3d)).reshape(-1, 1, 3)
+
+    target_data_test_1.target_rotation = np.identity(3, np.double)
+    target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_list.append(target_data_test_1)
+    return target_data_list