Merge "Initialize y2020 Localizer correctly"
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index fb7b113..ded680d 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -23,6 +23,7 @@
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/base",
     ],
+    visibility = ["//y2020:__subpackages__"],
 )
 
 cc_binary(
@@ -47,6 +48,7 @@
         "//y2020/vision/sift:sift_fbs",
         "//y2020/vision/sift:sift_training_fbs",
     ],
+    visibility = ["//y2020:__subpackages__"],
 )
 
 flatbuffer_ts_library(
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 06e4077..ca41905 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -128,7 +128,7 @@
 void CameraReader::CopyTrainingFeatures() {
   for (const sift::TrainingImage *training_image : *training_data_->images()) {
     cv::Mat features(training_image->features()->size(), 128, CV_32F);
-    for (size_t i = 0; i <  training_image->features()->size(); ++i) {
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
       const sift::Feature *feature_table = training_image->features()->Get(i);
 
       // We don't need this information right now, but make sure it's here to
@@ -157,8 +157,7 @@
   // fused into the beginning of the SIFT algorithm because the algorithm needs
   // to look at the base image directly. It also only takes 2ms on our images.
   // This is converting from YUYV to a grayscale image.
-  cv::Mat image_mat(
-      image.rows(), image.cols(), CV_8U);
+  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
   CHECK(image_mat.isContinuous());
   const int number_pixels = image.rows() * image.cols();
   for (int i = 0; i < number_pixels; ++i) {
@@ -228,17 +227,17 @@
 
     sift::CameraPose::Builder pose_builder(*builder.fbb());
     {
-    CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
-    CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
-    cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
-    R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
-    T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
-    camera_target.at<float>(3, 3) = 1;
-    CHECK(camera_target.isContinuous());
-    const auto data_offset = builder.fbb()->CreateVector<float>(
-        reinterpret_cast<float *>(camera_target.data), camera_target.total());
-    pose_builder.add_camera_to_target(
-        sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
+      CHECK_EQ(cv::Size(3, 3), R_camera_target.size());
+      CHECK_EQ(cv::Size(3, 1), T_camera_target.size());
+      cv::Mat camera_target = cv::Mat::zeros(4, 4, CV_32F);
+      R_camera_target.copyTo(camera_target(cv::Range(0, 3), cv::Range(0, 3)));
+      T_camera_target.copyTo(camera_target(cv::Range(3, 4), cv::Range(0, 3)));
+      camera_target.at<float>(3, 3) = 1;
+      CHECK(camera_target.isContinuous());
+      const auto data_offset = builder.fbb()->CreateVector<float>(
+          reinterpret_cast<float *>(camera_target.data), camera_target.total());
+      pose_builder.add_camera_to_target(
+          sift::CreateTransformationMatrix(*builder.fbb(), data_offset));
     }
     pose_builder.add_field_to_target(
         aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb()));
@@ -355,7 +354,8 @@
 
   aos::ShmEventLoop event_loop(&config.message());
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
-  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader, &matcher);
+  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader,
+                             &matcher);
 
   event_loop.Run();
 }
@@ -364,7 +364,6 @@
 }  // namespace vision
 }  // namespace frc971
 
-
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
   frc971::vision::CameraReaderMain();
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index 02507a9..a7827b6 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -28,6 +28,7 @@
         # TODO(Brian): Replace this with something more fine-grained from the
         # configuration fragment or something.
         "//tools/cpp:toolchain",
+        "@amd64_debian_sysroot//:sysroot_files",
     ],
     default_python_version = "PY3",
     main = "fast_gaussian_runner.py",
@@ -195,10 +196,12 @@
     ],
     namespace = "frc971.vision.sift",
     tables = [
+        "KeypointFieldLocation",
         "Feature",
         "Match",
         "ImageMatch",
         "TransformationMatrix",
+        "CameraCalibration",
         "CameraPose",
         "ImageMatchResult",
         "TrainingImage",
diff --git a/y2020/vision/sift/fast_gaussian_runner.py b/y2020/vision/sift/fast_gaussian_runner.py
index 9699fef..673c378 100755
--- a/y2020/vision/sift/fast_gaussian_runner.py
+++ b/y2020/vision/sift/fast_gaussian_runner.py
@@ -22,12 +22,13 @@
 
   commands = []
 
+  amd64_debian_sysroot = r.Rlocation('amd64_debian_sysroot/usr/lib/x86_64-linux-gnu/libc.so.6').rsplit('/', 4)[0]
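+  # The Rlocation of libc.so.6 is used only to find the sysroot's runfiles root;
+  # the rsplit strips the trailing usr/lib/x86_64-linux-gnu/libc.so.6 components.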
   env = os.environ.copy()
   env['LD_LIBRARY_PATH'] = ':'.join([
-      'debian_amd64_sysroot/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/lib',
-      'debian_amd64_sysroot/usr/lib/x86_64-linux-gnu',
-      'debian_amd64_sysroot/usr/lib',
+      amd64_debian_sysroot + '/lib/x86_64-linux-gnu',
+      amd64_debian_sysroot + '/lib',
+      amd64_debian_sysroot + '/usr/lib/x86_64-linux-gnu',
+      amd64_debian_sysroot + '/usr/lib',
   ])
 
   all_header = [
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 670b664..a130ed6 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -2,27 +2,30 @@
 
 py_binary(
     name = "load_sift_training",
-    data = [
-        ":test_images/train_power_port_red.png",
-        ":test_images/train_power_port_red_webcam.png",
-        ":test_images/train_power_port_blue.png",
-        ":test_images/train_loading_bay_red.png",
-        ":test_images/train_loading_bay_blue.png",
-    ],
-    srcs = ["load_sift_training.py",
+    srcs = [
         "camera_definition.py",
         "define_training_data.py",
+        "load_sift_training.py",
         "target_definition.py",
         "train_and_match.py",
     ],
-    args = ["sift_training_data.h",
+    args = [
+        "sift_training_data.h",
+    ],
+    data = [
+        ":test_images/train_loading_bay_blue.png",
+        ":test_images/train_loading_bay_red.png",
+        ":test_images/train_power_port_blue.png",
+        ":test_images/train_power_port_red.png",
+        ":test_images/train_power_port_red_webcam.png",
     ],
     default_python_version = "PY3",
     srcs_version = "PY2AND3",
     deps = [
         "//y2020/vision/sift:sift_fbs_python",
-        "@opencv_contrib_nonfree_amd64//:python_opencv",
         "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+        "//external:python-glog",
     ],
 )
 
@@ -41,9 +44,77 @@
 )
 
 cc_library(
-    name = "sift_training",
+    name = "sift_training_data",
     hdrs = [
         "sift_training_data.h",
     ],
     visibility = ["//visibility:public"],
 )
+
+py_binary(
+    name = "load_sift_training_test",
+    srcs = [
+        "camera_definition.py",
+        "define_training_data.py",
+        "load_sift_training.py",
+        "target_definition.py",
+        "train_and_match.py",
+    ],
+    args = [
+        "sift_training_data_test.h",
+        "test",
+    ],
+    data = [
+        ":test_images/train_power_port_red.png",
+    ],
+    default_python_version = "PY3",
+    main = "load_sift_training.py",
+    srcs_version = "PY2AND3",
+    deps = [
+        "//y2020/vision/sift:sift_fbs_python",
+        "@bazel_tools//tools/python/runfiles",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+        "//external:python-glog",
+    ],
+)
+
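+# Runs the test variant of the training script at build time to generate
+# sift_training_data_test.h for camera_param_test.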
+genrule(
+    name = "run_load_sift_training_test",
+    outs = [
+        "sift_training_data_test.h",
+    ],
+    cmd = " ".join([
+        "$(location :load_sift_training_test)",
+        "$(location sift_training_data_test.h) test",
+    ]),
+    tools = [
+        ":load_sift_training_test",
+    ],
+)
+
+cc_library(
+    name = "sift_training_data_test",
+    hdrs = [
+        "sift_training_data_test.h",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+cc_test(
+    name = "camera_param_test",
+    srcs = [
+        "camera_param_test.cc",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    deps = [
+        ":sift_training_data_test",
+        "//aos/testing:googletest",
+        "//third_party:opencv",
+        "//y2020/vision:vision_fbs",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
+    ],
+)
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 59f008f..ce25ea5 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -1,9 +1,6 @@
-import argparse
-import cv2
-import json
+import copy
 import math
 import numpy as np
-import time
 
 
 class CameraIntrinsics:
@@ -18,14 +15,14 @@
         self.R = []
         self.T = []
 
-    pass
 
 class CameraParameters:
     def __init__(self):
         self.camera_int = CameraIntrinsics()
         self.camera_ext = CameraExtrinsics()
+        self.node_name = ""
+        self.team_number = -1
 
-    pass
 
 
 ### CAMERA DEFINITIONS
@@ -50,8 +47,17 @@
 web_cam_ext.R = np.array([[0., 0., 1.],
                           [-1, 0,  0],
                           [0, -1., 0]])
-web_cam_ext.T = np.array([[0., 0., 0.]]).T
+web_cam_ext.T = np.array([0., 0., 0.])
 
 web_cam_params = CameraParameters()
 web_cam_params.camera_int = web_cam_int
 web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
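+# For now, every team number and pi node gets a copy of the same generic
+# web_cam parameters.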
+for team_number in (971, 8971, 9971):
+    for node_name in ("pi-0", "pi-1", "pi-2", "pi-3", "pi-4"):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
+        camera_list.append(camera_base)
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
new file mode 100644
index 0000000..f8e17f8
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -0,0 +1,64 @@
+import copy
+import math
+import numpy as np
+
+
+class CameraIntrinsics:
+    def __init__(self):
+        self.camera_matrix = []
+        self.distortion_coeffs = []
+
+
+class CameraExtrinsics:
+    def __init__(self):
+        self.R = []
+        self.T = []
+
+
+class CameraParameters:
+    def __init__(self):
+        self.camera_int = CameraIntrinsics()
+        self.camera_ext = CameraExtrinsics()
+        self.node_name = ""
+        self.team_number = -1
+
+
+### CAMERA DEFINITIONS
+
+# Robot camera has:
+# FOV_H = 93. * math.pi / 180.
+# FOV_V = 70. * math.pi / 180.
+
+# Create fake camera (based on USB webcam params)
+fx = 810.
+fy = 810.
+cx = 320.
+cy = 240.
+
+# Define a web_cam
+web_cam_int = CameraIntrinsics()
+web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+web_cam_int.distortion_coeffs = np.zeros((5,1))
+
+web_cam_ext = CameraExtrinsics()
+# Camera rotation from robot x,y,z to opencv (z, -x, -y)
+web_cam_ext.R = np.array([[0., 0., 1.],
+                          [-1, 0,  0],
+                          [0, -1., 0]])
+web_cam_ext.T = np.array([0., 0., 0.])
+
+web_cam_params = CameraParameters()
+web_cam_params.camera_int = web_cam_int
+web_cam_params.camera_ext = web_cam_ext
+
+camera_list = []
+
+for team_number in (971, 8971, 9971):
+    for (i, node_name) in enumerate(("pi-1", "pi-2", "pi-3", "pi-4", "pi-5")):
+        camera_base = copy.deepcopy(web_cam_params)
+        camera_base.node_name = node_name
+        camera_base.team_number = team_number
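+        # Give each test camera a distinct extrinsic translation (presumably so
+        # tests can tell the nodes apart).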
+        camera_base.camera_ext.T = np.asarray(np.float32([i+1, i+1, i+1]))
+        camera_list.append(camera_base)
+
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
new file mode 100644
index 0000000..3feaa99
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -0,0 +1,249 @@
+#include <unistd.h>
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#if 1
+#include "y2020/vision/tools/python_code/sift_training_data_test.h"
+#else
+// Using this include (and changing the BUILD file) allows testing on real data.
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+#endif
+
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/vision_generated.h"
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+class Feature {
+ public:
+  std::vector<float> descriptor_;
+  /* float x_;
+  float y_;
+  float size_;
+  float angle_;
+  float response_;
+  int octave_;
+  cv::Point3f keypoint_field_location_;*/
+};
+
+class CameraCalibration {
+ public:
+  cv::Mat intrinsics_;
+  cv::Mat fixed_extrinsics_;
+};
+
+class TrainingImage {
+ public:
+  cv::Mat features_;
+  cv::Mat field_to_target_;
+};
+
+class TrainingData {
+ public:
+  std::vector<TrainingImage> images_;
+  CameraCalibration camera_calibration_;
+};
+
+class CameraParamTest {
+ public:
+  CameraParamTest() {
+    const auto training_data_bfbs = SiftTrainingData();
+    sift_training_data_ =
+        flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+    {
+      flatbuffers::Verifier verifier(
+          reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+          training_data_bfbs.size());
+
+      CHECK(sift_training_data_->Verify(verifier));
+    }
+
+    CopyTrainingFeatures();
+    sift_camera_calibration_ = CameraParamTest::FindCameraCalibration();
+    camera_intrinsics_ = CameraIntrinsics();
+    camera_extrinsics_ = CameraExtrinsics();
+  }
+
+  // Copies the information from sift_training_data_ into training_data_ and
+  // the 2D/3D point lists.
+  void CopyTrainingFeatures();
+
+  const sift::CameraCalibration *FindCameraCalibration() const;
+
+  // Returns the 2D image location for the specified training feature.
+  cv::Point2f Training2dPoint(int training_image_index, int feature_index);
+  // Returns the 3D location for the specified training feature.
+  cv::Point3f Training3dPoint(int training_image_index, int feature_index);
+
+  const sift::TransformationMatrix *FieldToTarget(int training_image_index) {
+    return sift_training_data_->images()
+        ->Get(training_image_index)
+        ->field_to_target();
+  }
+
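+  // Note: these accessors wrap the flatbuffer-owned data in cv::Mat headers
+  // without copying it.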
+  cv::Mat CameraIntrinsics() const {
+    const cv::Mat result(3, 3, CV_32F,
+                         const_cast<void *>(static_cast<const void *>(
+                             sift_camera_calibration_->intrinsics()->data())));
+    CHECK_EQ(result.total(), sift_camera_calibration_->intrinsics()->size());
+    return result;
+  }
+
+  cv::Mat CameraExtrinsics() const {
+    const cv::Mat result(
+        4, 4, CV_32F,
+        const_cast<void *>(static_cast<const void *>(
+            sift_camera_calibration_->fixed_extrinsics()->data()->data())));
+    return result;
+  }
+
+  int number_training_images() const {
+    return sift_training_data_->images()->size();
+  }
+
+  const std::string node_name_ = "pi-3";
+  const int team_number_ = 971;
+
+  // We'll just extract the calibration that matches our current node and
+  // team number.
+  const sift::CameraCalibration *sift_camera_calibration_;
+  cv::Mat camera_intrinsics_;
+  cv::Mat camera_extrinsics_;
+
+  TrainingData training_data_;
+  const sift::TrainingData *sift_training_data_;
+  std::vector<sift::TransformationMatrix *> field_to_targets_;
+  std::vector<cv::Point2f> point_list_2d_;
+  std::vector<cv::Point3f> point_list_3d_;
+};
+
+// Copy in the keypoint descriptors, the 2d (image) location,
+// and the 3d (field) location
+void CameraParamTest::CopyTrainingFeatures() {
+  int train_image_index = 0;
+  for (const sift::TrainingImage *training_image :
+       *sift_training_data_->images()) {
+    TrainingImage training_image_data;
+    cv::Mat features(training_image->features()->size(), 128, CV_32F);
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
+      const sift::Feature *feature_table = training_image->features()->Get(i);
+      const flatbuffers::Vector<float> *const descriptor =
+          feature_table->descriptor();
+      CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
+      cv::Mat(1, descriptor->size(), CV_32F,
+              const_cast<void *>(static_cast<const void *>(descriptor->data())))
+          .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
+
+      cv::Point2f point_2d = Training2dPoint(train_image_index, i);
+      point_list_2d_.push_back(point_2d);
+      cv::Point3f point_3d = Training3dPoint(train_image_index, i);
+      point_list_3d_.push_back(point_3d);
+    }
+    const sift::TransformationMatrix *field_to_target_ =
+        FieldToTarget(train_image_index);
+    const cv::Mat field_to_target_mat(
+        4, 4, CV_32F,
+        const_cast<void *>(
+            static_cast<const void *>(field_to_target_->data()->data())));
+    training_image_data.features_ = features;
+    training_image_data.field_to_target_ = field_to_target_mat;
+
+    training_data_.images_.push_back(training_image_data);
+    train_image_index++;
+  }
+}
+
+const sift::CameraCalibration *CameraParamTest::FindCameraCalibration() const {
+  for (const sift::CameraCalibration *candidate :
+       *sift_training_data_->camera_calibrations()) {
+    if (candidate->node_name()->string_view() != node_name_) {
+      continue;
+    }
+    if (candidate->team_number() != team_number_) {
+      continue;
+    }
+    return candidate;
+  }
+  LOG(INFO) << ": Failed to find camera calibration for " << node_name_
+            << " on " << team_number_;
+  return NULL;
+}
+
+// Returns the 2D image location for the specified training feature.
+cv::Point2f CameraParamTest::Training2dPoint(int training_image_index,
+                                             int feature_index) {
+  const float x = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->x();
+  const float y = sift_training_data_->images()
+                      ->Get(training_image_index)
+                      ->features()
+                      ->Get(feature_index)
+                      ->y();
+  return cv::Point2f(x, y);
+}
+
+// Returns the 3D location for the specified training feature.
+cv::Point3f CameraParamTest::Training3dPoint(int training_image_index,
+                                             int feature_index) {
+  const sift::KeypointFieldLocation *const location =
+      sift_training_data_->images()
+          ->Get(training_image_index)
+          ->features()
+          ->Get(feature_index)
+          ->field_location();
+  return cv::Point3f(location->x(), location->y(), location->z());
+}
+
+TEST(CameraParamTest, TargetDataTest) {
+  CameraParamTest camera_params;
+
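+  // The expected values below come from the test training data generated by
+  // :load_sift_training_test (sift_training_data_test.h).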
+  ASSERT_EQ(camera_params.number_training_images(), 1);
+  ASSERT_EQ(camera_params.point_list_2d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_2d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_2d_[0].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_.size(), 676);
+  ASSERT_EQ(camera_params.point_list_3d_[0].x, 0);
+  ASSERT_EQ(camera_params.point_list_3d_[1].y, 1);
+  ASSERT_EQ(camera_params.point_list_3d_[2].z, 2);
+
+  float ftt_mat[16] = {1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 2, 0, 0, 0, 1};
+  cv::Mat field_to_targets_0 = cv::Mat(4, 4, CV_32F, ftt_mat);
+  cv::Mat ftt_diff =
+      (camera_params.training_data_.images_[0].field_to_target_ !=
+       field_to_targets_0);
+  bool ftt_eq = (cv::countNonZero(ftt_diff) == 0);
+  ASSERT_TRUE(ftt_eq)
+      << "Mismatch on field_to_target: "
+      << camera_params.training_data_.images_[0].field_to_target_ << "\nvs.\n"
+      << field_to_targets_0;
+
+  float intrinsic_mat[9] = {810, 0, 320, 0, 810, 240, 0, 0, 1};
+  cv::Mat intrinsic = cv::Mat(3, 3, CV_32F, intrinsic_mat);
+  cv::Mat intrinsic_diff = (intrinsic != camera_params.camera_intrinsics_);
+  bool intrinsic_eq = (cv::countNonZero(intrinsic_diff) == 0);
+  ASSERT_TRUE(intrinsic_eq)
+      << "Mismatch on intrinsics: " << intrinsic << "\nvs.\n"
+      << camera_params.camera_intrinsics_;
+
+  float i_f = 3.0;
+  float extrinsic_mat[16] = {0, 0,  1, i_f, -1, 0, 0, i_f,
+                             0, -1, 0, i_f, 0,  0, 0, 1};
+  cv::Mat extrinsic = cv::Mat(4, 4, CV_32F, extrinsic_mat);
+  cv::Mat extrinsic_diff = (extrinsic != camera_params.camera_extrinsics_);
+  bool extrinsic_eq = (cv::countNonZero(extrinsic_diff) == 0);
+  ASSERT_TRUE(extrinsic_eq)
+      << "Mismatch of extrinsic: " << extrinsic << "\nvs.\n"
+      << camera_params.camera_extrinsics_;
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 22eb6ce..157f9e6 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -1,5 +1,6 @@
 import argparse
 import cv2
+import glog
 import json
 import math
 import numpy as np
@@ -17,7 +18,7 @@
     global current_mouse
     current_mouse = (x, y)
     if event == cv2.EVENT_LBUTTONUP:
-        #print("Adding point at %d, %d" % (x,y))
+        glog.debug("Adding point at %d, %d" % (x,y))
         point_list.append([x, y])
     pass
 
@@ -218,7 +219,7 @@
     pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
                                                distortion_coeffs)
     if inliers is None:
-        print("WARNING: Didn't get any inliers when reprojecting polygons")
+        glog.warn("Didn't get any inliers when reprojecting polygons")
         return img
     for i in range(len(pts_2d)):
         pt_2d = pts_2d_np[i][0]
@@ -239,7 +240,7 @@
     image = cv2.imread("test_images/train_power_port_red.png")
 
     polygon_list = define_polygon(image)
-    print(polygon_list)
+    glog.debug(polygon_list)
 
 
 def sample_define_points_by_list_usage():
@@ -251,5 +252,5 @@
                    (689, 679)]
 
     polygon_list = define_points_by_list(image, test_points)
-    print(polygon_list)
+    glog.debug(polygon_list)
 
diff --git a/y2020/vision/tools/python_code/field_display.py b/y2020/vision/tools/python_code/field_display.py
index 93f645c..11d098d 100644
--- a/y2020/vision/tools/python_code/field_display.py
+++ b/y2020/vision/tools/python_code/field_display.py
@@ -17,17 +17,18 @@
 
 
 # Convert field coordinates (meters) to image coordinates (pixels).
-# Field origin is (x,y) at center of red alliance driver station wall,
-# with x pointing into the field.
+# Field origin is (x,y) at the center of the field,
+# with x pointing towards the red alliance driver station.
 # The default field image is 1598 x 821 pixels, so 1 cm per pixel on field
 # I.e., Field is 1598 x 821 pixels = 15.98 x 8.21 meters
-# Our origin in image coordinates is at (1598, 410) pixels, facing in the -x image direction
+# Our origin in image coordinates is at (799, 410) pixels, with the +x direction
+# to the right.
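+# E.g., a robot at field position (1.0, 2.0) m maps to image pixel
+# (799 + 100, 410 - 200) = (899, 210).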
 
 
 def field_coord_to_image_coord(robot_position):
     # Scale by 100 pixel / cm
-    robot_pos_img_coord = np.array([[1598, 410]]).T + np.int32(
-        100.0 * np.array([[-robot_position[0][0], robot_position[1][0]]]).T)
+    robot_pos_img_coord = np.array([[799, 410]]).T + np.int32(
+        100.0 * np.array([[robot_position[0][0], -robot_position[1][0]]]).T)
     return robot_pos_img_coord
 
 
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index d6b7deb..09d2764 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -1,36 +1,20 @@
 import cv2
 import math
-from matplotlib import pyplot as plt
 import numpy as np
 import time
 
 import field_display
 import train_and_match as tam
+import target_definition
+import camera_definition
 
 ### DEFINITIONS
+target_definition.USE_BAZEL = False
+target_list = target_definition.compute_target_definition()
+camera_list = camera_definition.camera_list
 
-# List of images to train on
-training_image_names = [
-    'test_images/train_power_port_blue.png',
-    # Using webcam captured image for testing-- best to use only one of the two
-    # training images for the power_port_red
-    'test_images/train_power_port_red_webcam.png',
-    #    'test_images/train_power_port_red.png',
-    'test_images/train_loading_bay_blue.png',
-    'test_images/train_loading_bay_red.png'
-]
-
-# Target points on the image -- needs to match training images-- one per image
-# These are manually captured by examining the images, and entering the pixel
-# values from the target center for each image.
-# These are currently only used for visualization of the target
-target_pt_list = [
-    np.float32([[393, 147]]).reshape(-1, 1, 2),  # train_power_port_blue.png
-    np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
-    #    np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
-    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
-    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
-]
+# For now, just use the first one
+camera_params = camera_list[0]
 
 # Put list of all possible images we want to test, and then we'll select one
 # Note, if you query on the same image as training, only that image will match
@@ -45,8 +29,8 @@
     'test_images/test_raspi3_sample.jpg',  #7
     'test_images/test_VR_sample1.png',  #8
     'test_images/train_loading_bay_blue.png',  #9
-    'test_images/train_loading_bay_red.png'
-]  #10
+    'test_images/train_loading_bay_red.png'  #10
+]
 
 training_image_index = 0
 # TODO: Should add argParser here to select this
@@ -54,15 +38,25 @@
 
 ##### Let's get to work!
 
-# Load the training and query images
-training_images = tam.load_images(training_image_names)
+training_image_names = []
+training_images = []
+train_keypoint_lists = []
+train_descriptor_lists = []
+target_points_tmp = []
+target_pt_list = None
+# Populate the data structures used by this script
+for target in target_list:
+    # Image names
+    print("Target filename:", target.image_filename)
+    training_image_names.append(target.image_filename)
+    # The training images
+    training_images.append(target.image)
+    # Keypoints and descriptors
+    train_keypoint_lists.append(target.keypoint_list)
+    train_descriptor_lists.append(target.descriptor_list)
+    target_points_tmp.append(target.target_point_2d)
 
-# Create feature extractor
-feature_extractor = tam.load_feature_extractor()
-
-# Extract keypoints and descriptors for model
-train_keypoint_lists, train_descriptor_lists = tam.detect_and_compute(
-    feature_extractor, training_images)
+target_pt_list = np.asarray(target_points_tmp).reshape(-1, 1, 2)
 
 # # Create the matcher.  This only needs to be created once
 ts = time.monotonic()  # Do some basic timing
@@ -72,6 +66,11 @@
 for i in range(len(train_keypoint_lists)):
     print("Model ", i, " has ", len(train_keypoint_lists[i]), " features: ")
 
+# Create feature extractor
+# TODO<Jim>: Should probably make sure we're using the same extractor for
+# training and query
+feature_extractor = tam.load_feature_extractor()
+
 # Load the query image in.  Based on index in our list, or using camera if index is -1
 query_images = []
 
@@ -121,183 +120,160 @@
     for i in range(len(train_keypoint_lists)):
         print("Model ", i, " has # good matches: ", len(good_matches_list[i]))
 
-    # TEMP: Use first list for what follows
-    # TODO: Should look for best match after homography and display that one
-    # OR: show results on all good matches
-    good_matches = good_matches_list[0]
+    # If we're querying static images, show full results
+    if (query_image_index != -1):
+        print("Showing results for static query image")
+        homography_list, matches_mask_list = tam.show_results(
+            training_images, train_keypoint_lists, query_images,
+            query_keypoint_lists, target_pt_list, good_matches_list)
 
     # Next, find homography (map between training and query images)
     homography_list, matches_mask_list = tam.compute_homographies(
         train_keypoint_lists, query_keypoint_lists, good_matches_list)
 
-    if matches_mask_list[0].count(1) < tam.MIN_MATCH_COUNT:
-        print(
-            "Not continuing pose calc because not enough good matches after homography-- only had ",
-            matches_mask_list[0].count(1))
-        continue
+    # Image used to store field plot
+    img_ret = None
+    for i, good_matches in enumerate(good_matches_list):
+        # TODO: Should look for best match after homography and display that one
+        # OR: show results on all good matches
+        print("Processing match for model %d" % i)
 
-    # Next, let's go to compute pose
-    # I've chosen some coordinate frames to help track things.  Using indices:
-    #   w:    world coordinate frame (at center wall of driver station)
-    #   tarj: camera pose when jth target (e.g., power panel) was captured
-    #   ci:   ith camera
-    #   b:    body frame of robot (robot location)
-    #
-    # And, T for translation, R for rotation, "est" for estimated
-    # So, T_w_b_est is the estimated translation from world to body coords
-    #
-    # A few notes:
-    #   -- CV uses rotated frame, where z points out of camera, y points down
-    #   -- This causes a lot of mapping around of points, but overall OK
+        if matches_mask_list[i].count(1) < tam.MIN_MATCH_COUNT:
+            print(
+                "Not continuing pose calc because not enough good matches after homography-- only had ",
+                matches_mask_list[i].count(1))
+            continue
 
-    ### TODO: Still need to clean this rest up
+        # Next, let's go to compute pose
+        # I've chosen some coordinate frames to help track things.  Using indices:
+        #   w:    world coordinate frame (at center wall of driver station)
+        #   tarj: camera pose when jth target (e.g., power panel) was captured
+        #   ci:   ith camera
+        #   b:    body frame of robot (robot location)
+        #
+        # And, T for translation, R for rotation, "est" for estimated
+        # So, T_w_b_est is the estimated translation from world to body coords
+        #
+        # A few notes:
+        #   -- CV uses rotated frame, where z points out of camera, y points down
+        #   -- This causes a lot of mapping around of points, but overall OK
 
-    # TODO: Need to generalize this for each target-- need to have 3D global locations of each target's keypoints
-    field_length = 15.98
-    target_center_offset_y = 1.67
-    target_center_height = 2.49
-    if training_image_index is 0:
-        # webcam
-        fx = 810.
-        fy = 810.
-        cx = 320.
-        cy = 240.
-        cy_above_ground = 1.  # meters above ground
-        depth = 4.57  # meters
-        target_center_offset_y = 1.67
-    elif training_image_index is 1:
-        # Made up for screenshot from manual
-        cx = int(1165 / 2)
-        cy_above_ground = -0.5856  # meters above ground
-        cy = 776 / 2
-        fx = 1143  # pixels (or 0.00160m or 1143 pixels?)
-        depth = 4.57  # meters
-    elif training_image_index is 2:
-        cx = 732 / 2
-        cy_above_ground = 0.454  # meters above ground
-        cy = 436 / 2
-        target_width = 5 * 12 * 0.0254  # width in ft * in/ft * m/in
-        target_height = target_width * cy / cx  # width in ft * in/ft * m/in * cy / cx
-        FOV_x = 54 * math.pi / 180.  # camera FOV in radians (from 54 degree lens)
-        fx = cx / math.tan(FOV_x / 2)  # pixels
-        depth = (target_height / 2) / math.tan(FOV_x / 2)  # meters
+        src_pts_3d = []
+        for m in good_matches:
+            src_pts_3d.append(target_list[i].keypoint_list_3d[m.trainIdx])
+            pt = query_keypoint_lists[0][m.queryIdx].pt
+            print("Color at ", pt, " is ", query_images[0][int(pt[1])][int(
+                pt[0])])
+            query_images[0] = cv2.circle(
+                query_images[0], (int(pt[0]), int(pt[1])), 5, (0, 255, 0), 3)
 
-    src_pts_3d = []
-    for m in good_matches:
-        # Project the matches all back to 3D, assuming they're all at the same depth
-        # Add in the distance across the field and any lateral offset of the target
-        src_pts_3d.append([(
-            field_length,
-            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[0] - cx
-              ) * depth / fx - target_center_offset_y,
-            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[1] - cy
-              ) * depth / fx + cy_above_ground)])
+        cv2.imshow('DEBUG', query_images[0]), cv2.waitKey(0)
+        # Reshape 3d point list to work with computations to follow
+        src_pts_3d_array = np.asarray(np.float32(src_pts_3d)).reshape(-1, 3, 1)
 
-    # Reshape 3d point list to work with computations to follow
-    src_pts_3d_array = np.asarray(np.float32(src_pts_3d)).reshape(-1, 3, 1)
+        # Load from camera parameters
+        cam_mat = camera_params.camera_int.camera_matrix
+        distortion_coeffs = camera_params.camera_int.distortion_coeffs
 
-    cam_mat = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-    distortion_coeffs = np.zeros((5, 1))
+        # Create list of matching query point locations
+        dst_pts = np.float32([
+            query_keypoint_lists[0][m.queryIdx].pt for m in good_matches
+        ]).reshape(-1, 1, 2)
 
-    # Create list of matching query point locations
-    dst_pts = np.float32([
-        query_keypoint_lists[0][m.queryIdx].pt for m in good_matches
-    ]).reshape(-1, 1, 2)
+        ts = time.monotonic()
+        # TODO: May want to supply it with estimated guess as starting point
+        # Find offset from camera to original location of camera relative to target
+        retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
+            src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
 
-    ts = time.monotonic()
-    # TODO: May want to supply it with estimated guess as starting point
-    # Find offset from camera to original location of camera relative to target
-    retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
-        src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
+        tf = time.monotonic()
+        print("Solving Pose took ", tf - ts, " secs")
+        print("Found ", len(inliers), " inliers, out of ", len(dst_pts),
+              " matched points")
+        ### THIS is the estimate of the robot on the field!
+        R_w_ci_estj, T_w_ci_estj = field_display.invert_pose_Rodrigues(
+            R_ci_w_estj, T_ci_w_estj)
+        #print("Pose from PnP is:\n", R_ci_w_estj, "\n", T_ci_w_estj)
+        # The rotation estimate is for camera with z pointing forwards.
+        # Compute the rotation as if x is forward
+        R_w_ci_estj_robot = field_display.camera_rot_to_world_Rodrigues(
+            R_w_ci_estj)
+        #print("Pose in world coords is:\n", R_w_ci_estj, "\n", T_w_ci_estj,
+        #      "\nWith robot coord frame rotation as \n", R_w_ci_estj_robot)
 
-    tf = time.monotonic()
-    print("Solving Pose took ", tf - ts, " secs")
-    print("Found ", len(inliers), " inliers, out of ", len(dst_pts),
-          " matched points")
-    ### THIS is the estimate of the robot on the field!
-    R_w_ci_estj, T_w_ci_estj = field_display.invert_pose_Rodrigues(
-        R_ci_w_estj, T_ci_w_estj)
-    print("Pose from PnP is:\n", R_ci_w_estj, "\n", T_ci_w_estj)
-    # The rotation estimate is for camera with z pointing forwards.
-    # Compute the rotation as if x is forward
-    R_w_ci_estj_robot = field_display.camera_rot_to_world_Rodrigues(
-        R_w_ci_estj)
-    print("Pose in world coords is:\n", R_w_ci_estj, "\n", T_w_ci_estj,
-          "\nWith robot coord frame rotation as \n", R_w_ci_estj_robot)
+        # Use the rotational component about the z axis to define the heading
+        # TODO<jim>: Change this to use rotation of x-unit vector
+        heading_est = R_w_ci_estj_robot[2][0]
+        # Plot location of the robot, along with heading
+        color = (255, 0, 0)  # Blue
+        if i in (0, 1):
+            color = (0, 0, 255)  # Red
+        img_ret = field_display.plot_bot_on_field(img_ret, color, T_w_ci_estj,
+                                                  heading_est)
 
-    # Use the rotational component about the z axis to define the heading
-    heading_est = R_w_ci_estj_robot[2][0]
-    # Plot location of the robot, along with heading
-    img_ret = field_display.plot_bot_on_field(None, (0, 255, 255), T_w_ci_estj,
-                                              heading_est)
+        # Compute vector to target
+        T_w_target_pt = np.array(target_list[i].target_position).reshape(3, 1)
+        vector_to_target = T_w_target_pt - T_w_ci_estj
+        # Compute distance to target (full 3D)
+        distance_to_target = np.linalg.norm(vector_to_target)
+        # Compute distance to target (x,y)
+        distance_to_target_ground = np.linalg.norm(vector_to_target[0:2])
+        #print("Distance comparison: all: ", distance_to_target, " vs. ground: ", distance_to_target_ground)
 
-    # Compute vector to target
-    T_w_target_pt = np.array(
-        [[field_length, -target_center_offset_y, target_center_height]]).T
-    vector_to_target = T_w_target_pt - T_w_ci_estj
-    # Compute distance to target (full 3D)
-    distance_to_target = np.linalg.norm(vector_to_target)
-    # Compute distance to target (x,y)
-    distance_to_target_ground = np.linalg.norm(vector_to_target[0:2])
-    #print("Distance comparison: all: ", distance_to_target, " vs. ground: ", distance_to_target_ground)
+        angle_to_target_abs = math.atan2(vector_to_target[1][0],
+                                         vector_to_target[0][0])
+        angle_to_target_robot = angle_to_target_abs - heading_est
+        img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0),
+                                                   T_w_ci_estj, T_w_target_pt)
+        # THESE ARE OUR ESTIMATES OF HEADING, DISTANCE, AND SKEW TO TARGET
+        log_file.write('%lf, %lf, %lf\n' %
+                       (heading_est, distance_to_target_ground,
+                        angle_to_target_robot))
 
-    angle_to_target_abs = math.atan2(vector_to_target[1][0],
-                                     vector_to_target[0][0])
-    angle_to_target_robot = angle_to_target_abs - heading_est
-    img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0),
-                                               T_w_ci_estj, T_w_target_pt)
-    # THESE ARE OUR ESTIMATES OF HEADING, DISTANCE, AND SKEW TO TARGET
-    log_file.write('%lf, %lf, %lf\n' % (heading_est, distance_to_target_ground,
-                                        angle_to_target_robot))
+        # A bunch of code to visualize things...
+        #
+        # Draw target on the image
+        h, w, ch = query_images[0].shape
+        query_image_copy = query_images[0].copy()
+        # Map target_pt onto image, for display
+        target_point_2d_trans = cv2.perspectiveTransform(
+            target_pt_list[i].reshape(-1, 1, 2),
+            homography_list[i]).astype(int)
+        # Ballpark the size of the circle so it looks right on image
+        radius = int(
+            32 * abs(homography_list[i][0][0] + homography_list[i][1][1]) /
+            2)  # Average of scale factors
+        query_image_copy = cv2.circle(query_image_copy,
+                                      (target_point_2d_trans.flatten()[0],
+                                       target_point_2d_trans.flatten()[1]),
+                                      radius, (0, 255, 0), 3)
 
-    # A bunch of code to visualize things...
-    #
-    # Draw target on the image
-    h, w, ch = query_images[0].shape
-    query_image_copy = query_images[0].copy()
-    # Map target_pt onto image, for display
-    transformed_target = cv2.perspectiveTransform(target_pt_list[0],
-                                                  homography_list[0])
-    # Ballpark the size of the circle so it looks right on image
-    radius = int(32 * abs(homography_list[0][0][0] + homography_list[0][1][1])
-                 / 2)  # Average of scale factors
-    query_image_copy = cv2.circle(
-        query_image_copy,
-        (transformed_target.flatten()[0], transformed_target.flatten()[1]),
-        radius, (0, 255, 0), 3)
+        # Create empty image the size of the field
+        img_heading = field_display.load_field_image()
+        img_heading[:, :, :] = 0
+        f_h, f_w, f_ch = img_heading.shape
 
-    # If we're querying static images, show full results
-    if (query_image_index is not -1):
-        homography_list, matches_mask_list = tam.show_results(
-            training_images, train_keypoint_lists, query_images,
-            query_keypoint_lists, target_pt_list, good_matches_list)
+        # Create heading view, and paste it to bottom of field
+        img_heading = field_display.plot_camera_to_target(
+            img_heading, (0, 255, 0), heading_est, distance_to_target_ground,
+            angle_to_target_robot)
+        vis = np.concatenate((img_ret, img_heading), axis=0)
 
-    # Create empty image the size of the field
-    img_heading = field_display.load_field_image()
-    img_heading[:, :, :] = 0
-    f_h, f_w, f_ch = img_heading.shape
+        # Paste query image to right of other views (scale to keep aspect ratio)
+        img_query_scaled = cv2.resize(query_image_copy,
+                                      (int(2 * w * f_h / h), 2 * f_h))
+        vis = np.concatenate((vis, img_query_scaled), axis=1)
 
-    # Create heading view, and paste it to bottom of field
-    img_heading = field_display.plot_camera_to_target(
-        img_heading, (0, 255, 0), heading_est, distance_to_target_ground,
-        angle_to_target_robot)
-    vis = np.concatenate((img_ret, img_heading), axis=0)
+        # Scale down to make it fit on screen
+        vis = cv2.resize(vis, (int(vis.shape[1] / 4), int(vis.shape[0] / 4)))
+        cv2.imshow('field_display', vis)
 
-    # Paste query image to right of other views (scale to keep aspect ratio)
-    img_query_scaled = cv2.resize(query_image_copy,
-                                  (int(2 * w * f_h / h), 2 * f_h))
-    vis = np.concatenate((vis, img_query_scaled), axis=1)
-
-    # Scale down to make it fit on screen
-    vis = cv2.resize(vis, (int(vis.shape[1] / 4), int(vis.shape[0] / 4)))
-    cv2.imshow('field_display', vis)
-
-    #Waits for a user input to quit the application
-    pause_time = 0
-    if (query_image_index is -1):
-        pause_time = 1
-    if cv2.waitKey(pause_time) & 0xFF == ord('q'):
-        break
+        #Waits for a user input to quit the application
+        pause_time = 0
+        if (query_image_index == -1):
+            pause_time = 1
+        if cv2.waitKey(pause_time) & 0xFF == ord('q'):
+            break
 
 # Done.  Clean things up
 if (query_image_index is -1):
@@ -305,4 +281,3 @@
     cap.release()
 
 cv2.destroyAllWindows()
-
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 9fa4acf..65f3342 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -1,100 +1,217 @@
 #!/usr/bin/python3
 
 import cv2
+import glog
+import numpy as np
 import sys
 import flatbuffers
-import target_definition
 
-import frc971.vision.sift.TrainingImage as TrainingImage
-import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.CameraCalibration as CameraCalibration
 import frc971.vision.sift.Feature as Feature
+import frc971.vision.sift.KeypointFieldLocation as KeypointFieldLocation
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TransformationMatrix as TransformationMatrix
+
+
+# Takes a 3x3 rotation matrix and 3x1 translation vector, and outputs a
+# 16-element list (a row-major 4x4 transformation matrix) suitable for
+# writing to the flatbuffer
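+# E.g., rot_and_trans_to_list(np.eye(3), [1., 2., 3.]) returns
+# [1, 0, 0, 1,  0, 1, 0, 2,  0, 0, 1, 3,  0, 0, 0, 1].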
+def rot_and_trans_to_list(R, T):
+    output_list = []
+    for row in range(3):
+        for col in range(3):
+            output_list.append(R[row][col])
+        output_list.append(T[row])
+
+    output_list = output_list + [0., 0., 0., 1.]
+    return output_list
+
 
 def main():
 
-  output_path = sys.argv[1]
-  print("Writing file to ", output_path)
+    target_data_list = None
+    camera_calib_list = None
 
-  target_data_list = target_definition.compute_target_definition()
+    output_path = sys.argv[1]
 
-  fbb = flatbuffers.Builder(0)
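+    # An optional second argument of "test" selects the test camera and target
+    # definitions instead of the real ones.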
+    if (len(sys.argv) > 2):
+        if sys.argv[2] == "test":
+            glog.info("Loading test data")
+            import camera_definition_test
+            import target_definition_test
+            target_data_list = target_definition_test.compute_target_definition(
+            )
+            camera_calib_list = camera_definition_test.camera_list
+        else:
+            glog.error("Unhandled arguments: '%s'" % sys.argv[2])
+            quit()
+    else:
+        glog.info("Loading target configuration data")
+        import camera_definition
+        import target_definition
+        target_data_list = target_definition.compute_target_definition()
+        camera_calib_list = camera_definition.camera_list
 
-  images_vector = []
+    glog.info("Writing file to ", output_path)
 
-  for target_data in target_data_list:
+    fbb = flatbuffers.Builder(0)
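+    # Note: the flatbuffers builder requires nested vectors and strings to be
+    # created before the enclosing table's Start() call, hence the ordering below.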
 
-    features_vector = []
+    images_vector = []
 
-    for keypoint, keypoint_3d, descriptor in zip(target_data.keypoint_list,
-                                                 target_data.keypoint_list_3d,
-                                                 target_data.descriptor_list):
+    # Iterate over all the training targets
+    for target_data in target_data_list:
 
-      Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
-      for n in reversed(descriptor):
-        fbb.PrependFloat32(n)
-      descriptor_vector = fbb.EndVector(len(descriptor))
+        features_vector = []
 
-      Feature.FeatureStart(fbb)
+        # Iterate over all the keypoints
+        for keypoint, keypoint_3d, descriptor in zip(
+                target_data.keypoint_list, target_data.keypoint_list_3d,
+                target_data.descriptor_list):
 
-      Feature.FeatureAddDescriptor(fbb, descriptor_vector)
-      Feature.FeatureAddX(fbb, keypoint.pt[0])
-      Feature.FeatureAddY(fbb, keypoint.pt[1])
-      Feature.FeatureAddSize(fbb, keypoint.size)
-      Feature.FeatureAddAngle(fbb, keypoint.angle)
-      Feature.FeatureAddResponse(fbb, keypoint.response)
-      Feature.FeatureAddOctave(fbb, keypoint.octave)
+            # Build the Descriptor vector
+            Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+            for n in reversed(descriptor):
+                fbb.PrependFloat32(n)
+            descriptor_vector = fbb.EndVector(len(descriptor))
 
-      features_vector.append(Feature.FeatureEnd(fbb))
+            # Add all the components to each Feature
+            Feature.FeatureStart(fbb)
+            Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+            Feature.FeatureAddX(fbb, keypoint.pt[0])
+            Feature.FeatureAddY(fbb, keypoint.pt[1])
+            Feature.FeatureAddSize(fbb, keypoint.size)
+            Feature.FeatureAddAngle(fbb, keypoint.angle)
+            Feature.FeatureAddResponse(fbb, keypoint.response)
+            Feature.FeatureAddOctave(fbb, keypoint.octave)
 
-      ## TODO: Write 3d vector here
+            keypoint_3d_location = KeypointFieldLocation.CreateKeypointFieldLocation(
+                fbb, keypoint_3d[0][0], keypoint_3d[0][1], keypoint_3d[0][2])
 
-    TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
-    for feature in reversed(features_vector):
-      fbb.PrependUOffsetTRelative(feature)
-    features_vector_table = fbb.EndVector(len(features_vector))
+            Feature.FeatureAddFieldLocation(fbb, keypoint_3d_location)
 
-    TrainingImage.TrainingImageStart(fbb)
-    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
-    # TODO(Brian): Fill out the transformation matrices.
-    images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+            features_vector.append(Feature.FeatureEnd(fbb))
 
-  TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
-  for training_image in reversed(images_vector):
-    fbb.PrependUOffsetTRelative(training_image)
-  images_vector_table = fbb.EndVector(len(images_vector))
+        # Create the field_to_target TransformationMatrix
+        field_to_target_list = rot_and_trans_to_list(
+            target_data.target_rotation, target_data.target_position)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(field_to_target_list))
+        for n in reversed(field_to_target_list):
+            fbb.PrependFloat32(n)
+        field_to_target_offset = fbb.EndVector(len(field_to_target_list))
 
-  TrainingData.TrainingDataStart(fbb)
-  TrainingData.TrainingDataAddImages(fbb, images_vector_table)
-  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, field_to_target_offset)
+        transformation_mat_offset = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
 
-  bfbs = fbb.Output()
+        # Create the TrainingImage feature vector
+        TrainingImage.TrainingImageStartFeaturesVector(fbb,
+                                                       len(features_vector))
+        for feature in reversed(features_vector):
+            fbb.PrependUOffsetTRelative(feature)
+        features_vector_offset = fbb.EndVector(len(features_vector))
 
-  output_prefix = [
-      b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-      b'#include <string_view>',
-      b'namespace frc971 {',
-      b'namespace vision {',
-      b'inline std::string_view SiftTrainingData() {',
-  ]
-  output_suffix = [
-      b'  return std::string_view(kData, sizeof(kData));',
-      b'}',
-      b'}  // namespace vision',
-      b'}  // namespace frc971',
-      b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
-  ]
+        # Create the TrainingImage
+        TrainingImage.TrainingImageStart(fbb)
+        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(fbb,
+                                                    transformation_mat_offset)
 
-  with open(output_path, 'wb') as output:
-    for line in output_prefix:
-      output.write(line)
-      output.write(b'\n')
-    output.write(b'alignas(64) static constexpr char kData[] = "')
-    for byte in fbb.Output():
-      output.write(b'\\x' + (b'%x' % byte).zfill(2))
-    output.write(b'";\n')
-    for line in output_suffix:
-      output.write(line)
-      output.write(b'\n')
+        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+
+    # Create the TrainingData images vector from all targets
+    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+    for training_image in reversed(images_vector):
+        fbb.PrependUOffsetTRelative(training_image)
+    images_vector_table = fbb.EndVector(len(images_vector))
+
+    # Create camera calibration data
+    camera_calibration_vector = []
+    for camera_calib in camera_calib_list:
+        fixed_extrinsics_list = rot_and_trans_to_list(
+            camera_calib.camera_ext.R, camera_calib.camera_ext.T)
+        TransformationMatrix.TransformationMatrixStartDataVector(
+            fbb, len(fixed_extrinsics_list))
+        for n in reversed(fixed_extrinsics_list):
+            fbb.PrependFloat32(n)
+        fixed_extrinsics_data_offset = fbb.EndVector(
+            len(fixed_extrinsics_list))
+
+        TransformationMatrix.TransformationMatrixStart(fbb)
+        TransformationMatrix.TransformationMatrixAddData(
+            fbb, fixed_extrinsics_data_offset)
+        fixed_extrinsics_vector = TransformationMatrix.TransformationMatrixEnd(
+            fbb)
+
+        # TODO: Need to add in distortion coefficients here
+        # For now, just send the camera parameter matrix (fx, fy, cx, cy)
+        camera_int_list = camera_calib.camera_int.camera_matrix.ravel().tolist(
+        )
+        CameraCalibration.CameraCalibrationStartIntrinsicsVector(
+            fbb, len(camera_int_list))
+        for n in reversed(camera_int_list):
+            fbb.PrependFloat32(n)
+        intrinsics_vector = fbb.EndVector(len(camera_int_list))
+
+        node_name_offset = fbb.CreateString(camera_calib.node_name)
+        CameraCalibration.CameraCalibrationStart(fbb)
+        CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
+        CameraCalibration.CameraCalibrationAddTeamNumber(
+            fbb, camera_calib.team_number)
+        CameraCalibration.CameraCalibrationAddIntrinsics(
+            fbb, intrinsics_vector)
+        CameraCalibration.CameraCalibrationAddFixedExtrinsics(
+            fbb, fixed_extrinsics_vector)
+        camera_calibration_vector.append(
+            CameraCalibration.CameraCalibrationEnd(fbb))
+
+    TrainingData.TrainingDataStartCameraCalibrationsVector(
+        fbb, len(camera_calibration_vector))
+    for camera_calibration in reversed(camera_calibration_vector):
+        fbb.PrependUOffsetTRelative(camera_calibration)
+    camera_calibration_vector_table = fbb.EndVector(
+        len(camera_calibration_vector))
+
+    # Fill out TrainingData
+    TrainingData.TrainingDataStart(fbb)
+    TrainingData.TrainingDataAddImages(fbb, images_vector_table)
+    TrainingData.TrainingDataAddCameraCalibrations(
+        fbb, camera_calibration_vector_table)
+    fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+    bfbs = fbb.Output()
+
+    output_prefix = [
+        b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+        b'#include <string_view>',
+        b'namespace frc971 {',
+        b'namespace vision {',
+        b'inline std::string_view SiftTrainingData() {',
+    ]
+    output_suffix = [
+        b'  return std::string_view(kData, sizeof(kData));',
+        b'}',
+        b'}  // namespace vision',
+        b'}  // namespace frc971',
+        b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+    ]
+
+    # Write out the header file
+    with open(output_path, 'wb') as output:
+        for line in output_prefix:
+            output.write(line)
+            output.write(b'\n')
+        output.write(b'alignas(64) static constexpr char kData[] = "')
+        for byte in fbb.Output():
+            output.write(b'\\x' + (b'%x' % byte).zfill(2))
+        output.write(b'";\n')
+        for line in output_suffix:
+            output.write(line)
+            output.write(b'\n')
+
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 779eb0b..1727dff 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -1,6 +1,7 @@
 import argparse
 import cv2
-import json
+# TODO<Jim>: Add gflags for handling command-line flags
+import glog
 import math
 import numpy as np
 
@@ -8,11 +9,14 @@
 import define_training_data as dtd
 import train_and_match as tam
 
+# TODO<Jim>: Allow command-line setting of logging level
+glog.setLevel("WARN")
 global VISUALIZE_KEYPOINTS
 global USE_BAZEL
 USE_BAZEL = True
 VISUALIZE_KEYPOINTS = False
 
+
 def bazel_name_fix(filename):
     ret_name = filename
     if USE_BAZEL:
@@ -20,6 +24,7 @@
 
     return ret_name
 
+
 class TargetData:
     def __init__(self, filename):
         self.image_filename = filename
@@ -27,7 +32,8 @@
         if USE_BAZEL:
             from bazel_tools.tools.python.runfiles import runfiles
             r = runfiles.Create()
-            self.image_filename = r.Rlocation(bazel_name_fix(self.image_filename))
+            self.image_filename = r.Rlocation(
+                bazel_name_fix(self.image_filename))
         self.image = tam.load_images([self.image_filename])[0]
         self.polygon_list = []
         self.polygon_list_3d = []
@@ -35,8 +41,14 @@
         self.keypoint_list = []
         self.keypoint_list_3d = None  # numpy array of 3D points
         self.descriptor_list = []
+        self.target_rotation = None
+        self.target_position = None
+        self.target_point_2d = None
 
-    def extract_features(self, feature_extractor):
+    def extract_features(self, feature_extractor=None):
+        if feature_extractor is None:
+            feature_extractor = tam.load_feature_extractor()
+
         kp_lists, desc_lists = tam.detect_and_compute(feature_extractor,
                                                       [self.image])
         self.keypoint_list = kp_lists[0]
@@ -61,8 +73,8 @@
             # Filter and project points for each polygon in the list
             filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
                 keypoint_list, None, [self.polygon_list[poly_ind]])
-            print("Filtering kept %d of %d features" % (len(keep_list),
-                                                        len(keypoint_list)))
+            glog.info("Filtering kept %d of %d features" % (len(keep_list),
+                                                            len(keypoint_list)))
             filtered_point_array = np.asarray(
                 [(keypoint.pt[0], keypoint.pt[1])
                  for keypoint in filtered_keypoints]).reshape(-1, 2)
@@ -73,6 +85,7 @@
 
         return point_list_3d
 
+
 def compute_target_definition():
     ############################################################
     # TARGET DEFINITIONS
@@ -81,23 +94,27 @@
     ideal_target_list = []
     training_target_list = []
 
-    # Some general info about our field and targets
-    # Assume camera centered on target at 1 m above ground and distance of 4.85m
-    field_length = 15.98
+    # Using coordinate system as defined in sift.fbs:
+    # field origin (0, 0, 0) at floor height at center of field
+    # Robot orientation with x-axis pointing towards RED ALLIANCE player station
+    # y-axis to left and z-axis up (right-hand coordinate system)
+    # Thus, the red power port target location will be at (-15.98/2,1.67,0),
+    # with orientation (facing out of the field) of M_PI
+
+    # Field constants
+    field_length = 15.983
     power_port_total_height = 3.10
-    power_port_center_y = 1.67
-    power_port_width = 1.22
-    power_port_bottom_wing_height = 1.88
+    power_port_edge_y = 1.089
+    power_port_width = 1.225
+    power_port_bottom_wing_height = 1.828
     power_port_wing_width = 1.83
-    loading_bay_edge_y = 1.11
-    loading_bay_width = 1.52
-    loading_bay_height = 0.94
+    loading_bay_edge_y = 0.784
+    loading_bay_width = 1.524
+    loading_bay_height = 0.933
+    wing_angle = 20. * math.pi / 180.
 
     # Pick the target center location at halfway between top and bottom of the top panel
-    target_center_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
-
-    # TODO: Still need to figure out what this angle actually is
-    wing_angle = 20. * math.pi / 180.
+    power_port_target_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
 
     ###
     ### Red Power Port
@@ -109,51 +126,73 @@
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_red_main_panel_polygon_points_2d = [(451, 679), (451, 304),
-                                                   (100, 302), (451, 74),
-                                                   (689, 74), (689, 302),
-                                                   (689, 679)]
+                                                   (100, 302), (451, 74), (689,
+                                                                           74),
+                                                   (689, 302), (689, 679)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_main_panel_polygon_points_3d = [
-        (field_length, -power_port_center_y + power_port_width / 2., 0.),
-        (field_length, -power_port_center_y + power_port_width / 2.,
+        (-field_length/2., power_port_edge_y, 0.),
+        (-field_length/2., power_port_edge_y,
          power_port_bottom_wing_height),
-        (field_length, -power_port_center_y + power_port_width / 2.
-         + power_port_wing_width, power_port_bottom_wing_height),
-        (field_length, -power_port_center_y + power_port_width / 2.,
+        (-field_length/2., power_port_edge_y
+         - power_port_wing_width, power_port_bottom_wing_height),
+        (-field_length/2., power_port_edge_y,
          power_port_total_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
+        (-field_length/2., power_port_edge_y + power_port_width,
          power_port_total_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
+        (-field_length/2., power_port_edge_y + power_port_width,
          power_port_bottom_wing_height),
-        (field_length, -power_port_center_y - power_port_width / 2., 0.)
+        (-field_length/2., power_port_edge_y + power_port_width, 0.)
     ]
 
     power_port_red_wing_panel_polygon_points_2d = [(689, 74), (1022, 302),
                                                    (689, 302)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_wing_panel_polygon_points_3d = [
-        (field_length, -power_port_center_y - power_port_width / 2.,
+        (-field_length/2., power_port_edge_y + power_port_width,
          power_port_total_height),
-        (field_length - power_port_wing_width * math.sin(wing_angle),
-         -power_port_center_y - power_port_width / 2.
-         - power_port_wing_width * math.cos(wing_angle),
+        (-field_length/2. + power_port_wing_width * math.sin(wing_angle),
+         power_port_edge_y + power_port_width
+         + power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
-        (field_length, -power_port_center_y - power_port_width / 2.,
+        (-field_length/2., power_port_edge_y + power_port_width,
          power_port_bottom_wing_height)
     ]
 
     # Populate the red power port
-    ideal_power_port_red.polygon_list.append(power_port_red_main_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_main_panel_polygon_points_3d)
+    ideal_power_port_red.polygon_list.append(
+        power_port_red_main_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(
+        power_port_red_main_panel_polygon_points_3d)
 
-    ideal_power_port_red.polygon_list.append(power_port_red_wing_panel_polygon_points_2d)
-    ideal_power_port_red.polygon_list_3d.append(power_port_red_wing_panel_polygon_points_3d)
+    # Define the pose of the target
+    # Location is on the ground, at the center of the target
+    # Orientation is with "x" pointing out of the field, and "z" up
+    # This way, if robot is facing target directly, the x-axes are aligned
+    # and the skew to target is zero
+    ideal_power_port_red.target_rotation = -np.identity(3, np.double)
+    ideal_power_port_red.target_rotation[2][2] = 1.
+    ideal_power_port_red.target_position = np.array([-field_length/2.,
+                                                     power_port_edge_y + power_port_width/2.,
+                                                     power_port_target_height])
+
+    # Target point on the image -- needs to match training image
+    # These are manually captured by examining the images,
+    # and entering the pixel values from the target center for each image.
+    # These are currently only used for visualization of the target
+    ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(-1, 1, 2)  # train_power_port_red.png
+
+    # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
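
A quick numeric check (not from this patch) that the rotation assigned above really is a rotation of pi about z, i.e. the target's x axis points out of the field as the comment describes:

import numpy as np

# -I with the (2, 2) entry reset to 1 equals Rz(pi) up to floating-point noise.
theta = np.pi
rot_z_pi = np.array([[np.cos(theta), -np.sin(theta), 0.],
                     [np.sin(theta), np.cos(theta), 0.],
                     [0., 0., 1.]])
target_rotation = -np.identity(3, np.double)
target_rotation[2][2] = 1.
assert np.allclose(rot_z_pi, target_rotation)
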
 
     # Add the ideal 3D target to our list
     ideal_target_list.append(ideal_power_port_red)
     # And add the training image we'll actually use to the training list
-    training_target_list.append(TargetData('test_images/train_power_port_red_webcam.png'))
+    training_target_power_port_red = TargetData(
+        'test_images/train_power_port_red_webcam.png')
+    training_target_power_port_red.target_rotation = ideal_power_port_red.target_rotation
+    training_target_power_port_red.target_position = ideal_power_port_red.target_position
+    training_target_list.append(training_target_power_port_red)
 
     ###
     ### Red Loading Bay
@@ -163,21 +202,34 @@
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651, 406)]
+    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651,
+                                                                          406)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_red_polygon_points_3d = [
-        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (field_length/2., loading_bay_edge_y + loading_bay_width, 0.),
+        (field_length/2., loading_bay_edge_y + loading_bay_width, loading_bay_height),
+        (field_length/2., loading_bay_edge_y, loading_bay_height),
+        (field_length/2., loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_red.polygon_list.append(loading_bay_red_polygon_points_2d)
-    ideal_loading_bay_red.polygon_list_3d.append(loading_bay_red_polygon_points_3d)
+    ideal_loading_bay_red.polygon_list.append(
+        loading_bay_red_polygon_points_2d)
+    ideal_loading_bay_red.polygon_list_3d.append(
+        loading_bay_red_polygon_points_3d)
+    # Location of target
+    ideal_loading_bay_red.target_rotation = np.identity(3, np.double)
+    ideal_loading_bay_red.target_position = np.array([field_length/2.,
+                                                     loading_bay_edge_y + loading_bay_width/2.,
+                                                      loading_bay_height/2.])
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
-    training_target_list.append(TargetData('test_images/train_loading_bay_red.png'))
+    training_target_loading_bay_red = TargetData(
+        'test_images/train_loading_bay_red.png')
+    training_target_loading_bay_red.target_rotation = ideal_loading_bay_red.target_rotation
+    training_target_loading_bay_red.target_position = ideal_loading_bay_red.target_position
+    training_target_list.append(training_target_loading_bay_red)
 
     ###
     ### Blue Power Port
@@ -188,73 +240,99 @@
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
     power_port_blue_main_panel_polygon_points_2d = [(438, 693), (438, 285),
-                                                    (93, 285), (440, 50),
-                                                    (692, 50), (692, 285),
-                                                    (692, 693)]
+                                                    (93, 285), (440, 50), (692,
+                                                                           50),
+                                                    (692, 285), (692, 693)]
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_main_panel_polygon_points_3d = [
-        (0., power_port_center_y - power_port_width / 2., 0.),
-        (0., power_port_center_y - power_port_width / 2.,
+        (field_length/2., -power_port_edge_y, 0.),
+        (field_length/2., -power_port_edge_y,
          power_port_bottom_wing_height),
-        (0., power_port_center_y - power_port_width / 2. - power_port_wing_width,
+        (field_length/2., -power_port_edge_y + power_port_wing_width,
          power_port_bottom_wing_height),
-        (0., power_port_center_y - power_port_width / 2.,
+        (field_length/2., -power_port_edge_y,
          power_port_total_height),
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length/2., -power_port_edge_y - power_port_width,
          power_port_total_height),
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length/2., -power_port_edge_y - power_port_width,
          power_port_bottom_wing_height),
-        (0., power_port_center_y + power_port_width / 2., 0.)
+        (field_length/2., -power_port_edge_y - power_port_width, 0.)
     ]
 
     power_port_blue_wing_panel_polygon_points_2d = [(692, 50), (1047, 285),
                                                     (692, 285)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_wing_panel_polygon_points_3d = [
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length/2., -power_port_edge_y - power_port_width,
          power_port_total_height),
-        (power_port_wing_width * math.sin(wing_angle),
-         power_port_center_y - power_port_width / 2. +
+        (field_length/2. - power_port_wing_width * math.sin(wing_angle),
+         -power_port_edge_y - power_port_width -
          power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
-        (0., power_port_center_y + power_port_width / 2.,
+        (field_length/2., -power_port_edge_y - power_port_width,
          power_port_bottom_wing_height)
     ]
 
     # Populate the blue power port
-    ideal_power_port_blue.polygon_list.append(power_port_blue_main_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_main_panel_polygon_points_3d)
+    ideal_power_port_blue.polygon_list.append(
+        power_port_blue_main_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(
+        power_port_blue_main_panel_polygon_points_3d)
 
-    ideal_power_port_blue.polygon_list.append(power_port_blue_wing_panel_polygon_points_2d)
-    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_wing_panel_polygon_points_3d)
+    # Location of target.  Rotation is pointing in -x direction
+    ideal_power_port_blue.target_rotation = np.identity(3, np.double)
+    ideal_power_port_blue.target_position = np.array([field_length/2.,
+                                                     -power_port_edge_y - power_port_width/2.,
+                                                      power_port_target_height])
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2)  # train_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
-    training_target_list.append(TargetData('test_images/train_power_port_blue.png'))
+    training_target_power_port_blue = TargetData(
+        'test_images/train_power_port_blue.png')
+    training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
+    training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
+    training_target_list.append(training_target_power_port_blue)
 
     ###
     ### Blue Loading Bay
     ###
 
-    ideal_loading_bay_blue = TargetData('test_images/train_loading_bay_blue.png')
+    ideal_loading_bay_blue = TargetData(
+        'test_images/train_loading_bay_blue.png')
 
     # Start at lower left corner, and work around clockwise
     # These are taken by manually finding the points in gimp for this image
-    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729, 434)]
+    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729,
+                                                                       434)]
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_blue_polygon_points_3d = [
-        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length, loading_bay_edge_y, loading_bay_height),
-        (field_length, loading_bay_edge_y, 0.)
+        (-field_length/2., -loading_bay_edge_y - loading_bay_width, 0.),
+        (-field_length/2., -loading_bay_edge_y - loading_bay_width, loading_bay_height),
+        (-field_length/2., -loading_bay_edge_y, loading_bay_height),
+        (-field_length/2., -loading_bay_edge_y, 0.)
     ]
 
-    ideal_loading_bay_blue.polygon_list.append(loading_bay_blue_polygon_points_2d)
-    ideal_loading_bay_blue.polygon_list_3d.append(loading_bay_blue_polygon_points_3d)
+    ideal_loading_bay_blue.polygon_list.append(
+        loading_bay_blue_polygon_points_2d)
+    ideal_loading_bay_blue.polygon_list_3d.append(
+        loading_bay_blue_polygon_points_3d)
+
+    # Location of target
+    ideal_loading_bay_blue.target_rotation = -np.identity(3, np.double)
+    ideal_loading_bay_blue.target_rotation[2][2] = 1.
+    ideal_loading_bay_blue.target_position = np.array([-field_length/2.,
+                                                     -loading_bay_edge_y - loading_bay_width/2.,
+                                                       loading_bay_height/2.])
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
-    training_target_list.append(TargetData('test_images/train_loading_bay_blue.png'))
+    training_target_loading_bay_blue = TargetData(
+        'test_images/train_loading_bay_blue.png')
+    training_target_loading_bay_blue.target_rotation = ideal_loading_bay_blue.target_rotation
+    training_target_loading_bay_blue.target_position = ideal_loading_bay_blue.target_position
+    training_target_list.append(training_target_loading_bay_blue)
 
     # Create feature extractor
     feature_extractor = tam.load_feature_extractor()
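
With the extract_features() default added earlier in this file, the extractor argument is now optional; a usage sketch (not from this patch, assuming this module's namespace with TargetData and tam) of both call styles:

# A TargetData can lazily load its own extractor, or share one as the
# per-target loop below does.
target = TargetData('test_images/train_power_port_red.png')
target.extract_features()                   # loads a default extractor
shared_extractor = tam.load_feature_extractor()
target.extract_features(shared_extractor)   # or reuse an existing one
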
@@ -263,7 +341,7 @@
     camera_params = camera_definition.web_cam_params
 
     for ideal_target in ideal_target_list:
-        print("\nPreparing target for image %s" % ideal_target.image_filename)
+        glog.info("\nPreparing target for image %s" % ideal_target.image_filename)
         ideal_target.extract_features(feature_extractor)
         ideal_target.filter_keypoints_by_polygons()
         ideal_target.compute_reprojection_maps()
@@ -272,16 +350,21 @@
 
         if VISUALIZE_KEYPOINTS:
             for i in range(len(ideal_target.polygon_list)):
-                ideal_pts_tmp = np.asarray(ideal_target.polygon_list[i]).reshape(
-                    -1, 2)
+                ideal_pts_tmp = np.asarray(
+                    ideal_target.polygon_list[i]).reshape(-1, 2)
                 ideal_pts_3d_tmp = np.asarray(
                     ideal_target.polygon_list_3d[i]).reshape(-1, 3)
                 # We can only compute pose if we have at least 4 points
                 # Only matters for reprojection for visualization
                 # Keeping this code here, since it's helpful when testing
                 if (len(ideal_target.polygon_list[i]) >= 4):
-                    img_copy = dtd.draw_polygon(ideal_target.image.copy(), ideal_target.polygon_list[i], (0,255,0), True)
-                    dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+                    img_copy = dtd.draw_polygon(ideal_target.image.copy(),
+                                                ideal_target.polygon_list[i],
+                                                (0, 255, 0), True)
+                    dtd.visualize_reprojections(
+                        img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
+                        camera_params.camera_int.camera_matrix,
+                        camera_params.camera_int.distortion_coeffs)
 
             for polygon in ideal_target.polygon_list:
                 img_copy = ideal_target.image.copy()
@@ -307,7 +390,7 @@
     AUTO_PROJECTION = True
 
     if AUTO_PROJECTION:
-        print("\n\nAuto projection of training keypoints to 3D using ideal images")
+        glog.info("\n\nAuto projection of training keypoints to 3D using ideal images")
         # Match the captured training image against the "ideal" training image
         # and use those matches to pin down the 3D locations of the keypoints
 
@@ -316,35 +399,43 @@
             training_target = training_target_list[target_ind]
             ideal_target = ideal_target_list[target_ind]
 
-            print("\nPreparing target for image %s" % training_target.image_filename)
+            glog.info("\nPreparing target for image %s" % training_target.image_filename)
             # Extract keypoints and descriptors for model
             training_target.extract_features(feature_extractor)
 
             # Create matcher that we'll use to match with ideal
             matcher = tam.train_matcher([training_target.descriptor_list])
 
-            matches_list = tam.compute_matches(matcher,
-                                               [training_target.descriptor_list],
-                                               [ideal_target.descriptor_list])
+            matches_list = tam.compute_matches(
+                matcher, [training_target.descriptor_list],
+                [ideal_target.descriptor_list])
 
             homography_list, matches_mask_list = tam.compute_homographies(
                 [training_target.keypoint_list], [ideal_target.keypoint_list],
                 matches_list)
 
+            # Compute H_inv, since this maps points in ideal target to
+            # points in the training target
+            H_inv = np.linalg.inv(homography_list[0])
+
             for polygon in ideal_target.polygon_list:
                 ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
-                H_inv = np.linalg.inv(homography_list[0])
                 # We use the ideal target's polygons to define the polygons on
                 # the training target
-                transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+                transformed_polygon = cv2.perspectiveTransform(
+                    ideal_pts_2d, H_inv)
                 training_target.polygon_list.append(transformed_polygon)
 
-            print("Started with %d keypoints" % len(training_target.keypoint_list))
+            # Also project the target point from the ideal to training image
+            training_target_point_2d = cv2.perspectiveTransform(
+                np.asarray(ideal_target.target_point_2d).reshape(-1, 1, 2),
+                H_inv)
+            training_target.target_point_2d = training_target_point_2d.reshape(
+                -1, 1, 2)
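
A toy example (made-up homography, not from this patch) of the projection step above; cv2.perspectiveTransform expects an (N, 1, 2) float32 array:

import cv2
import numpy as np

# A pure-translation homography: its inverse simply shifts the point back.
H = np.array([[1., 0., 5.], [0., 1., -3.], [0., 0., 1.]])
H_inv = np.linalg.inv(H)
ideal_pt = np.float32([[570., 192.]]).reshape(-1, 1, 2)
training_pt = cv2.perspectiveTransform(ideal_pt, H_inv)
assert np.allclose(training_pt.reshape(-1, 2), [[565., 195.]])
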
+
+            glog.info("Started with %d keypoints" % len(training_target.keypoint_list))
 
             training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
                 training_target.keypoint_list, training_target.descriptor_list,
                 training_target.polygon_list)
-            print("After filtering by polygons, had %d keypoints" % len(
+            glog.info("After filtering by polygons, had %d keypoints" % len(
                 training_target.keypoint_list))
             if VISUALIZE_KEYPOINTS:
                 tam.show_keypoints(training_target.image,
@@ -383,11 +474,11 @@
                 img_copy = training_target.image.copy()
                 for polygon in training_target.polygon_list:
                     pts = polygon.astype(int).reshape(-1, 2)
-                    img_copy = dtd.draw_polygon(img_copy, pts,
-                                             (255, 0, 0), True)
-                kp_tmp = np.asarray([
-                    (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
-                ]).reshape(-1, 2)
+                    img_copy = dtd.draw_polygon(img_copy, pts, (255, 0, 0),
+                                                True)
+                kp_tmp = np.asarray(
+                    [(kp.pt[0], kp.pt[1])
+                     for kp in training_target.keypoint_list]).reshape(-1, 2)
                 dtd.visualize_reprojections(
                     img_copy, kp_tmp, training_target.keypoint_list_3d,
                     camera_params.camera_int.camera_matrix,
@@ -396,19 +487,19 @@
     y2020_target_list = training_target_list
     return y2020_target_list
 
+
 if __name__ == '__main__':
     ap = argparse.ArgumentParser()
     ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
-    ap.add_argument("--no_use_bazel", help="Don't run using Bazel", default=True, action='store_false')
+    ap.add_argument("-n", "--no_bazel", help="Don't run using Bazel", default=True, action='store_false')
     args = vars(ap.parse_args())
 
     VISUALIZE_KEYPOINTS = args["visualize"]
     if args["visualize"]:
-        print("Visualizing results")
+        glog.info("Visualizing results")
 
-    USE_BAZEL = args["no_use_bazel"]
-    if args["no_use_bazel"]:
-        print("Running on command line (no Bazel)")
+    USE_BAZEL = args["no_bazel"]
+    if not args["no_bazel"]:
+        glog.info("Running on command line (no Bazel)")
 
     compute_target_definition()
-    pass
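
The renamed -n/--no_bazel flag keeps the store_false semantics, which are easy to misread; a standalone illustration (not part of this patch):

import argparse

# The parsed value stays True (run under Bazel) unless the flag is passed.
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--no_bazel", help="Don't run using Bazel",
                default=True, action='store_false')
assert ap.parse_args([]).no_bazel is True               # default: use Bazel
assert ap.parse_args(["--no_bazel"]).no_bazel is False  # plain command line
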
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
new file mode 100644
index 0000000..5432766
--- /dev/null
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -0,0 +1,29 @@
+import cv2
+import numpy as np
+
+import camera_definition_test
+import define_training_data as dtd
+import target_definition
+import train_and_match as tam
+
+
+def compute_target_definition():
+    target_data_list = []
+    target_data_test_1 = target_definition.TargetData(
+        'test_images/train_power_port_red.png')
+
+    target_data_test_1.extract_features()
+
+    target_data_test_1.keypoint_list[0].pt = (0., 1.)
+
+    kp_3d = []
+    for i in range(len(target_data_test_1.keypoint_list)):
+        kp_3d.append((i, i, i))
+
+    target_data_test_1.keypoint_list_3d = np.asarray(
+        np.float32(kp_3d)).reshape(-1, 1, 3)
+
+    target_data_test_1.target_rotation = np.identity(3, np.double)
+    target_data_test_1.target_position = np.array([0., 1., 2.])
+    target_data_list.append(target_data_test_1)
+    return target_data_list
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index e5a7f5b..93c607c 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -1,4 +1,5 @@
 import cv2
+import glog
 import math
 import numpy as np
 import time
@@ -8,6 +9,7 @@
 FEATURE_EXTRACTOR_NAME = 'SIFT'
 QUERY_INDEX = 0  # We use a list for both training and query info, but only ever have one query item
 
+glog.setLevel("WARN")
 
 # Calculates rotation matrix to euler angles
 # The result is the same as MATLAB except the order
@@ -38,7 +40,7 @@
         # Load image (in color; let opencv convert to B&W for features)
         img_data = cv2.imread(im)
         if img_data is None:
-            print("Failed to load image: ", im)
+            glog.error("Failed to load image: %s" % im)
             exit()
         else:
             image_list.append(img_data)
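
python-glog is a thin wrapper over the standard logging module, so each call takes a single pre-formatted message string rather than a print-style argument list; the pattern used for the converted calls in this file (illustrative values, not from this patch):

import glog

glog.setLevel("WARN")
image_name = "test_images/train_power_port_red.png"
# Arguments are %-formatted into one string before the call.
glog.error("Failed to load image: %s" % image_name)
glog.warn("Not enough matches for model %d: %d (need at least %d)"
          % (0, 7, 10))
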
@@ -143,15 +145,12 @@
 
     elif FEATURE_EXTRACTOR_NAME is 'ORB':
         matches = matcher.knnMatch(train_keypoint_lists[0], desc_query, k=2)
-        print(matches)
         good_matches = []
         for m in matches:
             if m:
                 if len(m) == 2:
-                    print(m[0].distance, m[1].distance)
                     if m[0].distance < 0.7 * m[1].distance:
                         good_matches.append(m[0])
-                        print(m[0].distance)
 
         good_matches_list.append(good_matches)
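
The 0.7 threshold above is Lowe's ratio test; a compact restatement of the same filtering (sketch only, helper name is made up):

# Keep a knnMatch pair only when the best match is clearly better than the
# runner-up; ambiguous descriptors are dropped.
def ratio_test(knn_matches, ratio=0.7):
    good = []
    for pair in knn_matches:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good
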
 
@@ -176,14 +175,13 @@
     for i in range(len(train_keypoint_lists)):
         good_matches = good_matches_list[i]
         if len(good_matches) < MIN_MATCH_COUNT:
-            print("Not enough matches are for model ", i, ": ",
-                  len(good_matches), " out of needed #: ", MIN_MATCH_COUNT)
+            glog.warn("Not enough matches for model %d: %d (need at least %d)"
+                      % (i, len(good_matches), MIN_MATCH_COUNT))
             homography_list.append([])
             matches_mask_list.append([])
             continue
 
-        print("Got good number of matches for model %d: %d (needed only %d)" %
-              (i, len(good_matches), MIN_MATCH_COUNT))
+        glog.info("Got good number of matches for model %d: %d (needed only %d)" %
+                  (i, len(good_matches), MIN_MATCH_COUNT))
         # Extract and bundle keypoint locations for computations
         src_pts = np.float32([
             train_keypoint_lists[i][m.trainIdx].pt for m in good_matches
@@ -206,7 +204,7 @@
 # Also shows image with query unwarped (to match training image) and target pt
 def show_results(training_images, train_keypoint_lists, query_images,
                  query_keypoint_lists, target_point_list, good_matches_list):
-    print("Showing results for ", len(training_images), " training images")
+    glog.info("Showing results for %d training images" % len(training_images))
 
     homography_list, matches_mask_list = compute_homographies(
         train_keypoint_lists, query_keypoint_lists, good_matches_list)
@@ -214,15 +212,15 @@
         good_matches = good_matches_list[i]
         if len(good_matches) < MIN_MATCH_COUNT:
             continue
-        print("Showing results for model ", i)
+        glog.debug("Showing results for model %d" % i)
         matches_mask_count = matches_mask_list[i].count(1)
         if matches_mask_count != len(good_matches):
-            print("Homography rejected some matches!  From ",
-                  len(good_matches), ", only ", matches_mask_count,
-                  " were used")
+            glog.info("Homography rejected some matches!  From %d, only %d "
+                      "were used" % (len(good_matches), matches_mask_count))
 
         if matches_mask_count < MIN_MATCH_COUNT:
-            print(
-                "Skipping match because homography rejected matches down to below ",
-                MIN_MATCH_COUNT)
+            glog.info(
+                "Skipping match because homography rejected matches down to "
+                "below %d" % MIN_MATCH_COUNT)
             continue
@@ -235,7 +233,7 @@
         query_box_pts = cv2.perspectiveTransform(train_box_pts, H)
 
         # Figure out where the training target goes on the query img
-        transformed_target = cv2.perspectiveTransform(target_point_list[i], H)
+        transformed_target = cv2.perspectiveTransform(
+            target_point_list[i].reshape(-1, 1, 2), H)
         # Ballpark the size of the circle so it looks right on image
         radius = int(
             32 * abs(H[0][0] + H[1][1]) / 2)  # Average of scale factors
@@ -260,14 +258,13 @@
         img3 = cv2.drawMatches(query_image, query_keypoint_lists[QUERY_INDEX],
                                training_images[i], train_keypoint_lists[i],
                                good_matches_list[i], None, **draw_params)
-        print("Drawing matches for model ", i,
-              ".  Query on left, Training image on right")
+        glog.debug("Drawing matches for model %d.  "
+                   "Query on left, Training image on right" % i)
         cv2.imshow('Matches', img3), cv2.waitKey()
 
         # Next, unwarp the query image so it looks like the training view
         H_inv = np.linalg.inv(H)
         query_image_warp = cv2.warpPerspective(query_image, H_inv, (w, h))
-        print("Showing unwarped query image for model ", i)
         cv2.imshow('Unwarped Image', query_image_warp), cv2.waitKey()
 
     # Go ahead and return these, for use elsewhere