Defining targets for training images

Cleans up the commit message and formatting of files.

More details:
- Includes the ability to run from the command line, with or without Bazel
- Also includes the ability to write multiple targets to a flatbuffer
- Defines targets and cameras, along with 3D point calculations
- Includes camera and target helper functions, along with defaults to get started
- Modified from Brian's version to work with the Python target_definition code, especially to send multiple targets.

- Gets target_definition set up for building the flatbuffer data file. Updates the blue power port image to include full wings.
- Sample Python code to train on multiple images, match them, and display the results. Includes sample images to play with.
- Adds code to define masking polygons for removing features from training
- Tools to write out the target definition to a flatbuffer
- Adds multi-target definition and automates the process
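
For reference, the tools can be run either through Bazel or directly from the
command line, e.g. (flags as defined in target_definition.py below):

    bazel build //y2020/vision/tools/python_code:run_load_sift_training
    python3 target_definition.py --no_use_bazel --visualize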

Change-Id: Ibc90d51a129751bf456da6813b3e7cbc5e55901a
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
new file mode 100644
index 0000000..670b664
--- /dev/null
+++ b/y2020/vision/tools/python_code/BUILD
@@ -0,0 +1,49 @@
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
+
+py_binary(
+    name = "load_sift_training",
+    data = [
+        ":test_images/train_power_port_red.png",
+        ":test_images/train_power_port_red_webcam.png",
+        ":test_images/train_power_port_blue.png",
+        ":test_images/train_loading_bay_red.png",
+        ":test_images/train_loading_bay_blue.png",
+    ],
+    srcs = ["load_sift_training.py",
+        "camera_definition.py",
+        "define_training_data.py",
+        "target_definition.py",
+        "train_and_match.py",
+    ],
+    args = ["sift_training_data.h",
+    ],
+    default_python_version = "PY3",
+    srcs_version = "PY2AND3",
+    deps = [
+        "//y2020/vision/sift:sift_fbs_python",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+        "@bazel_tools//tools/python/runfiles",
+    ],
+)
+
+genrule(
+    name = "run_load_sift_training",
+    outs = [
+        "sift_training_data.h",
+    ],
+    cmd = " ".join([
+        "$(location :load_sift_training)",
+        "$(location sift_training_data.h)",
+    ]),
+    tools = [
+        ":load_sift_training",
+    ],
+)
+
+cc_library(
+    name = "sift_training",
+    hdrs = [
+        "sift_training_data.h",
+    ],
+    visibility = ["//visibility:public"],
+)
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index d1802aa..22eb6ce 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -5,29 +5,33 @@
 import numpy as np
 import time
 
-import field_display
 import train_and_match as tam
 
 # Points for current polygon
 point_list = []
-current_mouse = (0,0)
+current_mouse = (0, 0)
+
 
 def get_mouse_event(event, x, y, flags, param):
     global point_list
     global current_mouse
-    current_mouse = (x,y)
+    current_mouse = (x, y)
     if event == cv2.EVENT_LBUTTONUP:
         #print("Adding point at %d, %d" % (x,y))
-        point_list.append([x,y])
+        point_list.append([x, y])
 
-def draw_polygon(image, polygon, color=(255,0,0), close_polygon = False):
+
+def draw_polygon(image, polygon, color=(255, 0, 0), close_polygon=False):
     for point in polygon:
-        image = cv2.circle(image, (point[0], point[1]), 5, (255,0,0), -1)
-    if len(polygon) > 1:
+        image = cv2.circle(image, (point[0], point[1]), 5, (255, 0, 0), -1)
+    if (len(polygon) > 1):
         np_poly = np.array(polygon)
-        image = cv2.polylines(image, [np_poly], close_polygon, color, thickness=3)
+        image = cv2.polylines(
+            image, [np_poly], close_polygon, color, thickness=3)
     return image
 
+
 # Close out polygon, return True if size is 3 or more points
 def finish_polygon(image, polygon):
     global point_list
@@ -36,7 +40,7 @@
         return False
 
     point_list.append(point_list[0])
-    image = draw_polygon(image, point_list, color=(0,0,255))
+    image = draw_polygon(image, point_list, color=(0, 0, 255))
     cv2.imshow("image", image)
     cv2.waitKey(500)
     return True
@@ -99,14 +103,23 @@
     cv2.namedWindow("image")
     cv2.setMouseCallback("image", get_mouse_event)
 
-    while len(point_list) < len(points):
+    while (len(point_list) < len(points)):
         i = len(point_list)
         # Draw mouse location and suggested target
         display_image = image.copy()
-        display_image = cv2.circle(display_image, (points[i][0], points[i][1]), 15, (0,255,0), 2)
+        display_image = cv2.circle(display_image, (points[i][0], points[i][1]),
+                                   15, (0, 255, 0), 2)
         cursor_length = 5
-        display_image = cv2.line(display_image, (current_mouse[0]-cursor_length, current_mouse[1]), (current_mouse[0]+cursor_length, current_mouse[1]), (255,0,0), 2, cv2.LINE_AA)
-        display_image = cv2.line(display_image, (current_mouse[0], current_mouse[1]-cursor_length), (current_mouse[0], current_mouse[1]+cursor_length), (255,0,0), 2, cv2.LINE_AA)
+        display_image = cv2.line(
+            display_image,
+            (current_mouse[0] - cursor_length, current_mouse[1]),
+            (current_mouse[0] + cursor_length, current_mouse[1]), (255, 0, 0),
+            2, cv2.LINE_AA)
+        display_image = cv2.line(
+            display_image,
+            (current_mouse[0], current_mouse[1] - cursor_length),
+            (current_mouse[0], current_mouse[1] + cursor_length), (255, 0, 0),
+            2, cv2.LINE_AA)
 
         cv2.imshow("image", display_image)
 
@@ -179,7 +192,7 @@
     pts_2d_lstsq = append_ones(pts_2d)
     pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1,3)
 
-    reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=None)[0]
+    reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=-1)[0]
 
     return reprojection_map
 
@@ -198,19 +211,24 @@
     # Compute camera location
     # TODO: Warn on bad inliers
     # TODO: Change this to not have to recast to np
-    pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1,1,2)
-    pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1,1,3)
-    retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat, distortion_coeffs)
-    pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat, distortion_coeffs)
+    pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1, 1, 2)
+    pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1, 1, 3)
+    retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat,
+                                               distortion_coeffs)
+    pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
+                                               distortion_coeffs)
+    if inliers is None:
+        print("WARNING: Didn't get any inliers when reprojecting polygons")
+        return img
     for i in range(len(pts_2d)):
         pt_2d = pts_2d_np[i][0]
         pt_3d_proj = pts_3d_proj_2d[i][0]
-        pt_color =  (0,255,0)
+        pt_color = (0, 255, 0)
         if i not in inliers:
-            pt_color = (0,0,255)
+            pt_color = (0, 0, 255)
 
-        img = cv2.circle(img,(pt_2d[0],pt_2d[1]),3,pt_color,3)
-        img = cv2.circle(img,(pt_3d_proj[0], pt_3d_proj[1]),15,pt_color,3)
+        img = cv2.circle(img, (pt_2d[0], pt_2d[1]), 3, pt_color, 3)
+        img = cv2.circle(img, (pt_3d_proj[0], pt_3d_proj[1]), 15, pt_color, 3)
 
     cv2.imshow("image", img)
     cv2.waitKey(0)
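
A minimal sketch of what the reprojection-map fit above does (the corner
values are a four-point subset of the red power port polygon from
target_definition.py; names and values here are illustrative):

    import numpy as np

    # 2D pixel corners and their matching 3D field locations.
    pts_2d = np.float32([[451, 679], [451, 304], [689, 74], [689, 679]])
    pts_3d = np.float32([[15.98, -1.06, 0.00], [15.98, -1.06, 1.88],
                         [15.98, -2.28, 3.10], [15.98, -2.28, 0.00]])

    # Augment each 2D point to homogeneous form [x, y, 1], then solve for the
    # 3x3 map M with [x, y, 1] @ M ~= [X, Y, Z], mirroring the lstsq call above.
    pts_2d_h = np.hstack([pts_2d, np.ones((len(pts_2d), 1), np.float32)])
    M = np.linalg.lstsq(pts_2d_h, pts_3d, rcond=-1)[0]

    # Any 2D point inside the polygon can now be lifted to an approximate 3D point.
    print(np.float32([451, 679, 1]) @ M)  # roughly [15.98, -1.06, 0.]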
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
new file mode 100644
index 0000000..9fa4acf
--- /dev/null
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python3
+
+import cv2
+import sys
+import flatbuffers
+import target_definition
+
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.Feature as Feature
+
+def main():
+
+  output_path = sys.argv[1]
+  print("Writing file to ", output_path)
+
+  target_data_list = target_definition.compute_target_definition()
+
+  fbb = flatbuffers.Builder(0)
+
+  images_vector = []
+
+  for target_data in target_data_list:
+
+    features_vector = []
+
+    for keypoint, keypoint_3d, descriptor in zip(target_data.keypoint_list,
+                                                 target_data.keypoint_list_3d,
+                                                 target_data.descriptor_list):
+
+      Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+      for n in reversed(descriptor):
+        fbb.PrependFloat32(n)
+      descriptor_vector = fbb.EndVector(len(descriptor))
+
+      Feature.FeatureStart(fbb)
+
+      Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+      Feature.FeatureAddX(fbb, keypoint.pt[0])
+      Feature.FeatureAddY(fbb, keypoint.pt[1])
+      Feature.FeatureAddSize(fbb, keypoint.size)
+      Feature.FeatureAddAngle(fbb, keypoint.angle)
+      Feature.FeatureAddResponse(fbb, keypoint.response)
+      Feature.FeatureAddOctave(fbb, keypoint.octave)
+
+      features_vector.append(Feature.FeatureEnd(fbb))
+
+      ## TODO: Write 3d vector here
+
+    TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
+    for feature in reversed(features_vector):
+      fbb.PrependUOffsetTRelative(feature)
+    features_vector_table = fbb.EndVector(len(features_vector))
+
+    TrainingImage.TrainingImageStart(fbb)
+    TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
+    # TODO(Brian): Fill out the transformation matrices.
+    images_vector.append(TrainingImage.TrainingImageEnd(fbb))
+
+  TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
+  for training_image in reversed(images_vector):
+    fbb.PrependUOffsetTRelative(training_image)
+  images_vector_table = fbb.EndVector(len(images_vector))
+
+  TrainingData.TrainingDataStart(fbb)
+  TrainingData.TrainingDataAddImages(fbb, images_vector_table)
+  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+  bfbs = fbb.Output()
+
+  output_prefix = [
+      b'#ifndef Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+      b'#define Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+      b'#include <string_view>',
+      b'namespace frc971 {',
+      b'namespace vision {',
+      b'inline std::string_view SiftTrainingData() {',
+  ]
+  output_suffix = [
+      b'  return std::string_view(kData, sizeof(kData));',
+      b'}',
+      b'}  // namespace vision',
+      b'}  // namespace frc971',
+      b'#endif  // Y2020_VISION_TOOLS_PYTHON_CODE_TRAINING_DATA_H_',
+  ]
+
+  with open(output_path, 'wb') as output:
+    for line in output_prefix:
+      output.write(line)
+      output.write(b'\n')
+    output.write(b'alignas(64) static constexpr char kData[] = "')
+    for byte in fbb.Output():
+      output.write(b'\\x' + (b'%x' % byte).zfill(2))
+    output.write(b'";\n')
+    for line in output_suffix:
+      output.write(line)
+      output.write(b'\n')
+
+if __name__ == '__main__':
+  main()
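
As a sanity check, the serialized buffer can be parsed back with the
generated Python API before it is wrapped in the header (a sketch, assuming
the standard flatbuffers accessors generated for this schema):

    import frc971.vision.sift.TrainingData as TrainingData

    # bfbs is the bytearray produced by fbb.Output() above.
    parsed = TrainingData.TrainingData.GetRootAsTrainingData(bfbs, 0)
    print("Training images:", parsed.ImagesLength())
    first_feature = parsed.Images(0).Features(0)
    print("First keypoint at (%.1f, %.1f)" %
          (first_feature.X(), first_feature.Y()))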
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index d711fb9..779eb0b 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -1,3 +1,4 @@
+import argparse
 import cv2
 import json
 import math
@@ -7,15 +8,27 @@
 import define_training_data as dtd
 import train_and_match as tam
 
-VISUALIZE_KEYPOINTS = True
-VISUALIZE_POLYGONS = True
+global VISUALIZE_KEYPOINTS
+global USE_BAZEL
+USE_BAZEL = True
+VISUALIZE_KEYPOINTS = False
 
+def bazel_name_fix(filename):
+    ret_name = filename
+    if USE_BAZEL:
+        ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
+
+    return ret_name
 
 class TargetData:
     def __init__(self, filename):
         self.image_filename = filename
         # Load an image (will come in as a 1-element list)
-        self.image = tam.load_images([filename])[0]
+        if USE_BAZEL:
+            from bazel_tools.tools.python.runfiles import runfiles
+            r = runfiles.Create()
+            self.image_filename = r.Rlocation(bazel_name_fix(self.image_filename))
+        self.image = tam.load_images([self.image_filename])[0]
         self.polygon_list = []
         self.polygon_list_3d = []
         self.reprojection_map_list = []
@@ -60,226 +73,342 @@
 
         return point_list_3d
 
-    # Save out the training data-- haven't implemented this
-    def save_training_data(self):
-        if (len(self.keypoint_list) != len(self.descriptor_list)
-                or len(self.keypoint_list) != len(self.keypoint_list_3d)):
-            print("Big problem-- lists don't match in size: %d, %d, %d" %
-                  (len(self.keypoint_list), len(self.descriptor_list),
-                   len(self.keypoint_list_3d)))
+def compute_target_definition():
+    ############################################################
+    # TARGET DEFINITIONS
+    ############################################################
 
-        print("(Would like to) Save target with %d keypoints" % len(
-            self.keypoint_list))
+    ideal_target_list = []
+    training_target_list = []
 
+    # Some general info about our field and targets
+    # Assume camera centered on target at 1 m above ground and distance of 4.85m
+    field_length = 15.98
+    power_port_total_height = 3.10
+    power_port_center_y = 1.67
+    power_port_width = 1.22
+    power_port_bottom_wing_height = 1.88
+    power_port_wing_width = 1.83
+    loading_bay_edge_y = 1.11
+    loading_bay_width = 1.52
+    loading_bay_height = 0.94
 
-# TARGET DEFINITIONS
+    # Pick the target center location at halfway between top and bottom of the top panel
+    target_center_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
 
-# Some general info about our field and targets
-# Assume camera centered on target at 1 m above ground and distance of 4.85m
-camera_height = 1.
-total_target_height = 3.10
-field_length = 15.98
-power_port_offset_y = 1.67
-power_port_width = 1.22
-bottom_wing_height = 1.88
-wing_width = 1.83
+    # TODO: Still need to figure out what this angle actually is
+    wing_angle = 20. * math.pi / 180.
 
-# Pick the target center location at halfway between top and bottom of the top panel
-target_center_height = (total_target_height + bottom_wing_height) / 2.
+    ###
+    ### Red Power Port
+    ###
 
-# TODO: Still need to figure out what this angle actually is
-wing_angle = 20. * math.pi / 180.
+    # Create the reference "ideal" image
+    ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
 
-# Start at lower left corner, and work around clockwise
-# These are taken by manually finding the points in gimp for this image
-main_panel_polygon_points_2d = [(451, 679), (451, 304), (100, 302), (451, 74),
-                                (689, 74), (689, 302), (689, 679)]
-# These are "virtual" 3D points based on the expected geometry
-main_panel_polygon_points_3d = [
-    (field_length, power_port_width / 2. - power_port_offset_y,
-     0.), (field_length, power_port_width / 2. - power_port_offset_y,
-           bottom_wing_height),
-    (field_length, power_port_width / 2. - power_port_offset_y + wing_width,
-     bottom_wing_height),
-    (field_length, power_port_width / 2. - power_port_offset_y,
-     total_target_height), (field_length,
-                            -power_port_width / 2. - power_port_offset_y,
-                            total_target_height),
-    (field_length, -power_port_width / 2. - power_port_offset_y,
-     bottom_wing_height), (field_length,
-                           -power_port_width / 2. - power_port_offset_y, 0.)
-]
+    # Start at lower left corner, and work around clockwise
+    # These are taken by manually finding the points in gimp for this image
+    power_port_red_main_panel_polygon_points_2d = [(451, 679), (451, 304),
+                                                   (100, 302), (451, 74),
+                                                   (689, 74), (689, 302),
+                                                   (689, 679)]
 
-wing_panel_polygon_points_2d = [(689, 74), (1022, 302), (689, 302)]
-# These are "virtual" 3D points based on the expected geometry
-wing_panel_polygon_points_3d = [
-    (field_length, -power_port_width / 2. - power_port_offset_y,
-     total_target_height),
-    (field_length - wing_width * math.sin(wing_angle), -power_port_width / 2. -
-     power_port_offset_y - wing_width * math.cos(wing_angle),
-     bottom_wing_height), (field_length,
-                           -power_port_width / 2. - power_port_offset_y,
-                           bottom_wing_height)
-]
+    # These are "virtual" 3D points based on the expected geometry
+    power_port_red_main_panel_polygon_points_3d = [
+        (field_length, -power_port_center_y + power_port_width / 2., 0.),
+        (field_length, -power_port_center_y + power_port_width / 2.,
+         power_port_bottom_wing_height),
+        (field_length, -power_port_center_y + power_port_width / 2.
+         + power_port_wing_width, power_port_bottom_wing_height),
+        (field_length, -power_port_center_y + power_port_width / 2.,
+         power_port_total_height),
+        (field_length, -power_port_center_y - power_port_width / 2.,
+         power_port_total_height),
+        (field_length, -power_port_center_y - power_port_width / 2.,
+         power_port_bottom_wing_height),
+        (field_length, -power_port_center_y - power_port_width / 2., 0.)
+    ]
 
-# Populate the red power port
-ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
+    power_port_red_wing_panel_polygon_points_2d = [(689, 74), (1022, 302),
+                                                   (689, 302)]
+    # These are "virtual" 3D points based on the expected geometry
+    power_port_red_wing_panel_polygon_points_3d = [
+        (field_length, -power_port_center_y - power_port_width / 2.,
+         power_port_total_height),
+        (field_length - power_port_wing_width * math.sin(wing_angle),
+         -power_port_center_y - power_port_width / 2.
+         - power_port_wing_width * math.cos(wing_angle),
+         power_port_bottom_wing_height),
+        (field_length, -power_port_center_y - power_port_width / 2.,
+         power_port_bottom_wing_height)
+    ]
 
-ideal_power_port_red.polygon_list.append(main_panel_polygon_points_2d)
-ideal_power_port_red.polygon_list_3d.append(main_panel_polygon_points_3d)
+    # Populate the red power port
+    ideal_power_port_red.polygon_list.append(power_port_red_main_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(power_port_red_main_panel_polygon_points_3d)
 
-ideal_power_port_red.polygon_list.append(wing_panel_polygon_points_2d)
-ideal_power_port_red.polygon_list_3d.append(wing_panel_polygon_points_3d)
+    ideal_power_port_red.polygon_list.append(power_port_red_wing_panel_polygon_points_2d)
+    ideal_power_port_red.polygon_list_3d.append(power_port_red_wing_panel_polygon_points_3d)
 
-ideal_target_list = []
-ideal_target_list.append(ideal_power_port_red)
+    # Add the ideal 3D target to our list
+    ideal_target_list.append(ideal_power_port_red)
+    # And add the training image we'll actually use to the training list
+    training_target_list.append(TargetData('test_images/train_power_port_red_webcam.png'))
 
-# Create feature extractor
-feature_extractor = tam.load_feature_extractor()
+    ###
+    ### Red Loading Bay
+    ###
 
-# Use webcam parameters for now
-camera_params = camera_definition.web_cam_params
-for ideal_target in ideal_target_list:
-    print("Preparing target for image %s" % ideal_target.image_filename)
-    ideal_target.extract_features(feature_extractor)
-    ideal_target.filter_keypoints_by_polygons()
-    ideal_target.compute_reprojection_maps()
-    ideal_target.keypoint_list_3d = ideal_target.project_keypoint_to_3d_by_polygon(
-        ideal_target.keypoint_list)
+    ideal_loading_bay_red = TargetData('test_images/train_loading_bay_red.png')
 
-    if VISUALIZE_POLYGONS:
-        # For each polygon, show the 2D points and the reprojection for 3D
-        # of the polygon definition
-        for polygon_2d, polygon_3d in zip(ideal_target.polygon_list,
-                                          ideal_target.polygon_list_3d):
-            ideal_pts_tmp = np.asarray(polygon_2d).reshape(-1, 2)
-            ideal_pts_3d_tmp = np.asarray(polygon_3d).reshape(-1, 3)
-            # We can only compute pose if we have at least 4 points
-            # Only matters for reprojection for visualization
-            # Keeping this code here, since it's helpful when testing
-            if (len(polygon_2d) >= 4):
-                img_copy = dtd.draw_polygon(ideal_target.image.copy(), polygon_2d, (0,255,0), True)
-                dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+    # Start at lower left corner, and work around clockwise
+    # These are taken by manually finding the points in gimp for this image
+    loading_bay_red_polygon_points_2d = [(42, 406), (42, 35), (651, 34), (651, 406)]
 
-    if VISUALIZE_KEYPOINTS:
-        # For each polygon, show which keypoints (2D and 3D manual) were kept
-        for polygon in ideal_target.polygon_list:
-            img_copy = ideal_target.image.copy()
-            kp_in_poly2d = []
-            kp_in_poly3d = []
-            for kp, kp_3d in zip(ideal_target.keypoint_list,
-                                 ideal_target.keypoint_list_3d):
-                if dtd.point_in_polygons((kp.pt[0], kp.pt[1]), [polygon]):
-                    kp_in_poly2d.append((kp.pt[0], kp.pt[1]))
-                    kp_in_poly3d.append(kp_3d)
+    # These are "virtual" 3D points based on the expected geometry
+    loading_bay_red_polygon_points_3d = [
+        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
+        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
+        (field_length, loading_bay_edge_y, loading_bay_height),
+        (field_length, loading_bay_edge_y, 0.)
+    ]
 
-            img_copy = dtd.draw_polygon(img_copy, polygon, (0,255,0), True)
-            dtd.visualize_reprojections(
-                img_copy,
-                np.asarray(kp_in_poly2d).reshape(-1, 2),
-                np.asarray(kp_in_poly3d).reshape(
-                    -1, 3), camera_params.camera_int.camera_matrix,
-                camera_params.camera_int.distortion_coeffs)
+    ideal_loading_bay_red.polygon_list.append(loading_bay_red_polygon_points_2d)
+    ideal_loading_bay_red.polygon_list_3d.append(loading_bay_red_polygon_points_3d)
 
-### TODO: Add code to do manual point selection
-AUTO_PROJECTION = True
-training_target_list = []
-if AUTO_PROJECTION:
-    print("Auto projection of training keypoints to 3D using ideal images")
-    # Match the captured training image against the "ideal" training image
-    # and use those matches to pin down the 3D locations of the keypoints
+    ideal_target_list.append(ideal_loading_bay_red)
+    training_target_list.append(TargetData('test_images/train_loading_bay_red.png'))
 
-    training_power_port_red = TargetData(
-        'test_images/train_power_port_red_webcam.png')
+    ###
+    ### Blue Power Port
+    ###
 
-    training_target_list.append(training_power_port_red)
+    ideal_power_port_blue = TargetData('test_images/train_power_port_blue.png')
 
-    for target_ind in range(len(training_target_list)):
-        # Assumes we have 1 ideal view for each training target
-        training_target = training_target_list[target_ind]
-        ideal_target = ideal_target_list[target_ind]
+    # Start at lower left corner, and work around clockwise
+    # These are taken by manually finding the points in gimp for this image
+    power_port_blue_main_panel_polygon_points_2d = [(438, 693), (438, 285),
+                                                    (93, 285), (440, 50),
+                                                    (692, 50), (692, 285),
+                                                    (692, 693)]
 
-        print("Preparing target for image %s" % training_target.image_filename)
-        # Extract keypoints and descriptors for model
-        training_target.extract_features(feature_extractor)
+    # These are "virtual" 3D points based on the expected geometry
+    power_port_blue_main_panel_polygon_points_3d = [
+        (0., power_port_center_y - power_port_width / 2., 0.),
+        (0., power_port_center_y - power_port_width / 2.,
+         power_port_bottom_wing_height),
+        (0., power_port_center_y - power_port_width / 2. - power_port_wing_width,
+         power_port_bottom_wing_height),
+        (0., power_port_center_y - power_port_width / 2.,
+         power_port_total_height),
+        (0., power_port_center_y + power_port_width / 2.,
+         power_port_total_height),
+        (0., power_port_center_y + power_port_width / 2.,
+         power_port_bottom_wing_height),
+        (0., power_port_center_y + power_port_width / 2., 0.)
+    ]
 
-        # Create matcher that we'll use to match with ideal
-        matcher = tam.train_matcher([training_target.descriptor_list])
+    power_port_blue_wing_panel_polygon_points_2d = [(692, 50), (1047, 285),
+                                                    (692, 285)]
+    # These are "virtual" 3D points based on the expected geometry
+    power_port_blue_wing_panel_polygon_points_3d = [
+        (0., power_port_center_y + power_port_width / 2.,
+         power_port_total_height),
+        (power_port_wing_width * math.sin(wing_angle),
+         power_port_center_y - power_port_width / 2. +
+         power_port_wing_width * math.cos(wing_angle),
+         power_port_bottom_wing_height),
+        (0., power_port_center_y + power_port_width / 2.,
+         power_port_bottom_wing_height)
+    ]
 
-        matches_list = tam.compute_matches(matcher,
-                                           [training_target.descriptor_list],
-                                           [ideal_target.descriptor_list])
+    # Populate the blue power port
+    ideal_power_port_blue.polygon_list.append(power_port_blue_main_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_main_panel_polygon_points_3d)
 
-        homography_list, matches_mask_list = tam.compute_homographies(
-            [training_target.keypoint_list], [ideal_target.keypoint_list],
-            matches_list)
+    ideal_power_port_blue.polygon_list.append(power_port_blue_wing_panel_polygon_points_2d)
+    ideal_power_port_blue.polygon_list_3d.append(power_port_blue_wing_panel_polygon_points_3d)
 
-        for polygon in ideal_target.polygon_list:
-            ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
-            H_inv = np.linalg.inv(homography_list[0])
-            # We use the ideal target's polygons to define the polygons on
-            # the training target
-            transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
-            training_target.polygon_list.append(transformed_polygon)
+    ideal_target_list.append(ideal_power_port_blue)
+    training_target_list.append(TargetData('test_images/train_power_port_blue.png'))
 
-            # Verify that this looks right
+    ###
+    ### Blue Loading Bay
+    ###
+
+    ideal_loading_bay_blue = TargetData('test_images/train_loading_bay_blue.png')
+
+    # Start at lower left corner, and work around clockwise
+    # These are taken by manually finding the points in gimp for this image
+    loading_bay_blue_polygon_points_2d = [(7, 434), (7, 1), (729, 1), (729, 434)]
+
+    # These are "virtual" 3D points based on the expected geometry
+    loading_bay_blue_polygon_points_3d = [
+        (field_length, loading_bay_edge_y + loading_bay_width, 0.),
+        (field_length, loading_bay_edge_y + loading_bay_width, loading_bay_height),
+        (field_length, loading_bay_edge_y, loading_bay_height),
+        (field_length, loading_bay_edge_y, 0.)
+    ]
+
+    ideal_loading_bay_blue.polygon_list.append(loading_bay_blue_polygon_points_2d)
+    ideal_loading_bay_blue.polygon_list_3d.append(loading_bay_blue_polygon_points_3d)
+
+    ideal_target_list.append(ideal_loading_bay_blue)
+    training_target_list.append(TargetData('test_images/train_loading_bay_blue.png'))
+
+    # Create feature extractor
+    feature_extractor = tam.load_feature_extractor()
+
+    # Use webcam parameters for now
+    camera_params = camera_definition.web_cam_params
+
+    for ideal_target in ideal_target_list:
+        print("\nPreparing target for image %s" % ideal_target.image_filename)
+        ideal_target.extract_features(feature_extractor)
+        ideal_target.filter_keypoints_by_polygons()
+        ideal_target.compute_reprojection_maps()
+        ideal_target.keypoint_list_3d = ideal_target.project_keypoint_to_3d_by_polygon(
+            ideal_target.keypoint_list)
+
+        if VISUALIZE_KEYPOINTS:
+            for i in range(len(ideal_target.polygon_list)):
+                ideal_pts_tmp = np.asarray(ideal_target.polygon_list[i]).reshape(
+                    -1, 2)
+                ideal_pts_3d_tmp = np.asarray(
+                    ideal_target.polygon_list_3d[i]).reshape(-1, 3)
+                # We can only compute pose if we have at least 4 points
+                # Only matters for reprojection for visualization
+                # Keeping this code here, since it's helpful when testing
+                if (len(ideal_target.polygon_list[i]) >= 4):
+                    img_copy = dtd.draw_polygon(ideal_target.image.copy(), ideal_target.polygon_list[i], (0,255,0), True)
+                    dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+
+            for polygon in ideal_target.polygon_list:
+                img_copy = ideal_target.image.copy()
+                kp_in_poly2d = []
+                kp_in_poly3d = []
+                for kp, kp_3d in zip(ideal_target.keypoint_list,
+                                     ideal_target.keypoint_list_3d):
+                    if dtd.point_in_polygons((kp.pt[0], kp.pt[1]), [polygon]):
+                        kp_in_poly2d.append((kp.pt[0], kp.pt[1]))
+                        kp_in_poly3d.append(kp_3d)
+
+                dtd.visualize_reprojections(
+                    img_copy,
+                    np.asarray(kp_in_poly2d).reshape(-1, 2),
+                    np.asarray(kp_in_poly3d).reshape(
+                        -1, 3), camera_params.camera_int.camera_matrix,
+                    camera_params.camera_int.distortion_coeffs)
+
+    ###############
+    ### Compute 3D points on actual training images
+    ### TODO: Add code to do manual point selection
+    ###############
+    AUTO_PROJECTION = True
+
+    if AUTO_PROJECTION:
+        print("\n\nAuto projection of training keypoints to 3D using ideal images")
+        # Match the captured training image against the "ideal" training image
+        # and use those matches to pin down the 3D locations of the keypoints
+
+        for target_ind in range(len(training_target_list)):
+            # Assumes we have 1 ideal view for each training target
+            training_target = training_target_list[target_ind]
+            ideal_target = ideal_target_list[target_ind]
+
+            print("\nPreparing target for image %s" % training_target.image_filename)
+            # Extract keypoints and descriptors for model
+            training_target.extract_features(feature_extractor)
+
+            # Create matcher that we'll use to match with ideal
+            matcher = tam.train_matcher([training_target.descriptor_list])
+
+            matches_list = tam.compute_matches(matcher,
+                                               [training_target.descriptor_list],
+                                               [ideal_target.descriptor_list])
+
+            homography_list, matches_mask_list = tam.compute_homographies(
+                [training_target.keypoint_list], [ideal_target.keypoint_list],
+                matches_list)
+
+            for polygon in ideal_target.polygon_list:
+                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
+                H_inv = np.linalg.inv(homography_list[0])
+                # We use the ideal target's polygons to define the polygons on
+                # the training target
+                transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+                training_target.polygon_list.append(transformed_polygon)
+
+            print("Started with %d keypoints" % len(training_target.keypoint_list))
+
+            training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
+                training_target.keypoint_list, training_target.descriptor_list,
+                training_target.polygon_list)
+            print("After filtering by polygons, had %d keypoints" % len(
+                training_target.keypoint_list))
             if VISUALIZE_KEYPOINTS:
-                pts = transformed_polygon.astype(int).reshape(-1, 2)
-                image = dtd.draw_polygon(training_target.image.copy(), pts,
-                                         (255, 0, 0), True)
-                cv2.imshow("image", image)
-                cv2.waitKey(0)
+                tam.show_keypoints(training_target.image,
+                                   training_target.keypoint_list)
 
-        print("Started with %d keypoints" % len(training_target.keypoint_list))
+            # Now comes the fun part
+            # Go through all my training keypoints to define 3D location using ideal
+            training_3d_list = []
+            for kp_ind in range(len(training_target.keypoint_list)):
+                # We're going to look for the first time this keypoint is in a polygon
+                found_3d_loc = False
+                # First, is it in the correct polygon
+                kp_loc = (training_target.keypoint_list[kp_ind].pt[0],
+                          training_target.keypoint_list[kp_ind].pt[1])
+                for poly_ind in range(len(training_target.polygon_list)):
+                    if dtd.point_in_polygons(
+                            kp_loc, [training_target.polygon_list[poly_ind]
+                                     ]) and not found_3d_loc:
+                        found_3d_loc = True
+                        # If so, transform keypoint location to ideal using homography, and compute 3D
+                        kp_loc_array = np.asarray(np.float32(kp_loc)).reshape(
+                            -1, 1, 2)
+                        training_2d_in_ideal = cv2.perspectiveTransform(
+                            kp_loc_array, homography_list[0])
+                        # Get 3D from this 2D point in ideal image
+                        training_3d_pt = dtd.compute_3d_points(
+                            training_2d_in_ideal,
+                            ideal_target.reprojection_map_list[poly_ind])
+                        training_3d_list.append(training_3d_pt)
 
-        training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
-            training_target.keypoint_list, training_target.descriptor_list,
-            training_target.polygon_list)
-        print("After filtering by polygons, had %d keypoints" % len(
-            training_target.keypoint_list))
-        if VISUALIZE_KEYPOINTS:
-            tam.show_keypoints(training_target.image,
-                               training_target.keypoint_list)
+            training_target.keypoint_list_3d = np.asarray(
+                training_3d_list).reshape(-1, 1, 3)
 
-        # Now comes the fun part
-        # Go through all my training keypoints to define 3D location using ideal
-        training_3d_list = []
-        for kp_ind in range(len(training_target.keypoint_list)):
-            # We're going to look for the first time this keypoint is in a polygon
-            found_3d_loc = False
-            # First, is it in the correct polygon
-            kp_loc = (training_target.keypoint_list[kp_ind].pt[0],
-                      training_target.keypoint_list[kp_ind].pt[1])
-            for poly_ind in range(len(training_target.polygon_list)):
-                if dtd.point_in_polygons(
-                        kp_loc, [training_target.polygon_list[poly_ind]
-                                 ]) and not found_3d_loc:
-                    found_3d_loc = True
-                    # If so, transform keypoint location to ideal using homography, and compute 3D
-                    kp_loc_array = np.asarray(np.float32(kp_loc)).reshape(
-                        -1, 1, 2)
-                    training_2d_in_ideal = cv2.perspectiveTransform(
-                        kp_loc_array, homography_list[0])
-                    # Get 3D from this 2D point in ideal image
-                    training_3d_pt = dtd.compute_3d_points(
-                        training_2d_in_ideal,
-                        ideal_target.reprojection_map_list[poly_ind])
-                    training_3d_list.append(training_3d_pt)
+            if VISUALIZE_KEYPOINTS:
+                # Sanity check these:
+                img_copy = training_target.image.copy()
+                for polygon in training_target.polygon_list:
+                    pts = polygon.astype(int).reshape(-1, 2)
+                    img_copy = dtd.draw_polygon(img_copy, pts, (255, 0, 0),
+                                                True)
+                kp_tmp = np.asarray([
+                    (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
+                ]).reshape(-1, 2)
+                dtd.visualize_reprojections(
+                    img_copy, kp_tmp, training_target.keypoint_list_3d,
+                    camera_params.camera_int.camera_matrix,
+                    camera_params.camera_int.distortion_coeffs)
 
-        training_target.keypoint_list_3d = np.asarray(
-            training_3d_list).reshape(-1, 1, 3)
+    y2020_target_list = training_target_list
+    return y2020_target_list
 
-        if VISUALIZE_KEYPOINTS:
-            # Sanity check these:
-            img_copy = training_target.image.copy()
-            kp_tmp = np.asarray([
-                (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
-            ]).reshape(-1, 2)
-            dtd.visualize_reprojections(
-                img_copy, kp_tmp, training_target.keypoint_list_3d,
-                camera_params.camera_int.camera_matrix,
-                camera_params.camera_int.distortion_coeffs)
+if __name__ == '__main__':
+    ap = argparse.ArgumentParser()
+    ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
+    ap.add_argument("--no_use_bazel", help="Don't run using Bazel", default=True, action='store_false')
+    args = vars(ap.parse_args())
 
-        training_target.save_training_data()
+    VISUALIZE_KEYPOINTS = args["visualize"]
+    if args["visualize"]:
+        print("Visualizing results")
 
-y2020_target_list = training_target_list
+    USE_BAZEL = args["no_use_bazel"]
+    if not args["no_use_bazel"]:
+        print("Running on command line (no Bazel)")
+
+    compute_target_definition()
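
The keypoint transfer above leans entirely on the homography between the
training and ideal images; a minimal standalone sketch of the two transforms
involved (H here is a hypothetical stand-in for homography_list[0]):

    import cv2
    import numpy as np

    H = np.eye(3, dtype=np.float32)  # stand-in homography; identity for illustration

    # Training-image keypoint -> ideal image (done before compute_3d_points)...
    kp_loc = np.float32([451., 679.]).reshape(-1, 1, 2)
    kp_in_ideal = cv2.perspectiveTransform(kp_loc, H)

    # ...and ideal-image polygon -> training image, using the inverse map.
    ideal_polygon = np.float32([[451, 679], [451, 304], [689, 74]]).reshape(-1, 1, 2)
    polygon_in_training = cv2.perspectiveTransform(ideal_polygon, np.linalg.inv(H))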
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_blue.png b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
index 6ade26f..a3a7597 100644
--- a/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index 276046b..e5a7f5b 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -3,8 +3,6 @@
 import numpy as np
 import time
 
-import field_display
-
 ### DEFINITIONS
 MIN_MATCH_COUNT = 10  # 10 is min; more gives better matches
 FEATURE_EXTRACTOR_NAME = 'SIFT'
@@ -281,7 +279,8 @@
 #        keypoint_list: List of opencv keypoints
 def show_keypoints(image, keypoint_list):
     ret_img = image.copy()
-    ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img)
+    ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img,
+                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
     cv2.imshow("Keypoints", ret_img)
     cv2.waitKey(0)
     return ret_img
diff --git a/y2020/vision/tools/python_code/usb_camera_stream.py b/y2020/vision/tools/python_code/usb_camera_stream.py
index ecad9d8..5d3ae91 100644
--- a/y2020/vision/tools/python_code/usb_camera_stream.py
+++ b/y2020/vision/tools/python_code/usb_camera_stream.py
@@ -14,7 +14,7 @@
     exp = cap.get(cv2.CAP_PROP_EXPOSURE)
     print("Exposure:", exp)
     # Display the resulting frame
-    cv2.imshow('preview',frame)
+    cv2.imshow('preview', frame)
 
     #Waits for a user input to quit the application
     if cv2.waitKey(1) & 0xFF == ord('q'):