Definition of targets and cameras, along with 3D point calcs

Includes camera and target helper functions, along with defaults to get started

Cleaned up some of the image handling in defining polygons, and added sample code

Change-Id: I8b1237a9118dcdcd161235be4ea3a506e254a6bc
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
new file mode 100644
index 0000000..59f008f
--- /dev/null
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -0,0 +1,57 @@
+import argparse
+import cv2
+import json
+import math
+import numpy as np
+import time
+
+
+class CameraIntrinsics:
+    def __init__(self):
+        self.camera_matrix = []
+        self.distortion_coeffs = []
+
+
+class CameraExtrinsics:
+    def __init__(self):
+        self.R = []
+        self.T = []
+
+
+class CameraParameters:
+    def __init__(self):
+        self.camera_int = CameraIntrinsics()
+        self.camera_ext = CameraExtrinsics()
+
+
+
+### CAMERA DEFINITIONS
+
+# Robot camera has:
+# FOV_H = 93. * math.pi / 180.
+# FOV_V = 70. * math.pi / 180.
+
+# Create fake camera (based on USB webcam params)
+fx = 810.
+fy = 810.
+cx = 320.
+cy = 240.
+
+# Define a web_cam
+web_cam_int = CameraIntrinsics()
+web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+web_cam_int.distortion_coeffs = np.zeros((5,1))
+
+web_cam_ext = CameraExtrinsics()
+# Camera rotation from robot x,y,z to opencv (z, -x, -y)
+web_cam_ext.R = np.array([[0., 0., 1.],
+                          [-1, 0,  0],
+                          [0, -1., 0]])
+web_cam_ext.T = np.array([[0., 0., 0.]]).T
+
+web_cam_params = CameraParameters()
+web_cam_params.camera_int = web_cam_int
+web_cam_params.camera_ext = web_cam_ext
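+
+
+# Minimal usage sketch (illustrative only, not required by anything else):
+# with zero rotation and translation, a point 5 m straight ahead in the
+# OpenCV camera frame should project to roughly the image center (cx, cy).
+if __name__ == "__main__":
+    sample_pt = np.float32([[0., 0., 5.]])
+    img_pts, _ = cv2.projectPoints(sample_pt, np.zeros((3, 1)),
+                                   np.zeros((3, 1)),
+                                   web_cam_int.camera_matrix,
+                                   web_cam_int.distortion_coeffs)
+    print("Projected pixel:", img_pts[0][0])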
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
new file mode 100644
index 0000000..d1802aa
--- /dev/null
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -0,0 +1,237 @@
+import argparse
+import cv2
+import json
+import math
+import numpy as np
+import time
+
+import field_display
+import train_and_match as tam
+
+# Points for current polygon
+point_list = []
+current_mouse = (0,0)
+
+def get_mouse_event(event, x, y, flags, param):
+    global point_list
+    global current_mouse
+    current_mouse = (x,y)
+    if event == cv2.EVENT_LBUTTONUP:
+        #print("Adding point at %d, %d" % (x,y))
+        point_list.append([x,y])
+
+def draw_polygon(image, polygon, color=(255,0,0), close_polygon = False):
+    for point in polygon:
+        image = cv2.circle(image, (point[0], point[1]), 5, (255,0,0), -1)
+    if len(polygon) > 1:
+        np_poly = np.array(polygon)
+        image = cv2.polylines(image, [np_poly], close_polygon, color, thickness=3)
+    return image
+
+# Close out the polygon and display it; return True if it has 3 or more points
+def finish_polygon(image, polygon):
+    # If we have at least 3 points, close and show the polygon
+    if len(polygon) <= 2:
+        return False
+
+    polygon.append(polygon[0])
+    image = draw_polygon(image, polygon, color=(0,0,255))
+    cv2.imshow("image", image)
+    cv2.waitKey(500)
+    return True
+
+
+def define_polygon(image):
+    global point_list
+    # Set of all defined polygons
+    point_list = []
+    polygon_list = []
+
+    display_image = image.copy()
+
+    cv2.namedWindow("image")
+    cv2.setMouseCallback("image", get_mouse_event)
+
+    while True:
+        cv2.imshow("image", display_image)
+        key = cv2.waitKey(1) & 0xFF
+
+        # if the 'r' key is pressed, reset the current polygon
+        # Leaves previously defined polygons intact
+        if key == ord("r"):
+            display_image = image.copy()
+            point_list = []
+
+        # if the 'd' key is pressed, delete the last point
+        if key == ord("d") and point_list:
+            display_image = image.copy()
+            point_list.pop()
+
+        # if the 'n' key is pressed, save current polygon, and move to next
+        if key == ord("n"):
+            if (finish_polygon(display_image, point_list)):
+                polygon_list.append(point_list)
+            display_image = image.copy()
+            point_list = []
+
+        # if the 'q' key is pressed, break from the loop and finish polygon
+        elif key == ord("q"):
+            if (finish_polygon(display_image, point_list)):
+                polygon_list.append(point_list)
+            break
+
+        display_image = draw_polygon(display_image, point_list)
+
+    return polygon_list
+
+
+# Given a list of suggested point locations on an image, prompt the user to
+# click on each corresponding point in the image.
+# Return the list of clicked points
+def define_points_by_list(image, points):
+    global point_list
+    global current_mouse
+    point_list = []
+
+    display_image = image.copy()
+
+    cv2.namedWindow("image")
+    cv2.setMouseCallback("image", get_mouse_event)
+
+    while len(point_list) < len(points):
+        i = len(point_list)
+        # Draw mouse location and suggested target
+        display_image = image.copy()
+        display_image = cv2.circle(display_image, (points[i][0], points[i][1]), 15, (0,255,0), 2)
+        cursor_length = 5
+        display_image = cv2.line(display_image, (current_mouse[0]-cursor_length, current_mouse[1]), (current_mouse[0]+cursor_length, current_mouse[1]), (255,0,0), 2, cv2.LINE_AA)
+        display_image = cv2.line(display_image, (current_mouse[0], current_mouse[1]-cursor_length), (current_mouse[0], current_mouse[1]+cursor_length), (255,0,0), 2, cv2.LINE_AA)
+
+        cv2.imshow("image", display_image)
+
+        key = cv2.waitKey(1) & 0xFF
+
+        # if the 'r' key is pressed, reset the current point collection
+        if key == ord("r"):
+            point_list = []
+
+        # if the 'd' key is pressed, delete the last point
+        elif key == ord("d") and point_list:
+            point_list.pop()
+
+        # if the 'q' key is pressed, quit
+        elif key == ord("q"):
+            quit()
+
+    return point_list
+
+# Determine whether a given point lies within (or on border of) a set of polygons
+# Return true if it does
+def point_in_polygons(point, polygons):
+    for poly in polygons:
+        np_poly = np.asarray(poly)
+        dist = cv2.pointPolygonTest(np_poly, (point[0], point[1]), True)
+        if dist >=0:
+            return True
+
+    return False
+
+## Filter keypoints by polygons
+def filter_keypoints_by_polygons(keypoint_list, descriptor_list, polygons):
+    # TODO: Need to make sure we've got the right numpy array / list 
+    keep_keypoint_list = []
+    keep_descriptor_list = []
+    reject_keypoint_list = []
+    reject_descriptor_list = []
+    keep_list = []
+    reject_list = []
+
+    # For now, pretend keypoints are just points
+    for i in range(len(keypoint_list)):
+        if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]), polygons):
+            keep_list.append(i)
+        else:
+            reject_list.append(i)
+
+    keep_keypoint_list = [keypoint_list[kp_ind] for kp_ind in keep_list]
+    reject_keypoint_list = [keypoint_list[kp_ind] for kp_ind in reject_list]
+    # Allow function to be called with no descriptors, and just return empty list
+    if descriptor_list is not None:
+        keep_descriptor_list = descriptor_list[keep_list,:]
+        reject_descriptor_list = descriptor_list[reject_list,:]
+
+    return keep_keypoint_list, keep_descriptor_list, reject_keypoint_list, reject_descriptor_list, keep_list
+
+# Helper function that appends a column of ones to a list (of 2d points)
+def append_ones(point_list):
+    return np.hstack([point_list, np.ones((len(point_list),1))])
+
+# Given a list of 2d points and their corresponding 3d locations, compute the
+# map between them:
+# pt_3d = (pt_2d, 1) * reprojection_map
+# TODO: We should really look at residuals to see if things are messed up
+def compute_reprojection_map(polygon_2d, polygon_3d):
+    pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1,2)
+    pts_2d_lstsq = append_ones(pts_2d)
+    pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1,3)
+
+    reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=None)[0]
+
+    return reprojection_map
+
+# Given a set of 2d keypoint locations and a reprojection map (valid within
+# the polygon it was computed from), compute the corresponding 3d points.
+# Returns a numpy array of 3d locations the same size as the keypoint list
+def compute_3d_points(keypoint_2d_list, reprojection_map):
+    pt_2d_lstsq = append_ones(np.asarray(np.float32(keypoint_2d_list)).reshape(-1,2))
+    pt_3d = pt_2d_lstsq.dot(reprojection_map)
+
+    return pt_3d
+
+# Given matched 2d and 3d locations and a camera projection model, solve for
+# the camera pose, reproject the 3d points, and overlay both sets on the image
+def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
+    # Compute camera location
+    # TODO: Warn on bad inliers
+    # TODO: Change this to not have to recast to np
+    pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1,1,2)
+    pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1,1,3)
+    retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat, distortion_coeffs)
+    pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat, distortion_coeffs)
+    for i in range(len(pts_2d)):
+        pt_2d = pts_2d_np[i][0]
+        pt_3d_proj = pts_3d_proj_2d[i][0]
+        pt_color = (0, 255, 0)
+        if inliers is None or i not in inliers:
+            pt_color = (0, 0, 255)
+
+        img = cv2.circle(img, (int(pt_2d[0]), int(pt_2d[1])), 3, pt_color, 3)
+        img = cv2.circle(img, (int(pt_3d_proj[0]), int(pt_3d_proj[1])), 15, pt_color, 3)
+
+    cv2.imshow("image", img)
+    cv2.waitKey(0)
+    return img
+
+
+def sample_define_polygon_usage():
+    image = cv2.imread("test_images/train_power_port_red.png")
+
+    polygon_list = define_polygon(image)
+    print(polygon_list)
+
+
+def sample_define_points_by_list_usage():
+    image = cv2.imread("test_images/train_power_port_red.png")
+
+    test_points = [(451, 679), (451, 304),
+                   (100, 302), (451, 74),
+                   (689, 74), (689, 302),
+                   (689, 679)]
+
+    clicked_points = define_points_by_list(image, test_points)
+    print(clicked_points)
+
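+
+# Sample usage of the reprojection-map helpers (an illustrative sketch):
+# map a 100x100 pixel square onto a 1m x 1m plane at z=0, then look up the
+# 3D location of the square's center -- it should come back near (0.5, 0.5, 0)
+def sample_reprojection_map_usage():
+    polygon_2d = [(0, 0), (100, 0), (100, 100), (0, 100)]
+    polygon_3d = [(0., 0., 0.), (1., 0., 0.), (1., 1., 0.), (0., 1., 0.)]
+    reprojection_map = compute_reprojection_map(polygon_2d, polygon_3d)
+    print(compute_3d_points([(50, 50)], reprojection_map))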
diff --git a/y2020/vision/tools/python_code/field_display.py b/y2020/vision/tools/python_code/field_display.py
index a3f1024..93f645c 100644
--- a/y2020/vision/tools/python_code/field_display.py
+++ b/y2020/vision/tools/python_code/field_display.py
@@ -1,6 +1,5 @@
 import cv2
 import math
-from matplotlib import pyplot as plt
 import numpy as np
 
 
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index bcf610f..d6b7deb 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -100,7 +100,7 @@
 
 looping = True
 # Grab images until 'q' is pressed (or just once for canned images)
-while (looping):
+while looping:
     if (query_image_index is -1):
         # Capture frame-by-frame
         ret, frame = cap.read()
@@ -305,3 +305,4 @@
     cap.release()
 
 cv2.destroyAllWindows()
+
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
new file mode 100644
index 0000000..d711fb9
--- /dev/null
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -0,0 +1,285 @@
+import cv2
+import json
+import math
+import numpy as np
+
+import camera_definition
+import define_training_data as dtd
+import train_and_match as tam
+
+VISUALIZE_KEYPOINTS = True
+VISUALIZE_POLYGONS = True
+
+
+class TargetData:
+    def __init__(self, filename):
+        self.image_filename = filename
+        # Load an image (will come in as a 1-element list)
+        self.image = tam.load_images([filename])[0]
+        self.polygon_list = []
+        self.polygon_list_3d = []
+        self.reprojection_map_list = []
+        self.keypoint_list = []
+        self.keypoint_list_3d = None  # numpy array of 3D points
+        self.descriptor_list = []
+
+    def extract_features(self, feature_extractor):
+        kp_lists, desc_lists = tam.detect_and_compute(feature_extractor,
+                                                      [self.image])
+        self.keypoint_list = kp_lists[0]
+        self.descriptor_list = desc_lists[0]
+
+    def filter_keypoints_by_polygons(self):
+        self.keypoint_list, self.descriptor_list, _, _, _ = dtd.filter_keypoints_by_polygons(
+            self.keypoint_list, self.descriptor_list, self.polygon_list)
+
+    def compute_reprojection_maps(self):
+        for poly_ind in range(len(self.polygon_list)):
+            reprojection_map = dtd.compute_reprojection_map(
+                self.polygon_list[poly_ind], self.polygon_list_3d[poly_ind])
+            self.reprojection_map_list.append(reprojection_map)
+
+    def project_keypoint_to_3d_by_polygon(self, keypoint_list):
+        # Create dummy array of correct size that we can put values in
+        point_list_3d = np.asarray(
+            [(0., 0., 0.) for kp in keypoint_list]).reshape(-1, 3)
+        # Iterate through our polygons
+        for poly_ind in range(len(self.polygon_list)):
+            # Filter and project points for each polygon in the list
+            filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
+                keypoint_list, None, [self.polygon_list[poly_ind]])
+            print("Filtering kept %d of %d features" % (len(keep_list),
+                                                        len(keypoint_list)))
+            filtered_point_array = np.asarray(
+                [(keypoint.pt[0], keypoint.pt[1])
+                 for keypoint in filtered_keypoints]).reshape(-1, 2)
+            filtered_point_list_3d = dtd.compute_3d_points(
+                filtered_point_array, self.reprojection_map_list[poly_ind])
+            for i in range(len(keep_list)):
+                point_list_3d[keep_list[i]] = filtered_point_list_3d[i]
+
+        return point_list_3d
+
+    # Save out the training data (not yet implemented)
+    def save_training_data(self):
+        if (len(self.keypoint_list) != len(self.descriptor_list)
+                or len(self.keypoint_list) != len(self.keypoint_list_3d)):
+            print("Big problem-- lists don't match in size: %d, %d, %d" %
+                  (len(self.keypoint_list), len(self.descriptor_list),
+                   len(self.keypoint_list_3d)))
+
+        print("(Would like to) Save target with %d keypoints" % len(
+            self.keypoint_list))
+
+
+# TARGET DEFINITIONS
+
+# Some general info about our field and targets
+# Assume camera centered on target at 1 m above ground and distance of 4.85m
+camera_height = 1.
+total_target_height = 3.10
+field_length = 15.98
+power_port_offset_y = 1.67
+power_port_width = 1.22
+bottom_wing_height = 1.88
+wing_width = 1.83
+
+# Pick the target center location halfway between the top and bottom of the top panel
+target_center_height = (total_target_height + bottom_wing_height) / 2.
+
+# TODO: Still need to figure out what this angle actually is
+wing_angle = 20. * math.pi / 180.
+
+# Start at lower left corner, and work around clockwise
+# These were found by manually picking the points in GIMP for this image
+main_panel_polygon_points_2d = [(451, 679), (451, 304), (100, 302), (451, 74),
+                                (689, 74), (689, 302), (689, 679)]
+# These are "virtual" 3D points based on the expected geometry
+main_panel_polygon_points_3d = [
+    (field_length, power_port_width / 2. - power_port_offset_y,
+     0.), (field_length, power_port_width / 2. - power_port_offset_y,
+           bottom_wing_height),
+    (field_length, power_port_width / 2. - power_port_offset_y + wing_width,
+     bottom_wing_height),
+    (field_length, power_port_width / 2. - power_port_offset_y,
+     total_target_height), (field_length,
+                            -power_port_width / 2. - power_port_offset_y,
+                            total_target_height),
+    (field_length, -power_port_width / 2. - power_port_offset_y,
+     bottom_wing_height), (field_length,
+                           -power_port_width / 2. - power_port_offset_y, 0.)
+]
+
+wing_panel_polygon_points_2d = [(689, 74), (1022, 302), (689, 302)]
+# These are "virtual" 3D points based on the expected geometry
+wing_panel_polygon_points_3d = [
+    (field_length, -power_port_width / 2. - power_port_offset_y,
+     total_target_height),
+    (field_length - wing_width * math.sin(wing_angle), -power_port_width / 2. -
+     power_port_offset_y - wing_width * math.cos(wing_angle),
+     bottom_wing_height), (field_length,
+                           -power_port_width / 2. - power_port_offset_y,
+                           bottom_wing_height)
+]
+
+# Populate the red power port
+ideal_power_port_red = TargetData('test_images/train_power_port_red.png')
+
+ideal_power_port_red.polygon_list.append(main_panel_polygon_points_2d)
+ideal_power_port_red.polygon_list_3d.append(main_panel_polygon_points_3d)
+
+ideal_power_port_red.polygon_list.append(wing_panel_polygon_points_2d)
+ideal_power_port_red.polygon_list_3d.append(wing_panel_polygon_points_3d)
+
+ideal_target_list = []
+ideal_target_list.append(ideal_power_port_red)
+
+# Create feature extractor
+feature_extractor = tam.load_feature_extractor()
+
+# Use webcam parameters for now
+camera_params = camera_definition.web_cam_params
+for ideal_target in ideal_target_list:
+    print("Preparing target for image %s" % ideal_target.image_filename)
+    ideal_target.extract_features(feature_extractor)
+    ideal_target.filter_keypoints_by_polygons()
+    ideal_target.compute_reprojection_maps()
+    ideal_target.keypoint_list_3d = ideal_target.project_keypoint_to_3d_by_polygon(
+        ideal_target.keypoint_list)
+
+    if VISUALIZE_POLYGONS:
+        # For each polygon, show the 2D points and the reprojection for 3D
+        # of the polygon definition
+        for polygon_2d, polygon_3d in zip(ideal_target.polygon_list,
+                                          ideal_target.polygon_list_3d):
+            ideal_pts_tmp = np.asarray(polygon_2d).reshape(-1, 2)
+            ideal_pts_3d_tmp = np.asarray(polygon_3d).reshape(-1, 3)
+            # We can only compute pose if we have at least 4 points
+            # Only matters for reprojection for visualization
+            # Keeping this code here, since it's helpful when testing
+            if (len(polygon_2d) >= 4):
+                img_copy = dtd.draw_polygon(ideal_target.image.copy(), polygon_2d, (0,255,0), True)
+                dtd.visualize_reprojections(img_copy, ideal_pts_tmp, ideal_pts_3d_tmp, camera_params.camera_int.camera_matrix, camera_params.camera_int.distortion_coeffs)
+
+    if VISUALIZE_KEYPOINTS:
+        # For each polygon, show which keypoints (2D and 3D manual) were kept
+        for polygon in ideal_target.polygon_list:
+            img_copy = ideal_target.image.copy()
+            kp_in_poly2d = []
+            kp_in_poly3d = []
+            for kp, kp_3d in zip(ideal_target.keypoint_list,
+                                 ideal_target.keypoint_list_3d):
+                if dtd.point_in_polygons((kp.pt[0], kp.pt[1]), [polygon]):
+                    kp_in_poly2d.append((kp.pt[0], kp.pt[1]))
+                    kp_in_poly3d.append(kp_3d)
+
+            img_copy = dtd.draw_polygon(img_copy, polygon, (0,255,0), True)
+            dtd.visualize_reprojections(
+                img_copy,
+                np.asarray(kp_in_poly2d).reshape(-1, 2),
+                np.asarray(kp_in_poly3d).reshape(
+                    -1, 3), camera_params.camera_int.camera_matrix,
+                camera_params.camera_int.distortion_coeffs)
+
+### TODO: Add code to do manual point selection
+AUTO_PROJECTION = True
+training_target_list = []
+if AUTO_PROJECTION:
+    print("Auto projection of training keypoints to 3D using ideal images")
+    # Match the captured training image against the "ideal" training image
+    # and use those matches to pin down the 3D locations of the keypoints
+
+    training_power_port_red = TargetData(
+        'test_images/train_power_port_red_webcam.png')
+
+    training_target_list.append(training_power_port_red)
+
+    for target_ind in range(len(training_target_list)):
+        # Assumes we have 1 ideal view for each training target
+        training_target = training_target_list[target_ind]
+        ideal_target = ideal_target_list[target_ind]
+
+        print("Preparing target for image %s" % training_target.image_filename)
+        # Extract keypoints and descriptors for model
+        training_target.extract_features(feature_extractor)
+
+        # Create matcher that we'll use to match with ideal
+        matcher = tam.train_matcher([training_target.descriptor_list])
+
+        matches_list = tam.compute_matches(matcher,
+                                           [training_target.descriptor_list],
+                                           [ideal_target.descriptor_list])
+
+        homography_list, matches_mask_list = tam.compute_homographies(
+            [training_target.keypoint_list], [ideal_target.keypoint_list],
+            matches_list)
+
+        for polygon in ideal_target.polygon_list:
+            ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
+            H_inv = np.linalg.inv(homography_list[0])
+            # We use the ideal target's polygons to define the polygons on
+            # the training target
+            transformed_polygon = cv2.perspectiveTransform(ideal_pts_2d, H_inv)
+            training_target.polygon_list.append(transformed_polygon)
+
+            # Verify that this looks right
+            if VISUALIZE_KEYPOINTS:
+                pts = transformed_polygon.astype(int).reshape(-1, 2)
+                image = dtd.draw_polygon(training_target.image.copy(), pts,
+                                         (255, 0, 0), True)
+                cv2.imshow("image", image)
+                cv2.waitKey(0)
+
+        print("Started with %d keypoints" % len(training_target.keypoint_list))
+
+        training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
+            training_target.keypoint_list, training_target.descriptor_list,
+            training_target.polygon_list)
+        print("After filtering by polygons, had %d keypoints" % len(
+            training_target.keypoint_list))
+        if VISUALIZE_KEYPOINTS:
+            tam.show_keypoints(training_target.image,
+                               training_target.keypoint_list)
+
+        # Now comes the fun part
+        # Go through all the training keypoints and define their 3D locations using the ideal target
+        training_3d_list = []
+        for kp_ind in range(len(training_target.keypoint_list)):
+            # We're going to look for the first time this keypoint is in a polygon
+            found_3d_loc = False
+            # First, is it in the correct polygon
+            kp_loc = (training_target.keypoint_list[kp_ind].pt[0],
+                      training_target.keypoint_list[kp_ind].pt[1])
+            for poly_ind in range(len(training_target.polygon_list)):
+                if dtd.point_in_polygons(
+                        kp_loc, [training_target.polygon_list[poly_ind]
+                                 ]) and not found_3d_loc:
+                    found_3d_loc = True
+                    # If so, transform keypoint location to ideal using homography, and compute 3D
+                    kp_loc_array = np.asarray(np.float32(kp_loc)).reshape(
+                        -1, 1, 2)
+                    training_2d_in_ideal = cv2.perspectiveTransform(
+                        kp_loc_array, homography_list[0])
+                    # Get 3D from this 2D point in ideal image
+                    training_3d_pt = dtd.compute_3d_points(
+                        training_2d_in_ideal,
+                        ideal_target.reprojection_map_list[poly_ind])
+                    training_3d_list.append(training_3d_pt)
+
+        training_target.keypoint_list_3d = np.asarray(
+            training_3d_list).reshape(-1, 1, 3)
+
+        if VISUALIZE_KEYPOINTS:
+            # Sanity check these:
+            img_copy = training_target.image.copy()
+            kp_tmp = np.asarray([
+                (kp.pt[0], kp.pt[1]) for kp in training_target.keypoint_list
+            ]).reshape(-1, 2)
+            dtd.visualize_reprojections(
+                img_copy, kp_tmp, training_target.keypoint_list_3d,
+                camera_params.camera_int.camera_matrix,
+                camera_params.camera_int.distortion_coeffs)
+
+        training_target.save_training_data()
+
+y2020_target_list = training_target_list
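+
+# Illustrative summary (a sketch only, nothing consumes this yet): report how
+# many keypoints ended up with 3D locations for each training target.
+for _target in y2020_target_list:
+    print("Target %s has %d keypoints with 3D locations" %
+          (_target.image_filename, len(_target.keypoint_list)))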
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index 8fed4c3..276046b 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -1,10 +1,6 @@
 import cv2
 import math
-from matplotlib import pyplot as plt
 import numpy as np
-#import tkinter
-# pip install pillow
-#import PIL.Image, PIL.ImageTk
 import time
 
 import field_display
@@ -289,3 +285,4 @@
     cv2.imshow("Keypoints", ret_img)
     cv2.waitKey(0)
     return ret_img
+
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index b8d94fd..00f68bb 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -1,111 +1,90 @@
 import cv2
 import math
-from matplotlib import pyplot as plt
 import numpy as np
 
 import field_display
+import camera_definition
 
-# Assume image is 640x480 (VGA)
-num_pixels_x = 640
-num_pixels_y = 480
-
-# Camera center is 320, 240
-c_x = num_pixels_x / 2
-c_y = num_pixels_y / 2
-
-# Horiz. FOV is 54 degrees
-FOV_h = 54 * math.pi / 180.0  # (in radians)
-# Vert FOV is horizontal FOV scaled by aspect ratio (through tan function)
-FOV_v = math.atan(c_y / c_x * math.tan(FOV_h / 2.)) * 2
-
-# Assuming square pixels
-# focal length is (640/2)/tan(FOV_h/2)
-f_x = c_x / (math.tan(FOV_h / 2.))
-f_y = c_y / (math.tan(FOV_v / 2.))
+# Import camera from camera_definition
+camera_params = camera_definition.web_cam_params
+cam_mat = camera_params.camera_int.camera_matrix
+distortion_coeffs = camera_params.camera_int.distortion_coeffs
 
 # Height of camera on robot above ground
 cam_above_ground = 0.5
-
-# TODO: Should fix f_y entry below.
-cam_mat = np.array([[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1.]])
-
-# Use default distortion (i.e., none)
-distortion_coeffs = np.zeros((5, 1))
-
 depth = 5.0  # meters
 
-R_w_tarj_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
-R_w_tarj, jac = cv2.Rodrigues(R_w_tarj_mat)
-T_w_tarj = np.array([[15.98 - depth, -4.10 + 2.36, cam_above_ground]]).T
+# Define camera location (cam) relative to origin (w)
+R_w_cam_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+R_w_cam, jac = cv2.Rodrigues(R_w_cam_mat)
+T_w_cam = np.array([[15.98 - depth, -4.10 + 2.36, cam_above_ground]]).T
 
-img_ret = field_display.plot_bot_on_field(None, (0, 255, 0), T_w_tarj)
-#field_display.show_field(img_ret)
+img_ret = field_display.plot_bot_on_field(None, (0, 255, 0), T_w_cam)
 
 # Create fake set of points relative to camera capture (target) frame
 # at +/- 1 meter in x, 5 meter depth, and every 1 meter in y from 0 to 4m (above the camera, so negative y values)
-pts_3d_T_t = np.array([[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [
-    1., -1., depth
-], [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
-                       [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
+pts_3d_target = np.array(
+    [[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [1., -1., depth],
+     [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
+     [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
 
-# Ground truth shift of camera, to compute projections
-R_tarj_ci_gt = np.array([[0., 0.2, 0.2]]).T
+# Ground truth shift of camera from (cam) to (cam2), to compute projections
+R_cam_cam2_gt = np.array([[0., 0.2, 0.2]]).T
 
-R_tarj_ci_gt_mat, jac = cv2.Rodrigues(R_tarj_ci_gt)
+R_cam_cam2_gt_mat, jac = cv2.Rodrigues(R_cam_cam2_gt)
 
-T_tarj_ci_gt = np.array([[1., 2., -5.]]).T
+T_cam_cam2_gt = np.array([[1., 2., -5.]]).T
 
 # To transform vectors, we apply the inverse transformation
-R_tarj_ci_gt_mat_inv = R_tarj_ci_gt_mat.T
-T_tarj_ci_gt_inv = -R_tarj_ci_gt_mat_inv.dot(T_tarj_ci_gt)
+R_cam_cam2_gt_mat_inv = R_cam_cam2_gt_mat.T
+T_cam_cam2_gt_inv = -R_cam_cam2_gt_mat_inv.dot(T_cam_cam2_gt)
 
-#pts_3d_T_t_shifted = (R_tarj_ci_gt_mat_inv.dot(pts_3d_T_t.T) + T_tarj_ci_gt_inv).T
+#pts_3d_target_shifted = (R_cam_cam2_gt_mat_inv.dot(pts_3d_target.T) + T_cam_cam2_gt_inv).T
 
 # Now project from new position
 # This was the manual way to do it-- use cv2.projectPoints instead
-#pts_proj_3d = cam_mat.dot(pts_3d_T_t_shifted.T).T
+#pts_proj_3d = cam_mat.dot(pts_3d_target_shifted.T).T
 #pts_proj_2d = np.divide(pts_proj_3d[:,0:2],(pts_proj_3d[:,2].reshape(-1,1)))
 
-pts_proj_2d_ci, jac_2d = cv2.projectPoints(pts_3d_T_t, R_tarj_ci_gt_mat_inv,
-                                           T_tarj_ci_gt_inv, cam_mat,
-                                           distortion_coeffs)
-#print(pts_proj_2d_ci)
+pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
+    pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
+    distortion_coeffs)
 
-# Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
+# Now, solve for the pose using the original 3d points (pts_3d_target) and the projections from the new location
-retval, R_ci_tarj_est, T_ci_tarj_est, inliers = cv2.solvePnPRansac(
-    pts_3d_T_t, pts_proj_2d_ci, cam_mat, distortion_coeffs)
+retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
+    pts_3d_target, pts_proj_2d_cam2, cam_mat, distortion_coeffs)
 
 # This is the pose from camera to original target spot.  We need to invert to get back to the pose we want
-R_tarj_ci_est_mat = cv2.Rodrigues(R_ci_tarj_est)[0].T
-T_tarj_ci_est = -R_tarj_ci_est_mat.dot(T_ci_tarj_est)
-R_tarj_ci_est = cv2.Rodrigues(R_tarj_ci_est_mat)[0]
+R_cam_cam2_est_mat = cv2.Rodrigues(R_cam2_cam_est)[0].T
+T_cam_cam2_est = -R_cam_cam2_est_mat.dot(T_cam2_cam_est)
+R_cam_cam2_est = cv2.Rodrigues(R_cam_cam2_est_mat)[0]
 
-print("Check:\n", "Rot error:\n", R_tarj_ci_gt - R_tarj_ci_est,
-      "\nTrans error:\n", T_tarj_ci_gt - T_tarj_ci_est,
+print("Check:\n", "Rot error:\n", R_cam_cam2_gt - R_cam_cam2_est,
+      "\nTrans error:\n", T_cam_cam2_gt - T_cam_cam2_est,
       "\nError is < 0.001 in R & T: ",
-      np.linalg.norm(R_tarj_ci_gt - R_tarj_ci_est) < 0.001,
-      np.linalg.norm(T_tarj_ci_gt - T_tarj_ci_est) < 0.001)
+      np.linalg.norm(R_cam_cam2_gt - R_cam_cam2_est) < 0.001, " & ",
+      np.linalg.norm(T_cam_cam2_gt - T_cam_cam2_est) < 0.001)
 
 # Compute camera location in world coordinates
-R_w_ci, T_w_ci, d1, d2, d3, d4, d5, d6, d7, d8 = cv2.composeRT(
-    R_tarj_ci_est, T_tarj_ci_est, R_w_tarj, T_w_tarj)
+R_w_cam2, T_w_cam2, d1, d2, d3, d4, d5, d6, d7, d8 = cv2.composeRT(
+    R_cam_cam2_est, T_cam_cam2_est, R_w_cam, T_w_cam)
 
 print("Estimate in world coordinates")
-print("R, T:\n", R_w_ci, "\n", T_w_ci)
-img_ret = field_display.plot_bot_on_field(img_ret, (0, 255, 255), T_w_ci)
+print("R, T:\n", R_w_cam2, "\n", T_w_cam2)
+img_ret = field_display.plot_bot_on_field(img_ret, (0, 255, 255), T_w_cam2)
 field_display.show_field(img_ret)
 
 # Compute vector to target
 # TODO: Likely better to do this from the homography, rather than the pose estimate...
 
 T_w_target_pt = np.array([[15.98, -4.10 + 2.36, 2.0 - cam_above_ground]]).T
-vector_to_target = T_w_target_pt - T_w_ci
-d_ci_target = np.linalg.norm(vector_to_target)
-phi_ci_target = math.atan2(vector_to_target[1][0], vector_to_target[0][0])
+vector_to_target = T_w_target_pt - T_w_cam2
+d_cam2_target = np.linalg.norm(vector_to_target)
+phi_cam2_target = math.atan2(vector_to_target[1][0], vector_to_target[0][0])
 print("Vector to target (from cam frame):\n", vector_to_target,
-      "\nDistance to target: ", d_ci_target, "\nAngle to target (deg): ",
-      phi_ci_target * 180. / math.pi)
+      "\nDistance to target: ", d_cam2_target, "\nAngle to target (deg): ",
+      phi_cam2_target * 180. / math.pi)
 
-img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0), T_w_ci,
+img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0), T_w_cam2,
                                            T_w_target_pt)
 field_display.show_field(img_ret)
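+
+# Sanity-check sketch (illustrative only): the inverse of a rigid transform
+# (R, T) is (R^T, -R^T T), so composing the ground-truth transform with its
+# inverse should give the identity rotation and a zero translation.
+print("Inverse check (expect identity and zeros):\n",
+      R_cam_cam2_gt_mat.dot(R_cam_cam2_gt_mat_inv), "\n",
+      R_cam_cam2_gt_mat.dot(T_cam_cam2_gt_inv) + T_cam_cam2_gt)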
diff --git a/y2020/vision/tools/python_code/usb_camera_stream.py b/y2020/vision/tools/python_code/usb_camera_stream.py
index 42cb1f1..ecad9d8 100644
--- a/y2020/vision/tools/python_code/usb_camera_stream.py
+++ b/y2020/vision/tools/python_code/usb_camera_stream.py
@@ -7,14 +7,14 @@
     print("Could not open video device")
     quit()
 
-while (True):
+while True:
     # Capture frame-by-frame
     ret, frame = cap.read()
 
     exp = cap.get(cv2.CAP_PROP_EXPOSURE)
     print("Exposure:", exp)
     # Display the resulting frame
     cv2.imshow('preview', frame)
 
     #Waits for a user input to quit the application
     if cv2.waitKey(1) & 0xFF == ord('q'):