Adding target point definition and calculation (x, y, radius) for visualization

Apologies in advance for:
1) Changing flatbuffer definitions
1a) Moved the target point data to camera poses, since I think it really belongs there
1b) Changed the name of the target point, since it had the same name as those in the training data
2) A lot of formatting changes from running yapf, buildifier, and clang-format.
(BUILD, target_definition_test.py have formatting changes only)
(define_training_data.py and load_sift_training.py are mostly formatting)
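
For reference, a consumer of the new CameraPose fields could draw the
target on the query image roughly like this (an illustrative sketch,
not part of this change; `pose` is a matched sift::CameraPose and
`image_mat` is an OpenCV view of the camera frame):

    // pose and image_mat are assumed to be in scope (see note above).
    cv::Point2f target(pose->query_target_point_x(),
                       pose->query_target_point_y());
    // Draw the perceived target circle at its mapped location and radius.
    cv::circle(image_mat,
               cv::Point(static_cast<int>(target.x),
                         static_cast<int>(target.y)),
               static_cast<int>(pose->query_target_point_radius()),
               cv::Scalar(0, 255, 0), /*thickness=*/2);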

Change-Id: Iace1261ea5f04aeb8103b5b9a9d6da5ec3328293
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index b643fcb..5e65387 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -31,14 +31,14 @@
     srcs = [
         "camera_reader.cc",
     ],
+    data = [
+        "//y2020:config.json",
+    ],
     restricted_to = [
         "//tools:k8",
         "//tools:armhf-debian",
     ],
     visibility = ["//y2020:__subpackages__"],
-    data = [
-        "//y2020:config.json",
-    ],
     deps = [
         ":v4l2_reader",
         ":vision_fbs",
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index d45ec3f..c47e42b 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -76,6 +76,8 @@
                             const std::vector<std::vector<cv::DMatch>> &matches,
                             const std::vector<cv::Mat> &camera_target_list,
                             const std::vector<cv::Mat> &field_camera_list,
+                            const std::vector<cv::Point2f> &target_point_vector,
+                            const std::vector<float> &target_radius_vector,
                             aos::Sender<sift::ImageMatchResult> *result_sender,
                             bool send_details);
 
@@ -113,6 +115,17 @@
         ->field_to_target();
   }
 
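+  // Returns (via the reference args) the target point (x, y) and radius
+  // stored with the given training image, for locating the target on
+  // matched query images.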
+  void TargetLocation(int training_image_index, cv::Point2f &target_location,
+                      float &target_radius) {
+    target_location.x =
+        training_data_->images()->Get(training_image_index)->target_point_x();
+    target_location.y =
+        training_data_->images()->Get(training_image_index)->target_point_y();
+    target_radius = training_data_->images()
+                        ->Get(training_image_index)
+                        ->target_point_radius();
+  }
+
   int number_training_images() const {
     return training_data_->images()->size();
   }
@@ -193,6 +206,8 @@
     const std::vector<std::vector<cv::DMatch>> &matches,
     const std::vector<cv::Mat> &camera_target_list,
     const std::vector<cv::Mat> &field_camera_list,
+    const std::vector<cv::Point2f> &target_point_vector,
+    const std::vector<float> &target_radius_vector,
     aos::Sender<sift::ImageMatchResult> *result_sender, bool send_details) {
   auto builder = result_sender->MakeBuilder();
   const auto camera_calibration_offset =
@@ -234,6 +249,9 @@
     pose_builder.add_camera_to_target(transform_offset);
     pose_builder.add_field_to_camera(fc_transform_offset);
     pose_builder.add_field_to_target(field_to_target_offset);
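+    // Add the target point mapped onto this query image, along with its
+    // perceived radius, for downstream visualization.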
+    pose_builder.add_query_target_point_x(target_point_vector[i].x);
+    pose_builder.add_query_target_point_y(target_point_vector[i].y);
+    pose_builder.add_query_target_point_radius(target_radius_vector[i]);
     camera_poses.emplace_back(pose_builder.Finish());
   }
   const auto camera_poses_offset = builder.fbb()->CreateVector(camera_poses);
@@ -317,8 +335,12 @@
   std::vector<cv::Mat> camera_target_list;
   std::vector<cv::Mat> field_camera_list;
 
-  std::vector<PerImageMatches> per_image_good_matches;
+  // Rebuild the matches and store them here
   std::vector<std::vector<cv::DMatch>> all_good_matches;
+  // Build lists of the target point and radius for each good match
+  std::vector<cv::Point2f> target_point_vector;
+  std::vector<float> target_radius_vector;
+
   // Iterate through matches for each training image
   for (size_t i = 0; i < per_image_matches.size(); ++i) {
     const PerImageMatches &per_image = per_image_matches[i];
@@ -343,8 +365,8 @@
 
     VLOG(2) << "Number of matches after homography: " << cv::countNonZero(mask)
             << "\n";
-    // Fill our match info for each good match
-    // TODO<Jim>: Could probably optimize some of the copies here
+
+    // Fill our match info for each good match based on homography result
     PerImageMatches per_image_good_match;
     CHECK_EQ(per_image.training_points.size(),
              (unsigned long)mask.size().height);
@@ -359,22 +381,46 @@
           static_cast<std::vector<cv::DMatch>>(*per_image.matches[j]));
 
       // Fill out the data for matches per image that made it past
-      // homography check
+      // the homography check, for later use
       per_image_good_match.matches.push_back(per_image.matches[j]);
       per_image_good_match.training_points.push_back(
           per_image.training_points[j]);
       per_image_good_match.training_points_3d.push_back(
           per_image.training_points_3d[j]);
       per_image_good_match.query_points.push_back(per_image.query_points[j]);
-      per_image_good_match.homography = homography;
     }
-    per_image_good_matches.push_back(per_image_good_match);
 
-    // TODO: Use homography to compute target point on query image
+    // OpenCV returns doubles (CV_64F), which don't play well with our
+    // floats, so convert the homography to CV_32F
+    homography.convertTo(homography, CV_32F);
+    per_image_good_match.homography = homography;
+
+    CHECK_GT(per_image_good_match.matches.size(), 0u);
+
+    // Collect the training target location so we can map it onto the
+    // matched image
+    cv::Point2f target_point;
+    float target_radius;
+    TargetLocation((*(per_image_good_match.matches[0]))[0].imgIdx, target_point,
+                   target_radius);
+
+    // Store target_point in a vector for use by perspectiveTransform
+    std::vector<cv::Point2f> src_target_pt;
+    src_target_pt.push_back(target_point);
+    std::vector<cv::Point2f> query_target_pt;
+
+    cv::perspectiveTransform(src_target_pt, query_target_pt, homography);
+
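+    // Scale the radius into the query image using the mean of the
+    // homography's diagonal (x and y) scale terms as an approximation.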
+    float query_target_radius =
+        target_radius *
+        std::abs(homography.at<float>(0, 0) + homography.at<float>(1, 1)) / 2.;
+
+    CHECK_EQ(query_target_pt.size(), 1u);
+    target_point_vector.push_back(query_target_pt[0]);
+    target_radius_vector.push_back(query_target_radius);
 
     // Pose transformations (rotations and translations) for various
     // coordinate frames.  R_X_Y_vec is the Rodrigues (angle-axis)
-    // representation the 3x3 rotation R_X_Y from frame X to frame Y
+    // representation of the 3x3 rotation R_X_Y from frame X to frame Y
 
     // Transform from camera to target frame
     cv::Mat R_camera_target_vec, R_camera_target, T_camera_target;
@@ -444,10 +490,12 @@
   // debugging (features), and one smaller
   SendImageMatchResult(image, keypoints, descriptors, all_good_matches,
                        camera_target_list, field_camera_list,
+                       target_point_vector, target_radius_vector,
                        &detailed_result_sender_, true);
   SendImageMatchResult(image, keypoints, descriptors, all_good_matches,
-                       camera_target_list, field_camera_list, &result_sender_,
-                       false);
+                       camera_target_list, field_camera_list,
+                       target_point_vector, target_radius_vector,
+                       &result_sender_, false);
 }
 
 void CameraReader::ReadImage() {
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 43ea152..3e2daaf 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -133,6 +133,12 @@
 
   // The pose of the camera in the field coordinate frame
   field_to_camera:TransformationMatrix;
+
+  // 2D image coordinate representing target location on the matched image
+  query_target_point_x:float;
+  query_target_point_y:float;
+  // Perceived radius of target circle
+  query_target_point_radius:float;
 }
 
 table ImageMatchResult {
@@ -153,10 +159,6 @@
 
   // Information about the camera which took this image.
   camera_calibration:CameraCalibration;
-
-  // 2D image coordinate representing target location on the matched image
-  target_point_x:float;
-  target_point_y:float;
 }
 
 root_type ImageMatchResult;
diff --git a/y2020/vision/sift/sift_training.fbs b/y2020/vision/sift/sift_training.fbs
index d4fa740..12ad9b4 100644
--- a/y2020/vision/sift/sift_training.fbs
+++ b/y2020/vision/sift/sift_training.fbs
@@ -14,6 +14,8 @@
   // 2D image coordinate representing target location on the training image
   target_point_x:float;
   target_point_y:float;
+  // Radius of target circle
+  target_point_radius:float;
 }
 
 // Represents the information used to match incoming images against.
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 157f9e6..9894ee2 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -18,7 +18,7 @@
     global current_mouse
     current_mouse = (x, y)
     if event == cv2.EVENT_LBUTTONUP:
-        glog.debug("Adding point at %d, %d" % (x,y))
+        glog.debug("Adding point at %d, %d" % (x, y))
         point_list.append([x, y])
     pass
 
@@ -143,20 +143,22 @@
 
     return point_list
 
+
 # Determine whether a given point lies within (or on the border of) a set of
 # polygons. Returns True if it does.
 def point_in_polygons(point, polygons):
     for poly in polygons:
         np_poly = np.asarray(poly)
         dist = cv2.pointPolygonTest(np_poly, (point[0], point[1]), True)
-        if dist >=0:
+        if dist >= 0:
             return True
 
     return False
 
+
 ## Filter keypoints by polygons
 def filter_keypoints_by_polygons(keypoint_list, descriptor_list, polygons):
-    # TODO: Need to make sure we've got the right numpy array / list 
+    # TODO: Need to make sure we've got the right numpy array / list
     keep_keypoint_list = []
     keep_descriptor_list = []
     reject_keypoint_list = []
@@ -166,7 +168,8 @@
 
     # For now, pretend keypoints are just points
     for i in range(len(keypoint_list)):
-        if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]), polygons):
+        if point_in_polygons((keypoint_list[i].pt[0], keypoint_list[i].pt[1]),
+                             polygons):
             keep_list.append(i)
         else:
             reject_list.append(i)
@@ -175,37 +178,42 @@
     reject_keypoint_list = [keypoint_list[kp_ind] for kp_ind in reject_list]
     # Allow function to be called with no descriptors, and just return empty list
     if descriptor_list is not None:
-        keep_descriptor_list = descriptor_list[keep_list,:]
-        reject_descriptor_list = descriptor_list[reject_list,:]
+        keep_descriptor_list = descriptor_list[keep_list, :]
+        reject_descriptor_list = descriptor_list[reject_list, :]
 
     return keep_keypoint_list, keep_descriptor_list, reject_keypoint_list, reject_descriptor_list, keep_list
 
+
 # Helper function that appends a column of ones to a list (of 2d points)
 def append_ones(point_list):
-    return np.hstack([point_list, np.ones((len(point_list),1))])
+    return np.hstack([point_list, np.ones((len(point_list), 1))])
+
 
 # Given a list of 2d points and their corresponding 3d locations, compute the
 # map between them.
 # pt_3d = (pt_2d, 1) * reprojection_map
 # TODO: We should really look at residuals to see if things are messed up
 def compute_reprojection_map(polygon_2d, polygon_3d):
-    pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1,2)
+    pts_2d = np.asarray(np.float32(polygon_2d)).reshape(-1, 2)
     pts_2d_lstsq = append_ones(pts_2d)
-    pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1,3)
+    pts_3d_lstsq = np.asarray(np.float32(polygon_3d)).reshape(-1, 3)
 
     reprojection_map = np.linalg.lstsq(pts_2d_lstsq, pts_3d_lstsq, rcond=-1)[0]
 
     return reprojection_map
 
+
 # Given a set of keypoints (w/ 2d locations), a reprojection map, and a
 # polygon that defines regions where this is valid
 # Returns a numpy array of 3d locations the same size as the keypoint list
 def compute_3d_points(keypoint_2d_list, reprojection_map):
-    pt_2d_lstsq = append_ones(np.asarray(np.float32(keypoint_2d_list)).reshape(-1,2))
+    pt_2d_lstsq = append_ones(
+        np.asarray(np.float32(keypoint_2d_list)).reshape(-1, 2))
     pt_3d = pt_2d_lstsq.dot(reprojection_map)
 
     return pt_3d
 
+
 # Given 2d and 3d locations, and camera location and projection model,
 # display locations on an image
 def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
@@ -246,11 +254,8 @@
 def sample_define_points_by_list_usage():
     image = cv2.imread("test_images/train_power_port_red.png")
 
-    test_points = [(451, 679), (451, 304),
-                   (100, 302), (451, 74),
-                   (689, 74), (689, 302),
-                   (689, 679)]
+    test_points = [(451, 679), (451, 304), (100, 302), (451, 74), (689, 74),
+                   (689, 302), (689, 679)]
 
     polygon_list = define_points_by_list(image, test_points)
     glog.debug(polygon_list)
-
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 651efe2..6db3155 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -107,28 +107,27 @@
             fbb)
 
         # Create the TrainingImage feature vector
-        TrainingImage.TrainingImageStartFeaturesVector(
-            fbb, len(features_vector))
+        TrainingImage.TrainingImageStartFeaturesVector(fbb,
+                                                       len(features_vector))
         for feature in reversed(features_vector):
             fbb.PrependUOffsetTRelative(feature)
         features_vector_offset = fbb.EndVector(len(features_vector))
 
         # Add the TrainingImage data
         TrainingImage.TrainingImageStart(fbb)
-        TrainingImage.TrainingImageAddFeatures(fbb,
-                                                       features_vector_offset)
-        TrainingImage.TrainingImageAddFieldToTarget(
-            fbb, transformation_mat_offset)
+        TrainingImage.TrainingImageAddFeatures(fbb, features_vector_offset)
+        TrainingImage.TrainingImageAddFieldToTarget(fbb,
+                                                    transformation_mat_offset)
         TrainingImage.TrainingImageAddTargetPointX(
             fbb, target_data.target_point_2d[0][0][0])
         TrainingImage.TrainingImageAddTargetPointY(
             fbb, target_data.target_point_2d[0][0][1])
-        images_vector.append(
-            TrainingImage.TrainingImageEnd(fbb))
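+        # Also store the radius of the target circle, used when
+        # visualizing the target on matched images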
+        TrainingImage.TrainingImageAddTargetPointRadius(
+            fbb, target_data.target_radius)
+        images_vector.append(TrainingImage.TrainingImageEnd(fbb))
 
     # Create and add Training Data of all targets
-    TrainingData.TrainingDataStartImagesVector(fbb,
-                                                     len(images_vector))
+    TrainingData.TrainingDataStartImagesVector(fbb, len(images_vector))
     for training_image in reversed(images_vector):
         fbb.PrependUOffsetTRelative(training_image)
     images_vector_table = fbb.EndVector(len(images_vector))
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 4c3f36b..eae358c 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -16,6 +16,9 @@
 USE_BAZEL = True
 VISUALIZE_KEYPOINTS = False
 
+# For now, just use a 32-pixel radius, based on the original training image
+target_radius_default = 32.
+
 
 def bazel_name_fix(filename):
     ret_name = filename
@@ -44,6 +47,7 @@
         self.target_rotation = None
         self.target_position = None
         self.target_point_2d = None
+        self.target_radius = target_radius_default
 
     def extract_features(self, feature_extractor=None):
         if feature_extractor is None:
@@ -73,8 +77,8 @@
             # Filter and project points for each polygon in the list
             filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
                 keypoint_list, None, [self.polygon_list[poly_ind]])
-            glog.info("Filtering kept %d of %d features" % (len(keep_list),
-                                                            len(keypoint_list)))
+            glog.info("Filtering kept %d of %d features" %
+                      (len(keep_list), len(keypoint_list)))
             filtered_point_array = np.asarray(
                 [(keypoint.pt[0], keypoint.pt[1])
                  for keypoint in filtered_keypoints]).reshape(-1, 2)
@@ -114,7 +118,8 @@
     wing_angle = 20. * math.pi / 180.
 
     # Pick the target center location at halfway between top and bottom of the top panel
-    power_port_target_height = (power_port_total_height + power_port_bottom_wing_height) / 2.
+    power_port_target_height = (
+        power_port_total_height + power_port_bottom_wing_height) / 2.
 
     ###
     ### Red Power Port
@@ -132,32 +137,32 @@
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_main_panel_polygon_points_3d = [
-        (-field_length/2., power_port_edge_y, 0.),
-        (-field_length/2., power_port_edge_y,
-         power_port_bottom_wing_height),
-        (-field_length/2., power_port_edge_y
-         - power_port_wing_width, power_port_bottom_wing_height),
-        (-field_length/2., power_port_edge_y,
-         power_port_total_height),
-        (-field_length/2., power_port_edge_y + power_port_width,
-         power_port_total_height),
-        (-field_length/2., power_port_edge_y + power_port_width,
-         power_port_bottom_wing_height),
-        (-field_length/2., power_port_edge_y + power_port_width, 0.)
+        (-field_length / 2., power_port_edge_y,
+         0.), (-field_length / 2., power_port_edge_y,
+               power_port_bottom_wing_height),
+        (-field_length / 2., power_port_edge_y - power_port_wing_width,
+         power_port_bottom_wing_height), (-field_length / 2.,
+                                          power_port_edge_y,
+                                          power_port_total_height),
+        (-field_length / 2., power_port_edge_y + power_port_width,
+         power_port_total_height), (-field_length / 2.,
+                                    power_port_edge_y + power_port_width,
+                                    power_port_bottom_wing_height),
+        (-field_length / 2., power_port_edge_y + power_port_width, 0.)
     ]
 
     power_port_red_wing_panel_polygon_points_2d = [(689, 74), (1022, 302),
                                                    (689, 302)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_red_wing_panel_polygon_points_3d = [
-        (-field_length/2., power_port_edge_y + power_port_width,
+        (-field_length / 2., power_port_edge_y + power_port_width,
          power_port_total_height),
-        (-field_length/2. + power_port_wing_width * math.sin(wing_angle),
-         power_port_edge_y + power_port_width
-         + power_port_wing_width * math.cos(wing_angle),
-         power_port_bottom_wing_height),
-        (-field_length/2., power_port_edge_y + power_port_width,
-         power_port_bottom_wing_height)
+        (-field_length / 2. + power_port_wing_width * math.sin(wing_angle),
+         power_port_edge_y + power_port_width +
+         power_port_wing_width * math.cos(wing_angle),
+         power_port_bottom_wing_height), (-field_length / 2.,
+                                          power_port_edge_y + power_port_width,
+                                          power_port_bottom_wing_height)
     ]
 
     # Populate the red power port
@@ -173,16 +178,17 @@
     # and the skew to target is zero
     ideal_power_port_red.target_rotation = -np.identity(3, np.double)
     ideal_power_port_red.target_rotation[2][2] = 1.
-    ideal_power_port_red.target_position = np.array([-field_length/2.,
-                                                     power_port_edge_y + power_port_width/2.,
-                                                     power_port_target_height])
+    ideal_power_port_red.target_position = np.array([
+        -field_length / 2., power_port_edge_y + power_port_width / 2.,
+        power_port_target_height
+    ])
 
     # Target point on the image -- needs to match training image
     # These are manually captured by examining the images,
     # and entering the pixel values from the target center for each image.
     # These are currently only used for visualization of the target
-    ideal_power_port_red.target_point_2d = np.float32([[570,192]]).reshape(-1,1,2)  # train_power_port_red.png
-
+    ideal_power_port_red.target_point_2d = np.float32([[570, 192]]).reshape(
+        -1, 1, 2)  # train_power_port_red.png
     # np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
 
     # Add the ideal 3D target to our list
@@ -192,6 +198,8 @@
         'test_images/train_power_port_red_webcam.png')
     training_target_power_port_red.target_rotation = ideal_power_port_red.target_rotation
     training_target_power_port_red.target_position = ideal_power_port_red.target_position
+    training_target_power_port_red.target_radius = target_radius_default
+
     training_target_list.append(training_target_power_port_red)
 
     ###
@@ -207,10 +215,11 @@
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_red_polygon_points_3d = [
-        (field_length/2., loading_bay_edge_y + loading_bay_width, 0.),
-        (field_length/2., loading_bay_edge_y + loading_bay_width, loading_bay_height),
-        (field_length/2., loading_bay_edge_y, loading_bay_height),
-        (field_length/2., loading_bay_edge_y, 0.)
+        (field_length / 2., loading_bay_edge_y + loading_bay_width, 0.),
+        (field_length / 2., loading_bay_edge_y + loading_bay_width,
+         loading_bay_height), (field_length / 2., loading_bay_edge_y,
+                               loading_bay_height), (field_length / 2.,
+                                                     loading_bay_edge_y, 0.)
     ]
 
     ideal_loading_bay_red.polygon_list.append(
@@ -219,16 +228,19 @@
         loading_bay_red_polygon_points_3d)
     # Location of target
     ideal_loading_bay_red.target_rotation = np.identity(3, np.double)
-    ideal_loading_bay_red.target_position = np.array([field_length/2.,
-                                                     loading_bay_edge_y + loading_bay_width/2.,
-                                                      loading_bay_height/2.])
-    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_red.png
+    ideal_loading_bay_red.target_position = np.array([
+        field_length / 2., loading_bay_edge_y + loading_bay_width / 2.,
+        loading_bay_height / 2.
+    ])
+    ideal_loading_bay_red.target_point_2d = np.float32([[366, 236]]).reshape(
+        -1, 1, 2)  # train_loading_bay_red.png
 
     ideal_target_list.append(ideal_loading_bay_red)
     training_target_loading_bay_red = TargetData(
         'test_images/train_loading_bay_red.png')
     training_target_loading_bay_red.target_rotation = ideal_loading_bay_red.target_rotation
     training_target_loading_bay_red.target_position = ideal_loading_bay_red.target_position
+    training_target_loading_bay_red.target_radius = target_radius_default
     training_target_list.append(training_target_loading_bay_red)
 
     ###
@@ -246,31 +258,31 @@
 
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_main_panel_polygon_points_3d = [
-        (field_length/2., -power_port_edge_y, 0.),
-        (field_length/2., -power_port_edge_y,
-         power_port_bottom_wing_height),
-        (field_length/2., -power_port_edge_y + power_port_wing_width,
-         power_port_bottom_wing_height),
-        (field_length/2., -power_port_edge_y,
-         power_port_total_height),
-        (field_length/2., -power_port_edge_y - power_port_width,
-         power_port_total_height),
-        (field_length/2., -power_port_edge_y - power_port_width,
-         power_port_bottom_wing_height),
-        (field_length/2., -power_port_edge_y - power_port_width, 0.)
+        (field_length / 2., -power_port_edge_y,
+         0.), (field_length / 2., -power_port_edge_y,
+               power_port_bottom_wing_height),
+        (field_length / 2., -power_port_edge_y + power_port_wing_width,
+         power_port_bottom_wing_height), (field_length / 2.,
+                                          -power_port_edge_y,
+                                          power_port_total_height),
+        (field_length / 2., -power_port_edge_y - power_port_width,
+         power_port_total_height), (field_length / 2.,
+                                    -power_port_edge_y - power_port_width,
+                                    power_port_bottom_wing_height),
+        (field_length / 2., -power_port_edge_y - power_port_width, 0.)
     ]
 
     power_port_blue_wing_panel_polygon_points_2d = [(692, 50), (1047, 285),
                                                     (692, 285)]
     # These are "virtual" 3D points based on the expected geometry
     power_port_blue_wing_panel_polygon_points_3d = [
-        (field_length/2., -power_port_edge_y - power_port_width,
+        (field_length / 2., -power_port_edge_y - power_port_width,
          power_port_total_height),
-        (field_length/2. - power_port_wing_width * math.sin(wing_angle),
+        (field_length / 2. - power_port_wing_width * math.sin(wing_angle),
          -power_port_edge_y - power_port_width -
          power_port_wing_width * math.cos(wing_angle),
          power_port_bottom_wing_height),
-        (field_length/2., -power_port_edge_y - power_port_width,
+        (field_length / 2., -power_port_edge_y - power_port_width,
          power_port_bottom_wing_height)
     ]
 
@@ -282,16 +294,19 @@
 
     # Location of target.  Rotation is pointing in -x direction
     ideal_power_port_blue.target_rotation = np.identity(3, np.double)
-    ideal_power_port_blue.target_position = np.array([field_length/2.,
-                                                     -power_port_edge_y - power_port_width/2.,
-                                                      power_port_target_height])
-    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(-1, 1, 2)  # train_power_port_blue.png
+    ideal_power_port_blue.target_position = np.array([
+        field_length / 2., -power_port_edge_y - power_port_width / 2.,
+        power_port_target_height
+    ])
+    ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(
+        -1, 1, 2)  # train_power_port_blue.png
 
     ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
         'test_images/train_power_port_blue.png')
     training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
     training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
+    training_target_power_port_blue.target_radius = target_radius_default
     training_target_list.append(training_target_power_port_blue)
 
     ###
@@ -308,10 +323,11 @@
 
     # These are "virtual" 3D points based on the expected geometry
     loading_bay_blue_polygon_points_3d = [
-        (-field_length/2., -loading_bay_edge_y - loading_bay_width, 0.),
-        (-field_length/2., -loading_bay_edge_y - loading_bay_width, loading_bay_height),
-        (-field_length/2., -loading_bay_edge_y, loading_bay_height),
-        (-field_length/2., -loading_bay_edge_y, 0.)
+        (-field_length / 2., -loading_bay_edge_y - loading_bay_width, 0.),
+        (-field_length / 2., -loading_bay_edge_y - loading_bay_width,
+         loading_bay_height), (-field_length / 2., -loading_bay_edge_y,
+                               loading_bay_height), (-field_length / 2.,
+                                                     -loading_bay_edge_y, 0.)
     ]
 
     ideal_loading_bay_blue.polygon_list.append(
@@ -322,16 +338,19 @@
     # Location of target
     ideal_loading_bay_blue.target_rotation = -np.identity(3, np.double)
     ideal_loading_bay_blue.target_rotation[2][2] = 1.
-    ideal_loading_bay_blue.target_position = np.array([-field_length/2.,
-                                                     -loading_bay_edge_y - loading_bay_width/2.,
-                                                       loading_bay_height/2.])
-    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(-1, 1, 2)  # train_loading_bay_blue.png
+    ideal_loading_bay_blue.target_position = np.array([
+        -field_length / 2., -loading_bay_edge_y - loading_bay_width / 2.,
+        loading_bay_height / 2.
+    ])
+    ideal_loading_bay_blue.target_point_2d = np.float32([[366, 236]]).reshape(
+        -1, 1, 2)  # train_loading_bay_blue.png
 
     ideal_target_list.append(ideal_loading_bay_blue)
     training_target_loading_bay_blue = TargetData(
         'test_images/train_loading_bay_blue.png')
     training_target_loading_bay_blue.target_rotation = ideal_loading_bay_blue.target_rotation
     training_target_loading_bay_blue.target_position = ideal_loading_bay_blue.target_position
+    training_target_loading_bay_blue.target_radius = target_radius_default
     training_target_list.append(training_target_loading_bay_blue)
 
     # Create feature extractor
@@ -341,7 +360,8 @@
     camera_params = camera_definition.web_cam_params
 
     for ideal_target in ideal_target_list:
-        glog.info("\nPreparing target for image %s" % ideal_target.image_filename)
+        glog.info(
+            "\nPreparing target for image %s" % ideal_target.image_filename)
         ideal_target.extract_features(feature_extractor)
         ideal_target.filter_keypoints_by_polygons()
         ideal_target.compute_reprojection_maps()
@@ -390,7 +410,9 @@
     AUTO_PROJECTION = True
 
     if AUTO_PROJECTION:
-        glog.info("\n\nAuto projection of training keypoints to 3D using ideal images")
+        glog.info(
+            "\n\nAuto projection of training keypoints to 3D using ideal images"
+        )
         # Match the captured training image against the "ideal" training image
         # and use those matches to pin down the 3D locations of the keypoints
 
@@ -399,7 +421,8 @@
             training_target = training_target_list[target_ind]
             ideal_target = ideal_target_list[target_ind]
 
-            glog.info("\nPreparing target for image %s" % training_target.image_filename)
+            glog.info("\nPreparing target for image %s" %
+                      training_target.image_filename)
             # Extract keypoints and descriptors for model
             training_target.extract_features(feature_extractor)
 
@@ -419,7 +442,8 @@
             H_inv = np.linalg.inv(homography_list[0])
 
             for polygon in ideal_target.polygon_list:
-                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(-1, 1, 2)
+                ideal_pts_2d = np.asarray(np.float32(polygon)).reshape(
+                    -1, 1, 2)
                 # We use the ideal target's polygons to define the polygons on
                 # the training target
                 transformed_polygon = cv2.perspectiveTransform(
@@ -427,10 +451,14 @@
                 training_target.polygon_list.append(transformed_polygon)
 
             # Also project the target point from the ideal to training image
-            training_target_point_2d = cv2.perspectiveTransform(np.asarray(ideal_target.target_point_2d).reshape(-1,1,2), H_inv)
-            training_target.target_point_2d = training_target_point_2d.reshape(-1,1,2)
+            training_target_point_2d = cv2.perspectiveTransform(
+                np.asarray(ideal_target.target_point_2d).reshape(-1, 1, 2),
+                H_inv)
+            training_target.target_point_2d = training_target_point_2d.reshape(
+                -1, 1, 2)
 
-            glog.info("Started with %d keypoints" % len(training_target.keypoint_list))
+            glog.info("Started with %d keypoints" % len(
+                training_target.keypoint_list))
 
             training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
                 training_target.keypoint_list, training_target.descriptor_list,
@@ -490,8 +518,17 @@
 
 if __name__ == '__main__':
     ap = argparse.ArgumentParser()
-    ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
-    ap.add_argument("-n", "--no_bazel", help="Don't run using Bazel", default=True, action='store_false')
+    ap.add_argument(
+        "--visualize",
+        help="Whether to visualize the results",
+        default=False,
+        action='store_true')
+    ap.add_argument(
+        "-n",
+        "--no_bazel",
+        help="Don't run using Bazel",
+        default=True,
+        action='store_false')
     args = vars(ap.parse_args())
 
     VISUALIZE_KEYPOINTS = args["visualize"]
diff --git a/y2020/vision/tools/python_code/target_definition_test.py b/y2020/vision/tools/python_code/target_definition_test.py
index 18df1e9..3c4d308 100644
--- a/y2020/vision/tools/python_code/target_definition_test.py
+++ b/y2020/vision/tools/python_code/target_definition_test.py
@@ -26,5 +26,6 @@
     target_data_test_1.target_rotation = np.identity(3, np.double)
     target_data_test_1.target_position = np.array([0., 1., 2.])
     target_data_test_1.target_point_2d = np.array([10., 20.]).reshape(-1, 1, 2)
+    target_data_test_1.target_radius = 32
     target_data_list.append(target_data_test_1)
     return target_data_list
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index 93c607c..f139e71 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -11,6 +11,7 @@
 
 glog.setLevel("WARN")
 
+
 # Calculates rotation matrix to euler angles
 # The result is the same as MATLAB except the order
 # of the euler angles ( x and z are swapped ).
@@ -175,13 +176,16 @@
     for i in range(len(train_keypoint_lists)):
         good_matches = good_matches_list[i]
         if len(good_matches) < MIN_MATCH_COUNT:
-            glog.warn("Not enough matches are for model %d: %d out of needed #: %d" % (i, len(good_matches), MIN_MATCH_COUNT))
+            glog.warn(
+                "Not enough matches for model %d: %d of the needed %d" %
+                (i, len(good_matches), MIN_MATCH_COUNT))
             homography_list.append([])
             matches_mask_list.append([])
             continue
 
-        glog.info("Got good number of matches for model %d: %d (needed only %d)" %
-                  (i, len(good_matches), MIN_MATCH_COUNT))
+        glog.info(
+            "Got a good number of matches for model %d: %d (needed only %d)" %
+            (i, len(good_matches), MIN_MATCH_COUNT))
         # Extract and bundle keypoint locations for computations
         src_pts = np.float32([
             train_keypoint_lists[i][m.trainIdx].pt for m in good_matches
@@ -216,8 +220,8 @@
         matches_mask_count = matches_mask_list[i].count(1)
         if matches_mask_count != len(good_matches):
-            glog.info("Homography rejected some matches!  From ",
-                  len(good_matches), ", only ", matches_mask_count,
-                  " were used")
+            glog.info("Homography rejected some matches! From %d, only %d"
+                      " were used" % (len(good_matches), matches_mask_count))
 
         if matches_mask_count < MIN_MATCH_COUNT:
             glog.info(
@@ -233,7 +237,8 @@
         query_box_pts = cv2.perspectiveTransform(train_box_pts, H)
 
         # Figure out where the training target goes on the query img
-        transformed_target = cv2.perspectiveTransform(target_point_list[i].reshape(-1,1,2), H)
+        transformed_target = cv2.perspectiveTransform(
+            target_point_list[i].reshape(-1, 1, 2), H)
         # Ballpark the size of the circle so it looks right on image
         radius = int(
             32 * abs(H[0][0] + H[1][1]) / 2)  # Average of scale factors
@@ -276,9 +281,11 @@
 #        keypoint_list: List of opencv keypoints
 def show_keypoints(image, keypoint_list):
     ret_img = image.copy()
-    ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img,
-                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+    ret_img = cv2.drawKeypoints(
+        ret_img,
+        keypoint_list,
+        ret_img,
+        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
     cv2.imshow("Keypoints", ret_img)
     cv2.waitKey(0)
     return ret_img
-