Rename distortion coefficients to dist_coeffs

This matches the naming used in the OpenCV docs; it was inconsistent here before.
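
The OpenCV Python API calls this argument distCoeffs (e.g. in cv2.projectPoints and
cv2.solvePnPRansac). A minimal sketch of a projection call with the renamed attribute;
the intrinsics and points below are hypothetical placeholders, not the values from
camera_definition.py:

    import cv2
    import numpy as np

    # Hypothetical 3x3 intrinsic matrix and zero distortion (same shape as
    # web_cam_int.dist_coeffs after this change).
    cam_mat = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    dist_coeffs = np.zeros((5, 1))

    # Project a single 3D point one meter in front of the camera.
    pts_3d = np.array([[[0., 0., 1.]]], dtype=np.float32)
    rvec = np.zeros((3, 1))
    tvec = np.zeros((3, 1))
    pts_2d, jac = cv2.projectPoints(pts_3d, rvec, tvec, cam_mat, dist_coeffs)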

Change-Id: I0136bf6c1f8e79f2c4b32b95dcd139135bb73ea1
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 194ddd3..0667b16 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -6,7 +6,7 @@
 class CameraIntrinsics:
     def __init__(self):
         self.camera_matrix = []
-        self.distortion_coeffs = []
+        self.dist_coeffs = []
 
     pass
 
@@ -40,7 +40,7 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5, 1))
+web_cam_int.dist_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
diff --git a/y2020/vision/tools/python_code/camera_definition_test.py b/y2020/vision/tools/python_code/camera_definition_test.py
index 65d1b68..732dbb8 100644
--- a/y2020/vision/tools/python_code/camera_definition_test.py
+++ b/y2020/vision/tools/python_code/camera_definition_test.py
@@ -6,7 +6,7 @@
 class CameraIntrinsics:
     def __init__(self):
         self.camera_matrix = []
-        self.distortion_coeffs = []
+        self.dist_coeffs = []
 
 
 class CameraExtrinsics:
@@ -38,7 +38,7 @@
 # Define a web_cam
 web_cam_int = CameraIntrinsics()
 web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.distortion_coeffs = np.zeros((5, 1))
+web_cam_int.dist_coeffs = np.zeros((5, 1))
 
 web_cam_ext = CameraExtrinsics()
 # Camera rotation from robot x,y,z to opencv (z, -x, -y)
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 4fdc2fe..483fe75 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -254,12 +254,12 @@
       << "Mismatch on camera intrinsic matrix: " << intrinsic << "\nvs.\n"
       << camera_params.camera_intrinsic_matrix_;
 
-  float dist_coeff_mat[5] = {0., 0., 0., 0., 0.};
-  cv::Mat dist_coeff = cv::Mat(5, 1, CV_32F, dist_coeff_mat);
-  cv::Mat dist_coeff_diff = (dist_coeff != camera_params.camera_dist_coeffs_);
-  bool dist_coeff_eq = (cv::countNonZero(dist_coeff_diff) == 0);
-  ASSERT_TRUE(dist_coeff_eq)
-      << "Mismatch on camera distortion coefficients: " << dist_coeff
+  float dist_coeffs_mat[5] = {0., 0., 0., 0., 0.};
+  cv::Mat dist_coeffs = cv::Mat(5, 1, CV_32F, dist_coeffs_mat);
+  cv::Mat dist_coeffs_diff = (dist_coeffs != camera_params.camera_dist_coeffs_);
+  bool dist_coeffs_eq = (cv::countNonZero(dist_coeffs_diff) == 0);
+  ASSERT_TRUE(dist_coeffs_eq)
+      << "Mismatch on camera distortion coefficients: " << dist_coeffs
       << "\nvs.\n"
       << camera_params.camera_dist_coeffs_;
 
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 9894ee2..8116fb7 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -216,16 +216,16 @@
 
 # Given 2d and 3d locations, and camera location and projection model,
 # display locations on an image
-def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, distortion_coeffs):
+def visualize_reprojections(img, pts_2d, pts_3d, cam_mat, dist_coeffs):
     # Compute camera location
     # TODO: Warn on bad inliers
     # TODO: Change this to not have to recast to np
     pts_2d_np = np.asarray(np.float32(pts_2d)).reshape(-1, 1, 2)
     pts_3d_np = np.asarray(np.float32(pts_3d)).reshape(-1, 1, 3)
     retval, R, T, inliers = cv2.solvePnPRansac(pts_3d_np, pts_2d_np, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
-                                               distortion_coeffs)
+                                               dist_coeffs)
     if inliers is None:
         glog.warn("WARNING: Didn't get any inliers when reprojecting polygons")
         return img
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index 09d2764..455bc30 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -173,7 +173,7 @@
 
         # Load from camera parameters
         cam_mat = camera_params.camera_int.camera_matrix
-        distortion_coeffs = camera_params.camera_int.distortion_coeffs
+        dist_coeffs = camera_params.camera_int.dist_coeffs
 
         # Create list of matching query point locations
         dst_pts = np.float32([
@@ -184,7 +184,7 @@
         # TODO: May want to supply it with estimated guess as starting point
         # Find offset from camera to original location of camera relative to target
         retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
-            src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
+            src_pts_3d_array, dst_pts, cam_mat, dist_coeffs)
 
         tf = time.monotonic()
         print("Solving Pose took ", tf - ts, " secs")
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index bf58e5f..0ce70b7 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -161,13 +161,12 @@
             fbb.PrependFloat32(n)
         intrinsics_vector = fbb.EndVector(len(camera_int_list))
 
-        dist_coeff_list = camera_calib.camera_int.distortion_coeffs.ravel(
-        ).tolist()
+        dist_coeffs_list = camera_calib.camera_int.dist_coeffs.ravel().tolist()
         CameraCalibration.CameraCalibrationStartDistCoeffsVector(
-            fbb, len(dist_coeff_list))
-        for n in reversed(dist_coeff_list):
+            fbb, len(dist_coeffs_list))
+        for n in reversed(dist_coeffs_list):
             fbb.PrependFloat32(n)
-        dist_coeff_vector = fbb.EndVector(len(dist_coeff_list))
+        dist_coeffs_vector = fbb.EndVector(len(dist_coeffs_list))
 
         node_name_offset = fbb.CreateString(camera_calib.node_name)
         CameraCalibration.CameraCalibrationStart(fbb)
@@ -177,7 +176,7 @@
         CameraCalibration.CameraCalibrationAddIntrinsics(
             fbb, intrinsics_vector)
         CameraCalibration.CameraCalibrationAddDistCoeffs(
-            fbb, dist_coeff_vector)
+            fbb, dist_coeffs_vector)
         CameraCalibration.CameraCalibrationAddFixedExtrinsics(
             fbb, fixed_extrinsics_vector)
         camera_calibration_vector.append(
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index eae358c..fb2f03a 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -384,7 +384,7 @@
                     dtd.visualize_reprojections(
                         img_copy, ideal_pts_tmp, ideal_pts_3d_tmp,
                         camera_params.camera_int.camera_matrix,
-                        camera_params.camera_int.distortion_coeffs)
+                        camera_params.camera_int.dist_coeffs)
 
             for polygon in ideal_target.polygon_list:
                 img_copy = ideal_target.image.copy()
@@ -401,7 +401,7 @@
                     np.asarray(kp_in_poly2d).reshape(-1, 2),
                     np.asarray(kp_in_poly3d).reshape(
                         -1, 3), camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     ###############
     ### Compute 3D points on actual training images
@@ -510,7 +510,7 @@
                 dtd.visualize_reprojections(
                     img_copy, kp_tmp, training_target.keypoint_list_3d,
                     camera_params.camera_int.camera_matrix,
-                    camera_params.camera_int.distortion_coeffs)
+                    camera_params.camera_int.dist_coeffs)
 
     y2020_target_list = training_target_list
     return y2020_target_list
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index f139e71..1def399 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -41,7 +41,7 @@
         # Load image (in color; let opencv convert to B&W for features)
         img_data = cv2.imread(im)
         if img_data is None:
-            glog.error("Failed to load image: ", im)
+            glog.error("Failed to load image: '%s'" % im)
             exit()
         else:
             image_list.append(img_data)
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
index 00f68bb..01fe695 100644
--- a/y2020/vision/tools/python_code/transform_projection_test.py
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -8,7 +8,7 @@
 # Import camera from camera_definition
 camera_params = camera_definition.web_cam_params
 cam_mat = camera_params.camera_int.camera_matrix
-distortion_coeffs = camera_params.camera_int.distortion_coeffs
+dist_coeffs = camera_params.camera_int.dist_coeffs
 
 # Height of camera on robot above ground
 cam_above_ground = 0.5
@@ -48,11 +48,11 @@
 
 pts_proj_2d_cam2, jac_2d = cv2.projectPoints(
     pts_3d_target, R_cam_cam2_gt_mat_inv, T_cam_cam2_gt_inv, cam_mat,
-    distortion_coeffs)
+    dist_coeffs)
 
 # Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
 retval, R_cam2_cam_est, T_cam2_cam_est, inliers = cv2.solvePnPRansac(
-    pts_3d_target, pts_proj_2d_cam2, cam_mat, distortion_coeffs)
+    pts_3d_target, pts_proj_2d_cam2, cam_mat, dist_coeffs)
 
 # This is the pose from camera to original target spot.  We need to invert to get back to the pose we want
 R_cam_cam2_est_mat = cv2.Rodrigues(R_cam2_cam_est)[0].T