Adding calibration files for robot using cafeteria target

Temporarily removing Blue Power Port panel

Also, removing bazel conditionals -- just try to load bazel tools, and ignore if it fails

Change-Id: If158322fafc9a4c19df2ee6d481bc641ec1ca640
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index f38a40e..64583d0 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -228,6 +228,9 @@
   std::vector<flatbuffers::Offset<sift::CameraPose>> camera_poses;
 
   CHECK_EQ(camera_target_list.size(), field_camera_list.size());
+  // TODO<Jim>: Need to figure out why these aren't the same size
+  // And why we're only sending one camera pose
+  //CHECK_EQ(camera_target_list.size(), matches.size());
   for (size_t i = 0; i < camera_target_list.size(); ++i) {
     cv::Mat camera_target = camera_target_list[i];
     CHECK(camera_target.isContinuous());
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.txt b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.txt
new file mode 100644
index 0000000..cf45ef0
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.txt
@@ -0,0 +1 @@
+{"hostname": "pi-971-1", "node_name": "pi1", "team_number": 971, "timestamp": "2020-03-07-15-34-00", "camera_matrix": [[387.95046316, 0.0, 341.13297242], [0.0, 387.85366427, 245.69219733], [0.0, 0.0, 1.0]], "dist_coeffs": [[ 0.13594152, -0.23946991, -0.00088608,  0.00038653,  0.08745377]]}
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 4d8929a..578c9cc 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -5,17 +5,9 @@
 import numpy as np
 import os
 
+import define_training_data as dtd
+
 glog.setLevel("WARN")
-USE_BAZEL = True
-
-
-def bazel_name_fix(filename):
-    ret_name = filename
-    if USE_BAZEL:
-        ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
-
-    return ret_name
-
 
 class CameraIntrinsics:
     def __init__(self):
@@ -39,69 +31,78 @@
         self.team_number = -1
 
 
-### CAMERA DEFINITIONS
+def load_camera_definitions():
+    ### CAMERA DEFINITIONS
 
-# Robot camera has:
-# FOV_H = 93.*math.pi()/180.
-# FOV_V = 70.*math.pi()/180.
+    # Robot camera has:
+    # FOV_H = 93.*math.pi()/180.
+    # FOV_V = 70.*math.pi()/180.
 
-# Create fake camera (based on USB webcam params)
-fx = 810.
-fy = 810.
-cx = 320.
-cy = 240.
+    # Create fake camera (based on USB webcam params)
+    fx = 810.
+    fy = 810.
+    cx = 320.
+    cy = 240.
 
-# Define a web_cam
-web_cam_int = CameraIntrinsics()
-web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.dist_coeffs = np.zeros((5, 1))
+    # Define a web_cam
+    web_cam_int = CameraIntrinsics()
+    web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy],
+                                            [0, 0, 1]])
+    web_cam_int.dist_coeffs = np.zeros((5, 1))
 
-web_cam_ext = CameraExtrinsics()
-# Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
-web_cam_ext.T = np.array([0., 0., 0.])
+    web_cam_ext = CameraExtrinsics()
+    # Camera rotation from robot x,y,z to opencv (z, -x, -y)
+    # This is extrinsics for the turret camera
+    # camera pose relative to center, base of the turret
+    # TODO<Jim>: Need to implement per-camera calibration, like with intrinsics
+    camera_pitch = 34.0 * np.pi / 180.0
+    camera_pitch_matrix = np.matrix(
+        [[np.cos(camera_pitch), 0.0, -np.sin(camera_pitch)], [0.0, 1.0, 0.0],
+         [np.sin(camera_pitch), 0.0,
+          np.cos(camera_pitch)]])
+    web_cam_ext.R = np.array(camera_pitch_matrix * np.matrix(
+        [[0., 0., 1.], [-1, 0, 0], [0, -1., 0]]))
+    # Tape measure calibration-- need to pull from CAD or automate this
+    web_cam_ext.T = np.array([2.0 * 0.0254, -6.0 * 0.0254, 41.0 * 0.0254])
 
-web_cam_params = CameraParameters()
-web_cam_params.camera_int = web_cam_int
-web_cam_params.camera_ext = web_cam_ext
+    web_cam_params = CameraParameters()
+    web_cam_params.camera_int = web_cam_int
+    web_cam_params.camera_ext = web_cam_ext
 
-camera_list = []
+    camera_list = []
 
-# TODO<Jim>: Should probably make this a dict to make replacing easier
-for team_number in (971, 7971, 8971, 9971):
-    for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
-        camera_base = copy.deepcopy(web_cam_params)
-        camera_base.node_name = node_name
-        camera_base.team_number = team_number
-        camera_list.append(camera_base)
+    # TODO<Jim>: Should probably make this a dict to make replacing easier
+    for team_number in (971, 7971, 8971, 9971):
+        for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
+            camera_base = copy.deepcopy(web_cam_params)
+            camera_base.node_name = node_name
+            camera_base.team_number = team_number
+            camera_list.append(camera_base)
 
-dir_name = ('calib_files')
+    dir_name = dtd.bazel_name_fix('calib_files')
+    for filename in os.listdir(dir_name):
+        if "cam-calib-int" in filename and filename.endswith(".txt"):
+            # Extract intrinsics from file
+            fn_split = filename.split("_")
+            hostname_split = fn_split[1].split("-")
+            if hostname_split[0] == "pi":
+                team_number = int(hostname_split[1])
+                node_name = hostname_split[0] + hostname_split[2]
 
-if USE_BAZEL:
-    from bazel_tools.tools.python.runfiles import runfiles
-    r = runfiles.Create()
-    dir_name = r.Rlocation(bazel_name_fix('calib_files'))
+            calib_file = open(dir_name + "/" + filename, 'r')
+            calib_dict = json.loads(calib_file.read())
+            hostname = np.asarray(calib_dict["hostname"])
+            camera_matrix = np.asarray(calib_dict["camera_matrix"])
+            dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
 
-for filename in os.listdir(dir_name):
-    if "cam-calib-int" in filename and filename.endswith(".txt"):
-        # Extract intrinsics from file
-        fn_split = filename.split("_")
-        hostname_split = fn_split[1].split("-")
-        if hostname_split[0] == "pi":
-            team_number = int(hostname_split[1])
-            node_name = hostname_split[0] + hostname_split[2]
+            # Look for match, and replace camera_intrinsics
+            for camera_calib in camera_list:
+                if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
+                    glog.info("Found calib for %s, team #%d" % (node_name,
+                                                                team_number))
+                    camera_calib.camera_int.camera_matrix = copy.copy(
+                        camera_matrix)
+                    camera_calib.camera_int.dist_coeffs = copy.copy(
+                        dist_coeffs)
 
-        calib_file = open(dir_name + "/" + filename, 'r')
-        calib_dict = json.loads(calib_file.read())
-        hostname = np.asarray(calib_dict["hostname"])
-        camera_matrix = np.asarray(calib_dict["camera_matrix"])
-        dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
-
-        # Look for match, and replace camera_intrinsics
-        for camera_calib in camera_list:
-            if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
-                glog.info("Found calib for %s, team #%d" % (node_name,
-                                                            team_number))
-                camera_calib.camera_int.camera_matrix = copy.copy(
-                    camera_matrix)
-                camera_calib.camera_int.dist_coeffs = copy.copy(dist_coeffs)
+    return camera_list
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 8116fb7..76e73da 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -244,6 +244,18 @@
     return img
 
 
+def bazel_name_fix(filename):
+    ret_name = filename
+    try:
+        from bazel_tools.tools.python.runfiles import runfiles
+        r = runfiles.Create()
+        ret_name = r.Rlocation('org_frc971/y2020/vision/tools/python_code/' + filename)
+    except:
+        pass
+
+    return ret_name
+
+
 def sample_define_polygon_usage():
     image = cv2.imread("test_images/train_power_port_red.png")
 
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index 455bc30..8d16104 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -10,8 +10,9 @@
 
 ### DEFINITIONS
 target_definition.USE_BAZEL = False
+camera_definition.USE_BAZEL = False
 target_list = target_definition.compute_target_definition()
-camera_list = camera_definition.camera_list
+camera_list = camera_definition.load_camera_definitions()
 
 # For now, just use the first one
 camera_params = camera_list[0]
@@ -29,12 +30,14 @@
     'test_images/test_raspi3_sample.jpg',  #7
     'test_images/test_VR_sample1.png',  #8
     'test_images/train_loading_bay_blue.png',  #9
-    'test_images/train_loading_bay_red.png'  #10
+    'test_images/train_loading_bay_red.png',  #10
+    'test_images/pi-7971-3_test_image.png',  #11
+    'sample_images/capture-2020-02-13-16-40-07.png',
 ]
 
 training_image_index = 0
 # TODO: Should add argParser here to select this
-query_image_index = 0  # Use -1 to use camera capture; otherwise index above list
+query_image_index = 12  # Use -1 to use camera capture; otherwise index above list
 
 ##### Let's get to work!
 
@@ -162,8 +165,6 @@
         for m in good_matches:
             src_pts_3d.append(target_list[i].keypoint_list_3d[m.trainIdx])
             pt = query_keypoint_lists[0][m.queryIdx].pt
-            print("Color at ", pt, " is ", query_images[0][int(pt[1])][int(
-                pt[0])])
             query_images[0] = cv2.circle(
                 query_images[0], (int(pt[0]), int(pt[1])), 5, (0, 255, 0), 3)
 
diff --git a/y2020/vision/tools/python_code/image_stream.py b/y2020/vision/tools/python_code/image_stream.py
new file mode 100644
index 0000000..2cd5ac7
--- /dev/null
+++ b/y2020/vision/tools/python_code/image_stream.py
@@ -0,0 +1,34 @@
+import cv2
+import datetime
+# Open the device at the ID X for /dev/videoX
+CAMERA_INDEX = 0
+cap = cv2.VideoCapture(CAMERA_INDEX)
+
+#Check whether user selected camera is opened successfully.
+if not (cap.isOpened()):
+    print("Could not open video device /dev/video%d" % CAMERA_INDEX)
+    quit()
+
+while True:
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+
+    exp = cap.get(cv2.CAP_PROP_EXPOSURE)
+    #print("Exposure:", exp)
+    # Display the resulting frame
+    cv2.imshow('preview', frame)
+
+    #Waits for a user input to capture image or quit the application
+    keystroke = cv2.waitKey(1)
+
+    if keystroke & 0xFF == ord('q'):
+        break
+    elif keystroke & 0xFF == ord('c'):
+        image_name = datetime.datetime.today().strftime(
+            "capture-%b-%d-%Y-%H-%M-%S.png")
+        print("Capturing image as %s" % image_name)
+        cv2.imwrite(image_name, frame)
+
+# When everything's done, release the capture
+cap.release()
+cv2.destroyAllWindows()
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 0ce70b7..5ca574d 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -50,7 +50,7 @@
         import camera_definition
         import target_definition
         target_data_list = target_definition.compute_target_definition()
-        camera_calib_list = camera_definition.camera_list
+        camera_calib_list = camera_definition.load_camera_definitions()
 
     glog.info("Writing file to ", output_path)
 
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 430e83f..6080ac2 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -12,31 +12,16 @@
 # TODO<Jim>: Allow command-line setting of logging level
 glog.setLevel("WARN")
 global VISUALIZE_KEYPOINTS
-global USE_BAZEL
-USE_BAZEL = True
 VISUALIZE_KEYPOINTS = False
 
 # For now, just have a 32 pixel radius, based on original training image
 target_radius_default = 32.
 
 
-def bazel_name_fix(filename):
-    ret_name = filename
-    if USE_BAZEL:
-        ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
-
-    return ret_name
-
-
 class TargetData:
     def __init__(self, filename):
-        self.image_filename = filename
+        self.image_filename = dtd.bazel_name_fix(filename)
         # Load an image (will come in as a 1-element list)
-        if USE_BAZEL:
-            from bazel_tools.tools.python.runfiles import runfiles
-            r = runfiles.Create()
-            self.image_filename = r.Rlocation(
-                bazel_name_fix(self.image_filename))
         self.image = tam.load_images([self.image_filename])[0]
         self.polygon_list = []
         self.polygon_list_3d = []
@@ -90,7 +75,7 @@
         return point_list_3d
 
 
-def compute_target_definition():
+def load_training_data():
     ############################################################
     # TARGET DEFINITIONS
     ############################################################
@@ -121,6 +106,76 @@
     power_port_target_height = (
         power_port_total_height + power_port_bottom_wing_height) / 2.
 
+    ### Cafeteria target definition
+    inch_to_meter = 0.0254
+    c_power_port_total_height = (79.5 + 39.5) * inch_to_meter
+    c_power_port_edge_y = 1.089
+    c_power_port_width = 4.0 * 12 * inch_to_meter
+    c_power_port_bottom_wing_height = 79.5 * inch_to_meter
+    c_power_port_wing_width = 47.5 * inch_to_meter
+    c_power_port_white_marker_z = (79.5 - 19.5) * inch_to_meter
+
+    # Pick the target center location at halfway between top and bottom of the top panel
+    c_power_port_target_height = (
+        power_port_total_height + power_port_bottom_wing_height) / 2.
+
+    ###
+    ### Cafe power port
+    ###
+
+    # Create the reference "ideal" image
+    ideal_power_port_cafe = TargetData(
+        'test_images/train_cafeteria-2020-02-13-16-27-25.png')
+
+    # Start at lower left corner, and work around clockwise
+    # These are taken by manually finding the points in gimp for this image
+    power_port_cafe_main_panel_polygon_points_2d = [(271, 456), (278, 394),
+                                                    (135, 382), (286, 294),
+                                                    (389, 311), (397,
+                                                                 403), (401,
+                                                                        458)]
+
+    # These are "virtual" 3D points based on the expected geometry
+    power_port_cafe_main_panel_polygon_points_3d = [
+        (field_length / 2., -c_power_port_edge_y,
+         c_power_port_white_marker_z), (field_length / 2.,
+                                        -c_power_port_edge_y,
+                                        c_power_port_bottom_wing_height),
+        (field_length / 2., -c_power_port_edge_y + c_power_port_wing_width,
+         c_power_port_bottom_wing_height), (field_length / 2.,
+                                            -c_power_port_edge_y,
+                                            c_power_port_total_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_total_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_bottom_wing_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_white_marker_z)
+    ]
+
+    # Populate the cafe power port
+    ideal_power_port_cafe.polygon_list.append(
+        power_port_cafe_main_panel_polygon_points_2d)
+    ideal_power_port_cafe.polygon_list_3d.append(
+        power_port_cafe_main_panel_polygon_points_3d)
+
+    # Location of target.  Rotation is pointing in -x direction
+    ideal_power_port_cafe.target_rotation = np.identity(3, np.double)
+    ideal_power_port_cafe.target_position = np.array([
+        field_length / 2., -c_power_port_edge_y - c_power_port_width / 2.,
+        c_power_port_target_height
+    ])
+    ideal_power_port_cafe.target_point_2d = np.float32([[340, 350]]).reshape(
+        -1, 1, 2)  # train_cafeteria-2020-02-13-16-27-25.png
+
+    ideal_target_list.append(ideal_power_port_cafe)
+    training_target_power_port_cafe = TargetData(
+        'test_images/train_cafeteria-2020-02-13-16-27-25.png')
+    training_target_power_port_cafe.target_rotation = ideal_power_port_cafe.target_rotation
+    training_target_power_port_cafe.target_position = ideal_power_port_cafe.target_position
+    training_target_power_port_cafe.target_radius = target_radius_default
+    training_target_list.append(training_target_power_port_cafe)
+
     ###
     ### Red Power Port
     ###
@@ -302,13 +357,15 @@
     ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(
         -1, 1, 2)  # ideal_power_port_blue.png
 
-    ideal_target_list.append(ideal_power_port_blue)
+    #### TEMPORARILY DISABLING the BLUE POWER PORT target
+    #ideal_target_list.append(ideal_power_port_blue)
     training_target_power_port_blue = TargetData(
         'test_images/train_power_port_blue.png')
     training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
     training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
     training_target_power_port_blue.target_radius = target_radius_default
-    training_target_list.append(training_target_power_port_blue)
+    #### TEMPORARILY DISABLING the BLUE POWER PORT target
+    #training_target_list.append(training_target_power_port_blue)
 
     ###
     ### Blue Loading Bay
@@ -354,11 +411,17 @@
     training_target_loading_bay_blue.target_radius = target_radius_default
     training_target_list.append(training_target_loading_bay_blue)
 
+    return ideal_target_list, training_target_list
+
+
+def compute_target_definition():
+    ideal_target_list, training_target_list = load_training_data()
+
     # Create feature extractor
     feature_extractor = tam.load_feature_extractor()
 
     # Use webcam parameters for now
-    camera_params = camera_definition.web_cam_params
+    camera_params = camera_definition.load_camera_definitions()[0]
 
     for ideal_target in ideal_target_list:
         glog.info(
@@ -524,20 +587,10 @@
         help="Whether to visualize the results",
         default=False,
         action='store_true')
-    ap.add_argument(
-        "-n",
-        "--no_bazel",
-        help="Don't run using Bazel",
-        default=True,
-        action='store_false')
     args = vars(ap.parse_args())
 
     VISUALIZE_KEYPOINTS = args["visualize"]
     if args["visualize"]:
         glog.info("Visualizing results")
 
-    USE_BAZEL = args["no_bazel"]
-    if args["no_bazel"]:
-        glog.info("Running on command line (no Bazel)")
-
     compute_target_definition()
diff --git a/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png b/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png
new file mode 100644
index 0000000..be67176
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png
Binary files differ