Merge changes I24c90237,I4bf3aba1,If158322f
* changes:
Fix broken calibrate_intrinsics; write calibration files to JSON
Send only the first match (of two choices); this keeps us from sending bad matches
Add calibration files for robot using cafeteria target
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index f38a40e..d0479d2 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -521,14 +521,15 @@
std::vector<std::vector<sift::Match>> per_image_matches(
number_training_images());
for (const std::vector<cv::DMatch> &image_matches : matches) {
- for (const cv::DMatch &image_match : image_matches) {
- CHECK_LT(image_match.imgIdx, number_training_images());
- per_image_matches[image_match.imgIdx].emplace_back();
- sift::Match *const match = &per_image_matches[image_match.imgIdx].back();
- match->mutate_query_feature(image_match.queryIdx);
- match->mutate_train_feature(image_match.trainIdx);
- match->mutate_distance(image_match.distance);
- }
+ CHECK_GT(image_matches.size(), 0u);
+    // Only use the first (best) of the two matches.
+ const cv::DMatch &image_match = image_matches[0];
+ CHECK_LT(image_match.imgIdx, number_training_images());
+ per_image_matches[image_match.imgIdx].emplace_back();
+ sift::Match *const match = &per_image_matches[image_match.imgIdx].back();
+ match->mutate_query_feature(image_match.queryIdx);
+ match->mutate_train_feature(image_match.trainIdx);
+ match->mutate_distance(image_match.distance);
}
// Then, we need to build up each ImageMatch table.
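Note on the hunk above: the matcher presumably returns the two nearest training features per query feature (knnMatch with k=2), and this change forwards only the nearest one instead of both. A minimal Python sketch of the same selection logic (names are illustrative, not project code):

    # knn_matches: one list of (up to) two cv2.DMatch entries per query
    # feature, sorted by ascending distance, as returned by knnMatch(..., k=2).
    def first_matches_only(knn_matches):
        firsts = []
        for image_matches in knn_matches:
            assert len(image_matches) > 0  # mirrors the CHECK_GT above
            # Keep only the nearest neighbor; the second-best candidate is
            # dropped instead of being forwarded as a (likely bad) match.
            firsts.append(image_matches[0])
        return firsts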
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 1892a88..a55cb23 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -12,7 +12,7 @@
args = [
"sift_training_data.h",
],
- data = glob(["calib_files/*.txt"]) + glob([
+ data = glob(["calib_files/*.json"]) + glob([
"test_images/*.png",
]),
default_python_version = "PY3",
@@ -60,7 +60,7 @@
"sift_training_data_test.h",
"test",
],
- data = glob(["calib_files/*.txt"]) + glob([
+ data = glob(["calib_files/*.json"]) + glob([
"test_images/*.png",
]),
default_python_version = "PY3",
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.json
similarity index 100%
rename from y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.txt
rename to y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.json
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.json b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.json
new file mode 100644
index 0000000..cf45ef0
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-971-1_2020-03-07-15-34-00.json
@@ -0,0 +1 @@
+{"hostname": "pi-971-1", "node_name": "pi1", "team_number": 971, "timestamp": "2020-03-07-15-34-00", "camera_matrix": [[387.95046316, 0.0, 341.13297242], [0.0, 387.85366427, 245.69219733], [0.0, 0.0, 1.0]], "dist_coeffs": [[ 0.13594152, -0.23946991, -0.00088608, 0.00038653, 0.08745377]]}
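For reference, this is the JSON schema that calibrate_intrinsics.py now writes and camera_definition.py reads back. A minimal sketch of loading it, mirroring the loader later in this diff:

    import json
    import numpy as np

    with open("cam-calib-int_pi-971-1_2020-03-07-15-34-00.json") as f:
        calib_dict = json.load(f)

    camera_matrix = np.asarray(calib_dict["camera_matrix"])  # 3x3 intrinsics
    dist_coeffs = np.asarray(calib_dict["dist_coeffs"])      # 1x5 distortion
    assert camera_matrix.shape == (3, 3)
    assert dist_coeffs.shape == (1, 5)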
diff --git a/y2020/vision/tools/python_code/calibrate_intrinsics.py b/y2020/vision/tools/python_code/calibrate_intrinsics.py
index 828a9a9..fb95599 100644
--- a/y2020/vision/tools/python_code/calibrate_intrinsics.py
+++ b/y2020/vision/tools/python_code/calibrate_intrinsics.py
@@ -1,7 +1,6 @@
import cv2
import cv2.aruco
import datetime
-import glog
import json
from json import JSONEncoder
import numpy as np
@@ -23,9 +22,10 @@
def get_robot_info(hostname):
- hostname_split = hostname[1].split("-")
+ hostname_split = hostname.split("-")
if hostname_split[0] != "pi":
- print("ERROR: expected hostname to start with pi!")
+ print(
+ "ERROR: expected hostname to start with pi! Got '%s'" % hostname)
quit()
team_number = int(hostname_split[1])
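The fix above drops a stray [1] index so the full hostname string is split. Under the naming convention used here, the parse works out as follows (node_name assembly is assumed to match camera_definition.py):

    hostname = "pi-971-1"
    hostname_split = hostname.split("-")               # ["pi", "971", "1"]
    team_number = int(hostname_split[1])               # 971
    node_name = hostname_split[0] + hostname_split[2]  # "pi1"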
@@ -33,11 +33,10 @@
return node_name, team_number
-USE_LARGE_BOARD = False
+USE_LARGE_BOARD = True
if USE_LARGE_BOARD:
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)
- # Need to measure what the second size parameter is (markerLength)
board = cv2.aruco.CharucoBoard_create(12, 9, .06, .045, dictionary)
img = board.draw((200 * 12, 200 * 9))
else:
@@ -46,7 +45,7 @@
img = board.draw((200 * 11, 200 * 8))
#Dump the calibration board to a file
-cv2.imwrite('charuco.png', img)
+#cv2.imwrite('charuco.png', img)
#Start capturing images for calibration
CAMERA_INDEX = 0 # Capture from /dev/videoX, where X=CAMERA_INDEX
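For reference, cv2.aruco.CharucoBoard_create takes (squaresX, squaresY, squareLength, markerLength, dictionary), with lengths in meters; so the large-board call above encodes a 12x9 board with 60 mm squares and 45 mm markers. A restatement with the parameters labeled:

    import cv2.aruco

    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)
    # 12x9 squares, 60 mm chessboard squares, 45 mm ArUco markers
    board = cv2.aruco.CharucoBoard_create(12, 9, .06, .045, dictionary)
    # draw() takes the rendered image size in pixels (~200 px per square here)
    img = board.draw((200 * 12, 200 * 9))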
@@ -75,7 +74,7 @@
if keystroke & 0xFF == ord('q'):
break
elif keystroke & 0xFF == ord('c'):
- glog.info("Asked to capture image")
+ print("Asked to capture image")
if len(res[0]) == 0 or len(res[1]) == 0:
# Can't use this image
continue
@@ -86,7 +85,7 @@
charuco_detect_image = frame.copy()
allCorners.append(res2[1])
allIds.append(res2[2])
- glog.info("Capturing image #%d" % capture_count)
+ print("Capturing image #%d" % capture_count)
cv2.aruco.drawDetectedCornersCharuco(charuco_detect_image, res2[1],
res2[2])
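For context, res and res2 above come from the (elided) ChArUco detection step; a hypothetical reconstruction of that flow, using the standard OpenCV return tuples:

    # detectMarkers returns (marker_corners, marker_ids, rejected_candidates),
    # so the res[0]/res[1] checks above test for any detected markers.
    res = cv2.aruco.detectMarkers(gray, dictionary)
    # interpolateCornersCharuco returns (count, charuco_corners, charuco_ids),
    # hence res2[1]/res2[2] feeding allCorners/allIds above.
    res2 = cv2.aruco.interpolateCornersCharuco(res[0], res[1], gray, board)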
@@ -99,14 +98,14 @@
imsize = gray.shape
cal = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, imsize,
None, None)
- glog.info("Calibration is:\n", cal)
- glog.info("Reproduction error:", cal[0])
+ print("Calibration is:\n", cal)
+    print("Reprojection error:", cal[0])
if (cal[0] > 1.0):
- glog.error("REPRODUCTION ERROR NOT GOOD")
- glog.info("Calibration matrix:\n", cal[1])
- glog.info("Distortion Coefficients:\n", cal[2])
+        print("REPROJECTION ERROR NOT GOOD")
+ print("Calibration matrix:\n", cal[1])
+ print("Distortion Coefficients:\n", cal[2])
except:
- glog.error("Calibration failed")
+ print("Calibration failed")
cap.release()
quit()
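cal is the tuple returned by cv2.aruco.calibrateCameraCharuco, so the indexing above corresponds to:

    # (rms_reprojection_error, camera_matrix, dist_coeffs, rvecs, tvecs)
    reproj_error, camera_matrix, dist_coeffs, rvecs, tvecs = cal
    # cal[0] is the RMS reprojection error checked against 1.0 above;
    # cal[1] is the 3x3 intrinsic matrix, cal[2] the distortion coefficients.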
@@ -125,7 +124,7 @@
numpyData, cls=NumpyArrayEncoder) # use dump() to write array into file
# Write out the data
-calib_file = open("cam_calib_%s_%s.txt" % (hostname, date_str), "w")
+calib_file = open("cam_calib_%s_%s.json" % (hostname, date_str), "w")
calib_file.write(encodedNumpyData)
calib_file.close()
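NumpyArrayEncoder itself is not shown in this diff; a typical implementation (an assumption, consistent with the JSONEncoder import above) converts ndarrays to nested lists so the camera matrix can be serialized:

    import numpy as np
    from json import JSONEncoder

    class NumpyArrayEncoder(JSONEncoder):
        def default(self, obj):
            # Serialize numpy arrays as plain nested lists; defer everything
            # else to the stock JSONEncoder behavior.
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            return JSONEncoder.default(self, obj)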
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index 4d8929a..da20a52 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -5,17 +5,9 @@
import numpy as np
import os
+import define_training_data as dtd
+
glog.setLevel("WARN")
-USE_BAZEL = True
-
-
-def bazel_name_fix(filename):
- ret_name = filename
- if USE_BAZEL:
- ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
-
- return ret_name
-
class CameraIntrinsics:
def __init__(self):
@@ -39,69 +31,78 @@
self.team_number = -1
-### CAMERA DEFINITIONS
+def load_camera_definitions():
+ ### CAMERA DEFINITIONS
-# Robot camera has:
-# FOV_H = 93.*math.pi()/180.
-# FOV_V = 70.*math.pi()/180.
+ # Robot camera has:
+ # FOV_H = 93.*math.pi()/180.
+ # FOV_V = 70.*math.pi()/180.
-# Create fake camera (based on USB webcam params)
-fx = 810.
-fy = 810.
-cx = 320.
-cy = 240.
+ # Create fake camera (based on USB webcam params)
+ fx = 810.
+ fy = 810.
+ cx = 320.
+ cy = 240.
-# Define a web_cam
-web_cam_int = CameraIntrinsics()
-web_cam_int.camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
-web_cam_int.dist_coeffs = np.zeros((5, 1))
+ # Define a web_cam
+ web_cam_int = CameraIntrinsics()
+    web_cam_int.camera_matrix = np.asarray(
+        [[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+ web_cam_int.dist_coeffs = np.zeros((5, 1))
-web_cam_ext = CameraExtrinsics()
-# Camera rotation from robot x,y,z to opencv (z, -x, -y)
-web_cam_ext.R = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
-web_cam_ext.T = np.array([0., 0., 0.])
+ web_cam_ext = CameraExtrinsics()
+ # Camera rotation from robot x,y,z to opencv (z, -x, -y)
+ # This is extrinsics for the turret camera
+ # camera pose relative to center, base of the turret
+ # TODO<Jim>: Need to implement per-camera calibration, like with intrinsics
+ camera_pitch = 34.0 * np.pi / 180.0
+    camera_pitch_matrix = np.matrix(
+        [[np.cos(camera_pitch), 0.0, -np.sin(camera_pitch)],
+         [0.0, 1.0, 0.0],
+         [np.sin(camera_pitch), 0.0, np.cos(camera_pitch)]])
+ web_cam_ext.R = np.array(camera_pitch_matrix * np.matrix(
+ [[0., 0., 1.], [-1, 0, 0], [0, -1., 0]]))
+    # Tape-measure calibration -- need to pull from CAD or automate this
+ web_cam_ext.T = np.array([2.0 * 0.0254, -6.0 * 0.0254, 41.0 * 0.0254])
-web_cam_params = CameraParameters()
-web_cam_params.camera_int = web_cam_int
-web_cam_params.camera_ext = web_cam_ext
+ web_cam_params = CameraParameters()
+ web_cam_params.camera_int = web_cam_int
+ web_cam_params.camera_ext = web_cam_ext
-camera_list = []
+ camera_list = []
-# TODO<Jim>: Should probably make this a dict to make replacing easier
-for team_number in (971, 7971, 8971, 9971):
- for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
- camera_base = copy.deepcopy(web_cam_params)
- camera_base.node_name = node_name
- camera_base.team_number = team_number
- camera_list.append(camera_base)
+ # TODO<Jim>: Should probably make this a dict to make replacing easier
+ for team_number in (971, 7971, 8971, 9971):
+ for node_name in ("pi0", "pi1", "pi2", "pi3", "pi4", "pi5"):
+ camera_base = copy.deepcopy(web_cam_params)
+ camera_base.node_name = node_name
+ camera_base.team_number = team_number
+ camera_list.append(camera_base)
-dir_name = ('calib_files')
+ dir_name = dtd.bazel_name_fix('calib_files')
+ for filename in os.listdir(dir_name):
+ if "cam-calib-int" in filename and filename.endswith(".json"):
+ # Extract intrinsics from file
+ fn_split = filename.split("_")
+ hostname_split = fn_split[1].split("-")
+ if hostname_split[0] == "pi":
+ team_number = int(hostname_split[1])
+ node_name = hostname_split[0] + hostname_split[2]
-if USE_BAZEL:
- from bazel_tools.tools.python.runfiles import runfiles
- r = runfiles.Create()
- dir_name = r.Rlocation(bazel_name_fix('calib_files'))
+ calib_file = open(dir_name + "/" + filename, 'r')
+ calib_dict = json.loads(calib_file.read())
+            hostname = calib_dict["hostname"]
+ camera_matrix = np.asarray(calib_dict["camera_matrix"])
+ dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
-for filename in os.listdir(dir_name):
- if "cam-calib-int" in filename and filename.endswith(".txt"):
- # Extract intrinsics from file
- fn_split = filename.split("_")
- hostname_split = fn_split[1].split("-")
- if hostname_split[0] == "pi":
- team_number = int(hostname_split[1])
- node_name = hostname_split[0] + hostname_split[2]
+ # Look for match, and replace camera_intrinsics
+ for camera_calib in camera_list:
+ if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
+ glog.info("Found calib for %s, team #%d" % (node_name,
+ team_number))
+ camera_calib.camera_int.camera_matrix = copy.copy(
+ camera_matrix)
+ camera_calib.camera_int.dist_coeffs = copy.copy(
+ dist_coeffs)
- calib_file = open(dir_name + "/" + filename, 'r')
- calib_dict = json.loads(calib_file.read())
- hostname = np.asarray(calib_dict["hostname"])
- camera_matrix = np.asarray(calib_dict["camera_matrix"])
- dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
-
- # Look for match, and replace camera_intrinsics
- for camera_calib in camera_list:
- if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
- glog.info("Found calib for %s, team #%d" % (node_name,
- team_number))
- camera_calib.camera_int.camera_matrix = copy.copy(
- camera_matrix)
- camera_calib.camera_int.dist_coeffs = copy.copy(dist_coeffs)
+ return camera_list
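The turret-camera extrinsics above compose a 34 degree pitch with the fixed robot-to-OpenCV axis remap (x, y, z) -> (z, -x, -y). A quick standalone check that the composed R is still a proper rotation (a sketch, not project code):

    import numpy as np

    camera_pitch = 34.0 * np.pi / 180.0
    pitch = np.array([[np.cos(camera_pitch), 0.0, -np.sin(camera_pitch)],
                      [0.0, 1.0, 0.0],
                      [np.sin(camera_pitch), 0.0, np.cos(camera_pitch)]])
    axis_remap = np.array([[0., 0., 1.], [-1., 0., 0.], [0., -1., 0.]])
    R = pitch @ axis_remap
    np.testing.assert_allclose(R @ R.T, np.eye(3), atol=1e-12)  # orthonormal
    assert abs(np.linalg.det(R) - 1.0) < 1e-9  # determinant +1, no reflection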
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 8116fb7..76e73da 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -244,6 +244,18 @@
return img
+def bazel_name_fix(filename):
+    """Prepend the Bazel runfiles prefix to filename when running under Bazel."""
+    ret_name = filename
+    try:
+        from bazel_tools.tools.python.runfiles import runfiles
+        r = runfiles.Create()
+        ret_name = r.Rlocation(
+            'org_frc971/y2020/vision/tools/python_code/' + filename)
+    except:
+        # Not running under Bazel; fall back to the plain relative path.
+        pass
+
+    return ret_name
+
+
def sample_define_polygon_usage():
image = cv2.imread("test_images/train_power_port_red.png")
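Moving bazel_name_fix here lets camera_definition.py and target_definition.py share a single path-resolution helper. Usage is the same in or out of Bazel (paths shown are illustrative):

    dir_name = bazel_name_fix('calib_files')
    # Under `bazel run`, this resolves through the runfiles tree to something
    # like .../org_frc971/y2020/vision/tools/python_code/calib_files; outside
    # Bazel the runfiles lookup fails and 'calib_files' is returned unchanged.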
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index 455bc30..8d16104 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -10,8 +10,9 @@
### DEFINITIONS
target_definition.USE_BAZEL = False
+camera_definition.USE_BAZEL = False
target_list = target_definition.compute_target_definition()
-camera_list = camera_definition.camera_list
+camera_list = camera_definition.load_camera_definitions()
# For now, just use the first one
camera_params = camera_list[0]
@@ -29,12 +30,14 @@
'test_images/test_raspi3_sample.jpg', #7
'test_images/test_VR_sample1.png', #8
'test_images/train_loading_bay_blue.png', #9
- 'test_images/train_loading_bay_red.png' #10
+ 'test_images/train_loading_bay_red.png', #10
+ 'test_images/pi-7971-3_test_image.png', #11
+    'sample_images/capture-2020-02-13-16-40-07.png', #12
]
training_image_index = 0
# TODO: Should add argParser here to select this
-query_image_index = 0 # Use -1 to use camera capture; otherwise index above list
+query_image_index = 12 # Use -1 for camera capture; otherwise, an index into the list above
##### Let's get to work!
@@ -162,8 +165,6 @@
for m in good_matches:
src_pts_3d.append(target_list[i].keypoint_list_3d[m.trainIdx])
pt = query_keypoint_lists[0][m.queryIdx].pt
- print("Color at ", pt, " is ", query_images[0][int(pt[1])][int(
- pt[0])])
query_images[0] = cv2.circle(
query_images[0], (int(pt[0]), int(pt[1])), 5, (0, 255, 0), 3)
diff --git a/y2020/vision/tools/python_code/image_stream.py b/y2020/vision/tools/python_code/image_stream.py
new file mode 100644
index 0000000..2cd5ac7
--- /dev/null
+++ b/y2020/vision/tools/python_code/image_stream.py
@@ -0,0 +1,34 @@
+import cv2
+import datetime
+# Open the device /dev/videoX, where X = CAMERA_INDEX
+CAMERA_INDEX = 0
+cap = cv2.VideoCapture(CAMERA_INDEX)
+
+# Check whether the user-selected camera was opened successfully.
+if not cap.isOpened():
+ print("Could not open video device /dev/video%d" % CAMERA_INDEX)
+ quit()
+
+while True:
+ # Capture frame-by-frame
+ ret, frame = cap.read()
+
+ exp = cap.get(cv2.CAP_PROP_EXPOSURE)
+ #print("Exposure:", exp)
+ # Display the resulting frame
+ cv2.imshow('preview', frame)
+
+    # Wait for user input: capture an image or quit the application
+ keystroke = cv2.waitKey(1)
+
+ if keystroke & 0xFF == ord('q'):
+ break
+ elif keystroke & 0xFF == ord('c'):
+ image_name = datetime.datetime.today().strftime(
+ "capture-%b-%d-%Y-%H-%M-%S.png")
+ print("Capturing image as %s" % image_name)
+ cv2.imwrite(image_name, frame)
+
+# When everything's done, release the capture
+cap.release()
+cv2.destroyAllWindows()
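The capture filename embeds an abbreviated month name via strftime, e.g. (illustrative output):

    import datetime
    # For March 7, 2020 at 15:34:00 this yields
    # "capture-Mar-07-2020-15-34-00.png".
    print(datetime.datetime.today().strftime("capture-%b-%d-%Y-%H-%M-%S.png"))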
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index 0ce70b7..5ca574d 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -50,7 +50,7 @@
import camera_definition
import target_definition
target_data_list = target_definition.compute_target_definition()
- camera_calib_list = camera_definition.camera_list
+ camera_calib_list = camera_definition.load_camera_definitions()
glog.info("Writing file to ", output_path)
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index 430e83f..6080ac2 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -12,31 +12,16 @@
# TODO<Jim>: Allow command-line setting of logging level
glog.setLevel("WARN")
global VISUALIZE_KEYPOINTS
-global USE_BAZEL
-USE_BAZEL = True
VISUALIZE_KEYPOINTS = False
# For now, just have a 32 pixel radius, based on original training image
target_radius_default = 32.
-def bazel_name_fix(filename):
- ret_name = filename
- if USE_BAZEL:
- ret_name = 'org_frc971/y2020/vision/tools/python_code/' + filename
-
- return ret_name
-
-
class TargetData:
def __init__(self, filename):
- self.image_filename = filename
+ self.image_filename = dtd.bazel_name_fix(filename)
# Load an image (will come in as a 1-element list)
- if USE_BAZEL:
- from bazel_tools.tools.python.runfiles import runfiles
- r = runfiles.Create()
- self.image_filename = r.Rlocation(
- bazel_name_fix(self.image_filename))
self.image = tam.load_images([self.image_filename])[0]
self.polygon_list = []
self.polygon_list_3d = []
@@ -90,7 +75,7 @@
return point_list_3d
-def compute_target_definition():
+def load_training_data():
############################################################
# TARGET DEFINITIONS
############################################################
@@ -121,6 +106,76 @@
power_port_target_height = (
power_port_total_height + power_port_bottom_wing_height) / 2.
+ ### Cafeteria target definition
+ inch_to_meter = 0.0254
+ c_power_port_total_height = (79.5 + 39.5) * inch_to_meter
+ c_power_port_edge_y = 1.089
+ c_power_port_width = 4.0 * 12 * inch_to_meter
+ c_power_port_bottom_wing_height = 79.5 * inch_to_meter
+ c_power_port_wing_width = 47.5 * inch_to_meter
+ c_power_port_white_marker_z = (79.5 - 19.5) * inch_to_meter
+
+ # Pick the target center location at halfway between top and bottom of the top panel
+    c_power_port_target_height = (
+        c_power_port_total_height + c_power_port_bottom_wing_height) / 2.
+
+ ###
+ ### Cafe power port
+ ###
+
+ # Create the reference "ideal" image
+ ideal_power_port_cafe = TargetData(
+ 'test_images/train_cafeteria-2020-02-13-16-27-25.png')
+
+ # Start at lower left corner, and work around clockwise
+ # These are taken by manually finding the points in gimp for this image
+    power_port_cafe_main_panel_polygon_points_2d = [
+        (271, 456), (278, 394), (135, 382), (286, 294),
+        (389, 311), (397, 403), (401, 458)
+    ]
+
+ # These are "virtual" 3D points based on the expected geometry
+    power_port_cafe_main_panel_polygon_points_3d = [
+        (field_length / 2., -c_power_port_edge_y,
+         c_power_port_white_marker_z),
+        (field_length / 2., -c_power_port_edge_y,
+         c_power_port_bottom_wing_height),
+        (field_length / 2., -c_power_port_edge_y + c_power_port_wing_width,
+         c_power_port_bottom_wing_height),
+        (field_length / 2., -c_power_port_edge_y,
+         c_power_port_total_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_total_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_bottom_wing_height),
+        (field_length / 2., -c_power_port_edge_y - c_power_port_width,
+         c_power_port_white_marker_z)
+    ]
+
+ # Populate the cafe power port
+ ideal_power_port_cafe.polygon_list.append(
+ power_port_cafe_main_panel_polygon_points_2d)
+ ideal_power_port_cafe.polygon_list_3d.append(
+ power_port_cafe_main_panel_polygon_points_3d)
+
+ # Location of target. Rotation is pointing in -x direction
+ ideal_power_port_cafe.target_rotation = np.identity(3, np.double)
+ ideal_power_port_cafe.target_position = np.array([
+ field_length / 2., -c_power_port_edge_y - c_power_port_width / 2.,
+ c_power_port_target_height
+ ])
+ ideal_power_port_cafe.target_point_2d = np.float32([[340, 350]]).reshape(
+ -1, 1, 2) # train_cafeteria-2020-02-13-16-27-25.png
+
+ ideal_target_list.append(ideal_power_port_cafe)
+ training_target_power_port_cafe = TargetData(
+ 'test_images/train_cafeteria-2020-02-13-16-27-25.png')
+ training_target_power_port_cafe.target_rotation = ideal_power_port_cafe.target_rotation
+ training_target_power_port_cafe.target_position = ideal_power_port_cafe.target_position
+ training_target_power_port_cafe.target_radius = target_radius_default
+ training_target_list.append(training_target_power_port_cafe)
+
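As a quick sanity check on the cafeteria-target constants above (plain arithmetic, not project code):

    inch_to_meter = 0.0254
    c_total = (79.5 + 39.5) * inch_to_meter   # ~3.022 m, top of target
    c_wing = 79.5 * inch_to_meter             # ~2.019 m, bottom wing height
    c_marker = (79.5 - 19.5) * inch_to_meter  # ~1.524 m, white marker line
    print((c_total + c_wing) / 2.)            # target center: ~2.521 m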
###
### Red Power Port
###
@@ -302,13 +357,15 @@
ideal_power_port_blue.target_point_2d = np.float32([[567, 180]]).reshape(
-1, 1, 2) # ideal_power_port_blue.png
- ideal_target_list.append(ideal_power_port_blue)
+ #### TEMPORARILY DISABLING the BLUE POWER PORT target
+ #ideal_target_list.append(ideal_power_port_blue)
training_target_power_port_blue = TargetData(
'test_images/train_power_port_blue.png')
training_target_power_port_blue.target_rotation = ideal_power_port_blue.target_rotation
training_target_power_port_blue.target_position = ideal_power_port_blue.target_position
training_target_power_port_blue.target_radius = target_radius_default
- training_target_list.append(training_target_power_port_blue)
+ #### TEMPORARILY DISABLING the BLUE POWER PORT target
+ #training_target_list.append(training_target_power_port_blue)
###
### Blue Loading Bay
@@ -354,11 +411,17 @@
training_target_loading_bay_blue.target_radius = target_radius_default
training_target_list.append(training_target_loading_bay_blue)
+ return ideal_target_list, training_target_list
+
+
+def compute_target_definition():
+ ideal_target_list, training_target_list = load_training_data()
+
# Create feature extractor
feature_extractor = tam.load_feature_extractor()
# Use webcam parameters for now
- camera_params = camera_definition.web_cam_params
+ camera_params = camera_definition.load_camera_definitions()[0]
for ideal_target in ideal_target_list:
glog.info(
@@ -524,20 +587,10 @@
help="Whether to visualize the results",
default=False,
action='store_true')
- ap.add_argument(
- "-n",
- "--no_bazel",
- help="Don't run using Bazel",
- default=True,
- action='store_false')
args = vars(ap.parse_args())
VISUALIZE_KEYPOINTS = args["visualize"]
if args["visualize"]:
glog.info("Visualizing results")
- USE_BAZEL = args["no_bazel"]
- if args["no_bazel"]:
- glog.info("Running on command line (no Bazel)")
-
compute_target_definition()
diff --git a/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png b/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png
new file mode 100644
index 0000000..be67176
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_cafeteria-2020-02-13-16-27-25.png
Binary files differ