Changing print statements to glog
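
python-glog forwards each call to Python's standard logging module, so
messages use logging's %-style formatting rather than print's
comma-separated arguments. A minimal sketch of the intended usage,
assuming the upstream python-glog package:

    import glog

    glog.setLevel("INFO")                     # accepts logging level names
    glog.info("kept %d of %d features" % (3, 10))
    glog.info("deferred interpolation: %s", "also works, logging-style")
    glog.debug("verbose detail")              # suppressed at INFO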
Change-Id: Ic2a2e7e0bb63bf080f521a1e54c5413b70b2abdf
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 3d7bedb..a130ed6 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -25,6 +25,7 @@
"//y2020/vision/sift:sift_fbs_python",
"@bazel_tools//tools/python/runfiles",
"@opencv_contrib_nonfree_amd64//:python_opencv",
+ "//external:python-glog",
],
)
@@ -73,6 +74,7 @@
"//y2020/vision/sift:sift_fbs_python",
"@bazel_tools//tools/python/runfiles",
"@opencv_contrib_nonfree_amd64//:python_opencv",
+ "//external:python-glog",
],
)
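
The //external:python-glog label added above is conventionally wired up
with a bind() rule in the WORKSPACE. A hypothetical sketch (the actual
repository name in this tree is an assumption):

    # WORKSPACE
    bind(
        name = "python-glog",
        actual = "@python_glog//:glog",  # hypothetical repository target
    )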
diff --git a/y2020/vision/tools/python_code/define_training_data.py b/y2020/vision/tools/python_code/define_training_data.py
index 22eb6ce..157f9e6 100644
--- a/y2020/vision/tools/python_code/define_training_data.py
+++ b/y2020/vision/tools/python_code/define_training_data.py
@@ -1,5 +1,6 @@
import argparse
import cv2
+import glog
import json
import math
import numpy as np
@@ -17,7 +18,7 @@
global current_mouse
current_mouse = (x, y)
if event == cv2.EVENT_LBUTTONUP:
- #print("Adding point at %d, %d" % (x,y))
+ glog.debug("Adding point at %d, %d" % (x,y))
point_list.append([x, y])
pass
@@ -218,7 +219,7 @@
pts_3d_proj_2d, jac_2d = cv2.projectPoints(pts_3d_np, R, T, cam_mat,
distortion_coeffs)
if inliers is None:
- print("WARNING: Didn't get any inliers when reprojecting polygons")
+ glog.warn("WARNING: Didn't get any inliers when reprojecting polygons")
return img
for i in range(len(pts_2d)):
pt_2d = pts_2d_np[i][0]
@@ -239,7 +240,7 @@
image = cv2.imread("test_images/train_power_port_red.png")
polygon_list = define_polygon(image)
- print(polygon_list)
+ glog.debug(polygon_list)
def sample_define_points_by_list_usage():
@@ -251,5 +252,5 @@
(689, 679)]
polygon_list = define_points_by_list(image, test_points)
- print(polygon_list)
+ glog.debug(polygon_list)
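
Passing the raw polygon_list to glog.debug works because the underlying
logging call stringifies whatever message object it receives; an
illustrative check:

    import glog

    glog.setLevel("DEBUG")
    glog.debug([(10, 20), (30, 40)])  # logged via str() of the list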
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index fba847d..65f3342 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -1,6 +1,7 @@
#!/usr/bin/python3
import cv2
+import glog
import numpy as np
import sys
import flatbuffers
@@ -35,23 +36,23 @@
if (len(sys.argv) > 2):
if sys.argv[2] == "test":
- print("Loading test data")
+ glog.info("Loading test data")
import camera_definition_test
import target_definition_test
target_data_list = target_definition_test.compute_target_definition(
)
camera_calib_list = camera_definition_test.camera_list
else:
- print("Unhandled arguments: '%s'" % sys.argv[2])
+ glog.error("Unhandled arguments: '%s'" % sys.argv[2])
quit()
else:
- print("Loading target configuration data")
+ glog.info("Loading target configuration data")
import camera_definition
import target_definition
target_data_list = target_definition.compute_target_definition()
camera_calib_list = camera_definition.camera_list
- print("Writing file to ", output_path)
+ glog.info("Writing file to ", output_path)
fbb = flatbuffers.Builder(0)
diff --git a/y2020/vision/tools/python_code/target_definition.py b/y2020/vision/tools/python_code/target_definition.py
index cbbb785..02fd089 100644
--- a/y2020/vision/tools/python_code/target_definition.py
+++ b/y2020/vision/tools/python_code/target_definition.py
@@ -1,5 +1,7 @@
import argparse
import cv2
+# TODO<Jim>: Add gflags for handling command-line flags
+import glog
import math
import numpy as np
@@ -7,6 +9,8 @@
import define_training_data as dtd
import train_and_match as tam
+# TODO<Jim>: Allow command-line setting of logging level
+glog.setLevel("WARN")
global VISUALIZE_KEYPOINTS
global USE_BAZEL
USE_BAZEL = True
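
A sketch for the TODO above, driving the level from a command-line flag
instead of hard-coding WARN (the --log_level flag name is hypothetical,
not part of this change):

    import argparse
    import glog

    ap = argparse.ArgumentParser()
    ap.add_argument("--log_level", default="WARN",
                    choices=["DEBUG", "INFO", "WARN", "ERROR"])
    args = ap.parse_args()
    glog.setLevel(args.log_level)  # forwards to logging.Logger.setLevel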
@@ -68,8 +72,8 @@
# Filter and project points for each polygon in the list
filtered_keypoints, _, _, _, keep_list = dtd.filter_keypoints_by_polygons(
keypoint_list, None, [self.polygon_list[poly_ind]])
- print("Filtering kept %d of %d features" % (len(keep_list),
- len(keypoint_list)))
+ glog.info("Filtering kept %d of %d features" % (len(keep_list),
+ len(keypoint_list)))
filtered_point_array = np.asarray(
[(keypoint.pt[0], keypoint.pt[1])
for keypoint in filtered_keypoints]).reshape(-1, 2)
@@ -325,9 +329,7 @@
camera_params = camera_definition.web_cam_params
for ideal_target in ideal_target_list:
- if not USE_BAZEL:
- print("\nPreparing target for image %s" %
- ideal_target.image_filename)
+ glog.info("\nPreparing target for image %s" % ideal_target.image_filename)
ideal_target.extract_features(feature_extractor)
ideal_target.filter_keypoints_by_polygons()
ideal_target.compute_reprojection_maps()
@@ -376,9 +378,7 @@
AUTO_PROJECTION = True
if AUTO_PROJECTION:
- print(
- "\n\nAuto projection of training keypoints to 3D using ideal images"
- )
+ glog.info("\n\nAuto projection of training keypoints to 3D using ideal images")
# Match the captured training image against the "ideal" training image
# and use those matches to pin down the 3D locations of the keypoints
@@ -387,9 +387,7 @@
training_target = training_target_list[target_ind]
ideal_target = ideal_target_list[target_ind]
- if not USE_BAZEL:
- print("\nPreparing target for image %s" %
- training_target.image_filename)
+ glog.info("\nPreparing target for image %s" % training_target.image_filename)
# Extract keypoints and descriptors for model
training_target.extract_features(feature_extractor)
@@ -414,13 +412,12 @@
ideal_pts_2d, H_inv)
training_target.polygon_list.append(transformed_polygon)
- print("Started with %d keypoints" % len(
- training_target.keypoint_list))
+ glog.info("Started with %d keypoints" % len(training_target.keypoint_list))
training_target.keypoint_list, training_target.descriptor_list, rejected_keypoint_list, rejected_descriptor_list, _ = dtd.filter_keypoints_by_polygons(
training_target.keypoint_list, training_target.descriptor_list,
training_target.polygon_list)
- print("After filtering by polygons, had %d keypoints" % len(
+ glog.info("After filtering by polygons, had %d keypoints" % len(
training_target.keypoint_list))
if VISUALIZE_KEYPOINTS:
tam.show_keypoints(training_target.image,
@@ -475,27 +472,16 @@
if __name__ == '__main__':
ap = argparse.ArgumentParser()
- ap.add_argument(
- "-v",
- "--visualize",
- help="Whether to visualize the results",
- default=False,
- action='store_true')
- ap.add_argument(
- "-n",
- "--no_bazel",
- help="Don't run using Bazel",
- default=True,
- action='store_false')
+ ap.add_argument("--visualize", help="Whether to visualize the results", default=False, action='store_true')
+ ap.add_argument("-n", "--no_bazel", help="Don't run using Bazel", default=True, action='store_false')
+ args = vars(ap.parse_args())
- args = ap.parse_args()
+ VISUALIZE_KEYPOINTS = args["visualize"]
+ if args["visualize"]:
+ glog.info("Visualizing results")
- if args.visualize:
- print("Visualizing results")
- VISUALIZE_KEYPOINTS = True
-
- if not args.no_bazel:
- print("Running on command line (no Bazel)")
- USE_BAZEL = False
+ USE_BAZEL = args["no_bazel"]
+ if args["no_bazel"]:
+ glog.info("Running on command line (no Bazel)")
compute_target_definition()
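
With the flags refactored as above, an illustrative command-line
(non-Bazel) run with visualization enabled would be:

    python3 target_definition.py -v -n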
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
index e5a7f5b..6902d10 100644
--- a/y2020/vision/tools/python_code/train_and_match.py
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -1,4 +1,5 @@
import cv2
+import glog
import math
import numpy as np
import time
@@ -8,6 +9,7 @@
FEATURE_EXTRACTOR_NAME = 'SIFT'
QUERY_INDEX = 0 # We use a list for both training and query info, but only ever have one query item
+glog.setLevel("WARN")
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
@@ -38,7 +40,7 @@
# Load image (in color; let opencv convert to B&W for features)
img_data = cv2.imread(im)
if img_data is None:
- print("Failed to load image: ", im)
+ glog.error("Failed to load image: ", im)
exit()
else:
image_list.append(img_data)
@@ -143,15 +145,12 @@
elif FEATURE_EXTRACTOR_NAME is 'ORB':
matches = matcher.knnMatch(train_keypoint_lists[0], desc_query, k=2)
- print(matches)
good_matches = []
for m in matches:
if m:
if len(m) == 2:
- print(m[0].distance, m[1].distance)
if m[0].distance < 0.7 * m[1].distance:
good_matches.append(m[0])
- print(m[0].distance)
good_matches_list.append(good_matches)
@@ -176,14 +175,14 @@
for i in range(len(train_keypoint_lists)):
good_matches = good_matches_list[i]
if len(good_matches) < MIN_MATCH_COUNT:
- print("Not enough matches are for model ", i, ": ",
- len(good_matches), " out of needed #: ", MIN_MATCH_COUNT)
+ glog.warn("Not enough matches are for model ", i, ": ",
+ len(good_matches), " out of needed #: ", MIN_MATCH_COUNT)
homography_list.append([])
matches_mask_list.append([])
continue
- print("Got good number of matches for model %d: %d (needed only %d)" %
- (i, len(good_matches), MIN_MATCH_COUNT))
+ glog.info("Got good number of matches for model %d: %d (needed only %d)" %
+ (i, len(good_matches), MIN_MATCH_COUNT))
# Extract and bundle keypoint locations for computations
src_pts = np.float32([
train_keypoint_lists[i][m.trainIdx].pt for m in good_matches
@@ -206,7 +205,7 @@
# Also shows image with query unwarped (to match training image) and target pt
def show_results(training_images, train_keypoint_lists, query_images,
query_keypoint_lists, target_point_list, good_matches_list):
- print("Showing results for ", len(training_images), " training images")
+ glog.info("Showing results for ", len(training_images), " training images")
homography_list, matches_mask_list = compute_homographies(
train_keypoint_lists, query_keypoint_lists, good_matches_list)
@@ -214,15 +213,15 @@
good_matches = good_matches_list[i]
if len(good_matches) < MIN_MATCH_COUNT:
continue
- print("Showing results for model ", i)
+ glog.debug("Showing results for model ", i)
matches_mask_count = matches_mask_list[i].count(1)
if matches_mask_count != len(good_matches):
- print("Homography rejected some matches! From ",
+ glog.info("Homography rejected some matches! From ",
len(good_matches), ", only ", matches_mask_count,
" were used")
if matches_mask_count < MIN_MATCH_COUNT:
- print(
- "Skipping match because homography rejected matches down to below ",
- MIN_MATCH_COUNT)
+ glog.info("Skipping match because homography rejected matches down to below %d" %
+ MIN_MATCH_COUNT)
continue
@@ -260,14 +259,13 @@
img3 = cv2.drawMatches(query_image, query_keypoint_lists[QUERY_INDEX],
training_images[i], train_keypoint_lists[i],
good_matches_list[i], None, **draw_params)
- print("Drawing matches for model ", i,
- ". Query on left, Training image on right")
+ glog.debug("Drawing matches for model ", i,
+ ". Query on left, Training image on right")
cv2.imshow('Matches', img3), cv2.waitKey()
# Next, unwarp the query image so it looks like the training view
H_inv = np.linalg.inv(H)
query_image_warp = cv2.warpPerspective(query_image, H_inv, (w, h))
- print("Showing unwarped query image for model ", i)
cv2.imshow('Unwarped Image', query_image_warp), cv2.waitKey()
# Go ahead and return these, for use elsewhere