Add C++ charuco camera calibration code
It subscribes to the image message and generates the intrinsics
calibration when enough images have been captured.
To stay compatible with the calibration files produced by the python
calibration code, the flatbuffer generator also supports loading both
the new and the old json formats.
Change-Id: I83190e5c17f970a94ca0eb9310f5787fe714c518
diff --git a/debian/opencv.BUILD b/debian/opencv.BUILD
index 7fdbaaa..a8b01fb 100644
--- a/debian/opencv.BUILD
+++ b/debian/opencv.BUILD
@@ -10,6 +10,7 @@
"usr/lib/x86_64-linux-gnu/libopencv_flann.so.3.2",
"usr/lib/x86_64-linux-gnu/libopencv_highgui.so.3.2",
"usr/lib/x86_64-linux-gnu/libopencv_videoio.so.3.2",
+ "usr/lib/x86_64-linux-gnu/libopencv_aruco.so.3.2",
"usr/lib/x86_64-linux-gnu/libopencv_imgcodecs.so.3.2",
"usr/lib/x86_64-linux-gnu/libopencv_ml.so.3.2",
"usr/lib/x86_64-linux-gnu/libopencv_calib3d.so.3.2",
@@ -239,6 +240,7 @@
"usr/lib/arm-linux-gnueabihf/libopencv_flann.so.3.2",
"usr/lib/arm-linux-gnueabihf/libopencv_highgui.so.3.2",
"usr/lib/arm-linux-gnueabihf/libopencv_videoio.so.3.2",
+ "usr/lib/arm-linux-gnueabihf/libopencv_aruco.so.3.2",
"usr/lib/arm-linux-gnueabihf/libopencv_imgcodecs.so.3.2",
"usr/lib/arm-linux-gnueabihf/libopencv_ml.so.3.2",
"usr/lib/arm-linux-gnueabihf/libopencv_calib3d.so.3.2",
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 86088e6..8b73691 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -83,6 +83,33 @@
)
cc_binary(
+ name = "calibration",
+ srcs = [
+ "calibration.cc",
+ ],
+ data = [
+ "//y2020:config.json",
+ ],
+ restricted_to = [
+ "//tools:k8",
+ "//tools:armhf-debian",
+ ],
+ visibility = ["//y2020:__subpackages__"],
+ deps = [
+ ":vision_fbs",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
+ "//frc971/control_loops/drivetrain:improved_down_estimator",
+ "//third_party:opencv",
+ "//y2020/vision/sift:sift_fbs",
+ "//y2020/vision/sift:sift_training_fbs",
+ "//y2020/vision/tools/python_code:sift_training_data",
+ "@com_google_absl//absl/strings:str_format",
+ "@org_tuxfamily_eigen//:eigen",
+ ],
+)
+
+cc_binary(
name = "viewer_replay",
srcs = [
"viewer_replay.cc",
diff --git a/y2020/vision/calibration.cc b/y2020/vision/calibration.cc
new file mode 100644
index 0000000..15f1f4c
--- /dev/null
+++ b/y2020/vision/calibration.cc
@@ -0,0 +1,322 @@
+#include "Eigen/Dense"
+#include "Eigen/Geometry"
+#include <opencv2/aruco/charuco.hpp>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/core/eigen.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include "absl/strings/str_format.h"
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "aos/network/team_number.h"
+#include "aos/time/time.h"
+#include "aos/util/file.h"
+#include "frc971/control_loops/drivetrain/improved_down_estimator.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
+#include "y2020/vision/tools/python_code/sift_training_data.h"
+#include "y2020/vision/vision_generated.h"
+
+DEFINE_string(config, "config.json", "Path to the config file to use.");
+DEFINE_string(pi, "pi-7971-1", "Pi name to calibrate.");
+DEFINE_string(calibration_folder, "y2020/vision/tools/python_code/calib_files",
+ "Folder to place calibration files.");
+DEFINE_bool(large_board, true, "If true, use the large calibration board.");
+DEFINE_bool(display_undistorted, false,
+ "If true, display the undistorted image.");
+DEFINE_string(board_template_path, "",
+ "If specified, write an image to the specified path for the "
+ "charuco board pattern.");
+DEFINE_uint32(min_targets, 10,
+              "The minimum number of targets required to match.");
+
+namespace frc971 {
+namespace vision {
+
+class CameraCalibration {
+ public:
+ CameraCalibration(const std::string_view training_data_bfbs) {
+ const sift::TrainingData *const training_data =
+ flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+ {
+ flatbuffers::Verifier verifier(
+ reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+ training_data_bfbs.size());
+ CHECK(training_data->Verify(verifier));
+ }
+
+ camera_calibration_ = FindCameraCalibration(training_data);
+ }
+
+ cv::Mat CameraIntrinsics() const {
+ const cv::Mat result(3, 3, CV_32F,
+ const_cast<void *>(static_cast<const void *>(
+ camera_calibration_->intrinsics()->data())));
+ CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
+ return result;
+ }
+
+ cv::Mat CameraDistCoeffs() const {
+ const cv::Mat result(5, 1, CV_32F,
+ const_cast<void *>(static_cast<const void *>(
+ camera_calibration_->dist_coeffs()->data())));
+ CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
+ return result;
+ }
+
+ private:
+ const sift::CameraCalibration *FindCameraCalibration(
+ const sift::TrainingData *const training_data) const {
+ std::optional<uint16_t> pi_number = aos::network::ParsePiNumber(FLAGS_pi);
+ std::optional<uint16_t> team_number =
+ aos::network::team_number_internal::ParsePiTeamNumber(FLAGS_pi);
+ CHECK(pi_number);
+ CHECK(team_number);
+ const std::string node_name = absl::StrFormat("pi%d", pi_number.value());
+ LOG(INFO) << "Looking for node name " << node_name << " team number "
+ << team_number.value();
+ for (const sift::CameraCalibration *candidate :
+ *training_data->camera_calibrations()) {
+ if (candidate->node_name()->string_view() != node_name) {
+ continue;
+ }
+ if (candidate->team_number() != team_number.value()) {
+ continue;
+ }
+ return candidate;
+ }
+ LOG(FATAL) << ": Failed to find camera calibration for " << node_name
+ << " on " << team_number.value();
+ }
+
+ const sift::CameraCalibration *camera_calibration_;
+};
+
+namespace {
+
+
+void ViewerMain() {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(FLAGS_config);
+
+ aos::ShmEventLoop event_loop(&config.message());
+
+ const CameraCalibration calibration(SiftTrainingData());
+
+ cv::Ptr<cv::aruco::Dictionary> dictionary =
+ cv::aruco::getPredefinedDictionary(FLAGS_large_board
+ ? cv::aruco::DICT_5X5_100
+ : cv::aruco::DICT_6X6_250);
+ cv::Ptr<cv::aruco::CharucoBoard> board =
+ FLAGS_large_board
+ ? cv::aruco::CharucoBoard::create(12, 9, 0.06, 0.045, dictionary)
+ : cv::aruco::CharucoBoard::create(7, 5, 0.04, 0.025, dictionary);
+
+ if (!FLAGS_board_template_path.empty()) {
+ cv::Mat board_image;
+ board->draw(cv::Size(600, 500), board_image, 10, 1);
+ cv::imwrite(FLAGS_board_template_path, board_image);
+ }
+
+ std::vector<std::vector<int>> all_charuco_ids;
+ std::vector<std::vector<cv::Point2f>> all_charuco_corners;
+
+ const cv::Mat camera_matrix = calibration.CameraIntrinsics();
+ Eigen::Matrix3d eigen_camera_matrix;
+ cv::cv2eigen(camera_matrix, eigen_camera_matrix);
+
+ const cv::Mat dist_coeffs = calibration.CameraDistCoeffs();
+ LOG(INFO) << "Camera matrix " << camera_matrix;
+ LOG(INFO) << "Distortion Coefficients " << dist_coeffs;
+
+ std::optional<uint16_t> pi_number = aos::network::ParsePiNumber(FLAGS_pi);
+ CHECK(pi_number) << ": Invalid pi number " << FLAGS_pi
+ << ", failed to parse pi number";
+
+ const std::string channel =
+ absl::StrCat("/pi", std::to_string(pi_number.value()), "/camera");
+ LOG(INFO) << "Connecting to channel " << channel;
+
+ event_loop.MakeWatcher(
+ channel, [&event_loop, &board, &all_charuco_ids, &all_charuco_corners,
+ camera_matrix, dist_coeffs,
+ eigen_camera_matrix](const CameraImage &image) {
+ const aos::monotonic_clock::duration age =
+ event_loop.monotonic_now() -
+ event_loop.context().monotonic_event_time;
+ const double age_double =
+ std::chrono::duration_cast<std::chrono::duration<double>>(age)
+ .count();
+ if (age > std::chrono::milliseconds(100)) {
+ LOG(INFO) << "Age: " << age_double << ", getting behind, skipping";
+ return;
+ }
+ // Create color image:
+ cv::Mat image_color_mat(cv::Size(image.cols(), image.rows()), CV_8UC2,
+ (void *)image.data()->data());
+ const cv::Size image_size(image.cols(), image.rows());
+ cv::Mat rgb_image(image_size, CV_8UC3);
+ cv::cvtColor(image_color_mat, rgb_image, CV_YUV2BGR_YUYV);
+
+ std::vector<int> marker_ids;
+ std::vector<std::vector<cv::Point2f>> marker_corners;
+
+ cv::aruco::detectMarkers(rgb_image, board->dictionary, marker_corners,
+ marker_ids);
+
+ std::vector<cv::Point2f> charuco_corners;
+ std::vector<int> charuco_ids;
+      // Proceed only if we detected at least FLAGS_min_targets markers.
+      if (marker_ids.size() >= FLAGS_min_targets) {
+ // Run everything twice, once with the calibration, and once without.
+ // This lets us both calibrate, and also print out the pose real time
+ // with the previous calibration.
+ cv::aruco::interpolateCornersCharuco(marker_corners, marker_ids,
+ rgb_image, board,
+ charuco_corners, charuco_ids);
+
+ std::vector<cv::Point2f> charuco_corners_with_calibration;
+ std::vector<int> charuco_ids_with_calibration;
+
+ cv::aruco::interpolateCornersCharuco(
+ marker_corners, marker_ids, rgb_image, board,
+ charuco_corners_with_calibration, charuco_ids_with_calibration,
+ camera_matrix, dist_coeffs);
+
+ cv::aruco::drawDetectedMarkers(rgb_image, marker_corners, marker_ids);
+
+ if (charuco_ids.size() >= FLAGS_min_targets) {
+ cv::aruco::drawDetectedCornersCharuco(
+ rgb_image, charuco_corners, charuco_ids, cv::Scalar(255, 0, 0));
+
+ cv::Vec3d rvec, tvec;
+ bool valid = cv::aruco::estimatePoseCharucoBoard(
+ charuco_corners_with_calibration, charuco_ids_with_calibration,
+ board, camera_matrix, dist_coeffs, rvec, tvec);
+
+
+ // if charuco pose is valid
+ if (valid) {
+ LOG(INFO) << std::fixed << std::setprecision(6)
+ << "Age: " << age_double << ", Pose is R:" << rvec
+ << " T:" << tvec;
+ cv::aruco::drawAxis(rgb_image, camera_matrix,
+ dist_coeffs, rvec, tvec, 0.1);
+ } else {
+ LOG(INFO) << "Age: " << age_double << ", invalid pose";
+ }
+ } else {
+ LOG(INFO) << "Age: " << age_double << ", no charuco IDs.";
+ }
+ } else {
+ LOG(INFO) << "Age: " << age_double << ", no marker IDs";
+ }
+
+ cv::imshow("Display", rgb_image);
+
+ if (FLAGS_display_undistorted) {
+ cv::Mat undistorted_rgb_image(image_size, CV_8UC3);
+ cv::undistort(rgb_image, undistorted_rgb_image, camera_matrix,
+ dist_coeffs);
+
+ cv::imshow("Display undist", undistorted_rgb_image);
+ }
+
+ int keystroke = cv::waitKey(1);
+ if ((keystroke & 0xFF) == static_cast<int>('c')) {
+ if (charuco_ids.size() >= FLAGS_min_targets) {
+ all_charuco_ids.emplace_back(std::move(charuco_ids));
+ all_charuco_corners.emplace_back(std::move(charuco_corners));
+ LOG(INFO) << "Image " << all_charuco_corners.size();
+ }
+
+ if (all_charuco_ids.size() >= 50) {
+ LOG(INFO) << "Got enough images to calibrate";
+ event_loop.Exit();
+ }
+ } else if ((keystroke & 0xFF) == static_cast<int>('q')) {
+ event_loop.Exit();
+ }
+ });
+
+ event_loop.Run();
+
+ if (all_charuco_ids.size() >= 50) {
+ cv::Mat cameraMatrix, distCoeffs;
+ std::vector<cv::Mat> rvecs, tvecs;
+ cv::Mat stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors;
+
+ // Set calibration flags (same than in calibrateCamera() function)
+ int calibration_flags = 0;
+ cv::Size img_size(640, 480);
+    const double reprojection_error = cv::aruco::calibrateCameraCharuco(
+        all_charuco_corners, all_charuco_ids, board, img_size, cameraMatrix,
+        distCoeffs, rvecs, tvecs, stdDeviationsIntrinsics,
+        stdDeviationsExtrinsics, perViewErrors, calibration_flags);
+    CHECK_LE(reprojection_error, 1.0) << ": Reprojection error is bad.";
+    LOG(INFO) << "Reprojection Error is " << reprojection_error;
+
+ flatbuffers::FlatBufferBuilder fbb;
+ flatbuffers::Offset<flatbuffers::String> name_offset =
+ fbb.CreateString(absl::StrFormat("pi%d", pi_number.value()));
+ flatbuffers::Offset<flatbuffers::Vector<float>> intrinsics_offset =
+ fbb.CreateVector<float>(9u, [&cameraMatrix](size_t i) {
+ return static_cast<float>(
+ reinterpret_cast<double *>(cameraMatrix.data)[i]);
+ });
+
+ flatbuffers::Offset<flatbuffers::Vector<float>>
+ distortion_coefficients_offset =
+ fbb.CreateVector<float>(5u, [&distCoeffs](size_t i) {
+ return static_cast<float>(
+ reinterpret_cast<double *>(distCoeffs.data)[i]);
+ });
+
+ sift::CameraCalibration::Builder camera_calibration_builder(fbb);
+ std::optional<uint16_t> team_number =
+ aos::network::team_number_internal::ParsePiTeamNumber(FLAGS_pi);
+ CHECK(team_number) << ": Invalid pi hostname " << FLAGS_pi
+ << ", failed to parse team number";
+
+ const aos::realtime_clock::time_point realtime_now =
+ aos::realtime_clock::now();
+    camera_calibration_builder.add_node_name(name_offset);
+    camera_calibration_builder.add_team_number(team_number.value());
+    camera_calibration_builder.add_calibration_timestamp(
+        realtime_now.time_since_epoch().count());
+    camera_calibration_builder.add_intrinsics(intrinsics_offset);
+    camera_calibration_builder.add_dist_coeffs(distortion_coefficients_offset);
+
+    fbb.Finish(camera_calibration_builder.Finish());
+
+ aos::FlatbufferDetachedBuffer<sift::CameraCalibration> camera_calibration(
+ fbb.Release());
+ std::stringstream time_ss;
+ time_ss << realtime_now;
+
+ const std::string calibration_filename =
+ FLAGS_calibration_folder +
+ absl::StrFormat("/calibration_pi-%d-%d_%s.json", team_number.value(),
+ pi_number.value(), time_ss.str());
+
+ LOG(INFO) << calibration_filename << " -> "
+ << aos::FlatbufferToJson(camera_calibration, true);
+
+ aos::util::WriteStringToFileOrDie(
+ calibration_filename, aos::FlatbufferToJson(camera_calibration, true));
+ } else {
+ LOG(INFO) << "Skipping calibration due to not enough images.";
+ }
+}
+
+} // namespace
+} // namespace vision
+} // namespace frc971
+
+// Charuco board camera intrinsics calibration tool.
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  frc971::vision::ViewerMain();
+}
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 24fd64d..f38e0fe 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -101,6 +101,9 @@
// This is the standard OpenCV 5 parameter distortion coefficients
dist_coeffs:[float];
+
+ // Timestamp for when the calibration was taken on the realtime clock.
+ calibration_timestamp:int64;
}
// Contains the information the EKF wants from an image matched against a single
diff --git a/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-1_2020-03-21_21-47-32.962241755.json b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-1_2020-03-21_21-47-32.962241755.json
new file mode 100755
index 0000000..62d20f4
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-1_2020-03-21_21-47-32.962241755.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi1",
+ "team_number": 7971,
+ "intrinsics": [
+ 387.439758,
+ 0.0,
+ 358.201599,
+ 0.0,
+ 387.034973,
+ 232.376465,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.140882,
+ -0.237723,
+ -0.000273,
+ 0.000602,
+ 0.084725
+ ],
+ "calibration_timestamp": 1584852452962241755
+}
\ No newline at end of file
diff --git a/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-2_2020-03-21_21-34-48.682329573.json b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-2_2020-03-21_21-34-48.682329573.json
new file mode 100755
index 0000000..67cc05f
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-2_2020-03-21_21-34-48.682329573.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi2",
+ "team_number": 7971,
+ "intrinsics": [
+ 390.917023,
+ 0.0,
+ 338.912598,
+ 0.0,
+ 390.893799,
+ 240.735367,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.133992,
+ -0.22539,
+ -0.000965,
+ -0.000223,
+ 0.067738
+ ],
+ "calibration_timestamp": 1584851688682329573
+}
\ No newline at end of file
diff --git a/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-3_2020-03-21_21-39-10.172513400.json b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-3_2020-03-21_21-39-10.172513400.json
new file mode 100755
index 0000000..081ad95
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-3_2020-03-21_21-39-10.172513400.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi3",
+ "team_number": 7971,
+ "intrinsics": [
+ 389.0737,
+ 0.0,
+ 283.094513,
+ 0.0,
+ 389.047638,
+ 227.70932,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.13731,
+ -0.222632,
+ 0.000651,
+ 0.000001,
+ 0.059589
+ ],
+ "calibration_timestamp": 1584851950172513400
+}
\ No newline at end of file
diff --git a/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-4_2020-03-21_21-44-00.488542608.json b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-4_2020-03-21_21-44-00.488542608.json
new file mode 100755
index 0000000..c3b30da
--- /dev/null
+++ b/y2020/vision/tools/python_code/calib_files/calibration_pi-7971-4_2020-03-21_21-44-00.488542608.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi4",
+ "team_number": 7971,
+ "intrinsics": [
+ 390.216858,
+ 0.0,
+ 291.966309,
+ 0.0,
+ 390.171906,
+ 223.897293,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.137507,
+ -0.221239,
+ -0.000349,
+ 0.000203,
+ 0.057804
+ ],
+ "calibration_timestamp": 1584852240488542608
+}
\ No newline at end of file
diff --git a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.json b/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.json
deleted file mode 100644
index 8c1efbb..0000000
--- a/y2020/vision/tools/python_code/calib_files/cam-calib-int_pi-7971-3_Feb-13-2020-00-00-00.json
+++ /dev/null
@@ -1 +0,0 @@
-{"hostname": "pi-7971-3", "node_name": "pi3", "team_number": 7971, "timestamp": "Feb-13-2020-00-00-00", "camera_matrix": [[388.79947319784713, 0.0, 345.0976031055917], [0.0, 388.539148344188, 265.2780372766764], [0.0, 0.0, 1.0]], "dist_coeffs": [[0.13939139612079282, -0.24345067782097646, -0.0004228219772016648, -0.0004552350162154737, 0.08966339831250879]]}
diff --git a/y2020/vision/tools/python_code/camera_definition.py b/y2020/vision/tools/python_code/camera_definition.py
index b8c71ff..33eb1a9 100644
--- a/y2020/vision/tools/python_code/camera_definition.py
+++ b/y2020/vision/tools/python_code/camera_definition.py
@@ -31,6 +31,7 @@
self.turret_ext = None
self.node_name = ""
self.team_number = -1
+ self.timestamp = 0
def load_camera_definitions():
@@ -91,25 +92,36 @@
dir_name = dtd.bazel_name_fix('calib_files')
for filename in os.listdir(dir_name):
- if "cam-calib-int" in filename and filename.endswith(".json"):
+ glog.debug("Inspecting %s", filename)
+ if ("cam-calib-int" in filename or 'calibration' in filename) and filename.endswith(".json"):
# Extract intrinsics from file
- fn_split = filename.split("_")
- hostname_split = fn_split[1].split("-")
- if hostname_split[0] == "pi":
- team_number = int(hostname_split[1])
- node_name = hostname_split[0] + hostname_split[2]
-
calib_file = open(dir_name + "/" + filename, 'r')
calib_dict = json.loads(calib_file.read())
- hostname = np.asarray(calib_dict["hostname"])
- camera_matrix = np.asarray(calib_dict["camera_matrix"])
- dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
+
+ # We have 2 json formats.
+ # 1) is Jim's custom format.
+ # 2) is the flatbuffer definition of the intrinsics converted to
+ # JSON.
+ # See which one we have and parse accordingly.
+ if 'hostname' in calib_dict:
+ hostname_split = calib_dict["hostname"].split("-")
+ team_number = int(hostname_split[1])
+ node_name = hostname_split[0] + hostname_split[2]
+ camera_matrix = np.asarray(calib_dict["camera_matrix"])
+ dist_coeffs = np.asarray(calib_dict["dist_coeffs"])
+ else:
+ team_number = calib_dict["team_number"]
+ node_name = calib_dict["node_name"]
+ camera_matrix = np.asarray(calib_dict["intrinsics"]).reshape(
+ (3, 3))
+ dist_coeffs = np.asarray(calib_dict["dist_coeffs"]).reshape(
+ (1, 5))
# Look for match, and replace camera_intrinsics
for camera_calib in camera_list:
if camera_calib.node_name == node_name and camera_calib.team_number == team_number:
- glog.info("Found calib for %s, team #%d" %
- (node_name, team_number))
+ glog.info("Found calib for %s, team #%d" % (node_name,
+ team_number))
camera_calib.camera_int.camera_matrix = copy.copy(
camera_matrix)
camera_calib.camera_int.dist_coeffs = copy.copy(
diff --git a/y2020/vision/tools/python_code/load_sift_training.py b/y2020/vision/tools/python_code/load_sift_training.py
index a21a06f..809d748 100644
--- a/y2020/vision/tools/python_code/load_sift_training.py
+++ b/y2020/vision/tools/python_code/load_sift_training.py
@@ -13,6 +13,11 @@
import frc971.vision.sift.TrainingImage as TrainingImage
import frc971.vision.sift.TransformationMatrix as TransformationMatrix
+import gflags
+import glog
+
+FLAGS = gflags.FLAGS
+
# Takes a 3x3 rotation matrix and 3x1 translation vector, and outputs 12
# element list, suitable for outputing to flatbuffer
@@ -65,7 +70,7 @@
target_data_list = target_definition.compute_target_definition()
camera_calib_list = camera_definition.load_camera_definitions()
- glog.info("Writing file to ", output_path)
+ glog.info("Writing file to %s", output_path)
fbb = flatbuffers.Builder(0)
@@ -242,4 +247,6 @@
if __name__ == '__main__':
+ argv = FLAGS(sys.argv)
+ glog.init()
main()