Stripping out SIFT model and just creating calibration data header
Storing this in y2022 for now, though eventually it might make sense
to move it to frc971. But I think camera_reader will stay here.
Change-Id: Iac4d5f3364b0f3f63c298d3902ac66fd50053b55
Signed-off-by: Jim Ostrowski <yimmy13@gmail.com>
diff --git a/y2022/BUILD b/y2022/BUILD
index 2173877..812cac1 100644
--- a/y2022/BUILD
+++ b/y2022/BUILD
@@ -51,6 +51,7 @@
"//aos/network:timestamp_fbs",
"//frc971/input:robot_state_fbs",
"//frc971/vision:vision_fbs",
+ "//y2022/vision:calibration_fbs",
],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
@@ -75,6 +76,7 @@
"//aos/network:timestamp_fbs",
"//aos/network:remote_message_fbs",
"//frc971/vision:vision_fbs",
+ "//y2022/vision:calibration_fbs",
"//y2022/vision:target_estimate_fbs",
],
target_compatible_with = ["@platforms//os:linux"],
@@ -103,6 +105,7 @@
"//aos/network:timestamp_fbs",
"//aos/network:remote_message_fbs",
"//frc971/vision:vision_fbs",
+ "//y2022/vision:calibration_fbs",
],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
diff --git a/y2022/vision/BUILD b/y2022/vision/BUILD
index 0105796..823462b 100644
--- a/y2022/vision/BUILD
+++ b/y2022/vision/BUILD
@@ -1,16 +1,81 @@
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
-cc_binary(
- name = "camera_reader",
- srcs = [
- "camera_reader_main.cc",
- ],
+flatbuffer_cc_library(
+ name = "calibration_fbs",
+ srcs = ["calibration.fbs"],
+ gen_reflections = 1,
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2022:__subpackages__"],
+)
+
+flatbuffer_py_library(
+ name = "calibration_fbs_python",
+ srcs = [
+ "calibration.fbs",
+ ],
+ namespace = "frc971.vision.calibration",
+ tables = [
+ "CalibrationData",
+ "CameraCalibration",
+ "TransformationMatrix",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "camera_definition",
+ srcs = [
+ "camera_definition.py",
+ ],
deps = [
- ":camera_reader_lib",
- "//aos:init",
- "//aos/events:shm_event_loop",
+ "//external:python-glog",
+ ],
+)
+
+py_binary(
+ name = "create_calib_file",
+ srcs = [
+ "create_calib_file.py",
+ ],
+ args = [
+ "calibration_data.h",
+ ],
+ data = glob(["calib_files/*.json"]),
+ target_compatible_with = ["@platforms//os:linux"],
+ deps = [
+ ":camera_definition",
+ "//external:python-glog",
+ "//y2022/vision:calibration_fbs_python",
+ "@bazel_tools//tools/python/runfiles",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ ],
+)
+
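+# Runs create_calib_file to pack the JSON files under calib_files/ into a
+# CalibrationData flatbuffer and emit it as an embedded byte string in
+# calibration_data.h.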
+genrule(
+ name = "run_calibration_data",
+ outs = [
+ "calibration_data.h",
+ ],
+ cmd = " ".join([
+ "$(location :create_calib_file)",
+ "$(location calibration_data.h)",
+ ]),
+ target_compatible_with = ["@platforms//os:linux"],
+ tools = [
+ ":create_calib_file",
+ ],
+)
+
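+# Wraps the generated calibration_data.h so C++ code (e.g. camera_reader) can
+# access the embedded CalibrationData flatbuffer bytes via absl::Span.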
+cc_library(
+ name = "calibration_data",
+ hdrs = [
+ "calibration_data.h",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_google_absl//absl/types:span",
],
)
@@ -29,6 +94,8 @@
visibility = ["//y2022:__subpackages__"],
deps = [
":blob_detector_lib",
+ ":calibration_data",
+ ":calibration_fbs",
":target_estimator_lib",
"//aos:flatbuffer_merge",
"//aos/events:event_loop",
@@ -37,9 +104,20 @@
"//frc971/vision:v4l2_reader",
"//frc971/vision:vision_fbs",
"//third_party:opencv",
- "//y2020/vision/sift:sift_fbs",
- "//y2020/vision/sift:sift_training_fbs",
- "//y2020/vision/tools/python_code:sift_training_data",
+ ],
+)
+
+cc_binary(
+ name = "camera_reader",
+ srcs = [
+ "camera_reader_main.cc",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//y2022:__subpackages__"],
+ deps = [
+ ":camera_reader_lib",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
],
)
diff --git a/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json b/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json
new file mode 100644
index 0000000..39c7911
--- /dev/null
+++ b/y2022/vision/calib_files/calibration_pi-971-1_2021-09-11_17-49-00.000000000.json
@@ -0,0 +1,23 @@
+{
+ "node_name": "pi1",
+ "team_number": 971,
+ "intrinsics": [
+ 392.276093,
+ 0.0,
+ 293.934753,
+ 0.0,
+ 392.30838,
+ 212.287537,
+ 0.0,
+ 0.0,
+ 1.0
+ ],
+ "dist_coeffs": [
+ 0.149561,
+ -0.261432,
+ -0.000182,
+ -0.000697,
+ 0.099255
+ ],
+ "calibration_timestamp": 1597994992500905688
+}
diff --git a/y2022/vision/calibration.fbs b/y2022/vision/calibration.fbs
new file mode 100644
index 0000000..f5e30a6
--- /dev/null
+++ b/y2022/vision/calibration.fbs
@@ -0,0 +1,55 @@
+namespace frc971.vision.calibration;
+
+table TransformationMatrix {
+ // The matrix data for a row-major 4x4 homogeneous transformation matrix.
+ // This implies the bottom row is (0, 0, 0, 1).
+ data:[float] (id: 0);
+}
+
+// Calibration information for a given camera on a given robot.
+table CameraCalibration {
+ // The name of the camera node which this calibration data applies to.
+ node_name:string (id: 0);
+ // The team number of the robot which this calibration data applies to.
+ team_number:int (id: 1);
+
+ // Intrinsics for the camera.
+ //
+ // This is the standard OpenCV intrinsics matrix in row major order (3x3).
+ intrinsics:[float] (id: 2);
+
+ // Fixed extrinsics for the camera. This transforms from camera coordinates to
+ // robot coordinates. For example: multiplying (0, 0, 0, 1) by this results in
+ // the position of the camera aperture in robot coordinates.
+ fixed_extrinsics:TransformationMatrix (id: 3);
+
+ // Extrinsics for a camera on a turret. This will only be filled out for
+ // applicable cameras. For turret-mounted cameras, fixed_extrinsics defines
+ // a position for the center of rotation of the turret, and this field defines
+ // a position for the camera on the turret.
+ //
+  // The split between the two transformations is underdetermined: for any
+  // given turret position, nothing distinguishes which part of the final
+  // extrinsics comes from fixed_extrinsics and which from turret_extrinsics.
+ //
+ // To get the final extrinsics for a camera using this transformation,
+ // multiply (in order):
+ // fixed_extrinsics
+ // rotation around the Z axis by the turret angle
+ // turret_extrinsics
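+  // i.e., extrinsics = fixed_extrinsics * Rz(turret_angle) * turret_extrinsics.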
+ turret_extrinsics:TransformationMatrix (id: 4);
+
+  // These are the standard OpenCV 5-parameter distortion coefficients (k1, k2, p1, p2, k3).
+ dist_coeffs:[float] (id: 5);
+
+  // Timestamp (on the realtime clock) for when the calibration was taken.
+ calibration_timestamp:int64 (id: 6);
+}
+
+// Calibration information for all the cameras we know about.
+table CalibrationData {
+ camera_calibrations:[CameraCalibration] (id: 0);
+}
+
+
+root_type CalibrationData;
diff --git a/y2022/vision/camera_definition.py b/y2022/vision/camera_definition.py
new file mode 100644
index 0000000..a45c704
--- /dev/null
+++ b/y2022/vision/camera_definition.py
@@ -0,0 +1,165 @@
+import copy
+import glog
+import json
+import math
+import numpy as np
+import os
+
+glog.setLevel("INFO")
+
+
+# Quick fix to naming games that happen with bazel
+def bazel_name_fix(filename):
+ ret_name = filename
+ try:
+ from bazel_tools.tools.python.runfiles import runfiles
+ r = runfiles.Create()
+ ret_name = r.Rlocation('org_frc971/y2022/vision/' + filename)
+ #print("Trying directory: ", ret_name)
+    except Exception:
+ print("Failed bazel_name_fix")
+ pass
+
+ ### TODO<Jim>: Need to figure out why this isn't working
+ ### Hardcoding for now
+    if ret_name is None:
+ ret_name = '/home/jim/code/FRC/971-Robot-Code/y2022/vision/calib_files'
+ return ret_name
+
+
+class CameraIntrinsics:
+ def __init__(self):
+ self.camera_matrix = []
+ self.dist_coeffs = []
+
+
+class CameraExtrinsics:
+ def __init__(self):
+ self.R = []
+ self.T = []
+
+
+class CameraParameters:
+ def __init__(self):
+ self.camera_int = CameraIntrinsics()
+ self.camera_ext = CameraExtrinsics()
+ self.turret_ext = None
+ self.node_name = ""
+ self.team_number = -1
+ self.timestamp = 0
+
+
+def compute_extrinsic(camera_pitch, camera_yaw, T_camera, is_turret):
+    # Compute the extrinsic calibration based on pitch, yaw, and translation
+ # Includes camera rotation from robot x,y,z to opencv (z, -x, -y)
+
+ # Also, handle extrinsics for the turret
+ # The basic camera pose is relative to the center, base of the turret
+ # TODO<Jim>: Maybe store these to .json files, like with intrinsics?
+ base_cam_ext = CameraExtrinsics()
+ turret_cam_ext = CameraExtrinsics()
+
+ camera_pitch_matrix = np.array(
+ [[np.cos(camera_pitch), 0.0,
+ np.sin(camera_pitch)], [0.0, 1.0, 0.0],
+ [-np.sin(camera_pitch), 0.0,
+ np.cos(camera_pitch)]])
+
+ camera_yaw_matrix = np.array(
+ [[np.cos(camera_yaw), -np.sin(camera_yaw), 0.0],
+ [np.sin(camera_yaw), np.cos(camera_yaw), 0.0], [0.0, 0.0, 1.0]])
+
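+    # This maps camera-frame (OpenCV) vectors into the robot frame:
+    # robot x = camera z, robot y = -camera x, robot z = -camera y.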
+ robot_to_camera_rotation = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1.,
+ 0]])
+
+ if is_turret:
+ # Turret camera has default heading 180 deg away from the robot x
+ base_cam_ext.R = np.array([[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0],
+ [0.0, 0.0, 1.0]])
+ base_cam_ext.T = np.array([0.0, 0.0, 0.0])
+ turret_cam_ext.R = camera_yaw_matrix @ camera_pitch_matrix @ robot_to_camera_rotation
+ turret_cam_ext.T = T_camera
+ else:
+ base_cam_ext.R = camera_yaw_matrix @ camera_pitch_matrix @ robot_to_camera_rotation
+ base_cam_ext.T = T_camera
+ turret_cam_ext = None
+
+ return base_cam_ext, turret_cam_ext
+
+
+def compute_extrinsic_by_pi(pi_number):
+ # Defaults for non-turret camera
+ camera_pitch = -20.0 * np.pi / 180.0
+ camera_yaw = 0.0
+ is_turret = False
+ # Default camera location to robot origin
+ T = np.array([0.0, 0.0, 0.0])
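+    # Camera offsets below are specified in inches and converted to meters
+    # (1 inch = 0.0254 m).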
+
+ if pi_number == "pi1":
+ # This is the turret camera
+ camera_pitch = -10.0 * np.pi / 180.0
+ is_turret = True
+ T = np.array([7.5 * 0.0254, -5.5 * 0.0254, 41.0 * 0.0254])
+ elif pi_number == "pi2":
+ T = np.array([4.5 * 0.0254, 3.75 * 0.0254, 26.0 * 0.0254])
+ elif pi_number == "pi3":
+ camera_yaw = 90.0 * np.pi / 180.0
+ T = np.array([-2.75 * 0.0254, 5.25 * 0.0254, 25.25 * 0.0254])
+ elif pi_number == "pi4":
+ camera_yaw = -90.0 * np.pi / 180.0
+ T = np.array([-2.75 * 0.0254, -6 * 0.0254, 26 * 0.0254])
+ elif pi_number == "pi5":
+ camera_yaw = 180.0 * np.pi / 180.0
+ T = np.array([-7.25 * 0.0254, -4.24 * 0.0254, 16.5 * 0.0254])
+
+ return compute_extrinsic(camera_pitch, camera_yaw, T, is_turret)
+
+
+def load_camera_definitions():
+ ### CAMERA DEFINITIONS
+ # We only load in cameras that have a calibration file
+ # These are stored in y2022/vision/calib_files
+ #
+ # Or better yet, use //y2020/vision:calibration to calibrate the camera
+ # using a Charuco target board
+
+ camera_list = []
+
+ dir_name = bazel_name_fix('calib_files')
+ if dir_name is not None:
+ glog.debug("Searching for calibration files in " + dir_name)
+ else:
+ glog.fatal("Failed to find calib_files directory")
+
+ for filename in sorted(os.listdir(dir_name)):
+ glog.debug("Inspecting %s", filename)
+ if ("cam-calib-int" in filename
+ or 'calibration' in filename) and filename.endswith(".json"):
+
+ # Extract intrinsics from file
+            with open(dir_name + "/" + filename, 'r') as calib_file:
+                calib_dict = json.loads(calib_file.read())
+
+ team_number = calib_dict["team_number"]
+ node_name = calib_dict["node_name"]
+ camera_matrix = np.asarray(calib_dict["intrinsics"]).reshape(
+ (3, 3))
+ dist_coeffs = np.asarray(calib_dict["dist_coeffs"]).reshape((1, 5))
+
+ glog.debug("Found calib for " + node_name + ", team #" +
+ str(team_number))
+
+ camera_params = CameraParameters()
+ # TODO: Need to add reading in extrinsic camera parameters from json
+ camera_params.camera_ext, camera_params.turret_ext = compute_extrinsic_by_pi(
+ node_name)
+
+ camera_params.node_name = node_name
+ camera_params.team_number = team_number
+ camera_params.camera_int.camera_matrix = copy.copy(camera_matrix)
+ camera_params.camera_int.dist_coeffs = copy.copy(dist_coeffs)
+ camera_list.append(camera_params)
+
+ return camera_list
diff --git a/y2022/vision/camera_reader.cc b/y2022/vision/camera_reader.cc
index 4ac6aad..31411ca 100644
--- a/y2022/vision/camera_reader.cc
+++ b/y2022/vision/camera_reader.cc
@@ -10,10 +10,8 @@
#include "aos/network/team_number.h"
#include "frc971/vision/v4l2_reader.h"
#include "frc971/vision/vision_generated.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/tools/python_code/sift_training_data.h"
#include "y2022/vision/blob_detector.h"
+#include "y2022/vision/calibration_generated.h"
#include "y2022/vision/target_estimator.h"
DEFINE_string(image_png, "", "A set of PNG images");
@@ -23,11 +21,12 @@
using namespace frc971::vision;
-const sift::CameraCalibration *CameraReader::FindCameraCalibration() const {
+const calibration::CameraCalibration *CameraReader::FindCameraCalibration()
+ const {
const std::string_view node_name = event_loop_->node()->name()->string_view();
const int team_number = aos::network::GetTeamNumber();
- for (const sift::CameraCalibration *candidate :
- *training_data_->camera_calibrations()) {
+ for (const calibration::CameraCalibration *candidate :
+ *calibration_data_->camera_calibrations()) {
if (candidate->node_name()->string_view() != node_name) {
continue;
}
diff --git a/y2022/vision/camera_reader.h b/y2022/vision/camera_reader.h
index 78abaa5..2a5c729 100644
--- a/y2022/vision/camera_reader.h
+++ b/y2022/vision/camera_reader.h
@@ -13,9 +13,8 @@
#include "aos/network/team_number.h"
#include "frc971/vision/v4l2_reader.h"
#include "frc971/vision/vision_generated.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/tools/python_code/sift_training_data.h"
+#include "y2022/vision/calibration_data.h"
+#include "y2022/vision/calibration_generated.h"
#include "y2022/vision/target_estimate_generated.h"
namespace y2022 {
@@ -28,9 +27,10 @@
class CameraReader {
public:
CameraReader(aos::ShmEventLoop *event_loop,
- const sift::TrainingData *training_data, V4L2Reader *reader)
+ const calibration::CalibrationData *calibration_data,
+ V4L2Reader *reader)
: event_loop_(event_loop),
- training_data_(training_data),
+ calibration_data_(calibration_data),
camera_calibration_(FindCameraCalibration()),
reader_(reader),
image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
@@ -42,7 +42,7 @@
}
private:
- const sift::CameraCalibration *FindCameraCalibration() const;
+ const calibration::CameraCalibration *FindCameraCalibration() const;
// Processes an image (including sending the results).
void ProcessImage(const cv::Mat &image);
@@ -77,8 +77,8 @@
}
aos::ShmEventLoop *const event_loop_;
- const sift::TrainingData *const training_data_;
- const sift::CameraCalibration *const camera_calibration_;
+ const calibration::CalibrationData *const calibration_data_;
+ const calibration::CameraCalibration *const camera_calibration_;
V4L2Reader *const reader_;
aos::Sender<CameraImage> image_sender_;
aos::Sender<TargetEstimate> target_estimate_sender_;
diff --git a/y2022/vision/camera_reader_main.cc b/y2022/vision/camera_reader_main.cc
index 6f65a6d..f1c5fdf 100644
--- a/y2022/vision/camera_reader_main.cc
+++ b/y2022/vision/camera_reader_main.cc
@@ -18,24 +18,24 @@
aos::FlatbufferDetachedBuffer<aos::Configuration> config =
aos::configuration::ReadConfig(FLAGS_config);
- const aos::FlatbufferSpan<sift::TrainingData> training_data(
- SiftTrainingData());
- CHECK(training_data.Verify());
+ const aos::FlatbufferSpan<calibration::CalibrationData> calibration_data(
+ CalibrationData());
+ CHECK(calibration_data.Verify());
aos::ShmEventLoop event_loop(&config.message());
// First, log the data for future reference.
{
- aos::Sender<sift::TrainingData> training_data_sender =
- event_loop.MakeSender<sift::TrainingData>("/camera");
- CHECK_EQ(training_data_sender.Send(training_data),
+ aos::Sender<calibration::CalibrationData> calibration_data_sender =
+ event_loop.MakeSender<calibration::CalibrationData>("/camera");
+ CHECK_EQ(calibration_data_sender.Send(calibration_data),
aos::RawSender::Error::kOk);
}
V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
v4l2_reader.SetExposure(FLAGS_exposure);
- CameraReader camera_reader(&event_loop, &training_data.message(),
+ CameraReader camera_reader(&event_loop, &calibration_data.message(),
&v4l2_reader);
event_loop.Run();
diff --git a/y2022/vision/create_calib_file.py b/y2022/vision/create_calib_file.py
new file mode 100644
index 0000000..de9f65d
--- /dev/null
+++ b/y2022/vision/create_calib_file.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python3
+
+import camera_definition
+import cv2
+import flatbuffers
+import glog
+import numpy as np
+import sys
+
+import frc971.vision.calibration.CalibrationData as CalibrationData
+import frc971.vision.calibration.CameraCalibration as CameraCalibration
+import frc971.vision.calibration.TransformationMatrix as TransformationMatrix
+
+import gflags
+
+FLAGS = gflags.FLAGS
+
+
+# Takes a 3x3 rotation matrix and 3x1 translation vector, and outputs a
+# 16-element list (row-major 4x4 homogeneous matrix), suitable for writing
+# to a flatbuffer
+def rot_and_trans_to_list(R, T):
+ output_list = []
+ for row in range(3):
+ for col in range(3):
+ output_list.append(R[row][col])
+ output_list.append(T[row])
+
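+    # Append the fixed bottom row (0, 0, 0, 1) of the homogeneous matrix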
+ output_list = output_list + [0., 0., 0., 1.]
+ return output_list
+
+
+def list_to_transformation_matrix(values, fbb):
+ """Puts a list of values into an FBB TransformationMatrix."""
+
+ TransformationMatrix.TransformationMatrixStartDataVector(fbb, len(values))
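+    # The flatbuffers builder constructs vectors back to front, so prepend the
+    # values in reverse to keep them in order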
+ for n in reversed(values):
+ fbb.PrependFloat32(n)
+ list_offset = fbb.EndVector(len(values))
+
+ TransformationMatrix.TransformationMatrixStart(fbb)
+ TransformationMatrix.TransformationMatrixAddData(fbb, list_offset)
+ return TransformationMatrix.TransformationMatrixEnd(fbb)
+
+
+def main():
+
+ camera_calib_list = None
+
+ output_path = sys.argv[1]
+
+ camera_calib_list = camera_definition.load_camera_definitions()
+
+ glog.debug("Writing file to %s", output_path)
+
+ fbb = flatbuffers.Builder(0)
+
+ # Create camera calibration data
+ camera_calibration_vector = []
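+    # Strings, vectors, and sub-tables must be serialized before calling
+    # CameraCalibrationStart, since flatbuffers does not allow building
+    # nested objects inside an open table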
+ for camera_calib in camera_calib_list:
+ fixed_extrinsics_list = rot_and_trans_to_list(
+ camera_calib.camera_ext.R, camera_calib.camera_ext.T)
+ fixed_extrinsics_vector = list_to_transformation_matrix(
+ fixed_extrinsics_list, fbb)
+
+ turret_extrinsics_vector = None
+ if camera_calib.turret_ext is not None:
+ turret_extrinsics_list = rot_and_trans_to_list(
+ camera_calib.turret_ext.R, camera_calib.turret_ext.T)
+ turret_extrinsics_vector = list_to_transformation_matrix(
+ turret_extrinsics_list, fbb)
+
+ camera_int_list = camera_calib.camera_int.camera_matrix.ravel().tolist(
+ )
+ CameraCalibration.CameraCalibrationStartIntrinsicsVector(
+ fbb, len(camera_int_list))
+ for n in reversed(camera_int_list):
+ fbb.PrependFloat32(n)
+ intrinsics_vector = fbb.EndVector(len(camera_int_list))
+
+ dist_coeffs_list = camera_calib.camera_int.dist_coeffs.ravel().tolist()
+ CameraCalibration.CameraCalibrationStartDistCoeffsVector(
+ fbb, len(dist_coeffs_list))
+ for n in reversed(dist_coeffs_list):
+ fbb.PrependFloat32(n)
+ dist_coeffs_vector = fbb.EndVector(len(dist_coeffs_list))
+
+ node_name_offset = fbb.CreateString(camera_calib.node_name)
+ CameraCalibration.CameraCalibrationStart(fbb)
+ CameraCalibration.CameraCalibrationAddNodeName(fbb, node_name_offset)
+ CameraCalibration.CameraCalibrationAddTeamNumber(
+ fbb, camera_calib.team_number)
+ CameraCalibration.CameraCalibrationAddIntrinsics(
+ fbb, intrinsics_vector)
+ CameraCalibration.CameraCalibrationAddDistCoeffs(
+ fbb, dist_coeffs_vector)
+ CameraCalibration.CameraCalibrationAddFixedExtrinsics(
+ fbb, fixed_extrinsics_vector)
+ if turret_extrinsics_vector is not None:
+ CameraCalibration.CameraCalibrationAddTurretExtrinsics(
+ fbb, turret_extrinsics_vector)
+ camera_calibration_vector.append(
+ CameraCalibration.CameraCalibrationEnd(fbb))
+
+ CalibrationData.CalibrationDataStartCameraCalibrationsVector(
+ fbb, len(camera_calibration_vector))
+ for camera_calibration in reversed(camera_calibration_vector):
+ fbb.PrependUOffsetTRelative(camera_calibration)
+ camera_calibration_vector_table = fbb.EndVector(
+ len(camera_calibration_vector))
+
+    # Fill out CalibrationData
+ CalibrationData.CalibrationDataStart(fbb)
+ CalibrationData.CalibrationDataAddCameraCalibrations(
+ fbb, camera_calibration_vector_table)
+ fbb.Finish(CalibrationData.CalibrationDataEnd(fbb))
+
+ bfbs = fbb.Output()
+
+ output_prefix = [
+ b'#ifndef Y2022_VISION_CALIBRATION_DATA_H_',
+ b'#define Y2022_VISION_CALIBRATION_DATA_H_',
+ b'#include <stdint.h>',
+ b'#include "absl/types/span.h"',
+ b'namespace frc971 {',
+ b'namespace vision {',
+ b'inline absl::Span<const uint8_t> CalibrationData() {',
+ ]
+ output_suffix = [
+ b' return absl::Span<const uint8_t>(reinterpret_cast<const uint8_t *>(kData), sizeof(kData));',
+ b'}',
+ b'} // namespace vision',
+ b'} // namespace frc971',
+ b'#endif // Y2022_VISION_CALIBRATION_DATA_H_',
+ ]
+
+ # Write out the header file
+ with open(output_path, 'wb') as output:
+ for line in output_prefix:
+ output.write(line)
+ output.write(b'\n')
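+        # Embed the serialized flatbuffer as \xNN escapes inside a C++ string
+        # literal so the binary data can live directly in the header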
+ output.write(b'alignas(64) static constexpr char kData[] = "')
+        for byte in bfbs:
+ output.write(b'\\x' + (b'%x' % byte).zfill(2))
+ output.write(b'";\n')
+ for line in output_suffix:
+ output.write(line)
+ output.write(b'\n')
+
+
+if __name__ == '__main__':
+ argv = FLAGS(sys.argv)
+ glog.init()
+ main()
diff --git a/y2022/y2022_pi_template.json b/y2022/y2022_pi_template.json
index 4994a36..823a7f5 100644
--- a/y2022/y2022_pi_template.json
+++ b/y2022/y2022_pi_template.json
@@ -100,6 +100,13 @@
},
{
"name": "/pi{{ NUM }}/camera",
+ "type": "frc971.vision.calibration.CalibrationData",
+ "source_node": "pi{{ NUM }}",
+ "frequency": 2,
+ "max_size": 100000
+ },
+ {
+ "name": "/pi{{ NUM }}/camera",
"type": "y2022.vision.TargetEstimate",
"source_node": "pi{{ NUM }}",
"frequency": 25,