Move v4l2_reader into frc971/vision and fix dependencies
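
This moves the V4L2 camera reader and the CameraImage flatbuffer
(vision.fbs) out of y2020/vision and into frc971/vision so they are no
longer tied to a single year's code, and updates all dependents. The code
stays in the frc971::vision namespace; only BUILD targets and include
paths change. As a rough sketch of what a consumer looks like after this
change (the target and header paths are the ones introduced in this diff;
the surrounding rules are elided):

    deps = [
        "//frc971/vision:v4l2_reader",
        "//frc971/vision:vision_fbs",
    ],

    #include "frc971/vision/v4l2_reader.h"
    #include "frc971/vision/vision_generated.h"
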
Change-Id: I25e821b7fb77a6c183dfeb697b81c771cd5d2339
Signed-off-by: Jim Ostrowski <yimmy13@gmail.com>
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 330848a..5156b59 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -1,12 +1,4 @@
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_ts_library")
-
-flatbuffer_cc_library(
- name = "vision_fbs",
- srcs = ["vision.fbs"],
- gen_reflections = 1,
- target_compatible_with = ["@platforms//os:linux"],
- visibility = ["//y2020:__subpackages__"] + ["//y2022:__subpackages__"],
-)
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
flatbuffer_cc_library(
name = "galactic_search_path_fbs",
@@ -16,25 +8,6 @@
visibility = ["//y2020:__subpackages__"],
)
-cc_library(
- name = "v4l2_reader",
- srcs = [
- "v4l2_reader.cc",
- ],
- hdrs = [
- "v4l2_reader.h",
- ],
- target_compatible_with = ["@platforms//os:linux"],
- visibility = ["//y2020:__subpackages__"],
- deps = [
- ":vision_fbs",
- "//aos/events:event_loop",
- "//aos/scoped:scoped_fd",
- "@com_github_google_glog//:glog",
- "@com_google_absl//absl/base",
- ],
-)
-
cc_binary(
name = "camera_reader",
srcs = [
@@ -63,11 +36,11 @@
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2020:__subpackages__"] + ["//y2022:__subpackages__"],
deps = [
- ":v4l2_reader",
- ":vision_fbs",
"//aos:flatbuffer_merge",
"//aos/events:event_loop",
"//aos/network:team_number",
+ "//frc971/vision:v4l2_reader",
+ "//frc971/vision:vision_fbs",
"//third_party:opencv",
"//y2020/vision/sift:sift971",
"//y2020/vision/sift:sift_fbs",
@@ -76,13 +49,6 @@
],
)
-flatbuffer_ts_library(
- name = "vision_ts_fbs",
- srcs = ["vision.fbs"],
- target_compatible_with = ["@platforms//os:linux"],
- visibility = ["//y2020:__subpackages__"],
-)
-
cc_binary(
name = "viewer",
srcs = [
@@ -94,9 +60,10 @@
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2020:__subpackages__"],
deps = [
- ":vision_fbs",
"//aos:init",
"//aos/events:shm_event_loop",
+ "//frc971/vision:v4l2_reader",
+ "//frc971/vision:vision_fbs",
"//third_party:opencv",
"//y2020/vision/sift:sift_fbs",
],
@@ -113,12 +80,12 @@
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2020:__subpackages__"],
deps = [
- ":vision_fbs",
"//aos:flatbuffers",
"//aos/events:event_loop",
"//aos/network:message_bridge_server_fbs",
"//aos/network:team_number",
"//frc971/control_loops:quaternion_utils",
+ "//frc971/vision:vision_fbs",
"//third_party:opencv",
"//y2020/vision/sift:sift_fbs",
"//y2020/vision/sift:sift_training_fbs",
@@ -142,10 +109,10 @@
visibility = ["//y2020:__subpackages__"],
deps = [
":charuco_lib",
- ":vision_fbs",
"//aos:init",
"//aos/events:shm_event_loop",
"//frc971/control_loops/drivetrain:improved_down_estimator",
+ "//frc971/vision:vision_fbs",
"//frc971/wpilib:imu_batch_fbs",
"//frc971/wpilib:imu_fbs",
"//third_party:opencv",
@@ -168,10 +135,10 @@
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//y2020:__subpackages__"],
deps = [
- ":vision_fbs",
"//aos:init",
"//aos/events:simulated_event_loop",
"//aos/events/logging:log_reader",
+ "//frc971/vision:vision_fbs",
"//third_party:opencv",
],
)
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 1e28e82..9966a5b 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -9,12 +9,12 @@
#include "aos/events/event_loop.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
+#include "frc971/vision/v4l2_reader.h"
+#include "frc971/vision/vision_generated.h"
#include "y2020/vision/sift/sift971.h"
#include "y2020/vision/sift/sift_generated.h"
#include "y2020/vision/sift/sift_training_generated.h"
#include "y2020/vision/tools/python_code/sift_training_data.h"
-#include "y2020/vision/v4l2_reader.h"
-#include "y2020/vision/vision_generated.h"
DEFINE_bool(skip_sift, false,
"If true don't run any feature extraction. Just forward images.");
diff --git a/y2020/vision/camera_reader.h b/y2020/vision/camera_reader.h
index c05ebea..37fb5a9 100644
--- a/y2020/vision/camera_reader.h
+++ b/y2020/vision/camera_reader.h
@@ -10,12 +10,12 @@
#include "aos/events/event_loop.h"
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
+#include "frc971/vision/v4l2_reader.h"
+#include "frc971/vision/vision_generated.h"
#include "y2020/vision/sift/sift971.h"
#include "y2020/vision/sift/sift_generated.h"
#include "y2020/vision/sift/sift_training_generated.h"
#include "y2020/vision/tools/python_code/sift_training_data.h"
-#include "y2020/vision/v4l2_reader.h"
-#include "y2020/vision/vision_generated.h"
namespace frc971 {
namespace vision {
diff --git a/y2020/vision/charuco_lib.cc b/y2020/vision/charuco_lib.cc
index 21bdcc3..0df820b 100644
--- a/y2020/vision/charuco_lib.cc
+++ b/y2020/vision/charuco_lib.cc
@@ -11,11 +11,11 @@
#include "aos/flatbuffers.h"
#include "aos/network/team_number.h"
#include "frc971/control_loops/quaternion_utils.h"
+#include "frc971/vision/vision_generated.h"
#include "glog/logging.h"
#include "y2020/vision/sift/sift_generated.h"
#include "y2020/vision/sift/sift_training_generated.h"
#include "y2020/vision/tools/python_code/sift_training_data.h"
-#include "y2020/vision/vision_generated.h"
DEFINE_uint32(min_targets, 10,
"The mininum number of targets required to match.");
@@ -133,8 +133,7 @@
const monotonic_clock::time_point eof = eof_source_node - offset;
- const monotonic_clock::duration age =
- event_loop_->monotonic_now() - eof;
+ const monotonic_clock::duration age = event_loop_->monotonic_now() - eof;
const double age_double =
std::chrono::duration_cast<std::chrono::duration<double>>(age).count();
if (age > std::chrono::milliseconds(100)) {
diff --git a/y2020/vision/extrinsics_calibration.cc b/y2020/vision/extrinsics_calibration.cc
index 1d4de28..c7ef752 100644
--- a/y2020/vision/extrinsics_calibration.cc
+++ b/y2020/vision/extrinsics_calibration.cc
@@ -12,13 +12,13 @@
#include "frc971/analysis/in_process_plotter.h"
#include "frc971/control_loops/drivetrain/improved_down_estimator.h"
#include "frc971/control_loops/quaternion_utils.h"
+#include "frc971/vision/vision_generated.h"
#include "frc971/wpilib/imu_batch_generated.h"
#include "y2020/vision/calibration_accumulator.h"
#include "y2020/vision/charuco_lib.h"
#include "y2020/vision/sift/sift_generated.h"
#include "y2020/vision/sift/sift_training_generated.h"
#include "y2020/vision/tools/python_code/sift_training_data.h"
-#include "y2020/vision/vision_generated.h"
DEFINE_string(config, "config.json", "Path to the config file to use.");
DEFINE_string(pi, "pi-7971-2", "Pi name to calibrate.");
@@ -120,7 +120,7 @@
const Eigen::Quaternion<Scalar> &orientation() const { return orientation_; }
- std::vector<Eigen::Matrix<Scalar, 3, 1> > errors_;
+ std::vector<Eigen::Matrix<Scalar, 3, 1>> errors_;
// Returns the angular errors for each camera sample.
size_t num_errors() const { return errors_.size(); }
@@ -136,18 +136,18 @@
result.template block<4, 1>(0, 0) = q.coeffs();
result.template block<6, 1>(4, 0) = x_hat;
result.template block<36, 1>(10, 0) =
- Eigen::Map<Eigen::Matrix<Scalar, 36, 1> >(p.data(), p.size());
+ Eigen::Map<Eigen::Matrix<Scalar, 36, 1>>(p.data(), p.size());
return result;
}
std::tuple<Eigen::Quaternion<Scalar>, Eigen::Matrix<Scalar, 6, 1>,
- Eigen::Matrix<Scalar, 6, 6> >
+ Eigen::Matrix<Scalar, 6, 6>>
UnPack(Eigen::Matrix<Scalar, 46, 1> input) {
Eigen::Quaternion<Scalar> q(input.template block<4, 1>(0, 0));
Eigen::Matrix<Scalar, 6, 1> x_hat(input.template block<6, 1>(4, 0));
Eigen::Matrix<Scalar, 6, 6> p =
- Eigen::Map<Eigen::Matrix<Scalar, 6, 6> >(input.data() + 10, 6, 6);
+ Eigen::Map<Eigen::Matrix<Scalar, 6, 6>>(input.data() + 10, 6, 6);
return std::make_tuple(q, x_hat, p);
}
@@ -361,8 +361,8 @@
std::vector<double> imu_ratez;
std::vector<double> times_;
- std::vector<Eigen::Matrix<double, 6, 1> > x_hats_;
- std::vector<Eigen::Quaternion<double> > orientations_;
+ std::vector<Eigen::Matrix<double, 6, 1>> x_hats_;
+ std::vector<Eigen::Quaternion<double>> orientations_;
Eigen::Matrix<double, 3, 1> last_accel_ = Eigen::Matrix<double, 3, 1>::Zero();
};
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index b495658..d259ba7 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -192,8 +192,8 @@
deps = [
":sift_training_data_test",
"//aos/testing:googletest",
+ "//frc971/vision:vision_fbs",
"//third_party:opencv",
- "//y2020/vision:vision_fbs",
"//y2020/vision/sift:sift_fbs",
"//y2020/vision/sift:sift_training_fbs",
],
diff --git a/y2020/vision/tools/python_code/camera_param_test.cc b/y2020/vision/tools/python_code/camera_param_test.cc
index 483fe75..a2d3a75 100644
--- a/y2020/vision/tools/python_code/camera_param_test.cc
+++ b/y2020/vision/tools/python_code/camera_param_test.cc
@@ -12,9 +12,9 @@
#include "y2020/vision/tools/python_code/sift_training_data.h"
#endif
+#include "frc971/vision/vision_generated.h"
#include "y2020/vision/sift/sift_generated.h"
#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/vision_generated.h"
namespace frc971 {
namespace vision {
diff --git a/y2020/vision/v4l2_reader.cc b/y2020/vision/v4l2_reader.cc
deleted file mode 100644
index 3f24f1e..0000000
--- a/y2020/vision/v4l2_reader.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-#include "y2020/vision/v4l2_reader.h"
-
-#include <fcntl.h>
-#include <linux/videodev2.h>
-#include <sys/ioctl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-
-DEFINE_bool(ignore_timestamps, false,
- "Don't require timestamps on images. Used to allow webcams");
-
-namespace frc971 {
-namespace vision {
-
-V4L2Reader::V4L2Reader(aos::EventLoop *event_loop,
- const std::string &device_name)
- : fd_(open(device_name.c_str(), O_RDWR | O_NONBLOCK)) {
- PCHECK(fd_.get() != -1);
-
- // First, clean up after anybody else who left the device streaming.
- StreamOff();
-
- {
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- format.fmt.pix.width = cols_;
- format.fmt.pix.height = rows_;
- format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
- // This means we want to capture from a progressive (non-interlaced) source.
- format.fmt.pix.field = V4L2_FIELD_NONE;
- PCHECK(Ioctl(VIDIOC_S_FMT, &format) == 0);
- CHECK_EQ(static_cast<int>(format.fmt.pix.width), cols_);
- CHECK_EQ(static_cast<int>(format.fmt.pix.height), rows_);
- CHECK_EQ(static_cast<int>(format.fmt.pix.bytesperline),
- cols_ * 2 /* bytes per pixel */);
- CHECK_EQ(format.fmt.pix.sizeimage, ImageSize());
- }
-
- {
- struct v4l2_requestbuffers request;
- memset(&request, 0, sizeof(request));
- request.count = buffers_.size();
- request.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- request.memory = V4L2_MEMORY_USERPTR;
- PCHECK(Ioctl(VIDIOC_REQBUFS, &request) == 0);
- CHECK_EQ(request.count, buffers_.size())
- << ": Kernel refused to give us the number of buffers we asked for";
- }
-
- for (size_t i = 0; i < buffers_.size(); ++i) {
- buffers_[i].sender = event_loop->MakeSender<CameraImage>("/camera");
- EnqueueBuffer(i);
- }
-
- {
- int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- PCHECK(Ioctl(VIDIOC_STREAMON, &type) == 0);
- }
-}
-
-bool V4L2Reader::ReadLatestImage() {
- // First, enqueue any old buffer we already have. This is the one which may
- // have been sent.
- if (saved_buffer_) {
- EnqueueBuffer(saved_buffer_.index);
- saved_buffer_.Clear();
- }
- while (true) {
- const BufferInfo previous_buffer = saved_buffer_;
- saved_buffer_ = DequeueBuffer();
- if (saved_buffer_) {
- // We got a new buffer. Return the previous one (if relevant) and keep
- // going.
- if (previous_buffer) {
- EnqueueBuffer(previous_buffer.index);
- }
- continue;
- }
- if (!previous_buffer) {
- // There were no images to read. Return an indication of that.
- return false;
- }
- // We didn't get a new one, but we already got one in a previous
- // iteration, which means we found an image so return it.
- saved_buffer_ = previous_buffer;
- buffers_[saved_buffer_.index].PrepareMessage(rows_, cols_, ImageSize(),
- saved_buffer_.monotonic_eof);
- return true;
- }
-}
-
-void V4L2Reader::SendLatestImage() { buffers_[saved_buffer_.index].Send(); }
-
-void V4L2Reader::SetExposure(size_t duration) {
- v4l2_control manual_control;
- manual_control.id = V4L2_CID_EXPOSURE_AUTO;
- manual_control.value = V4L2_EXPOSURE_MANUAL;
- PCHECK(Ioctl(VIDIOC_S_CTRL, &manual_control) == 0);
-
- v4l2_control exposure_control;
- exposure_control.id = V4L2_CID_EXPOSURE_ABSOLUTE;
- exposure_control.value = static_cast<int>(duration); // 100 micro s units
- PCHECK(Ioctl(VIDIOC_S_CTRL, &exposure_control) == 0);
-}
-
-void V4L2Reader::UseAutoExposure() {
- v4l2_control control;
- control.id = V4L2_CID_EXPOSURE_AUTO;
- control.value = V4L2_EXPOSURE_AUTO;
- PCHECK(Ioctl(VIDIOC_S_CTRL, &control) == 0);
-}
-
-void V4L2Reader::Buffer::InitializeMessage(size_t max_image_size) {
- message_offset = flatbuffers::Offset<CameraImage>();
- builder = aos::Sender<CameraImage>::Builder();
- builder = sender.MakeBuilder();
- // The kernel has an undocumented requirement that the buffer is aligned
- // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
- // and only print something in dmesg with the relevant dynamic debug
- // prints turned on.
- builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64, &data_pointer);
- CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
- << ": Flatbuffers failed to align things as requested";
-}
-
-void V4L2Reader::Buffer::PrepareMessage(
- int rows, int cols, size_t image_size,
- aos::monotonic_clock::time_point monotonic_eof) {
- CHECK(data_pointer != nullptr);
- data_pointer = nullptr;
-
- const auto data_offset = builder.fbb()->EndIndeterminateVector(image_size, 1);
- auto image_builder = builder.MakeBuilder<CameraImage>();
- image_builder.add_data(data_offset);
- image_builder.add_rows(rows);
- image_builder.add_cols(cols);
- image_builder.add_monotonic_timestamp_ns(
- std::chrono::nanoseconds(monotonic_eof.time_since_epoch()).count());
- message_offset = image_builder.Finish();
-}
-
-int V4L2Reader::Ioctl(unsigned long number, void *arg) {
- return ioctl(fd_.get(), number, arg);
-}
-
-V4L2Reader::BufferInfo V4L2Reader::DequeueBuffer() {
- struct v4l2_buffer buffer;
- memset(&buffer, 0, sizeof(buffer));
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buffer.memory = V4L2_MEMORY_USERPTR;
- const int result = Ioctl(VIDIOC_DQBUF, &buffer);
- if (result == -1 && errno == EAGAIN) {
- return BufferInfo();
- }
- PCHECK(result == 0) << ": VIDIOC_DQBUF failed";
- CHECK_LT(buffer.index, buffers_.size());
- CHECK_EQ(reinterpret_cast<uintptr_t>(buffers_[buffer.index].data_pointer),
- buffer.m.userptr);
- CHECK_EQ(ImageSize(), buffer.length);
- CHECK(buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC);
- if (!FLAGS_ignore_timestamps) {
- // Require that we have good timestamp on images
- CHECK_EQ(buffer.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK,
- static_cast<uint32_t>(V4L2_BUF_FLAG_TSTAMP_SRC_EOF));
- }
- return {static_cast<int>(buffer.index),
- aos::time::from_timeval(buffer.timestamp)};
-}
-
-void V4L2Reader::EnqueueBuffer(int buffer_number) {
- CHECK_GE(buffer_number, 0);
- CHECK_LT(buffer_number, static_cast<int>(buffers_.size()));
- buffers_[buffer_number].InitializeMessage(ImageSize());
- struct v4l2_buffer buffer;
- memset(&buffer, 0, sizeof(buffer));
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buffer.memory = V4L2_MEMORY_USERPTR;
- buffer.index = buffer_number;
- buffer.m.userptr =
- reinterpret_cast<uintptr_t>(buffers_[buffer_number].data_pointer);
- buffer.length = ImageSize();
- PCHECK(Ioctl(VIDIOC_QBUF, &buffer) == 0);
-}
-
-void V4L2Reader::StreamOff() {
- int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- const int result = Ioctl(VIDIOC_STREAMOFF, &type);
- if (result == 0) {
- return;
- }
- // Some devices (like Alex's webcam) return this if streaming isn't currently
- // on, unlike what the documentations says should happen.
- if (errno == EBUSY) {
- return;
- }
- PLOG(FATAL) << "VIDIOC_STREAMOFF failed";
-}
-
-} // namespace vision
-} // namespace frc971
diff --git a/y2020/vision/v4l2_reader.h b/y2020/vision/v4l2_reader.h
deleted file mode 100644
index b9b3ce4..0000000
--- a/y2020/vision/v4l2_reader.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef Y2020_VISION_V4L2_READER_H_
-#define Y2020_VISION_V4L2_READER_H_
-
-#include <array>
-#include <string>
-
-#include "absl/types/span.h"
-#include "glog/logging.h"
-
-#include "aos/events/event_loop.h"
-#include "aos/scoped/scoped_fd.h"
-#include "y2020/vision/vision_generated.h"
-
-namespace frc971 {
-namespace vision {
-
-// Reads images from a V4L2 capture device (aka camera).
-class V4L2Reader {
- public:
- // device_name is the name of the device file (like "/dev/video0").
- V4L2Reader(aos::EventLoop *event_loop, const std::string &device_name);
-
- V4L2Reader(const V4L2Reader &) = delete;
- V4L2Reader &operator=(const V4L2Reader &) = delete;
-
- // Reads the latest image.
- //
- // Returns false if no image was available since the last image was read.
- // Call LatestImage() to get a reference to the data, which will be valid
- // until this method is called again.
- bool ReadLatestImage();
-
- // Sends the latest image.
- //
- // ReadLatestImage() must have returned a non-empty span the last time it was
- // called. After calling this, the data which was returned from
- // ReadLatestImage() will no longer be valid.
- void SendLatestImage();
-
- const CameraImage &LatestImage() {
- Buffer *const buffer = &buffers_[saved_buffer_.index];
- return *flatbuffers::GetTemporaryPointer(*buffer->builder.fbb(),
- buffer->message_offset);
- }
-
- // Sets the exposure duration of the camera. duration is the number of 100
- // microsecond units.
- void SetExposure(size_t duration);
-
- // Switches from manual to auto exposure.
- void UseAutoExposure();
-
- private:
- static constexpr int kNumberBuffers = 16;
-
- struct Buffer {
- void InitializeMessage(size_t max_image_size);
-
- void PrepareMessage(int rows, int cols, size_t image_size,
- aos::monotonic_clock::time_point monotonic_eof);
-
- void Send() {
- (void)builder.Send(message_offset);
- message_offset = flatbuffers::Offset<CameraImage>();
- }
-
- absl::Span<const char> DataSpan(size_t image_size) {
- return absl::Span<const char>(
- reinterpret_cast<char *>(CHECK_NOTNULL(data_pointer)), image_size);
- }
-
- aos::Sender<CameraImage> sender;
- aos::Sender<CameraImage>::Builder builder;
- flatbuffers::Offset<CameraImage> message_offset;
-
- uint8_t *data_pointer = nullptr;
- };
-
- struct BufferInfo {
- int index = -1;
- aos::monotonic_clock::time_point monotonic_eof =
- aos::monotonic_clock::min_time;
-
- explicit operator bool() const { return index != -1; }
-
- void Clear() {
- index = -1;
- monotonic_eof = aos::monotonic_clock::min_time;
- }
- };
-
- // TODO(Brian): This concept won't exist once we start using variable-size
- // H.264 frames.
- size_t ImageSize() const { return rows_ * cols_ * 2 /* bytes per pixel */; }
-
- // Attempts to dequeue a buffer (nonblocking). Returns the index of the new
- // buffer, or BufferInfo() if there wasn't a frame to dequeue.
- BufferInfo DequeueBuffer();
-
- void EnqueueBuffer(int buffer);
-
- int Ioctl(unsigned long number, void *arg);
-
- void StreamOff();
-
- // The mmaped V4L2 buffers.
- std::array<Buffer, kNumberBuffers> buffers_;
-
- // If this is non-negative, it's the buffer number we're currently holding
- // onto.
- BufferInfo saved_buffer_;
-
- const int rows_ = 480;
- const int cols_ = 640;
-
- aos::ScopedFD fd_;
-};
-
-} // namespace vision
-} // namespace frc971
-
-#endif // Y2020_VISION_V4L2_READER_H_
diff --git a/y2020/vision/viewer.cc b/y2020/vision/viewer.cc
index 2aff3fb..37ff4a3 100644
--- a/y2020/vision/viewer.cc
+++ b/y2020/vision/viewer.cc
@@ -8,8 +8,9 @@
#include "aos/events/shm_event_loop.h"
#include "aos/init.h"
#include "aos/time/time.h"
+#include "frc971/vision/v4l2_reader.h"
+#include "frc971/vision/vision_generated.h"
#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/vision_generated.h"
DEFINE_string(config, "config.json", "Path to the config file to use.");
DEFINE_bool(show_features, true, "Show the SIFT features that matched.");
diff --git a/y2020/vision/viewer_replay.cc b/y2020/vision/viewer_replay.cc
index 93e531d..a818859 100644
--- a/y2020/vision/viewer_replay.cc
+++ b/y2020/vision/viewer_replay.cc
@@ -6,7 +6,7 @@
#include "aos/events/logging/log_reader.h"
#include "aos/events/simulated_event_loop.h"
#include "aos/init.h"
-#include "y2020/vision/vision_generated.h"
+#include "frc971/vision/vision_generated.h"
DEFINE_string(node, "pi1", "Node name to replay.");
DEFINE_string(image_save_prefix, "/tmp/img",
diff --git a/y2020/vision/vision.fbs b/y2020/vision/vision.fbs
deleted file mode 100644
index e89a181..0000000
--- a/y2020/vision/vision.fbs
+++ /dev/null
@@ -1,22 +0,0 @@
-namespace frc971.vision;
-
-// Contains the image data from one frame of a camera.
-//
-// The following image options are hard-coded. If you add images in a different
-// format, make fields for them which default to these values and remove this
-// comment:
-// * Format: YUYV (V4L2_PIX_FMT_YUYV, which puts 2 pixels in every 4 bytes,
-// with the order Y0,U,Y1,V)
-// * Order: row major (index 0 is upper left, index 1 is to its right)
-table CameraImage {
- // The number of rows in the image.
- rows:int32 (id: 0);
- // The number of columns in the image.
- cols:int32 (id: 1);
- // The image data.
- data:[ubyte] (id: 2);
- // Timestamp when the frame was captured. This is the end-of-frame timestamp.
- monotonic_timestamp_ns:int64 (id: 3);
-}
-
-root_type CameraImage;