Add a process which reads frames from the camera

Tested on a Pi, and it captures something. There isn't a good way yet to
check whether the captured data makes any sense.
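
As a rough sketch (nothing in this change does this yet; the consumer below
is purely illustrative), another process would read these images off the
/camera channel with an AOS watcher, something like:

  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
      aos::configuration::ReadConfig("config.json");
  aos::ShmEventLoop event_loop(&config.message());
  event_loop.MakeWatcher(
      "/camera", [](const frc971::vision::CameraImage &image) {
        LOG(INFO) << "Got a " << image.cols() << "x" << image.rows()
                  << " frame";
      });
  event_loop.Run();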

Change-Id: I40c8c4e395fcf468f4381250c7b75a5e4bee0cc4
diff --git a/y2020/BUILD b/y2020/BUILD
index 53ccb8e..58788d4 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -108,6 +108,7 @@
         "//y2020/control_loops/superstructure:superstructure_position_fbs",
         "//y2019/control_loops/drivetrain:target_selector_fbs",
         "//y2020/control_loops/superstructure:superstructure_status_fbs",
+        "//y2020/vision:vision_fbs",
     ],
     visibility = ["//visibility:public"],
     deps = [
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 021a466..2e3723d 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -1,7 +1,38 @@
 load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("//aos:config.bzl", "aos_config")
 
 flatbuffer_cc_library(
     name = "vision_fbs",
     srcs = ["vision.fbs"],
     gen_reflections = 1,
+    visibility = ["//y2020:__subpackages__"],
+)
+
+cc_library(
+    name = "v4l2_reader",
+    srcs = [
+        "v4l2_reader.cc",
+    ],
+    hdrs = [
+        "v4l2_reader.h",
+    ],
+    deps = [
+        ":vision_fbs",
+        "//aos/events:event_loop",
+        "//aos/scoped:scoped_fd",
+        "@com_github_google_glog//:glog",
+        "@com_google_absl//absl/base",
+    ],
+)
+
+cc_binary(
+    name = "camera_reader",
+    srcs = [
+        "camera_reader.cc",
+    ],
+    deps = [
+        ":v4l2_reader",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+    ],
 )
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
new file mode 100644
index 0000000..e5bcb64
--- /dev/null
+++ b/y2020/vision/camera_reader.cc
@@ -0,0 +1,42 @@
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+
+#include "y2020/vision/v4l2_reader.h"
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+void CameraReaderMain() {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig("config.json");
+
+  aos::ShmEventLoop event_loop(&config.message());
+  V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
+
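+  // Poll for the newest frame, process it, then publish it on /camera.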
+  while (true) {
+    const auto image = v4l2_reader.ReadLatestImage();
+    if (image.empty()) {
+      LOG(INFO) << "No image, sleeping";
+      std::this_thread::sleep_for(std::chrono::milliseconds(10));
+      continue;
+    }
+
+    // Now, process the image.
+    // TODO(Brian): Actually process it, rather than just logging its size...
+    LOG(INFO) << image.size();
+    std::this_thread::sleep_for(std::chrono::milliseconds(70));
+
+    v4l2_reader.SendLatestImage();
+  }
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  frc971::vision::CameraReaderMain();
+}
diff --git a/y2020/vision/v4l2_reader.cc b/y2020/vision/v4l2_reader.cc
new file mode 100644
index 0000000..727f8ba
--- /dev/null
+++ b/y2020/vision/v4l2_reader.cc
@@ -0,0 +1,134 @@
+#include "y2020/vision/v4l2_reader.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/videodev2.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+namespace frc971 {
+namespace vision {
+
+V4L2Reader::V4L2Reader(aos::EventLoop *event_loop,
+                       const std::string &device_name)
+    : fd_(open(device_name.c_str(), O_RDWR | O_NONBLOCK)) {
+  PCHECK(fd_.get() != -1);
+
+  // First, clean up after anybody else who left the device streaming.
+  {
+    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    PCHECK(Ioctl(VIDIOC_STREAMOFF, &type) == 0);
+  }
+
+  {
+    struct v4l2_format format;
+    memset(&format, 0, sizeof(format));
+    format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    format.fmt.pix.width = cols_;
+    format.fmt.pix.height = rows_;
+    format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
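+    // YUYV is a packed 4:2:2 format: 2 bytes per pixel.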
+    // This means we want to capture from a progressive (non-interlaced) source.
+    format.fmt.pix.field = V4L2_FIELD_NONE;
+    PCHECK(Ioctl(VIDIOC_S_FMT, &format) == 0);
+    CHECK_EQ(static_cast<int>(format.fmt.pix.width), cols_);
+    CHECK_EQ(static_cast<int>(format.fmt.pix.height), rows_);
+    CHECK_EQ(static_cast<int>(format.fmt.pix.bytesperline),
+             cols_ * 2 /* bytes per pixel */);
+    CHECK_EQ(format.fmt.pix.sizeimage, ImageSize());
+  }
+
+  {
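+    // Ask for user-pointer I/O: we hand the kernel pointers into the
+    // flatbuffer messages we're building, so frames land directly in them.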
+    struct v4l2_requestbuffers request;
+    memset(&request, 0, sizeof(request));
+    request.count = buffers_.size();
+    request.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    request.memory = V4L2_MEMORY_USERPTR;
+    PCHECK(Ioctl(VIDIOC_REQBUFS, &request) == 0);
+    CHECK_EQ(request.count, buffers_.size())
+        << ": Kernel refused to give us the number of buffers we asked for";
+  }
+
+  for (size_t i = 0; i < buffers_.size(); ++i) {
+    buffers_[i].sender = event_loop->MakeSender<CameraImage>("/camera");
+    EnqueueBuffer(i);
+  }
+
+  {
+    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    PCHECK(Ioctl(VIDIOC_STREAMON, &type) == 0);
+  }
+}
+
+absl::Span<const char> V4L2Reader::ReadLatestImage() {
+  // First, enqueue any old buffer we already have. This is the one which may
+  // have been sent.
+  if (saved_buffer_ != -1) {
+    EnqueueBuffer(saved_buffer_);
+    saved_buffer_ = -1;
+  }
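+  // Dequeue until the kernel runs out of queued frames, so we end up holding
+  // the newest one; older frames get re-enqueued along the way.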
+  while (true) {
+    const int previous_buffer = saved_buffer_;
+    saved_buffer_ = DequeueBuffer();
+    if (saved_buffer_ != -1) {
+      // We got a new buffer. Return the previous one (if relevant) and keep
+      // going.
+      if (previous_buffer != -1) {
+        EnqueueBuffer(previous_buffer);
+      }
+      continue;
+    }
+    if (previous_buffer == -1) {
+      // There were no images to read. Return an indication of that.
+      return absl::Span<const char>();
+    }
+    // We didn't get a new one, but we already got one in a previous
+    // iteration, which means we found an image so return it.
+    saved_buffer_ = previous_buffer;
+    return buffers_[saved_buffer_].DataSpan(ImageSize());
+  }
+}
+
+void V4L2Reader::SendLatestImage() {
+  buffers_[saved_buffer_].Send(rows_, cols_, ImageSize());
+}
+
+int V4L2Reader::Ioctl(unsigned long number, void *arg) {
+  return ioctl(fd_.get(), number, arg);
+}
+
+int V4L2Reader::DequeueBuffer() {
+  struct v4l2_buffer buffer;
+  memset(&buffer, 0, sizeof(buffer));
+  buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  buffer.memory = V4L2_MEMORY_USERPTR;
+  const int result = Ioctl(VIDIOC_DQBUF, &buffer);
+  if (result == -1 && errno == EAGAIN) {
+    return -1;
+  }
+  PCHECK(result == 0) << ": VIDIOC_DQBUF failed";
+  CHECK_LT(buffer.index, buffers_.size());
+  LOG(INFO) << "dequeued " << buffer.index;
+  CHECK_EQ(reinterpret_cast<uintptr_t>(buffers_[buffer.index].data_pointer),
+           buffer.m.userptr);
+  CHECK_EQ(ImageSize(), buffer.length);
+  return buffer.index;
+}
+
+void V4L2Reader::EnqueueBuffer(int buffer_number) {
+  LOG(INFO) << "enqueueing " << buffer_number;
+  CHECK_GE(buffer_number, 0);
+  CHECK_LT(buffer_number, static_cast<int>(buffers_.size()));
+  buffers_[buffer_number].InitializeMessage(ImageSize());
+  struct v4l2_buffer buffer;
+  memset(&buffer, 0, sizeof(buffer));
+  buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+  buffer.memory = V4L2_MEMORY_USERPTR;
+  buffer.index = buffer_number;
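+  // Point the kernel at the flatbuffer message's data so the frame is
+  // captured directly into the message.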
+  buffer.m.userptr =
+      reinterpret_cast<uintptr_t>(buffers_[buffer_number].data_pointer);
+  buffer.length = ImageSize();
+  PCHECK(Ioctl(VIDIOC_QBUF, &buffer) == 0);
+}
+
+}  // namespace vision
+}  // namespace frc971
diff --git a/y2020/vision/v4l2_reader.h b/y2020/vision/v4l2_reader.h
new file mode 100644
index 0000000..969f4a8
--- /dev/null
+++ b/y2020/vision/v4l2_reader.h
@@ -0,0 +1,107 @@
+#ifndef Y2020_VISION_V4L2_READER_H_
+#define Y2020_VISION_V4L2_READER_H_
+
+#include <array>
+#include <string>
+
+#include "absl/types/span.h"
+#include "glog/logging.h"
+
+#include "aos/events/event_loop.h"
+#include "aos/scoped/scoped_fd.h"
+#include "y2020/vision/vision_generated.h"
+
+namespace frc971 {
+namespace vision {
+
+// Reads images from a V4L2 capture device (aka camera).
+class V4L2Reader {
+ public:
+  // device_name is the name of the device file (like "/dev/video0").
+  V4L2Reader(aos::EventLoop *event_loop, const std::string &device_name);
+
+  V4L2Reader(const V4L2Reader &) = delete;
+  V4L2Reader &operator=(const V4L2Reader &) = delete;
+
+  // Reads the latest image.
+  //
+  // Returns an empty span if there is no new image available. The data
+  // referenced in the return value is valid until this method or
+  // SendLatestImage() is called again.
+  absl::Span<const char> ReadLatestImage();
+
+  // Sends the latest image.
+  //
+  // ReadLatestImage() must have returned a non-empty span the last time it was
+  // called. After calling this, the data which was returned from
+  // ReadLatestImage() will no longer be valid.
+  void SendLatestImage();
+
+ private:
+  static constexpr int kNumberBuffers = 16;
+
+  struct Buffer {
+    void InitializeMessage(size_t max_image_size) {
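+      // Drop any builder left over from a previous message before asking the
+      // sender for space for the next one.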
+      builder = aos::Sender<CameraImage>::Builder();
+      builder = sender.MakeBuilder();
+      // The kernel has an undocumented requirement that the buffer is aligned
+      // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
+      // and only print something in dmesg with the relevant dynamic debug
+      // prints turned on.
+      builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64,
+                                              &data_pointer);
+      CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
+          << ": Flatbuffers failed to align things as requested";
+    }
+
+    void Send(int rows, int cols, size_t image_size) {
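+      // Close out the data vector at its final size, then fill in the rest
+      // of the CameraImage and send it.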
+      const auto data_offset =
+          builder.fbb()->EndIndeterminateVector(image_size, 1);
+      auto image_builder = builder.MakeBuilder<CameraImage>();
+      image_builder.add_data(data_offset);
+      image_builder.add_rows(rows);
+      image_builder.add_cols(cols);
+      builder.Send(image_builder.Finish());
+      data_pointer = nullptr;
+    }
+
+    absl::Span<const char> DataSpan(size_t image_size) {
+      return absl::Span<const char>(reinterpret_cast<char *>(data_pointer),
+                                    image_size);
+    }
+
+    aos::Sender<CameraImage> sender;
+    aos::Sender<CameraImage>::Builder builder;
+
+    uint8_t *data_pointer = nullptr;
+  };
+
+  // TODO(Brian): This concept won't exist once we start using variable-size
+  // H.264 frames.
+  size_t ImageSize() const { return rows_ * cols_ * 2 /* bytes per pixel */; }
+
+  // Attempts to dequeue a buffer (nonblocking). Returns the index of the new
+  // buffer, or -1 if there wasn't a frame to dequeue.
+  int DequeueBuffer();
+
+  void EnqueueBuffer(int buffer);
+
+  int Ioctl(unsigned long number, void *arg);
+
+  // The V4L2 user-pointer buffers. Each one is backed by an in-progress
+  // flatbuffer message.
+  std::array<Buffer, kNumberBuffers> buffers_;
+
+  // If this is non-negative, it's the buffer number we're currently holding
+  // onto.
+  int saved_buffer_ = -1;
+
+  const int rows_ = 480;
+  const int cols_ = 640;
+
+  aos::ScopedFD fd_;
+};
+
+}  // namespace vision
+}  // namespace frc971
+
+#endif  // Y2020_VISION_V4L2_READER_H_
diff --git a/y2020/y2020.json b/y2020/y2020.json
index 7680ef5..238d73f 100644
--- a/y2020/y2020.json
+++ b/y2020/y2020.json
@@ -24,11 +24,21 @@
       "name": "/superstructure",
       "type": "y2020.control_loops.superstructure.Position",
       "frequency": 200
+    },
+    {
+      "name": "/camera",
+      "type": "frc971.vision.CameraImage",
+      "frequency": 25,
+      "max_size": 620000,
+      "num_senders": 18
     }
   ],
   "applications": [
     {
       "name": "drivetrain"
+    },
+    {
+      "name": "camera_reader"
     }
   ],
   "imports": [