Do SIFT and send out the results

We don't yet have the math for calculating poses based on these results.

Change-Id: I6494dbf1d3a7c13db902cf00b7c362a2a956691b
diff --git a/aos/time/time.cc b/aos/time/time.cc
index a0423ea..03fe45b 100644
--- a/aos/time/time.cc
+++ b/aos/time/time.cc
@@ -115,6 +115,12 @@
 struct timespec to_timespec(const ::aos::monotonic_clock::time_point time) {
   return to_timespec(time.time_since_epoch());
 }
+
+::aos::monotonic_clock::time_point from_timeval(struct timeval t) {
+  return monotonic_clock::epoch() + std::chrono::seconds(t.tv_sec) +
+         std::chrono::microseconds(t.tv_usec);
+}
+
 }  // namespace time
 
 constexpr monotonic_clock::time_point monotonic_clock::min_time;
diff --git a/aos/time/time.h b/aos/time/time.h
index 27e4c70..e31de5c 100644
--- a/aos/time/time.h
+++ b/aos/time/time.h
@@ -99,6 +99,9 @@
 // epoch.
 struct timespec to_timespec(::aos::monotonic_clock::time_point time);
 
+// Converts a timeval object to a monotonic_clock::time_point.
+::aos::monotonic_clock::time_point from_timeval(struct timeval t);
+
 namespace time_internal {
 
 template <class T>
diff --git a/aos/time/time_test.cc b/aos/time/time_test.cc
index 8edaf9b..c5eb634 100644
--- a/aos/time/time_test.cc
+++ b/aos/time/time_test.cc
@@ -49,6 +49,21 @@
   EXPECT_EQ(neg_time.tv_nsec, 0);
 }
 
+// Tests from_timeval.
+TEST(TimeTest, TimevalToTimePoint) {
+  struct timeval pos_time;
+  pos_time.tv_sec = 1432423;
+  pos_time.tv_usec = 0;
+  EXPECT_EQ(::aos::monotonic_clock::epoch() + chrono::seconds(1432423),
+            from_timeval(pos_time));
+
+  struct timeval neg_time;
+  neg_time.tv_sec = -1432423;
+  neg_time.tv_usec = 0;
+  EXPECT_EQ(::aos::monotonic_clock::epoch() - chrono::seconds(1432423),
+            from_timeval(neg_time));
+}
+
 // Test that << works with numbers with leading 0's.
 TEST(TimeTest, OperatorStream) {
   const monotonic_clock::time_point t = monotonic_clock::epoch() +
diff --git a/y2020/BUILD b/y2020/BUILD
index c80550b..c32cd10 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -110,6 +110,7 @@
         "//y2019/control_loops/drivetrain:target_selector_fbs",
         "//y2020/control_loops/superstructure:superstructure_status_fbs",
         "//y2020/vision:vision_fbs",
+        "//y2020/vision/sift:sift_fbs",
     ],
     visibility = ["//visibility:public"],
     deps = [
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index fa30531..44e14e1 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -30,10 +30,20 @@
     srcs = [
         "camera_reader.cc",
     ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
     deps = [
         ":v4l2_reader",
+        ":vision_fbs",
         "//aos:init",
         "//aos/events:shm_event_loop",
+        "//third_party:opencv",
+        "//y2020/vision/sift:demo_sift",
+        "//y2020/vision/sift:sift971",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
     ],
 )
 
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index e5bcb64..de4dfb7 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -1,34 +1,235 @@
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
 #include "aos/events/shm_event_loop.h"
 #include "aos/init.h"
 
+#include "y2020/vision/sift/demo_sift.h"
+#include "y2020/vision/sift/sift971.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
 #include "y2020/vision/v4l2_reader.h"
+#include "y2020/vision/vision_generated.h"
 
 namespace frc971 {
 namespace vision {
 namespace {
 
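+// Reads images from the camera, runs SIFT feature extraction and matching
+// against the training data, and sends the results out.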
+class CameraReader {
+ public:
+  CameraReader(aos::EventLoop *event_loop,
+               const sift::TrainingData *training_data, V4L2Reader *reader,
+               cv::FlannBasedMatcher *matcher)
+      : event_loop_(event_loop),
+        training_data_(training_data),
+        reader_(reader),
+        matcher_(matcher),
+        image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
+        result_sender_(
+            event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+        read_image_timer_(event_loop->AddTimer([this]() {
+          ReadImage();
+          read_image_timer_->Setup(event_loop_->monotonic_now());
+        })) {
+    CopyTrainingFeatures();
+    // Technically we don't need to do this, but doing it now avoids the first
+    // match attempt being slow.
+    matcher_->train();
+
+    event_loop->OnRun(
+        [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
+  }
+
+ private:
+  // Copies the information from training_data_ into matcher_.
+  void CopyTrainingFeatures();
+  // Processes an image (including sending the results).
+  void ProcessImage(const CameraImage &image);
+  // Reads an image, and then performs all of our processing on it.
+  void ReadImage();
+
+  flatbuffers::Offset<
+      flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+  PackImageMatches(flatbuffers::FlatBufferBuilder *fbb,
+                   const std::vector<std::vector<cv::DMatch>> &matches);
+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+  PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
+               const std::vector<cv::KeyPoint> &keypoints,
+               const cv::Mat &descriptors);
+
+  aos::EventLoop *const event_loop_;
+  const sift::TrainingData *const training_data_;
+  V4L2Reader *const reader_;
+  cv::FlannBasedMatcher *const matcher_;
+  aos::Sender<CameraImage> image_sender_;
+  aos::Sender<sift::ImageMatchResult> result_sender_;
+  // We schedule this immediately to read an image. Having it on a timer means
+  // other things can run on the event loop in between.
+  aos::TimerHandler *const read_image_timer_;
+
+  const std::unique_ptr<frc971::vision::SIFT971_Impl> sift_{
+      new frc971::vision::SIFT971_Impl()};
+};
+
+void CameraReader::CopyTrainingFeatures() {
+  for (const sift::TrainingImage *training_image : *training_data_->images()) {
+    cv::Mat features(training_image->features()->size(), 128, CV_32F);
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
+      const sift::Feature *feature_table = training_image->features()->Get(i);
+      const flatbuffers::Vector<float> *const descriptor =
+          feature_table->descriptor();
+      CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
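+      // Wrap the flatbuffer-owned descriptor data in a 1x128 Mat (without
+      // copying) and copy it into the corresponding row of features.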
+      cv::Mat(1, descriptor->size(), CV_32F,
+              const_cast<void *>(static_cast<const void *>(descriptor->data())))
+          .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
+    }
+    matcher_->add(features);
+  }
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
+  // First, we need to extract the brightness information. This can't really be
+  // fused into the beginning of the SIFT algorithm because the algorithm needs
+  // to look at the base image directly. It also takes only 2ms on our images.
+  // This converts the image from YUYV to grayscale.
+  cv::Mat image_mat(image.rows(), image.cols(), CV_8U);
+  CHECK(image_mat.isContinuous());
+  const int number_pixels = image.rows() * image.cols();
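+  // In YUYV, every other byte is a Y (luminance) sample, so copy just those.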
+  for (int i = 0; i < number_pixels; ++i) {
+    reinterpret_cast<uint8_t *>(image_mat.data)[i] =
+        image.data()->data()[i * 2];
+  }
+
+  // Next, grab the features from the image.
+  std::vector<cv::KeyPoint> keypoints;
+  cv::Mat descriptors;
+  sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
+
+  // Then, match those features against our training data.
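+  // knnMatch with k=2 returns the two closest training features for each
+  // query feature.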
+  std::vector<std::vector<cv::DMatch>> matches;
+  matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
+
+  // Now, pack the results up and send them out.
+  auto builder = result_sender_.MakeBuilder();
+
+  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
+  // TODO(Brian): PackCameraPoses (and put it in the result)
+  const auto features_offset =
+      PackFeatures(builder.fbb(), keypoints, descriptors);
+
+  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
+  result_builder.add_image_matches(image_matches_offset);
+  result_builder.add_features(features_offset);
+  result_builder.add_image_monotonic_timestamp_ns(
+      image.monotonic_timestamp_ns());
+  builder.Send(result_builder.Finish());
+}
+
+void CameraReader::ReadImage() {
+  if (!reader_->ReadLatestImage()) {
+    LOG(INFO) << "No image, sleeping";
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+    return;
+  }
+
+  ProcessImage(reader_->LatestImage());
+
+  reader_->SendLatestImage();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+CameraReader::PackImageMatches(
+    flatbuffers::FlatBufferBuilder *fbb,
+    const std::vector<std::vector<cv::DMatch>> &matches) {
+  // First, we need to pull out all the matches for each image. Might as well
+  // build up the Match tables at the same time.
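+  // Each cv::DMatch records which training image it came from via imgIdx, so
+  // group the matches by that.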
+  std::vector<std::vector<flatbuffers::Offset<sift::Match>>> per_image_matches;
+  for (const std::vector<cv::DMatch> &image_matches : matches) {
+    for (const cv::DMatch &image_match : image_matches) {
+      sift::Match::Builder match_builder(*fbb);
+      match_builder.add_query_feature(image_match.queryIdx);
+      match_builder.add_train_feature(image_match.trainIdx);
+      if (per_image_matches.size() <= static_cast<size_t>(image_match.imgIdx)) {
+        per_image_matches.resize(image_match.imgIdx + 1);
+      }
+      per_image_matches[image_match.imgIdx].emplace_back(
+          match_builder.Finish());
+    }
+  }
+
+  // Then, we need to build up each ImageMatch table.
+  std::vector<flatbuffers::Offset<sift::ImageMatch>> image_match_tables;
+  for (size_t i = 0; i < per_image_matches.size(); ++i) {
+    const std::vector<flatbuffers::Offset<sift::Match>> &this_image_matches =
+        per_image_matches[i];
+    if (this_image_matches.empty()) {
+      continue;
+    }
+    const auto vector_offset = fbb->CreateVector(this_image_matches);
+    sift::ImageMatch::Builder image_builder(*fbb);
+    image_builder.add_train_image(i);
+    image_builder.add_matches(vector_offset);
+    image_match_tables.emplace_back(image_builder.Finish());
+  }
+
+  return fbb->CreateVector(image_match_tables);
+}
+
+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+CameraReader::PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
+                           const std::vector<cv::KeyPoint> &keypoints,
+                           const cv::Mat &descriptors) {
+  const int number_features = keypoints.size();
+  CHECK_EQ(descriptors.rows, number_features);
+  std::vector<flatbuffers::Offset<sift::Feature>> features_vector(
+      number_features);
+  for (int i = 0; i < number_features; ++i) {
+    const auto submat = descriptors(cv::Range(i, i + 1), cv::Range(0, 128));
+    CHECK(submat.isContinuous());
+    const auto descriptor_offset =
+        fbb->CreateVector(reinterpret_cast<float *>(submat.data), 128);
+    sift::Feature::Builder feature_builder(*fbb);
+    feature_builder.add_descriptor(descriptor_offset);
+    feature_builder.add_x(keypoints[i].pt.x);
+    feature_builder.add_y(keypoints[i].pt.y);
+    feature_builder.add_size(keypoints[i].size);
+    feature_builder.add_angle(keypoints[i].angle);
+    feature_builder.add_response(keypoints[i].response);
+    feature_builder.add_octave(keypoints[i].octave);
+    CHECK_EQ(-1, keypoints[i].class_id)
+        << ": Not sure what to do with a class id";
+    features_vector[i] = feature_builder.Finish();
+  }
+  return fbb->CreateVector(features_vector);
+}
+
 void CameraReaderMain() {
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
       aos::configuration::ReadConfig("config.json");
 
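+  // Pull the compiled-in training data out of its flatbuffer and verify it
+  // before using it.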
+  const auto training_data_bfbs = DemoSiftData();
+  const sift::TrainingData *const training_data =
+      flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+  {
+    flatbuffers::Verifier verifier(
+        reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+        training_data_bfbs.size());
+    CHECK(training_data->Verify(verifier));
+  }
+
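+  // Build the FLANN matcher with a kd-tree index, the standard choice for
+  // float descriptors like SIFT's.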
+  const auto index_params = cv::makePtr<cv::flann::IndexParams>();
+  index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
+  index_params->setInt("trees", 5);
+  const auto search_params =
+      cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
+  cv::FlannBasedMatcher matcher(index_params, search_params);
+
   aos::ShmEventLoop event_loop(&config.message());
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
+  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader,
+                             &matcher);
 
-  while (true) {
-    const auto image = v4l2_reader.ReadLatestImage();
-    if (image.empty()) {
-      LOG(INFO) << "No image, sleeping";
-      std::this_thread::sleep_for(std::chrono::milliseconds(10));
-      continue;
-    }
-
-    // Now, process image.
-    // TODO(Brian): Actually process it, rather than just logging its size...
-    LOG(INFO) << image.size();
-    std::this_thread::sleep_for(std::chrono::milliseconds(70));
-
-    v4l2_reader.SendLatestImage();
-  }
+  event_loop.Run();
 }
 
 }  // namespace
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index b4610d7..5cfb6aa 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -220,3 +220,40 @@
     includes = [":sift_fbs_includes"],
     visibility = ["//visibility:public"],
 )
+
+py_binary(
+    name = "demo_sift_training",
+    srcs = ["demo_sift_training.py"],
+    default_python_version = "PY3",
+    srcs_version = "PY2AND3",
+    deps = [
+        ":sift_fbs_python",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+    ],
+)
+
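+# Runs SIFT on the demo image at build time and generates demo_sift.h, which
+# embeds the resulting training data as a byte array.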
+genrule(
+    name = "run_demo_sift_training",
+    srcs = [
+        "images/demo/FRC-Image4-cleaned.png",
+    ],
+    outs = [
+        "demo_sift.h",
+    ],
+    cmd = " ".join([
+        "$(location :demo_sift_training)",
+        "$(location images/demo/FRC-Image4-cleaned.png)",
+        "$(location demo_sift.h)",
+    ]),
+    tools = [
+        ":demo_sift_training",
+    ],
+)
+
+cc_library(
+    name = "demo_sift",
+    hdrs = [
+        "demo_sift.h",
+    ],
+    visibility = ["//visibility:public"],
+)
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
new file mode 100644
index 0000000..a6650fd
--- /dev/null
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python3
+
+import cv2
+import sys
+import flatbuffers
+
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.Feature as Feature
+
+def main():
+  image4_cleaned_path = sys.argv[1]
+  output_path = sys.argv[2]
+
+  image4_cleaned = cv2.imread(image4_cleaned_path)
+
+  image = cv2.cvtColor(image4_cleaned, cv2.COLOR_BGR2GRAY)
+  image = cv2.resize(image, (640, 480))
+  sift = cv2.xfeatures2d.SIFT_create()
+  keypoints, descriptors = sift.detectAndCompute(image, None)
+
+  fbb = flatbuffers.Builder(0)
+
+  features_vector = []
+
+  for keypoint, descriptor in zip(keypoints, descriptors):
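+    # The flatbuffers builder prepends, so iterate in reverse to keep the
+    # descriptor values in their original order.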
+    Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+    for n in reversed(descriptor):
+      fbb.PrependFloat32(n)
+    descriptor_vector = fbb.EndVector(len(descriptor))
+
+    Feature.FeatureStart(fbb)
+
+    Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+    Feature.FeatureAddX(fbb, keypoint.pt[0])
+    Feature.FeatureAddY(fbb, keypoint.pt[1])
+    Feature.FeatureAddSize(fbb, keypoint.size)
+    Feature.FeatureAddAngle(fbb, keypoint.angle)
+    Feature.FeatureAddResponse(fbb, keypoint.response)
+    Feature.FeatureAddOctave(fbb, keypoint.octave)
+
+    features_vector.append(Feature.FeatureEnd(fbb))
+
+  TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
+  for feature in reversed(features_vector):
+    fbb.PrependUOffsetTRelative(feature)
+  features_vector_table = fbb.EndVector(len(features_vector))
+
+  TrainingImage.TrainingImageStart(fbb)
+  TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
+  # TODO(Brian): Fill out the transformation matrices.
+  training_image = TrainingImage.TrainingImageEnd(fbb)
+
+  TrainingData.TrainingDataStartImagesVector(fbb, 1)
+  fbb.PrependUOffsetTRelative(training_image)
+  images = fbb.EndVector(1)
+
+  TrainingData.TrainingDataStart(fbb)
+  TrainingData.TrainingDataAddImages(fbb, images)
+  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+  bfbs = fbb.Output()
+
+  output_prefix = [
+      b'#ifndef Y2020_VISION_SIFT_DEMO_SIFT_H_',
+      b'#define Y2020_VISION_SIFT_DEMO_SIFT_H_',
+      b'#include <string_view>',
+      b'namespace frc971 {',
+      b'namespace vision {',
+      b'inline std::string_view DemoSiftData() {',
+  ]
+  output_suffix = [
+      b'  return std::string_view(kData, sizeof(kData));',
+      b'}',
+      b'}  // namespace vision',
+      b'}  // namespace frc971',
+      b'#endif  // Y2020_VISION_SIFT_DEMO_SIFT_H_',
+  ]
+
+  with open(output_path, 'wb') as output:
+    for line in output_prefix:
+      output.write(line)
+      output.write(b'\n')
+    output.write(b'alignas(64) static constexpr char kData[] = "')
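+    # Emit each byte as a \xNN escape so arbitrary binary data stays valid
+    # inside a C++ string literal.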
+    for byte in fbb.Output():
+      output.write(b'\\x' + (b'%x' % byte).zfill(2))
+    output.write(b'";\n')
+    for line in output_suffix:
+      output.write(line)
+      output.write(b'\n')
+
+if __name__ == '__main__':
+  main()
diff --git a/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png b/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png
new file mode 100644
index 0000000..9d2f0bf
--- /dev/null
+++ b/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png
Binary files differ
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 84697de..5a1384e 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -82,6 +82,9 @@
 
   // The features for this image.
   features:[Feature];
+
+  // Timestamp when the frame was captured.
+  image_monotonic_timestamp_ns:long;
 }
 
 root_type ImageMatchResult;
diff --git a/y2020/vision/v4l2_reader.cc b/y2020/vision/v4l2_reader.cc
index f43a2ac..f1944c1 100644
--- a/y2020/vision/v4l2_reader.cc
+++ b/y2020/vision/v4l2_reader.cc
@@ -56,63 +56,94 @@
   }
 }
 
-absl::Span<const char> V4L2Reader::ReadLatestImage() {
+bool V4L2Reader::ReadLatestImage() {
   // First, enqueue any old buffer we already have. This is the one which may
   // have been sent.
-  if (saved_buffer_ != -1) {
-    EnqueueBuffer(saved_buffer_);
-    saved_buffer_ = -1;
+  if (saved_buffer_) {
+    EnqueueBuffer(saved_buffer_.index);
+    saved_buffer_.Clear();
   }
   while (true) {
-    const int previous_buffer = saved_buffer_;
+    const BufferInfo previous_buffer = saved_buffer_;
     saved_buffer_ = DequeueBuffer();
-    if (saved_buffer_ != -1) {
+    if (saved_buffer_) {
       // We got a new buffer. Return the previous one (if relevant) and keep
       // going.
-      if (previous_buffer != -1) {
-        EnqueueBuffer(previous_buffer);
+      if (previous_buffer) {
+        EnqueueBuffer(previous_buffer.index);
       }
       continue;
     }
-    if (previous_buffer == -1) {
+    if (!previous_buffer) {
       // There were no images to read. Return an indication of that.
-      return absl::Span<const char>();
+      return false;
     }
     // We didn't get a new one, but we already got one in a previous
     // iteration, which means we found an image so return it.
     saved_buffer_ = previous_buffer;
-    return buffers_[saved_buffer_].DataSpan(ImageSize());
+    buffers_[saved_buffer_.index].PrepareMessage(rows_, cols_, ImageSize(),
+                                                 saved_buffer_.monotonic_eof);
+    return true;
   }
 }
 
-void V4L2Reader::SendLatestImage() {
-  buffers_[saved_buffer_].Send(rows_, cols_, ImageSize());
+void V4L2Reader::SendLatestImage() { buffers_[saved_buffer_.index].Send(); }
+
+void V4L2Reader::Buffer::InitializeMessage(size_t max_image_size) {
+  message_offset = flatbuffers::Offset<CameraImage>();
+  builder = aos::Sender<CameraImage>::Builder();
+  builder = sender.MakeBuilder();
+  // The kernel has an undocumented requirement that the buffer is aligned
+  // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
+  // and only print something in dmesg with the relevant dynamic debug
+  // prints turned on.
+  builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64, &data_pointer);
+  CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
+      << ": Flatbuffers failed to align things as requested";
+}
+
+void V4L2Reader::Buffer::PrepareMessage(
+    int rows, int cols, size_t image_size,
+    aos::monotonic_clock::time_point monotonic_eof) {
+  CHECK(data_pointer != nullptr);
+  data_pointer = nullptr;
+
+  const auto data_offset = builder.fbb()->EndIndeterminateVector(image_size, 1);
+  auto image_builder = builder.MakeBuilder<CameraImage>();
+  image_builder.add_data(data_offset);
+  image_builder.add_rows(rows);
+  image_builder.add_cols(cols);
+  image_builder.add_monotonic_timestamp_ns(
+      std::chrono::nanoseconds(monotonic_eof.time_since_epoch()).count());
+  message_offset = image_builder.Finish();
 }
 
 int V4L2Reader::Ioctl(unsigned long number, void *arg) {
   return ioctl(fd_.get(), number, arg);
 }
 
-int V4L2Reader::DequeueBuffer() {
+V4L2Reader::BufferInfo V4L2Reader::DequeueBuffer() {
   struct v4l2_buffer buffer;
   memset(&buffer, 0, sizeof(buffer));
   buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
   buffer.memory = V4L2_MEMORY_USERPTR;
   const int result = Ioctl(VIDIOC_DQBUF, &buffer);
   if (result == -1 && errno == EAGAIN) {
-    return -1;
+    return BufferInfo();
   }
   PCHECK(result == 0) << ": VIDIOC_DQBUF failed";
   CHECK_LT(buffer.index, buffers_.size());
-  LOG(INFO) << "dequeued " << buffer.index;
   CHECK_EQ(reinterpret_cast<uintptr_t>(buffers_[buffer.index].data_pointer),
            buffer.m.userptr);
   CHECK_EQ(ImageSize(), buffer.length);
-  return buffer.index;
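+  // Require a monotonic end-of-frame timestamp from the kernel so it's
+  // comparable with our monotonic_clock.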
+  CHECK(buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC);
+  CHECK_EQ(buffer.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK,
+           static_cast<uint32_t>(V4L2_BUF_FLAG_TSTAMP_SRC_EOF));
+  return {static_cast<int>(buffer.index),
+          aos::time::from_timeval(buffer.timestamp)};
 }
 
 void V4L2Reader::EnqueueBuffer(int buffer_number) {
-  LOG(INFO) << "enqueueing " << buffer_number;
   CHECK_GE(buffer_number, 0);
   CHECK_LT(buffer_number, static_cast<int>(buffers_.size()));
   buffers_[buffer_number].InitializeMessage(ImageSize());
diff --git a/y2020/vision/v4l2_reader.h b/y2020/vision/v4l2_reader.h
index bdf4a8e..3c9d795 100644
--- a/y2020/vision/v4l2_reader.h
+++ b/y2020/vision/v4l2_reader.h
@@ -25,10 +25,10 @@
 
   // Reads the latest image.
   //
-  // Returns an empty span if no image was available since this object was
-  // created. The data referenced in the return value is valid until this method
-  // is called again.
-  absl::Span<const char> ReadLatestImage();
+  // Returns false if no image was available since the last image was read.
+  // Call LatestImage() to get a reference to the data, which will be valid
+  // until this method is called again.
+  bool ReadLatestImage();
 
   // Sends the latest image.
   //
@@ -37,52 +37,58 @@
   // ReadLatestImage() will no longer be valid.
   void SendLatestImage();
 
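+  // Returns a reference to the most recently read image. The reference is
+  // invalidated by the next call to ReadLatestImage().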
+  const CameraImage &LatestImage() {
+    Buffer *const buffer = &buffers_[saved_buffer_.index];
+    return *flatbuffers::GetTemporaryPointer(*buffer->builder.fbb(),
+                                             buffer->message_offset);
+  }
+
  private:
   static constexpr int kNumberBuffers = 16;
 
   struct Buffer {
-    void InitializeMessage(size_t max_image_size) {
-      builder = aos::Sender<CameraImage>::Builder();
-      builder = sender.MakeBuilder();
-      // The kernel has an undocumented requirement that the buffer is aligned
-      // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
-      // and only print something in dmesg with the relevant dynamic debug
-      // prints turned on.
-      builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64,
-                                              &data_pointer);
-      CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
-          << ": Flatbuffers failed to align things as requested";
-    }
+    void InitializeMessage(size_t max_image_size);
 
-    void Send(int rows, int cols, size_t image_size) {
-      const auto data_offset =
-          builder.fbb()->EndIndeterminateVector(image_size, 1);
-      auto image_builder = builder.MakeBuilder<CameraImage>();
-      image_builder.add_data(data_offset);
-      image_builder.add_rows(rows);
-      image_builder.add_cols(cols);
-      builder.Send(image_builder.Finish());
-      data_pointer = nullptr;
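+    // Finishes building the CameraImage (including ending the data vector)
+    // without sending it, so LatestImage() can refer to it.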
+    void PrepareMessage(int rows, int cols, size_t image_size,
+                        aos::monotonic_clock::time_point monotonic_eof);
+
+    void Send() {
+      builder.Send(message_offset);
+      message_offset = flatbuffers::Offset<CameraImage>();
     }
 
     absl::Span<const char> DataSpan(size_t image_size) {
-      return absl::Span<const char>(reinterpret_cast<char *>(data_pointer),
-                                    image_size);
+      return absl::Span<const char>(
+          reinterpret_cast<char *>(CHECK_NOTNULL(data_pointer)), image_size);
     }
 
     aos::Sender<CameraImage> sender;
     aos::Sender<CameraImage>::Builder builder;
+    flatbuffers::Offset<CameraImage> message_offset;
 
     uint8_t *data_pointer = nullptr;
   };
 
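+  // Identifies a dequeued buffer, along with the kernel's end-of-frame
+  // timestamp for it. An index of -1 means no buffer.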
+  struct BufferInfo {
+    int index = -1;
+    aos::monotonic_clock::time_point monotonic_eof =
+        aos::monotonic_clock::min_time;
+
+    explicit operator bool() const { return index != -1; }
+
+    void Clear() {
+      index = -1;
+      monotonic_eof = aos::monotonic_clock::min_time;
+    }
+  };
+
   // TODO(Brian): This concept won't exist once we start using variable-size
   // H.264 frames.
   size_t ImageSize() const { return rows_ * cols_ * 2 /* bytes per pixel */; }
 
   // Attempts to dequeue a buffer (nonblocking). Returns the index of the new
-  // buffer, or -1 if there wasn't a frame to dequeue.
-  int DequeueBuffer();
+  // buffer, or BufferInfo() if there wasn't a frame to dequeue.
+  BufferInfo DequeueBuffer();
 
   void EnqueueBuffer(int buffer);
 
@@ -95,7 +101,7 @@
 
-  // If this is non-negative, it's the buffer number we're currently holding
-  // onto.
+  // If the index is valid, this is the buffer (along with its end-of-frame
+  // timestamp) that we're currently holding onto.
-  int saved_buffer_ = -1;
+  BufferInfo saved_buffer_;
 
   const int rows_ = 480;
   const int cols_ = 640;
diff --git a/y2020/vision/vision.fbs b/y2020/vision/vision.fbs
index b8d8bd9..17dc4a4 100644
--- a/y2020/vision/vision.fbs
+++ b/y2020/vision/vision.fbs
@@ -15,9 +15,8 @@
   cols:int;
   // The image data.
   data:[ubyte];
-  // Timestamp when the frame was captured.
+  // Timestamp when the frame was captured. This is the end-of-frame timestamp.
   monotonic_timestamp_ns:long;
-  realtime_timestamp_ns:long;
 }
 
 root_type CameraImage;
diff --git a/y2020/y2020.json b/y2020/y2020.json
index 238d73f..d0c74e4 100644
--- a/y2020/y2020.json
+++ b/y2020/y2020.json
@@ -31,6 +31,12 @@
       "frequency": 25,
       "max_size": 620000,
       "num_senders": 18
+    },
+    {
+      "name": "/camera",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "frequency": 25,
+      "max_size": 300000
     }
   ],
   "applications": [