Merge "Undistort april detection corners"
diff --git a/aos/events/logging/buffer_encoder.cc b/aos/events/logging/buffer_encoder.cc
index 4b3ccf3..5c1d4b2 100644
--- a/aos/events/logging/buffer_encoder.cc
+++ b/aos/events/logging/buffer_encoder.cc
@@ -9,34 +9,32 @@
 
 namespace aos::logger {
 
-DummyEncoder::DummyEncoder(size_t max_buffer_size) {
-  // TODO(austin): This is going to end up writing > 128k chunks, not 128k
-  // chunks exactly.  If we really want to, we could make it always write 128k
-  // chunks by only exposing n * 128k chunks as we go.  This might improve write
-  // performance, then again, it might have no effect if the kernel is combining
-  // writes...
-  constexpr size_t kWritePageSize = 128 * 1024;
+DummyEncoder::DummyEncoder(size_t /*max_message_size*/, size_t buffer_size) {
   // Round up to the nearest page size.
-  input_buffer_.reserve(
-      ((max_buffer_size + kWritePageSize - 1) / kWritePageSize) *
-      kWritePageSize);
+  input_buffer_.reserve(buffer_size);
   return_queue_.resize(1);
 }
 
-bool DummyEncoder::HasSpace(size_t request) const {
-  return request + input_buffer_.size() < input_buffer_.capacity();
+size_t DummyEncoder::space() const {
+  return input_buffer_.capacity() - input_buffer_.size();
 }
 
-void DummyEncoder::Encode(Copier *copy) {
-  DCHECK(HasSpace(copy->size()));
+bool DummyEncoder::HasSpace(size_t request) const { return request <= space(); }
+
+size_t DummyEncoder::Encode(Copier *copy, size_t start_byte) {
   const size_t input_buffer_initial_size = input_buffer_.size();
 
-  input_buffer_.resize(input_buffer_initial_size + copy->size());
-  const size_t written_size = copy->Copy(
-      input_buffer_.data() + input_buffer_initial_size, 0, copy->size());
-  DCHECK_EQ(written_size, copy->size());
+  size_t expected_write_size =
+      std::min(input_buffer_.capacity() - input_buffer_initial_size,
+               copy->size() - start_byte);
+  input_buffer_.resize(input_buffer_initial_size + expected_write_size);
+  const size_t written_size =
+      copy->Copy(input_buffer_.data() + input_buffer_initial_size, start_byte,
+                 expected_write_size + start_byte);
 
   total_bytes_ += written_size;
+
+  return written_size;
 }
 
 void DummyEncoder::Clear(const int n) {
diff --git a/aos/events/logging/buffer_encoder.h b/aos/events/logging/buffer_encoder.h
index f4a6251..ed5bef6 100644
--- a/aos/events/logging/buffer_encoder.h
+++ b/aos/events/logging/buffer_encoder.h
@@ -30,7 +30,7 @@
     size_t size_;
   };
 
-  // Coppies a span.  The span must have a longer lifetime than the coppier is
+  // Copies a span.  The span must have a longer lifetime than the copier is
   // being used.
   class SpanCopier : public Copier {
    public:
@@ -55,12 +55,13 @@
   // the output needs to be flushed.
   virtual bool HasSpace(size_t request) const = 0;
 
-  // Encodes and enqueues the given data encoder.
-  virtual void Encode(Copier *copy) = 0;
+  // Returns the space available.
+  virtual size_t space() const = 0;
 
-  // If this returns true, the encoder may be bypassed by writing directly to
-  // the file.
-  virtual bool may_bypass() const { return false; }
+  // Encodes and enqueues the given data encoder.  Starts at the start byte
+  // (which must be a multiple of 8 bytes), and goes as far as it can.  Returns
+  // the amount encoded.
+  virtual size_t Encode(Copier *copy, size_t start_byte) = 0;
 
   // Finalizes the encoding process. After this, queue_size() represents the
   // full extent of data which will be written to this file.
@@ -90,7 +91,7 @@
 // and queues it up as is.
 class DummyEncoder final : public DataEncoder {
  public:
-  DummyEncoder(size_t max_buffer_size);
+  DummyEncoder(size_t max_message_size, size_t buffer_size = 128 * 1024);
   DummyEncoder(const DummyEncoder &) = delete;
   DummyEncoder(DummyEncoder &&other) = delete;
   DummyEncoder &operator=(const DummyEncoder &) = delete;
@@ -98,8 +99,8 @@
   ~DummyEncoder() override = default;
 
   bool HasSpace(size_t request) const final;
-  void Encode(Copier *copy) final;
-  bool may_bypass() const final { return true; }
+  size_t space() const final;
+  size_t Encode(Copier *copy, size_t start_byte) final;
   void Finish() final {}
   void Clear(int n) final;
   absl::Span<const absl::Span<const uint8_t>> queue() final;
diff --git a/aos/events/logging/buffer_encoder_param_test.h b/aos/events/logging/buffer_encoder_param_test.h
index 9e7882d..6085779 100644
--- a/aos/events/logging/buffer_encoder_param_test.h
+++ b/aos/events/logging/buffer_encoder_param_test.h
@@ -45,7 +45,7 @@
           << encoder->queued_bytes() << ", encoding " << buffer.size();
 
       DetachedBufferCopier coppier(std::move(buffer));
-      encoder->Encode(&coppier);
+      encoder->Encode(&coppier, 0);
     }
     return result;
   }
diff --git a/aos/events/logging/buffer_encoder_test.cc b/aos/events/logging/buffer_encoder_test.cc
index 5f3ecab..d5e2e5d 100644
--- a/aos/events/logging/buffer_encoder_test.cc
+++ b/aos/events/logging/buffer_encoder_test.cc
@@ -13,9 +13,12 @@
 
 class DummyEncoderTest : public BufferEncoderBaseTest {};
 
+constexpr size_t kEncoderBufferSize = 4 * 1024 * 1024;
+
 // Tests that buffers are concatenated without being modified.
 TEST_F(DummyEncoderTest, QueuesBuffersAsIs) {
-  DummyEncoder encoder(BufferEncoderBaseTest::kMaxMessageSize);
+  DummyEncoder encoder(BufferEncoderBaseTest::kMaxMessageSize,
+                       kEncoderBufferSize);
   const auto expected = CreateAndEncode(100, &encoder);
   std::vector<uint8_t> data = Flatten(expected);
 
@@ -26,7 +29,8 @@
 
 // Tests that buffers are concatenated without being modified.
 TEST_F(DummyEncoderTest, CoppiesBuffersAsIs) {
-  DummyEncoder encoder(BufferEncoderBaseTest::kMaxMessageSize);
+  DummyEncoder encoder(BufferEncoderBaseTest::kMaxMessageSize,
+                       kEncoderBufferSize);
   const auto expected = CreateAndEncode(100, &encoder);
   std::vector<uint8_t> data = Flatten(expected);
 
@@ -108,7 +112,8 @@
 INSTANTIATE_TEST_SUITE_P(
     Dummy, BufferEncoderTest,
     ::testing::Combine(::testing::Values([](size_t max_buffer_size) {
-                         return std::make_unique<DummyEncoder>(max_buffer_size);
+                         return std::make_unique<DummyEncoder>(
+                             max_buffer_size, kEncoderBufferSize);
                        }),
                        ::testing::Values([](std::string_view filename) {
                          return std::make_unique<DummyDecoder>(filename);
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index c0c7c73..86d813f 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -14,6 +14,8 @@
 #include "flatbuffers/flatbuffers.h"
 #include "glog/logging.h"
 
+DECLARE_int32(flush_size);
+
 namespace aos {
 namespace logger {
 
@@ -568,7 +570,12 @@
                                      EventLoop *event_loop, const Node *node)
     : LogNamer(configuration, event_loop, node),
       base_name_(base_name),
-      old_base_name_() {}
+      old_base_name_(),
+      encoder_factory_([](size_t max_message_size) {
+        // TODO(austin): For slow channels, can we allocate less memory?
+        return std::make_unique<DummyEncoder>(max_message_size,
+                                              FLAGS_flush_size);
+      }) {}
 
 MultiNodeLogNamer::~MultiNodeLogNamer() {
   if (!ran_out_of_space_) {
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
index 4e8c990..07edeac 100644
--- a/aos/events/logging/log_namer.h
+++ b/aos/events/logging/log_namer.h
@@ -491,10 +491,7 @@
   std::vector<std::string> all_filenames_;
 
   std::string temp_suffix_;
-  std::function<std::unique_ptr<DataEncoder>(size_t)> encoder_factory_ =
-      [](size_t max_message_size) {
-        return std::make_unique<DummyEncoder>(max_message_size);
-      };
+  std::function<std::unique_ptr<DataEncoder>(size_t)> encoder_factory_;
   std::string extension_;
 
   // Storage for statistics from previously-rotated DetachedBufferWriters.
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 0918545..02a9fdd 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -64,6 +64,10 @@
             "corrupt message found by MessageReader be silently ignored, "
             "providing access to all uncorrupted messages in a logfile.");
 
+DEFINE_bool(direct, false,
+            "If true, write using O_DIRECT and write 512 byte aligned blocks "
+            "whenever possible.");
+
 namespace aos::logger {
 namespace {
 
@@ -81,7 +85,10 @@
 
 DetachedBufferWriter::DetachedBufferWriter(std::string_view filename,
                                            std::unique_ptr<DataEncoder> encoder)
-    : filename_(filename), encoder_(std::move(encoder)) {
+    : filename_(filename),
+      encoder_(std::move(encoder)),
+      supports_odirect_(FLAGS_direct) {
+  iovec_.reserve(10);
   if (!util::MkdirPIfSpace(filename, 0777)) {
     ran_out_of_space_ = true;
   } else {
@@ -92,10 +99,38 @@
       PCHECK(fd_ != -1) << ": Failed to open " << this->filename()
                         << " for writing";
       VLOG(1) << "Opened " << this->filename() << " for writing";
+
+      flags_ = fcntl(fd_, F_GETFL, 0);
+      PCHECK(flags_ >= 0) << ": Failed to get flags for " << this->filename();
+
+      EnableDirect();
     }
   }
 }
 
+void DetachedBufferWriter::EnableDirect() {
+  if (supports_odirect_ && !ODirectEnabled()) {
+    const int new_flags = flags_ | O_DIRECT;
+    // Track if we failed to set O_DIRECT.  Note: Austin hasn't seen this call
+    // fail.  The write call tends to fail instead.
+    if (fcntl(fd_, F_SETFL, new_flags) == -1) {
+      PLOG(WARNING) << "Failed to set O_DIRECT on " << filename();
+      supports_odirect_ = false;
+    } else {
+      VLOG(1) << "Enabled O_DIRECT on " << filename();
+      flags_ = new_flags;
+    }
+  }
+}
+
+void DetachedBufferWriter::DisableDirect() {
+  if (supports_odirect_ && ODirectEnabled()) {
+    flags_ = flags_ & (~O_DIRECT);
+    PCHECK(fcntl(fd_, F_SETFL, flags_) != -1) << ": Failed to disable O_DIRECT";
+    VLOG(1) << "Disabled O_DIRECT on " << filename();
+  }
+}
+
 DetachedBufferWriter::~DetachedBufferWriter() {
   Close();
   if (ran_out_of_space_) {
@@ -126,10 +161,14 @@
   std::swap(total_write_count_, other.total_write_count_);
   std::swap(total_write_messages_, other.total_write_messages_);
   std::swap(total_write_bytes_, other.total_write_bytes_);
+  std::swap(last_synced_bytes_, other.last_synced_bytes_);
+  std::swap(supports_odirect_, other.supports_odirect_);
+  std::swap(flags_, other.flags_);
+  std::swap(last_flush_time_, other.last_flush_time_);
   return *this;
 }
 
-void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *coppier,
+void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *copier,
                                        aos::monotonic_clock::time_point now) {
   if (ran_out_of_space_) {
     // We don't want any later data to be written after space becomes
@@ -138,12 +177,23 @@
     return;
   }
 
-  if (!encoder_->HasSpace(coppier->size())) {
-    Flush();
-    CHECK(encoder_->HasSpace(coppier->size()));
-  }
+  const size_t message_size = copier->size();
+  size_t overall_bytes_written = 0;
 
-  encoder_->Encode(coppier);
+  // Keep writing chunks until we've written it all.  If we end up with a
+  // partial write, this means we need to flush to disk.
+  do {
+    const size_t bytes_written = encoder_->Encode(copier, overall_bytes_written);
+    CHECK(bytes_written != 0);
+
+    overall_bytes_written += bytes_written;
+    if (overall_bytes_written < message_size) {
+      VLOG(1) << "Flushing because of a partial write, tried to write "
+              << message_size << " wrote " << overall_bytes_written;
+      Flush(now);
+    }
+  } while (overall_bytes_written < message_size);
+
   FlushAtThreshold(now);
 }
 
@@ -153,7 +203,7 @@
   }
   encoder_->Finish();
   while (encoder_->queue_size() > 0) {
-    Flush();
+    Flush(monotonic_clock::max_time);
   }
   if (close(fd_) == -1) {
     if (errno == ENOSPC) {
@@ -166,7 +216,8 @@
   VLOG(1) << "Closed " << filename();
 }
 
-void DetachedBufferWriter::Flush() {
+void DetachedBufferWriter::Flush(aos::monotonic_clock::time_point now) {
+  last_flush_time_ = now;
   if (ran_out_of_space_) {
     // We don't want any later data to be written after space becomes available,
     // so refuse to write anything more once we've dropped data because we ran
@@ -186,23 +237,110 @@
   }
 
   iovec_.clear();
-  const size_t iovec_size = std::min<size_t>(queue.size(), IOV_MAX);
+  size_t iovec_size = std::min<size_t>(queue.size(), IOV_MAX);
   iovec_.resize(iovec_size);
   size_t counted_size = 0;
+
+  // Ok, we now need to figure out if we were aligned, and if we were, how much
+  // of the data we are being asked to write is aligned.
+  //
+  // The file is aligned if it is a multiple of kSector in length.  The data is
+  // aligned if it's memory is kSector aligned, and the length is a multiple of
+  // kSector in length.
+  bool aligned = (total_write_bytes_ % kSector) == 0;
+  size_t write_index = 0;
   for (size_t i = 0; i < iovec_size; ++i) {
-    iovec_[i].iov_base = const_cast<uint8_t *>(queue[i].data());
-    iovec_[i].iov_len = queue[i].size();
-    counted_size += iovec_[i].iov_len;
+    iovec_[write_index].iov_base = const_cast<uint8_t *>(queue[i].data());
+
+    // Make sure the address is aligned, or give up.  This should be uncommon,
+    // but is always possible.
+    if ((reinterpret_cast<size_t>(iovec_[write_index].iov_base) &
+         (kSector - 1)) != 0) {
+      aligned = false;
+    }
+
+    // Now, see if the length is a multiple of kSector.  The goal is to figure
+    // out if/how much memory we can write out with O_DIRECT so that only the
+    // last little bit is done with non-direct IO to keep it fast.
+    iovec_[write_index].iov_len = queue[i].size();
+    if ((iovec_[write_index].iov_len % kSector) != 0) {
+      VLOG(1) << "Unaligned length on " << filename();
+      // If we've got over a sector of data to write, write it out with O_DIRECT
+      // and then continue writing the rest unaligned.
+      if (aligned && iovec_[write_index].iov_len > kSector) {
+        const size_t aligned_size =
+            iovec_[write_index].iov_len & (~(kSector - 1));
+        VLOG(1) << "Was aligned, writing last chunk rounded from "
+                << queue[i].size() << " to " << aligned_size;
+        iovec_[write_index].iov_len = aligned_size;
+
+        WriteV(iovec_.data(), i + 1, true, counted_size + aligned_size);
+
+        // Now, everything before here has been written.  Make an iovec out of
+        // the last bytes, and keep going.
+        iovec_size -= write_index;
+        iovec_.resize(iovec_size);
+        write_index = 0;
+        counted_size = 0;
+
+        iovec_[write_index].iov_base =
+            const_cast<uint8_t *>(queue[i].data() + aligned_size);
+        iovec_[write_index].iov_len = queue[i].size() - aligned_size;
+      }
+      aligned = false;
+    }
+    VLOG(1) << "Writing " << iovec_[write_index].iov_len << " to "
+            << filename();
+    counted_size += iovec_[write_index].iov_len;
+    ++write_index;
+  }
+
+  // Either write the aligned data if it is all aligned, or write the rest
+  // unaligned if we wrote aligned up above.
+  WriteV(iovec_.data(), iovec_.size(), aligned, counted_size);
+
+  encoder_->Clear(iovec_size);
+}
+
+size_t DetachedBufferWriter::WriteV(struct iovec *iovec_data, size_t iovec_size,
+                                    bool aligned, size_t counted_size) {
+  // Configure the file descriptor to match the mode we should be in.  This is
+  // safe to over-call since it only does the syscall if needed.
+  if (aligned) {
+    EnableDirect();
+  } else {
+    DisableDirect();
   }
 
   const auto start = aos::monotonic_clock::now();
-  const ssize_t written = writev(fd_, iovec_.data(), iovec_.size());
+  const ssize_t written = writev(fd_, iovec_data, iovec_size);
+
+  if (written > 0) {
+    // Flush asynchronously and force the data out of the cache.
+    sync_file_range(fd_, total_write_bytes_, written, SYNC_FILE_RANGE_WRITE);
+    if (last_synced_bytes_ != 0) {
+      // Per Linus' recommendation online on how to do fast file IO, do a
+      // blocking flush of the previous write chunk, and then tell the kernel to
+      // drop the pages from the cache.  This makes sure we can't get too far
+      // ahead.
+      sync_file_range(fd_, last_synced_bytes_,
+                      total_write_bytes_ - last_synced_bytes_,
+                      SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
+                          SYNC_FILE_RANGE_WAIT_AFTER);
+      posix_fadvise(fd_, last_synced_bytes_,
+                    total_write_bytes_ - last_synced_bytes_,
+                    POSIX_FADV_DONTNEED);
+
+      last_synced_bytes_ = total_write_bytes_;
+    }
+  }
+
   const auto end = aos::monotonic_clock::now();
   HandleWriteReturn(written, counted_size);
 
-  encoder_->Clear(iovec_size);
-
   UpdateStatsForWrite(end - start, written, iovec_size);
+
+  return written;
 }
 
 void DetachedBufferWriter::HandleWriteReturn(ssize_t write_return,
@@ -211,7 +349,7 @@
     ran_out_of_space_ = true;
     return;
   }
-  PCHECK(write_return >= 0) << ": write failed";
+  PCHECK(write_return >= 0) << ": write failed, got " << write_return;
   if (write_return < static_cast<ssize_t>(write_size)) {
     // Sometimes this happens instead of ENOSPC. On a real filesystem, this
     // never seems to happen in any other case. If we ever want to log to a
@@ -260,13 +398,14 @@
   // Flush if we are at the max number of iovs per writev, because there's no
   // point queueing up any more data in memory. Also flush once we have enough
   // data queued up or if it has been long enough.
-  while (encoder_->queued_bytes() > static_cast<size_t>(FLAGS_flush_size) ||
+  while (encoder_->space() == 0 ||
+         encoder_->queued_bytes() > static_cast<size_t>(FLAGS_flush_size) ||
          encoder_->queue_size() >= IOV_MAX ||
          now > last_flush_time_ +
                    chrono::duration_cast<chrono::nanoseconds>(
                        chrono::duration<double>(FLAGS_flush_period))) {
-    last_flush_time_ = now;
-    Flush();
+    VLOG(1) << "Chose to flush at " << now << ", last " << last_flush_time_;
+    Flush(now);
   }
 }
 
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index 4e38feb..50f6b40 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -44,11 +44,30 @@
 
 // This class manages efficiently writing a sequence of detached buffers to a
 // file.  It encodes them, queues them up, and batches the write operation.
+//
+// There are a couple over-arching constraints on writing to keep track of.
+//  1) The kernel is both faster and more efficient at writing large, aligned
+//     chunks with O_DIRECT set on the file.  The alignment needed is specified
+//     by kSector and is file system dependent.
+//  2) Not all encoders support generating round multiples of kSector of data.
+//     Rather than burden the API for detecting when that is the case, we want
+//     DetachedBufferWriter to be as efficient as it can at writing what it is given.
+//  3) Some files are small and not updated frequently.  They need to be
+//     flushed or we will lose data on power off.  It is most efficient to write
+//     as much as we can aligned by kSector and then fall back to the non direct
+//     method when it has been flushed.
+//  4) Not all filesystems support O_DIRECT, and different sizes may be optimal
+//     for different machines.  The defaults should work decently anywhere and
+//     be tuneable for faster systems.
 class DetachedBufferWriter {
  public:
   // Marker struct for one of our constructor overloads.
   struct already_out_of_space_t {};
 
+  // Size of an aligned sector used to detect when the data is aligned enough to
+  // use O_DIRECT instead.
+  static constexpr size_t kSector = 512u;
+
   DetachedBufferWriter(std::string_view filename,
                        std::unique_ptr<DataEncoder> encoder);
   // Creates a dummy instance which won't even open a file. It will act as if
@@ -134,12 +153,13 @@
 
  private:
   // Performs a single writev call with as much of the data we have queued up as
-  // possible.
+  // possible.  now is the time we flushed at, to be recorded in
+  // last_flush_time_.
   //
   // This will normally take all of the data we have queued up, unless an
   // encoder has spit out a big enough chunk all at once that we can't manage
   // all of it.
-  void Flush();
+  void Flush(aos::monotonic_clock::time_point now);
 
   // write_return is what write(2) or writev(2) returned. write_size is the
   // number of bytes we expected it to write.
@@ -154,6 +174,21 @@
   // the current time.  It just needs to be close.
   void FlushAtThreshold(aos::monotonic_clock::time_point now);
 
+  // Enables O_DIRECT on the open file if it is supported.  Cheap to call if it
+  // is already enabled.
+  void EnableDirect();
+  // Disables O_DIRECT on the open file if it is supported.  Cheap to call if it
+  // is already disabled.
+  void DisableDirect();
+
+  // Writes a chunk of iovecs.  aligned is true if all the data is kSector byte
+  // aligned and multiples of it in length, and counted_size is the sum of the
+  // sizes of all the chunks of data.  Returns the size of data written.
+  size_t WriteV(struct iovec *iovec_data, size_t iovec_size, bool aligned,
+                size_t counted_size);
+
+  bool ODirectEnabled() { return !!(flags_ & O_DIRECT); }
+
   std::string filename_;
   std::unique_ptr<DataEncoder> encoder_;
 
@@ -172,6 +207,10 @@
   int total_write_count_ = 0;
   int total_write_messages_ = 0;
   int total_write_bytes_ = 0;
+  int last_synced_bytes_ = 0;
+
+  bool supports_odirect_ = true;
+  int flags_ = 0;
 
   aos::monotonic_clock::time_point last_flush_time_ =
       aos::monotonic_clock::min_time;
diff --git a/aos/events/logging/logger_main.cc b/aos/events/logging/logger_main.cc
index 81b4e3f..f0582dc 100644
--- a/aos/events/logging/logger_main.cc
+++ b/aos/events/logging/logger_main.cc
@@ -32,6 +32,8 @@
 DEFINE_int32(xz_compression_level, 9, "Compression level for the LZMA Encoder");
 #endif
 
+DECLARE_int32(flush_size);
+
 int main(int argc, char *argv[]) {
   gflags::SetUsageMessage(
       "This program provides a simple logger binary that logs all SHMEM data "
@@ -53,14 +55,15 @@
   if (FLAGS_snappy_compress) {
     log_namer->set_extension(aos::logger::SnappyDecoder::kExtension);
     log_namer->set_encoder_factory([](size_t max_message_size) {
-      return std::make_unique<aos::logger::SnappyEncoder>(max_message_size);
+      return std::make_unique<aos::logger::SnappyEncoder>(max_message_size,
+                                                          FLAGS_flush_size);
     });
 #ifdef LZMA
   } else if (FLAGS_xz_compress) {
     log_namer->set_extension(aos::logger::LzmaEncoder::kExtension);
     log_namer->set_encoder_factory([](size_t max_message_size) {
       return std::make_unique<aos::logger::LzmaEncoder>(
-          max_message_size, FLAGS_xz_compression_level);
+          max_message_size, FLAGS_xz_compression_level, FLAGS_flush_size);
     });
 #endif
   }
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 18c8aa1..cdf080b 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -557,7 +557,7 @@
            }},
           {SnappyDecoder::kExtension,
            [](size_t max_message_size) {
-             return std::make_unique<SnappyEncoder>(max_message_size);
+             return std::make_unique<SnappyEncoder>(max_message_size, 32768);
            }},
 #ifdef LZMA
           {LzmaDecoder::kExtension,
diff --git a/aos/events/logging/lzma_encoder.cc b/aos/events/logging/lzma_encoder.cc
index 3959727..f354638 100644
--- a/aos/events/logging/lzma_encoder.cc
+++ b/aos/events/logging/lzma_encoder.cc
@@ -88,19 +88,22 @@
 
 LzmaEncoder::~LzmaEncoder() { lzma_end(&stream_); }
 
-void LzmaEncoder::Encode(Copier *copy) {
+size_t LzmaEncoder::Encode(Copier *copy, size_t start_byte) {
   const size_t copy_size = copy->size();
   // LZMA compresses the data as it goes along, copying the compressed results
   // into another buffer.  So, there's no need to store more than one message
   // since lzma is going to take it from here.
   CHECK_LE(copy_size, input_buffer_.size());
 
-  CHECK_EQ(copy->Copy(input_buffer_.data(), 0, copy_size), copy_size);
+  CHECK_EQ(copy->Copy(input_buffer_.data(), start_byte, copy_size - start_byte),
+           copy_size - start_byte);
 
   stream_.next_in = input_buffer_.data();
   stream_.avail_in = copy_size;
 
   RunLzmaCode(LZMA_RUN);
+
+  return copy_size - start_byte;
 }
 
 void LzmaEncoder::Finish() { RunLzmaCode(LZMA_FINISH); }
diff --git a/aos/events/logging/lzma_encoder.h b/aos/events/logging/lzma_encoder.h
index bbd739a..b4964fb 100644
--- a/aos/events/logging/lzma_encoder.h
+++ b/aos/events/logging/lzma_encoder.h
@@ -37,7 +37,8 @@
     // space.
     return true;
   }
-  void Encode(Copier *copy) final;
+  size_t space() const final { return input_buffer_.capacity(); }
+  size_t Encode(Copier *copy, size_t start_byte) final;
   void Finish() final;
   void Clear(int n) final;
   absl::Span<const absl::Span<const uint8_t>> queue() final;
diff --git a/aos/events/logging/snappy_encoder.cc b/aos/events/logging/snappy_encoder.cc
index 1b80fe0..0e96092 100644
--- a/aos/events/logging/snappy_encoder.cc
+++ b/aos/events/logging/snappy_encoder.cc
@@ -43,12 +43,15 @@
 
 void SnappyEncoder::Finish() { EncodeCurrentBuffer(); }
 
-void SnappyEncoder::Encode(Copier *copy) {
+size_t SnappyEncoder::Encode(Copier *copy, size_t start_byte) {
+  CHECK_EQ(start_byte, 0u);
   buffer_source_.Append(copy);
 
   if (buffer_source_.Available() >= chunk_size_) {
     EncodeCurrentBuffer();
   }
+
+  return copy->size();
 }
 
 void SnappyEncoder::EncodeCurrentBuffer() {
diff --git a/aos/events/logging/snappy_encoder.h b/aos/events/logging/snappy_encoder.h
index d3edbe5..d698ee1 100644
--- a/aos/events/logging/snappy_encoder.h
+++ b/aos/events/logging/snappy_encoder.h
@@ -16,9 +16,9 @@
 // Encodes buffers using snappy.
 class SnappyEncoder final : public DataEncoder {
  public:
-  explicit SnappyEncoder(size_t max_message_size, size_t chunk_size = 32768);
+  explicit SnappyEncoder(size_t max_message_size, size_t chunk_size = 128 * 1024);
 
-  void Encode(Copier *copy) final;
+  size_t Encode(Copier *copy, size_t start_byte) final;
 
   void Finish() final;
   void Clear(int n) final;
@@ -28,6 +28,7 @@
     // Since the output always mallocs space, we have infinite output space.
     return true;
   }
+  size_t space() const final { return buffer_source_.space(); }
   size_t total_bytes() const final { return total_bytes_; }
   size_t queue_size() const final { return queue_.size(); }
 
@@ -35,6 +36,7 @@
   class DetachedBufferSource : public snappy::Source {
    public:
     DetachedBufferSource(size_t buffer_size);
+    size_t space() const { return data_.capacity() - data_.size(); }
     size_t Available() const final;
     const char *Peek(size_t *length) final;
     void Skip(size_t n) final;
diff --git a/documentation/README.md b/documentation/README.md
index 32b8eb1..0f6485c 100644
--- a/documentation/README.md
+++ b/documentation/README.md
@@ -13,3 +13,4 @@
 * [Create a new autonomous routine](tutorials/create-a-new-autonomous.md)
 * [Tune an autonomous](tutorials/tune-an-autonomous.md)
 * [Set up access to the build server using vscode](tutorials/setup-ssh-vscode.md)
+* [Install PyCharm on the build server](tutorials/setup-pycharm-on-build-server.md)
diff --git a/documentation/tutorials/setup-pycharm-on-build-server.md b/documentation/tutorials/setup-pycharm-on-build-server.md
new file mode 100644
index 0000000..acbb66d
--- /dev/null
+++ b/documentation/tutorials/setup-pycharm-on-build-server.md
@@ -0,0 +1,20 @@
+## Installing PyCharm on the build server
+
+### Getting PyCharm Professional (optional)
+Go to JetBrains' student [website](https://www.jetbrains.com/community/education/#students) and click "Apply now". Fill out the form using your school email. Check your school email for an email from JetBrains. Create a new JetBrains account. Use this account to log in when PyCharm asks for a professional key.
+
+### Downloading on the build server
+Open your shell and run 
+```
+curl https://raw.githubusercontent.com/thealtofwar/files/main/install_pycharm.sh > ~/install_pycharm.sh
+chmod +x ~/install_pycharm.sh
+~/install_pycharm.sh
+```
+This installs PyCharm to your desktop.
+### Alternate download method
+When you're on the build server, go to the [download](https://www.jetbrains.com/pycharm/download/#section=linux) and pick which edition of PyCharm you would like to download. Once the download completes, click on it to extract it. When something that looks like this appears,<br>
+![](https://raw.githubusercontent.com/thealtofwar/files/main/Screenshot%202021-11-11%20100000.png)<br>
+Look for the button that looks like an up arrow with a line on top that's labeled root, which looks like [this](https://raw.githubusercontent.com/thealtofwar/files/main/Screenshot%202021-11-11%20101425.png). Click on it. Drag the folder to your desktop. It will say an error occurred, but it should be fine.
+
+### Installing PyCharm on the build server
+When you're on the build server, go to the folder that you just downloaded PyCharm to. Click on the 'bin' folder. Click on the 'pycharm.sh' file. PyCharm should start running. If you downloaded the Professional version, you might have to log in with the JetBrains account you made when signing up. Open the folder where your project is located. Now you can start coding.
diff --git a/frc971/control_loops/python/graph.py b/frc971/control_loops/python/graph.py
index 178b63d..1cc6f57 100644
--- a/frc971/control_loops/python/graph.py
+++ b/frc971/control_loops/python/graph.py
@@ -49,12 +49,15 @@
         if self.data is None:
             return None
         cursor_index = int(self.cursor / self.dt)
-        if cursor_index > self.data.size:
+        if self.data[0].size < cursor_index:
             return None
         # use the time to index into the position data
-        distance_at_cursor = self.data[0][cursor_index - 1]
-        multispline_index = int(self.data[5][cursor_index - 1])
-        return (multispline_index, distance_at_cursor)
+        try:
+            distance_at_cursor = self.data[0][cursor_index - 1]
+            multispline_index = int(self.data[5][cursor_index - 1])
+            return (multispline_index, distance_at_cursor)
+        except IndexError:
+            return None
 
     def place_cursor(self, multispline_index, distance):
         """Places the cursor at a certain distance along the spline"""
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
index 0a944e1..2b55e94 100755
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -240,8 +240,11 @@
         multispline, result = Multispline.nearest_distance(
             self.multisplines, mouse)
 
-        if self.graph.cursor is not None and self.graph.data is not None:
-            multispline_index, x = self.graph.find_cursor()
+        if self.graph.cursor is not None:
+            cursor = self.graph.find_cursor()
+            if cursor is None:
+                return
+            multispline_index, x = cursor
             distance_spline = DistanceSpline(
                 self.multisplines[multispline_index].getLibsplines())
 
diff --git a/frc971/vision/BUILD b/frc971/vision/BUILD
index 642ca0f..3b011f2 100644
--- a/frc971/vision/BUILD
+++ b/frc971/vision/BUILD
@@ -99,11 +99,9 @@
         "//aos/network:message_bridge_server_fbs",
         "//aos/network:team_number",
         "//frc971/control_loops:quaternion_utils",
+        "//frc971/vision:calibration_fbs",
         "//frc971/vision:vision_fbs",
         "//third_party:opencv",
-        "//y2020/vision/sift:sift_fbs",
-        "//y2020/vision/sift:sift_training_fbs",
-        "//y2020/vision/tools/python_code:sift_training_data",
         "@com_github_foxglove_schemas//:schemas",
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/strings:str_format",
@@ -303,3 +301,51 @@
         "//aos/testing:tmpdir",
     ],
 )
+
+cc_library(
+    name = "intrinsics_calibration_lib",
+    srcs = [
+        "intrinsics_calibration_lib.cc",
+    ],
+    hdrs = [
+        "intrinsics_calibration_lib.h",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        "//aos/events:event_loop",
+        "//frc971/control_loops/drivetrain:improved_down_estimator",
+        "//frc971/vision:charuco_lib",
+        "//frc971/vision:vision_fbs",
+        "//frc971/wpilib:imu_batch_fbs",
+        "//frc971/wpilib:imu_fbs",
+        "//third_party:opencv",
+        "@com_google_absl//absl/strings:str_format",
+        "@org_tuxfamily_eigen//:eigen",
+    ],
+)
+
+cc_binary(
+    name = "intrinsics_calibration",
+    srcs = [
+        "intrinsics_calibration.cc",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = [
+        "//y2020:__subpackages__",
+        "//y2022:__subpackages__",
+        "//y2023:__subpackages__",
+    ],
+    deps = [
+        ":intrinsics_calibration_lib",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//frc971/control_loops/drivetrain:improved_down_estimator",
+        "//frc971/vision:charuco_lib",
+        "//frc971/vision:vision_fbs",
+        "//frc971/wpilib:imu_batch_fbs",
+        "//frc971/wpilib:imu_fbs",
+        "//third_party:opencv",
+        "@com_google_absl//absl/strings:str_format",
+        "@org_tuxfamily_eigen//:eigen",
+    ],
+)
diff --git a/frc971/vision/calibration.fbs b/frc971/vision/calibration.fbs
index f5e30a6..df8b7b0 100644
--- a/frc971/vision/calibration.fbs
+++ b/frc971/vision/calibration.fbs
@@ -44,6 +44,10 @@
 
   // Timestamp for when the calibration was taken on the realtime clock.
   calibration_timestamp:int64 (id: 6);
+
+  // ID for the physical camera hardware (typically will be a string of the form
+  // YY-NN, with a two-digit year and an index).
+  camera_id:string (id: 7);
 }
 
 // Calibration information for all the cameras we know about.
diff --git a/frc971/vision/calibration_accumulator.cc b/frc971/vision/calibration_accumulator.cc
index ea9af28..711ed7d 100644
--- a/frc971/vision/calibration_accumulator.cc
+++ b/frc971/vision/calibration_accumulator.cc
@@ -140,11 +140,13 @@
       node, channel_overrides);
 }
 
-Calibration::Calibration(aos::SimulatedEventLoopFactory *event_loop_factory,
-                         aos::EventLoop *image_event_loop,
-                         aos::EventLoop *imu_event_loop, std::string_view pi,
-                         TargetType target_type, std::string_view image_channel,
-                         CalibrationData *data)
+Calibration::Calibration(
+    aos::SimulatedEventLoopFactory *event_loop_factory,
+    aos::EventLoop *image_event_loop, aos::EventLoop *imu_event_loop,
+    std::string_view pi,
+    const calibration::CameraCalibration *intrinsics_calibration,
+    TargetType target_type, std::string_view image_channel,
+    CalibrationData *data)
     : image_event_loop_(image_event_loop),
       image_factory_(event_loop_factory->GetNodeEventLoopFactory(
           image_event_loop_->node())),
@@ -152,7 +154,7 @@
       imu_factory_(
           event_loop_factory->GetNodeEventLoopFactory(imu_event_loop_->node())),
       charuco_extractor_(
-          image_event_loop_, pi, target_type, image_channel,
+          image_event_loop_, intrinsics_calibration, target_type, image_channel,
           [this](cv::Mat rgb_image, monotonic_clock::time_point eof,
                  std::vector<cv::Vec4i> charuco_ids,
                  std::vector<std::vector<cv::Point2f>> charuco_corners,
diff --git a/frc971/vision/calibration_accumulator.h b/frc971/vision/calibration_accumulator.h
index 5c435ad..c4dcae9 100644
--- a/frc971/vision/calibration_accumulator.h
+++ b/frc971/vision/calibration_accumulator.h
@@ -105,8 +105,10 @@
  public:
   Calibration(aos::SimulatedEventLoopFactory *event_loop_factory,
               aos::EventLoop *image_event_loop, aos::EventLoop *imu_event_loop,
-              std::string_view pi, TargetType target_type,
-              std::string_view image_channel, CalibrationData *data);
+              std::string_view pi,
+              const calibration::CameraCalibration *intrinsics_calibration,
+              TargetType target_type, std::string_view image_channel,
+              CalibrationData *data);
 
   // Processes a charuco detection that is returned from charuco_lib.
   // For a valid detection(s), it stores camera observation
diff --git a/frc971/vision/charuco_lib.cc b/frc971/vision/charuco_lib.cc
index f12a6a5..116aba0 100644
--- a/frc971/vision/charuco_lib.cc
+++ b/frc971/vision/charuco_lib.cc
@@ -13,9 +13,6 @@
 #include "frc971/control_loops/quaternion_utils.h"
 #include "frc971/vision/vision_generated.h"
 #include "glog/logging.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/tools/python_code/sift_training_data.h"
 
 DEFINE_string(board_template_path, "",
               "If specified, write an image to the specified path for the "
@@ -37,59 +34,27 @@
 using aos::monotonic_clock;
 
 CameraCalibration::CameraCalibration(
-    const absl::Span<const uint8_t> training_data_bfbs, std::string_view pi) {
-  const aos::FlatbufferSpan<sift::TrainingData> training_data(
-      training_data_bfbs);
-  CHECK(training_data.Verify());
-  camera_calibration_ = FindCameraCalibration(&training_data.message(), pi);
-}
-
-cv::Mat CameraCalibration::CameraIntrinsics() const {
-  const cv::Mat result(3, 3, CV_32F,
-                       const_cast<void *>(static_cast<const void *>(
-                           camera_calibration_->intrinsics()->data())));
-  CHECK_EQ(result.total(), camera_calibration_->intrinsics()->size());
-  return result;
-}
-
-Eigen::Matrix3d CameraCalibration::CameraIntrinsicsEigen() const {
-  cv::Mat camera_intrinsics = CameraIntrinsics();
-  Eigen::Matrix3d result;
-  cv::cv2eigen(camera_intrinsics, result);
-  return result;
-}
-
-cv::Mat CameraCalibration::CameraDistCoeffs() const {
-  const cv::Mat result(5, 1, CV_32F,
-                       const_cast<void *>(static_cast<const void *>(
-                           camera_calibration_->dist_coeffs()->data())));
-  CHECK_EQ(result.total(), camera_calibration_->dist_coeffs()->size());
-  return result;
-}
-
-const sift::CameraCalibration *CameraCalibration::FindCameraCalibration(
-    const sift::TrainingData *const training_data, std::string_view pi) const {
-  std::optional<uint16_t> pi_number = aos::network::ParsePiNumber(pi);
-  std::optional<uint16_t> team_number =
-      aos::network::team_number_internal::ParsePiTeamNumber(pi);
-  CHECK(pi_number);
-  CHECK(team_number);
-  const std::string node_name = absl::StrFormat("pi%d", pi_number.value());
-  LOG(INFO) << "Looking for node name " << node_name << " team number "
-            << team_number.value();
-  for (const sift::CameraCalibration *candidate :
-       *training_data->camera_calibrations()) {
-    if (candidate->node_name()->string_view() != node_name) {
-      continue;
-    }
-    if (candidate->team_number() != team_number.value()) {
-      continue;
-    }
-    return candidate;
-  }
-  LOG(FATAL) << ": Failed to find camera calibration for " << node_name
-             << " on " << team_number.value();
-}
+    const calibration::CameraCalibration *calibration)
+    : intrinsics_([calibration]() {
+        const cv::Mat result(3, 3, CV_32F,
+                             const_cast<void *>(static_cast<const void *>(
+                                 calibration->intrinsics()->data())));
+        CHECK_EQ(result.total(), calibration->intrinsics()->size());
+        return result;
+      }()),
+      intrinsics_eigen_([this]() {
+        cv::Mat camera_intrinsics = intrinsics_;
+        Eigen::Matrix3d result;
+        cv::cv2eigen(camera_intrinsics, result);
+        return result;
+      }()),
+      dist_coeffs_([calibration]() {
+        const cv::Mat result(5, 1, CV_32F,
+                             const_cast<void *>(static_cast<const void *>(
+                                 calibration->dist_coeffs()->data())));
+        CHECK_EQ(result.total(), calibration->dist_coeffs()->size());
+        return result;
+      }()) {}
 
 ImageCallback::ImageCallback(
     aos::EventLoop *event_loop, std::string_view channel,
@@ -257,8 +222,9 @@
 
     const Eigen::Affine3d board_to_camera = translation * rotation;
 
-    Eigen::Vector3d result = eigen_camera_matrix_ * camera_projection *
-                             board_to_camera * Eigen::Vector3d::Zero();
+    Eigen::Vector3d result = calibration_.CameraIntrinsicsEigen() *
+                             camera_projection * board_to_camera *
+                             Eigen::Vector3d::Zero();
 
     // Found that drawAxis hangs if you try to draw with z values too
     // small (trying to draw axes at inifinity)
@@ -269,11 +235,13 @@
     } else {
       result /= result.z();
       if (target_type_ == TargetType::kCharuco) {
-        cv::aruco::drawAxis(rgb_image, camera_matrix_, dist_coeffs_, rvecs[i],
+        cv::aruco::drawAxis(rgb_image, calibration_.CameraIntrinsics(),
+                            calibration_.CameraDistCoeffs(), rvecs[i],
                             tvecs[i], 0.1);
       } else {
-        cv::drawFrameAxes(rgb_image, camera_matrix_, dist_coeffs_, rvecs[i],
-                          tvecs[i], 0.1);
+        cv::drawFrameAxes(rgb_image, calibration_.CameraIntrinsics(),
+                          calibration_.CameraDistCoeffs(), rvecs[i], tvecs[i],
+                          0.1);
       }
     }
     std::stringstream ss;
@@ -307,7 +275,8 @@
 }
 
 CharucoExtractor::CharucoExtractor(
-    aos::EventLoop *event_loop, std::string_view pi, TargetType target_type,
+    aos::EventLoop *event_loop,
+    const calibration::CameraCalibration *calibration, TargetType target_type,
     std::string_view image_channel,
     std::function<void(cv::Mat, monotonic_clock::time_point,
                        std::vector<cv::Vec4i>,
@@ -315,21 +284,14 @@
                        std::vector<Eigen::Vector3d>,
                        std::vector<Eigen::Vector3d>)> &&handle_charuco_fn)
     : event_loop_(event_loop),
-      calibration_(SiftTrainingData(), pi),
       target_type_(target_type),
       image_channel_(image_channel),
-      camera_matrix_(calibration_.CameraIntrinsics()),
-      eigen_camera_matrix_(calibration_.CameraIntrinsicsEigen()),
-      dist_coeffs_(calibration_.CameraDistCoeffs()),
-      pi_number_(aos::network::ParsePiNumber(pi)),
+      calibration_(CHECK_NOTNULL(calibration)),
       handle_charuco_(std::move(handle_charuco_fn)) {
   SetupTargetData();
 
-  LOG(INFO) << "Camera matrix " << camera_matrix_;
-  LOG(INFO) << "Distortion Coefficients " << dist_coeffs_;
-
-  CHECK(pi_number_) << ": Invalid pi number " << pi
-                    << ", failed to parse pi number";
+  LOG(INFO) << "Camera matrix " << calibration_.CameraIntrinsics();
+  LOG(INFO) << "Distortion Coefficients " << calibration_.CameraDistCoeffs();
 
   LOG(INFO) << "Connecting to channel " << image_channel_;
 }
@@ -392,7 +354,7 @@
         cv::aruco::interpolateCornersCharuco(
             marker_corners, marker_ids, rgb_image, board_,
             charuco_corners_with_calibration, charuco_ids_with_calibration,
-            camera_matrix_, dist_coeffs_);
+            calibration_.CameraIntrinsics(), calibration_.CameraDistCoeffs());
 
         if (charuco_ids.size() >= FLAGS_min_charucos) {
           cv::aruco::drawDetectedCornersCharuco(
@@ -401,7 +363,8 @@
           cv::Vec3d rvec, tvec;
           valid = cv::aruco::estimatePoseCharucoBoard(
               charuco_corners_with_calibration, charuco_ids_with_calibration,
-              board_, camera_matrix_, dist_coeffs_, rvec, tvec);
+              board_, calibration_.CameraIntrinsics(),
+              calibration_.CameraDistCoeffs(), rvec, tvec);
 
           // if charuco pose is valid, return pose, with ids and corners
           if (valid) {
@@ -433,9 +396,9 @@
       // estimate pose for arucos doesn't return valid, so marking true
       valid = true;
       std::vector<cv::Vec3d> rvecs, tvecs;
-      cv::aruco::estimatePoseSingleMarkers(marker_corners, square_length_,
-                                           camera_matrix_, dist_coeffs_, rvecs,
-                                           tvecs);
+      cv::aruco::estimatePoseSingleMarkers(
+          marker_corners, square_length_, calibration_.CameraIntrinsics(),
+          calibration_.CameraDistCoeffs(), rvecs, tvecs);
       DrawTargetPoses(rgb_image, rvecs, tvecs);
 
       PackPoseResults(rvecs, tvecs, &rvecs_eigen, &tvecs_eigen);
@@ -459,9 +422,9 @@
         // estimate pose for diamonds doesn't return valid, so marking true
         valid = true;
         std::vector<cv::Vec3d> rvecs, tvecs;
-        cv::aruco::estimatePoseSingleMarkers(diamond_corners, square_length_,
-                                             camera_matrix_, dist_coeffs_,
-                                             rvecs, tvecs);
+        cv::aruco::estimatePoseSingleMarkers(
+            diamond_corners, square_length_, calibration_.CameraIntrinsics(),
+            calibration_.CameraDistCoeffs(), rvecs, tvecs);
         DrawTargetPoses(rgb_image, rvecs, tvecs);
 
         PackPoseResults(rvecs, tvecs, &rvecs_eigen, &tvecs_eigen);
diff --git a/frc971/vision/charuco_lib.h b/frc971/vision/charuco_lib.h
index 984cef6..b2ca7ee 100644
--- a/frc971/vision/charuco_lib.h
+++ b/frc971/vision/charuco_lib.h
@@ -11,8 +11,7 @@
 #include "absl/types/span.h"
 #include "aos/events/event_loop.h"
 #include "aos/network/message_bridge_server_generated.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
+#include "frc971/vision/calibration_generated.h"
 #include "external/com_github_foxglove_schemas/ImageAnnotations_generated.h"
 
 DECLARE_bool(visualize);
@@ -24,23 +23,19 @@
 // training data.
 class CameraCalibration {
  public:
-  CameraCalibration(const absl::Span<const uint8_t> training_data_bfbs,
-                    std::string_view pi);
+  CameraCalibration(const calibration::CameraCalibration *calibration);
 
   // Intrinsics for the located camera.
-  cv::Mat CameraIntrinsics() const;
-  Eigen::Matrix3d CameraIntrinsicsEigen() const;
+  cv::Mat CameraIntrinsics() const { return intrinsics_; }
+  Eigen::Matrix3d CameraIntrinsicsEigen() const { return intrinsics_eigen_; }
 
   // Distortion coefficients for the located camera.
-  cv::Mat CameraDistCoeffs() const;
+  cv::Mat CameraDistCoeffs() const { return dist_coeffs_; }
 
  private:
-  // Finds the camera specific calibration flatbuffer.
-  const sift::CameraCalibration *FindCameraCalibration(
-      const sift::TrainingData *const training_data, std::string_view pi) const;
-
-  // Pointer to this camera's calibration parameters.
-  const sift::CameraCalibration *camera_calibration_;
+  const cv::Mat intrinsics_;
+  const Eigen::Matrix3d intrinsics_eigen_;
+  const cv::Mat dist_coeffs_;
 };
 
 // Helper class to call a function with a cv::Mat and age when an image shows up
@@ -108,7 +103,8 @@
   // multiple targets in an image; for charuco boards, there should be just one
   // element
   CharucoExtractor(
-      aos::EventLoop *event_loop, std::string_view pi, TargetType target_type,
+      aos::EventLoop *event_loop,
+      const calibration::CameraCalibration *calibration, TargetType target_type,
       std::string_view image_channel,
       std::function<void(cv::Mat, aos::monotonic_clock::time_point,
                          std::vector<cv::Vec4i>,
@@ -125,9 +121,11 @@
   cv::Ptr<cv::aruco::CharucoBoard> board() const { return board_; }
 
   // Returns the camera matrix for this camera.
-  const cv::Mat camera_matrix() const { return camera_matrix_; }
+  const cv::Mat camera_matrix() const {
+    return calibration_.CameraIntrinsics();
+  }
   // Returns the distortion coefficients for this camera.
-  const cv::Mat dist_coeffs() const { return dist_coeffs_; }
+  const cv::Mat dist_coeffs() const { return calibration_.CameraDistCoeffs(); }
 
  private:
   // Creates the dictionary, board, and other parameters for the appropriate
@@ -146,7 +144,6 @@
                        std::vector<Eigen::Vector3d> *tvecs_eigen);
 
   aos::EventLoop *event_loop_;
-  CameraCalibration calibration_;
 
   cv::Ptr<cv::aruco::Dictionary> dictionary_;
   cv::Ptr<cv::aruco::CharucoBoard> board_;
@@ -161,15 +158,7 @@
   // Length of a side of the checkerboard squares (around the marker)
   double square_length_;
 
-  // Intrinsic calibration matrix
-  const cv::Mat camera_matrix_;
-  // Intrinsic calibration matrix as Eigen::Matrix3d
-  const Eigen::Matrix3d eigen_camera_matrix_;
-  // Intrinsic distortion coefficients
-  const cv::Mat dist_coeffs_;
-
-  // Index number of the raspberry pi
-  const std::optional<uint16_t> pi_number_;
+  CameraCalibration calibration_;
 
   // Function to call.
   std::function<void(
diff --git a/frc971/vision/intrinsics_calibration.cc b/frc971/vision/intrinsics_calibration.cc
new file mode 100644
index 0000000..224a37c
--- /dev/null
+++ b/frc971/vision/intrinsics_calibration.cc
@@ -0,0 +1,60 @@
+#include <cmath>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+#include <regex>
+
+#include "absl/strings/str_format.h"
+#include "aos/events/shm_event_loop.h"
+#include "aos/init.h"
+#include "aos/network/team_number.h"
+#include "aos/time/time.h"
+#include "aos/util/file.h"
+#include "frc971/vision/intrinsics_calibration_lib.h"
+
+DEFINE_string(calibration_folder, ".", "Folder to place calibration files.");
+DEFINE_string(camera_id, "", "Camera ID in format YY-NN-- year and number.");
+DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
+DEFINE_bool(display_undistorted, false,
+            "If true, display the undistorted image.");
+DEFINE_string(pi, "", "Pi name to calibrate.");
+DEFINE_string(base_intrinsics, "",
+              "Intrinsics to use for estimating board pose prior to solving "
+              "for the new intrinsics.");
+
+namespace frc971 {
+namespace vision {
+namespace {
+
+void Main() {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  aos::ShmEventLoop event_loop(&config.message());
+
+  std::string hostname = FLAGS_pi;
+  if (hostname == "") {
+    hostname = aos::network::GetHostname();
+    LOG(INFO) << "Using pi name from hostname as " << hostname;
+  }
+  CHECK(!FLAGS_base_intrinsics.empty())
+      << "Need a base intrinsics json to use to auto-capture images when the "
+         "camera moves.";
+  std::unique_ptr<aos::ExitHandle> exit_handle = event_loop.MakeExitHandle();
+  IntrinsicsCalibration extractor(
+      &event_loop, hostname, FLAGS_camera_id, FLAGS_base_intrinsics,
+      FLAGS_display_undistorted, FLAGS_calibration_folder, exit_handle.get());
+
+  event_loop.Run();
+
+  extractor.MaybeCalibrate();
+}
+
+}  // namespace
+}  // namespace vision
+}  // namespace frc971
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+  frc971::vision::Main();
+}
diff --git a/frc971/vision/intrinsics_calibration_lib.cc b/frc971/vision/intrinsics_calibration_lib.cc
new file mode 100644
index 0000000..6cf53e8
--- /dev/null
+++ b/frc971/vision/intrinsics_calibration_lib.cc
@@ -0,0 +1,245 @@
+#include "frc971/vision/intrinsics_calibration_lib.h"
+
+namespace frc971 {
+namespace vision {
+
+IntrinsicsCalibration::IntrinsicsCalibration(
+    aos::EventLoop *event_loop, std::string_view pi, std::string_view camera_id,
+    std::string_view base_intrinsics_file, bool display_undistorted,
+    std::string_view calibration_folder, aos::ExitHandle *exit_handle)
+    : pi_(pi),
+      pi_number_(aos::network::ParsePiNumber(pi)),
+      camera_id_(camera_id),
+      prev_H_camera_board_(Eigen::Affine3d()),
+      prev_image_H_camera_board_(Eigen::Affine3d()),
+      base_intrinsics_(
+          aos::JsonFileToFlatbuffer<calibration::CameraCalibration>(
+              base_intrinsics_file)),
+      charuco_extractor_(
+          event_loop, &base_intrinsics_.message(), TargetType::kCharuco,
+          "/camera",
+          [this](cv::Mat rgb_image, const aos::monotonic_clock::time_point eof,
+                 std::vector<cv::Vec4i> charuco_ids,
+                 std::vector<std::vector<cv::Point2f>> charuco_corners,
+                 bool valid, std::vector<Eigen::Vector3d> rvecs_eigen,
+                 std::vector<Eigen::Vector3d> tvecs_eigen) {
+            HandleCharuco(rgb_image, eof, charuco_ids, charuco_corners, valid,
+                          rvecs_eigen, tvecs_eigen);
+          }),
+      image_callback_(
+          event_loop,
+          absl::StrCat("/pi",
+                       std::to_string(aos::network::ParsePiNumber(pi).value()),
+                       "/camera"),
+          [this](cv::Mat rgb_image,
+                 const aos::monotonic_clock::time_point eof) {
+            charuco_extractor_.HandleImage(rgb_image, eof);
+          },
+          std::chrono::milliseconds(5)),
+      display_undistorted_(display_undistorted),
+      calibration_folder_(calibration_folder),
+      exit_handle_(exit_handle) {
+  CHECK(pi_number_) << ": Invalid pi number " << pi
+                    << ", failed to parse pi number";
+  std::regex re{"^[0-9][0-9]-[0-9][0-9]"};
+  CHECK(std::regex_match(camera_id_, re))
+      << ": Invalid camera_id '" << camera_id_ << "', should be of form YY-NN";
+}
+
+void IntrinsicsCalibration::HandleCharuco(
+    cv::Mat rgb_image, const aos::monotonic_clock::time_point /*eof*/,
+    std::vector<cv::Vec4i> charuco_ids,
+    std::vector<std::vector<cv::Point2f>> charuco_corners, bool valid,
+    std::vector<Eigen::Vector3d> rvecs_eigen,
+    std::vector<Eigen::Vector3d> tvecs_eigen) {
+  // Reduce resolution displayed on remote viewer to prevent lag
+  cv::resize(rgb_image, rgb_image,
+             cv::Size(rgb_image.cols / 2, rgb_image.rows / 2));
+  cv::imshow("Display", rgb_image);
+
+  if (display_undistorted_) {
+    const cv::Size image_size(rgb_image.cols, rgb_image.rows);
+    cv::Mat undistorted_rgb_image(image_size, CV_8UC3);
+    cv::undistort(rgb_image, undistorted_rgb_image,
+                  charuco_extractor_.camera_matrix(),
+                  charuco_extractor_.dist_coeffs());
+
+    cv::imshow("Display undist", undistorted_rgb_image);
+  }
+
+  int keystroke = cv::waitKey(1);
+
+  // If we haven't got a valid pose estimate, don't use these points
+  if (!valid) {
+    return;
+  }
+  CHECK(tvecs_eigen.size() == 1)
+      << "Charuco board should only return one translational pose";
+  CHECK(rvecs_eigen.size() == 1)
+      << "Charuco board should only return one rotational pose";
+  // Calibration calculates rotation and translation delta from last image
+  // stored to automatically capture next image
+
+  Eigen::Affine3d H_board_camera =
+      Eigen::Translation3d(tvecs_eigen[0]) *
+      Eigen::AngleAxisd(rvecs_eigen[0].norm(),
+                        rvecs_eigen[0] / rvecs_eigen[0].norm());
+  Eigen::Affine3d H_camera_board_ = H_board_camera.inverse();
+  Eigen::Affine3d H_delta = H_board_camera * prev_H_camera_board_;
+
+  Eigen::AngleAxisd delta_r = Eigen::AngleAxisd(H_delta.rotation());
+
+  Eigen::Vector3d delta_t = H_delta.translation();
+
+  double r_norm = std::abs(delta_r.angle());
+  double t_norm = delta_t.norm();
+
+  bool store_image = false;
+  double percent_motion =
+      std::max<double>(r_norm / kDeltaRThreshold, t_norm / kDeltaTThreshold);
+  LOG(INFO) << "Captured: " << all_charuco_ids_.size() << " points; Moved "
+            << percent_motion << "% of what's needed";
+  // Verify that camera has moved enough from last stored image
+  if (r_norm > kDeltaRThreshold || t_norm > kDeltaTThreshold) {
+    // frame_ refers to deltas between current and last captured image
+    Eigen::Affine3d frame_H_delta = H_board_camera * prev_image_H_camera_board_;
+
+    Eigen::AngleAxisd frame_delta_r =
+        Eigen::AngleAxisd(frame_H_delta.rotation());
+
+    Eigen::Vector3d frame_delta_t = frame_H_delta.translation();
+
+    double frame_r_norm = std::abs(frame_delta_r.angle());
+    double frame_t_norm = frame_delta_t.norm();
+
+    // Make sure camera has stopped moving before storing image
+    store_image =
+        frame_r_norm < kFrameDeltaRLimit && frame_t_norm < kFrameDeltaTLimit;
+    double percent_stop = std::max<double>(frame_r_norm / kFrameDeltaRLimit,
+                                           frame_t_norm / kFrameDeltaTLimit);
+    LOG(INFO) << "Captured: " << all_charuco_ids_.size()
+              << " points; Moved enough (" << percent_motion
+              << "%); Need to stop (last motion was " << percent_stop
+              << "% of limit; needs to be < 1 to capture)";
+  }
+  prev_image_H_camera_board_ = H_camera_board_;
+
+  if (store_image) {
+    if (valid) {
+      prev_H_camera_board_ = H_camera_board_;
+
+      // Unpack the Charuco ids from Vec4i
+      std::vector<int> charuco_ids_int;
+      for (cv::Vec4i charuco_id : charuco_ids) {
+        charuco_ids_int.emplace_back(charuco_id[0]);
+      }
+      all_charuco_ids_.emplace_back(std::move(charuco_ids_int));
+      all_charuco_corners_.emplace_back(std::move(charuco_corners[0]));
+
+      if (r_norm > kDeltaRThreshold) {
+        LOG(INFO) << "Triggered by rotation delta = " << r_norm << " > "
+                  << kDeltaRThreshold;
+      }
+      if (t_norm > kDeltaTThreshold) {
+        LOG(INFO) << "Triggered by translation delta = " << t_norm << " > "
+                  << kDeltaTThreshold;
+      }
+    }
+
+  } else if ((keystroke & 0xFF) == static_cast<int>('q')) {
+    exit_handle_->Exit();
+  }
+}
+
+aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+IntrinsicsCalibration::BuildCalibration(
+    cv::Mat camera_matrix, cv::Mat dist_coeffs,
+    aos::realtime_clock::time_point realtime_now, int pi_number,
+    std::string_view camera_id, uint16_t team_number) {
+  flatbuffers::FlatBufferBuilder fbb;
+  flatbuffers::Offset<flatbuffers::String> name_offset =
+      fbb.CreateString(absl::StrFormat("pi%d", pi_number));
+  flatbuffers::Offset<flatbuffers::String> camera_id_offset =
+      fbb.CreateString(camera_id);
+  flatbuffers::Offset<flatbuffers::Vector<float>> intrinsics_offset =
+      fbb.CreateVector<float>(9u, [&camera_matrix](size_t i) {
+        return static_cast<float>(
+            reinterpret_cast<double *>(camera_matrix.data)[i]);
+      });
+
+  flatbuffers::Offset<flatbuffers::Vector<float>>
+      distortion_coefficients_offset =
+          fbb.CreateVector<float>(5u, [&dist_coeffs](size_t i) {
+            return static_cast<float>(
+                reinterpret_cast<double *>(dist_coeffs.data)[i]);
+          });
+
+  calibration::CameraCalibration::Builder camera_calibration_builder(fbb);
+
+  camera_calibration_builder.add_node_name(name_offset);
+  camera_calibration_builder.add_team_number(team_number);
+  camera_calibration_builder.add_camera_id(camera_id_offset);
+  camera_calibration_builder.add_calibration_timestamp(
+      realtime_now.time_since_epoch().count());
+  camera_calibration_builder.add_intrinsics(intrinsics_offset);
+  camera_calibration_builder.add_dist_coeffs(distortion_coefficients_offset);
+  fbb.Finish(camera_calibration_builder.Finish());
+
+  return fbb.Release();
+}
+
+void IntrinsicsCalibration::MaybeCalibrate() {
+  // TODO: This number should depend on coarse vs. fine pattern
+  // Maybe just on total # of ids found, not just images
+  if (all_charuco_ids_.size() >= 50) {
+    LOG(INFO) << "Beginning calibration on " << all_charuco_ids_.size()
+              << " images";
+    cv::Mat camera_matrix, dist_coeffs;
+    std::vector<cv::Mat> rvecs, tvecs;
+    cv::Mat std_deviations_intrinsics, std_deviations_extrinsics,
+        per_view_errors;
+
+    // Set calibration flags (same as in calibrateCamera() function)
+    int calibration_flags = 0;
+    cv::Size img_size(640, 480);
+    const double reprojection_error = cv::aruco::calibrateCameraCharuco(
+        all_charuco_corners_, all_charuco_ids_, charuco_extractor_.board(),
+        img_size, camera_matrix, dist_coeffs, rvecs, tvecs,
+        std_deviations_intrinsics, std_deviations_extrinsics, per_view_errors,
+        calibration_flags);
+    CHECK_LE(reprojection_error, 1.0)
+        << ": Reprojection error is bad-- greater than 1 pixel.";
+    LOG(INFO) << "Reprojection Error is " << reprojection_error;
+
+    const aos::realtime_clock::time_point realtime_now =
+        aos::realtime_clock::now();
+    std::optional<uint16_t> team_number =
+        aos::network::team_number_internal::ParsePiTeamNumber(pi_);
+    CHECK(team_number) << ": Invalid pi hostname " << pi_
+                       << ", failed to parse team number";
+    aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+        camera_calibration = BuildCalibration(camera_matrix, dist_coeffs,
+                                              realtime_now, pi_number_.value(),
+                                              camera_id_, team_number.value());
+    std::stringstream time_ss;
+    time_ss << realtime_now;
+
+    const std::string calibration_filename =
+        calibration_folder_ +
+        absl::StrFormat("/calibration_pi-%d-%d_cam-%s_%s.json",
+                        team_number.value(), pi_number_.value(), camera_id_,
+                        time_ss.str());
+
+    LOG(INFO) << calibration_filename << " -> "
+              << aos::FlatbufferToJson(camera_calibration,
+                                       {.multi_line = true});
+
+    aos::util::WriteStringToFileOrDie(
+        calibration_filename,
+        aos::FlatbufferToJson(camera_calibration, {.multi_line = true}));
+  } else {
+    LOG(INFO) << "Skipping calibration due to not enough images.";
+  }
+}
+}  // namespace vision
+}  // namespace frc971
diff --git a/frc971/vision/intrinsics_calibration_lib.h b/frc971/vision/intrinsics_calibration_lib.h
new file mode 100644
index 0000000..10062ba
--- /dev/null
+++ b/frc971/vision/intrinsics_calibration_lib.h
@@ -0,0 +1,78 @@
+#ifndef FRC971_VISION_CALIBRATION_LIB_H_
+#define FRC971_VISION_CALIBRATION_LIB_H_
+#include <cmath>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc.hpp>
+#include <regex>
+
+#include "Eigen/Dense"
+#include "Eigen/Geometry"
+#include "absl/strings/str_format.h"
+#include "aos/events/event_loop.h"
+#include "aos/network/team_number.h"
+#include "aos/time/time.h"
+#include "aos/util/file.h"
+#include "frc971/vision/charuco_lib.h"
+
+namespace frc971 {
+namespace vision {
+
+class IntrinsicsCalibration {
+ public:
+  IntrinsicsCalibration(aos::EventLoop *event_loop, std::string_view pi,
+                        std::string_view camera_id,
+                        std::string_view base_intrinsics_file,
+                        bool display_undistorted,
+                        std::string_view calibration_folder,
+                        aos::ExitHandle *exit_handle);
+
+  void HandleCharuco(cv::Mat rgb_image,
+                     const aos::monotonic_clock::time_point /*eof*/,
+                     std::vector<cv::Vec4i> charuco_ids,
+                     std::vector<std::vector<cv::Point2f>> charuco_corners,
+                     bool valid, std::vector<Eigen::Vector3d> rvecs_eigen,
+                     std::vector<Eigen::Vector3d> tvecs_eigen);
+
+  void MaybeCalibrate();
+
+  static aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+  BuildCalibration(cv::Mat camera_matrix, cv::Mat dist_coeffs,
+                   aos::realtime_clock::time_point realtime_now, int pi_number,
+                   std::string_view camera_id, uint16_t team_number);
+
+ private:
+  static constexpr double kDeltaRThreshold = M_PI / 6.0;
+  static constexpr double kDeltaTThreshold = 0.3;
+
+  static constexpr double kFrameDeltaRLimit = M_PI / 60;
+  static constexpr double kFrameDeltaTLimit = 0.01;
+
+  std::string pi_;
+  const std::optional<uint16_t> pi_number_;
+  const std::string camera_id_;
+
+  std::vector<std::vector<int>> all_charuco_ids_;
+  std::vector<std::vector<cv::Point2f>> all_charuco_corners_;
+
+  Eigen::Affine3d H_camera_board_;
+  Eigen::Affine3d prev_H_camera_board_;
+  Eigen::Affine3d prev_image_H_camera_board_;
+
+  // Camera intrinsics that we will use to bootstrap the intrinsics estimation
+  // here. We make use of the intrinsics in this calibration to allow us to
+  // estimate the relative pose of the charuco board and then identify how much
+  // the board is moving.
+  aos::FlatbufferDetachedBuffer<calibration::CameraCalibration>
+      base_intrinsics_;
+  CharucoExtractor charuco_extractor_;
+  ImageCallback image_callback_;
+
+  const bool display_undistorted_;
+  const std::string calibration_folder_;
+  aos::ExitHandle *exit_handle_;
+};
+
+}  // namespace vision
+}  // namespace frc971
+#endif  // FRC971_VISION_CALIBRATION_LIB_H_
diff --git a/frc971/vision/v4l2_reader.cc b/frc971/vision/v4l2_reader.cc
index 2553003..1404ec5 100644
--- a/frc971/vision/v4l2_reader.cc
+++ b/frc971/vision/v4l2_reader.cc
@@ -13,9 +13,11 @@
 namespace vision {
 
 V4L2ReaderBase::V4L2ReaderBase(aos::EventLoop *event_loop,
-                               const std::string &device_name)
-    : fd_(open(device_name.c_str(), O_RDWR | O_NONBLOCK)),
-      event_loop_(event_loop) {
+                               std::string_view device_name,
+                               std::string_view image_channel)
+    : fd_(open(device_name.data(), O_RDWR | O_NONBLOCK)),
+      event_loop_(event_loop),
+      image_channel_(image_channel) {
   PCHECK(fd_.get() != -1) << " Failed to open device " << device_name;
 
   // Figure out if we are multi-planar or not.
@@ -84,7 +86,7 @@
   }
 
   for (size_t i = 0; i < buffers_.size(); ++i) {
-    buffers_[i].sender = event_loop_->MakeSender<CameraImage>("/camera");
+    buffers_[i].sender = event_loop_->MakeSender<CameraImage>(image_channel_);
     MarkBufferToBeEnqueued(i);
   }
   int type = multiplanar() ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
@@ -287,9 +289,9 @@
   PLOG(FATAL) << "VIDIOC_STREAMOFF failed";
 }
 
-V4L2Reader::V4L2Reader(aos::EventLoop *event_loop,
-                       const std::string &device_name)
-    : V4L2ReaderBase(event_loop, device_name) {
+V4L2Reader::V4L2Reader(aos::EventLoop *event_loop, std::string_view device_name,
+                       std::string_view image_channel)
+    : V4L2ReaderBase(event_loop, device_name, image_channel) {
   // Don't know why this magic call to SetExposure is required (before the
   // camera settings are configured) to make things work on boot of the pi, but
   // it seems to be-- without it, the image exposure is wrong (too dark). Note--
@@ -322,11 +324,12 @@
 
 RockchipV4L2Reader::RockchipV4L2Reader(aos::EventLoop *event_loop,
                                        aos::internal::EPoll *epoll,
-                                       const std::string &device_name,
-                                       const std::string &image_sensor_subdev)
-    : V4L2ReaderBase(event_loop, device_name),
+                                       std::string_view device_name,
+                                       std::string_view image_sensor_subdev,
+                                       std::string_view image_channel)
+    : V4L2ReaderBase(event_loop, device_name, image_channel),
       epoll_(epoll),
-      image_sensor_fd_(open(image_sensor_subdev.c_str(), O_RDWR | O_NONBLOCK)),
+      image_sensor_fd_(open(image_sensor_subdev.data(), O_RDWR | O_NONBLOCK)),
       buffer_requeuer_([this](int buffer) { EnqueueBuffer(buffer); },
                        kEnqueueFifoPriority) {
   PCHECK(image_sensor_fd_.get() != -1)
diff --git a/frc971/vision/v4l2_reader.h b/frc971/vision/v4l2_reader.h
index 22e4367..8764024 100644
--- a/frc971/vision/v4l2_reader.h
+++ b/frc971/vision/v4l2_reader.h
@@ -22,7 +22,9 @@
 class V4L2ReaderBase {
  public:
   // device_name is the name of the device file (like "/dev/video0").
-  V4L2ReaderBase(aos::EventLoop *event_loop, const std::string &device_name);
+  // image_channel is the channel to send images on
+  V4L2ReaderBase(aos::EventLoop *event_loop, std::string_view device_name,
+                 std::string_view image_channel);
 
   V4L2ReaderBase(const V4L2ReaderBase &) = delete;
   V4L2ReaderBase &operator=(const V4L2ReaderBase &) = delete;
@@ -114,6 +116,8 @@
     flatbuffers::Offset<CameraImage> message_offset;
 
     uint8_t *data_pointer = nullptr;
+
+    std::string_view image_channel_;
   };
 
   struct BufferInfo {
@@ -149,12 +153,15 @@
 
   aos::EventLoop *event_loop_;
   aos::Ftrace ftrace_;
+
+  std::string_view image_channel_;
 };
 
 // Generic V4L2 reader for pi's and older.
 class V4L2Reader : public V4L2ReaderBase {
  public:
-  V4L2Reader(aos::EventLoop *event_loop, const std::string &device_name);
+  V4L2Reader(aos::EventLoop *event_loop, std::string_view device_name,
+             std::string_view image_channel = "/camera");
 };
 
 // Rockpi specific v4l2 reader.  This assumes that the media device has been
@@ -162,10 +169,11 @@
 class RockchipV4L2Reader : public V4L2ReaderBase {
  public:
   RockchipV4L2Reader(aos::EventLoop *event_loop, aos::internal::EPoll *epoll,
-                     const std::string &device_name,
-                     const std::string &image_sensor_subdev);
+                     std::string_view device_name,
+                     std::string_view image_sensor_subdev,
+                     std::string_view image_channel = "/camera");
 
-  ~RockchipV4L2Reader();
+  virtual ~RockchipV4L2Reader();
 
   void SetExposure(size_t duration) override;
 
diff --git a/y2018/control_loops/superstructure/arm/BUILD b/y2018/control_loops/superstructure/arm/BUILD
index d05c245..f90cc92 100644
--- a/y2018/control_loops/superstructure/arm/BUILD
+++ b/y2018/control_loops/superstructure/arm/BUILD
@@ -82,7 +82,7 @@
         ":ekf",
         ":generated_graph",
         ":trajectory",
-        "//third_party/matplotlib-cpp",
+        "//frc971/analysis:in_process_plotter",
         "@com_github_gflags_gflags//:gflags",
         "@org_tuxfamily_eigen//:eigen",
     ],
diff --git a/y2018/control_loops/superstructure/arm/trajectory_plot.cc b/y2018/control_loops/superstructure/arm/trajectory_plot.cc
index eb3f7c1..10b39bc 100644
--- a/y2018/control_loops/superstructure/arm/trajectory_plot.cc
+++ b/y2018/control_loops/superstructure/arm/trajectory_plot.cc
@@ -1,10 +1,10 @@
-#include "y2018/control_loops/superstructure/arm/trajectory.h"
-
+#include "aos/init.h"
+#include "frc971/analysis/in_process_plotter.h"
 #include "gflags/gflags.h"
-#include "third_party/matplotlib-cpp/matplotlibcpp.h"
 #include "y2018/control_loops/superstructure/arm/dynamics.h"
 #include "y2018/control_loops/superstructure/arm/ekf.h"
 #include "y2018/control_loops/superstructure/arm/generated_graph.h"
+#include "y2018/control_loops/superstructure/arm/trajectory.h"
 
 DEFINE_bool(forwards, true, "If true, run the forwards simulation.");
 DEFINE_bool(plot, true, "If true, plot");
@@ -32,7 +32,7 @@
           .finished();
   trajectory.OptimizeTrajectory(alpha_unitizer, vmax);
 
-  ::std::vector<double> distance_array = trajectory.DistanceArray();
+  const ::std::vector<double> distance_array = trajectory.DistanceArray();
 
   ::std::vector<double> theta0_array;
   ::std::vector<double> theta1_array;
@@ -96,7 +96,10 @@
 
     const ::Eigen::Matrix<double, 6, 1> R = trajectory.R(theta_t, omega_t);
     const ::Eigen::Matrix<double, 2, 1> U =
-        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t).array().max(-20).min(20);
+        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t)
+            .array()
+            .max(-20)
+            .min(20);
 
     Uff0_distance_array.push_back(U(0));
     Uff1_distance_array.push_back(U(1));
@@ -115,17 +118,20 @@
 
     const ::Eigen::Matrix<double, 6, 1> R = trajectory.R(theta_t, omega_t);
     const ::Eigen::Matrix<double, 2, 1> U =
-        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t).array().max(-20).min(20);
+        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t)
+            .array()
+            .max(-20)
+            .min(20);
 
     Uff0_distance_array_curvature.push_back(U(0));
     Uff1_distance_array_curvature.push_back(U(1));
   }
 
   for (const double distance : distance_array) {
-    const double goal_velocity = trajectory.GetDVelocity(
-        distance, trajectory.max_dvelocity());
-    const double goal_acceleration = trajectory.GetDAcceleration(
-        distance, trajectory.max_dvelocity());
+    const double goal_velocity =
+        trajectory.GetDVelocity(distance, trajectory.max_dvelocity());
+    const double goal_acceleration =
+        trajectory.GetDAcceleration(distance, trajectory.max_dvelocity());
     const ::Eigen::Matrix<double, 2, 1> theta_t = trajectory.ThetaT(distance);
     const ::Eigen::Matrix<double, 2, 1> omega_t =
         trajectory.OmegaT(distance, goal_velocity);
@@ -134,7 +140,10 @@
 
     const ::Eigen::Matrix<double, 6, 1> R = trajectory.R(theta_t, omega_t);
     const ::Eigen::Matrix<double, 2, 1> U =
-        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t).array().max(-20).min(20);
+        Dynamics::FF_U(R.block<4, 1>(0, 0), omega_t, alpha_t)
+            .array()
+            .max(-20)
+            .min(20);
 
     Uff0_distance_array_backwards_only.push_back(U(0));
     Uff1_distance_array_backwards_only.push_back(U(1));
@@ -244,115 +253,96 @@
   }
 
   if (FLAGS_plot) {
-    matplotlibcpp::figure();
-    matplotlibcpp::title("Trajectory");
-    matplotlibcpp::plot(theta0_array, theta1_array,
-                        {{"label", "desired path"}});
-    matplotlibcpp::plot(theta0_t_array, theta1_t_array,
-                        {{"label", "actual path"}});
-    matplotlibcpp::legend();
+    frc971::analysis::Plotter plotter;
 
-    matplotlibcpp::figure();
-    matplotlibcpp::plot(distance_array, theta0_array, {{"label", "theta0"}});
-    matplotlibcpp::plot(distance_array, theta1_array, {{"label", "theta1"}});
-    matplotlibcpp::plot(distance_array, omega0_array, {{"label", "omega0"}});
-    matplotlibcpp::plot(distance_array, omega1_array, {{"label", "omega1"}});
-    matplotlibcpp::plot(distance_array, alpha0_array, {{"label", "alpha0"}});
-    matplotlibcpp::plot(distance_array, alpha1_array, {{"label", "alpha1"}});
+    plotter.AddFigure();
+    plotter.Title("Trajectory");
+    plotter.AddLine(theta0_array, theta1_array, "desired path");
+    plotter.AddLine(theta0_t_array, theta1_t_array, "actual path");
+    plotter.Publish();
 
-    matplotlibcpp::plot(integrated_distance, integrated_theta0_array,
-                        {{"label", "itheta0"}});
-    matplotlibcpp::plot(integrated_distance, integrated_theta1_array,
-                        {{"label", "itheta1"}});
-    matplotlibcpp::plot(integrated_distance, integrated_omega0_array,
-                        {{"label", "iomega0"}});
-    matplotlibcpp::plot(integrated_distance, integrated_omega1_array,
-                        {{"label", "iomega1"}});
-    matplotlibcpp::legend();
+    plotter.AddFigure();
+    plotter.Title("Input spline");
+    plotter.AddLine(distance_array, theta0_array, "theta0");
+    plotter.AddLine(distance_array, theta1_array, "theta1");
+    plotter.AddLine(distance_array, omega0_array, "omega0");
+    plotter.AddLine(distance_array, omega1_array, "omega1");
+    plotter.AddLine(distance_array, alpha0_array, "alpha0");
+    plotter.AddLine(distance_array, alpha1_array, "alpha1");
 
-    matplotlibcpp::figure();
-    matplotlibcpp::plot(distance_array, trajectory.max_dvelocity_unfiltered(),
-                        {{"label", "pass0"}});
-    matplotlibcpp::plot(distance_array, trajectory.max_dvelocity(),
-                        {{"label", "passb"}});
-    matplotlibcpp::plot(distance_array, trajectory.max_dvelocity_forward_pass(),
-                        {{"label", "passf"}});
-    matplotlibcpp::legend();
+    plotter.AddLine(integrated_distance, integrated_theta0_array, "integrated theta0");
+    plotter.AddLine(integrated_distance, integrated_theta1_array, "integrated theta1");
+    plotter.AddLine(integrated_distance, integrated_omega0_array, "integrated omega0");
+    plotter.AddLine(integrated_distance, integrated_omega1_array, "integrated omega1");
+    plotter.Publish();
 
-    matplotlibcpp::figure();
-    matplotlibcpp::plot(t_array, alpha0_goal_t_array,
-                        {{"label", "alpha0_t_goal"}});
-    matplotlibcpp::plot(t_array, alpha0_t_array, {{"label", "alpha0_t"}});
-    matplotlibcpp::plot(t_array, alpha1_goal_t_array,
-                        {{"label", "alpha1_t_goal"}});
-    matplotlibcpp::plot(t_array, alpha1_t_array, {{"label", "alpha1_t"}});
-    matplotlibcpp::plot(t_array, distance_t_array, {{"label", "distance_t"}});
-    matplotlibcpp::plot(t_array, velocity_t_array, {{"label", "velocity_t"}});
-    matplotlibcpp::plot(t_array, acceleration_t_array,
-                        {{"label", "acceleration_t"}});
-    matplotlibcpp::legend();
+    plotter.AddFigure();
+    plotter.Title("Solver passes");
+    plotter.AddLine(distance_array, trajectory.max_dvelocity_unfiltered(),
+                    "pass0");
+    plotter.AddLine(distance_array, trajectory.max_dvelocity(), "passb");
+    plotter.AddLine(distance_array, trajectory.max_dvelocity_forward_pass(),
+                    "passf");
+    plotter.Publish();
 
-    matplotlibcpp::figure();
-    matplotlibcpp::title("Angular Velocities");
-    matplotlibcpp::plot(t_array, omega0_goal_t_array,
-                        {{"label", "omega0_t_goal"}});
-    matplotlibcpp::plot(t_array, omega0_t_array, {{"label", "omega0_t"}});
-    matplotlibcpp::plot(t_array, omega0_hat_t_array,
-                        {{"label", "omega0_hat_t"}});
-    matplotlibcpp::plot(t_array, omega1_goal_t_array,
-                        {{"label", "omega1_t_goal"}});
-    matplotlibcpp::plot(t_array, omega1_t_array, {{"label", "omega1_t"}});
-    matplotlibcpp::plot(t_array, omega1_hat_t_array,
-                        {{"label", "omega1_hat_t"}});
-    matplotlibcpp::legend();
+    plotter.AddFigure();
+    plotter.Title("Time Goals");
+    plotter.AddLine(t_array, alpha0_goal_t_array, "alpha0_t_goal");
+    plotter.AddLine(t_array, alpha0_t_array, "alpha0_t");
+    plotter.AddLine(t_array, alpha1_goal_t_array, "alpha1_t_goal");
+    plotter.AddLine(t_array, alpha1_t_array, "alpha1_t");
+    plotter.AddLine(t_array, distance_t_array, "distance_t");
+    plotter.AddLine(t_array, velocity_t_array, "velocity_t");
+    plotter.AddLine(t_array, acceleration_t_array, "acceleration_t");
+    plotter.Publish();
 
-    matplotlibcpp::figure();
-    matplotlibcpp::title("Voltages");
-    matplotlibcpp::plot(t_array, u0_unsaturated_array, {{"label", "u0_full"}});
-    matplotlibcpp::plot(t_array, u0_array, {{"label", "u0"}});
-    matplotlibcpp::plot(t_array, uff0_array, {{"label", "uff0"}});
-    matplotlibcpp::plot(t_array, u1_unsaturated_array, {{"label", "u1_full"}});
-    matplotlibcpp::plot(t_array, u1_array, {{"label", "u1"}});
-    matplotlibcpp::plot(t_array, uff1_array, {{"label", "uff1"}});
-    matplotlibcpp::plot(t_array, torque0_hat_t_array,
-                        {{"label", "torque0_hat"}});
-    matplotlibcpp::plot(t_array, torque1_hat_t_array,
-                        {{"label", "torque1_hat"}});
-    matplotlibcpp::legend();
+    plotter.AddFigure();
+    plotter.Title("Angular Velocities");
+    plotter.AddLine(t_array, omega0_goal_t_array, "omega0_t_goal");
+    plotter.AddLine(t_array, omega0_t_array, "omega0_t");
+    plotter.AddLine(t_array, omega0_hat_t_array, "omega0_hat_t");
+    plotter.AddLine(t_array, omega1_goal_t_array, "omega1_t_goal");
+    plotter.AddLine(t_array, omega1_t_array, "omega1_t");
+    plotter.AddLine(t_array, omega1_hat_t_array, "omega1_hat_t");
+    plotter.Publish();
+
+    plotter.AddFigure();
+    plotter.Title("Voltages");
+    plotter.AddLine(t_array, u0_unsaturated_array, "u0_full");
+    plotter.AddLine(t_array, u0_array, "u0");
+    plotter.AddLine(t_array, uff0_array, "uff0");
+    plotter.AddLine(t_array, u1_unsaturated_array, "u1_full");
+    plotter.AddLine(t_array, u1_array, "u1");
+    plotter.AddLine(t_array, uff1_array, "uff1");
+    plotter.AddLine(t_array, torque0_hat_t_array, "torque0_hat");
+    plotter.AddLine(t_array, torque1_hat_t_array, "torque1_hat");
+    plotter.Publish();
 
     if (FLAGS_plot_thetas) {
-      matplotlibcpp::figure();
-      matplotlibcpp::title("Angles");
-      matplotlibcpp::plot(t_array, theta0_goal_t_array,
-                          {{"label", "theta0_t_goal"}});
-      matplotlibcpp::plot(t_array, theta0_t_array, {{"label", "theta0_t"}});
-      matplotlibcpp::plot(t_array, theta0_hat_t_array,
-                          {{"label", "theta0_hat_t"}});
-      matplotlibcpp::plot(t_array, theta1_goal_t_array,
-                          {{"label", "theta1_t_goal"}});
-      matplotlibcpp::plot(t_array, theta1_t_array, {{"label", "theta1_t"}});
-      matplotlibcpp::plot(t_array, theta1_hat_t_array,
-                          {{"label", "theta1_hat_t"}});
-      matplotlibcpp::legend();
+      plotter.AddFigure();
+      plotter.Title("Angles");
+      plotter.AddLine(t_array, theta0_goal_t_array, "theta0_t_goal");
+      plotter.AddLine(t_array, theta0_t_array, "theta0_t");
+      plotter.AddLine(t_array, theta0_hat_t_array, "theta0_hat_t");
+      plotter.AddLine(t_array, theta1_goal_t_array, "theta1_t_goal");
+      plotter.AddLine(t_array, theta1_t_array, "theta1_t");
+      plotter.AddLine(t_array, theta1_hat_t_array, "theta1_hat_t");
+      plotter.Publish();
     }
 
-    matplotlibcpp::figure();
-    matplotlibcpp::title("ff for distance");
-    matplotlibcpp::plot(distance_array, Uff0_distance_array, {{"label",
-    "ff0"}});
-    matplotlibcpp::plot(distance_array, Uff1_distance_array, {{"label",
-    "ff1"}});
-    matplotlibcpp::plot(distance_array, Uff0_distance_array_backwards_only,
-                        {{"label", "ff0_back"}});
-    matplotlibcpp::plot(distance_array, Uff1_distance_array_backwards_only,
-                        {{"label", "ff1_back"}});
-    matplotlibcpp::plot(distance_array, Uff0_distance_array_curvature,
-                        {{"label", "ff0_curve"}});
-    matplotlibcpp::plot(distance_array, Uff1_distance_array_curvature,
-                        {{"label", "ff1_curve"}});
-    matplotlibcpp::legend();
+    plotter.AddFigure();
+    plotter.Title("ff for distance");
+    plotter.AddLine(distance_array, Uff0_distance_array, "ff0");
+    plotter.AddLine(distance_array, Uff1_distance_array, "ff1");
+    plotter.AddLine(distance_array, Uff0_distance_array_backwards_only,
+                    "ff0_back");
+    plotter.AddLine(distance_array, Uff1_distance_array_backwards_only,
+                    "ff1_back");
+    plotter.AddLine(distance_array, Uff0_distance_array_curvature, "ff0_curve");
+    plotter.AddLine(distance_array, Uff1_distance_array_curvature, "ff1_curve");
 
-    matplotlibcpp::show();
+    plotter.Publish();
+    plotter.Spin();
   }
 }
 
@@ -362,7 +352,7 @@
 }  // namespace y2018
 
 int main(int argc, char **argv) {
-  gflags::ParseCommandLineFlags(&argc, &argv, false);
+  ::aos::InitGoogle(&argc, &argv);
   ::y2018::control_loops::superstructure::arm::Main();
   return 0;
 }
diff --git a/y2020/BUILD b/y2020/BUILD
index 918ff79..2f6f963 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -35,7 +35,7 @@
 robot_downloader(
     name = "pi_download",
     binaries = [
-        "//y2020/vision:calibration",
+        "//frc971/vision:intrinsics_calibration",
         "//y2020/vision:viewer",
     ],
     data = [
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 608ec5d..aa4e5bf 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -70,37 +70,6 @@
 )
 
 cc_binary(
-    name = "calibration",
-    srcs = [
-        "calibration.cc",
-    ],
-    data = [
-        "//y2020:aos_config",
-    ],
-    target_compatible_with = ["@platforms//os:linux"],
-    visibility = [
-        "//y2020:__subpackages__",
-        "//y2022:__subpackages__",
-        "//y2023:__subpackages__",
-    ],
-    deps = [
-        "//aos:init",
-        "//aos/events:shm_event_loop",
-        "//frc971/control_loops/drivetrain:improved_down_estimator",
-        "//frc971/vision:charuco_lib",
-        "//frc971/vision:vision_fbs",
-        "//frc971/wpilib:imu_batch_fbs",
-        "//frc971/wpilib:imu_fbs",
-        "//third_party:opencv",
-        "//y2020/vision/sift:sift_fbs",
-        "//y2020/vision/sift:sift_training_fbs",
-        "//y2020/vision/tools/python_code:sift_training_data",
-        "@com_google_absl//absl/strings:str_format",
-        "@org_tuxfamily_eigen//:eigen",
-    ],
-)
-
-cc_binary(
     name = "viewer_replay",
     srcs = [
         "viewer_replay.cc",
diff --git a/y2020/vision/calibration.cc b/y2020/vision/calibration.cc
deleted file mode 100644
index d5ed54f..0000000
--- a/y2020/vision/calibration.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-#include <cmath>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/highgui/highgui.hpp>
-#include <opencv2/imgproc.hpp>
-#include <regex>
-
-#include "Eigen/Dense"
-#include "Eigen/Geometry"
-#include "absl/strings/str_format.h"
-#include "aos/events/shm_event_loop.h"
-#include "aos/init.h"
-#include "aos/network/team_number.h"
-#include "aos/time/time.h"
-#include "aos/util/file.h"
-#include "frc971/vision/charuco_lib.h"
-
-DEFINE_string(calibration_folder, ".", "Folder to place calibration files.");
-DEFINE_string(camera_id, "", "Camera ID in format YY-NN-- year and number.");
-DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
-DEFINE_bool(display_undistorted, false,
-            "If true, display the undistorted image.");
-DEFINE_string(pi, "", "Pi name to calibrate.");
-
-namespace frc971 {
-namespace vision {
-
-class Calibration {
- public:
-  Calibration(aos::ShmEventLoop *event_loop, std::string_view pi,
-              std::string_view camera_id)
-      : event_loop_(event_loop),
-        pi_(pi),
-        pi_number_(aos::network::ParsePiNumber(pi)),
-        camera_id_(camera_id),
-        prev_H_camera_board_(Eigen::Affine3d()),
-        prev_image_H_camera_board_(Eigen::Affine3d()),
-        charuco_extractor_(
-            event_loop, pi, TargetType::kCharuco, "/camera",
-            [this](cv::Mat rgb_image,
-                   const aos::monotonic_clock::time_point eof,
-                   std::vector<cv::Vec4i> charuco_ids,
-                   std::vector<std::vector<cv::Point2f>> charuco_corners,
-                   bool valid, std::vector<Eigen::Vector3d> rvecs_eigen,
-                   std::vector<Eigen::Vector3d> tvecs_eigen) {
-              HandleCharuco(rgb_image, eof, charuco_ids, charuco_corners, valid,
-                            rvecs_eigen, tvecs_eigen);
-            }),
-        image_callback_(
-            event_loop,
-            absl::StrCat(
-                "/pi", std::to_string(aos::network::ParsePiNumber(pi).value()),
-                "/camera"),
-            [this](cv::Mat rgb_image,
-                   const aos::monotonic_clock::time_point eof) {
-              charuco_extractor_.HandleImage(rgb_image, eof);
-            },
-            std::chrono::milliseconds(5)) {
-    CHECK(pi_number_) << ": Invalid pi number " << pi
-                      << ", failed to parse pi number";
-    std::regex re{"^[0-9][0-9]-[0-9][0-9]"};
-    CHECK(std::regex_match(camera_id_, re))
-        << ": Invalid camera_id '" << camera_id_
-        << "', should be of form YY-NN";
-  }
-
-  void HandleCharuco(cv::Mat rgb_image,
-                     const aos::monotonic_clock::time_point /*eof*/,
-                     std::vector<cv::Vec4i> charuco_ids,
-                     std::vector<std::vector<cv::Point2f>> charuco_corners,
-                     bool valid, std::vector<Eigen::Vector3d> rvecs_eigen,
-                     std::vector<Eigen::Vector3d> tvecs_eigen) {
-    // Reduce resolution displayed on remote viewer to prevent lag
-    cv::resize(rgb_image, rgb_image,
-               cv::Size(rgb_image.cols / 2, rgb_image.rows / 2));
-    cv::imshow("Display", rgb_image);
-
-    if (FLAGS_display_undistorted) {
-      const cv::Size image_size(rgb_image.cols, rgb_image.rows);
-      cv::Mat undistorted_rgb_image(image_size, CV_8UC3);
-      cv::undistort(rgb_image, undistorted_rgb_image,
-                    charuco_extractor_.camera_matrix(),
-                    charuco_extractor_.dist_coeffs());
-
-      cv::imshow("Display undist", undistorted_rgb_image);
-    }
-
-    int keystroke = cv::waitKey(1);
-
-    // If we haven't got a valid pose estimate, don't use these points
-    if (!valid) {
-      return;
-    }
-    CHECK(tvecs_eigen.size() == 1)
-        << "Charuco board should only return one translational pose";
-    CHECK(rvecs_eigen.size() == 1)
-        << "Charuco board should only return one rotational pose";
-    // Calibration calculates rotation and translation delta from last image
-    // stored to automatically capture next image
-
-    Eigen::Affine3d H_board_camera =
-        Eigen::Translation3d(tvecs_eigen[0]) *
-        Eigen::AngleAxisd(rvecs_eigen[0].norm(),
-                          rvecs_eigen[0] / rvecs_eigen[0].norm());
-    Eigen::Affine3d H_camera_board_ = H_board_camera.inverse();
-    Eigen::Affine3d H_delta = H_board_camera * prev_H_camera_board_;
-
-    Eigen::AngleAxisd delta_r = Eigen::AngleAxisd(H_delta.rotation());
-
-    Eigen::Vector3d delta_t = H_delta.translation();
-
-    double r_norm = std::abs(delta_r.angle());
-    double t_norm = delta_t.norm();
-
-    bool store_image = false;
-    double percent_motion =
-        std::max<double>(r_norm / kDeltaRThreshold, t_norm / kDeltaTThreshold);
-    LOG(INFO) << "Captured: " << all_charuco_ids_.size() << " points; Moved "
-              << percent_motion << "% of what's needed";
-    // Verify that camera has moved enough from last stored image
-    if (r_norm > kDeltaRThreshold || t_norm > kDeltaTThreshold) {
-      // frame_ refers to deltas between current and last captured image
-      Eigen::Affine3d frame_H_delta =
-          H_board_camera * prev_image_H_camera_board_;
-
-      Eigen::AngleAxisd frame_delta_r =
-          Eigen::AngleAxisd(frame_H_delta.rotation());
-
-      Eigen::Vector3d frame_delta_t = frame_H_delta.translation();
-
-      double frame_r_norm = std::abs(frame_delta_r.angle());
-      double frame_t_norm = frame_delta_t.norm();
-
-      // Make sure camera has stopped moving before storing image
-      store_image =
-          frame_r_norm < kFrameDeltaRLimit && frame_t_norm < kFrameDeltaTLimit;
-      double percent_stop = std::max<double>(frame_r_norm / kFrameDeltaRLimit,
-                                             frame_t_norm / kFrameDeltaTLimit);
-      LOG(INFO) << "Captured: " << all_charuco_ids_.size()
-                << "points; Moved enough (" << percent_motion
-                << "%); Need to stop (last motion was " << percent_stop
-                << "% of limit; needs to be < 1 to capture)";
-    }
-    prev_image_H_camera_board_ = H_camera_board_;
-
-    if (store_image) {
-      if (valid) {
-        prev_H_camera_board_ = H_camera_board_;
-
-        // Unpack the Charuco ids from Vec4i
-        std::vector<int> charuco_ids_int;
-        for (cv::Vec4i charuco_id : charuco_ids) {
-          charuco_ids_int.emplace_back(charuco_id[0]);
-        }
-        all_charuco_ids_.emplace_back(std::move(charuco_ids_int));
-        all_charuco_corners_.emplace_back(std::move(charuco_corners[0]));
-
-        if (r_norm > kDeltaRThreshold) {
-          LOG(INFO) << "Triggered by rotation delta = " << r_norm << " > "
-                    << kDeltaRThreshold;
-        }
-        if (t_norm > kDeltaTThreshold) {
-          LOG(INFO) << "Triggered by translation delta = " << t_norm << " > "
-                    << kDeltaTThreshold;
-        }
-      }
-
-    } else if ((keystroke & 0xFF) == static_cast<int>('q')) {
-      event_loop_->Exit();
-    }
-  }
-
-  void MaybeCalibrate() {
-    // TODO: This number should depend on coarse vs. fine pattern
-    // Maybe just on total # of ids found, not just images
-    if (all_charuco_ids_.size() >= 50) {
-      LOG(INFO) << "Beginning calibration on " << all_charuco_ids_.size()
-                << " images";
-      cv::Mat cameraMatrix, distCoeffs;
-      std::vector<cv::Mat> rvecs, tvecs;
-      cv::Mat stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors;
-
-      // Set calibration flags (same as in calibrateCamera() function)
-      int calibration_flags = 0;
-      cv::Size img_size(640, 480);
-      const double reprojection_error = cv::aruco::calibrateCameraCharuco(
-          all_charuco_corners_, all_charuco_ids_, charuco_extractor_.board(),
-          img_size, cameraMatrix, distCoeffs, rvecs, tvecs,
-          stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors,
-          calibration_flags);
-      CHECK_LE(reprojection_error, 1.0)
-          << ": Reproduction error is bad-- greater than 1 pixel.";
-      LOG(INFO) << "Reprojection Error is " << reprojection_error;
-
-      flatbuffers::FlatBufferBuilder fbb;
-      flatbuffers::Offset<flatbuffers::String> name_offset =
-          fbb.CreateString(absl::StrFormat("pi%d", pi_number_.value()));
-      flatbuffers::Offset<flatbuffers::String> camera_id_offset =
-          fbb.CreateString(camera_id_);
-      flatbuffers::Offset<flatbuffers::Vector<float>> intrinsics_offset =
-          fbb.CreateVector<float>(9u, [&cameraMatrix](size_t i) {
-            return static_cast<float>(
-                reinterpret_cast<double *>(cameraMatrix.data)[i]);
-          });
-
-      flatbuffers::Offset<flatbuffers::Vector<float>>
-          distortion_coefficients_offset =
-              fbb.CreateVector<float>(5u, [&distCoeffs](size_t i) {
-                return static_cast<float>(
-                    reinterpret_cast<double *>(distCoeffs.data)[i]);
-              });
-
-      sift::CameraCalibration::Builder camera_calibration_builder(fbb);
-      std::optional<uint16_t> team_number =
-          aos::network::team_number_internal::ParsePiTeamNumber(pi_);
-      CHECK(team_number) << ": Invalid pi hostname " << pi_
-                         << ", failed to parse team number";
-
-      const aos::realtime_clock::time_point realtime_now =
-          aos::realtime_clock::now();
-      camera_calibration_builder.add_node_name(name_offset);
-      camera_calibration_builder.add_team_number(team_number.value());
-      camera_calibration_builder.add_camera_id(camera_id_offset);
-      camera_calibration_builder.add_calibration_timestamp(
-          realtime_now.time_since_epoch().count());
-      camera_calibration_builder.add_intrinsics(intrinsics_offset);
-      camera_calibration_builder.add_dist_coeffs(
-          distortion_coefficients_offset);
-      fbb.Finish(camera_calibration_builder.Finish());
-
-      aos::FlatbufferDetachedBuffer<sift::CameraCalibration> camera_calibration(
-          fbb.Release());
-      std::stringstream time_ss;
-      time_ss << realtime_now;
-
-      const std::string calibration_filename =
-          FLAGS_calibration_folder +
-          absl::StrFormat("/calibration_pi-%d-%d_cam-%s_%s.json",
-                          team_number.value(), pi_number_.value(), camera_id_,
-                          time_ss.str());
-
-      LOG(INFO) << calibration_filename << " -> "
-                << aos::FlatbufferToJson(camera_calibration,
-                                         {.multi_line = true});
-
-      aos::util::WriteStringToFileOrDie(
-          calibration_filename,
-          aos::FlatbufferToJson(camera_calibration, {.multi_line = true}));
-    } else {
-      LOG(INFO) << "Skipping calibration due to not enough images.";
-    }
-  }
-
- private:
-  static constexpr double kDeltaRThreshold = M_PI / 6.0;
-  static constexpr double kDeltaTThreshold = 0.3;
-
-  static constexpr double kFrameDeltaRLimit = M_PI / 60;
-  static constexpr double kFrameDeltaTLimit = 0.01;
-
-  aos::ShmEventLoop *event_loop_;
-  std::string pi_;
-  const std::optional<uint16_t> pi_number_;
-  const std::string camera_id_;
-
-  std::vector<std::vector<int>> all_charuco_ids_;
-  std::vector<std::vector<cv::Point2f>> all_charuco_corners_;
-
-  Eigen::Affine3d H_camera_board_;
-  Eigen::Affine3d prev_H_camera_board_;
-  Eigen::Affine3d prev_image_H_camera_board_;
-
-  CharucoExtractor charuco_extractor_;
-  ImageCallback image_callback_;
-};
-
-namespace {
-
-void Main() {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(FLAGS_config);
-
-  aos::ShmEventLoop event_loop(&config.message());
-
-  std::string hostname = FLAGS_pi;
-  if (hostname == "") {
-    hostname = aos::network::GetHostname();
-    LOG(INFO) << "Using pi name from hostname as " << hostname;
-  }
-  Calibration extractor(&event_loop, hostname, FLAGS_camera_id);
-
-  event_loop.Run();
-
-  extractor.MaybeCalibrate();
-}
-
-}  // namespace
-}  // namespace vision
-}  // namespace frc971
-
-int main(int argc, char **argv) {
-  aos::InitGoogle(&argc, &argv);
-  frc971::vision::Main();
-}
diff --git a/y2020/vision/extrinsics_calibration.cc b/y2020/vision/extrinsics_calibration.cc
index 3702355..d9e13b3 100644
--- a/y2020/vision/extrinsics_calibration.cc
+++ b/y2020/vision/extrinsics_calibration.cc
@@ -13,13 +13,12 @@
 #include "frc971/vision/vision_generated.h"
 #include "frc971/wpilib/imu_batch_generated.h"
 #include "y2020/control_loops/superstructure/superstructure_status_generated.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/tools/python_code/sift_training_data.h"
 
 DEFINE_string(pi, "pi-7971-2", "Pi name to calibrate.");
 DEFINE_bool(plot, false, "Whether to plot the resulting data.");
 DEFINE_bool(turret, false, "If true, the camera is on the turret");
+DEFINE_string(base_intrinsics, "",
+              "Intrinsics to use for extrinsics calibration.");
 
 namespace frc971 {
 namespace vision {
@@ -63,9 +62,13 @@
     std::unique_ptr<aos::EventLoop> pi_event_loop =
         factory.MakeEventLoop("calibration", pi_node);
 
+    aos::FlatbufferDetachedBuffer<calibration::CameraCalibration> intrinsics =
+        aos::JsonFileToFlatbuffer<calibration::CameraCalibration>(
+            FLAGS_base_intrinsics);
     // Now, hook Calibration up to everything.
     Calibration extractor(&factory, pi_event_loop.get(), imu_event_loop.get(),
-                          FLAGS_pi, TargetType::kCharuco, "/camera", &data);
+                          FLAGS_pi, &intrinsics.message(), TargetType::kCharuco,
+                          "/camera", &data);
 
     if (FLAGS_turret) {
       aos::NodeEventLoopFactory *roborio_factory =
diff --git a/y2022/BUILD b/y2022/BUILD
index 3d4dac3..5d32f15 100644
--- a/y2022/BUILD
+++ b/y2022/BUILD
@@ -39,7 +39,7 @@
 robot_downloader(
     name = "pi_download",
     binaries = [
-        "//y2020/vision:calibration",
+        "//frc971/vision:intrinsics_calibration",
         "//y2022/vision:viewer",
         "//y2022/localizer:imu_main",
         "//y2022/localizer:localizer_main",
diff --git a/y2022/vision/calibrate_extrinsics.cc b/y2022/vision/calibrate_extrinsics.cc
index 04b24ea..6793bc4 100644
--- a/y2022/vision/calibrate_extrinsics.cc
+++ b/y2022/vision/calibrate_extrinsics.cc
@@ -11,9 +11,6 @@
 #include "frc971/vision/extrinsics_calibration.h"
 #include "frc971/vision/vision_generated.h"
 #include "frc971/wpilib/imu_batch_generated.h"
-#include "y2020/vision/sift/sift_generated.h"
-#include "y2020/vision/sift/sift_training_generated.h"
-#include "y2020/vision/tools/python_code/sift_training_data.h"
 #include "y2022/control_loops/superstructure/superstructure_status_generated.h"
 
 DEFINE_string(pi, "pi-7971-2", "Pi name to calibrate.");
@@ -24,6 +21,8 @@
 DEFINE_string(image_channel, "/camera", "Channel to listen for images on");
 DEFINE_string(output_logs, "/tmp/calibration/",
               "Output folder for visualization logs.");
+DEFINE_string(base_intrinsics, "",
+              "Intrinsics to use for extrinsics calibration.");
 
 namespace frc971 {
 namespace vision {
@@ -97,9 +96,13 @@
                  << ", expected: aruco|charuco|charuco_diamond";
     }
 
+    aos::FlatbufferDetachedBuffer<calibration::CameraCalibration> intrinsics =
+        aos::JsonFileToFlatbuffer<calibration::CameraCalibration>(
+            FLAGS_base_intrinsics);
     // Now, hook Calibration up to everything.
     Calibration extractor(&factory, pi_event_loop.get(), imu_event_loop.get(),
-                          FLAGS_pi, target_type, FLAGS_image_channel, &data);
+                          FLAGS_pi, &intrinsics.message(), target_type,
+                          FLAGS_image_channel, &data);
 
     if (FLAGS_turret) {
       aos::NodeEventLoopFactory *roborio_factory =
diff --git a/y2023/BUILD b/y2023/BUILD
index cf5d60f..6ba1392 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -5,7 +5,7 @@
 robot_downloader(
     name = "pi_download",
     binaries = [
-        "//y2020/vision:calibration",
+        "//frc971/vision:intrinsics_calibration",
         "//y2023/vision:viewer",
         "//y2023/vision:aprilrobotics",
         "//y2022/localizer:localizer_main",
diff --git a/y2023/vision/camera_reader.cc b/y2023/vision/camera_reader.cc
index b604516..7de3eb5 100644
--- a/y2023/vision/camera_reader.cc
+++ b/y2023/vision/camera_reader.cc
@@ -17,6 +17,8 @@
 DEFINE_double(green, 1, "Green gain");
 DEFINE_double(blue, 1.96, "Blue gain");
 DEFINE_double(exposure, 150, "Camera exposure");
+DEFINE_bool(send_downsized_images, false,
+            "Whether to send downsized image for driver cam streaming.");
 
 namespace y2023 {
 namespace vision {
@@ -31,59 +33,70 @@
     media_device->Log();
   }
 
-  int width = 1296;
-  int height = 972;
-  int color_format = MEDIA_BUS_FMT_SBGGR10_1X10;
-  std::string camera_device_string = "ov5647 4-0036";
-  if (FLAGS_lowlight_camera) {
-    width = 1920;
-    height = 1080;
-    color_format = MEDIA_BUS_FMT_SRGGB10_1X10;
-    camera_device_string = "arducam-pivariety 4-000c";
-  }
+  const int kWidth = (FLAGS_lowlight_camera ? 1920 : 1296);
+  const int kHeight = (FLAGS_lowlight_camera ? 1080 : 972);
+  const int kColorFormat = (FLAGS_lowlight_camera ? MEDIA_BUS_FMT_SRGGB10_1X10
+                                                  : MEDIA_BUS_FMT_SBGGR10_1X10);
+
+  const std::string_view kCameraDeviceString =
+      (FLAGS_lowlight_camera ? "arducam-pivariety 4-000c" : "ov5647 4-0036");
+
+  // Scale down the selfpath images so we can log at 30 Hz (but still detect
+  // april tags at a far enough distance)
+  const double kSelfpathScalar = 2.0 / 3.0;
+  const int kSelfpathWidth = kWidth * kSelfpathScalar;
+  const int kSelfpathHeight = kHeight * kSelfpathScalar;
+
+  // Send heavily downsized images to the drivercam. They go over the network,
+  // and in this case frame rate is more important than quality
+  constexpr int kMainpathWidth = 640;
+  constexpr int kMainpathHeight = 480;
 
   media_device->Reset();
 
-  Entity *camera = media_device->FindEntity(camera_device_string);
-  camera->pads()[0]->SetSubdevFormat(width, height, color_format);
+  Entity *camera = media_device->FindEntity(kCameraDeviceString);
+  camera->pads()[0]->SetSubdevFormat(kWidth, kHeight, kColorFormat);
 
   Entity *rkisp1_csi = media_device->FindEntity("rkisp1_csi");
-  rkisp1_csi->pads()[0]->SetSubdevFormat(width, height, color_format);
-  rkisp1_csi->pads()[1]->SetSubdevFormat(width, height, color_format);
+  rkisp1_csi->pads()[0]->SetSubdevFormat(kWidth, kHeight, kColorFormat);
+  rkisp1_csi->pads()[1]->SetSubdevFormat(kWidth, kHeight, kColorFormat);
 
   // TODO(austin): Should we set this on the link?
   Entity *rkisp1_isp = media_device->FindEntity("rkisp1_isp");
-  rkisp1_isp->pads(0)->SetSubdevFormat(width, height, color_format);
-  rkisp1_isp->pads(0)->SetSubdevCrop(width, height);
+  rkisp1_isp->pads(0)->SetSubdevFormat(kWidth, kHeight, kColorFormat);
+  rkisp1_isp->pads(0)->SetSubdevCrop(kWidth, kHeight);
 
-  rkisp1_isp->pads(2)->SetSubdevFormat(width, height, MEDIA_BUS_FMT_YUYV8_2X8);
-  rkisp1_isp->pads(2)->SetSubdevCrop(width, height);
+  rkisp1_isp->pads(2)->SetSubdevFormat(kWidth, kHeight,
+                                       MEDIA_BUS_FMT_YUYV8_2X8);
+  rkisp1_isp->pads(2)->SetSubdevCrop(kWidth, kHeight);
 
   Entity *rkisp1_resizer_selfpath =
       media_device->FindEntity("rkisp1_resizer_selfpath");
-  rkisp1_resizer_selfpath->pads(0)->SetSubdevFormat(width, height,
+  rkisp1_resizer_selfpath->pads(0)->SetSubdevFormat(kWidth, kHeight,
                                                     MEDIA_BUS_FMT_YUYV8_2X8);
   rkisp1_resizer_selfpath->pads(1)->SetSubdevFormat(
-      width * 2 / 3, height * 2 / 3, MEDIA_BUS_FMT_YUYV8_2X8);
-  rkisp1_resizer_selfpath->pads(0)->SetSubdevCrop(width, height);
+      kSelfpathWidth, kSelfpathHeight, MEDIA_BUS_FMT_YUYV8_2X8);
+  rkisp1_resizer_selfpath->pads(0)->SetSubdevCrop(kWidth, kHeight);
 
   Entity *rkisp1_resizer_mainpath =
       media_device->FindEntity("rkisp1_resizer_mainpath");
-  rkisp1_resizer_mainpath->pads(0)->SetSubdevFormat(width, height,
+  rkisp1_resizer_mainpath->pads(0)->SetSubdevFormat(kWidth, kHeight,
                                                     MEDIA_BUS_FMT_YUYV8_2X8);
 
-  rkisp1_resizer_mainpath->pads(0)->SetSubdevCrop(width, height);
-  rkisp1_resizer_mainpath->pads(1)->SetSubdevFormat(width / 2, height / 2,
-                                                    MEDIA_BUS_FMT_YUYV8_2X8);
+  rkisp1_resizer_mainpath->pads(0)->SetSubdevCrop(kWidth, kHeight);
+  rkisp1_resizer_mainpath->pads(1)->SetSubdevFormat(
+      kMainpathWidth, kMainpathHeight, MEDIA_BUS_FMT_YUYV8_2X8);
 
   Entity *rkisp1_mainpath = media_device->FindEntity("rkisp1_mainpath");
-  rkisp1_mainpath->SetFormat(width / 2, height / 2, V4L2_PIX_FMT_YUV422P);
+  rkisp1_mainpath->SetFormat(kMainpathWidth, kMainpathHeight,
+                             V4L2_PIX_FMT_YUYV);
 
   Entity *rkisp1_selfpath = media_device->FindEntity("rkisp1_selfpath");
-  rkisp1_selfpath->SetFormat(width * 2 / 3, height * 2 / 3, V4L2_PIX_FMT_YUYV);
+  rkisp1_selfpath->SetFormat(kSelfpathWidth, kSelfpathHeight,
+                             V4L2_PIX_FMT_YUYV);
 
   media_device->Enable(
-      media_device->FindLink(camera_device_string, 0, "rkisp1_csi", 0));
+      media_device->FindLink(kCameraDeviceString, 0, "rkisp1_csi", 0));
   media_device->Enable(
       media_device->FindLink("rkisp1_csi", 1, "rkisp1_isp", 0));
   media_device->Enable(
@@ -99,16 +112,25 @@
   event_loop.SetRuntimeRealtimePriority(55);
   event_loop.SetRuntimeAffinity(aos::MakeCpusetFromCpus({2}));
 
-  RockchipV4L2Reader v4l2_reader(&event_loop, event_loop.epoll(),
-                                 rkisp1_selfpath->device(), camera->device());
-
+  // Reader for vision processing
+  RockchipV4L2Reader v4l2_reader_selfpath(&event_loop, event_loop.epoll(),
+                                          rkisp1_selfpath->device(),
+                                          camera->device());
   if (FLAGS_lowlight_camera) {
-    v4l2_reader.SetGainExt(100);
-    v4l2_reader.SetVerticalBlanking(1000);
-    v4l2_reader.SetExposure(FLAGS_exposure);
+    v4l2_reader_selfpath.SetGainExt(100);
+    v4l2_reader_selfpath.SetVerticalBlanking(1000);
+    v4l2_reader_selfpath.SetExposure(FLAGS_exposure);
   } else {
-    v4l2_reader.SetGainExt(1000);
-    v4l2_reader.SetExposure(1000);
+    v4l2_reader_selfpath.SetGainExt(1000);
+    v4l2_reader_selfpath.SetExposure(1000);
+  }
+
+  std::unique_ptr<RockchipV4L2Reader> v4l2_reader_mainpath;
+  if (FLAGS_send_downsized_images) {
+    // Reader for driver cam streaming on logger pi, sending lower res images
+    v4l2_reader_mainpath = std::make_unique<RockchipV4L2Reader>(
+        &event_loop, event_loop.epoll(), rkisp1_mainpath->device(),
+        camera->device(), "/camera/downsized");
   }
 
   {
diff --git a/y2023/vision/target_mapping.cc b/y2023/vision/target_mapping.cc
index 8b1e31e..18be258 100644
--- a/y2023/vision/target_mapping.cc
+++ b/y2023/vision/target_mapping.cc
@@ -21,7 +21,6 @@
 
 namespace y2023 {
 namespace vision {
-using frc971::vision::CharucoExtractor;
 using frc971::vision::DataAdapter;
 using frc971::vision::ImageCallback;
 using frc971::vision::PoseUtils;
diff --git a/y2023/y2023.json b/y2023/y2023.json
index d5f9462..cdac47c 100644
--- a/y2023/y2023.json
+++ b/y2023/y2023.json
@@ -1,5 +1,5 @@
 {
-  "channel_storage_duration": 10000000000,
+  "channel_storage_duration": 8000000000,
   "maps": [
     {
       "match": {
diff --git a/y2023/y2023_logger.json b/y2023/y2023_logger.json
index 793c101..0f1e633 100644
--- a/y2023/y2023_logger.json
+++ b/y2023/y2023_logger.json
@@ -377,6 +377,17 @@
       "num_senders": 1
     },
     {
+      "name": "/logger/camera/downsized",
+      "type": "frc971.vision.CameraImage",
+      "logger": "NOT_LOGGED",
+      "source_node": "logger",
+      "frequency": 40,
+      "max_size": 921744,
+      "num_readers": 4,
+      "read_method": "PIN",
+      "num_senders": 18
+    },
+    {
       "name": "/localizer",
       "type": "frc971.IMUValuesBatch",
       "source_node": "imu",
@@ -455,16 +466,24 @@
       ]
     },
     {
+      "name": "camera_reader",
+      "executable_name": "camera_reader",
+      "args": ["--enable_ftrace", "--send_downsized_images"],
+      "nodes": [
+        "logger"
+      ]
+    },
+    {
       "name": "image_logger",
       "executable_name": "logger_main",
       "autostart": false,
       "args": [
-        "--snappy_compress",
         "--logging_folder",
         "",
-        "--snappy_compress",
         "--rotate_every",
-        "60.0"
+        "60.0",
+        "--direct",
+        "--flush_size=4194304"
       ],
       "nodes": [
         "logger"
diff --git a/y2023/y2023_pi_template.json b/y2023/y2023_pi_template.json
index 911c795..c722110 100644
--- a/y2023/y2023_pi_template.json
+++ b/y2023/y2023_pi_template.json
@@ -175,7 +175,7 @@
       "source_node": "pi{{ NUM }}",
       "frequency": 40,
       "num_senders": 2,
-      "max_size": 40000,
+      "max_size": 1024,
       "logger": "LOCAL_AND_REMOTE_LOGGER",
       "logger_nodes": [
         "imu",
@@ -379,6 +379,22 @@
       ]
     },
     {
+      "name": "image_logger",
+      "executable_name": "logger_main",
+      "autostart": false,
+      "args": [
+        "--logging_folder",
+        "",
+        "--rotate_every",
+        "60.0",
+        "--direct",
+        "--flush_size=4194304"
+      ],
+      "nodes": [
+        "pi{{ NUM }}"
+      ]
+    },
+    {
       "name": "aprilrobotics",
       "executable_name": "aprilrobotics",
       "args": ["--enable_ftrace"],