Merge "Add control panel messages to superstructure"
diff --git a/aos/configuration.cc b/aos/configuration.cc
index c4f139c..2d4b867 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -653,9 +653,8 @@
   }
 }
 
-int GetNodeIndex(const Configuration *config, const Node *node) {
-  CHECK(config->has_nodes())
-      << ": Asking for a node from a single node configuration.";
+namespace {
+int GetNodeIndexFromConfig(const Configuration *config, const Node *node) {
   int node_index = 0;
   for (const Node *iterated_node : *config->nodes()) {
     if (iterated_node == node) {
@@ -663,12 +662,56 @@
     }
     ++node_index;
   }
-  LOG(FATAL) << "Node not found in the configuration.";
+  return -1;
+}
+}  // namespace
+
+int GetNodeIndex(const Configuration *config, const Node *node) {
+  if (!MultiNode(config)) {
+    return 0;
+  }
+
+  {
+    int node_index = GetNodeIndexFromConfig(config, node);
+    if (node_index != -1) {
+      return node_index;
+    }
+  }
+
+  const Node *result = GetNode(config, node);
+  CHECK(result != nullptr);
+
+  {
+    int node_index = GetNodeIndexFromConfig(config, result);
+    if (node_index != -1) {
+      return node_index;
+    }
+  }
+
+  LOG(FATAL) << "Node " << FlatbufferToJson(node)
+             << " not found in the configuration.";
+}
+
+int GetNodeIndex(const Configuration *config, std::string_view name) {
+  if (!MultiNode(config)) {
+    return 0;
+  }
+
+  {
+    int node_index = 0;
+    for (const Node *iterated_node : *config->nodes()) {
+      if (iterated_node->name()->string_view() == name) {
+        return node_index;
+      }
+      ++node_index;
+    }
+  }
+  LOG(FATAL) << "Node " << name << " not found in the configuration.";
 }
 
 std::vector<const Node *> GetNodes(const Configuration *config) {
   std::vector<const Node *> nodes;
-  if (configuration::MultiNode(config)) {
+  if (MultiNode(config)) {
     for (const Node *node : *config->nodes()) {
       nodes.emplace_back(node);
     }
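
A minimal sketch of the new lookup paths (mirroring the usage in
configuration_test.cc below; config and config2 are hypothetical
FlatbufferDetachedBuffer<Configuration> instances):

    const aos::Node *pi1 =
        aos::configuration::GetNode(&config.message(), "pi1");
    // Fast path: pi1 points into config, so the first linear scan finds it.
    int index = aos::configuration::GetNodeIndex(&config.message(), pi1);
    // A pointer into a different Configuration falls back to a GetNode()
    // lookup by name before scanning again.
    int same = aos::configuration::GetNodeIndex(&config2.message(), pi1);
    // Or skip pointers entirely and look up by name.
    int by_name = aos::configuration::GetNodeIndex(&config.message(), "pi1");
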
diff --git a/aos/configuration.h b/aos/configuration.h
index a9fbfe3..bf2ec6c 100644
--- a/aos/configuration.h
+++ b/aos/configuration.h
@@ -80,8 +80,10 @@
 // in a single node world.)
 std::vector<const Node *> GetNodes(const Configuration *config);
 
-// Returns the node index for a node.  Note: node needs to exist inside config.
+// Returns the node index for a node.  Note: the lookup is faster if node
+// points into config, but that is not required.
 int GetNodeIndex(const Configuration *config, const Node *node);
+int GetNodeIndex(const Configuration *config, std::string_view name);
 
 // Returns true if we are running in a multinode configuration.
 bool MultiNode(const Configuration *config);
diff --git a/aos/configuration_test.cc b/aos/configuration_test.cc
index 70515fd..0c0874b 100644
--- a/aos/configuration_test.cc
+++ b/aos/configuration_test.cc
@@ -631,11 +631,22 @@
 TEST_F(ConfigurationTest, GetNodeIndex) {
   FlatbufferDetachedBuffer<Configuration> config =
       ReadConfig("aos/testdata/good_multinode.json");
+  FlatbufferDetachedBuffer<Configuration> config2 =
+      ReadConfig("aos/testdata/good_multinode.json");
   const Node *pi1 = GetNode(&config.message(), "pi1");
   const Node *pi2 = GetNode(&config.message(), "pi2");
 
+  // Try the normal case.
   EXPECT_EQ(GetNodeIndex(&config.message(), pi1), 0);
   EXPECT_EQ(GetNodeIndex(&config.message(), pi2), 1);
+
+  // Now try node pointers that come from a different message.
+  EXPECT_EQ(GetNodeIndex(&config2.message(), pi1), 0);
+  EXPECT_EQ(GetNodeIndex(&config2.message(), pi2), 1);
+
+  // And now try string names.
+  EXPECT_EQ(GetNodeIndex(&config2.message(), pi1->name()->string_view()), 0);
+  EXPECT_EQ(GetNodeIndex(&config2.message(), pi2->name()->string_view()), 1);
 }
 
 // Tests that GetNodeOrDie handles both single and multi-node worlds and returns
diff --git a/aos/events/BUILD b/aos/events/BUILD
index 0470b6e..1f7a3d7 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -1,4 +1,4 @@
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_ts_library")
 load("//aos:config.bzl", "aos_config")
 
 package(default_visibility = ["//visibility:public"])
@@ -24,6 +24,11 @@
     gen_reflections = 1,
 )
 
+flatbuffer_ts_library(
+    name = "ping_ts_fbs",
+    srcs = ["ping.fbs"],
+)
+
 flatbuffer_cc_library(
     name = "pong_fbs",
     srcs = ["pong.fbs"],
diff --git a/aos/events/event_loop.cc b/aos/events/event_loop.cc
index 5b6f5c6..b50162b 100644
--- a/aos/events/event_loop.cc
+++ b/aos/events/event_loop.cc
@@ -142,14 +142,15 @@
   ChannelIndex(channel);
 
   CHECK(taken_senders_.find(channel) == taken_senders_.end())
-      << ": " << FlatbufferToJson(channel) << " is already being used.";
+      << ": " << configuration::CleanedChannelToString(channel)
+      << " is already being used.";
 
   auto result = taken_watchers_.insert(channel);
-  CHECK(result.second) << ": " << FlatbufferToJson(channel)
+  CHECK(result.second) << ": " << configuration::CleanedChannelToString(channel)
                        << " is already being used.";
 
   if (!configuration::ChannelIsReadableOnNode(channel, node())) {
-    LOG(FATAL) << ": " << FlatbufferToJson(channel)
+    LOG(FATAL) << ": " << configuration::CleanedChannelToString(channel)
                << " is not able to be watched on this node.  Check your "
                   "configuration.";
   }
@@ -160,7 +161,8 @@
   ChannelIndex(channel);
 
   CHECK(taken_watchers_.find(channel) == taken_watchers_.end())
-      << ": Channel " << FlatbufferToJson(channel) << " is already being used.";
+      << ": Channel " << configuration::CleanedChannelToString(channel)
+      << " is already being used.";
 
   // We don't care if this is a duplicate.
   taken_senders_.insert(channel);
diff --git a/aos/events/event_scheduler.cc b/aos/events/event_scheduler.cc
index 26752b1..a9ae7c0 100644
--- a/aos/events/event_scheduler.cc
+++ b/aos/events/event_scheduler.cc
@@ -9,7 +9,7 @@
 namespace aos {
 
 EventScheduler::Token EventScheduler::Schedule(
-    distributed_clock::time_point time, ::std::function<void()> callback) {
+    monotonic_clock::time_point time, ::std::function<void()> callback) {
   return events_list_.emplace(time, callback);
 }
 
@@ -17,43 +17,29 @@
   events_list_.erase(token);
 }
 
-void EventScheduler::RunFor(distributed_clock::duration duration) {
-  const distributed_clock::time_point end_time =
-      distributed_now() + duration;
-  logging::ScopedLogRestorer prev_logger;
-  is_running_ = true;
-  for (std::function<void()> &on_run : on_run_) {
-    on_run();
+aos::monotonic_clock::time_point EventScheduler::OldestEvent() {
+  if (events_list_.empty()) {
+    return monotonic_clock::max_time;
   }
-  on_run_.clear();
-  while (!events_list_.empty() && is_running_) {
-    auto iter = events_list_.begin();
-    distributed_clock::time_point next_time = iter->first;
-    if (next_time > end_time) {
-      break;
-    }
-    now_ = iter->first;
-    ::std::function<void()> callback = ::std::move(iter->second);
-    events_list_.erase(iter);
-    callback();
-  }
-  now_ = end_time;
+
+  return events_list_.begin()->first;
 }
 
-void EventScheduler::Run() {
-  logging::ScopedLogRestorer prev_logger;
-  is_running_ = true;
+void EventScheduler::CallOldestEvent() {
+  CHECK_GT(events_list_.size(), 0u);
+  auto iter = events_list_.begin();
+  now_ = iter->first;
+
+  ::std::function<void()> callback = ::std::move(iter->second);
+  events_list_.erase(iter);
+  callback();
+}
+
+void EventScheduler::RunOnRun() {
   for (std::function<void()> &on_run : on_run_) {
     on_run();
   }
   on_run_.clear();
-  while (!events_list_.empty() && is_running_) {
-    auto iter = events_list_.begin();
-    now_ = iter->first;
-    ::std::function<void()> callback = ::std::move(iter->second);
-    events_list_.erase(iter);
-    callback();
-  }
 }
 
 std::ostream &operator<<(std::ostream &stream,
@@ -63,4 +49,92 @@
   return stream;
 }
 
+void EventSchedulerScheduler::AddEventScheduler(EventScheduler *scheduler) {
+  CHECK(std::find(schedulers_.begin(), schedulers_.end(), scheduler) ==
+        schedulers_.end());
+  CHECK(scheduler->scheduler_scheduler_ == nullptr);
+
+  schedulers_.emplace_back(scheduler);
+  scheduler->scheduler_scheduler_ = this;
+}
+
+void EventSchedulerScheduler::RunFor(distributed_clock::duration duration) {
+  distributed_clock::time_point end_time = now_ + duration;
+  logging::ScopedLogRestorer prev_logger;
+  RunOnRun();
+
+  // Run all the sub-event-schedulers.
+  while (is_running_) {
+    std::tuple<distributed_clock::time_point, EventScheduler *> oldest_event =
+        OldestEvent();
+    // No events left, bail.
+    if (std::get<0>(oldest_event) == distributed_clock::max_time ||
+        std::get<0>(oldest_event) > end_time) {
+      is_running_ = false;
+      break;
+    }
+
+    // We get to pick our tradeoffs here.  Either we assume that there are no
+    // backward step changes in our time function for each node, or we have to
+    // let time go backwards.  This backwards time jump should be small, so we
+    // can check for it and bound it.
+    CHECK_LE(now_, std::get<0>(oldest_event) + std::chrono::milliseconds(100))
+        << ": Simulated time went backwards by too much.  Please investigate.";
+    now_ = std::get<0>(oldest_event);
+
+    std::get<1>(oldest_event)->CallOldestEvent();
+  }
+
+  now_ = end_time;
+}
+
+void EventSchedulerScheduler::Run() {
+  logging::ScopedLogRestorer prev_logger;
+  RunOnRun();
+  // Run all the sub-event-schedulers.
+  while (is_running_) {
+    std::tuple<distributed_clock::time_point, EventScheduler *> oldest_event =
+        OldestEvent();
+    // No events left, bail.
+    if (std::get<0>(oldest_event) == distributed_clock::max_time) {
+      break;
+    }
+
+    // We get to pick our tradeoffs here.  Either we assume that there are no
+    // backward step changes in our time function for each node, or we have to
+    // let time go backwards.  This backwards time jump should be small, so we
+    // can check for it and bound it.
+    CHECK_LE(now_, std::get<0>(oldest_event) + std::chrono::milliseconds(100))
+        << ": Simulated time went backwards by too much.  Please investigate.";
+    now_ = std::get<0>(oldest_event);
+
+    std::get<1>(oldest_event)->CallOldestEvent();
+  }
+
+  is_running_ = false;
+}
+
+std::tuple<distributed_clock::time_point, EventScheduler *>
+EventSchedulerScheduler::OldestEvent() {
+  distributed_clock::time_point min_event_time = distributed_clock::max_time;
+  EventScheduler *min_scheduler = nullptr;
+
+  // TODO(austin): Don't linearly search...  But for N=3, it is probably the
+  // fastest way to do this.
+  for (EventScheduler *scheduler : schedulers_) {
+    const monotonic_clock::time_point monotonic_event_time =
+        scheduler->OldestEvent();
+    if (monotonic_event_time != monotonic_clock::max_time) {
+      const distributed_clock::time_point event_time =
+          scheduler->ToDistributedClock(monotonic_event_time);
+      if (event_time < min_event_time) {
+        min_event_time = event_time;
+        min_scheduler = scheduler;
+      }
+    }
+  }
+
+  return std::make_tuple(min_event_time, min_scheduler);
+}
+
 }  // namespace aos
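
A worked instance of the clock math OldestEvent() relies on (offsets
hypothetical): with distributed = monotonic + offset,

    aos::EventScheduler a;
    aos::EventScheduler b;
    a.SetDistributedOffset(std::chrono::milliseconds(2));
    b.SetDistributedOffset(std::chrono::milliseconds(-3));
    // An event at monotonic 10ms on a maps to distributed 12ms; an event at
    // monotonic 14ms on b maps to distributed 11ms.  OldestEvent() therefore
    // returns b's event first, even though its monotonic timestamp is later.
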
diff --git a/aos/events/event_scheduler.h b/aos/events/event_scheduler.h
index 400a307..e6c732b 100644
--- a/aos/events/event_scheduler.h
+++ b/aos/events/event_scheduler.h
@@ -9,6 +9,7 @@
 #include <vector>
 
 #include "aos/events/event_loop.h"
+#include "aos/logging/implementations.h"
 #include "aos/time/time.h"
 #include "glog/logging.h"
 
@@ -42,15 +43,17 @@
 std::ostream &operator<<(std::ostream &stream,
                          const aos::distributed_clock::time_point &now);
 
+class EventSchedulerScheduler;
+
 class EventScheduler {
  public:
   using ChannelType =
-      std::multimap<distributed_clock::time_point, std::function<void()>>;
+      std::multimap<monotonic_clock::time_point, std::function<void()>>;
   using Token = ChannelType::iterator;
 
   // Schedule an event with a callback function
   // Returns an iterator to the event
-  Token Schedule(distributed_clock::time_point time,
+  Token Schedule(monotonic_clock::time_point time,
                  std::function<void()> callback);
 
   // Schedules a callback when the event scheduler starts.
@@ -63,29 +66,132 @@
   // Deschedule an event by its iterator
   void Deschedule(Token token);
 
-  // Runs until exited.
-  void Run();
-  // Runs for a duration.
-  void RunFor(distributed_clock::duration duration);
+  // Runs the OnRun callbacks.
+  void RunOnRun();
 
-  void Exit() { is_running_ = false; }
+  // Returns true if events are being handled.
+  inline bool is_running() const;
 
-  bool is_running() const { return is_running_; }
+  // Returns the timestamp of the next event to trigger.
+  aos::monotonic_clock::time_point OldestEvent();
+  // Handles the next event.
+  void CallOldestEvent();
 
-  distributed_clock::time_point distributed_now() const { return now_; }
+  // Converts a time to the distributed clock for scheduling and cross-node time
+  // measurement.
+  distributed_clock::time_point ToDistributedClock(
+      monotonic_clock::time_point time) const {
+    return distributed_clock::epoch() + time.time_since_epoch() +
+           monotonic_offset_;
+  }
+
+  // Takes the distributed time and converts it to the monotonic clock for this
+  // node.
+  monotonic_clock::time_point FromDistributedClock(
+      distributed_clock::time_point time) const {
+    return monotonic_clock::epoch() + time.time_since_epoch() -
+           monotonic_offset_;
+  }
+
+  // Returns the current monotonic time on this node calculated from the
+  // distributed clock.
+  inline monotonic_clock::time_point monotonic_now() const;
+
+  // Sets the offset between the distributed and monotonic clock.
+  //   distributed = monotonic + offset;
+  void SetDistributedOffset(std::chrono::nanoseconds monotonic_offset) {
+    monotonic_offset_ = monotonic_offset;
+  }
+
+  // Returns the offset used to convert to and from the distributed clock.
+  std::chrono::nanoseconds monotonic_offset() const {
+    return monotonic_offset_;
+  }
 
  private:
+  friend class EventSchedulerScheduler;
   // Current execution time.
-  distributed_clock::time_point now_ = distributed_clock::epoch();
+  monotonic_clock::time_point now_ = monotonic_clock::epoch();
 
+  // Offset to the distributed clock.
+  //   distributed = monotonic + offset;
+  std::chrono::nanoseconds monotonic_offset_ = std::chrono::seconds(0);
+
+  // List of functions to run (once) when running.
   std::vector<std::function<void()>> on_run_;
 
   // Multimap holding times to run functions.  These are stored in order, and
   // the order is the callback tree.
   ChannelType events_list_;
-  bool is_running_ = false;
+
+  // Pointer to the actual scheduler.
+  EventSchedulerScheduler *scheduler_scheduler_ = nullptr;
 };
 
+// We need a heap of heaps...
+//
+// Events in a node have a very well defined progression of time.  It is linear
+// and well represented by the monotonic clock.
+//
+// Events across nodes don't follow this well.  Time skews between nodes all
+// the time.  We also don't know, ahead of time, the function which converts
+// from each node's monotonic clock to the distributed clock (our unified base
+// time which is likely the average time between nodes).
+//
+// This pushes us towards merge sort: sort each node's events with a heap like
+// we used to, and then repeatedly merge the oldest event across nodes.
+class EventSchedulerScheduler {
+ public:
+  // Adds an event scheduler to the list.
+  void AddEventScheduler(EventScheduler *scheduler);
+
+  // Runs until there are no more events or Exit is called.
+  void Run();
+
+  // Stops running.
+  void Exit() { is_running_ = false; }
+
+  bool is_running() const { return is_running_; }
+
+  // Runs for a duration on the distributed clock.  Time on the distributed
+  // clock should be very representative of time on each node, but won't be
+  // exactly the same.
+  void RunFor(distributed_clock::duration duration);
+
+  // Returns the current distributed time.
+  distributed_clock::time_point distributed_now() const { return now_; }
+
+ private:
+  // Handles running the OnRun functions.
+  void RunOnRun() {
+    CHECK(!is_running_);
+    is_running_ = true;
+    for (EventScheduler *scheduler : schedulers_) {
+      scheduler->RunOnRun();
+    }
+  }
+
+  // Returns the next event time and scheduler on which to run it.
+  std::tuple<distributed_clock::time_point, EventScheduler *> OldestEvent();
+
+  // True if we are running.
+  bool is_running_ = false;
+  // The current time.
+  distributed_clock::time_point now_ = distributed_clock::epoch();
+  // List of schedulers to run in sync.
+  std::vector<EventScheduler *> schedulers_;
+};
+
+inline monotonic_clock::time_point EventScheduler::monotonic_now() const {
+  // Make sure we stay in sync.
+  CHECK_EQ(now_, FromDistributedClock(scheduler_scheduler_->distributed_now()));
+  return now_;
+}
+
+inline bool EventScheduler::is_running() const {
+  return scheduler_scheduler_->is_running();
+}
+
 }  // namespace aos
 
 #endif  // AOS_EVENTS_EVENT_SCHEDULER_H_
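
A usage sketch of the new split, assuming two simulated nodes (names and
values hypothetical):

    aos::EventScheduler pi1_scheduler;
    aos::EventScheduler pi2_scheduler;
    aos::EventSchedulerScheduler scheduler_scheduler;
    scheduler_scheduler.AddEventScheduler(&pi1_scheduler);
    scheduler_scheduler.AddEventScheduler(&pi2_scheduler);

    // Each scheduler keeps its own monotonic clock; the offset ties it to
    // the shared distributed clock (distributed = monotonic + offset).
    pi2_scheduler.SetDistributedOffset(std::chrono::milliseconds(5));

    pi1_scheduler.Schedule(
        aos::monotonic_clock::epoch() + std::chrono::seconds(1),
        []() { LOG(INFO) << "pi1 event"; });

    // Events from both nodes are merged on the distributed clock.
    scheduler_scheduler.RunFor(std::chrono::seconds(2));
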
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 2b3df8f..2e63f45 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -16,6 +16,7 @@
     srcs = [
         "logfile_utils.cc",
         "logger.cc",
+        "logger_math.cc",
     ],
     hdrs = [
         "logfile_utils.h",
@@ -28,10 +29,12 @@
         "//aos/events:event_loop",
         "//aos/events:simulated_event_loop",
         "//aos/network:team_number",
+        "//aos/network:timestamp_filter",
         "//aos/time",
         "@com_github_google_flatbuffers//:flatbuffers",
         "@com_google_absl//absl/container:inlined_vector",
         "@com_google_absl//absl/strings",
+        "@org_tuxfamily_eigen//:eigen",
     ],
 )
 
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 03b89d1..44381f3 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -291,7 +291,7 @@
       chrono::nanoseconds(result.message().monotonic_sent_time()));
 
   newest_timestamp_ = std::max(newest_timestamp_, timestamp);
-  VLOG(1) << "Read from " << filename() << " data " << FlatbufferToJson(result);
+  VLOG(2) << "Read from " << filename() << " data " << FlatbufferToJson(result);
   return std::move(result);
 }
 
@@ -435,8 +435,14 @@
       }
     } else {
       if (!NextLogFile()) {
-        VLOG(1) << "End of log file.";
+        VLOG(1) << "End of log file " << filenames_.back();
         at_end_ = true;
+        for (MessageHeaderQueue *queue : channels_to_write_) {
+          if (queue == nullptr || queue->timestamp_merger == nullptr) {
+            continue;
+          }
+          queue->timestamp_merger->NoticeAtEnd();
+        }
         return false;
       }
     }
@@ -800,15 +806,40 @@
   return oldest_timestamp;
 }
 
+TimestampMerger::DeliveryTimestamp TimestampMerger::OldestTimestamp() const {
+  if (!has_timestamps_ || timestamp_heap_.size() == 0u) {
+    return TimestampMerger::DeliveryTimestamp{};
+  }
+
+  std::tuple<monotonic_clock::time_point, uint32_t, SplitMessageReader *>
+      oldest_timestamp_reader = timestamp_heap_.front();
+
+  std::tuple<monotonic_clock::time_point, uint32_t, const MessageHeader *>
+      oldest_timestamp = std::get<2>(oldest_timestamp_reader)
+                             ->oldest_message(channel_index_, node_index_);
+
+  TimestampMerger::DeliveryTimestamp timestamp;
+  timestamp.monotonic_event_time =
+      monotonic_clock::time_point(chrono::nanoseconds(
+          std::get<2>(oldest_timestamp)->monotonic_sent_time()));
+  timestamp.realtime_event_time = realtime_clock::time_point(
+      chrono::nanoseconds(std::get<2>(oldest_timestamp)->realtime_sent_time()));
+
+  timestamp.monotonic_remote_time =
+      monotonic_clock::time_point(chrono::nanoseconds(
+          std::get<2>(oldest_timestamp)->monotonic_remote_time()));
+  timestamp.realtime_remote_time =
+      realtime_clock::time_point(chrono::nanoseconds(
+          std::get<2>(oldest_timestamp)->realtime_remote_time()));
+
+  timestamp.remote_queue_index = std::get<2>(oldest_timestamp)->queue_index();
+  return timestamp;
+}
+
 std::tuple<TimestampMerger::DeliveryTimestamp, FlatbufferVector<MessageHeader>>
 TimestampMerger::PopOldest() {
   if (has_timestamps_) {
-    CHECK_GT(message_heap_.size(), 0u)
-        << ": Missing data from source node, no data available to match "
-           "timestamp on "
-        << configuration::CleanedChannelToString(
-               configuration_->channels()->Get(channel_index_));
-
+    // Read the timestamps.
     std::tuple<monotonic_clock::time_point, uint32_t,
                FlatbufferVector<MessageHeader>>
         oldest_timestamp = PopTimestampHeap();
@@ -830,6 +861,15 @@
         chrono::nanoseconds(
             std::get<2>(oldest_timestamp).message().monotonic_remote_time()));
 
+    // See if we have any data.  If not, pass the problem up the chain.
+    if (message_heap_.size() == 0u) {
+      VLOG(1) << "No data to match timestamp on "
+              << configuration::CleanedChannelToString(
+                     configuration_->channels()->Get(channel_index_));
+      return std::make_tuple(timestamp,
+                             std::move(std::get<2>(oldest_timestamp)));
+    }
+
     while (true) {
       {
         // Ok, now try grabbing data until we find one which matches.
@@ -842,16 +882,16 @@
             std::get<2>(oldest_message_ref)->monotonic_sent_time()));
 
         if (remote_monotonic_time < remote_timestamp_monotonic_time) {
-          LOG(INFO) << "Undelivered message, skipping.  Remote time is "
-                    << remote_monotonic_time << " timestamp is "
-                    << remote_timestamp_monotonic_time << " on channel "
-                    << channel_index_;
+          VLOG(1) << "Undelivered message, skipping.  Remote time is "
+                  << remote_monotonic_time << " timestamp is "
+                  << remote_timestamp_monotonic_time << " on channel "
+                  << channel_index_;
           PopMessageHeap();
           continue;
         } else if (remote_monotonic_time > remote_timestamp_monotonic_time) {
-          LOG(INFO) << "Data not found.  Remote time should be "
-                    << remote_timestamp_monotonic_time << " on channel "
-                    << channel_index_;
+          VLOG(1) << "Data not found.  Remote time should be "
+                  << remote_timestamp_monotonic_time << " on channel "
+                  << channel_index_;
           return std::make_tuple(timestamp,
                                  std::move(std::get<2>(oldest_timestamp)));
         }
@@ -902,6 +942,8 @@
   }
 }
 
+void TimestampMerger::NoticeAtEnd() { channel_merger_->NoticeAtEnd(); }
+
 namespace {
 std::vector<std::unique_ptr<SplitMessageReader>> MakeSplitMessageReaders(
     const std::vector<std::vector<std::string>> &filenames) {
@@ -997,6 +1039,18 @@
   return channel_heap_.front().first;
 }
 
+TimestampMerger::DeliveryTimestamp ChannelMerger::OldestTimestamp() const {
+  if (timestamp_heap_.size() == 0u) {
+    return TimestampMerger::DeliveryTimestamp{};
+  }
+  return timestamp_mergers_[timestamp_heap_.front().second].OldestTimestamp();
+}
+
+TimestampMerger::DeliveryTimestamp ChannelMerger::OldestTimestampForChannel(
+    int channel) const {
+  return timestamp_mergers_[channel].OldestTimestamp();
+}
+
 void ChannelMerger::PushChannelHeap(monotonic_clock::time_point timestamp,
                                     int channel_index) {
   // Pop and recreate the heap if it has already been pushed.  And since we are
@@ -1009,6 +1063,16 @@
         }));
     std::make_heap(channel_heap_.begin(), channel_heap_.end(),
                    ChannelHeapCompare);
+
+    if (timestamp_mergers_[channel_index].has_timestamps()) {
+      timestamp_heap_.erase(std::find_if(
+          timestamp_heap_.begin(), timestamp_heap_.end(),
+          [channel_index](const std::pair<monotonic_clock::time_point, int> x) {
+            return x.second == channel_index;
+          }));
+      std::make_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
+                     ChannelHeapCompare);
+    }
   }
 
   channel_heap_.push_back(std::make_pair(timestamp, channel_index));
@@ -1017,22 +1081,40 @@
   // put the oldest message first.
   std::push_heap(channel_heap_.begin(), channel_heap_.end(),
                  ChannelHeapCompare);
+
+  if (timestamp_mergers_[channel_index].has_timestamps()) {
+    timestamp_heap_.push_back(std::make_pair(timestamp, channel_index));
+    std::push_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
+                   ChannelHeapCompare);
+  }
 }
 
 std::tuple<TimestampMerger::DeliveryTimestamp, int,
            FlatbufferVector<MessageHeader>>
 ChannelMerger::PopOldest() {
-  CHECK(channel_heap_.size() > 0);
+  CHECK_GT(channel_heap_.size(), 0u);
   std::pair<monotonic_clock::time_point, int> oldest_channel_data =
       channel_heap_.front();
   int channel_index = oldest_channel_data.second;
   std::pop_heap(channel_heap_.begin(), channel_heap_.end(),
                 &ChannelHeapCompare);
   channel_heap_.pop_back();
+
   timestamp_mergers_[channel_index].set_pushed(false);
 
   TimestampMerger *merger = &timestamp_mergers_[channel_index];
 
+  if (merger->has_timestamps()) {
+    CHECK_GT(timestamp_heap_.size(), 0u);
+    std::pair<monotonic_clock::time_point, int> oldest_timestamp_data =
+        timestamp_heap_.front();
+    CHECK(oldest_timestamp_data == oldest_channel_data)
+        << ": Timestamp heap out of sync.";
+    std::pop_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
+                  &ChannelHeapCompare);
+    timestamp_heap_.pop_back();
+  }
+
   // Merger handles any queueing needed from here.
   std::tuple<TimestampMerger::DeliveryTimestamp,
              FlatbufferVector<MessageHeader>>
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index e5e0175..4ab4dca 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -302,6 +302,10 @@
   std::string DebugString(int channel) const;
   std::string DebugString(int channel, int node_index) const;
 
+  // Returns true if all the messages have been queued from the last log file
+  // in the list of log file chunks.
+  bool at_end() const { return at_end_; }
+
  private:
   // TODO(austin): Need to copy or refcount the message instead of running
   // multiple copies of the reader.  Or maybe have a "as_node" index and hide it
@@ -441,6 +445,9 @@
   // The caller can determine what the appropriate action is to recover.
   std::tuple<DeliveryTimestamp, FlatbufferVector<MessageHeader>> PopOldest();
 
+  // Returns the oldest forwarding timestamp.
+  DeliveryTimestamp OldestTimestamp() const;
+
   // Tracks if the channel merger has pushed this onto its heap or not.
   bool pushed() { return pushed_; }
   // Sets if this has been pushed to the channel merger heap.  Should only be
@@ -450,6 +457,13 @@
   // Returns a debug string with the heaps printed out.
   std::string DebugString() const;
 
+  // Returns true if we have timestamps.
+  bool has_timestamps() const { return has_timestamps_; }
+
+  // Records that one of the log files ran out of data.  This should only be
+  // called by a SplitMessageReader.
+  void NoticeAtEnd();
+
  private:
   // Pushes messages and timestamps to the corresponding heaps.
   void PushMessageHeap(
@@ -536,6 +550,12 @@
              FlatbufferVector<MessageHeader>>
   PopOldest();
 
+  // Returns the oldest timestamp in the timestamp heap.
+  TimestampMerger::DeliveryTimestamp OldestTimestamp() const;
+  // Returns the oldest timestamp in the timestamp heap for a specific channel.
+  TimestampMerger::DeliveryTimestamp OldestTimestampForChannel(
+      int channel) const;
+
   // Returns the config for this set of log files.
   const Configuration *configuration() const {
     return log_file_header()->configuration();
@@ -568,6 +588,15 @@
   // debugging what went wrong.
   std::string DebugString() const;
 
+  // Returns true if one of the log files has finished reading everything.  When
+  // log file chunks are involved, this means that the last chunk in a log file
+  // has been read.  It is acceptable to be missing data at this point in time.
+  bool at_end() const { return at_end_; }
+
+  // Marks that one of the log files is at the end.  This should only be called
+  // by timestamp mergers.
+  void NoticeAtEnd() { at_end_ = true; }
+
  private:
   // Pushes the timestamp for new data on the provided channel.
   void PushChannelHeap(monotonic_clock::time_point timestamp,
@@ -584,10 +613,15 @@
 
   // A heap of the channel readers and timestamps for the oldest data in each.
   std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap_;
+  // A heap of just the timestamp channel readers and timestamps for the oldest
+  // data in each.
+  std::vector<std::pair<monotonic_clock::time_point, int>> timestamp_heap_;
 
   // Configured node.
   const Node *node_;
 
+  bool at_end_ = false;
+
   // Cached copy of the list of nodes.
   std::vector<const Node *> nodes_;
 };
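
The intended consumer pattern for the new end-of-log tracking, as LogReader
uses it below (the loop itself is hypothetical):

    // A SplitMessageReader that runs out of chunks calls NoticeAtEnd() on its
    // TimestampMergers, which set at_end() on the ChannelMerger.  From then
    // on, a timestamp without matching data is expected rather than fatal.
    while (channel_merger.OldestMessage() != aos::monotonic_clock::max_time) {
      auto oldest = channel_merger.PopOldest();
      if (std::get<2>(oldest).message().data() == nullptr) {
        CHECK(channel_merger.at_end()) << ": Missing data mid-log.";
        continue;  // Tail-of-log timestamp with no data; drop it.
      }
      // ... deliver the message ...
    }
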
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index c7f64ae..57c2c2c 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -7,6 +7,7 @@
 #include <sys/uio.h>
 #include <vector>
 
+#include "Eigen/Dense"
 #include "absl/types/span.h"
 #include "aos/events/event_loop.h"
 #include "aos/events/logging/logger_generated.h"
@@ -19,6 +20,11 @@
             "If true, drop any forwarding entries with missing data.  If "
             "false, CHECK.");
 
+DEFINE_bool(timestamps_to_csv, false,
+            "If true, write all the time synchronization information to a set "
+            "of CSV files in /tmp/.  This should only be needed when debugging "
+            "time synchronization.");
+
 namespace aos {
 namespace logger {
 
@@ -109,12 +115,12 @@
 // TODO(austin): Set the remote start time to the first time we see a remote
 // message when we are logging those messages separately?  Need to signal what
 // to do, or how to get a good timestamp.
-
 void Logger::WriteHeader() {
   for (const Node *node : log_namer_->nodes()) {
     WriteHeader(node);
   }
 }
+
 void Logger::WriteHeader(const Node *node) {
   // Now write the header with this timestamp in it.
   flatbuffers::FlatBufferBuilder fbb;
@@ -294,14 +300,20 @@
   MakeRemappedConfig();
 
   if (!configuration::MultiNode(configuration())) {
-    auto it = channel_mergers_.insert(std::make_pair(nullptr, State{}));
-    State *state = &(it.first->second);
+    states_.emplace_back(std::make_unique<State>());
+    State *state = states_[0].get();
 
     state->channel_merger = std::make_unique<ChannelMerger>(filenames);
+  } else {
+    states_.resize(configuration()->nodes()->size());
   }
 }
 
-LogReader::~LogReader() { Deregister(); }
+LogReader::~LogReader() {
+  Deregister();
+  if (offset_fp_ != nullptr) {
+    fclose(offset_fp_);
+  }
+}
 
 const Configuration *LogReader::logged_configuration() const {
   return log_file_header_.message().configuration();
@@ -324,17 +336,19 @@
 }
 
 monotonic_clock::time_point LogReader::monotonic_start_time(const Node *node) {
-  auto it = channel_mergers_.find(node);
-  CHECK(it != channel_mergers_.end())
-      << ": Unknown node " << FlatbufferToJson(node);
-  return it->second.channel_merger->monotonic_start_time();
+  State *state =
+      states_[configuration::GetNodeIndex(configuration(), node)].get();
+  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
+
+  return state->channel_merger->monotonic_start_time();
 }
 
 realtime_clock::time_point LogReader::realtime_start_time(const Node *node) {
-  auto it = channel_mergers_.find(node);
-  CHECK(it != channel_mergers_.end())
-      << ": Unknown node " << FlatbufferToJson(node);
-  return it->second.channel_merger->realtime_start_time();
+  State *state =
+      states_[configuration::GetNodeIndex(configuration(), node)].get();
+  CHECK(state != nullptr) << ": Unknown node " << FlatbufferToJson(node);
+
+  return state->channel_merger->realtime_start_time();
 }
 
 void LogReader::Register() {
@@ -347,9 +361,10 @@
   event_loop_factory_ = event_loop_factory;
 
   for (const Node *node : configuration::GetNodes(configuration())) {
-    auto it = channel_mergers_.insert(std::make_pair(node, State{}));
-
-    State *state = &(it.first->second);
+    const size_t node_index =
+        configuration::GetNodeIndex(configuration(), node);
+    states_[node_index] = std::make_unique<State>();
+    State *state = states_[node_index].get();
 
     state->channel_merger = std::make_unique<ChannelMerger>(filenames_);
 
@@ -361,47 +376,169 @@
     Register(state->event_loop_unique_ptr.get());
   }
 
-  // Basic idea is that we want to
-  //   1) Find the node which booted first.
-  //   2) Setup the clocks so that each clock is at the time it would be at when
-  //      the first node booted.
+  // We now need to seed our per-node time offsets and get everything set up
+  // to run.
+  const size_t num_nodes = !configuration::MultiNode(logged_configuration())
+                               ? 1u
+                               : logged_configuration()->nodes()->size();
 
-  realtime_clock::time_point earliest_boot_time = realtime_clock::max_time;
-  for (std::pair<const Node *const, State> &state_pair : channel_mergers_) {
-    State *state = &(state_pair.second);
+  // It is easiest to solve for per-node offsets with a matrix rather than
+  // trying to solve the equations by hand.  So let's get after it.
+  //
+  // Now, build up the map matrix.
+  //
+  // sample_matrix_ = map_matrix_ * offset_matrix_
+  map_matrix_ = Eigen::MatrixXd::Zero(filters_.size() + 1, num_nodes);
 
-    const realtime_clock::time_point boot_time =
-        state->channel_merger->realtime_start_time() -
-        state->channel_merger->monotonic_start_time().time_since_epoch();
+  sample_matrix_ = Eigen::VectorXd::Zero(filters_.size() + 1);
+  offset_matrix_ = Eigen::VectorXd::Zero(num_nodes);
 
-    if (boot_time < earliest_boot_time) {
-      earliest_boot_time = boot_time;
+  // And the base offset matrix, which will be a copy of the initial offset
+  // matrix.
+  base_offset_matrix_ =
+      Eigen::Matrix<std::chrono::nanoseconds, Eigen::Dynamic, 1>::Zero(
+          num_nodes);
+
+  // All offsets should sum to 0.  Add that as the first constraint in our least
+  // squares.
+  map_matrix_.row(0).setOnes();
+
+  {
+    // Now, add the a - b -> sample elements.
+    size_t i = 1;
+    for (std::pair<const std::tuple<const Node *, const Node *>,
+                   message_bridge::ClippedAverageFilter> &filter : filters_) {
+      const Node *const node_a = std::get<0>(filter.first);
+      const Node *const node_b = std::get<1>(filter.first);
+
+      const size_t node_a_index =
+          configuration::GetNodeIndex(configuration(), node_a);
+      const size_t node_b_index =
+          configuration::GetNodeIndex(configuration(), node_b);
+
+      // +a
+      map_matrix_(i, node_a_index) = 1.0;
+      // -b
+      map_matrix_(i, node_b_index) = -1.0;
+
+      // -> sample
+      filter.second.set_sample_pointer(&sample_matrix_(i, 0));
+
+      ++i;
     }
   }
 
+  // Rank of the map matrix tells you if all the nodes are in communication with
+  // each other, which tells you if the offsets are observable.
+  const size_t connected_nodes =
+      Eigen::FullPivLU<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>(
+          map_matrix_)
+          .rank();
+
+  // We don't need to support isolated nodes until someone has a real use case.
+  CHECK_EQ(connected_nodes, num_nodes)
+      << ": There is a node which isn't communicating with the rest.";
+
+  // Now, iterate through all the timestamps from all the nodes and seed
+  // everything.
+  for (std::unique_ptr<State> &state : states_) {
+    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
+      TimestampMerger::DeliveryTimestamp timestamp =
+          state->channel_merger->OldestTimestampForChannel(i);
+      if (timestamp.monotonic_event_time != monotonic_clock::min_time) {
+        CHECK(state->MaybeUpdateTimestamp(timestamp, i));
+      }
+    }
+  }
+
+  // Make sure all the samples have been seeded.
+  for (int i = 1; i < sample_matrix_.rows(); ++i) {
+    // The seeding logic is pretty basic right now because we don't have great
+    // use cases yet.  It wants to see data from every node.  Blow up for now,
+    // and once we have a reason to do something different, update this logic.
+    // Maybe read further in the log file?  Or seed off the realtime time?
+    CHECK_NE(sample_matrix_(i, 0), 0.0)
+        << ": Sample " << i << " is not seeded.";
+  }
+
+  // And solve.
+  offset_matrix_ = SolveOffsets();
+
+  // Save off the base offsets so we can work in deltas from here out.  That
+  // will significantly simplify the numerical precision problems.
+  for (size_t i = 0; i < num_nodes; ++i) {
+    base_offset_matrix_(i, 0) =
+        std::chrono::duration_cast<std::chrono::nanoseconds>(
+            std::chrono::duration<double>(offset_matrix_(i, 0)));
+  }
+
+  {
+    // Shift everything so we never could (reasonably) require the distributed
+    // clock to have a large backwards jump in time.  This makes it so the boot
+    // time on the node that has been up the longest will essentially start
+    // matching the distributed clock.
+    const chrono::nanoseconds offset = -base_offset_matrix_.maxCoeff();
+    for (int i = 0; i < base_offset_matrix_.rows(); ++i) {
+      base_offset_matrix_(i, 0) += offset;
+    }
+  }
+
+  {
+    // Re-compute the samples and setup all the filters so that they
+    // subtract this base offset.
+
+    size_t i = 1;
+    for (std::pair<const std::tuple<const Node *, const Node *>,
+                   message_bridge::ClippedAverageFilter> &filter : filters_) {
+      CHECK(filter.second.sample_pointer() == &sample_matrix_(i, 0));
+
+      const Node *const node_a = std::get<0>(filter.first);
+      const Node *const node_b = std::get<1>(filter.first);
+
+      const size_t node_a_index =
+          configuration::GetNodeIndex(configuration(), node_a);
+      const size_t node_b_index =
+          configuration::GetNodeIndex(configuration(), node_b);
+
+      filter.second.set_base_offset(base_offset_matrix_(node_a_index) -
+                                    base_offset_matrix_(node_b_index));
+
+      ++i;
+    }
+  }
+
+  // Now, iterate again through all the offsets now that we have set the base
+  // offset to something sane.  This will seed everything with an accurate
+  // initial offset.
+  for (std::unique_ptr<State> &state : states_) {
+    for (size_t i = 0; i < logged_configuration()->channels()->size(); ++i) {
+      TimestampMerger::DeliveryTimestamp timestamp =
+          state->channel_merger->OldestTimestampForChannel(i);
+      if (timestamp.monotonic_event_time != monotonic_clock::min_time) {
+        CHECK(state->MaybeUpdateTimestamp(timestamp, i));
+      }
+    }
+  }
+
+  UpdateOffsets();
+
   // We want to start the log file at the last start time of the log files from
   // all the nodes.  Compute how long each node's simulation needs to run to
   // move time to this point.
-  monotonic_clock::duration run_time = monotonic_clock::duration(0);
+  distributed_clock::time_point start_time = distributed_clock::min_time;
 
-  for (std::pair<const Node *const, State> &state_pair : channel_mergers_) {
-    State *state = &(state_pair.second);
-
-    const realtime_clock::time_point boot_time =
-        state->channel_merger->realtime_start_time() -
-        state->channel_merger->monotonic_start_time().time_since_epoch();
-
-    // And start each node's clocks so the realtime clocks line up for the start
-    // times.  This will let us start using it, but isn't good enough.
-    state->node_event_loop_factory->SetMonotonicNow(
-        monotonic_clock::time_point(earliest_boot_time - boot_time));
+  for (std::unique_ptr<State> &state : states_) {
+    // Setup the realtime clock to have something sane in it now.
     state->node_event_loop_factory->SetRealtimeOffset(
         state->channel_merger->monotonic_start_time(),
         state->channel_merger->realtime_start_time());
-    run_time =
-        std::max(run_time, state->channel_merger->monotonic_start_time() -
-                               state->node_event_loop_factory->monotonic_now());
+    // And start computing the start time on the distributed clock now that
+    // the offsets are seeded.
+    start_time = std::max(start_time,
+                          state->node_event_loop_factory->ToDistributedClock(
+                              state->channel_merger->monotonic_start_time()));
   }
+  CHECK_GE(start_time, distributed_clock::epoch());
 
   // Forwarding is tracked per channel.  If it is enabled, we want to turn it
   // off.  Otherwise messages replayed will get forwarded across to the other
@@ -413,9 +550,8 @@
       const Node *node = configuration::GetNode(
           configuration(), channel->source_node()->string_view());
 
-      auto state_pair = channel_mergers_.find(node);
-      CHECK(state_pair != channel_mergers_.end());
-      State *state = &(state_pair->second);
+      State *state =
+          states_[configuration::GetNodeIndex(configuration(), node)].get();
 
       const Channel *remapped_channel =
           RemapChannel(state->event_loop, channel);
@@ -428,16 +564,90 @@
   // to timestamps on log files where the timestamp log file starts before the
   // data.  In this case, it is reasonable to expect missing data.
   ignore_missing_data_ = true;
-  event_loop_factory_->RunFor(run_time);
+  event_loop_factory_->RunFor(start_time.time_since_epoch());
   // Now that we are running for real, missing data means that the log file is
   // corrupted or went wrong.
   ignore_missing_data_ = false;
 }
 
+void LogReader::UpdateOffsets() {
+  // TODO(austin): Evaluate less accurate inverses.  We might be able to
+  // do some tricks to keep the accuracy up.
+  offset_matrix_ = SolveOffsets();
+
+  size_t node_index = 0;
+  for (std::unique_ptr<State> &state : states_) {
+    state->node_event_loop_factory->SetDistributedOffset(offset(node_index));
+    ++node_index;
+  }
+}
+
+std::tuple<message_bridge::ClippedAverageFilter *, bool> LogReader::GetFilter(
+    const Node *node_a, const Node *node_b) {
+  CHECK_NE(node_a, node_b);
+  CHECK_EQ(configuration::GetNode(configuration(), node_a), node_a);
+  CHECK_EQ(configuration::GetNode(configuration(), node_b), node_b);
+
+  if (node_a > node_b) {
+    return std::make_tuple(std::get<0>(GetFilter(node_b, node_a)), false);
+  }
+
+  auto tuple = std::make_tuple(node_a, node_b);
+
+  auto it = filters_.find(tuple);
+
+  if (it == filters_.end()) {
+    auto &x = filters_
+                  .insert(std::make_pair(
+                      tuple, message_bridge::ClippedAverageFilter()))
+                  .first->second;
+    if (FLAGS_timestamps_to_csv) {
+      std::string fwd_name =
+          absl::StrCat("/tmp/timestamp_", node_a->name()->string_view(), "_",
+                       node_b->name()->string_view(), ".csv");
+      x.fwd_fp = fopen(fwd_name.c_str(), "w");
+      std::string rev_name =
+          absl::StrCat("/tmp/timestamp_", node_b->name()->string_view(), "_",
+                       node_a->name()->string_view(), ".csv");
+      x.rev_fp = fopen(rev_name.c_str(), "w");
+    }
+
+    return std::make_tuple(&x, true);
+  } else {
+    return std::make_tuple(&(it->second), true);
+  }
+}
+
+bool LogReader::State::MaybeUpdateTimestamp(
+    const TimestampMerger::DeliveryTimestamp &channel_timestamp,
+    int channel_index) {
+  if (channel_timestamp.monotonic_remote_time == monotonic_clock::min_time) {
+    return false;
+  }
+
+  // Got a forwarding timestamp!
+  CHECK(std::get<0>(filters[channel_index]) != nullptr);
+
+  // Call the correct method depending on if we are the forward or reverse
+  // direction here.
+  if (std::get<1>(filters[channel_index])) {
+    std::get<0>(filters[channel_index])
+        ->FwdSample(channel_timestamp.monotonic_event_time,
+                    channel_timestamp.monotonic_event_time -
+                        channel_timestamp.monotonic_remote_time);
+  } else {
+    std::get<0>(filters[channel_index])
+        ->RevSample(channel_timestamp.monotonic_event_time,
+                    channel_timestamp.monotonic_event_time -
+                        channel_timestamp.monotonic_remote_time);
+  }
+  return true;
+}
+
 void LogReader::Register(EventLoop *event_loop) {
-  auto state_pair = channel_mergers_.find(event_loop->node());
-  CHECK(state_pair != channel_mergers_.end());
-  State *state = &(state_pair->second);
+  State *state =
+      states_[configuration::GetNodeIndex(configuration(), event_loop->node())]
+          .get();
 
   state->event_loop = event_loop;
 
@@ -450,12 +660,29 @@
   state->channel_merger->SetNode(event_loop->node());
 
   state->channels.resize(logged_configuration()->channels()->size());
+  state->filters.resize(state->channels.size());
+
+  state->channel_target_event_loop_factory.resize(state->channels.size());
 
   for (size_t i = 0; i < state->channels.size(); ++i) {
     const Channel *channel =
         RemapChannel(event_loop, logged_configuration()->channels()->Get(i));
 
     state->channels[i] = event_loop->MakeRawSender(channel);
+
+    state->filters[i] = std::make_tuple(nullptr, false);
+
+    if (!configuration::ChannelIsSendableOnNode(channel, event_loop->node()) &&
+        configuration::ChannelIsReadableOnNode(channel, event_loop->node())) {
+      const Node *target_node = configuration::GetNode(
+          event_loop->configuration(), channel->source_node()->string_view());
+      state->filters[i] = GetFilter(event_loop->node(), target_node);
+
+      if (event_loop_factory_ != nullptr) {
+        state->channel_target_event_loop_factory[i] =
+            event_loop_factory_->GetNodeEventLoopFactory(target_node);
+      }
+    }
   }
 
   state->timer_handler = event_loop->AddTimer([this, state]() {
@@ -466,6 +693,7 @@
       }
       return;
     }
+    bool update_offsets = false;
     TimestampMerger::DeliveryTimestamp channel_timestamp;
     int channel_index;
     FlatbufferVector<MessageHeader> channel_data =
@@ -477,20 +705,60 @@
     const monotonic_clock::time_point monotonic_now =
         state->event_loop->context().monotonic_event_time;
     CHECK(monotonic_now == channel_timestamp.monotonic_event_time)
-        << ": Now " << monotonic_now.time_since_epoch().count()
-        << " trying to send "
-        << channel_timestamp.monotonic_event_time.time_since_epoch().count();
+        << ": " << FlatbufferToJson(state->event_loop->node()) << " Now "
+        << monotonic_now << " trying to send "
+        << channel_timestamp.monotonic_event_time << " failure "
+        << state->channel_merger->DebugString();
 
     if (channel_timestamp.monotonic_event_time >
             state->channel_merger->monotonic_start_time() ||
         event_loop_factory_ != nullptr) {
-      if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries) ||
+      if ((!ignore_missing_data_ && !FLAGS_skip_missing_forwarding_entries &&
+           !state->channel_merger->at_end()) ||
           channel_data.message().data() != nullptr) {
         CHECK(channel_data.message().data() != nullptr)
             << ": Got a message without data.  Forwarding entry which was "
                "not matched?  Use --skip_missing_forwarding_entries to ignore "
                "this.";
 
+        if (state->MaybeUpdateTimestamp(channel_timestamp, channel_index)) {
+          // Confirm that the message was sent on the sending node before the
+          // destination node (this node).  As a proxy, do this by making sure
+          // that time on the source node is past when the message was sent.
+          CHECK_LT(channel_timestamp.monotonic_remote_time,
+                   state->channel_target_event_loop_factory[channel_index]
+                       ->monotonic_now());
+
+          update_offsets = true;
+
+          if (FLAGS_timestamps_to_csv) {
+            if (offset_fp_ == nullptr) {
+              offset_fp_ = fopen("/tmp/offsets.csv", "w");
+              fprintf(
+                  offset_fp_,
+                  "# time_since_start, offset node 0, offset node 1, ...\n");
+              first_time_ = channel_timestamp.realtime_event_time;
+            }
+
+            fprintf(offset_fp_, "%.9f",
+                    std::chrono::duration_cast<std::chrono::duration<double>>(
+                        channel_timestamp.realtime_event_time - first_time_)
+                        .count());
+            for (int i = 0; i < base_offset_matrix_.rows(); ++i) {
+              fprintf(
+                  offset_fp_, ", %.9f",
+                  offset_matrix_(i, 0) +
+                      std::chrono::duration_cast<std::chrono::duration<double>>(
+                          base_offset_matrix_(i, 0))
+                          .count());
+            }
+            fprintf(offset_fp_, "\n");
+          }
+
+        } else {
+          CHECK(std::get<0>(state->filters[channel_index]) == nullptr);
+        }
+
         // If we have access to the factory, use it to fix the realtime time.
         if (state->node_event_loop_factory != nullptr) {
           state->node_event_loop_factory->SetRealtimeOffset(
@@ -504,7 +772,16 @@
             channel_timestamp.monotonic_remote_time,
             channel_timestamp.realtime_remote_time,
             channel_timestamp.remote_queue_index);
+      } else if (state->channel_merger->at_end()) {
+        // We are at the end of the log file and found missing data.  Finish
+        // reading the rest of the log file and call it quits.  We don't want to
+        // replay partial data.
+        while (state->channel_merger->OldestMessage() !=
+               monotonic_clock::max_time) {
+          state->channel_merger->PopOldest();
+        }
       }
+
     } else {
       LOG(WARNING)
           << "Not sending data from before the start of the log file. "
@@ -526,6 +803,13 @@
                                     std::chrono::nanoseconds(1));
       }
     }
+
+    // Once we make this call, the current time changes.  So do everything which
+    // involves time before changing it.  That especially includes sending the
+    // message.
+    if (update_offsets) {
+      UpdateOffsets();
+    }
   });
 
   ++live_nodes_;
@@ -540,10 +824,7 @@
 void LogReader::Deregister() {
   // Make sure that things get destroyed in the correct order, rather than
   // relying on getting the order correct in the class definition.
-  for (const Node *node : Nodes()) {
-    auto state_pair = channel_mergers_.find(node);
-    CHECK(state_pair != channel_mergers_.end());
-    State *state = &(state_pair->second);
+  for (std::unique_ptr<State> &state : states_) {
     for (size_t i = 0; i < state->channels.size(); ++i) {
       state->channels[i].reset();
     }
@@ -578,8 +859,8 @@
 }
 
 void LogReader::MakeRemappedConfig() {
-  for (std::pair<const Node *const, State> &state : channel_mergers_) {
-    CHECK(!state.second.event_loop)
+  for (std::unique_ptr<State> &state : states_) {
+    CHECK(!state->event_loop)
         << ": Can't change the mapping after the events are scheduled.";
   }
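
The map matrix built in Register() is easier to see on a concrete three-node
example (all values hypothetical):

    // Row 0 pins the solution: offset_0 + offset_1 + offset_2 = 0.  Each
    // filter pair (a, b) contributes one +1/-1 row so that a - b -> sample.
    Eigen::MatrixXd map = Eigen::MatrixXd::Zero(3, 3);
    map.row(0).setOnes();
    map(1, 0) = 1.0;  map(1, 1) = -1.0;  // filter (node 0, node 1)
    map(2, 1) = 1.0;  map(2, 2) = -1.0;  // filter (node 1, node 2)

    Eigen::VectorXd samples(3);
    samples << 0.0, 0.002, -0.001;  // Seconds, seeded from the filters.

    // Full rank means every node is in communication, so the offsets are
    // observable; this is the same check Register() performs.
    CHECK_EQ(Eigen::FullPivLU<Eigen::MatrixXd>(map).rank(), 3);

    // The same solve as SolveOffsets(): offsets(i) is node i's offset from
    // the distributed clock, in seconds.
    const Eigen::VectorXd offsets =
        map.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(samples);
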
 
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index e0350bb..34dbd24 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -1,15 +1,19 @@
 #ifndef AOS_EVENTS_LOGGER_H_
 #define AOS_EVENTS_LOGGER_H_
 
+#include <chrono>
 #include <deque>
 #include <string_view>
 #include <vector>
 
+#include "Eigen/Dense"
+#include "absl/strings/str_cat.h"
 #include "absl/types/span.h"
 #include "aos/events/event_loop.h"
 #include "aos/events/logging/logfile_utils.h"
 #include "aos/events/logging/logger_generated.h"
 #include "aos/events/simulated_event_loop.h"
+#include "aos/network/timestamp_filter.h"
 #include "aos/time/time.h"
 #include "flatbuffers/flatbuffers.h"
 
@@ -174,6 +178,7 @@
       data_writers_;
 };
 
+
 // Logs all channels available in the event loop to disk every 100 ms.
 // Start by logging one message per channel to capture any state and
 // configuration that is sent rarely on a channel and would affect execution.
@@ -359,6 +364,8 @@
   // the header is likely distracting.
   FlatbufferVector<LogFileHeader> log_file_header_;
 
+  Eigen::Matrix<double, Eigen::Dynamic, 1> SolveOffsets();
+
   // State per node.
   struct State {
     // Log file.
@@ -373,10 +380,68 @@
     EventLoop *event_loop = nullptr;
     // And timer used to send messages.
     TimerHandler *timer_handler;
+
+    // Updates the timestamp filter with the timestamp.  Returns true if the
+    // provided timestamp was actually a forwarding timestamp and used, and
+    // false otherwise.
+    bool MaybeUpdateTimestamp(
+        const TimestampMerger::DeliveryTimestamp &channel_timestamp,
+        int channel_index);
+
+    // Filters (or nullptr if it isn't a forwarded channel) for each channel.
+    // This corresponds to the object which is shared among all the channels
+    // going between 2 nodes.  The second element in the tuple indicates if this
+    // is the primary direction or not.
+    std::vector<std::tuple<message_bridge::ClippedAverageFilter *, bool>>
+        filters;
+
+    // List of NodeEventLoopFactorys (or nullptr if it isn't a forwarded
+    // channel) which correspond to the originating node.
+    std::vector<NodeEventLoopFactory *> channel_target_event_loop_factory;
   };
 
-  // Map of nodes to States used to hold all the state for all the nodes.
-  std::map<const Node *, State> channel_mergers_;
+  // Node index -> State.
+  std::vector<std::unique_ptr<State>> states_;
+
+  // Creates the requested filter if it doesn't exist, regardless of whether
+  // these nodes can actually communicate directly.  The second return value
+  // reports if this is the primary direction or not.
+  std::tuple<message_bridge::ClippedAverageFilter *, bool> GetFilter(
+      const Node *node_a, const Node *node_b);
+
+  // FILE to write offsets to (if populated).
+  FILE *offset_fp_ = nullptr;
+  // Timestamp of the first piece of data used for the horizontal axis on the
+  // plot.
+  aos::realtime_clock::time_point first_time_;
+
+  // List of filters for a connection.  The pointer to the first node will be
+  // less than the second node.
+  std::map<std::tuple<const Node *, const Node *>,
+           message_bridge::ClippedAverageFilter>
+      filters_;
+
+  // Returns the offset from the monotonic clock for a node to the distributed
+  // clock.  distributed = monotonic + offset;
+  std::chrono::nanoseconds offset(int node_index) const {
+    return -std::chrono::duration_cast<std::chrono::nanoseconds>(
+               std::chrono::duration<double>(offset_matrix_(node_index))) -
+           base_offset_matrix_(node_index);
+  }
+
+  // Updates the offset matrix solution and sets the per-node distributed
+  // offsets in the factory.
+  void UpdateOffsets();
+
+  // sample_matrix_ = map_matrix_ * offset_matrix_
+  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> map_matrix_;
+  Eigen::Matrix<double, Eigen::Dynamic, 1> sample_matrix_;
+  Eigen::Matrix<double, Eigen::Dynamic, 1> offset_matrix_;
+
+  // Base offsets.  The actual offset is the sum of this and the offset matrix.
+  // This removes some of the dynamic range challenges from the double above.
+  Eigen::Matrix<std::chrono::nanoseconds, Eigen::Dynamic, 1>
+      base_offset_matrix_;
 
   std::unique_ptr<FlatbufferDetachedBuffer<Configuration>>
       remapped_configuration_buffer_;
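
A worked instance of offset() above (numbers hypothetical): if node 1 seeded
base_offset_matrix_(1) = -2'000'000ns at startup, and the latest solve left a
residual delta of offset_matrix_(1) = 1e-6 seconds, then
offset(1) = -1'000ns - (-2'000'000ns) = +1'999'000ns, which UpdateOffsets()
hands to NodeEventLoopFactory::SetDistributedOffset() so that
distributed = monotonic + offset holds on that node.
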
diff --git a/aos/events/logging/logger_math.cc b/aos/events/logging/logger_math.cc
new file mode 100644
index 0000000..313894b
--- /dev/null
+++ b/aos/events/logging/logger_math.cc
@@ -0,0 +1,16 @@
+#include "aos/events/logging/logger.h"
+
+#include "Eigen/Dense"
+
+namespace aos {
+namespace logger {
+
+// This is slow to compile, so we put it in a separate file.  That buys more
+// build parallelism and fewer rebuilds when the rest of the logger changes.
+Eigen::Matrix<double, Eigen::Dynamic, 1> LogReader::SolveOffsets() {
+  return map_matrix_.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV)
+      .solve(sample_matrix_);
+}
+
+}  // namespace logger
+}  // namespace aos
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 9f2969e..9e69ae4 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -325,10 +325,10 @@
     event_loop_factory_.RunFor(chrono::milliseconds(95));
 
     Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
-                      std::chrono::milliseconds(100));
+                      chrono::milliseconds(100));
 
     Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
-                      std::chrono::milliseconds(100));
+                      chrono::milliseconds(100));
     event_loop_factory_.RunFor(chrono::milliseconds(20000));
   }
 
@@ -547,12 +547,12 @@
     event_loop_factory_.RunFor(chrono::milliseconds(95));
 
     Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
-                      std::chrono::milliseconds(100));
+                      chrono::milliseconds(100));
 
     event_loop_factory_.RunFor(chrono::milliseconds(200));
 
     Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
-                      std::chrono::milliseconds(100));
+                      chrono::milliseconds(100));
     event_loop_factory_.RunFor(chrono::milliseconds(20000));
   }
 
@@ -638,12 +638,11 @@
 
   reader.Deregister();
 }
-// TODO(austin): We can write a test which recreates a logfile and confirms that
-// we get it back.  That is the ultimate test.
 
-// Tests that we can read log files where the monotonic clocks don't match
-// correctly.
-TEST_F(MultinodeLoggerTest, MissmatchingTimeStart) {
+// Tests that we can read log files where the monotonic clocks drift and don't
+// match correctly.  While we are here, also test that logs with different
+// ending times are readable.
+TEST_F(MultinodeLoggerTest, MismatchedClocks) {
   const ::std::string tmpdir(getenv("TEST_TMPDIR"));
   const ::std::string logfile_base = tmpdir + "/multi_logfile";
   const ::std::string logfile1 = logfile_base + "_pi1_data.bfbs";
@@ -659,47 +658,76 @@
   LOG(INFO) << "Logging data to " << logfile1 << " and " << logfile3;
 
   {
-   NodeEventLoopFactory *pi2 = event_loop_factory_.GetNodeEventLoopFactory(pi2_);
-   LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
-             << pi2->realtime_now() << " distributed "
-             << pi2->ToDistributedClock(pi2->monotonic_now());
+    NodeEventLoopFactory *pi2 =
+        event_loop_factory_.GetNodeEventLoopFactory(pi2_);
+    LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
+              << pi2->realtime_now() << " distributed "
+              << pi2->ToDistributedClock(pi2->monotonic_now());
 
-   pi2->SetMonotonicNow(pi2->monotonic_now() + std::chrono::seconds(1000));
-   LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
-             << pi2->realtime_now() << " distributed "
-             << pi2->ToDistributedClock(pi2->monotonic_now());
+    const chrono::nanoseconds initial_pi2_offset = -chrono::seconds(1000);
+    chrono::nanoseconds pi2_offset = initial_pi2_offset;
 
-   std::unique_ptr<EventLoop> ping_event_loop =
-       event_loop_factory_.MakeEventLoop("ping", pi1_);
-   Ping ping(ping_event_loop.get());
-   std::unique_ptr<EventLoop> pong_event_loop =
-       event_loop_factory_.MakeEventLoop("pong", pi2_);
-   Pong pong(pong_event_loop.get());
+    pi2->SetDistributedOffset(pi2_offset);
+    LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
+              << pi2->realtime_now() << " distributed "
+              << pi2->ToDistributedClock(pi2->monotonic_now());
 
-   std::unique_ptr<EventLoop> pi1_logger_event_loop =
-       event_loop_factory_.MakeEventLoop("logger", pi1_);
-   std::unique_ptr<LogNamer> pi1_log_namer =
-       std::make_unique<MultiNodeLogNamer>(
-           logfile_base, pi1_logger_event_loop->configuration(),
-           pi1_logger_event_loop->node());
+    std::unique_ptr<EventLoop> ping_event_loop =
+        event_loop_factory_.MakeEventLoop("ping", pi1_);
+    Ping ping(ping_event_loop.get());
+    std::unique_ptr<EventLoop> pong_event_loop =
+        event_loop_factory_.MakeEventLoop("pong", pi2_);
+    Pong pong(pong_event_loop.get());
 
-   std::unique_ptr<EventLoop> pi2_logger_event_loop =
-       event_loop_factory_.MakeEventLoop("logger", pi2_);
-   std::unique_ptr<LogNamer> pi2_log_namer =
-       std::make_unique<MultiNodeLogNamer>(
-           logfile_base, pi2_logger_event_loop->configuration(),
-           pi2_logger_event_loop->node());
+    std::unique_ptr<EventLoop> pi2_logger_event_loop =
+        event_loop_factory_.MakeEventLoop("logger", pi2_);
+    std::unique_ptr<LogNamer> pi2_log_namer =
+        std::make_unique<MultiNodeLogNamer>(
+            logfile_base, pi2_logger_event_loop->configuration(),
+            pi2_logger_event_loop->node());
 
-   event_loop_factory_.RunFor(chrono::milliseconds(95));
+    for (int i = 0; i < 95; ++i) {
+      pi2_offset += chrono::nanoseconds(200);
+      pi2->SetDistributedOffset(pi2_offset);
+      event_loop_factory_.RunFor(chrono::milliseconds(1));
+    }
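+    // Stepping the offset 200ns for every 1ms of simulated time models a
+    // 200ppm clock drift between pi2 and the distributed clock.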
 
-   Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
-                     std::chrono::milliseconds(100));
+    Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
+                      chrono::milliseconds(100));
 
-   event_loop_factory_.RunFor(chrono::milliseconds(200));
+    event_loop_factory_.RunFor(chrono::milliseconds(200));
 
-   Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
-                     std::chrono::milliseconds(100));
-   event_loop_factory_.RunFor(chrono::milliseconds(20000));
+    {
+      // Run pi1's logger for only part of the time.
+      std::unique_ptr<EventLoop> pi1_logger_event_loop =
+          event_loop_factory_.MakeEventLoop("logger", pi1_);
+      std::unique_ptr<LogNamer> pi1_log_namer =
+          std::make_unique<MultiNodeLogNamer>(
+              logfile_base, pi1_logger_event_loop->configuration(),
+              pi1_logger_event_loop->node());
+
+      Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
+                        chrono::milliseconds(100));
+
+      for (int i = 0; i < 20000; ++i) {
+        pi2_offset += chrono::nanoseconds(200);
+        pi2->SetDistributedOffset(pi2_offset);
+        event_loop_factory_.RunFor(chrono::milliseconds(1));
+      }
+
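+      // The skew accumulated so far exceeds the send plus network delay, so
+      // replay only lines up if the clock drift is actually being tracked.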
+      EXPECT_GT(pi2_offset - initial_pi2_offset,
+                event_loop_factory_.send_delay() +
+                    event_loop_factory_.network_delay());
+
+      for (int i = 0; i < 40000; ++i) {
+        pi2_offset -= chrono::nanoseconds(200);
+        pi2->SetDistributedOffset(pi2_offset);
+        event_loop_factory_.RunFor(chrono::milliseconds(1));
+      }
+    }
+
+    // And log a bit more on pi2.
+    event_loop_factory_.RunFor(chrono::milliseconds(400));
   }
 
   LogReader reader(
@@ -785,14 +813,17 @@
       });
 
   log_reader_factory.Run();
-  EXPECT_EQ(pi1_ping_count, 2030);
-  EXPECT_EQ(pi2_ping_count, 2030);
-  EXPECT_EQ(pi1_pong_count, 2030);
-  EXPECT_EQ(pi2_pong_count, 2030);
+  EXPECT_EQ(pi1_ping_count, 6030);
+  EXPECT_EQ(pi2_ping_count, 6030);
+  EXPECT_EQ(pi1_pong_count, 6030);
+  EXPECT_EQ(pi2_pong_count, 6030);
 
   reader.Deregister();
 }
 
+// TODO(austin): We can write a test which recreates a logfile and confirms that
+// we get it back.  That is the ultimate test.
+
 }  // namespace testing
 }  // namespace logger
 }  // namespace aos
diff --git a/aos/events/multinode_pingpong.json b/aos/events/multinode_pingpong.json
index 80ffe76..c9bc53e 100644
--- a/aos/events/multinode_pingpong.json
+++ b/aos/events/multinode_pingpong.json
@@ -25,14 +25,6 @@
       "max_size": 2048
     },
     {
-      "name": "/aos/roborio",
-      "type": "aos.logging.LogMessageFbs",
-      "source_node": "roborio",
-      "frequency": 200,
-      "num_senders": 20,
-      "max_size": 2048
-    },
-    {
       "name": "/aos/pi1",
       "type": "aos.message_bridge.ServerStatistics",
       "source_node": "pi1",
@@ -51,12 +43,6 @@
       "frequency": 2
     },
     {
-      "name": "/aos/roborio",
-      "type": "aos.message_bridge.ServerStatistics",
-      "source_node": "roborio",
-      "frequency": 2
-    },
-    {
       "name": "/aos/pi1",
       "type": "aos.message_bridge.ClientStatistics",
       "source_node": "pi1",
@@ -75,12 +61,6 @@
       "frequency": 2
     },
     {
-      "name": "/aos/roborio",
-      "type": "aos.message_bridge.ClientStatistics",
-      "source_node": "roborio",
-      "frequency": 2
-    },
-    {
       "name": "/aos/pi1",
       "type": "aos.timing.Report",
       "source_node": "pi1",
@@ -105,14 +85,6 @@
       "max_size": 2048
     },
     {
-      "name": "/aos/roborio",
-      "type": "aos.timing.Report",
-      "source_node": "roborio",
-      "frequency": 50,
-      "num_senders": 20,
-      "max_size": 2048
-    },
-    {
       "name": "/test",
       "type": "aos.examples.Ping",
       "source_node": "pi1",
@@ -199,16 +171,6 @@
     {
       "match": {
         "name": "/aos",
-        "type": "aos.logging.LogMessageFbs",
-        "source_node": "roborio"
-      },
-      "rename": {
-        "name": "/aos/roborio"
-      }
-    },
-    {
-      "match": {
-        "name": "/aos",
         "type": "aos.timing.Report",
         "source_node": "pi1"
       },
@@ -239,16 +201,6 @@
     {
       "match": {
         "name": "/aos",
-        "type": "aos.timing.Report",
-        "source_node": "roborio"
-      },
-      "rename": {
-        "name": "/aos/roborio"
-      }
-    },
-    {
-      "match": {
-        "name": "/aos",
         "type": "aos.message_bridge.ServerStatistics",
         "source_node": "pi1"
       },
@@ -279,16 +231,6 @@
     {
       "match": {
         "name": "/aos",
-        "type": "aos.message_bridge.ServerStatistics",
-        "source_node": "roborio"
-      },
-      "rename": {
-        "name": "/aos/roborio"
-      }
-    },
-    {
-      "match": {
-        "name": "/aos",
         "type": "aos.message_bridge.ClientStatistics",
         "source_node": "pi1"
       },
@@ -315,16 +257,6 @@
       "rename": {
         "name": "/aos/pi3"
       }
-    },
-    {
-      "match": {
-        "name": "/aos",
-        "type": "aos.message_bridge.ClientStatistics",
-        "source_node": "roborio"
-      },
-      "rename": {
-        "name": "/aos/roborio"
-      }
     }
   ],
   "nodes": [
@@ -342,11 +274,6 @@
       "name": "pi3",
       "hostname": "raspberrypi3",
       "port": 9971
-    },
-    {
-      "name": "roborio",
-      "hostname": "roboRIO-6971-FRC",
-      "port": 9971
     }
   ],
   "applications": [
diff --git a/aos/events/simulated_event_loop.cc b/aos/events/simulated_event_loop.cc
index faa9fe9..cf58b46 100644
--- a/aos/events/simulated_event_loop.cc
+++ b/aos/events/simulated_event_loop.cc
@@ -36,7 +36,6 @@
  public:
   SimulatedWatcher(
       SimulatedEventLoop *simulated_event_loop, EventScheduler *scheduler,
-      NodeEventLoopFactory *node_event_loop_factory,
       const Channel *channel,
       std::function<void(const Context &context, const void *message)> fn);
 
@@ -60,7 +59,6 @@
   SimulatedEventLoop *simulated_event_loop_;
   EventHandler<SimulatedWatcher> event_;
   EventScheduler *scheduler_;
-  NodeEventLoopFactory *node_event_loop_factory_;
   EventScheduler::Token token_;
   SimulatedChannel *simulated_channel_ = nullptr;
 };
@@ -272,7 +270,6 @@
 class SimulatedTimerHandler : public TimerHandler {
  public:
   explicit SimulatedTimerHandler(EventScheduler *scheduler,
-                                 NodeEventLoopFactory *node_event_loop_factory,
                                  SimulatedEventLoop *simulated_event_loop,
                                  ::std::function<void()> fn);
   ~SimulatedTimerHandler() { Disable(); }
@@ -288,7 +285,6 @@
   SimulatedEventLoop *simulated_event_loop_;
   EventHandler<SimulatedTimerHandler> event_;
   EventScheduler *scheduler_;
-  NodeEventLoopFactory *node_event_loop_factory_;
   EventScheduler::Token token_;
 
   monotonic_clock::time_point base_;
@@ -298,7 +294,6 @@
 class SimulatedPhasedLoopHandler : public PhasedLoopHandler {
  public:
   SimulatedPhasedLoopHandler(EventScheduler *scheduler,
-                             NodeEventLoopFactory *node_event_loop_factory,
                              SimulatedEventLoop *simulated_event_loop,
                              ::std::function<void(int)> fn,
                              const monotonic_clock::duration interval,
@@ -314,7 +309,6 @@
   EventHandler<SimulatedPhasedLoopHandler> event_;
 
   EventScheduler *scheduler_;
-  NodeEventLoopFactory *node_event_loop_factory_;
   EventScheduler::Token token_;
 };
 
@@ -387,17 +381,17 @@
 
   TimerHandler *AddTimer(::std::function<void()> callback) override {
     CHECK(!is_running());
-    return NewTimer(::std::unique_ptr<TimerHandler>(new SimulatedTimerHandler(
-        scheduler_, node_event_loop_factory_, this, callback)));
+    return NewTimer(::std::unique_ptr<TimerHandler>(
+        new SimulatedTimerHandler(scheduler_, this, callback)));
   }
 
   PhasedLoopHandler *AddPhasedLoop(::std::function<void(int)> callback,
                                    const monotonic_clock::duration interval,
                                    const monotonic_clock::duration offset =
                                        ::std::chrono::seconds(0)) override {
-    return NewPhasedLoop(::std::unique_ptr<PhasedLoopHandler>(
-        new SimulatedPhasedLoopHandler(scheduler_, node_event_loop_factory_,
-                                       this, callback, interval, offset)));
+    return NewPhasedLoop(
+        ::std::unique_ptr<PhasedLoopHandler>(new SimulatedPhasedLoopHandler(
+            scheduler_, this, callback, interval, offset)));
   }
 
   void OnRun(::std::function<void()> on_run) override {
@@ -483,8 +477,8 @@
     std::function<void(const Context &channel, const void *message)> watcher) {
   TakeWatcher(channel);
 
-  std::unique_ptr<SimulatedWatcher> shm_watcher(new SimulatedWatcher(
-      this, scheduler_, node_event_loop_factory_, channel, std::move(watcher)));
+  std::unique_ptr<SimulatedWatcher> shm_watcher(
+      new SimulatedWatcher(this, scheduler_, channel, std::move(watcher)));
 
   GetSimulatedChannel(channel)->MakeRawWatcher(shm_watcher.get());
   NewWatcher(std::move(shm_watcher));
@@ -526,13 +520,12 @@
 
 SimulatedWatcher::SimulatedWatcher(
     SimulatedEventLoop *simulated_event_loop, EventScheduler *scheduler,
-    NodeEventLoopFactory *node_event_loop_factory, const Channel *channel,
+    const Channel *channel,
     std::function<void(const Context &context, const void *message)> fn)
     : WatcherState(simulated_event_loop, channel, std::move(fn)),
       simulated_event_loop_(simulated_event_loop),
       event_(this),
       scheduler_(scheduler),
-      node_event_loop_factory_(node_event_loop_factory),
       token_(scheduler_->InvalidToken()) {}
 
 SimulatedWatcher::~SimulatedWatcher() {
@@ -594,10 +587,9 @@
 }
 
 void SimulatedWatcher::DoSchedule(monotonic_clock::time_point event_time) {
-  token_ = scheduler_->Schedule(
-      node_event_loop_factory_->ToDistributedClock(
-          event_time + simulated_event_loop_->send_delay()),
-      [this]() { simulated_event_loop_->HandleEvent(); });
+  token_ =
+      scheduler_->Schedule(event_time + simulated_event_loop_->send_delay(),
+                           [this]() { simulated_event_loop_->HandleEvent(); });
 }
 
 void SimulatedChannel::MakeRawWatcher(SimulatedWatcher *watcher) {
@@ -643,13 +635,12 @@
 }
 
 SimulatedTimerHandler::SimulatedTimerHandler(
-    EventScheduler *scheduler, NodeEventLoopFactory *node_event_loop_factory,
-    SimulatedEventLoop *simulated_event_loop, ::std::function<void()> fn)
+    EventScheduler *scheduler, SimulatedEventLoop *simulated_event_loop,
+    ::std::function<void()> fn)
     : TimerHandler(simulated_event_loop, std::move(fn)),
       simulated_event_loop_(simulated_event_loop),
       event_(this),
       scheduler_(scheduler),
-      node_event_loop_factory_(node_event_loop_factory),
       token_(scheduler_->InvalidToken()) {}
 
 void SimulatedTimerHandler::Setup(monotonic_clock::time_point base,
@@ -661,12 +652,10 @@
   repeat_offset_ = repeat_offset;
   if (base < monotonic_now) {
     token_ = scheduler_->Schedule(
-        node_event_loop_factory_->ToDistributedClock(monotonic_now),
-        [this]() { simulated_event_loop_->HandleEvent(); });
+        monotonic_now, [this]() { simulated_event_loop_->HandleEvent(); });
   } else {
     token_ = scheduler_->Schedule(
-        node_event_loop_factory_->ToDistributedClock(base),
-        [this]() { simulated_event_loop_->HandleEvent(); });
+        base, [this]() { simulated_event_loop_->HandleEvent(); });
   }
   event_.set_event_time(base_);
   simulated_event_loop_->AddEvent(&event_);
@@ -683,8 +672,7 @@
     // Reschedule.
     while (base_ <= monotonic_now) base_ += repeat_offset_;
     token_ = scheduler_->Schedule(
-        node_event_loop_factory_->ToDistributedClock(base_),
-        [this]() { simulated_event_loop_->HandleEvent(); });
+        base_, [this]() { simulated_event_loop_->HandleEvent(); });
     event_.set_event_time(base_);
     simulated_event_loop_->AddEvent(&event_);
   } else {
@@ -703,15 +691,13 @@
 }
 
 SimulatedPhasedLoopHandler::SimulatedPhasedLoopHandler(
-    EventScheduler *scheduler, NodeEventLoopFactory *node_event_loop_factory,
-    SimulatedEventLoop *simulated_event_loop, ::std::function<void(int)> fn,
-    const monotonic_clock::duration interval,
+    EventScheduler *scheduler, SimulatedEventLoop *simulated_event_loop,
+    ::std::function<void(int)> fn, const monotonic_clock::duration interval,
     const monotonic_clock::duration offset)
     : PhasedLoopHandler(simulated_event_loop, std::move(fn), interval, offset),
       simulated_event_loop_(simulated_event_loop),
       event_(this),
       scheduler_(scheduler),
-      node_event_loop_factory_(node_event_loop_factory),
       token_(scheduler_->InvalidToken()) {}
 
 SimulatedPhasedLoopHandler::~SimulatedPhasedLoopHandler() {
@@ -737,29 +723,27 @@
 void SimulatedPhasedLoopHandler::Schedule(
     monotonic_clock::time_point sleep_time) {
   token_ = scheduler_->Schedule(
-      node_event_loop_factory_->ToDistributedClock(sleep_time),
-      [this]() { simulated_event_loop_->HandleEvent(); });
+      sleep_time, [this]() { simulated_event_loop_->HandleEvent(); });
   event_.set_event_time(sleep_time);
   simulated_event_loop_->AddEvent(&event_);
 }
 
 NodeEventLoopFactory::NodeEventLoopFactory(
-    EventScheduler *scheduler, SimulatedEventLoopFactory *factory,
-    const Node *node,
+    EventSchedulerScheduler *scheduler_scheduler,
+    SimulatedEventLoopFactory *factory, const Node *node,
     std::vector<std::pair<EventLoop *, std::function<void(bool)>>>
         *raw_event_loops)
-    : scheduler_(scheduler),
-      factory_(factory),
-      node_(node),
-      raw_event_loops_(raw_event_loops) {}
+    : factory_(factory), node_(node), raw_event_loops_(raw_event_loops) {
+  scheduler_scheduler->AddEventScheduler(&scheduler_);
+}
 
 SimulatedEventLoopFactory::SimulatedEventLoopFactory(
     const Configuration *configuration)
     : configuration_(CHECK_NOTNULL(configuration)),
       nodes_(configuration::GetNodes(configuration_)) {
   for (const Node *node : nodes_) {
-    node_factories_.emplace_back(
-        new NodeEventLoopFactory(&scheduler_, this, node, &raw_event_loops_));
+    node_factories_.emplace_back(new NodeEventLoopFactory(
+        &scheduler_scheduler_, this, node, &raw_event_loops_));
   }
 
   if (configuration::MultiNode(configuration)) {
@@ -797,11 +781,14 @@
 
 ::std::unique_ptr<EventLoop> NodeEventLoopFactory::MakeEventLoop(
     std::string_view name) {
+  CHECK(!scheduler_.is_running())
+      << ": Can't create an event loop while running";
+
   pid_t tid = tid_;
   ++tid_;
   ::std::unique_ptr<SimulatedEventLoop> result(new SimulatedEventLoop(
-      scheduler_, this, &channels_, factory_->configuration(), raw_event_loops_,
-      node_, tid));
+      &scheduler_, this, &channels_, factory_->configuration(),
+      raw_event_loops_, node_, tid));
   result->set_name(name);
   result->set_send_delay(factory_->send_delay());
   return std::move(result);
@@ -812,7 +799,7 @@
        raw_event_loops_) {
     event_loop.second(true);
   }
-  scheduler_.RunFor(duration);
+  scheduler_scheduler_.RunFor(duration);
   for (const std::pair<EventLoop *, std::function<void(bool)>> &event_loop :
        raw_event_loops_) {
     event_loop.second(false);
@@ -824,7 +811,7 @@
        raw_event_loops_) {
     event_loop.second(true);
   }
-  scheduler_.Run();
+  scheduler_scheduler_.Run();
   for (const std::pair<EventLoop *, std::function<void(bool)>> &event_loop :
        raw_event_loops_) {
     event_loop.second(false);
diff --git a/aos/events/simulated_event_loop.h b/aos/events/simulated_event_loop.h
index 9dd3d1f..a7f7920 100644
--- a/aos/events/simulated_event_loop.h
+++ b/aos/events/simulated_event_loop.h
@@ -78,7 +78,7 @@
 
   // Stops executing all event loops.  Meant to be called from within an event
   // loop handler.
-  void Exit() { scheduler_.Exit(); }
+  void Exit() { scheduler_scheduler_.Exit(); }
 
   const std::vector<const Node *> &nodes() const { return nodes_; }
 
@@ -92,7 +92,7 @@
 
   // Returns the clock used to synchronize the nodes.
   distributed_clock::time_point distributed_now() const {
-    return scheduler_.distributed_now();
+    return scheduler_scheduler_.distributed_now();
   }
 
   // Returns the configuration used for everything.
@@ -104,7 +104,7 @@
 
  private:
   const Configuration *const configuration_;
-  EventScheduler scheduler_;
+  EventSchedulerScheduler scheduler_scheduler_;
   // List of event loops to manage running and not running for.
   // The function is a callback used to set and clear the running bool on each
   // event loop.
@@ -149,28 +149,28 @@
   // node.
   std::chrono::nanoseconds send_delay() const { return factory_->send_delay(); }
 
+  // TODO(austin): Should the following be private?
+
   // Converts a time to the distributed clock for scheduling and cross-node time
   // measurement.
   inline distributed_clock::time_point ToDistributedClock(
       monotonic_clock::time_point time) const;
 
-  // Note: use this very very carefully.  It can cause massive problems.  This
-  // needs to go away as we properly handle time drifting between nodes.
-  void SetMonotonicNow(monotonic_clock::time_point monotonic_now) {
-    monotonic_clock::duration offset = (monotonic_now - this->monotonic_now());
-    monotonic_offset_ += offset;
-    realtime_offset_ -= offset;
+  // Sets the offset between the monotonic clock and the central distributed
+  // clock.  distributed_clock = monotonic_clock + offset.
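+  // For example, SetDistributedOffset(-std::chrono::seconds(1000)) makes this
+  // node's monotonic clock read 1000 seconds ahead of the distributed clock.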
+  void SetDistributedOffset(std::chrono::nanoseconds monotonic_offset) {
+    scheduler_.SetDistributedOffset(monotonic_offset);
   }
 
  private:
   friend class SimulatedEventLoopFactory;
   NodeEventLoopFactory(
-      EventScheduler *scheduler, SimulatedEventLoopFactory *factory,
-      const Node *node,
+      EventSchedulerScheduler *scheduler_scheduler,
+      SimulatedEventLoopFactory *factory, const Node *node,
       std::vector<std::pair<EventLoop *, std::function<void(bool)>>>
           *raw_event_loops);
 
-  EventScheduler *const scheduler_;
+  EventScheduler scheduler_;
   SimulatedEventLoopFactory *const factory_;
 
   const Node *const node_;
@@ -178,7 +178,6 @@
   std::vector<std::pair<EventLoop *, std::function<void(bool)>>>
       *const raw_event_loops_;
 
-  std::chrono::nanoseconds monotonic_offset_ = std::chrono::seconds(0);
   std::chrono::nanoseconds realtime_offset_ = std::chrono::seconds(0);
 
   // Map from name, type to queue.
@@ -189,8 +188,8 @@
 };
 
 inline monotonic_clock::time_point NodeEventLoopFactory::monotonic_now() const {
-  return monotonic_clock::time_point(
-      factory_->distributed_now().time_since_epoch() + monotonic_offset_);
+  // TODO(austin): Confirm that time never goes backwards?
+  return scheduler_.FromDistributedClock(factory_->distributed_now());
 }
 
 inline realtime_clock::time_point NodeEventLoopFactory::realtime_now() const {
@@ -200,8 +199,7 @@
 
 inline distributed_clock::time_point NodeEventLoopFactory::ToDistributedClock(
     monotonic_clock::time_point time) const {
-  return distributed_clock::time_point(time.time_since_epoch() -
-                                       monotonic_offset_);
+  return scheduler_.ToDistributedClock(time);
 }
 
 }  // namespace aos
diff --git a/aos/events/simulated_event_loop_test.cc b/aos/events/simulated_event_loop_test.cc
index de5cbae..0b5dbec 100644
--- a/aos/events/simulated_event_loop_test.cc
+++ b/aos/events/simulated_event_loop_test.cc
@@ -64,30 +64,33 @@
 // Test that creating an event and running the scheduler runs the event.
 TEST(EventSchedulerTest, ScheduleEvent) {
   int counter = 0;
+  EventSchedulerScheduler scheduler_scheduler;
   EventScheduler scheduler;
+  scheduler_scheduler.AddEventScheduler(&scheduler);
 
-  scheduler.Schedule(distributed_clock::epoch() + chrono::seconds(1),
+  scheduler.Schedule(monotonic_clock::epoch() + chrono::seconds(1),
                      [&counter]() { counter += 1; });
-  scheduler.Run();
+  scheduler_scheduler.Run();
   EXPECT_EQ(counter, 1);
   auto token =
-      scheduler.Schedule(distributed_clock::epoch() + chrono::seconds(2),
+      scheduler.Schedule(monotonic_clock::epoch() + chrono::seconds(2),
                          [&counter]() { counter += 1; });
   scheduler.Deschedule(token);
-  scheduler.Run();
+  scheduler_scheduler.Run();
   EXPECT_EQ(counter, 1);
 }
 
 // Test that descheduling an already scheduled event doesn't run the event.
 TEST(EventSchedulerTest, DescheduleEvent) {
   int counter = 0;
+  EventSchedulerScheduler scheduler_scheduler;
   EventScheduler scheduler;
+  scheduler_scheduler.AddEventScheduler(&scheduler);
 
-  auto token =
-      scheduler.Schedule(distributed_clock::epoch() + chrono::seconds(1),
-                         [&counter]() { counter += 1; });
+  auto token = scheduler.Schedule(monotonic_clock::epoch() + chrono::seconds(1),
+                                  [&counter]() { counter += 1; });
   scheduler.Deschedule(token);
-  scheduler.Run();
+  scheduler_scheduler.Run();
   EXPECT_EQ(counter, 0);
 }
 
diff --git a/aos/network/BUILD b/aos/network/BUILD
index 8f049e3..11eebdb 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -240,27 +240,51 @@
 flatbuffer_cc_library(
     name = "web_proxy_fbs",
     srcs = ["web_proxy.fbs"],
+    includes = [
+        ":connect_fbs_includes",
+        "//aos:configuration_fbs_includes",
+    ],
     gen_reflections = True,
 )
 
 flatbuffer_ts_library(
     name = "web_proxy_ts_fbs",
     srcs = ["web_proxy.fbs"],
+    includes = [
+        ":connect_fbs_includes",
+        "//aos:configuration_fbs_includes",
+    ],
+)
+
+cc_library(
+    name = "web_proxy_utils",
+    hdrs = ["web_proxy_utils.h"],
+    srcs = ["web_proxy_utils.cc"],
+    deps = [
+        ":connect_fbs",
+        ":web_proxy_fbs",
+        "//aos:configuration_fbs",
+        "//aos/events:event_loop",
+        "@com_github_google_flatbuffers//:flatbuffers",
+    ],
 )
 
 cc_library(
     name = "web_proxy",
-    hdrs = ["web_proxy.h"],
     srcs = ["web_proxy.cc"],
+    hdrs = ["web_proxy.h"],
     copts = [
         "-DWEBRTC_POSIX",
         "-Wno-unused-parameter",
     ],
     deps = [
+        ":connect_fbs",
         ":web_proxy_fbs",
+        ":web_proxy_utils",
+        "//aos/events:shm_event_loop",
         "//aos/seasocks:seasocks_logger",
-        "//third_party/seasocks",
         "//third_party:webrtc",
+        "//third_party/seasocks",
         "@com_github_google_glog//:glog",
     ],
 )
@@ -277,8 +301,9 @@
     name = "web_proxy_main",
     srcs = ["web_proxy_main.cc"],
     deps = [
-        ":web_proxy",
         ":gen_embedded",
+        ":web_proxy",
+        "//aos/events:shm_event_loop",
         "//aos:init",
         "//aos/seasocks:seasocks_logger",
         "//third_party/seasocks",
@@ -290,8 +315,28 @@
     ],
     data = [
         "//aos/network/www:files",
-        "//aos/network/www:proxy_bundle",
+        "//aos/network/www:main_bundle",
         "//aos/network/www:flatbuffers",
-        "@com_github_google_flatbuffers//:flatjs"
+        "@com_github_google_flatbuffers//:flatjs",
+        "//aos/events:pingpong_config.json",
+    ],
+)
+
+cc_library(
+    name = "timestamp_filter",
+    hdrs = ["timestamp_filter.h"],
+    deps = [
+        "//aos/time",
+    ],
+)
+
+cc_test(
+    name = "timestamp_filter_test",
+    srcs = [
+        "timestamp_filter_test.cc",
+    ],
+    deps = [
+        ":timestamp_filter",
+        "//aos/testing:googletest",
     ],
 )
diff --git a/aos/network/timestamp_filter.h b/aos/network/timestamp_filter.h
new file mode 100644
index 0000000..b4280f6
--- /dev/null
+++ b/aos/network/timestamp_filter.h
@@ -0,0 +1,289 @@
+#ifndef AOS_NETWORK_TIMESTAMP_FILTER_H_
+#define AOS_NETWORK_TIMESTAMP_FILTER_H_
+
+#include <stdio.h>
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+
+#include "aos/time/time.h"
+#include "glog/logging.h"
+
+namespace aos {
+namespace message_bridge {
+
+// This class handles filtering differences between clocks across a network.
+//
+// The basic concept is that network latencies are highly asymmetric.  They
+// have a well-defined minimum and large, poorly-bounded maxima.  We also want
+// to guarantee that the filtered line is *always* below the samples.
+//
+// In order to preserve precision, an initial offset (base_offset_) is
+// subtracted from everything.  This should make all the offsets near 0, so we
+// have enough precision to represent nanoseconds, and to not have to worry too
+// much about precision when solving for the global offset.
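+//
+// A minimal usage sketch (the times and sample values here are made up for
+// illustration):
+//
+//   TimestampFilter filter;
+//   // destination_time - source_time for one forwarded message.
+//   filter.Sample(aos::monotonic_clock::epoch() + std::chrono::seconds(1),
+//                 std::chrono::microseconds(-99800));
+//   const double offset_seconds =
+//       filter.offset() + filter.base_offset_double();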
+class TimestampFilter {
+ public:
+  // Updates with a new sample.  monotonic_now is the timestamp of the sample on
+  // the destination node, and sample_ns is destination_time - source_time.
+  void Sample(aos::monotonic_clock::time_point monotonic_now,
+              std::chrono::nanoseconds sample_ns) {
+    // Compute the sample offset as a double (seconds), taking into account the
+    // base offset.
+    const double sample =
+        std::chrono::duration_cast<std::chrono::duration<double>>(sample_ns -
+                                                                  base_offset_)
+            .count();
+
+    // This is our first sample.  Just use it.
+    if (last_time_ == aos::monotonic_clock::min_time) {
+      offset_ = sample;
+    } else {
+      // Took less time to transmit, so clamp to it.
+      if (sample < offset_) {
+        offset_ = sample;
+      } else {
+        // We split things up into 2 portions.
+        //  1) Each sample has information.  Correct some using it.
+        //  2) We want to keep a decent time constant if the sample rate slows.
+        //     Take time since the last sample into account.
+
+        // Time constant for the low pass filter in seconds.
+        constexpr double kTau = 0.5;
+
+        constexpr double kClampNegative = -0.0003;
+
+        {
+          // 1)
+          constexpr double kAlpha = 0.005;
+          // This formulation is more numerically precise.
+          // Clamp the error term to kClampNegative (in seconds, i.e. -0.3ms)
+          // to reduce the effect of wildly large samples.
+          offset_ =
+              offset_ - kAlpha * std::max(offset_ - sample, kClampNegative);
+        }
+
+        {
+          // 2)
+          //
+          // alpha = 1 - e^(-t/tau)
+          const double alpha = -std::expm1(
+              -std::chrono::duration_cast<std::chrono::duration<double>>(
+                   monotonic_now - last_time_)
+                   .count() /
+              kTau);
+
+          // Clamp the error term to kClampNegative (in seconds, i.e. -0.3ms)
+          // to reduce the effect of wildly large samples.
+          offset_ =
+              offset_ - alpha * std::max(offset_ - sample, kClampNegative);
+        }
+      }
+    }
+
+    last_time_ = monotonic_now;
+  }
+
+  // Updates the base_offset, and compensates offset while we are here.
+  void set_base_offset(std::chrono::nanoseconds base_offset) {
+    offset_ -= std::chrono::duration_cast<std::chrono::duration<double>>(
+                   base_offset - base_offset_)
+                   .count();
+    base_offset_ = base_offset;
+    // Clear everything out to avoid any numerical precision problems.
+    last_time_ = aos::monotonic_clock::min_time;
+  }
+
+  double offset() const { return offset_; }
+
+  std::chrono::nanoseconds base_offset() const { return base_offset_; }
+
+  double base_offset_double() const {
+    return std::chrono::duration_cast<std::chrono::duration<double>>(
+               base_offset_)
+        .count();
+  }
+
+  bool has_sample() const {
+    return last_time_ != aos::monotonic_clock::min_time;
+  }
+
+ private:
+  double offset_ = 0;
+
+  aos::monotonic_clock::time_point last_time_ = aos::monotonic_clock::min_time;
+  std::chrono::nanoseconds base_offset_{0};
+};
+
+// This class combines the a -> b offsets with the b -> a offsets and
+// aggressively filters the results.
+struct ClippedAverageFilter {
+  // If not nullptr, timestamps will get written to these two files for
+  // debugging.
+  FILE *fwd_fp = nullptr;
+  FILE *rev_fp = nullptr;
+
+  ~ClippedAverageFilter() {
+    if (fwd_fp != nullptr) {
+      fclose(fwd_fp);
+    }
+    if (rev_fp != nullptr) {
+      fclose(rev_fp);
+    }
+  }
+
+  // Adds a forward sample.  sample_ns = destination - source;  Forward samples
+  // are from A -> B.
+  void FwdSample(aos::monotonic_clock::time_point monotonic_now,
+                 std::chrono::nanoseconds sample_ns) {
+    fwd_.Sample(monotonic_now, sample_ns);
+    Update(monotonic_now, &last_fwd_time_);
+
+    if (fwd_fp != nullptr) {
+      if (first_fwd_time_ == aos::monotonic_clock::min_time) {
+        first_fwd_time_ = monotonic_now;
+      }
+      fprintf(
+          fwd_fp, "%f, %f, %f, %f\n",
+          std::chrono::duration_cast<std::chrono::duration<double>>(
+              monotonic_now - first_fwd_time_)
+              .count(),
+          std::chrono::duration_cast<std::chrono::duration<double>>(sample_ns)
+              .count(),
+          fwd_.offset() + fwd_.base_offset_double(),
+          std::chrono::duration_cast<std::chrono::duration<double>>(offset())
+              .count());
+    }
+  }
+
+  // Adds a reverse sample.  sample_ns = destination - source;  Reverse samples
+  // are B -> A.
+  void RevSample(aos::monotonic_clock::time_point monotonic_now,
+                 std::chrono::nanoseconds sample_ns) {
+    rev_.Sample(monotonic_now, sample_ns);
+    Update(monotonic_now, &last_rev_time_);
+
+    if (rev_fp != nullptr) {
+      if (first_rev_time_ == aos::monotonic_clock::min_time) {
+        first_rev_time_ = monotonic_now;
+      }
+      fprintf(
+          rev_fp, "%f, %f, %f, %f\n",
+          std::chrono::duration_cast<std::chrono::duration<double>>(
+              monotonic_now - first_rev_time_)
+              .count(),
+          std::chrono::duration_cast<std::chrono::duration<double>>(sample_ns)
+              .count(),
+          rev_.offset() + rev_.base_offset_double(),
+          std::chrono::duration_cast<std::chrono::duration<double>>(offset())
+              .count());
+    }
+  }
+
+  // Returns the overall filtered offset, offseta - offsetb.
+  std::chrono::nanoseconds offset() const {
+    return std::chrono::duration_cast<std::chrono::nanoseconds>(
+               std::chrono::duration<double>(offset_)) +
+           base_offset_;
+  }
+
+  // Returns the offset as a double.
+  double offset_double() const {
+    return std::chrono::duration_cast<std::chrono::duration<double>>(offset())
+        .count();
+  }
+
+  // Sets the sample pointer.  The value at this address gets updated every
+  // time the sample changes, which makes it easy to wire into a matrix and
+  // solve.
+  void set_sample_pointer(double *sample_pointer) {
+    sample_pointer_ = sample_pointer;
+  }
+
+  double *sample_pointer() { return sample_pointer_; }
+
+  // Sets the base offset.  This is used to reduce the dynamic range needed from
+  // the double to something manageable.  It is subtracted from offset_.
+  void set_base_offset(std::chrono::nanoseconds base_offset) {
+    offset_ -= std::chrono::duration_cast<std::chrono::duration<double>>(
+                   base_offset - base_offset_)
+                   .count();
+    fwd_.set_base_offset(base_offset);
+    rev_.set_base_offset(-base_offset);
+    base_offset_ = base_offset;
+    last_fwd_time_ = aos::monotonic_clock::min_time;
+    last_rev_time_ = aos::monotonic_clock::min_time;
+  }
+
+ private:
+  // Updates the offset estimate given the current time, and a pointer to the
+  // variable holding the last time.
+  void Update(aos::monotonic_clock::time_point monotonic_now,
+              aos::monotonic_clock::time_point *last_time) {
+    // ta = t + offseta
+    // tb = t + offsetb
+    // fwd sample => ta - tb + network -> offseta - offsetb + network
+    // rev sample => tb - ta + network -> offsetb - offseta + network
+    const double hard_max = fwd_.offset();
+    const double hard_min = -rev_.offset();
+    const double average = (hard_max + hard_min) / 2.0;
+    LOG(INFO) << "max " << hard_max << " min " << hard_min;
+    // We don't want to clip the offset to the hard min/max.  We really want
+    // to keep it within a band around the middle.  A ratio of 0.5 means stay
+    // within +- 0.25 of the hard range, centered on the middle.
+    constexpr double kBand = 0.5;
+    const double max = average + kBand / 2.0 * (hard_max - hard_min);
+    const double min = average - kBand / 2.0 * (hard_max - hard_min);
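+    // For example, with hard_min = -1ms and hard_max = 3ms, average is 1ms,
+    // so the offset is kept within [0ms, 2ms].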
+
+    // For the first sample, just initialize to the average of the min and max
+    // regardless.
+    if (*last_time == aos::monotonic_clock::min_time) {
+      offset_ = average;
+    } else {
+      // Do just a time constant based update.  We can afford to be slow here
+      // for smoothness.
+      constexpr double kTau = 10.0;
+      const double alpha = -std::expm1(
+          -std::chrono::duration_cast<std::chrono::duration<double>>(
+               monotonic_now - *last_time)
+               .count() /
+          kTau);
+
+      // Clamp it such that it remains in the min/max bounds.
+      offset_ = std::clamp(offset_ - alpha * (offset_ - average), min, max);
+    }
+    *last_time = monotonic_now;
+
+    if (sample_pointer_ != nullptr) {
+      *sample_pointer_ = offset_;
+    }
+  }
+
+  // Filters for both the forward and reverse directions.
+  TimestampFilter fwd_;
+  TimestampFilter rev_;
+
+  // Base offset in nanoseconds.  This is subtracted from all calculations, and
+  // added back to the result when reporting.
+  std::chrono::nanoseconds base_offset_ = std::chrono::nanoseconds(0);
+  // Dynamic part of the offset.
+  double offset_ = 0;
+
+  // Last time we had a sample for a direction.
+  aos::monotonic_clock::time_point last_fwd_time_ =
+      aos::monotonic_clock::min_time;
+  aos::monotonic_clock::time_point last_rev_time_ =
+      aos::monotonic_clock::min_time;
+
+  // First times used for plotting.
+  aos::monotonic_clock::time_point first_fwd_time_ =
+      aos::monotonic_clock::min_time;
+  aos::monotonic_clock::time_point first_rev_time_ =
+      aos::monotonic_clock::min_time;
+
+  // Pointer to copy the sample to when it is updated.
+  double *sample_pointer_ = nullptr;
+};
+
+}  // namespace message_bridge
+}  // namespace aos
+
+#endif  // AOS_NETWORK_TIMESTAMP_FILTER_H_
diff --git a/aos/network/timestamp_filter_test.cc b/aos/network/timestamp_filter_test.cc
new file mode 100644
index 0000000..d06923e
--- /dev/null
+++ b/aos/network/timestamp_filter_test.cc
@@ -0,0 +1,76 @@
+#include "aos/network/timestamp_filter.h"
+
+#include <chrono>
+
+#include "aos/macros.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace message_bridge {
+namespace testing {
+
+namespace chrono = std::chrono;
+
+// Tests that adding samples tracks more negative offsets down quickly, and
+// slowly comes back up.
+TEST(TimestampFilterTest, Sample) {
+  TimestampFilter filter;
+
+  EXPECT_EQ(filter.offset(), 0.0);
+  EXPECT_EQ(filter.base_offset(), chrono::seconds(0));
+  EXPECT_FALSE(filter.has_sample());
+
+  filter.Sample(aos::monotonic_clock::epoch() + chrono::seconds(1),
+                chrono::milliseconds(-100));
+
+  EXPECT_EQ(filter.offset(), -0.1);
+  EXPECT_EQ(filter.base_offset(), chrono::seconds(0));
+  EXPECT_TRUE(filter.has_sample());
+
+  // Further negative -> follow the min down exactly.
+  filter.Sample(aos::monotonic_clock::epoch() + chrono::seconds(2),
+                chrono::milliseconds(-1000));
+
+  EXPECT_EQ(filter.offset(), -1.0);
+  EXPECT_EQ(filter.base_offset(), chrono::seconds(0));
+  EXPECT_TRUE(filter.has_sample());
+
+  // Positive now goes positive, but slower.
+  filter.Sample(aos::monotonic_clock::epoch() + chrono::seconds(3),
+                chrono::milliseconds(0));
+
+  EXPECT_GT(filter.offset(), -0.9999);
+  EXPECT_LT(filter.offset(), -0.999);
+  EXPECT_EQ(filter.base_offset(), chrono::seconds(0));
+  EXPECT_TRUE(filter.has_sample());
+}
+
+// Tests that ClippedAverageFilter tracks between the two filters.
+TEST(ClippedAverageFilterTest, Sample) {
+  ClippedAverageFilter filter;
+
+  // Pass in a sample in both the forward and reverse direction.  We should
+  // expect the offset to land smack in the middle.
+  filter.FwdSample(aos::monotonic_clock::epoch() + chrono::seconds(1),
+                   chrono::milliseconds(101));
+
+  filter.RevSample(aos::monotonic_clock::epoch() + chrono::seconds(1),
+                   chrono::milliseconds(-100));
+
+  EXPECT_EQ(filter.offset(), chrono::microseconds(100500));
+
+  // Confirm the base offset works too.
+  filter.set_base_offset(chrono::milliseconds(100));
+
+  filter.FwdSample(aos::monotonic_clock::epoch() + chrono::seconds(1),
+                   chrono::milliseconds(101));
+
+  filter.RevSample(aos::monotonic_clock::epoch() + chrono::seconds(1),
+                   chrono::milliseconds(-100));
+
+  EXPECT_EQ(filter.offset(), chrono::microseconds(100500));
+}
+
+}  // namespace testing
+}  // namespace message_bridge
+}  // namespace aos
diff --git a/aos/network/web_proxy.cc b/aos/network/web_proxy.cc
index c6d7336..77371eb 100644
--- a/aos/network/web_proxy.cc
+++ b/aos/network/web_proxy.cc
@@ -1,5 +1,9 @@
 #include "aos/network/web_proxy.h"
+
+#include "aos/flatbuffer_merge.h"
+#include "aos/network/connect_generated.h"
 #include "aos/network/web_proxy_generated.h"
+#include "aos/network/web_proxy_utils.h"
 #include "api/create_peerconnection_factory.h"
 #include "glog/logging.h"
 
@@ -22,12 +26,15 @@
 
 }  // namespace
 
-WebsocketHandler::WebsocketHandler(::seasocks::Server *server)
-    : server_(server) {}
+WebsocketHandler::WebsocketHandler(
+    ::seasocks::Server *server,
+    const std::vector<std::unique_ptr<Subscriber>> &subscribers,
+    const aos::FlatbufferDetachedBuffer<aos::Configuration> &config)
+    : server_(server), subscribers_(subscribers), config_(config) {}
 
 void WebsocketHandler::onConnect(::seasocks::WebSocket *sock) {
   std::unique_ptr<Connection> conn =
-      std::make_unique<Connection>(sock, server_);
+      std::make_unique<Connection>(sock, server_, subscribers_, config_);
   connections_.insert({sock, std::move(conn)});
 }
 
@@ -40,8 +47,42 @@
   connections_.erase(sock);
 }
 
-Connection::Connection(::seasocks::WebSocket *sock, ::seasocks::Server *server)
-    : sock_(sock), server_(server) {}
+void Subscriber::RunIteration() {
+  if (channels_.empty()) {
+    return;
+  }
+
+  fetcher_->Fetch();
+  // context.data is nullptr until something has been sent on this channel; in
+  // that case there is nothing to pack and forward yet.
+  if (fetcher_->context().data == nullptr) {
+    return;
+  }
+  for (int packet_index = 0; packet_index < GetPacketCount(fetcher_->context());
+       ++packet_index) {
+    flatbuffers::Offset<MessageHeader> message =
+        PackMessage(&fbb_, fetcher_->context(), channel_index_, packet_index);
+    fbb_.Finish(message);
+
+    const flatbuffers::DetachedBuffer buffer = fbb_.Release();
+
+    webrtc::DataBuffer data_buffer(
+        rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
+        true /* binary array */);
+    for (auto conn : channels_) {
+      conn->Send(data_buffer);
+    }
+  }
+}
+
+bool Subscriber::Compare(const Channel *channel) const {
+  return channel->name() == fetcher_->channel()->name() &&
+         channel->type() == fetcher_->channel()->type();
+}
+
+Connection::Connection(
+    ::seasocks::WebSocket *sock, ::seasocks::Server *server,
+    const std::vector<std::unique_ptr<Subscriber>> &subscribers,
+    const aos::FlatbufferDetachedBuffer<aos::Configuration> &config)
+    : sock_(sock),
+      server_(server),
+      subscribers_(subscribers),
+      config_(config) {}
 
 // Function called for web socket data. Parses the flatbuffer and handles it
 // appropriately.
@@ -111,6 +152,13 @@
   }
 }
 
+void Connection::Send(const ::flatbuffers::DetachedBuffer &buffer) const {
+  webrtc::DataBuffer data_buffer(
+      rtc::CopyOnWriteBuffer(buffer.data(), buffer.size()),
+      true /* binary array */);
+  data_channel_->Send(data_buffer);
+}
+
 void Connection::OnDataChannel(
     rtc::scoped_refptr<webrtc::DataChannelInterface> channel) {
   data_channel_ = channel;
@@ -150,13 +198,45 @@
   server_->execute(std::make_shared<UpdateData>(sock_, fbb.Release()));
 }
 
-// Receive and respond to a DataChannel message. Temporarily acting as a
-// "PONG", but will change to handle "Connect" subscription messages.
+// Wait until the data channel is ready for data before sending the config.
+void Connection::OnStateChange() {
+  if (peer_connection_.get() != nullptr &&
+      data_channel_->state() == webrtc::DataChannelInterface::kOpen) {
+    Send(config_.buffer());
+  }
+}
+
+// Handles DataChannel messages.  Subscribes this connection to each
+// subscriber matching a channel in the Connect message.
 void Connection::OnMessage(const webrtc::DataBuffer &buffer) {
-  // This is technically disallowed by webrtc, But doesn't seem to cause major
-  // problems. At least for the small data tested manually. Send should be
-  // called from outside this call stack.
-  data_channel_->Send(buffer);
+  const message_bridge::Connect *message =
+      flatbuffers::GetRoot<message_bridge::Connect>(buffer.data.data());
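+  // The browser sends a Connect flatbuffer listing the channels it wants
+  // forwarded; match each subscriber against that list.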
+  for (auto &subscriber : subscribers_) {
+    bool found_match = false;
+    for (auto channel : *message->channels_to_transfer()) {
+      if (subscriber->Compare(channel)) {
+        int index = subscriber->index();
+        auto it = channels_.find(index);
+        if (it == channels_.end()) {
+          auto pair = channels_.insert(
+              {index, peer_connection_->CreateDataChannel(
+                          channel->name()->str() + "/" + channel->type()->str(),
+                          nullptr)});
+          it = pair.first;
+        }
+        subscriber->AddListener(it->second);
+
+        VLOG(1) << "Subscribe to: " << channel->type()->str();
+        found_match = true;
+        break;
+      }
+    }
+    if (!found_match) {
+      int index = subscriber->index();
+      auto it = channels_.find(index);
+      // Only remove the listener if this connection ever created a channel
+      // for the subscriber; otherwise find() returns end() and dereferencing
+      // it is undefined behavior.
+      if (it != channels_.end()) {
+        subscriber->RemoveListener(it->second);
+      }
+    }
+  }
 }
 
 }  // namespace web_proxy
diff --git a/aos/network/web_proxy.fbs b/aos/network/web_proxy.fbs
index e712622..d5e027e 100644
--- a/aos/network/web_proxy.fbs
+++ b/aos/network/web_proxy.fbs
@@ -1,3 +1,9 @@
+// TypeScript namespaces are weird when coming from multiple files. We generate
+// all transitive dependencies into the same file in TypeScript so we can
+// include all 'aos' flatbuffers we care about here.
+include "aos/configuration.fbs";
+include "aos/network/connect.fbs";
+
 namespace aos.web_proxy;
 
 // SDP is Session Description Protocol. We only handle OFFER (starting a
@@ -29,3 +35,26 @@
 table WebSocketMessage {
   payload:Payload;
 }
+
+// WebRTC has size limits on the messages sent on data channels. This message
+// ensures that parts are received in the correct order. If there is any
+// mismatch, all the existing work should be dropped and restarted when
+// reasonable data starts again.
+table MessageHeader {
+  // Index of the channel in the config.
+  channel_index:uint;
+
+  // How many packets will be required for the message being sent.
+  packet_count:uint;
+  // Index of this packet within the total number of packets that make up the
+  // multipart message this header is part of.
+  packet_index:uint;
+
+  // Total number of bytes in the message
+  length:uint;
+
+  // Index into the sequence of messages. This will not always increase.
+  queue_index:uint;
+
+  data:[ubyte];
+}
diff --git a/aos/network/web_proxy.h b/aos/network/web_proxy.h
index 7c24eae..409e61d 100644
--- a/aos/network/web_proxy.h
+++ b/aos/network/web_proxy.h
@@ -2,6 +2,9 @@
 #define AOS_NETWORK_WEB_PROXY_H_
 #include <map>
 #include <set>
+#include "aos/events/shm_event_loop.h"
+#include "aos/network/connect_generated.h"
+#include "aos/network/web_proxy_generated.h"
 #include "aos/seasocks/seasocks_logger.h"
 #include "flatbuffers/flatbuffers.h"
 #include "seasocks/Server.h"
@@ -14,13 +17,17 @@
 namespace web_proxy {
 
 class Connection;
+class Subscriber;
 
 // Basic class that handles receiving new websocket connections. Creates a new
 // Connection to manage the rest of the negotiation and data passing. When the
 // websocket closes, it deletes the Connection.
 class WebsocketHandler : public ::seasocks::WebSocket::Handler {
  public:
-  WebsocketHandler(::seasocks::Server *server);
+  WebsocketHandler(
+      ::seasocks::Server *server,
+      const std::vector<std::unique_ptr<Subscriber>> &subscribers,
+      const aos::FlatbufferDetachedBuffer<aos::Configuration> &config);
   void onConnect(::seasocks::WebSocket *sock) override;
   void onData(::seasocks::WebSocket *sock, const uint8_t *data,
               size_t size) override;
@@ -29,6 +36,8 @@
  private:
   std::map<::seasocks::WebSocket *, std::unique_ptr<Connection>> connections_;
   ::seasocks::Server *server_;
+  const std::vector<std::unique_ptr<Subscriber>> &subscribers_;
+  const aos::FlatbufferDetachedBuffer<aos::Configuration> &config_;
 };
 
 // Seasocks requires that sends happen on the correct thread. This class takes a
@@ -37,7 +46,7 @@
 class UpdateData : public ::seasocks::Server::Runnable {
  public:
   UpdateData(::seasocks::WebSocket *websocket,
-             ::flatbuffers::DetachedBuffer &&buffer)
+             flatbuffers::DetachedBuffer &&buffer)
       : sock_(websocket), buffer_(std::move(buffer)) {}
   ~UpdateData() override = default;
   UpdateData(const UpdateData &) = delete;
@@ -47,7 +56,40 @@
 
  private:
   ::seasocks::WebSocket *sock_;
-  const ::flatbuffers::DetachedBuffer buffer_;
+  const flatbuffers::DetachedBuffer buffer_;
+};
+
+// Represents a fetcher and all the Connections that care about it.
+// Handles building the message and telling each connection to send it.
+// Indexed by the location of the channel it handles in the config.
+class Subscriber {
+ public:
+  Subscriber(std::unique_ptr<RawFetcher> fetcher, int channel_index)
+      : fbb_(1024),
+        fetcher_(std::move(fetcher)),
+        channel_index_(channel_index) {}
+
+  void RunIteration();
+
+  void AddListener(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) {
+    channels_.insert(channel);
+  }
+
+  void RemoveListener(
+      rtc::scoped_refptr<webrtc::DataChannelInterface> channel) {
+    channels_.erase(channel);
+  }
+
+  // Checks if the passed Channel matches the channel this fetches.
+  bool Compare(const Channel *channel) const;
+
+  int index() const { return channel_index_; }
+
+ private:
+  flatbuffers::FlatBufferBuilder fbb_;
+  std::unique_ptr<RawFetcher> fetcher_;
+  int channel_index_;
+  std::set<rtc::scoped_refptr<webrtc::DataChannelInterface>> channels_;
 };
 
 // Represents a single connection to a browser for the entire lifetime of the
@@ -56,10 +98,20 @@
                    public webrtc::CreateSessionDescriptionObserver,
                    public webrtc::DataChannelObserver {
  public:
-  Connection(::seasocks::WebSocket *sock, ::seasocks::Server *server);
+  Connection(::seasocks::WebSocket *sock, ::seasocks::Server *server,
+             const std::vector<std::unique_ptr<Subscriber>> &subscribers,
+             const aos::FlatbufferDetachedBuffer<aos::Configuration> &config);
+
+  ~Connection() {
+    // DataChannel may call OnStateChange after this is destroyed, so make sure
+    // it doesn't.  Guard against a connection that never finished negotiating
+    // and has no data channel yet.
+    if (data_channel_) {
+      data_channel_->UnregisterObserver();
+    }
+  }
 
   void HandleWebSocketData(const uint8_t *data, size_t size);
 
+  void Send(const flatbuffers::DetachedBuffer &buffer) const;
+
   // PeerConnectionObserver implementation
   void OnSignalingChange(
       webrtc::PeerConnectionInterface::SignalingState) override {}
@@ -88,13 +140,17 @@
   }
 
   // DataChannelObserver implementation
-  void OnStateChange() override {}
+  void OnStateChange() override;
   void OnMessage(const webrtc::DataBuffer &buffer) override;
   void OnBufferedAmountChange(uint64_t sent_data_size) override {}
 
  private:
   ::seasocks::WebSocket *sock_;
   ::seasocks::Server *server_;
+  const std::vector<std::unique_ptr<Subscriber>> &subscribers_;
+  const aos::FlatbufferDetachedBuffer<aos::Configuration> &config_;
+  std::map<int, rtc::scoped_refptr<webrtc::DataChannelInterface>> channels_;
+
   rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
   rtc::scoped_refptr<webrtc::DataChannelInterface> data_channel_;
 };
diff --git a/aos/network/web_proxy_main.cc b/aos/network/web_proxy_main.cc
index 37b3d1e..21b6f9c 100644
--- a/aos/network/web_proxy_main.cc
+++ b/aos/network/web_proxy_main.cc
@@ -1,23 +1,65 @@
+#include "aos/events/shm_event_loop.h"
 #include "aos/init.h"
-#include "aos/seasocks/seasocks_logger.h"
 #include "aos/network/web_proxy.h"
+#include "aos/seasocks/seasocks_logger.h"
+#include "gflags/gflags.h"
 
 #include "internal/Embedded.h"
 #include "seasocks/Server.h"
 #include "seasocks/WebSocket.h"
 
-int main() {
+DEFINE_string(config, "./config.json", "File path of aos configuration");
+DEFINE_string(data_dir, "www", "Directory to serve data files from");
+
+void RunDataThread(
+    std::vector<std::unique_ptr<aos::web_proxy::Subscriber>> *subscribers,
+    const aos::FlatbufferDetachedBuffer<aos::Configuration> &config) {
+  aos::ShmEventLoop event_loop(&config.message());
+
+  // TODO(alex): skip fetchers on the wrong node.
+  for (size_t i = 0; i < config.message().channels()->size(); ++i) {
+    auto channel = config.message().channels()->Get(i);
+    auto fetcher = event_loop.MakeRawFetcher(channel);
+    subscribers->emplace_back(
+        std::make_unique<aos::web_proxy::Subscriber>(std::move(fetcher), i));
+  }
+
+
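+  // Poll every subscriber on a timer.  Each iteration fans the latest message
+  // on each channel out to the connected WebRTC data channels.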
+  auto timer = event_loop.AddTimer([&]() {
+    for (auto &subscriber : *subscribers) {
+      subscriber->RunIteration();
+    }
+  });
+
+  event_loop.OnRun([&]() {
+    timer->Setup(event_loop.monotonic_now(), std::chrono::milliseconds(100));
+  });
+
+  event_loop.Run();
+}
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
   // Make sure to reference this to force the linker to include it.
   findEmbeddedContent("");
 
   aos::InitNRT();
 
-  seasocks::Server server(::std::shared_ptr<seasocks::Logger>(
-      new ::aos::seasocks::SeasocksLogger(seasocks::Logger::Level::Info)));
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
 
-  auto websocket_handler =
-      std::make_shared<aos::web_proxy::WebsocketHandler>(&server);
+  std::vector<std::unique_ptr<aos::web_proxy::Subscriber>> subscribers;
+
+  std::thread data_thread{
+      [&subscribers, &config]() { RunDataThread(&subscribers, config); }};
+
+  seasocks::Server server(std::shared_ptr<seasocks::Logger>(
+      new aos::seasocks::SeasocksLogger(seasocks::Logger::Level::Info)));
+
+  auto websocket_handler = std::make_shared<aos::web_proxy::WebsocketHandler>(
+      &server, subscribers, config);
   server.addWebSocketHandler("/ws", websocket_handler);
 
-  server.serve("aos/network/www", 8080);
+  server.serve(FLAGS_data_dir.c_str(), 8080);
 }
diff --git a/aos/network/web_proxy_utils.cc b/aos/network/web_proxy_utils.cc
new file mode 100644
index 0000000..dbcc2e6
--- /dev/null
+++ b/aos/network/web_proxy_utils.cc
@@ -0,0 +1,43 @@
+#include "aos/network/web_proxy_utils.h"
+
+namespace aos {
+namespace web_proxy {
+
+// Recommended max size is 64KiB for compatibility reasons. 256KiB theoretically
+// works on Chrome but seemed to have some consistency issues. Picked a size in
+// the middle which seems to work.
+constexpr size_t kPacketSize = 125000;
+
+int GetPacketCount(const Context &context) {
+  return (context.size + kPacketSize - 1) / kPacketSize;
+}
+
+flatbuffers::Offset<MessageHeader> PackMessage(
+    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
+    int channel_index, int packet_index) {
+  int packet_count = GetPacketCount(context);
+  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset;
+  if (kPacketSize * (packet_index + 1) < context.size) {
+    data_offset = fbb->CreateVector(
+        static_cast<uint8_t *>(context.data) + kPacketSize * packet_index,
+        kPacketSize);
+  } else {
+    int prefix_size = kPacketSize * packet_index;
+    data_offset =
+        fbb->CreateVector(static_cast<uint8_t *>(context.data) + prefix_size,
+                          context.size - prefix_size);
+  }
+
+  MessageHeader::Builder message_header_builder(*fbb);
+  message_header_builder.add_channel_index(channel_index);
+  message_header_builder.add_queue_index(context.queue_index);
+  message_header_builder.add_packet_count(packet_count);
+  message_header_builder.add_packet_index(packet_index);
+  message_header_builder.add_data(data_offset);
+  message_header_builder.add_length(context.size);
+
+  return message_header_builder.Finish();
+}
+
+}  // namespace web_proxy
+}  // namespace aos
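
Note: with GetPacketCount() as a ceiling division, every packet except the
last carries exactly kPacketSize bytes. A worked example (illustrative only,
not part of the change): a 300000-byte message gives

    GetPacketCount() == (300000 + 124999) / 125000 == 3
    packet 0: bytes [0, 125000)       (if branch, full packet)
    packet 1: bytes [125000, 250000)  (if branch, full packet)
    packet 2: bytes [250000, 300000)  (else branch, prefix_size == 250000)
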
diff --git a/aos/network/web_proxy_utils.h b/aos/network/web_proxy_utils.h
new file mode 100644
index 0000000..0672ddc
--- /dev/null
+++ b/aos/network/web_proxy_utils.h
@@ -0,0 +1,22 @@
+#include "aos/network/web_proxy_generated.h"
+#include "aos/events/event_loop.h"
+
+namespace aos {
+namespace web_proxy {
+
+int GetPacketCount(const Context &context);
+
+// Packs a message embedded in context into a MessageHeader table on fbb.
+// Handles multipart messages by use of the packet_index.
+// TODO(alex): make this an iterator that returns each packet sequentially.
+flatbuffers::Offset<MessageHeader> PackMessage(
+    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
+    int channel_index, int packet_index);
+
+}  // namespace web_proxy
+}  // namespace aos
+
+#endif  // AOS_NETWORK_WEB_PROXY_UTILS_H_
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 5faae12..76e8ef4 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -5,27 +5,42 @@
     name = "files",
     srcs = glob([
         "**/*.html",
+        "**/*.css",
     ]),
     visibility=["//visibility:public"],
 )
 
 ts_library(
     name = "proxy",
-    srcs = glob([
-        "*.ts",
-    ]),
+    srcs = [
+        "config_handler.ts",
+        "proxy.ts",
+    ],
     deps = [
         "//aos/network:web_proxy_ts_fbs",
     ],
+    visibility=["//visibility:public"],
+)
+
+ts_library(
+    name = "main",
+    srcs = [
+        "main.ts",
+        "ping_handler.ts",
+    ],
+    deps = [
+        ":proxy",
+        "//aos/events:ping_ts_fbs",
+    ],
 )
 
 rollup_bundle(
-    name = "proxy_bundle",
+    name = "main_bundle",
     entry_point = "aos/network/www/main",
     deps = [
-        "proxy",
+        "main",
     ],
-    visibility=["//visibility:public"],
+    visibility=["//aos:__subpackages__"],
 )
 
 genrule(
@@ -37,5 +52,5 @@
         "flatbuffers.js",
     ],
     cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
-    visibility=["//visibility:public"],
+    visibility=["//aos:__subpackages__"],
 )
diff --git a/aos/network/www/config_handler.ts b/aos/network/www/config_handler.ts
new file mode 100644
index 0000000..0af78b6
--- /dev/null
+++ b/aos/network/www/config_handler.ts
@@ -0,0 +1,67 @@
+import {aos} from 'aos/network/web_proxy_generated';
+
+export class ConfigHandler {
+  private readonly root_div = document.getElementById('config');
+
+  constructor(
+      private readonly config: aos.Configuration,
+      private readonly dataChannel: RTCDataChannel) {}
+
+  printConfig() {
+    for (let i = 0; i < this.config.channelsLength(); i++) {
+      const channel_div = document.createElement('div');
+      channel_div.classList.add('channel');
+      this.root_div.appendChild(channel_div);
+
+      const input_el = document.createElement('input');
+      input_el.setAttribute('data-index', i.toString());
+      input_el.setAttribute('type', 'checkbox');
+      input_el.addEventListener('click', () => this.handleChange());
+      channel_div.appendChild(input_el);
+
+      const name_div = document.createElement('div');
+      const name_text = document.createTextNode(this.config.channels(i).name());
+      name_div.appendChild(name_text);
+      const type_div = document.createElement('div');
+      const type_text = document.createTextNode(this.config.channels(i).type());
+      type_div.appendChild(type_text);
+      const info_div = document.createElement('div');
+      info_div.appendChild(name_div);
+      info_div.appendChild(type_div);
+
+      channel_div.appendChild(info_div);
+    }
+  }
+
+  handleChange() {
+    const toggles = this.root_div.getElementsByTagName('input');
+    const builder = new flatbuffers.Builder(512);
+
+    const channels: flatbuffers.Offset[] = [];
+    for (const toggle of toggles) {
+      if (!toggle.checked) {
+        continue;
+      }
+      const index = Number(toggle.getAttribute('data-index'));
+      const channel = this.config.channels(index);
+      const namefb = builder.createString(channel.name());
+      const typefb = builder.createString(channel.type());
+      aos.Channel.startChannel(builder);
+      aos.Channel.addName(builder, namefb);
+      aos.Channel.addType(builder, typefb);
+      const channelfb = aos.Channel.endChannel(builder);
+      channels.push(channelfb);
+    }
+
+    const channelsfb =
+        aos.message_bridge.Connect.createChannelsToTransferVector(
+            builder, channels);
+    aos.message_bridge.Connect.startConnect(builder);
+    aos.message_bridge.Connect.addChannelsToTransfer(builder, channelsfb);
+    const connect = aos.message_bridge.Connect.endConnect(builder);
+    builder.finish(connect);
+    const array = builder.asUint8Array();
+    console.log('connect', array);
+    this.dataChannel.send(array.buffer.slice(array.byteOffset));
+  }
+}
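
For reference, the Connect flatbuffer built in handleChange() is equivalent
to JSON of the following shape (the channel shown is illustrative, not part
of the change):

    {
      "channels_to_transfer": [
        {"name": "/drivetrain", "type": "frc971.control_loops.drivetrain.Status"}
      ]
    }
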
diff --git a/aos/network/www/index.html b/aos/network/www/index.html
index bc90d40..97e16d4 100644
--- a/aos/network/www/index.html
+++ b/aos/network/www/index.html
@@ -1,6 +1,11 @@
 <html>
-  <body>
+  <head>
     <script src="flatbuffers.js"></script>
-    <script src="proxy_bundle.min.js"></script>
+    <script src="main_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
+  </head>
+  <body>
+    <div id="config">
+    </div>
   </body>
 </html>
diff --git a/aos/network/www/main.ts b/aos/network/www/main.ts
index 5a3165e..1840ffb 100644
--- a/aos/network/www/main.ts
+++ b/aos/network/www/main.ts
@@ -1,5 +1,10 @@
 import {Connection} from './proxy';
+import * as PingHandler from './ping_handler';
 
 const conn = new Connection();
 
 conn.connect();
+
+PingHandler.SetupDom();
+
+conn.addHandler(PingHandler.GetId(), PingHandler.HandlePing);
diff --git a/aos/network/www/ping_handler.ts b/aos/network/www/ping_handler.ts
new file mode 100644
index 0000000..9b37d70
--- /dev/null
+++ b/aos/network/www/ping_handler.ts
@@ -0,0 +1,26 @@
+import {aos} from 'aos/events/ping_generated';
+
+export function HandlePing(data: Uint8Array) {
+  const fbBuffer = new flatbuffers.ByteBuffer(data);
+  const ping = aos.examples.Ping.getRootAsPing(fbBuffer);
+
+  document.getElementById('val').innerHTML = ping.value().toString();
+  document.getElementById('time').innerHTML = ping.sendTime().low.toString();
+}
+
+export function SetupDom() {
+  const ping_div = document.createElement('div');
+  document.body.appendChild(ping_div);
+
+  const value_div = document.createElement('div');
+  value_div.setAttribute('id', 'val');
+  const time_div = document.createElement('div');
+  time_div.setAttribute('id', 'time');
+
+  ping_div.appendChild(value_div);
+  ping_div.appendChild(time_div);
+}
+
+export function GetId() {
+  return aos.examples.Ping.getFullyQualifiedName();
+}
diff --git a/aos/network/www/proxy.ts b/aos/network/www/proxy.ts
index 1ef6320..27ffbfe 100644
--- a/aos/network/www/proxy.ts
+++ b/aos/network/www/proxy.ts
@@ -1,4 +1,41 @@
-import {aos.web_proxy} from '../web_proxy_generated';
+import {aos} from 'aos/network/web_proxy_generated';
+import {ConfigHandler} from './config_handler';
+
+// There is one Handler per DataChannel. It maintains the state of multi-part
+// messages and delegates to a callback once a message has been fully
+// assembled.
+export class Handler {
+  private dataBuffer: Uint8Array|null = null;
+  private receivedMessageLength: number = 0;
+  constructor(
+      private readonly handlerFunc: (data: Uint8Array) => void,
+      private readonly channel: RTCDataChannel) {
+    channel.addEventListener('message', (e) => this.handleMessage(e));
+  }
+
+  handleMessage(e: MessageEvent): void {
+    const fbBuffer = new flatbuffers.ByteBuffer(new Uint8Array(e.data));
+    const messageHeader =
+        aos.web_proxy.MessageHeader.getRootAsMessageHeader(fbBuffer);
+    // Short circuit if only one packet
+    if (messageHeader.packetCount() === 1) {
+      this.handlerFunc(messageHeader.dataArray());
+      return;
+    }
+
+    if (messageHeader.packetIndex() === 0) {
+      this.dataBuffer = new Uint8Array(messageHeader.length());
+      this.receivedMessageLength = 0;
+    }
+    this.dataBuffer.set(
+        messageHeader.dataArray(), this.receivedMessageLength);
+    this.receivedMessageLength += messageHeader.dataLength();
+
+    if (messageHeader.packetIndex() === messageHeader.packetCount() - 1) {
+      this.handlerFunc(this.dataBuffer);
+    }
+  }
+}
 
 // Analogous to the Connection class in //aos/network/web_proxy.h. Because most
 // of the apis are native in JS, it is much simpler.
@@ -7,12 +44,20 @@
   private rtcPeerConnection: RTCPeerConnection|null = null;
   private dataChannel: DataChannel|null = null;
   private webSocketUrl: string;
+  private configHandler: ConfigHandler|null = null;
+  private config: aos.Configuration|null = null;
+  private readonly handlerFuncs = new Map<string, (data: Uint8Array) => void>();
+  private readonly handlers = new Set<Handler>();
 
   constructor() {
     const server = location.host;
     this.webSocketUrl = `ws://${server}/ws`;
   }
 
+  addHandler(id: string, handler: (data: Uint8Array) => void): void {
+    this.handlerFuncs.set(id, handler);
+  }
+
   connect(): void {
     this.webSocketConnection = new WebSocket(this.webSocketUrl);
     this.webSocketConnection.binaryType = 'arraybuffer';
@@ -22,14 +67,29 @@
         'message', (e) => this.onWebSocketMessage(e));
   }
 
-  // Handle messages on the DataChannel. Will delegate to various handlers for
-  // different message types.
+  // Handle messages on the signalling DataChannel. Only the Configuration
+  // message is handled here; all other messages arrive on their own channels.
   onDataChannelMessage(e: MessageEvent): void {
-    console.log(e);
+    const fbBuffer = new flatbuffers.ByteBuffer(new Uint8Array(e.data));
+    // TODO(alex): handle config updates if/when required
+    if (!this.configHandler) {
+      const config = aos.Configuration.getRootAsConfiguration(fbBuffer);
+      this.config = config;
+      this.configHandler = new ConfigHandler(config, this.dataChannel);
+      this.configHandler.printConfig();
+      return;
+    }
+  }
+
+  onDataChannel(ev: RTCDataChannelEvent): void {
+    const channel = ev.channel;
+    const name = channel.label;
+    const channelType = name.split('/').pop();
+    const handlerFunc = this.handlerFuncs.get(channelType);
+    this.handlers.add(new Handler(handlerFunc, channel));
   }
 
   onIceCandidate(e: RTCPeerConnectionIceEvent): void {
-    console.log('Created ice candidate', e);
     if (!e.candidate) {
       return;
     }
@@ -49,7 +109,6 @@
 
   // Called for new SDPs. Make sure to set it locally and remotely.
   onOfferCreated(description: RTCSessionDescription): void {
-    console.log('Created offer', description);
     this.rtcPeerConnection.setLocalDescription(description);
     const builder = new flatbuffers.Builder(512);
     const offerString = builder.createString(description.sdp);
@@ -67,7 +126,9 @@
   // want a DataChannel, so create it and then create an offer to send.
   onWebSocketOpen(): void {
     this.rtcPeerConnection = new RTCPeerConnection({});
-    this.dataChannel = this.rtcPeerConnection.createDataChannel('dc');
+    this.rtcPeerConnection.addEventListener(
+        'datachannel', (e) => this.onDataChannel(e));
+    this.dataChannel = this.rtcPeerConnection.createDataChannel('signalling');
     this.dataChannel.addEventListener(
         'message', (e) => this.onDataChannelMessage(e));
     window.dc = this.dataChannel;
@@ -81,14 +142,12 @@
   // and handle appropriately. Either by setting the remote description or
   // adding the remote ice candidate.
   onWebSocketMessage(e: MessageEvent): void {
-    console.log('ws: ', e);
     const buffer = new Uint8Array(e.data)
     const fbBuffer = new flatbuffers.ByteBuffer(buffer);
     const message =
         aos.web_proxy.WebSocketMessage.getRootAsWebSocketMessage(fbBuffer);
     switch (message.payloadType()) {
       case aos.web_proxy.Payload.WebSocketSdp:
-        console.log('got an sdp message');
         const sdpFb = message.payload(new aos.web_proxy.WebSocketSdp());
         if (sdpFb.type() !== aos.web_proxy.SdpType.ANSWER) {
           console.log('got something other than an answer back');
@@ -98,7 +157,6 @@
             {'type': 'answer', 'sdp': sdpFb.payload()}));
         break;
       case aos.web_proxy.Payload.WebSocketIce:
-        console.log('got an ice message');
         const iceFb = message.payload(new aos.web_proxy.WebSocketIce());
         const candidate = {} as RTCIceCandidateInit;
         candidate.candidate = iceFb.candidate();
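
Continuing the worked example from web_proxy_utils.cc on the receiving side:
for that 300000-byte message the Handler above sees three MessageHeaders.
packetIndex() == 0 allocates dataBuffer with length() == 300000 and resets
receivedMessageLength; each packet's dataArray() is copied in at the current
offset; and once packetIndex() reaches packetCount() - 1 the assembled buffer
is handed to handlerFunc.
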
diff --git a/aos/network/www/styles.css b/aos/network/www/styles.css
new file mode 100644
index 0000000..23ceb21
--- /dev/null
+++ b/aos/network/www/styles.css
@@ -0,0 +1,5 @@
+.channel {
+  display: flex;
+  border-bottom: 1px solid;
+  font-size: 24px;
+}
diff --git a/aos/time/time.cc b/aos/time/time.cc
index a0423ea..03fe45b 100644
--- a/aos/time/time.cc
+++ b/aos/time/time.cc
@@ -115,6 +115,12 @@
 struct timespec to_timespec(const ::aos::monotonic_clock::time_point time) {
   return to_timespec(time.time_since_epoch());
 }
+
+::aos::monotonic_clock::time_point from_timeval(struct timeval t) {
+  return monotonic_clock::epoch() + std::chrono::seconds(t.tv_sec) +
+         std::chrono::microseconds(t.tv_usec);
+}
+
 }  // namespace time
 
 constexpr monotonic_clock::time_point monotonic_clock::min_time;
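
Since from_timeval() simply adds the two fields, the usual timeval convention
of a floored tv_sec plus a non-negative tv_usec works out naturally for
negative times. A sketch of the mixed-sign case (illustrative, not part of the
change):

    struct timeval t;
    t.tv_sec = -2;
    t.tv_usec = 500000;
    // epoch + (-2 s) + 0.5 s == epoch - 1500 ms.
    CHECK(aos::time::from_timeval(t) ==
          aos::monotonic_clock::epoch() - std::chrono::milliseconds(1500));
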
diff --git a/aos/time/time.h b/aos/time/time.h
index 27e4c70..e31de5c 100644
--- a/aos/time/time.h
+++ b/aos/time/time.h
@@ -99,6 +99,9 @@
 // epoch.
 struct timespec to_timespec(::aos::monotonic_clock::time_point time);
 
+// Converts a timeval object to a monotonic_clock::time_point.
+::aos::monotonic_clock::time_point from_timeval(struct timeval t);
+
 namespace time_internal {
 
 template <class T>
diff --git a/aos/time/time_test.cc b/aos/time/time_test.cc
index 8edaf9b..c5eb634 100644
--- a/aos/time/time_test.cc
+++ b/aos/time/time_test.cc
@@ -49,6 +49,21 @@
   EXPECT_EQ(neg_time.tv_nsec, 0);
 }
 
+// Tests from_timeval.
+TEST(TimeTest, TimevalToTimePoint) {
+  struct timeval pos_time;
+  pos_time.tv_sec = 1432423;
+  pos_time.tv_usec = 0;
+  EXPECT_EQ(::aos::monotonic_clock::epoch() + chrono::seconds(1432423),
+            from_timeval(pos_time));
+
+  struct timeval neg_time;
+  neg_time.tv_sec = -1432423;
+  neg_time.tv_usec = 0;
+  EXPECT_EQ(::aos::monotonic_clock::epoch() - chrono::seconds(1432423),
+            from_timeval(neg_time));
+}
+
 // Test that << works with numbers with leading 0's.
 TEST(TimeTest, OperatorStream) {
   const monotonic_clock::time_point t = monotonic_clock::epoch() +
diff --git a/frc971/control_loops/drivetrain/drivetrain.cc b/frc971/control_loops/drivetrain/drivetrain.cc
index 094a947..1afc32d 100644
--- a/frc971/control_loops/drivetrain/drivetrain.cc
+++ b/frc971/control_loops/drivetrain/drivetrain.cc
@@ -147,6 +147,9 @@
   while (imu_values_fetcher_.FetchNext()) {
     imu_zeroer_.ProcessMeasurement(*imu_values_fetcher_);
     last_gyro_time_ = monotonic_now;
+    if (!imu_zeroer_.Zeroed()) {
+      continue;
+    }
     aos::monotonic_clock::time_point reading_time(std::chrono::nanoseconds(
         imu_values_fetcher_->monotonic_timestamp_ns()));
     if (last_imu_update_ == aos::monotonic_clock::min_time) {
diff --git a/frc971/control_loops/drivetrain/drivetrain_config.json b/frc971/control_loops/drivetrain/drivetrain_config.json
index 649f9f0..f452bc6 100644
--- a/frc971/control_loops/drivetrain/drivetrain_config.json
+++ b/frc971/control_loops/drivetrain/drivetrain_config.json
@@ -29,7 +29,8 @@
     {
       "name": "/drivetrain",
       "type": "frc971.control_loops.drivetrain.Status",
-      "frequency": 200
+      "frequency": 200,
+      "max_size": 2000
     },
     {
       "name": "/drivetrain",
diff --git a/frc971/control_loops/drivetrain/drivetrain_status.fbs b/frc971/control_loops/drivetrain/drivetrain_status.fbs
index 0e49552..3642131 100644
--- a/frc971/control_loops/drivetrain/drivetrain_status.fbs
+++ b/frc971/control_loops/drivetrain/drivetrain_status.fbs
@@ -99,6 +99,34 @@
   rel_theta:float;
 }
 
+// Current states of the EKF. See hybrid_ekf.h for detailed comments.
+table LocalizerState {
+  // X/Y field position, in meters.
+  x:float;
+  y:float;
+  // Current heading, in radians.
+  theta:float;
+  // Current estimate of the left encoder position, in meters.
+  left_encoder:float;
+  // Velocity of the left side of the robot.
+  left_velocity:float;
+  // Current estimate of the right encoder position, in meters.
+  right_encoder:float;
+  // Velocity of the right side of the robot.
+  right_velocity:float;
+  // Current "voltage error" terms, in V.
+  left_voltage_error:float;
+  right_voltage_error:float;
+  // Estimate of the offset between the rotation rate implied by the encoder
+  // readings and the true rotation rate of the robot, in rad / sec.
+  angular_error:float;
+  // Current difference between the estimated longitudinal velocity of the robot
+  // and that experienced by the wheels, in m/s.
+  longitudinal_velocity_offset:float;
+  // Lateral velocity of the robot, in m/s.
+  lateral_velocity:float;
+}
+
 table DownEstimatorState {
   quaternion_x:double;
   quaternion_y:double;
@@ -115,9 +143,54 @@
   // All angles in radians.
   lateral_pitch:float;
   longitudinal_pitch:float;
-  // Current yaw angle (heading) of the robot, as estimated solely by the down
-  // estimator.
+  // Current yaw angle (heading) of the robot, as estimated solely by
+  // integrating the Z-axis of the gyro (in rad).
   yaw:float;
+
+  // Current position of the robot, as determined solely from the
+  // IMU/down-estimator, in meters.
+  position_x:float;
+  position_y:float;
+  position_z:float;
+
+  // Current velocity of the robot, as determined solely from the
+  // IMU/down-estimator, in meters / sec.
+  velocity_x:float;
+  velocity_y:float;
+  velocity_z:float;
+
+  // Current acceleration of the robot, with pitch/roll (but not yaw)
+  // compensated out, in meters / sec / sec.
+  accel_x:float;
+  accel_y:float;
+  accel_z:float;
+
+  // Current acceleration that we expect to see from the accelerometer, assuming
+  // no acceleration other than that due to gravity, in g's.
+  expected_accel_x:float;
+  expected_accel_y:float;
+  expected_accel_z:float;
+
+  // Current estimate of the overall acceleration due to gravity, in g's. Should
+  // generally be within ~0.003 g's of 1.0.
+  gravity_magnitude:float;
+
+  consecutive_still:int;
+}
+
+table ImuZeroerState {
+  // True if we have successfully zeroed the IMU.
+  zeroed:bool;
+  // True if the zeroing code has observed some inconsistency in the IMU.
+  faulted:bool;
+  // Number of continuous zeroing measurements that we have accumulated for use
+  // in the zeroing.
+  number_of_zeroes:int;
+
+  // Current zeroing values being used for each gyro axis, in rad / sec.
+  gyro_x_average:float;
+  gyro_y_average:float;
+  gyro_z_average:float;
 }
 
 table Status {
@@ -174,6 +247,10 @@
   poly_drive_logging:PolyDriveLogging;
 
   down_estimator:DownEstimatorState;
+
+  localizer:LocalizerState;
+
+  zeroing:ImuZeroerState;
 }
 
 root_type Status;
diff --git a/frc971/control_loops/pose.h b/frc971/control_loops/pose.h
index 13bd6e6..20672cf 100644
--- a/frc971/control_loops/pose.h
+++ b/frc971/control_loops/pose.h
@@ -9,6 +9,20 @@
 namespace frc971 {
 namespace control_loops {
 
+// Constructs a homogeneous transformation matrix for rotating about the Z axis.
+template <typename Scalar>
+Eigen::Matrix<Scalar, 4, 4> TransformationMatrixForYaw(Scalar yaw) {
+  Eigen::Matrix<Scalar, 4, 4> matrix;
+  matrix.setIdentity();
+  const Scalar stheta = std::sin(yaw);
+  const Scalar ctheta = std::cos(yaw);
+  matrix(0, 0) = ctheta;
+  matrix(1, 1) = ctheta;
+  matrix(0, 1) = -stheta;
+  matrix(1, 0) = stheta;
+  return matrix;
+}
+
 // Provides a representation of a transformation on the field.
 // Currently, this is heavily geared towards things that occur in a 2-D plane.
 // The Z-axis is rarely used (but still relevant; e.g., in 2019 some of the
@@ -41,7 +55,7 @@
   // The type that contains the translational (x, y, z) component of the Pose.
   typedef Eigen::Matrix<Scalar, 3, 1> Pos;
 
-  // Provide a default constructor that crease a pose at the origin.
+  // Provide a default constructor that creates a pose at the origin.
   TypedPose() : TypedPose({0.0, 0.0, 0.0}, 0.0) {}
 
   // Construct a Pose in the absolute frame with a particular position and yaw.
@@ -54,6 +68,15 @@
   TypedPose(const TypedPose<Scalar> *base, const Pos &rel_pos, Scalar rel_theta)
       : base_(base), pos_(rel_pos), theta_(rel_theta) {}
 
+  // Constructs a Pose from a homogeneous transformation matrix. Ignores the
+  // pitch/roll components of the rotation. Ignores the bottom row.
+  TypedPose(const Eigen::Matrix<Scalar, 4, 4> &H) {
+    pos_ = H.template block<3, 1>(0, 3);
+    const Eigen::Matrix<Scalar, 3, 1> rotated_x =
+        H.template block<3, 3>(0, 0) * Eigen::Matrix<Scalar, 3, 1>::UnitX();
+    theta_ = std::atan2(rotated_x.y(), rotated_x.x());
+  }
+
   // Calculate the current global position of this Pose.
   Pos abs_pos() const {
     if (base_ == nullptr) {
@@ -93,6 +116,15 @@
     return abs_pos().template topRows<2>();
   }
 
+  // Returns a transformation matrix representing this pose--note that this
+  // explicitly does not include the base position, so this is equivalent to a
+  // translation and rotation by rel_pos and rel_theta.
+  Eigen::Matrix<Scalar, 4, 4> AsTransformationMatrix() const {
+    Eigen::Matrix<Scalar, 4, 4> matrix = TransformationMatrixForYaw(theta_);
+    matrix.template block<3, 1>(0, 3) = pos_;
+    return matrix;
+  }
+
   // Provide a copy of this that is set to have the same
   // current absolute Pose as this, but have a different base.
   // This can be used, e.g., to compute a Pose for a vision target that is
@@ -104,6 +136,14 @@
   // the returned object (unless new_base == nullptr).
   TypedPose Rebase(const TypedPose<Scalar> *new_base) const;
 
+  // Convert this pose to the heading/distance/skew numbers that we
+  // traditionally use for EKF corrections.
+  Eigen::Matrix<Scalar, 3, 1> ToHeadingDistanceSkew() const {
+    const Scalar target_heading = heading();
+    return {target_heading, xy_norm(),
+            aos::math::NormalizeAngle(rel_theta() - target_heading)};
+  }
+
  private:
   // A rotation-matrix like representation of the rotation for a given angle.
   inline static Eigen::AngleAxis<Scalar> YawRotation(double theta) {
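
For reference, TransformationMatrixForYaw(theta), with the translation column
filled in by AsTransformationMatrix(), produces

    [ cos(theta)  -sin(theta)  0  x ]
    [ sin(theta)   cos(theta)  0  y ]
    [     0             0      1  z ]
    [     0             0      0  1 ]

which matches the expected matrix spelled out in the test below for
theta == pi / 2 and pos == (1, 2, 3).
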
diff --git a/frc971/control_loops/pose_test.cc b/frc971/control_loops/pose_test.cc
index 4fd3f15..18486e3 100644
--- a/frc971/control_loops/pose_test.cc
+++ b/frc971/control_loops/pose_test.cc
@@ -83,6 +83,56 @@
   EXPECT_NEAR(rel1.abs_theta(), abs.rel_theta(), kEps);
 }
 
+// Tests that we can go between transformation matrices and Pose objects.
+TEST(PoseTest, TransformationMatrixTest) {
+  // First, sanity check the basic case.
+  Pose pose({0, 0, 0}, 0);
+  typedef Eigen::Matrix<double, 4, 4> TransformationMatrix;
+  ASSERT_EQ(TransformationMatrix::Identity(), pose.AsTransformationMatrix());
+  Pose reproduced_pose(pose.AsTransformationMatrix());
+  ASSERT_EQ(reproduced_pose.rel_pos(), pose.rel_pos());
+  ASSERT_EQ(reproduced_pose.rel_theta(), pose.rel_theta());
+  // Check a basic case of rotation + translation.
+  *pose.mutable_pos() << 1, 2, 3;
+  pose.set_theta(M_PI_2);
+  TransformationMatrix expected;
+  expected << 0, -1, 0, 1,  // row 0
+              1,  0, 0, 2,  // row 1
+              0,  0, 1, 3,  // row 2
+              0,  0, 0, 1;  // row 3
+  TransformationMatrix pose_transformation =
+      pose.AsTransformationMatrix();
+  ASSERT_LT((expected - pose_transformation).norm(), 1e-15)
+      << "expected:\n"
+      << expected << "\nBut got:\n"
+      << pose_transformation;
+  ASSERT_EQ(Eigen::Vector4d(1, 2, 3, 1),
+            pose_transformation * Eigen::Vector4d(0, 0, 0, 1));
+  ASSERT_LT((Eigen::Vector4d(0, 3, 3, 1) -
+             pose_transformation * Eigen::Vector4d(1, 1, 0, 1))
+                .norm(),
+            1e-15)
+      << "got " << pose_transformation * Eigen::Vector4d(1, 1, 0, 1);
+
+  // Also, confirm that setting a new base does not affect the pose.
+  Pose faux_base({1, 1, 1}, 1);
+  pose.set_base(&faux_base);
+
+  ASSERT_EQ(pose_transformation, pose.AsTransformationMatrix());
+
+  reproduced_pose = Pose(pose_transformation);
+  ASSERT_EQ(reproduced_pose.rel_pos(), pose.rel_pos());
+  ASSERT_EQ(reproduced_pose.rel_theta(), pose.rel_theta());
+  // And check that if we introduce a pitch to the transformation matrix that it
+  // does not impact the resulting Pose (which only has a yaw component).
+  pose_transformation.block<3, 3>(0, 0) =
+      Eigen::AngleAxis<double>(0.5, Eigen::Vector3d::UnitX()) *
+      pose_transformation.block<3, 3>(0, 0);
+  reproduced_pose = Pose(pose_transformation);
+  ASSERT_EQ(reproduced_pose.rel_pos(), pose.rel_pos());
+  ASSERT_EQ(reproduced_pose.rel_theta(), pose.rel_theta());
+}
+
 // Tests that basic accessors for LineSegment behave as expected.
 TEST(LineSegmentTest, BasicAccessorTest) {
   LineSegment l;
diff --git a/frc971/zeroing/BUILD b/frc971/zeroing/BUILD
index a2b908b..75d610a 100644
--- a/frc971/zeroing/BUILD
+++ b/frc971/zeroing/BUILD
@@ -35,6 +35,8 @@
     ],
     deps = [
         ":averager",
+        "//frc971/control_loops:control_loops_fbs",
+        "//frc971/control_loops/drivetrain:drivetrain_status_fbs",
         "//frc971/wpilib:imu_fbs",
         "@com_github_google_glog//:glog",
         "@org_tuxfamily_eigen//:eigen",
diff --git a/frc971/zeroing/averager.h b/frc971/zeroing/averager.h
index b6ff09d..23a1b06 100644
--- a/frc971/zeroing/averager.h
+++ b/frc971/zeroing/averager.h
@@ -19,6 +19,13 @@
 class Averager {
  public:
   typedef Eigen::Matrix<Scalar, rows_per_sample, 1> Vector;
+  Averager() {
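+    // Zero the storage so averages taken before the buffer fills are
+    // well-defined instead of reading uninitialized memory.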
+    for (Vector &datum : data_) {
+      datum.setZero();
+    }
+  }
   // Adds one data point to the set of data points to be averaged.
   // If more than "num_samples" samples are added, they will start overwriting
   // the oldest ones.
diff --git a/frc971/zeroing/imu_zeroer.cc b/frc971/zeroing/imu_zeroer.cc
index 25bf6f6..b13b43d 100644
--- a/frc971/zeroing/imu_zeroer.cc
+++ b/frc971/zeroing/imu_zeroer.cc
@@ -4,22 +4,30 @@
 
 ImuZeroer::ImuZeroer() {
   gyro_average_.setZero();
+  accel_average_.setZero();
   last_gyro_sample_.setZero();
   last_accel_sample_.setZero();
 }
 
-bool ImuZeroer::Zeroed() const { return zeroed_ || Faulted(); }
+bool ImuZeroer::Zeroed() const {
+  return num_zeroes_ > kRequiredZeroPoints || Faulted();
+}
 
 bool ImuZeroer::Faulted() const { return faulted_; }
 
 Eigen::Vector3d ImuZeroer::ZeroedGyro() const {
   return last_gyro_sample_ - gyro_average_;
 }
-Eigen::Vector3d ImuZeroer::ZeroedAccel() const { return last_accel_sample_; }
+Eigen::Vector3d ImuZeroer::ZeroedAccel() const {
+  return last_accel_sample_ - accel_average_;
+}
 Eigen::Vector3d ImuZeroer::GyroOffset() const { return gyro_average_; }
 
 bool ImuZeroer::GyroZeroReady() const {
-  return gyro_averager_.full() && gyro_averager_.GetRange() < kGyroMaxVariation;
+  return gyro_averager_.full() &&
+         gyro_averager_.GetRange() < kGyroMaxVariation &&
+         (last_gyro_sample_.lpNorm<Eigen::Infinity>() <
+          kGyroMaxZeroingMagnitude);
 }
 
 bool ImuZeroer::AccelZeroReady() const {
@@ -34,19 +42,45 @@
                            values.accelerometer_z();
   accel_averager_.AddData(last_accel_sample_);
   if (GyroZeroReady() && AccelZeroReady()) {
-    if (!zeroed_) {
-      zeroed_ = true;
-      gyro_average_ = gyro_averager_.GetAverage();
-    } else {
-      // If we got a new zero and it is substantially different from the
-      // original zero, fault.
-      if ((gyro_averager_.GetAverage() - gyro_average_).norm() >
-          kGyroFaultVariation) {
-        faulted_ = true;
+    ++good_iters_;
+    if (good_iters_ > kSamplesToAverage / 4) {
+      const Eigen::Vector3d current_gyro_average = gyro_averager_.GetAverage();
+      constexpr double kAverageUpdateWeight = 0.05;
+      if (num_zeroes_ > 0) {
+        gyro_average_ +=
+            (current_gyro_average - gyro_average_) * kAverageUpdateWeight;
+      } else {
+        gyro_average_ = current_gyro_average;
       }
+      if (num_zeroes_ > 0) {
+        // If we got a new zero and it is substantially different from the
+        // original zero, fault.
+        if ((current_gyro_average - gyro_average_).norm() >
+            kGyroFaultVariation) {
+          faulted_ = true;
+        }
+      }
+      ++num_zeroes_;
+      gyro_averager_.Reset();
     }
-    gyro_averager_.Reset();
+  } else {
+    good_iters_ = 0;
   }
 }
 
+flatbuffers::Offset<control_loops::drivetrain::ImuZeroerState>
+ImuZeroer::PopulateStatus(flatbuffers::FlatBufferBuilder *fbb) const {
+  control_loops::drivetrain::ImuZeroerState::Builder builder(*fbb);
+
+  builder.add_zeroed(Zeroed());
+  builder.add_faulted(Faulted());
+  builder.add_number_of_zeroes(num_zeroes_);
+
+  builder.add_gyro_x_average(GyroOffset().x());
+  builder.add_gyro_y_average(GyroOffset().y());
+  builder.add_gyro_z_average(GyroOffset().z());
+
+  return builder.Finish();
+}
+
 }  // namespace frc971::zeroing
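
A note on the averaging change: once an initial zero exists, each subsequent
zero estimate updates the stored offset as

    gyro_average_ = 0.95 * gyro_average_ + 0.05 * current_gyro_average

i.e. an exponential moving average with an effective horizon of roughly
1 / kAverageUpdateWeight = 20 zeroing cycles, so the offset can track slow
drift without jumping on any single noisy estimate.
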
diff --git a/frc971/zeroing/imu_zeroer.h b/frc971/zeroing/imu_zeroer.h
index 2662934..b55c6dd 100644
--- a/frc971/zeroing/imu_zeroer.h
+++ b/frc971/zeroing/imu_zeroer.h
@@ -1,6 +1,7 @@
 #ifndef FRC971_ZEROING_IMU_ZEROER_H_
 #define FRC971_ZEROING_IMU_ZEROER_H_
 
+#include "frc971/control_loops/drivetrain/drivetrain_status_generated.h"
 #include "frc971/wpilib/imu_generated.h"
 #include "frc971/zeroing/averager.h"
 
@@ -15,7 +16,8 @@
   // constant number of samples...
   // TODO(james): Run average and GetRange calculations over every sample on
   // every timestep, to provide consistent timing.
-  static constexpr size_t kSamplesToAverage = 1000.0;
+  static constexpr size_t kSamplesToAverage = 1000;
+  static constexpr size_t kRequiredZeroPoints = 10;
 
   ImuZeroer();
   bool Zeroed() const;
@@ -24,14 +26,22 @@
   Eigen::Vector3d GyroOffset() const;
   Eigen::Vector3d ZeroedGyro() const;
   Eigen::Vector3d ZeroedAccel() const;
+
+  flatbuffers::Offset<control_loops::drivetrain::ImuZeroerState> PopulateStatus(
+      flatbuffers::FlatBufferBuilder *fbb) const;
+
  private:
   // Max variation (difference between the maximum and minimum value) in a
   // kSamplesToAverage range before we allow using the samples for zeroing.
   // These values are currently based on looking at results from the ADIS16448.
-  static constexpr double kGyroMaxVariation = 0.02;     // rad / sec
+  static constexpr double kGyroMaxVariation = 0.02;        // rad / sec
+  // Maximum magnitude we allow the gyro zero to have--this is used to prevent
+  // us from zeroing the gyro if we just happen to be spinning at a very
+  // consistent non-zero rate. Currently this is only plausible in simulation.
+  static constexpr double kGyroMaxZeroingMagnitude = 0.1;  // rad / sec
   // Max variation in the range before we consider the accelerometer readings to
   // be steady.
-  static constexpr double kAccelMaxVariation = 0.02;    // g's
+  static constexpr double kAccelMaxVariation = 0.05;    // g's
   // If we ever are able to rezero and get a zero that is more than
   // kGyroFaultVariation away from the original zeroing, fault.
   static constexpr double kGyroFaultVariation = 0.005;  // rad / sec
@@ -46,12 +56,13 @@
   Averager<double, kSamplesToAverage, 3> accel_averager_;
   // The average zero position of the gyro.
   Eigen::Vector3d gyro_average_;
+  Eigen::Vector3d accel_average_;
   Eigen::Vector3d last_gyro_sample_;
   Eigen::Vector3d last_accel_sample_;
-  // Whether we have currently zeroed yet.
-  bool zeroed_ = false;
   // Whether the zeroing has faulted at any point thus far.
   bool faulted_ = false;
+  size_t good_iters_ = 0;
+  size_t num_zeroes_ = 0;
 };
 
 }  // namespace frc971::zeroing
diff --git a/frc971/zeroing/imu_zeroer_test.cc b/frc971/zeroing/imu_zeroer_test.cc
index 9ec52da..7a74413 100644
--- a/frc971/zeroing/imu_zeroer_test.cc
+++ b/frc971/zeroing/imu_zeroer_test.cc
@@ -4,6 +4,9 @@
 
 namespace frc971::zeroing {
 
+static constexpr int kMinSamplesToZero =
+    2 * ImuZeroer::kSamplesToAverage * ImuZeroer::kRequiredZeroPoints;
+
 aos::FlatbufferDetachedBuffer<IMUValues> MakeMeasurement(
     const Eigen::Vector3d &gyro, const Eigen::Vector3d &accel) {
   flatbuffers::FlatBufferBuilder fbb;
@@ -29,13 +32,14 @@
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().norm());
   // A measurement before we are zeroed should just result in the measurement
   // being passed through without modification.
-  zeroer.ProcessMeasurement(MakeMeasurement({1, 2, 3}, {4, 5, 6}).message());
+  zeroer.ProcessMeasurement(
+      MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   ASSERT_FALSE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
   ASSERT_EQ(0.0, zeroer.GyroOffset().norm());
-  ASSERT_EQ(1.0, zeroer.ZeroedGyro().x());
-  ASSERT_EQ(2.0, zeroer.ZeroedGyro().y());
-  ASSERT_EQ(3.0, zeroer.ZeroedGyro().z());
+  ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().x());
+  ASSERT_FLOAT_EQ(0.02, zeroer.ZeroedGyro().y());
+  ASSERT_FLOAT_EQ(0.03, zeroer.ZeroedGyro().z());
   ASSERT_EQ(4.0, zeroer.ZeroedAccel().x());
   ASSERT_EQ(5.0, zeroer.ZeroedAccel().y());
   ASSERT_EQ(6.0, zeroer.ZeroedAccel().z());
@@ -45,16 +49,16 @@
 TEST(ImuZeroerTest, ZeroOnConstantData) {
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
-    ASSERT_FALSE(zeroer.Zeroed());
-    zeroer.ProcessMeasurement(MakeMeasurement({1, 2, 3}, {4, 5, 6}).message());
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
+    zeroer.ProcessMeasurement(
+        MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
-  // Gyro should be zeroed to {1, 2, 3}.
+  // Gyro should be zeroed to {0.01, 0.02, 0.03}.
-  ASSERT_EQ(1.0, zeroer.GyroOffset().x());
-  ASSERT_EQ(2.0, zeroer.GyroOffset().y());
-  ASSERT_EQ(3.0, zeroer.GyroOffset().z());
+  ASSERT_FLOAT_EQ(0.01, zeroer.GyroOffset().x());
+  ASSERT_FLOAT_EQ(0.02, zeroer.GyroOffset().y());
+  ASSERT_FLOAT_EQ(0.03, zeroer.GyroOffset().z());
   ASSERT_EQ(0.0, zeroer.ZeroedGyro().x());
   ASSERT_EQ(0.0, zeroer.ZeroedGyro().y());
   ASSERT_EQ(0.0, zeroer.ZeroedGyro().z());
@@ -64,11 +68,25 @@
   ASSERT_EQ(6.0, zeroer.ZeroedAccel().z());
-  // If we get another measurement offset by {1, 1, 1} we should read the result
-  // as {1, 1, 1}.
+  // If we get another measurement offset by {0.01, 0.01, 0.01} we should read
+  // the result as {0.01, 0.01, 0.01}.
-  zeroer.ProcessMeasurement(MakeMeasurement({2, 3, 4}, {0, 0, 0}).message());
+  zeroer.ProcessMeasurement(
+      MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
-  ASSERT_EQ(1.0, zeroer.ZeroedGyro().x());
-  ASSERT_EQ(1.0, zeroer.ZeroedGyro().y());
-  ASSERT_EQ(1.0, zeroer.ZeroedGyro().z());
+  ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().x());
+  ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().y());
+  ASSERT_FLOAT_EQ(0.01, zeroer.ZeroedGyro().z());
+}
+
+// Tests that we do not zero if the gyro is producing particularly high
+// magnitude results.
+TEST(ImuZeroerTest, NoZeroOnHighMagnitudeGyro) {
+  ImuZeroer zeroer;
+  ASSERT_FALSE(zeroer.Zeroed());
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
+    zeroer.ProcessMeasurement(
+        MakeMeasurement({0.1, 0.2, 0.3}, {4, 5, 6}).message());
+    ASSERT_FALSE(zeroer.Zeroed());
+  }
+  ASSERT_FALSE(zeroer.Faulted());
 }
 
 // Tests that we tolerate small amounts of noise in the incoming data and can
@@ -76,29 +94,27 @@
 TEST(ImuZeroerTest, ZeroOnLowNoiseData) {
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
-    ASSERT_FALSE(zeroer.Zeroed());
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
     const double offset =
-        (static_cast<double>(ii) / (ImuZeroer::kSamplesToAverage - 1) - 0.5) *
-        0.01;
+        (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 0.001;
     zeroer.ProcessMeasurement(
-        MakeMeasurement({1 + offset, 2 + offset, 3 + offset},
+        MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
                         {4 + offset, 5 + offset, 6 + offset})
             .message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
   ASSERT_FALSE(zeroer.Faulted());
-  // Gyro should be zeroed to {1, 2, 3}.
-  ASSERT_NEAR(1.0, zeroer.GyroOffset().x(), 1e-8);
-  ASSERT_NEAR(2.0, zeroer.GyroOffset().y(), 1e-8);
-  ASSERT_NEAR(3.0, zeroer.GyroOffset().z(), 1e-8);
-  // If we get another measurement offset by {1, 1, 1} we should read the result
-  // as {1, 1, 1}.
-  zeroer.ProcessMeasurement(MakeMeasurement({2, 3, 4}, {0, 0, 0}).message());
+  ASSERT_NEAR(0.01, zeroer.GyroOffset().x(), 1e-3);
+  ASSERT_NEAR(0.02, zeroer.GyroOffset().y(), 1e-3);
+  ASSERT_NEAR(0.03, zeroer.GyroOffset().z(), 1e-3);
+  // If we get another measurement offset by {0.01, 0.01, 0.01} we should read
+  // the result as {0.01, 0.01, 0.01}.
+  zeroer.ProcessMeasurement(
+      MakeMeasurement({0.02, 0.03, 0.04}, {0, 0, 0}).message());
   ASSERT_FALSE(zeroer.Faulted());
-  ASSERT_NEAR(1.0, zeroer.ZeroedGyro().x(), 1e-8);
-  ASSERT_NEAR(1.0, zeroer.ZeroedGyro().y(), 1e-8);
-  ASSERT_NEAR(1.0, zeroer.ZeroedGyro().z(), 1e-8);
+  ASSERT_NEAR(0.01, zeroer.ZeroedGyro().x(), 1e-3);
+  ASSERT_NEAR(0.01, zeroer.ZeroedGyro().y(), 1e-3);
+  ASSERT_NEAR(0.01, zeroer.ZeroedGyro().z(), 1e-3);
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().x());
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().y());
   ASSERT_EQ(0.0, zeroer.ZeroedAccel().z());
@@ -108,13 +124,12 @@
 TEST(ImuZeroerTest, NoZeroOnHighNoiseData) {
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
     ASSERT_FALSE(zeroer.Zeroed());
     const double offset =
-        (static_cast<double>(ii) / (ImuZeroer::kSamplesToAverage - 1) - 0.5) *
-        1.0;
+        (static_cast<double>(ii) / (kMinSamplesToZero - 1) - 0.5) * 1.0;
     zeroer.ProcessMeasurement(
-        MakeMeasurement({1 + offset, 2 + offset, 3 + offset},
+        MakeMeasurement({0.01 + offset, 0.02 + offset, 0.03 + offset},
                         {4 + offset, 5 + offset, 6 + offset})
             .message());
   }
@@ -127,15 +142,16 @@
 TEST(ImuZeroerTest, FaultOnNewZero) {
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
-    ASSERT_FALSE(zeroer.Zeroed());
-    zeroer.ProcessMeasurement(MakeMeasurement({1, 2, 3}, {4, 5, 6}).message());
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
+    zeroer.ProcessMeasurement(
+        MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
-    ASSERT_FALSE(zeroer.Faulted())
-        << "We should not fault until we complete a second cycle of zeroing.";
-    zeroer.ProcessMeasurement(MakeMeasurement({1, 5, 3}, {4, 5, 6}).message());
+  ASSERT_FALSE(zeroer.Faulted())
+      << "We should not fault until we complete a second cycle of zeroing.";
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
+    zeroer.ProcessMeasurement(
+        MakeMeasurement({0.01, 0.05, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Faulted());
 }
@@ -144,14 +160,14 @@
 TEST(ImuZeroerTest, NoFaultOnSimilarZero) {
   ImuZeroer zeroer;
   ASSERT_FALSE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
-    ASSERT_FALSE(zeroer.Zeroed());
-    zeroer.ProcessMeasurement(MakeMeasurement({1, 2, 3}, {4, 5, 6}).message());
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
+    zeroer.ProcessMeasurement(
+        MakeMeasurement({0.01, 0.02, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_TRUE(zeroer.Zeroed());
-  for (size_t ii = 0; ii < ImuZeroer::kSamplesToAverage; ++ii) {
+  for (size_t ii = 0; ii < kMinSamplesToZero; ++ii) {
     zeroer.ProcessMeasurement(
-        MakeMeasurement({1, 2.0001, 3}, {4, 5, 6}).message());
+        MakeMeasurement({0.01, 0.020001, 0.03}, {4, 5, 6}).message());
   }
   ASSERT_FALSE(zeroer.Faulted());
 }
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 2f6c944..6ac1d87 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -33,6 +33,7 @@
     "--no-ts-reexport",
     "--reflect-names",
     "--reflect-types",
+    "--gen-name-strings",
 ]
 
 def flatbuffer_library_public(
diff --git a/third_party/flatbuffers/src/idl_gen_js_ts.cpp b/third_party/flatbuffers/src/idl_gen_js_ts.cpp
index 9c89c1a..be0a205 100644
--- a/third_party/flatbuffers/src/idl_gen_js_ts.cpp
+++ b/third_party/flatbuffers/src/idl_gen_js_ts.cpp
@@ -768,6 +768,14 @@
       code += "');\n};\n\n";
     }
 
+    // Generate the name method
+    if (parser_.opts.generate_name_strings) {
+      code +=
+        "static getFullyQualifiedName(): string {\n"
+        "  return '" + object_namespace + "." + struct_def.name + "';\n"
+        "}\n";
+    }
+
     // Emit field accessors
     for (auto it = struct_def.fields.vec.begin();
          it != struct_def.fields.vec.end(); ++it) {
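
With --gen-name-strings enabled (see the build_defs.bzl change above), the
generated TypeScript for a table gains a name accessor. For aos.examples.Ping,
for instance, the emitted code is:

    static getFullyQualifiedName(): string {
      return 'aos.examples.Ping';
    }

which is what GetId() in ping_handler.ts relies on to key handlers by type.
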
diff --git a/y2020/BUILD b/y2020/BUILD
index 1d281bb..c32cd10 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -110,6 +110,7 @@
         "//y2019/control_loops/drivetrain:target_selector_fbs",
         "//y2020/control_loops/superstructure:superstructure_status_fbs",
         "//y2020/vision:vision_fbs",
+        "//y2020/vision/sift:sift_fbs",
     ],
     visibility = ["//visibility:public"],
     deps = [
@@ -125,3 +126,15 @@
     srcs = ["__init__.py"],
     visibility = ["//visibility:public"],
 )
+
+sh_binary(
+    name = "web_proxy",
+    srcs = ["web_proxy.sh"],
+    data = [
+        ":config.json",
+        "//aos/network:web_proxy_main",
+        "//y2020/www:main_bundle",
+        "//y2020/www:files",
+        "//y2020/www:flatbuffers",
+    ],
+)
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index d03c1b9..9abb2a1 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -81,3 +81,20 @@
         "//frc971/control_loops/drivetrain:drivetrain_lib",
     ],
 )
+
+cc_binary(
+    name = "drivetrain_replay",
+    srcs = ["drivetrain_replay.cc"],
+    data = ["//y2020:config.json"],
+    deps = [
+        ":drivetrain_base",
+        "//aos:configuration",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:logger",
+        "//frc971/control_loops/drivetrain:drivetrain_lib",
+        "@com_github_gflags_gflags//:gflags",
+        "@com_github_google_glog//:glog",
+    ],
+)
diff --git a/y2020/control_loops/drivetrain/drivetrain_replay.cc b/y2020/control_loops/drivetrain/drivetrain_replay.cc
new file mode 100644
index 0000000..c7d81c2
--- /dev/null
+++ b/y2020/control_loops/drivetrain/drivetrain_replay.cc
@@ -0,0 +1,65 @@
+// This binary allows us to replay the drivetrain code over an existing
+// logfile, primarily for use in testing changes to the localizer code.
+// When run, it generates a new logfile with all the data replayed, so that
+// the result can then be run through the plotting tool or analyzed in some
+// other way. The original drivetrain status data will be on the
+// /original/drivetrain channel.
+#include "aos/configuration.h"
+#include "aos/events/logging/logger.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/network/team_number.h"
+#include "frc971/control_loops/drivetrain/drivetrain.h"
+#include "gflags/gflags.h"
+#include "y2020/control_loops/drivetrain/drivetrain_base.h"
+
+DEFINE_string(logfile, "/tmp/logfile.bfbs",
+              "Name of the logfile to read from.");
+DEFINE_string(config, "y2020/config.json",
+              "Name of the config file to replay using.");
+DEFINE_string(output_file, "/tmp/replayed.bfbs",
+              "Name of the logfile to write replayed data to.");
+DEFINE_int32(team, 971, "Team number to use for logfile replay.");
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+
+  aos::network::OverrideTeamNumber(FLAGS_team);
+
+  aos::InitCreate();
+
+  const aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  aos::logger::LogReader reader(FLAGS_logfile, &config.message());
+  // TODO(james): Actually enforce not sending on the same buses as the logfile
+  // spews out.
+  reader.RemapLoggedChannel("/drivetrain",
+                            "frc971.control_loops.drivetrain.Status");
+  reader.RemapLoggedChannel("/drivetrain",
+                            "frc971.control_loops.drivetrain.Output");
+  reader.Register();
+
+  aos::logger::DetachedBufferWriter file_writer(FLAGS_output_file);
+  std::unique_ptr<aos::EventLoop> log_writer_event_loop =
+      reader.event_loop_factory()->MakeEventLoop("log_writer");
+  log_writer_event_loop->SkipTimingReport();
+  CHECK(nullptr == log_writer_event_loop->node());
+  aos::logger::Logger writer(&file_writer, log_writer_event_loop.get());
+
+  std::unique_ptr<aos::EventLoop> drivetrain_event_loop =
+      reader.event_loop_factory()->MakeEventLoop("drivetrain");
+  drivetrain_event_loop->SkipTimingReport();
+
+  frc971::control_loops::drivetrain::DeadReckonEkf localizer(
+      drivetrain_event_loop.get(),
+      y2020::control_loops::drivetrain::GetDrivetrainConfig());
+  frc971::control_loops::drivetrain::DrivetrainLoop drivetrain(
+      y2020::control_loops::drivetrain::GetDrivetrainConfig(),
+      drivetrain_event_loop.get(), &localizer);
+
+  reader.event_loop_factory()->Run();
+
+  aos::Cleanup();
+  return 0;
+}
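
A typical invocation (the flag values shown are just the defaults spelled
out):

    bazel run //y2020/control_loops/drivetrain:drivetrain_replay -- \
        --logfile /tmp/logfile.bfbs --output_file /tmp/replayed.bfbs
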
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 2e3723d..44e14e1 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -1,4 +1,4 @@
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_ts_library")
 load("//aos:config.bzl", "aos_config")
 
 flatbuffer_cc_library(
@@ -30,9 +30,25 @@
     srcs = [
         "camera_reader.cc",
     ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
     deps = [
         ":v4l2_reader",
+        ":vision_fbs",
         "//aos:init",
         "//aos/events:shm_event_loop",
+        "//third_party:opencv",
+        "//y2020/vision/sift:demo_sift",
+        "//y2020/vision/sift:sift971",
+        "//y2020/vision/sift:sift_fbs",
+        "//y2020/vision/sift:sift_training_fbs",
     ],
 )
+
+flatbuffer_ts_library(
+    name = "vision_ts_fbs",
+    srcs = ["vision.fbs"],
+    visibility = ["//y2020:__subpackages__"],
+)
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index e5bcb64..de4dfb7 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -1,34 +1,235 @@
+#include <opencv2/features2d.hpp>
+#include <opencv2/imgproc.hpp>
+
 #include "aos/events/shm_event_loop.h"
 #include "aos/init.h"
 
+#include "y2020/vision/sift/demo_sift.h"
+#include "y2020/vision/sift/sift971.h"
+#include "y2020/vision/sift/sift_generated.h"
+#include "y2020/vision/sift/sift_training_generated.h"
 #include "y2020/vision/v4l2_reader.h"
+#include "y2020/vision/vision_generated.h"
 
 namespace frc971 {
 namespace vision {
 namespace {
 
+class CameraReader {
+ public:
+  CameraReader(aos::EventLoop *event_loop,
+               const sift::TrainingData *training_data, V4L2Reader *reader,
+               cv::FlannBasedMatcher *matcher)
+      : event_loop_(event_loop),
+        training_data_(training_data),
+        reader_(reader),
+        matcher_(matcher),
+        image_sender_(event_loop->MakeSender<CameraImage>("/camera")),
+        result_sender_(
+            event_loop->MakeSender<sift::ImageMatchResult>("/camera")),
+        read_image_timer_(event_loop->AddTimer([this]() {
+          ReadImage();
+          read_image_timer_->Setup(event_loop_->monotonic_now());
+        })) {
+    CopyTrainingFeatures();
+    // Technically we don't need to do this, but doing it now avoids the first
+    // match attempt being slow.
+    matcher_->train();
+
+    event_loop->OnRun(
+        [this]() { read_image_timer_->Setup(event_loop_->monotonic_now()); });
+  }
+
+ private:
+  // Copies the information from training_data_ into matcher_.
+  void CopyTrainingFeatures();
+  // Processes an image (including sending the results).
+  void ProcessImage(const CameraImage &image);
+  // Reads an image, and then performs all of our processing on it.
+  void ReadImage();
+
+  flatbuffers::Offset<
+      flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+  PackImageMatches(flatbuffers::FlatBufferBuilder *fbb,
+                   const std::vector<std::vector<cv::DMatch>> &matches);
+  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+  PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
+               const std::vector<cv::KeyPoint> &keypoints,
+               const cv::Mat &descriptors);
+
+  aos::EventLoop *const event_loop_;
+  const sift::TrainingData *const training_data_;
+  V4L2Reader *const reader_;
+  cv::FlannBasedMatcher *const matcher_;
+  aos::Sender<CameraImage> image_sender_;
+  aos::Sender<sift::ImageMatchResult> result_sender_;
+  // We schedule this immediately to read an image. Having it on a timer means
+  // other things can run on the event loop in between.
+  aos::TimerHandler *const read_image_timer_;
+
+  const std::unique_ptr<frc971::vision::SIFT971_Impl> sift_{
+      new frc971::vision::SIFT971_Impl()};
+};
+
+void CameraReader::CopyTrainingFeatures() {
+  for (const sift::TrainingImage *training_image : *training_data_->images()) {
+    cv::Mat features(training_image->features()->size(), 128, CV_32F);
+    for (size_t i = 0; i < training_image->features()->size(); ++i) {
+      const sift::Feature *feature_table = training_image->features()->Get(i);
+      const flatbuffers::Vector<float> *const descriptor =
+          feature_table->descriptor();
+      CHECK_EQ(descriptor->size(), 128u) << ": Unsupported feature size";
+      cv::Mat(1, descriptor->size(), CV_32F,
+              const_cast<void *>(static_cast<const void *>(descriptor->data())))
+          .copyTo(features(cv::Range(i, i + 1), cv::Range(0, 128)));
+    }
+    matcher_->add(features);
+  }
+}
+
+void CameraReader::ProcessImage(const CameraImage &image) {
+  // First, we need to extract the brightness information. This can't really be
+  // fused into the beginning of the SIFT algorithm because the algorithm needs
+  // to look at the base image directly. It also only takes 2ms on our images.
+  // This is converting from YUYV to a grayscale image.
+  cv::Mat image_mat(
+      image.rows(), image.cols(), CV_8U);
+  CHECK(image_mat.isContinuous());
+  const int number_pixels = image.rows() * image.cols();
+  for (int i = 0; i < number_pixels; ++i) {
+    reinterpret_cast<uint8_t *>(image_mat.data)[i] =
+        image.data()->data()[i * 2];
+  }
+
+  // Next, grab the features from the image.
+  std::vector<cv::KeyPoint> keypoints;
+  cv::Mat descriptors;
+  sift_->detectAndCompute(image_mat, cv::noArray(), keypoints, descriptors);
+
+  // Then, match those features against our training data.
+  std::vector<std::vector<cv::DMatch>> matches;
+  matcher_->knnMatch(/* queryDescriptors */ descriptors, matches, /* k */ 2);
+
+  // Now, pack the results up and send them out.
+  auto builder = result_sender_.MakeBuilder();
+
+  const auto image_matches_offset = PackImageMatches(builder.fbb(), matches);
+  // TODO(Brian): PackCameraPoses (and put it in the result)
+  const auto features_offset =
+      PackFeatures(builder.fbb(), keypoints, descriptors);
+
+  sift::ImageMatchResult::Builder result_builder(*builder.fbb());
+  result_builder.add_image_matches(image_matches_offset);
+  result_builder.add_features(features_offset);
+  result_builder.add_image_monotonic_timestamp_ns(
+      image.monotonic_timestamp_ns());
+  builder.Send(result_builder.Finish());
+}
+
+void CameraReader::ReadImage() {
+  if (!reader_->ReadLatestImage()) {
+    LOG(INFO) << "No image, sleeping";
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+    return;
+  }
+
+  ProcessImage(reader_->LatestImage());
+
+  reader_->SendLatestImage();
+}
+
+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::ImageMatch>>>
+CameraReader::PackImageMatches(
+    flatbuffers::FlatBufferBuilder *fbb,
+    const std::vector<std::vector<cv::DMatch>> &matches) {
+  // First, we need to pull out all the matches for each image. Might as well
+  // build up the Match tables at the same time.
+  std::vector<std::vector<flatbuffers::Offset<sift::Match>>> per_image_matches;
+  for (const std::vector<cv::DMatch> &image_matches : matches) {
+    for (const cv::DMatch &image_match : image_matches) {
+      sift::Match::Builder match_builder(*fbb);
+      match_builder.add_query_feature(image_match.queryIdx);
+      match_builder.add_train_feature(image_match.trainIdx);
+      if (per_image_matches.size() <= static_cast<size_t>(image_match.imgIdx)) {
+        per_image_matches.resize(image_match.imgIdx + 1);
+      }
+      per_image_matches[image_match.imgIdx].emplace_back(
+          match_builder.Finish());
+    }
+  }
+
+  // Then, we need to build up each ImageMatch table.
+  std::vector<flatbuffers::Offset<sift::ImageMatch>> image_match_tables;
+  for (size_t i = 0; i < per_image_matches.size(); ++i) {
+    const std::vector<flatbuffers::Offset<sift::Match>> &this_image_matches =
+        per_image_matches[i];
+    if (this_image_matches.empty()) {
+      continue;
+    }
+    const auto vector_offset = fbb->CreateVector(this_image_matches);
+    sift::ImageMatch::Builder image_builder(*fbb);
+    image_builder.add_train_image(i);
+    image_builder.add_matches(vector_offset);
+    image_match_tables.emplace_back(image_builder.Finish());
+  }
+
+  return fbb->CreateVector(image_match_tables);
+}
+
+flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<sift::Feature>>>
+CameraReader::PackFeatures(flatbuffers::FlatBufferBuilder *fbb,
+                           const std::vector<cv::KeyPoint> &keypoints,
+                           const cv::Mat &descriptors) {
+  const int number_features = keypoints.size();
+  CHECK_EQ(descriptors.rows, number_features);
+  std::vector<flatbuffers::Offset<sift::Feature>> features_vector(
+      number_features);
+  for (int i = 0; i < number_features; ++i) {
+    const auto submat = descriptors(cv::Range(i, i + 1), cv::Range(0, 128));
+    CHECK(submat.isContinuous());
+    const auto descriptor_offset =
+        fbb->CreateVector(reinterpret_cast<float *>(submat.data), 128);
+    sift::Feature::Builder feature_builder(*fbb);
+    feature_builder.add_descriptor(descriptor_offset);
+    feature_builder.add_x(keypoints[i].pt.x);
+    feature_builder.add_y(keypoints[i].pt.y);
+    feature_builder.add_size(keypoints[i].size);
+    feature_builder.add_angle(keypoints[i].angle);
+    feature_builder.add_response(keypoints[i].response);
+    feature_builder.add_octave(keypoints[i].octave);
+    CHECK_EQ(-1, keypoints[i].class_id)
+        << ": Not sure what to do with a class id";
+    features_vector[i] = feature_builder.Finish();
+  }
+  return fbb->CreateVector(features_vector);
+}
+
 void CameraReaderMain() {
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
       aos::configuration::ReadConfig("config.json");
 
+  const auto training_data_bfbs = DemoSiftData();
+  const sift::TrainingData *const training_data =
+      flatbuffers::GetRoot<sift::TrainingData>(training_data_bfbs.data());
+  {
+    flatbuffers::Verifier verifier(
+        reinterpret_cast<const uint8_t *>(training_data_bfbs.data()),
+        training_data_bfbs.size());
+    CHECK(training_data->Verify(verifier));
+  }
+
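+  // FLANN KD-tree matcher; these parameters mirror the Python tools'
+  // train_matcher (5 trees, 50 checks).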
+  const auto index_params = cv::makePtr<cv::flann::IndexParams>();
+  index_params->setAlgorithm(cvflann::FLANN_INDEX_KDTREE);
+  index_params->setInt("trees", 5);
+  const auto search_params =
+      cv::makePtr<cv::flann::SearchParams>(/* checks */ 50);
+  cv::FlannBasedMatcher matcher(index_params, search_params);
+
   aos::ShmEventLoop event_loop(&config.message());
   V4L2Reader v4l2_reader(&event_loop, "/dev/video0");
+  CameraReader camera_reader(&event_loop, training_data, &v4l2_reader, &matcher);
 
-  while (true) {
-    const auto image = v4l2_reader.ReadLatestImage();
-    if (image.empty()) {
-      LOG(INFO) << "No image, sleeping";
-      std::this_thread::sleep_for(std::chrono::milliseconds(10));
-      continue;
-    }
-
-    // Now, process image.
-    // TODO(Brian): Actually process it, rather than just logging its size...
-    LOG(INFO) << image.size();
-    std::this_thread::sleep_for(std::chrono::milliseconds(70));
-
-    v4l2_reader.SendLatestImage();
-  }
+  event_loop.Run();
 }
 
 }  // namespace
diff --git a/y2020/vision/sift/BUILD b/y2020/vision/sift/BUILD
index ec06126..5cfb6aa 100644
--- a/y2020/vision/sift/BUILD
+++ b/y2020/vision/sift/BUILD
@@ -76,7 +76,7 @@
     # all the redundant versions for other sizes, and maybe stop doing the one
     # we don't actually use.
     [
-        "1.2489997148513794",
+        "1.2489995956420898",
         "1p24",
         "11",
     ],
@@ -90,23 +90,23 @@
 sizes = [
     [
         1280,
-        720,
+        960,
     ],
     [
         640,
-        360,
+        480,
     ],
     [
         320,
-        180,
+        240,
     ],
     [
         160,
-        90,
+        120,
     ],
     [
         80,
-        45,
+        60,
     ],
 ]
 
@@ -152,6 +152,22 @@
     ],
 )
 
+cc_test(
+    name = "fast_gaussian_test",
+    srcs = [
+        "fast_gaussian_test.cc",
+    ],
+    restricted_to = [
+        "//tools:k8",
+        "//tools:armhf-debian",
+    ],
+    deps = [
+        ":fast_gaussian",
+        "//aos/testing:googletest",
+        "//third_party:opencv",
+    ],
+)
+
 cc_binary(
     name = "testing_sift",
     srcs = [
@@ -204,3 +220,40 @@
     includes = [":sift_fbs_includes"],
     visibility = ["//visibility:public"],
 )
+
+py_binary(
+    name = "demo_sift_training",
+    srcs = ["demo_sift_training.py"],
+    default_python_version = "PY3",
+    srcs_version = "PY2AND3",
+    deps = [
+        ":sift_fbs_python",
+        "@opencv_contrib_nonfree_amd64//:python_opencv",
+    ],
+)
+
+genrule(
+    name = "run_demo_sift_training",
+    srcs = [
+        "images/demo/FRC-Image4-cleaned.png",
+    ],
+    outs = [
+        "demo_sift.h",
+    ],
+    cmd = " ".join([
+        "$(location :demo_sift_training)",
+        "$(location images/demo/FRC-Image4-cleaned.png)",
+        "$(location demo_sift.h)",
+    ]),
+    tools = [
+        ":demo_sift_training",
+    ],
+)
+
+cc_library(
+    name = "demo_sift",
+    hdrs = [
+        "demo_sift.h",
+    ],
+    visibility = ["//visibility:public"],
+)
diff --git a/y2020/vision/sift/demo_sift_training.py b/y2020/vision/sift/demo_sift_training.py
new file mode 100644
index 0000000..a6650fd
--- /dev/null
+++ b/y2020/vision/sift/demo_sift_training.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python3
+
+import cv2
+import sys
+import flatbuffers
+
+import frc971.vision.sift.TrainingImage as TrainingImage
+import frc971.vision.sift.TrainingData as TrainingData
+import frc971.vision.sift.Feature as Feature
+
+def main():
+  image4_cleaned_path = sys.argv[1]
+  output_path = sys.argv[2]
+
+  image4_cleaned = cv2.imread(image4_cleaned_path)
+
+  image = cv2.cvtColor(image4_cleaned, cv2.COLOR_BGR2GRAY)
+  image = cv2.resize(image, (640, 480))
+  sift = cv2.xfeatures2d.SIFT_create()
+  keypoints, descriptors = sift.detectAndCompute(image, None)
+
+  fbb = flatbuffers.Builder(0)
+
+  features_vector = []
+
+  for keypoint, descriptor in zip(keypoints, descriptors):
+    Feature.FeatureStartDescriptorVector(fbb, len(descriptor))
+    for n in reversed(descriptor):
+      fbb.PrependFloat32(n)
+    descriptor_vector = fbb.EndVector(len(descriptor))
+
+    Feature.FeatureStart(fbb)
+
+    Feature.FeatureAddDescriptor(fbb, descriptor_vector)
+    Feature.FeatureAddX(fbb, keypoint.pt[0])
+    Feature.FeatureAddY(fbb, keypoint.pt[1])
+    Feature.FeatureAddSize(fbb, keypoint.size)
+    Feature.FeatureAddAngle(fbb, keypoint.angle)
+    Feature.FeatureAddResponse(fbb, keypoint.response)
+    Feature.FeatureAddOctave(fbb, keypoint.octave)
+
+    features_vector.append(Feature.FeatureEnd(fbb))
+
+  TrainingImage.TrainingImageStartFeaturesVector(fbb, len(features_vector))
+  for feature in reversed(features_vector):
+    fbb.PrependUOffsetTRelative(feature)
+  features_vector_table = fbb.EndVector(len(features_vector))
+
+  TrainingImage.TrainingImageStart(fbb)
+  TrainingImage.TrainingImageAddFeatures(fbb, features_vector_table)
+  # TODO(Brian): Fill out the transformation matrices.
+  training_image = TrainingImage.TrainingImageEnd(fbb)
+
+  TrainingData.TrainingDataStartImagesVector(fbb, 1)
+  fbb.PrependUOffsetTRelative(training_image)
+  images = fbb.EndVector(1)
+
+  TrainingData.TrainingDataStart(fbb)
+  TrainingData.TrainingDataAddImages(fbb, images)
+  fbb.Finish(TrainingData.TrainingDataEnd(fbb))
+
+  bfbs = fbb.Output()
+
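+  # Emit the serialized flatbuffer as a hex-escaped C++ string literal.
+  # alignas(64) over-aligns the data so flatbuffer accesses stay aligned.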
+  output_prefix = [
+      b'#ifndef Y2020_VISION_SIFT_DEMO_SIFT_H_',
+      b'#define Y2020_VISION_SIFT_DEMO_SIFT_H_',
+      b'#include <string_view>',
+      b'namespace frc971 {',
+      b'namespace vision {',
+      b'inline std::string_view DemoSiftData() {',
+  ]
+  output_suffix = [
+      b'  return std::string_view(kData, sizeof(kData));',
+      b'}',
+      b'}  // namespace vision',
+      b'}  // namespace frc971',
+      b'#endif  // Y2020_VISION_SIFT_DEMO_SIFT_H_',
+  ]
+
+  with open(output_path, 'wb') as output:
+    for line in output_prefix:
+      output.write(line)
+      output.write(b'\n')
+    output.write(b'alignas(64) static constexpr char kData[] = "')
+    for byte in fbb.Output():
+      output.write(b'\\x' + (b'%x' % byte).zfill(2))
+    output.write(b'";\n')
+    for line in output_suffix:
+      output.write(line)
+      output.write(b'\n')
+
+if __name__ == '__main__':
+  main()
diff --git a/y2020/vision/sift/fast_gaussian_test.cc b/y2020/vision/sift/fast_gaussian_test.cc
new file mode 100644
index 0000000..20a56f7
--- /dev/null
+++ b/y2020/vision/sift/fast_gaussian_test.cc
@@ -0,0 +1,75 @@
+#include "y2020/vision/sift/fast_gaussian.h"
+
+#include <opencv2/imgproc.hpp>
+#include "gtest/gtest.h"
+
+#include "y2020/vision/sift/fast_gaussian_all.h"
+
+namespace frc971 {
+namespace vision {
+namespace testing {
+
+class FastGaussianTest : public ::testing::Test {
+ public:
+  cv::Mat RandomImage(int rows = 500, int cols = 500, int type = CV_8UC3) {
+    cv::Mat result(rows, cols, type);
+    cv::randu(result, 0, 255);
+    return result;
+  }
+
+  void ExpectEqual(const cv::Mat &a, const cv::Mat &b, double epsilon = 1e-10) {
+    const cv::Mat difference = a - b;
+    double min, max;
+    cv::minMaxLoc(difference, &min, &max);
+    EXPECT_GE(min, -epsilon);
+    EXPECT_LE(max, epsilon);
+  }
+};
+
+// Verifies that the default GaussianBlur parameters work out to 15x15 with
+// sigma of 1.6.
+TEST_F(FastGaussianTest, DefaultGaussianSize) {
+  const auto image = RandomImage(500, 500, CV_32FC3);
+  cv::Mat default_blurred, explicitly_blurred;
+  cv::GaussianBlur(image, default_blurred, cv::Size(), 1.6, 1.6);
+  cv::GaussianBlur(image, explicitly_blurred, cv::Size(15, 15), 1.6, 1.6);
+  ExpectEqual(default_blurred, explicitly_blurred);
+}
+
+// Verifies that with 8U, a 9x9 blur gives the same result as a 15x15 one.
+TEST_F(FastGaussianTest, GaussianSizeS8) {
+  const auto image = RandomImage(500, 500, CV_8UC3);
+  cv::Mat big_blurred, little_blurred;
+  cv::GaussianBlur(image, big_blurred, cv::Size(15, 15), 1.6, 1.6);
+  cv::GaussianBlur(image, little_blurred, cv::Size(9, 9), 1.6, 1.6);
+  ExpectEqual(big_blurred, little_blurred);
+}
+
+// Verifies that FastGaussian and cv::GaussianBlur give the same result.
+TEST_F(FastGaussianTest, FastGaussian) {
+  const auto image = RandomImage(480, 640, CV_16SC1);
+  cv::Mat slow, fast, fast_direct;
+  static constexpr double kSigma = 1.9465878414647133;
+  static constexpr int kSize = 13;
+
+  cv::GaussianBlur(image, slow, cv::Size(kSize, kSize), kSigma, kSigma);
+  FastGaussian(image, &fast, kSigma);
+
+  // Explicitly call the generated code, to verify that our chosen parameters do
+  // in fact result in using the generated one.
+  fast_direct.create(slow.size(), slow.type());
+  ASSERT_EQ(0,
+            DoGeneratedFastGaussian(MatToHalide<const int16_t>(image),
+                                    MatToHalide<int16_t>(fast_direct), kSigma));
+
+  // 50/65536 = 0.00076, which is under 0.1%, which is pretty close.
+  ExpectEqual(slow, fast, 50);
+  // The wrapper should be calling the exact same code, so it should end up with
+  // the exact same result.
+  ExpectEqual(fast, fast_direct, 0);
+}
+
+}  // namespace testing
+}  // namespace vision
+}  // namespace frc971
diff --git a/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png b/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png
new file mode 100644
index 0000000..9d2f0bf
--- /dev/null
+++ b/y2020/vision/sift/images/demo/FRC-Image4-cleaned.png
Binary files differ
diff --git a/y2020/vision/sift/sift.fbs b/y2020/vision/sift/sift.fbs
index 84697de..5a1384e 100644
--- a/y2020/vision/sift/sift.fbs
+++ b/y2020/vision/sift/sift.fbs
@@ -82,6 +82,9 @@
 
   // The features for this image.
   features:[Feature];
+
+  // Timestamp when the frame was captured.
+  image_monotonic_timestamp_ns:long;
 }
 
 root_type ImageMatchResult;
diff --git a/y2020/vision/sift/sift971.cc b/y2020/vision/sift/sift971.cc
index 7152906..93d71e4 100644
--- a/y2020/vision/sift/sift971.cc
+++ b/y2020/vision/sift/sift971.cc
@@ -294,48 +294,62 @@
   gpyr.resize(number_octaves * gpyr_layers_per_octave);
   dogpyr.resize(number_octaves * dogpyr_layers_per_octave);
 
-  std::vector<double> sig(gpyr_layers_per_octave);
-  // precompute Gaussian sigmas using the following formula:
-  //  \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
-  sig[0] = sigma;
+  // Precompute Gaussian sigmas using the following formula:
+  //   \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
+  // We need one for each of the layers in the pyramid we blur, which skips the
+  // first one because it's just the base image without any blurring.
+  std::vector<double> sig(gpyr_layers_per_octave - 1);
   double k = std::pow(2., 1. / layers_per_octave);
-  for (int i = 1; i < gpyr_layers_per_octave; i++) {
-    double sig_prev = std::pow<double>(k, i - 1) * sigma;
+  for (int i = 0; i < gpyr_layers_per_octave - 1; i++) {
+    double sig_prev = std::pow<double>(k, i) * sigma;
     double sig_total = sig_prev * k;
     sig[i] = std::sqrt(sig_total * sig_total - sig_prev * sig_prev);
   }
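+  // For example, with sigma = 1.6 and layers_per_octave = 3 (and assuming the
+  // usual layers_per_octave + 3 Gaussian layers per octave), k = 2^(1/3) and
+  // sig ~= {1.226, 1.545, 1.947, 2.453, 3.090}; the 1.947 here is the kSigma
+  // exercised by fast_gaussian_test.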
 
   for (int octave = 0; octave < number_octaves; octave++) {
+    const int octave_gpyr_index = octave * gpyr_layers_per_octave;
+    const int octave_dogpyr_index = octave * dogpyr_layers_per_octave;
+
     // At the beginning of each octave, calculate the new base image.
     {
-      Mat &dst = gpyr[octave * gpyr_layers_per_octave];
+      Mat &dst = gpyr[octave_gpyr_index];
       if (octave == 0) {
         // For the first octave, it's just the base image.
         dst = base;
       } else {
-        // For the other octaves, it's a halved version of the end of the
-        // previous octave.
-        const Mat &src = gpyr[(octave - 1) * gpyr_layers_per_octave +
-                              gpyr_layers_per_octave - 1];
+        // For the other octaves, OpenCV's code claims that it's a halved
+        // version of the end of the previous octave.
+        // TODO(Brian): But this isn't really the end of the previous octave?
+        // But if you use the end, it finds way fewer features? Maybe this is
+        // just an arbitrarily-ish-somewhat-blurred thing from the previous
+        // octave??
+        const int gpyr_index = octave_gpyr_index - 3;
+        // Verify that the indexing in the original OpenCV code gives the same
+        // result. It's unclear which one makes more logical sense.
+        CHECK_EQ((octave - 1) * gpyr_layers_per_octave + layers_per_octave,
+                 gpyr_index);
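+        // (If gpyr_layers_per_octave == layers_per_octave + 3, the usual
+        // s + 3 arrangement, then with s = layers_per_octave:
+        // (octave - 1) * (s + 3) + s == octave * (s + 3) - 3, so the two
+        // indexings agree.)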
+        const Mat &src = gpyr[gpyr_index];
         resize(src, dst, Size(src.cols / 2, src.rows / 2), 0, 0, INTER_NEAREST);
       }
     }
-    // We start with layer==1 because the "first layer" is just the base image
-    // (or a downscaled version of it).
-    for (int layer = 1; layer < gpyr_layers_per_octave; layer++) {
+
+    // Then, go through all the layers and calculate the appropriate
+    // differences.
+    for (int layer = 0; layer < dogpyr_layers_per_octave; layer++) {
       // The index where the current layer starts.
-      const int layer_index = octave * gpyr_layers_per_octave + layer;
+      const int layer_gpyr_index = octave_gpyr_index + layer;
+      const int layer_dogpyr_index = octave_dogpyr_index + layer;
+
       if (use_fast_pyramid_difference_) {
-        const Mat &input = gpyr[layer_index - 1];
-        Mat &blurred = gpyr[layer_index];
-        Mat &difference =
-            dogpyr[octave * dogpyr_layers_per_octave + (layer - 1)];
+        const Mat &input = gpyr[layer_gpyr_index];
+        Mat &blurred = gpyr[layer_gpyr_index + 1];
+        Mat &difference = dogpyr[layer_dogpyr_index];
         FastGaussianAndSubtract(input, &blurred, &difference, sig[layer]);
       } else {
         // First, calculate the new gaussian blur.
         {
-          const Mat &src = gpyr[layer_index - 1];
-          Mat &dst = gpyr[layer_index];
+          const Mat &src = gpyr[layer_gpyr_index];
+          Mat &dst = gpyr[layer_gpyr_index + 1];
           if (use_fast_gaussian_pyramid_) {
             FastGaussian(src, &dst, sig[layer]);
           } else {
@@ -345,9 +359,9 @@
 
         // Then, calculate the difference from the previous one.
         {
-          const Mat &src1 = gpyr[layer_index - 1];
-          const Mat &src2 = gpyr[layer_index];
-          Mat &dst = dogpyr[octave * dogpyr_layers_per_octave + (layer - 1)];
+          const Mat &src1 = gpyr[layer_gpyr_index];
+          const Mat &src2 = gpyr[layer_gpyr_index + 1];
+          Mat &dst = dogpyr[layer_dogpyr_index];
           if (use_fast_subtract_dogpyr_) {
             FastSubtract(src2, src1, &dst);
           } else {
@@ -1150,7 +1164,7 @@
                                     std::vector<KeyPoint> &keypoints,
                                     OutputArray _descriptors,
                                     bool useProvidedKeypoints) {
-  int firstOctave = 0, actualNOctaves = 0, actualNLayers = 0;
+  int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
   Mat image = _image.getMat(), mask = _mask.getMat();
 
   if (image.empty() || image.depth() != CV_8U)
diff --git a/y2020/vision/tools/python_code/field_display.py b/y2020/vision/tools/python_code/field_display.py
new file mode 100644
index 0000000..a3f1024
--- /dev/null
+++ b/y2020/vision/tools/python_code/field_display.py
@@ -0,0 +1,143 @@
+import cv2
+import math
+from matplotlib import pyplot as plt
+import numpy as np
+
+
+def load_field_image():
+    field_image = cv2.imread('test_images/field_cropped_scaled.png')
+    return field_image
+
+
+def show_field(img_in=None):
+    if (img_in is None):
+        img_in = load_field_image()
+
+    cv2.imshow('Field', img_in), cv2.waitKey()
+    return
+
+
+# Convert field coordinates (meters) to image coordinates (pixels).
+# Field origin is (x,y) at center of red alliance driver station wall,
+# with x pointing into the field.
+# The default field image is 1598 x 821 pixels, so 1 cm per pixel on field
+# I.e., Field is 1598 x 821 pixels = 15.98 x 8.21 meters
+# Our origin in image coordinates is at (1598, 410) pixels, facing in the -x image direction
+
+
+def field_coord_to_image_coord(robot_position):
+    # Scale by 100 pixels / meter (1 cm per pixel)
+    robot_pos_img_coord = np.array([[1598, 410]]).T + np.int32(
+        100.0 * np.array([[-robot_position[0][0], robot_position[1][0]]]).T)
+    return robot_pos_img_coord
+
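+# Quick sanity check of the mapping (illustrative values): the field origin
+# maps to pixel (1598, 410), and a robot 1 m into the field moves 100 px in
+# the -x image direction:
+#   field_coord_to_image_coord(np.array([[0.], [0.]]))  # -> [[1598], [410]]
+#   field_coord_to_image_coord(np.array([[1.], [0.]]))  # -> [[1498], [410]]
+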
+
+# Given a field image, plot the robot and an optional heading vector on it
+def plot_bot_on_field(img_in, color, robot_position, robot_heading=None):
+    if img_in is None:
+        img_out = load_field_image()
+    else:
+        img_out = img_in.copy()
+
+    robot_pos_img_coord = field_coord_to_image_coord(robot_position)
+
+    ROBOT_RADIUS = 10
+    img_out = cv2.circle(
+        img_out, (robot_pos_img_coord[0][0], robot_pos_img_coord[1][0]),
+        ROBOT_RADIUS, color, 3)
+
+    if (robot_heading is not None):
+        img_out = cv2.line(
+            img_out, (robot_pos_img_coord[0][0], robot_pos_img_coord[1][0]),
+            (int(robot_pos_img_coord[0][0] -
+                 3 * ROBOT_RADIUS * math.cos(robot_heading)),
+             int(robot_pos_img_coord[1][0] +
+                 3 * ROBOT_RADIUS * math.sin(robot_heading))), color, 3,
+            cv2.LINE_AA)
+
+    return img_out
+
+
+# Plot a line on the field, given starting and ending field positions
+def plot_line_on_field(img_in, color, start_position, end_position):
+    if img_in is None:
+        img_out = load_field_image()
+    else:
+        img_out = img_in.copy()
+
+    start_pos_img_coord = field_coord_to_image_coord(start_position)
+    end_pos_img_coord = field_coord_to_image_coord(end_position)
+
+    img_out = cv2.line(img_out,
+                       (start_pos_img_coord[0][0], start_pos_img_coord[1][0]),
+                       (end_pos_img_coord[0][0], end_pos_img_coord[1][0]),
+                       color, 3, cv2.LINE_AA)
+
+    return img_out
+
+
+# Helper function to plot a few quantities like
+#   Heading (angle of the target relative to the robot)
+#   Distance (of the target to the robot)
+#   Skew (angle of the target normal relative to the robot)
+def plot_camera_to_target(img_in, color, heading, distance, skew):
+    if img_in is None:
+        img_out = np.zeros((821, 1598, 3), np.uint8)
+    else:
+        img_out = img_in
+
+    # Bot location is at origin
+    bot_location = np.array([[0., 0.]]).T
+
+    # Target location is distance away along the heading
+    target_location = bot_location + np.array(
+        [[distance * math.cos(heading), distance * math.sin(heading)]]).T
+
+    # Create part of a line from the target location to the end of the target, based on the skew
+    skew_line_offset = np.array(
+        [[math.cos(skew + math.pi / 2),
+          math.sin(skew + math.pi / 2)]]).T
+
+    # Plot bot origin
+    img_out = plot_bot_on_field(img_out, color, bot_location)
+
+    # Plot heading line
+    img_out = plot_line_on_field(img_out, (255, 255, 0), bot_location,
+                                 target_location)
+
+    # Plot target location
+    img_out = plot_bot_on_field(img_out, (255, 0, 0), target_location)
+
+    # Plot target rotation
+    img_out = plot_line_on_field(img_out, (0, 255, 0),
+                                 target_location - skew_line_offset,
+                                 target_location + skew_line_offset)
+
+    return img_out
+
+
+# Helper function to take camera rotation and bring it back to world coordinate frame
+def camera_rot_to_world(R_mat):
+    R_c_w_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+
+    return (R_mat).dot(R_c_w_mat.T)
+
+
+def camera_rot_to_world_Rodrigues(R):
+    R_mat = cv2.Rodrigues(R)[0]
+    R_mat_world = camera_rot_to_world(R_mat)
+    R_world = cv2.Rodrigues(R_mat_world)[0]
+    return R_world
+
+
+def invert_pose_Rodrigues(R, T):
+    R_inv_mat = cv2.Rodrigues(R)[0].T
+    T_inv = -R_inv_mat.dot(T)
+    R_inv = cv2.Rodrigues(R_inv_mat)[0]
+    return R_inv, T_inv
+
+
+def invert_pose_mat(R, T):
+    R_inv = R.T
+    T_inv = -R_inv.dot(T)
+    return R_inv, T_inv
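+
+
+if __name__ == '__main__':
+    # Round-trip sanity check of the pose inversion helpers above
+    # (illustrative values only): composing a pose with its inverse must
+    # give the identity transform.
+    R_test = np.array([[0.1, 0.2, 0.3]]).T
+    T_test = np.array([[1.], [2.], [3.]])
+    R_inv_test, T_inv_test = invert_pose_Rodrigues(R_test, T_test)
+    R_mat_test = cv2.Rodrigues(R_test)[0]
+    R_inv_mat_test = cv2.Rodrigues(R_inv_test)[0]
+    assert np.allclose(R_inv_mat_test.dot(R_mat_test), np.eye(3))
+    assert np.allclose(R_inv_mat_test.dot(T_test) + T_inv_test, 0.)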
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
new file mode 100644
index 0000000..bcf610f
--- /dev/null
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -0,0 +1,307 @@
+import cv2
+import math
+from matplotlib import pyplot as plt
+import numpy as np
+import time
+
+import field_display
+import train_and_match as tam
+
+### DEFINITIONS
+
+# List of images to train on
+training_image_names = [
+    'test_images/train_power_port_blue.png',
+    # Using webcam captured image for testing-- best to use only one of the two
+    # training images for the power_port_red
+    'test_images/train_power_port_red_webcam.png',
+    #    'test_images/train_power_port_red.png',
+    'test_images/train_loading_bay_blue.png',
+    'test_images/train_loading_bay_red.png'
+]
+
+# Target points on the image -- needs to match training images-- one per image
+# These are manually captured by examining the images, and entering the pixel
+# values from the target center for each image.
+# These are currently only used for visualization of the target
+target_pt_list = [
+    np.float32([[393, 147]]).reshape(-1, 1, 2),  # train_power_port_blue.png
+    np.float32([[305, 97]]).reshape(-1, 1, 2),  #train_power_port_red_webcam.png
+    #    np.float32([[570,192]]).reshape(-1,1,2), # train_power_port_red.png
+    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_blue.png
+    np.float32([[366, 236]]).reshape(-1, 1, 2),  # train_loading_bay_red.png
+]
+
+# Put list of all possible images we want to test, and then we'll select one
+# Note, if you query on the same image as training, only that image will match
+query_image_names = [
+    'test_images/train_power_port_red.png',  #0
+    'test_images/train_power_port_blue.png',  #1
+    'test_images/test_game_manual.png',  #2
+    'test_images/test_train_shift_left_100.png',  #3
+    'test_images/test_train_shift_right_100.png',  #4
+    'test_images/test_train_down_2x.png',  #5
+    'test_images/test_train_down_4x.png',  #6
+    'test_images/test_raspi3_sample.jpg',  #7
+    'test_images/test_VR_sample1.png',  #8
+    'test_images/train_loading_bay_blue.png',  #9
+    'test_images/train_loading_bay_red.png'
+]  #10
+
+training_image_index = 0
+# TODO: Should add argParser here to select this
+query_image_index = 0  # Use -1 for camera capture; otherwise an index into the list above
+
+##### Let's get to work!
+
+# Load the training and query images
+training_images = tam.load_images(training_image_names)
+
+# Create feature extractor
+feature_extractor = tam.load_feature_extractor()
+
+# Extract keypoints and descriptors for model
+train_keypoint_lists, train_descriptor_lists = tam.detect_and_compute(
+    feature_extractor, training_images)
+
+# Create the matcher.  This only needs to be created once
+ts = time.monotonic()  # Do some basic timing
+matcher = tam.train_matcher(train_descriptor_lists)
+tf = time.monotonic()
+print("Done training matcher, took ", tf - ts, " secs")
+for i in range(len(train_keypoint_lists)):
+    print("Model ", i, " has ", len(train_keypoint_lists[i]), " features: ")
+
+# Load the query image in.  Based on index in our list, or using camera if index is -1
+query_images = []
+
+if (query_image_index == -1):
+    # Based on /dev/videoX setting
+    CAMERA_INDEX = 2
+    print("#### Using camera at /dev/video%d" % CAMERA_INDEX)
+    # Open the device at the ID X for /dev/videoX
+    cap = cv2.VideoCapture(CAMERA_INDEX)
+
+    # Check whether the user-selected camera opened successfully.
+    if not (cap.isOpened()):
+        print("Could not open video device")
+        quit()
+
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+    query_images.append(frame)
+
+else:
+    # Load default images
+    query_images = tam.load_images([query_image_names[query_image_index]])
+
+# For storing out pose to measure noise
+log_file = open("pose.out", 'w')
+
+looping = True
+# Grab images until 'q' is pressed (or just once for canned images)
+while (looping):
+    if (query_image_index == -1):
+        # Capture frame-by-frame
+        ret, frame = cap.read()
+        query_images[0] = frame
+    else:
+        looping = False
+
+    # Extract features from query image
+    query_keypoint_lists, query_descriptor_lists = tam.detect_and_compute(
+        feature_extractor, query_images)
+    print("Query image has ", len(query_keypoint_lists[0]), " features")
+
+    ts = time.monotonic()
+    good_matches_list = tam.compute_matches(matcher, train_descriptor_lists,
+                                            query_descriptor_lists)
+    tf = time.monotonic()
+    print("Done finding matches (took ", tf - ts, " secs)")
+    for i in range(len(train_keypoint_lists)):
+        print("Model ", i, " has # good matches: ", len(good_matches_list[i]))
+
+    # TEMP: Use first list for what follows
+    # TODO: Should look for best match after homography and display that one
+    # OR: show results on all good matches
+    good_matches = good_matches_list[0]
+
+    # Next, find homography (map between training and query images)
+    homography_list, matches_mask_list = tam.compute_homographies(
+        train_keypoint_lists, query_keypoint_lists, good_matches_list)
+
+    if matches_mask_list[0].count(1) < tam.MIN_MATCH_COUNT:
+        print(
+            "Not continuing pose calc because not enough good matches after homography-- only had ",
+            matches_mask_list[0].count(1))
+        continue
+
+    # Next, let's go to compute pose
+    # I've chosen some coordinate frames to help track things.  Using indices:
+    #   w:    world coordinate frame (at center wall of driver station)
+    #   tarj: camera pose when jth target (e.g., power panel) was captured
+    #   ci:   ith camera
+    #   b:    body frame of robot (robot location)
+    #
+    # And, T for translation, R for rotation, "est" for estimated
+    # So, T_w_b_est is the estimated translation from world to body coords
+    #
+    # A few notes:
+    #   -- CV uses rotated frame, where z points out of camera, y points down
+    #   -- This causes a lot of mapping around of points, but overall OK
+
+    ### TODO: Still need to clean this rest up
+
+    # TODO: Need to generalize this for each target-- need to have 3D global locations of each target's keypoints
+    field_length = 15.98
+    target_center_offset_y = 1.67
+    target_center_height = 2.49
+    if training_image_index == 0:
+        # webcam
+        fx = 810.
+        fy = 810.
+        cx = 320.
+        cy = 240.
+        cy_above_ground = 1.  # meters above ground
+        depth = 4.57  # meters
+        target_center_offset_y = 1.67
+    elif training_image_index == 1:
+        # Made up for screenshot from manual
+        cx = int(1165 / 2)
+        cy_above_ground = -0.5856  # meters above ground
+        cy = 776 / 2
+        fx = 1143  # pixels (or 0.00160m or 1143 pixels?)
+        fy = fx  # assume square pixels; fy isn't separately calibrated here
+        depth = 4.57  # meters
+    elif training_image_index == 2:
+        cx = 732 / 2
+        cy_above_ground = 0.454  # meters above ground
+        cy = 436 / 2
+        target_width = 5 * 12 * 0.0254  # width in ft * in/ft * m/in
+        target_height = target_width * cy / cx  # width in ft * in/ft * m/in * cy / cx
+        FOV_x = 54 * math.pi / 180.  # camera FOV in radians (from 54 degree lens)
+        fx = cx / math.tan(FOV_x / 2)  # pixels
+        fy = fx  # assume square pixels; fy isn't separately calibrated here
+        depth = (target_height / 2) / math.tan(FOV_x / 2)  # meters
+
+    src_pts_3d = []
+    for m in good_matches:
+        # Project the matches all back to 3D, assuming they're all at the same depth
+        # Add in the distance across the field and any lateral offset of the target
+        src_pts_3d.append([(
+            field_length,
+            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[0] - cx
+              ) * depth / fx - target_center_offset_y,
+            -(train_keypoint_lists[training_image_index][m.trainIdx].pt[1] - cy
+              ) * depth / fx + cy_above_ground)])
+
+    # Reshape 3d point list to work with computations to follow
+    src_pts_3d_array = np.asarray(np.float32(src_pts_3d)).reshape(-1, 3, 1)
+
+    cam_mat = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
+    distortion_coeffs = np.zeros((5, 1))
+
+    # Create list of matching query point locations
+    dst_pts = np.float32([
+        query_keypoint_lists[0][m.queryIdx].pt for m in good_matches
+    ]).reshape(-1, 1, 2)
+
+    ts = time.monotonic()
+    # TODO: May want to supply it with estimated guess as starting point
+    # Find offset from camera to original location of camera relative to target
+    retval, R_ci_w_estj, T_ci_w_estj, inliers = cv2.solvePnPRansac(
+        src_pts_3d_array, dst_pts, cam_mat, distortion_coeffs)
+
+    tf = time.monotonic()
+    print("Solving Pose took ", tf - ts, " secs")
+    print("Found ", len(inliers), " inliers, out of ", len(dst_pts),
+          " matched points")
+    ### THIS is the estimate of the robot on the field!
+    R_w_ci_estj, T_w_ci_estj = field_display.invert_pose_Rodrigues(
+        R_ci_w_estj, T_ci_w_estj)
+    print("Pose from PnP is:\n", R_ci_w_estj, "\n", T_ci_w_estj)
+    # The rotation estimate is for camera with z pointing forwards.
+    # Compute the rotation as if x is forward
+    R_w_ci_estj_robot = field_display.camera_rot_to_world_Rodrigues(
+        R_w_ci_estj)
+    print("Pose in world coords is:\n", R_w_ci_estj, "\n", T_w_ci_estj,
+          "\nWith robot coord frame rotation as \n", R_w_ci_estj_robot)
+
+    # Use the rotational component about the z axis to define the heading
+    heading_est = R_w_ci_estj_robot[2][0]
+    # Plot location of the robot, along with heading
+    img_ret = field_display.plot_bot_on_field(None, (0, 255, 255), T_w_ci_estj,
+                                              heading_est)
+
+    # Compute vector to target
+    T_w_target_pt = np.array(
+        [[field_length, -target_center_offset_y, target_center_height]]).T
+    vector_to_target = T_w_target_pt - T_w_ci_estj
+    # Compute distance to target (full 3D)
+    distance_to_target = np.linalg.norm(vector_to_target)
+    # Compute distance to target (x,y)
+    distance_to_target_ground = np.linalg.norm(vector_to_target[0:2])
+    #print("Distance comparison: all: ", distance_to_target, " vs. ground: ", distance_to_target_ground)
+
+    angle_to_target_abs = math.atan2(vector_to_target[1][0],
+                                     vector_to_target[0][0])
+    angle_to_target_robot = angle_to_target_abs - heading_est
+    img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0),
+                                               T_w_ci_estj, T_w_target_pt)
+    # THESE ARE OUR ESTIMATES OF HEADING, DISTANCE, AND SKEW TO TARGET
+    log_file.write('%lf, %lf, %lf\n' % (heading_est, distance_to_target_ground,
+                                        angle_to_target_robot))
+
+    # A bunch of code to visualize things...
+    #
+    # Draw target on the image
+    h, w, ch = query_images[0].shape
+    query_image_copy = query_images[0].copy()
+    # Map target_pt onto image, for display
+    transformed_target = cv2.perspectiveTransform(target_pt_list[0],
+                                                  homography_list[0])
+    # Ballpark the size of the circle so it looks right on image
+    radius = int(32 * abs(homography_list[0][0][0] + homography_list[0][1][1])
+                 / 2)  # Average of scale factors
+    query_image_copy = cv2.circle(
+        query_image_copy,
+        (transformed_target.flatten()[0], transformed_target.flatten()[1]),
+        radius, (0, 255, 0), 3)
+
+    # If we're querying static images, show full results
+    if (query_image_index != -1):
+        homography_list, matches_mask_list = tam.show_results(
+            training_images, train_keypoint_lists, query_images,
+            query_keypoint_lists, target_pt_list, good_matches_list)
+
+    # Create empty image the size of the field
+    img_heading = field_display.load_field_image()
+    img_heading[:, :, :] = 0
+    f_h, f_w, f_ch = img_heading.shape
+
+    # Create heading view, and paste it to bottom of field
+    img_heading = field_display.plot_camera_to_target(
+        img_heading, (0, 255, 0), heading_est, distance_to_target_ground,
+        angle_to_target_robot)
+    vis = np.concatenate((img_ret, img_heading), axis=0)
+
+    # Paste query image to right of other views (scale to keep aspect ratio)
+    img_query_scaled = cv2.resize(query_image_copy,
+                                  (int(2 * w * f_h / h), 2 * f_h))
+    vis = np.concatenate((vis, img_query_scaled), axis=1)
+
+    # Scale down to make it fit on screen
+    vis = cv2.resize(vis, (int(vis.shape[1] / 4), int(vis.shape[0] / 4)))
+    cv2.imshow('field_display', vis)
+
+    # Waits for user input to quit the application
+    pause_time = 0
+    if (query_image_index == -1):
+        pause_time = 1
+    if cv2.waitKey(pause_time) & 0xFF == ord('q'):
+        break
+
+# Done.  Clean things up
+if (query_image_index == -1):
+    # When everything done, release the capture
+    cap.release()
+
+cv2.destroyAllWindows()
diff --git a/y2020/vision/tools/python_code/test_images/field_cropped_scaled.png b/y2020/vision/tools/python_code/test_images/field_cropped_scaled.png
new file mode 100644
index 0000000..c971c19
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/field_cropped_scaled.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_VR_sample1.png b/y2020/vision/tools/python_code/test_images/test_VR_sample1.png
new file mode 100644
index 0000000..86495b2
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_VR_sample1.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_game_manual.png b/y2020/vision/tools/python_code/test_images/test_game_manual.png
new file mode 100644
index 0000000..825f1c3
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_game_manual.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_raspi3_sample.jpg b/y2020/vision/tools/python_code/test_images/test_raspi3_sample.jpg
new file mode 100644
index 0000000..376149c
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_raspi3_sample.jpg
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_train_down_2x.png b/y2020/vision/tools/python_code/test_images/test_train_down_2x.png
new file mode 100644
index 0000000..b004c51
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_train_down_2x.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_train_down_4x.png b/y2020/vision/tools/python_code/test_images/test_train_down_4x.png
new file mode 100644
index 0000000..e217280
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_train_down_4x.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_train_shift_left_100.png b/y2020/vision/tools/python_code/test_images/test_train_shift_left_100.png
new file mode 100644
index 0000000..42165f8
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_train_shift_left_100.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/test_train_shift_right_100.png b/y2020/vision/tools/python_code/test_images/test_train_shift_right_100.png
new file mode 100644
index 0000000..9e7c274
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/test_train_shift_right_100.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_loading_bay.xcf b/y2020/vision/tools/python_code/test_images/train_loading_bay.xcf
new file mode 100644
index 0000000..6bc62f5
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_loading_bay.xcf
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_loading_bay_blue.png b/y2020/vision/tools/python_code/test_images/train_loading_bay_blue.png
new file mode 100644
index 0000000..c3c0aea
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_loading_bay_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_loading_bay_red.png b/y2020/vision/tools/python_code/test_images/train_loading_bay_red.png
new file mode 100644
index 0000000..42091a6
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_loading_bay_red.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_blue.png b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
new file mode 100644
index 0000000..6ade26f
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_blue.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_red.png b/y2020/vision/tools/python_code/test_images/train_power_port_red.png
new file mode 100644
index 0000000..9d2f0bf
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_red.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_red.xcf b/y2020/vision/tools/python_code/test_images/train_power_port_red.xcf
new file mode 100644
index 0000000..ad6bed1
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_red.xcf
Binary files differ
diff --git a/y2020/vision/tools/python_code/test_images/train_power_port_red_webcam.png b/y2020/vision/tools/python_code/test_images/train_power_port_red_webcam.png
new file mode 100644
index 0000000..6115348
--- /dev/null
+++ b/y2020/vision/tools/python_code/test_images/train_power_port_red_webcam.png
Binary files differ
diff --git a/y2020/vision/tools/python_code/train_and_match.py b/y2020/vision/tools/python_code/train_and_match.py
new file mode 100644
index 0000000..8fed4c3
--- /dev/null
+++ b/y2020/vision/tools/python_code/train_and_match.py
@@ -0,0 +1,291 @@
+import cv2
+import math
+from matplotlib import pyplot as plt
+import numpy as np
+#import tkinter
+# pip install pillow
+#import PIL.Image, PIL.ImageTk
+import time
+
+import field_display
+
+### DEFINITIONS
+MIN_MATCH_COUNT = 10  # 10 is min; more gives better matches
+FEATURE_EXTRACTOR_NAME = 'SIFT'
+QUERY_INDEX = 0  # We use a list for both training and query info, but only ever have one query item
+
+
+# Calculates rotation matrix to euler angles
+# The result is the same as MATLAB except the order
+# of the euler angles ( x and z are swapped ).
+# Input: R: numpy 3x3 rotation matrix
+# Output: numpy 3x1 vector of Euler angles (x,y,z)
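+# (Sanity check: rotationMatrixToEulerAngles(np.eye(3)) returns [0., 0., 0.].)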
+def rotationMatrixToEulerAngles(R):
+    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
+
+    singular = sy < 1e-6
+    if not singular:
+        x = math.atan2(R[2, 1], R[2, 2])
+        y = math.atan2(-R[2, 0], sy)
+        z = math.atan2(R[1, 0], R[0, 0])
+    else:
+        x = math.atan2(-R[1, 2], R[1, 1])
+        y = math.atan2(-R[2, 0], sy)
+        z = 0
+
+    return np.array([x, y, z])
+
+
+# Load images based on list of image names
+# Return list containing loaded images
+def load_images(image_names):
+    image_list = []
+    for im in image_names:
+        # Load image (in color; let opencv convert to B&W for features)
+        img_data = cv2.imread(im)
+        if img_data is None:
+            print("Failed to load image: ", im)
+            exit()
+        else:
+            image_list.append(img_data)
+
+    return image_list
+
+
+# Load feature extractor based on extractor name
+# Returns feature extractor object
+def load_feature_extractor():
+    if FEATURE_EXTRACTOR_NAME == 'SIFT':
+        # Initiate SIFT detector
+        feature_extractor = cv2.xfeatures2d.SIFT_create()
+    elif FEATURE_EXTRACTOR_NAME == 'SURF':
+        # Initiate SURF detector
+        feature_extractor = cv2.xfeatures2d.SURF_create()
+    elif FEATURE_EXTRACTOR_NAME == 'ORB':
+        # Initiate ORB detector
+        feature_extractor = cv2.ORB_create()
+
+    return feature_extractor
+
+
+# Run feature detector and compute feature descriptions on image_list
+# Input: feature_extractor object
+#        image_list: list containing images
+# Return: Lists of keypoints and lists of feature descriptors, one for each image
+def detect_and_compute(feature_extractor, image_list):
+    descriptor_lists = []
+    keypoint_lists = []
+    if (FEATURE_EXTRACTOR_NAME in ('SIFT', 'SURF')):
+        for i in range(len(image_list)):
+            # find the keypoints and descriptors with SIFT
+            kp, des = feature_extractor.detectAndCompute(image_list[i], None)
+            descriptor_lists.append(des)
+            keypoint_lists.append(kp)
+    elif FEATURE_EXTRACTOR_NAME == 'ORB':
+        # TODO: Check whether ORB extractor can do detectAndCompute.
+        # If so, we don't need to have this branch for ORB
+        for i in range(len(image_list)):
+            # find the keypoints and descriptors with ORB
+            kp = feature_extractor.detect(image_list[i], None)
+            # compute() takes the detected keypoints (and may drop some), and
+            # returns the final keypoints along with their descriptors.
+            kp, des = feature_extractor.compute(image_list[i], kp)
+            keypoint_lists.append(kp)
+            descriptor_lists.append(des)
+
+    return keypoint_lists, descriptor_lists
+
+
+# Given a keypoint descriptor list, create a matcher
+# Input: descriptor_lists: List of descriptors, one for each training image
+# Returns: a keypoint matcher trained on all descriptors, indexed by image
+def train_matcher(descriptor_lists):
+    if (FEATURE_EXTRACTOR_NAME in ('SIFT', 'SURF')):
+        # Use FLANN KD tree for SIFT & SURF
+        FLANN_INDEX_KDTREE = 0
+        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
+        search_params = dict(checks=50)
+
+        matcher = cv2.FlannBasedMatcher(index_params, search_params)
+        matcher.add(descriptor_lists)
+        matcher.train()
+    elif FEATURE_EXTRACTOR_NAME == 'ORB':
+        # Use FLANN LSH for ORB
+        FLANN_INDEX_LSH = 6
+        index_params = dict(
+            algorithm=FLANN_INDEX_LSH,
+            table_number=6,  # 12
+            key_size=12,  # 20
+            multi_probe_level=2)  #2
+        search_params = dict(checks=50)
+        matcher = cv2.FlannBasedMatcher(index_params, search_params)
+        # Other option: BFMatcher with default params
+        # NOTE: I think Brute Force matcher can only do 1 match at a time,
+        # rather than loading keypoints from all model images
+        #matcher = cv2.BFMatcher()
+
+    return matcher
+
+
+# Given a matcher and the query descriptors (and for ORB the training list),
+# Compute the best matches, filtering using the David Lowe magic ratio of 0.7
+# Return list of good match lists (one set of good matches for each training image for SIFT/SURF)
+def compute_matches(matcher, train_descriptor_lists, query_descriptor_lists):
+
+    # We'll create a match list for each of the training images
+    good_matches_list = []
+    if (FEATURE_EXTRACTOR_NAME in ('SIFT', 'SURF')):
+        # We're just doing one query at a time, so grab the first from the list
+        desc_query = query_descriptor_lists[QUERY_INDEX]
+        matches = matcher.knnMatch(desc_query, k=2)
+
+        for train_index in range(len(train_descriptor_lists)):
+            # Store all the good matches per Lowe's ratio test.
+            # (Lowe originally proposed a 0.7 ratio; 0.75 was later suggested
+            # as a better option.  We'll go with the more conservative choice,
+            # fewer but better matches, for now.)
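+            # For example, candidate distances (0.3, 0.9) pass because
+            # 0.3 < 0.7 * 0.9 = 0.63, while (0.6, 0.7) fail because
+            # 0.6 >= 0.7 * 0.7 = 0.49.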
+            good_matches = []
+            for m, n in matches:
+                if m.distance < 0.7 * n.distance and m.imgIdx == train_index:
+                    good_matches.append(m)
+
+            good_matches_list.append(good_matches)
+
+    elif FEATURE_EXTRACTOR_NAME == 'ORB':
+        # knnMatch takes the query descriptors first, then the training
+        # descriptors.
+        desc_query = query_descriptor_lists[QUERY_INDEX]
+        matches = matcher.knnMatch(desc_query, train_descriptor_lists[0], k=2)
+        good_matches = []
+        for m in matches:
+            # LSH may return fewer than 2 neighbors, so check before applying
+            # the ratio test.
+            if len(m) == 2:
+                if m[0].distance < 0.7 * m[1].distance:
+                    good_matches.append(m[0])
+
+        good_matches_list.append(good_matches)
+
+    return good_matches_list
+
+
+# Given a set of training keypoint lists, and a query keypoint list,
+# and the good matches for each training<->query pair, returns the
+# homography for each pair and a mask list of the matches considered good
+# by the homography
+# Inputs: train_keypoint_lists: List of keypoints lists from training images
+#         query_keypoint_lists: Keypoint list (only 1) from query image
+#         good_matches_list: List of matches for each training image -> query
+# Returns: homography_list: List of each homography between train->query
+#                           Returns [] if not enough matches / bad homography
+#          matches_mask_list: Mask list of matches kept by homography for each
+#                             set of matches
+def compute_homographies(train_keypoint_lists, query_keypoint_lists,
+                         good_matches_list):
+    homography_list = []
+    matches_mask_list = []
+    for i in range(len(train_keypoint_lists)):
+        good_matches = good_matches_list[i]
+        if len(good_matches) < MIN_MATCH_COUNT:
+            print("Not enough matches are for model ", i, ": ",
+                  len(good_matches), " out of needed #: ", MIN_MATCH_COUNT)
+            homography_list.append([])
+            matches_mask_list.append([])
+            continue
+
+        print("Got good number of matches for model %d: %d (needed only %d)" %
+              (i, len(good_matches), MIN_MATCH_COUNT))
+        # Extract and bundle keypoint locations for computations
+        src_pts = np.float32([
+            train_keypoint_lists[i][m.trainIdx].pt for m in good_matches
+        ]).reshape(-1, 1, 2)
+        dst_pts = np.float32([
+            query_keypoint_lists[QUERY_INDEX][m.queryIdx].pt
+            for m in good_matches
+        ]).reshape(-1, 1, 2)
+        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3.0)
+        matches_mask = mask.ravel().tolist()
+        homography_list.append(H)
+        matches_mask_list.append(matches_mask)
+
+    return homography_list, matches_mask_list
+
+
+# Helper function to display images
+# Shows side-by-side panel with query on left, training on right
+# connects keypoints between two and draws target on query
+# Also shows image with query unwarped (to match training image) and target pt
+def show_results(training_images, train_keypoint_lists, query_images,
+                 query_keypoint_lists, target_point_list, good_matches_list):
+    print("Showing results for ", len(training_images), " training images")
+
+    homography_list, matches_mask_list = compute_homographies(
+        train_keypoint_lists, query_keypoint_lists, good_matches_list)
+    for i in range(len(train_keypoint_lists)):
+        good_matches = good_matches_list[i]
+        if len(good_matches) < MIN_MATCH_COUNT:
+            continue
+        print("Showing results for model ", i)
+        matches_mask_count = matches_mask_list[i].count(1)
+        if matches_mask_count != len(good_matches):
+            print("Homography rejected some matches!  From ",
+                  len(good_matches), ", only ", matches_mask_count,
+                  " were used")
+
+        if matches_mask_count < MIN_MATCH_COUNT:
+            print(
+                "Skipping match because homography rejected matches down to below ",
+                MIN_MATCH_COUNT)
+            continue
+
+        # Create a box based on the training image to map onto the query img
+        h, w, ch = training_images[i].shape
+        H = homography_list[i]
+        train_box_pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
+                                    [w - 1, 0]]).reshape(-1, 1, 2)
+        query_box_pts = cv2.perspectiveTransform(train_box_pts, H)
+
+        # Figure out where the training target goes on the query img
+        transformed_target = cv2.perspectiveTransform(target_point_list[i], H)
+        # Ballpark the size of the circle so it looks right on image
+        radius = int(
+            32 * abs(H[0][0] + H[1][1]) / 2)  # Average of scale factors
+        # We're only using one query image at this point
+        query_image = query_images[QUERY_INDEX].copy()
+
+        # Draw training box and target points on the query image
+        query_image = cv2.polylines(query_image, [np.int32(query_box_pts)],
+                                    True, (0, 255, 0), 3, cv2.LINE_AA)
+        query_image = cv2.circle(
+            query_image,
+            (transformed_target.flatten()[0], transformed_target.flatten()[1]),
+            radius, (0, 255, 0), 3)
+
+        # Draw the matches and show it
+        draw_params = dict(
+            matchColor=(0, 255, 0),  # draw matches in green color
+            singlePointColor=None,
+            matchesMask=matches_mask_list[i],  # draw only inliers
+            flags=2)
+
+        img3 = cv2.drawMatches(query_image, query_keypoint_lists[QUERY_INDEX],
+                               training_images[i], train_keypoint_lists[i],
+                               good_matches_list[i], None, **draw_params)
+        print("Drawing matches for model ", i,
+              ".  Query on left, Training image on right")
+        cv2.imshow('Matches', img3), cv2.waitKey()
+
+        # Next, unwarp the query image so it looks like the training view
+        H_inv = np.linalg.inv(H)
+        query_image_warp = cv2.warpPerspective(query_image, H_inv, (w, h))
+        print("Showing unwarped query image for model ", i)
+        cv2.imshow('Unwarped Image', query_image_warp), cv2.waitKey()
+
+    # Go ahead and return these, for use elsewhere
+    return homography_list, matches_mask_list
+
+
+# Helper function to display keypoints
+# Input: image
+#        keypoint_list: List of opencv keypoints
+def show_keypoints(image, keypoint_list):
+    ret_img = image.copy()
+    ret_img = cv2.drawKeypoints(ret_img, keypoint_list, ret_img)
+    cv2.imshow("Keypoints", ret_img)
+    cv2.waitKey(0)
+    return ret_img
diff --git a/y2020/vision/tools/python_code/transform_projection_test.py b/y2020/vision/tools/python_code/transform_projection_test.py
new file mode 100644
index 0000000..b8d94fd
--- /dev/null
+++ b/y2020/vision/tools/python_code/transform_projection_test.py
@@ -0,0 +1,111 @@
+import cv2
+import math
+from matplotlib import pyplot as plt
+import numpy as np
+
+import field_display
+
+# Assume image is 640x480 (VGA)
+num_pixels_x = 640
+num_pixels_y = 480
+
+# Camera center is 320, 240
+c_x = num_pixels_x / 2
+c_y = num_pixels_y / 2
+
+# Horiz. FOV is 54 degrees
+FOV_h = 54 * math.pi / 180.0  # (in radians)
+# Vert FOV is horizontal FOV scaled by aspect ratio (through tan function)
+FOV_v = math.atan(c_y / c_x * math.tan(FOV_h / 2.)) * 2
+
+# Assuming square pixels
+# focal length is (640/2)/tan(FOV_h/2)
+f_x = c_x / (math.tan(FOV_h / 2.))
+f_y = c_y / (math.tan(FOV_v / 2.))
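+# With the numbers above, FOV_v ~= 41.8 degrees and f_x == f_y ~= 628 pixels
+# (square pixels by construction).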
+
+# Height of camera on robot above ground
+cam_above_ground = 0.5
+
+# TODO: Should fix f_y entry below.
+cam_mat = np.array([[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1.]])
+
+# Use default distortion (i.e., none)
+distortion_coeffs = np.zeros((5, 1))
+
+depth = 5.0  # meters
+
+R_w_tarj_mat = np.array([[0., 0., 1.], [-1, 0, 0], [0, -1., 0]])
+R_w_tarj, jac = cv2.Rodrigues(R_w_tarj_mat)
+T_w_tarj = np.array([[15.98 - depth, -4.10 + 2.36, cam_above_ground]]).T
+
+img_ret = field_display.plot_bot_on_field(None, (0, 255, 0), T_w_tarj)
+#field_display.show_field(img_ret)
+
+# Create fake set of points relative to camera capture (target) frame
+# at +/- 1 meter in x, 5 meter depth, and every 1 meter in y from 0 to 4m (above the camera, so negative y values)
+pts_3d_T_t = np.array([[-1., 0., depth], [1., 0., depth], [-1., -1., depth], [
+    1., -1., depth
+], [-1., -2., depth], [0., -2., depth], [1., -2., depth], [-1., -3., depth],
+                       [1., -3., depth], [-1., -4., depth], [1., -4., depth]])
+
+# Ground truth shift of camera, to compute projections
+R_tarj_ci_gt = np.array([[0., 0.2, 0.2]]).T
+
+R_tarj_ci_gt_mat, jac = cv2.Rodrigues(R_tarj_ci_gt)
+
+T_tarj_ci_gt = np.array([[1., 2., -5.]]).T
+
+# To transform vectors, we apply the inverse transformation
+R_tarj_ci_gt_mat_inv = R_tarj_ci_gt_mat.T
+T_tarj_ci_gt_inv = -R_tarj_ci_gt_mat_inv.dot(T_tarj_ci_gt)
+
+#pts_3d_T_t_shifted = (R_tarj_ci_gt_mat_inv.dot(pts_3d_T_t.T) + T_tarj_ci_gt_inv).T
+
+# Now project from new position
+# This was the manual way to do it-- use cv2.projectPoints instead
+#pts_proj_3d = cam_mat.dot(pts_3d_T_t_shifted.T).T
+#pts_proj_2d = np.divide(pts_proj_3d[:,0:2],(pts_proj_3d[:,2].reshape(-1,1)))
+
+pts_proj_2d_ci, jac_2d = cv2.projectPoints(pts_3d_T_t, R_tarj_ci_gt_mat_inv,
+                                           T_tarj_ci_gt_inv, cam_mat,
+                                           distortion_coeffs)
+#print(pts_proj_2d_ci)
+
+# Now, solve for the pose using the original 3d points (pts_3d_T_t) and the projections from the new location
+retval, R_ci_tarj_est, T_ci_tarj_est, inliers = cv2.solvePnPRansac(
+    pts_3d_T_t, pts_proj_2d_ci, cam_mat, distortion_coeffs)
+
+# This is the pose from camera to original target spot.  We need to invert to get back to the pose we want
+R_tarj_ci_est_mat = cv2.Rodrigues(R_ci_tarj_est)[0].T
+T_tarj_ci_est = -R_tarj_ci_est_mat.dot(T_ci_tarj_est)
+R_tarj_ci_est = cv2.Rodrigues(R_tarj_ci_est_mat)[0]
+
+print("Check:\n", "Rot error:\n", R_tarj_ci_gt - R_tarj_ci_est,
+      "\nTrans error:\n", T_tarj_ci_gt - T_tarj_ci_est,
+      "\nError is < 0.001 in R & T: ",
+      np.linalg.norm(R_tarj_ci_gt - R_tarj_ci_est) < 0.001,
+      np.linalg.norm(T_tarj_ci_gt - T_tarj_ci_est) < 0.001)
+
+# Compute camera location in world coordinates
+R_w_ci, T_w_ci, d1, d2, d3, d4, d5, d6, d7, d8 = cv2.composeRT(
+    R_tarj_ci_est, T_tarj_ci_est, R_w_tarj, T_w_tarj)
+
+print("Estimate in world coordinates")
+print("R, T:\n", R_w_ci, "\n", T_w_ci)
+img_ret = field_display.plot_bot_on_field(img_ret, (0, 255, 255), T_w_ci)
+field_display.show_field(img_ret)
+
+# Compute vector to target
+# TODO: Likely better to do this from the homography, rather than the pose estimate...
+
+T_w_target_pt = np.array([[15.98, -4.10 + 2.36, 2.0 - cam_above_ground]]).T
+vector_to_target = T_w_target_pt - T_w_ci
+d_ci_target = np.linalg.norm(vector_to_target)
+phi_ci_target = math.atan2(vector_to_target[1][0], vector_to_target[0][0])
+print("Vector to target (from cam frame):\n", vector_to_target,
+      "\nDistance to target: ", d_ci_target, "\nAngle to target (deg): ",
+      phi_ci_target * 180. / math.pi)
+
+img_ret = field_display.plot_line_on_field(img_ret, (255, 255, 0), T_w_ci,
+                                           T_w_target_pt)
+field_display.show_field(img_ret)
diff --git a/y2020/vision/tools/python_code/usb_camera_stream.py b/y2020/vision/tools/python_code/usb_camera_stream.py
new file mode 100644
index 0000000..42cb1f1
--- /dev/null
+++ b/y2020/vision/tools/python_code/usb_camera_stream.py
@@ -0,0 +1,25 @@
+import cv2
+# Open the device at the ID X for /dev/videoX
+cap = cv2.VideoCapture(2)
+
+# Check whether the user-selected camera opened successfully.
+if not (cap.isOpened()):
+    print("Could not open video device")
+    quit()
+
+while (True):
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+
+    exp = cap.get(cv2.CAP_PROP_EXPOSURE)
+    print("Exposure:", exp)
+    # Display the resulting frame
+    cv2.imshow('preview', frame)
+
+    # Waits for user input to quit the application
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# When everything done, release the capture
+cap.release()
+cv2.destroyAllWindows()
diff --git a/y2020/vision/v4l2_reader.cc b/y2020/vision/v4l2_reader.cc
index f43a2ac..f1944c1 100644
--- a/y2020/vision/v4l2_reader.cc
+++ b/y2020/vision/v4l2_reader.cc
@@ -56,63 +56,94 @@
   }
 }
 
-absl::Span<const char> V4L2Reader::ReadLatestImage() {
+bool V4L2Reader::ReadLatestImage() {
   // First, enqueue any old buffer we already have. This is the one which may
   // have been sent.
-  if (saved_buffer_ != -1) {
-    EnqueueBuffer(saved_buffer_);
-    saved_buffer_ = -1;
+  if (saved_buffer_) {
+    EnqueueBuffer(saved_buffer_.index);
+    saved_buffer_.Clear();
   }
   while (true) {
-    const int previous_buffer = saved_buffer_;
+    const BufferInfo previous_buffer = saved_buffer_;
     saved_buffer_ = DequeueBuffer();
-    if (saved_buffer_ != -1) {
+    if (saved_buffer_) {
       // We got a new buffer. Return the previous one (if relevant) and keep
       // going.
-      if (previous_buffer != -1) {
-        EnqueueBuffer(previous_buffer);
+      if (previous_buffer) {
+        EnqueueBuffer(previous_buffer.index);
       }
       continue;
     }
-    if (previous_buffer == -1) {
+    if (!previous_buffer) {
       // There were no images to read. Return an indication of that.
-      return absl::Span<const char>();
+      return false;
     }
     // We didn't get a new one, but we already got one in a previous
     // iteration, which means we found an image so return it.
     saved_buffer_ = previous_buffer;
-    return buffers_[saved_buffer_].DataSpan(ImageSize());
+    buffers_[saved_buffer_.index].PrepareMessage(rows_, cols_, ImageSize(),
+                                                 saved_buffer_.monotonic_eof);
+    return true;
   }
 }
 
-void V4L2Reader::SendLatestImage() {
-  buffers_[saved_buffer_].Send(rows_, cols_, ImageSize());
+void V4L2Reader::SendLatestImage() { buffers_[saved_buffer_.index].Send(); }
+
+void V4L2Reader::Buffer::InitializeMessage(size_t max_image_size) {
+  message_offset = flatbuffers::Offset<CameraImage>();
+  builder = aos::Sender<CameraImage>::Builder();
+  builder = sender.MakeBuilder();
+  // The kernel has an undocumented requirement that the buffer is aligned
+  // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
+  // and only print something in dmesg with the relevant dynamic debug
+  // prints turned on.
+  builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64, &data_pointer);
+  CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
+      << ": Flatbuffers failed to align things as requested";
+}
+
+void V4L2Reader::Buffer::PrepareMessage(
+    int rows, int cols, size_t image_size,
+    aos::monotonic_clock::time_point monotonic_eof) {
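+  // EndIndeterminateVector() finalizes the vector the kernel wrote into, so
+  // our raw pointer into it is no longer meaningful.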
+  CHECK(data_pointer != nullptr);
+  data_pointer = nullptr;
+
+  const auto data_offset = builder.fbb()->EndIndeterminateVector(image_size, 1);
+  auto image_builder = builder.MakeBuilder<CameraImage>();
+  image_builder.add_data(data_offset);
+  image_builder.add_rows(rows);
+  image_builder.add_cols(cols);
+  image_builder.add_monotonic_timestamp_ns(
+      std::chrono::nanoseconds(monotonic_eof.time_since_epoch()).count());
+  message_offset = image_builder.Finish();
 }
 
 int V4L2Reader::Ioctl(unsigned long number, void *arg) {
   return ioctl(fd_.get(), number, arg);
 }
 
-int V4L2Reader::DequeueBuffer() {
+V4L2Reader::BufferInfo V4L2Reader::DequeueBuffer() {
   struct v4l2_buffer buffer;
   memset(&buffer, 0, sizeof(buffer));
   buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
   buffer.memory = V4L2_MEMORY_USERPTR;
   const int result = Ioctl(VIDIOC_DQBUF, &buffer);
   if (result == -1 && errno == EAGAIN) {
-    return -1;
+    return BufferInfo();
   }
   PCHECK(result == 0) << ": VIDIOC_DQBUF failed";
   CHECK_LT(buffer.index, buffers_.size());
-  LOG(INFO) << "dequeued " << buffer.index;
   CHECK_EQ(reinterpret_cast<uintptr_t>(buffers_[buffer.index].data_pointer),
            buffer.m.userptr);
   CHECK_EQ(ImageSize(), buffer.length);
-  return buffer.index;
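+  // vision.fbs documents monotonic_timestamp_ns as an end-of-frame time, so
+  // verify the kernel is providing exactly that flavor of timestamp.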
+  CHECK(buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC);
+  CHECK_EQ(buffer.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK,
+           static_cast<uint32_t>(V4L2_BUF_FLAG_TSTAMP_SRC_EOF));
+  return {static_cast<int>(buffer.index),
+          aos::time::from_timeval(buffer.timestamp)};
 }
 
 void V4L2Reader::EnqueueBuffer(int buffer_number) {
-  LOG(INFO) << "enqueueing " << buffer_number;
   CHECK_GE(buffer_number, 0);
   CHECK_LT(buffer_number, static_cast<int>(buffers_.size()));
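+  // Prepare the next message now so its storage can be handed to the kernel
+  // to fill.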
   buffers_[buffer_number].InitializeMessage(ImageSize());
diff --git a/y2020/vision/v4l2_reader.h b/y2020/vision/v4l2_reader.h
index bdf4a8e..3c9d795 100644
--- a/y2020/vision/v4l2_reader.h
+++ b/y2020/vision/v4l2_reader.h
@@ -25,10 +25,10 @@
 
   // Reads the latest image.
   //
-  // Returns an empty span if no image was available since this object was
-  // created. The data referenced in the return value is valid until this method
-  // is called again.
-  absl::Span<const char> ReadLatestImage();
+  // Returns false if no image was available since the last image was read.
+  // Call LatestImage() to get a reference to the data, which will be valid
+  // until this method is called again.
+  bool ReadLatestImage();
 
   // Sends the latest image.
   //
@@ -37,52 +37,58 @@
   // ReadLatestImage() will no longer be valid.
   void SendLatestImage();
 
+  const CameraImage &LatestImage() {
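+    // Note: the returned reference points into the message being built, so it
+    // is only valid until SendLatestImage() or ReadLatestImage() runs again.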
+    Buffer *const buffer = &buffers_[saved_buffer_.index];
+    return *flatbuffers::GetTemporaryPointer(*buffer->builder.fbb(),
+                                             buffer->message_offset);
+  }
+
  private:
   static constexpr int kNumberBuffers = 16;
 
   struct Buffer {
-    void InitializeMessage(size_t max_image_size) {
-      builder = aos::Sender<CameraImage>::Builder();
-      builder = sender.MakeBuilder();
-      // The kernel has an undocumented requirement that the buffer is aligned
-      // to 64 bytes. If you give it a nonaligned pointer, it will return EINVAL
-      // and only print something in dmesg with the relevant dynamic debug
-      // prints turned on.
-      builder.fbb()->StartIndeterminateVector(max_image_size, 1, 64,
-                                              &data_pointer);
-      CHECK_EQ(reinterpret_cast<uintptr_t>(data_pointer) % 64, 0u)
-          << ": Flatbuffers failed to align things as requested";
-    }
+    void InitializeMessage(size_t max_image_size);
 
-    void Send(int rows, int cols, size_t image_size) {
-      const auto data_offset =
-          builder.fbb()->EndIndeterminateVector(image_size, 1);
-      auto image_builder = builder.MakeBuilder<CameraImage>();
-      image_builder.add_data(data_offset);
-      image_builder.add_rows(rows);
-      image_builder.add_cols(cols);
-      builder.Send(image_builder.Finish());
-      data_pointer = nullptr;
+    void PrepareMessage(int rows, int cols, size_t image_size,
+                        aos::monotonic_clock::time_point monotonic_eof);
+
+    void Send() {
+      builder.Send(message_offset);
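+      // Clear the offset so a finished message can't be sent twice.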
+      message_offset = flatbuffers::Offset<CameraImage>();
     }
 
     absl::Span<const char> DataSpan(size_t image_size) {
-      return absl::Span<const char>(reinterpret_cast<char *>(data_pointer),
-                                    image_size);
+      return absl::Span<const char>(
+          reinterpret_cast<char *>(CHECK_NOTNULL(data_pointer)), image_size);
     }
 
     aos::Sender<CameraImage> sender;
     aos::Sender<CameraImage>::Builder builder;
+    flatbuffers::Offset<CameraImage> message_offset;
 
     uint8_t *data_pointer = nullptr;
   };
 
+  struct BufferInfo {
+    int index = -1;
+    aos::monotonic_clock::time_point monotonic_eof =
+        aos::monotonic_clock::min_time;
+
+    explicit operator bool() const { return index != -1; }
+
+    void Clear() {
+      index = -1;
+      monotonic_eof = aos::monotonic_clock::min_time;
+    }
+  };
+
   // TODO(Brian): This concept won't exist once we start using variable-size
   // H.264 frames.
   size_t ImageSize() const { return rows_ * cols_ * 2 /* bytes per pixel */; }
 
   // Attempts to dequeue a buffer (nonblocking). Returns the index of the new
-  // buffer, or -1 if there wasn't a frame to dequeue.
-  int DequeueBuffer();
+  // buffer, or BufferInfo() if there wasn't a frame to dequeue.
+  BufferInfo DequeueBuffer();
 
   void EnqueueBuffer(int buffer);
 
@@ -95,7 +101,7 @@
 
-  // If this is non-negative, it's the buffer number we're currently holding
-  // onto.
+  // Information about the buffer we're currently holding onto, if any
+  // (its index is -1 otherwise).
-  int saved_buffer_ = -1;
+  BufferInfo saved_buffer_;
 
   const int rows_ = 480;
   const int cols_ = 640;
diff --git a/y2020/vision/vision.fbs b/y2020/vision/vision.fbs
index 66f695b..17dc4a4 100644
--- a/y2020/vision/vision.fbs
+++ b/y2020/vision/vision.fbs
@@ -14,10 +14,9 @@
   // The number of columns in the image.
   cols:int;
   // The image data.
-  data:[byte];
-  // Timestamp when the frame was captured.
+  data:[ubyte];
+  // Timestamp when the frame was captured. This is the end-of-frame timestamp.
   monotonic_timestamp_ns:long;
-  realtime_timestamp_ns:long;
 }
 
 root_type CameraImage;
diff --git a/y2020/web_proxy.sh b/y2020/web_proxy.sh
new file mode 100755
index 0000000..8e1a570
--- /dev/null
+++ b/y2020/web_proxy.sh
@@ -0,0 +1 @@
+./aos/network/web_proxy_main --config=y2020/config.json --data_dir=y2020/www
diff --git a/y2020/wpilib_interface.cc b/y2020/wpilib_interface.cc
index 4989390..3276722 100644
--- a/y2020/wpilib_interface.cc
+++ b/y2020/wpilib_interface.cc
@@ -228,6 +228,9 @@
     sensor_reader.set_drivetrain_left_encoder(make_encoder(0));
     sensor_reader.set_drivetrain_right_encoder(make_encoder(1));
 
+    // Note: If the ADIS16470 is plugged directly into the roboRIO SPI port
+    // without the Spartan Board, then the trigger is on 26, the reset is on
+    // 27, and the chip select is CS0.
     auto imu_trigger = make_unique<frc::DigitalInput>(0);
     auto imu_reset = make_unique<frc::DigitalOutput>(1);
     auto spi = make_unique<frc::SPI>(frc::SPI::Port::kOnboardCS2);
diff --git a/y2020/www/BUILD b/y2020/www/BUILD
new file mode 100644
index 0000000..146456f
--- /dev/null
+++ b/y2020/www/BUILD
@@ -0,0 +1,45 @@
+load("@build_bazel_rules_typescript//:defs.bzl", "ts_library")
+load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")
+
+ts_library(
+    name = "main",
+    srcs = [
+        "main.ts",
+        "image_handler.ts",
+    ],
+    deps = [
+        "//aos/network/www:proxy",
+        "//y2020/vision:vision_ts_fbs",
+    ],
+    visibility = ["//y2020:__subpackages__"],
+)
+
+rollup_bundle(
+    name = "main_bundle",
+    entry_point = "y2020/www/main",
+    deps = [
+        "main",
+    ],
+    visibility = ["//y2020:__subpackages__"],
+)
+
+filegroup(
+    name = "files",
+    srcs = glob([
+        "**/*.html",
+        "**/*.css",
+    ]),
+    visibility = ["//visibility:public"],
+)
+
+genrule(
+    name = "flatbuffers",
+    srcs = [
+        "@com_github_google_flatbuffers//:flatjs",
+    ],
+    outs = [
+        "flatbuffers.js",
+    ],
+    cmd = "cp $(location @com_github_google_flatbuffers//:flatjs) $@",
+    visibility = ["//y2020:__subpackages__"],
+)
diff --git a/y2020/www/image_handler.ts b/y2020/www/image_handler.ts
new file mode 100644
index 0000000..abaf831
--- /dev/null
+++ b/y2020/www/image_handler.ts
@@ -0,0 +1,61 @@
+import {frc971} from 'y2020/vision/vision_generated';
+
+export class ImageHandler {
+  private canvas = document.createElement('canvas');
+
+  constructor() {
+    document.body.appendChild(this.canvas);
+  }
+
+  handleImage(data: Uint8Array) {
+    const fbBuffer = new flatbuffers.ByteBuffer(data);
+    const image = frc971.vision.CameraImage.getRootAsCameraImage(fbBuffer);
+
+    const width = image.cols();
+    const height = image.rows();
+    if (width === 0 || height === 0) {
+      return;
+    }
+    const imageBuffer = new Uint8ClampedArray(width * height * 4); // RGBA
+
+    // Read four bytes (YUYV) from the data and transform into two pixels of
+    // RGBA for canvas
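+    // YUYV packs two pixels into four bytes (Y0 U Y1 V); each pair of pixels
+    // shares one U and one V chroma sample.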
+    for (let j = 0; j < height; j++) {
+      for (let i = 0; i < width; i += 2) {
+        const y1 = image.data((j * width + i) * 2);
+        const u = image.data((j * width + i) * 2 + 1);
+        const y2 = image.data((j * width + i + 1) * 2);
+        const v = image.data((j * width + i + 1) * 2 + 1);
+
+        // Based on https://en.wikipedia.org/wiki/YUV#Converting_between_Y%E2%80%B2UV_and_RGB
+        const c1 = y1 - 16;
+        const c2 = y2 - 16;
+        const d = u - 128;
+        const e = v - 128;
+
+        imageBuffer[(j * width + i) * 4 + 0] = (298 * c1 + 409 * e + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 1] =
+            (298 * c1 - 100 * d - 208 * e + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 2] = (298 * c1 + 516 * d + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 3] = 255;
+        imageBuffer[(j * width + i) * 4 + 4] = (298 * c2 + 409 * e + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 5] =
+            (298 * c2 - 100 * d - 208 * e + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 6] = (298 * c2 + 516 * d + 128) >> 8;
+        imageBuffer[(j * width + i) * 4 + 7] = 255;
+      }
+    }
+
+    const ctx = this.canvas.getContext('2d');
+
+    this.canvas.width = width;
+    this.canvas.height = height;
+    const idata = ctx.createImageData(width, height);
+    idata.data.set(imageBuffer);
+    ctx.putImageData(idata, 0, 0);
+  }
+
+  getId() {
+    return frc971.vision.CameraImage.getFullyQualifiedName();
+  }
+}
diff --git a/y2020/www/index.html b/y2020/www/index.html
new file mode 100644
index 0000000..97e16d4
--- /dev/null
+++ b/y2020/www/index.html
@@ -0,0 +1,11 @@
+<html>
+  <head>
+    <script src="flatbuffers.js"></script>
+    <script src="main_bundle.min.js" defer></script>
+    <link rel="stylesheet" href="styles.css">
+  </head>
+  <body>
+    <div id="config">
+    </div>
+  </body>
+</html>
diff --git a/y2020/www/main.ts b/y2020/www/main.ts
new file mode 100644
index 0000000..7831713
--- /dev/null
+++ b/y2020/www/main.ts
@@ -0,0 +1,11 @@
+import {Connection} from 'aos/network/www/proxy';
+
+import {ImageHandler} from './image_handler';
+
+const conn = new Connection();
+
+conn.connect();
+
+const iHandler = new ImageHandler();
+
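+// Render each incoming CameraImage on the handler's canvas.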
+conn.addHandler(iHandler.getId(), (data) => iHandler.handleImage(data));
diff --git a/y2020/www/styles.css b/y2020/www/styles.css
new file mode 100644
index 0000000..23ceb21
--- /dev/null
+++ b/y2020/www/styles.css
@@ -0,0 +1,5 @@
+.channel {
+  display: flex;
+  border-bottom: 1px solid;
+  font-size: 24px;
+}
diff --git a/y2020/y2020.json b/y2020/y2020.json
index 238d73f..d0c74e4 100644
--- a/y2020/y2020.json
+++ b/y2020/y2020.json
@@ -31,6 +31,12 @@
       "frequency": 25,
       "max_size": 620000,
       "num_senders": 18
+    },
+    {
+      "name": "/camera",
+      "type": "frc971.vision.sift.ImageMatchResult",
+      "frequency": 25,
+      "max_size": 300000
     }
   ],
   "applications": [