Merge "Remove unnecessary #include"
diff --git a/WORKSPACE b/WORKSPACE
index cbc390c..fb736ee 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -277,11 +277,14 @@
     url = "http://www.frc971.org/Build-Dependencies/mingw_compiler.tar.gz",
 )
 
+# Note that we should generally keep the matplotlib repo in a folder not
+# named matplotlib, because otherwise the repository itself tends to end up
+# on the PYTHONPATH, rather than the matplotlib folder within this repo.
 http_archive(
-    name = "matplotlib",
+    name = "matplotlib_repo",
     build_file = "@//debian:matplotlib.BUILD",
-    sha256 = "fa1ff9f3bb7fddba6d0b904af5ffdca97c6233a950840273c25145b8cad80483",
-    url = "http://www.frc971.org/Build-Dependencies/matplotlib-3.tar.gz",
+    sha256 = "24f8b75754e465299ddf92bd895ab111d54945a45b0f410d7cfa16b15b162e2f",
+    url = "http://www.frc971.org/Build-Dependencies/matplotlib-4.tar.gz",
 )
 
 http_archive(
@@ -549,3 +552,10 @@
     name = "com_github_google_flatbuffers",
     path = "third_party/flatbuffers",
 )
+
+http_file(
+    name = "sample_logfile",
+    downloaded_file_path = "log.fbs",
+    sha256 = "91c98edee0c90a19992792c711dde4a6743af2d6d7e45b5079ec228fdf51ff11",
+    urls = ["http://www.frc971.org/Build-Dependencies/small_sample_logfile.fbs"],
+)
diff --git a/aos/BUILD b/aos/BUILD
index aef8a59..e964066 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -1,5 +1,5 @@
 load("//tools:environments.bzl", "mcu_cpus")
-load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_python_library")
 
 filegroup(
     name = "prime_binaries",
@@ -295,6 +295,19 @@
     visibility = ["//visibility:public"],
 )
 
+flatbuffer_python_library(
+    name = "configuration_fbs_python",
+    srcs = ["configuration.fbs"],
+    namespace = "aos",
+    tables = [
+        "Configuration",
+        "Channel",
+        "Map",
+        "Node",
+    ],
+    visibility = ["//visibility:public"],
+)
+
 cc_library(
     name = "configuration",
     srcs = [
@@ -313,7 +326,6 @@
         "//aos/network:team_number",
         "//aos/util:file",
         "@com_github_google_glog//:glog",
-        "@com_google_absl//absl/base",
         "@com_google_absl//absl/container:btree",
         "@com_google_absl//absl/strings",
     ],
diff --git a/aos/aos_dump.cc b/aos/aos_dump.cc
index e7d9d7c..8d6c2b1 100644
--- a/aos/aos_dump.cc
+++ b/aos/aos_dump.cc
@@ -8,6 +8,7 @@
 #include "gflags/gflags.h"
 
 DEFINE_string(config, "./config.json", "File path of aos configuration");
+
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
 
@@ -43,13 +44,28 @@
     if (channel->name()->c_str() == channel_name &&
         channel->type()->str().find(message_type) != std::string::npos) {
       event_loop.MakeRawWatcher(
-          channel,
-          [channel](const aos::Context /* &context*/, const void *message) {
-            LOG(INFO) << '(' << channel->type()->c_str() << ") "
-                      << aos::FlatbufferToJson(
-                             channel->schema(),
-                             static_cast<const uint8_t *>(message))
-                      << '\n';
+          channel, [channel](const aos::Context &context, const void *message) {
+            // Print the flatbuffer out to stdout, both to remove the
+            // unnecessary cruft from glog and to allow the user to readily
+            // redirect just the logged output independent of any debugging
+            // information on stderr.
+            if (context.monotonic_remote_time != context.monotonic_event_time) {
+              std::cout << context.realtime_remote_time << " ("
+                        << context.monotonic_remote_time << ") delivered "
+                        << context.realtime_event_time << " ("
+                        << context.monotonic_event_time << "): "
+                        << aos::FlatbufferToJson(
+                               channel->schema(),
+                               static_cast<const uint8_t *>(message))
+                        << '\n';
+            } else {
+              std::cout << context.realtime_event_time << " ("
+                        << context.monotonic_event_time << "): "
+                        << aos::FlatbufferToJson(
+                               channel->schema(),
+                               static_cast<const uint8_t *>(message))
+                        << '\n';
+            }
           });
       found_channels++;
     }
diff --git a/aos/configuration.cc b/aos/configuration.cc
index 4a98297..46f07ac 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -9,7 +9,6 @@
 #include <unistd.h>
 #include <string_view>
 
-#include "absl/base/call_once.h"
 #include "absl/container/btree_set.h"
 #include "aos/configuration_generated.h"
 #include "aos/flatbuffer_merge.h"
@@ -78,44 +77,6 @@
 namespace configuration {
 namespace {
 
-void DoGetRootDirectory(char** retu) {
-  ssize_t size = 0;
-  *retu = NULL;
-  while (true) {
-    size += 256;
-    if (*retu != nullptr) delete *retu;
-    *retu = new char[size];
-
-    ssize_t ret = readlink("/proc/self/exe", *retu, size);
-    if (ret < 0) {
-      if (ret != -1) {
-        LOG(WARNING) << "it returned " << ret << ", not -1";
-      }
-
-      PLOG(FATAL) << "readlink(\"/proc/self/exe\", " << *retu << ", " << size
-                  << ") failed";
-    }
-    if (ret < size) {
-      void *last_slash = memrchr(*retu, '/', ret);
-      if (last_slash == NULL) {
-        *retu[ret] = '\0';
-        LOG(FATAL) << "couldn't find a '/' in \"" << *retu << "\"";
-      }
-      *static_cast<char *>(last_slash) = '\0';
-      LOG(INFO) << "got a root dir of \"" << *retu << "\"";
-      return;
-    }
-  }
-}
-
-void DoGetLoggingDirectory(char** retu) {
-  static const char kSuffix[] = "/../../tmp/robot_logs";
-  const char *root = GetRootDirectory();
-  *retu = new char[strlen(root) + sizeof(kSuffix)];
-  strcpy(*retu, root);
-  strcat(*retu, kSuffix);
-}
-
 // Extracts the folder part of a path.  Returns ./ if there is no path.
 std::string_view ExtractFolder(
     const std::string_view filename) {
@@ -409,12 +370,34 @@
           << " has an unknown \"source_node\"";
 
       if (c->has_destination_nodes()) {
-        for (const flatbuffers::String *n : *c->destination_nodes()) {
-          CHECK(GetNode(&result.message(), n->string_view()) != nullptr)
+        for (const Connection *connection : *c->destination_nodes()) {
+          CHECK(connection->has_name());
+          CHECK(GetNode(&result.message(), connection->name()->string_view()) !=
+                nullptr)
               << ": Channel " << FlatbufferToJson(c)
-              << " has an unknown \"destination_nodes\" " << n->string_view();
+              << " has an unknown \"destination_nodes\" "
+              << connection->name()->string_view();
 
-          CHECK_NE(n->string_view(), c->source_node()->string_view())
+          switch (connection->timestamp_logger()) {
+            case LoggerConfig::LOCAL_LOGGER:
+            case LoggerConfig::NOT_LOGGED:
+              CHECK(!connection->has_timestamp_logger_node());
+              break;
+            case LoggerConfig::REMOTE_LOGGER:
+            case LoggerConfig::LOCAL_AND_REMOTE_LOGGER:
+              CHECK(connection->has_timestamp_logger_node());
+              CHECK(
+                  GetNode(&result.message(),
+                          connection->timestamp_logger_node()->string_view()) !=
+                  nullptr)
+                  << ": Channel " << FlatbufferToJson(c)
+                  << " has an unknown \"timestamp_logger_node\""
+                  << connection->name()->string_view();
+              break;
+          }
+
+          CHECK_NE(connection->name()->string_view(),
+                   c->source_node()->string_view())
               << ": Channel " << FlatbufferToJson(c)
               << " is forwarding data to itself";
         }
@@ -425,20 +408,6 @@
   return result;
 }
 
-const char *GetRootDirectory() {
-  static char *root_dir;  // return value
-  static absl::once_flag once_;
-  absl::call_once(once_, DoGetRootDirectory, &root_dir);
-  return root_dir;
-}
-
-const char *GetLoggingDirectory() {
-  static char *retu;  // return value
-  static absl::once_flag once_;
-  absl::call_once(once_, DoGetLoggingDirectory, &retu);
-  return retu;
-}
-
 FlatbufferDetachedBuffer<Configuration> ReadConfig(
     const std::string_view path) {
   // We only want to read a file once.  So track the visited files in a set.
@@ -634,7 +603,10 @@
 }
 
 const Node *GetNode(const Configuration *config, std::string_view name) {
+  CHECK(config->has_nodes())
+      << ": Asking for a node from a single node configuration.";
   for (const Node *node : *config->nodes()) {
+    CHECK(node->has_name()) << ": Malformed node " << FlatbufferToJson(node);
     if (node->name()->string_view() == name) {
       return node;
     }
@@ -662,8 +634,9 @@
     return false;
   }
 
-  for (const flatbuffers::String *s : *channel->destination_nodes()) {
-    if (s->string_view() == node->name()->string_view()) {
+  for (const Connection *connection : *channel->destination_nodes()) {
+    CHECK(connection->has_name());
+    if (connection->name()->string_view() == node->name()->string_view()) {
       return true;
     }
   }
@@ -671,5 +644,91 @@
   return false;
 }
 
+bool ChannelMessageIsLoggedOnNode(const Channel *channel, const Node *node) {
+  switch (channel->logger()) {
+    case LoggerConfig::LOCAL_LOGGER:
+      if (node == nullptr) {
+        // Single node world.  If there is a local logger, then we want to use
+        // it.
+        return true;
+      }
+      return channel->source_node()->string_view() ==
+             node->name()->string_view();
+    case LoggerConfig::REMOTE_LOGGER:
+      CHECK(channel->has_logger_node());
+
+      return channel->logger_node()->string_view() ==
+             CHECK_NOTNULL(node)->name()->string_view();
+    case LoggerConfig::LOCAL_AND_REMOTE_LOGGER:
+      CHECK(channel->has_logger_node());
+
+      if (channel->source_node()->string_view() ==
+          CHECK_NOTNULL(node)->name()->string_view()) {
+        return true;
+      }
+      if (channel->logger_node()->string_view() == node->name()->string_view()) {
+        return true;
+      }
+
+      return false;
+    case LoggerConfig::NOT_LOGGED:
+      return false;
+  }
+
+  LOG(FATAL) << "Unknown logger config " << static_cast<int>(channel->logger());
+}
+
+const Connection *ConnectionToNode(const Channel *channel, const Node *node) {
+  if (!channel->has_destination_nodes()) {
+    return nullptr;
+  }
+  for (const Connection *connection : *channel->destination_nodes()) {
+    if (connection->name()->string_view() == node->name()->string_view()) {
+      return connection;
+    }
+  }
+  return nullptr;
+}
+
+bool ConnectionDeliveryTimeIsLoggedOnNode(const Channel *channel,
+                                          const Node *node,
+                                          const Node *logger_node) {
+  const Connection *connection = ConnectionToNode(channel, node);
+  if (connection == nullptr) {
+    return false;
+  }
+  return ConnectionDeliveryTimeIsLoggedOnNode(connection, logger_node);
+}
+
+bool ConnectionDeliveryTimeIsLoggedOnNode(const Connection *connection,
+                                          const Node *node) {
+  switch (connection->timestamp_logger()) {
+    case LoggerConfig::LOCAL_AND_REMOTE_LOGGER:
+      CHECK(connection->has_timestamp_logger_node());
+      if (connection->name()->string_view() == node->name()->string_view()) {
+        return true;
+      }
+
+      if (connection->timestamp_logger_node()->string_view() ==
+          node->name()->string_view()) {
+        return true;
+      }
+
+      return false;
+    case LoggerConfig::LOCAL_LOGGER:
+      return connection->name()->string_view() == node->name()->string_view();
+    case LoggerConfig::REMOTE_LOGGER:
+      CHECK(connection->has_timestamp_logger_node());
+
+      return connection->timestamp_logger_node()->string_view() ==
+             node->name()->string_view();
+    case LoggerConfig::NOT_LOGGED:
+      return false;
+  }
+
+  LOG(FATAL) << "Unknown logger config "
+             << static_cast<int>(connection->timestamp_logger());
+}
+
 }  // namespace configuration
 }  // namespace aos
diff --git a/aos/configuration.fbs b/aos/configuration.fbs
index cc1f950..317c100 100644
--- a/aos/configuration.fbs
+++ b/aos/configuration.fbs
@@ -2,6 +2,46 @@
 
 namespace aos;
 
+enum LoggerConfig : ubyte {
+  // This data should be logged on this node.
+  LOCAL_LOGGER,
+  // This data should be logged on a remote node.
+  REMOTE_LOGGER,
+  // This data should not be logged.
+  NOT_LOGGED,
+  // This data should be logged both on this node and on the remote node.
+  // This is useful where you want to log a message on both the sender and
+  // receiver to create self-contained log files.
+  LOCAL_AND_REMOTE_LOGGER
+}
+
+table Connection {
+  // Node name to forward to.
+  name:string;
+
+  // How the delivery timestamps for this connection should be logged.  Do we
+  // log them with the local logger (i.e. the logger running on the node that
+  // this message is delivered to)?  Do we log them on another node (a central
+  // logging node)?  Do we log them in both places redundantly?
+  timestamp_logger:LoggerConfig = LOCAL_LOGGER;
+
+  // If the corresponding delivery timestamps for this channel are logged
+  // remotely, this is the node responsible for logging them.  Note: for now,
+  // this can only be the source node.  Empty implies the node this
+  // connection is connecting to (i.e. name).
+  timestamp_logger_node:string;
+
+  // Priority to forward data with.
+  priority:ushort = 100;
+
+  // Time to live in nanoseconds before the message is dropped.
+  // A value of 0 means no timeout, i.e. reliable.  When a client connects, the
+  // latest message from this channel will be sent regardless.
+  // TODO(austin): We can retry more than just the last message on reconnect
+  //   if we want.  This is an unlikely scenario though.
+  time_to_live:uint = 0;
+}
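+
+// For illustration, a forwarded channel's JSON config lists its destinations
+// as Connection tables, e.g.:
+//   "destination_nodes": [
+//     {
+//       "name": "pi2",
+//       "priority": 1,
+//       "timestamp_logger": "REMOTE_LOGGER",
+//       "timestamp_logger_node": "pi1"
+//     }
+//   ]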
+
 // Table representing a channel.  Channels are where data is published and
 // subscribed from.  The tuple of name, type is the identifying information.
 table Channel {
@@ -11,8 +51,8 @@
   type:string;
   // Max frequency in messages/sec of the data published on this channel.
   frequency:int = 100;
-  // Max size of the data being published.  (This will be automatically
-  // computed in the future.)
+  // Max size of the data being published.  (This will hopefully be
+  // automatically computed in the future.)
   max_size:int = 1000;
 
   // Sets the maximum number of senders on a channel.
@@ -27,9 +67,17 @@
   // If nodes is populated below, this needs to also be populated.
   source_node:string;
 
-  // The destination node names for data sent on this channel.
+  // The destination nodes for data sent on this channel.
   // This only needs to be populated if this message is getting forwarded.
-  destination_nodes:[string];
+  destination_nodes:[Connection];
+
+  // What service is responsible for logging this channel:
+  logger:LoggerConfig = LOCAL_LOGGER;
+  // If the channel is logged remotely, this is the node responsible for
+  // logging the data.  Note: this requires that the data is forwarded to the
+  // node responsible for logging it.  Empty implies the node this connection
+  // is connecting to (i.e. name).
+  logger_node:string;
 }
 
 // Table to support renaming channel names.
@@ -99,7 +147,7 @@
 
   // Length of the channels in nanoseconds.  Every channel will have enough
   // data allocated so that if data is published at the configured frequency,
-  // this many seconds of messages will be available for fetchers.
+  // at least this many nanoseconds of messages will be available for fetchers.
   channel_storage_duration:long = 2000000000 (id: 5);
 }
 
diff --git a/aos/configuration.h b/aos/configuration.h
index f2cf499..7bf8203 100644
--- a/aos/configuration.h
+++ b/aos/configuration.h
@@ -65,21 +65,23 @@
 // provided node.
 bool ChannelIsReadableOnNode(const Channel *channel, const Node *node);
 
+// Returns true if the message is supposed to be logged on this node.
+bool ChannelMessageIsLoggedOnNode(const Channel *channel, const Node *node);
+
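+// Returns the Connection for the given node in the channel's
+// destination_nodes, or nullptr if the channel is not forwarded to that node.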
+const Connection *ConnectionToNode(const Channel *channel, const Node *node);
+// Returns true if the delivery timestamps are supposed to be logged on this
+// node.
+bool ConnectionDeliveryTimeIsLoggedOnNode(const Channel *channel,
+                                          const Node *node,
+                                          const Node *logger_node);
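+// Returns true if the delivery timestamps for this connection are supposed to
+// be logged on the provided node.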
+bool ConnectionDeliveryTimeIsLoggedOnNode(const Connection *connection,
+                                          const Node *node);
+
 // Prints a channel to json, but without the schema.
 std::string CleanedChannelToString(const Channel *channel);
 
 // TODO(austin): GetSchema<T>(const Flatbuffer<Configuration> &config);
 
-// Returns the "root directory" for this run. Under linux, this is the
-// directory where the executable is located (from /proc/self/exe)
-// The return value will always be to a static string, so no freeing is
-// necessary.
-const char *GetRootDirectory();
-// Returns the directory where logs get written. Relative to GetRootDirectory().
-// The return value will always be to a static string, so no freeing is
-// necessary.
-const char *GetLoggingDirectory();
-
 }  // namespace configuration
 
 // Compare and equality operators for Channel.  Note: these only check the name
diff --git a/aos/configuration_test.cc b/aos/configuration_test.cc
index ff0b570..fab6745 100644
--- a/aos/configuration_test.cc
+++ b/aos/configuration_test.cc
@@ -200,7 +200,7 @@
 // Tests that our node writeable helpers work as intended.
 TEST_F(ConfigurationTest, ChannelIsSendableOnNode) {
   FlatbufferDetachedBuffer<Channel> good_channel(JsonToFlatbuffer(
-R"channel({
+      R"channel({
   "name": "/test",
   "type": "aos.examples.Ping",
   "source_node": "foo"
@@ -208,7 +208,7 @@
       Channel::MiniReflectTypeTable()));
 
   FlatbufferDetachedBuffer<Channel> bad_channel(JsonToFlatbuffer(
-R"channel({
+      R"channel({
   "name": "/test",
   "type": "aos.examples.Ping",
   "source_node": "bar"
@@ -216,7 +216,7 @@
       Channel::MiniReflectTypeTable()));
 
   FlatbufferDetachedBuffer<Node> node(JsonToFlatbuffer(
-R"node({
+      R"node({
   "name": "foo"
 })node",
       Node::MiniReflectTypeTable()));
@@ -230,19 +230,23 @@
 // Tests that our node readable and writeable helpers work as intended.
 TEST_F(ConfigurationTest, ChannelIsReadableOnNode) {
   FlatbufferDetachedBuffer<Channel> good_channel(JsonToFlatbuffer(
-R"channel({
+      R"channel({
   "name": "/test",
   "type": "aos.examples.Ping",
   "source_node": "bar",
   "destination_nodes": [
-    "baz",
-    "foo",
+    {
+      "name": "baz"
+    },
+    {
+      "name": "foo"
+    }
   ]
 })channel",
       Channel::MiniReflectTypeTable()));
 
   FlatbufferDetachedBuffer<Channel> bad_channel1(JsonToFlatbuffer(
-R"channel({
+      R"channel({
   "name": "/test",
   "type": "aos.examples.Ping",
   "source_node": "bar"
@@ -250,18 +254,20 @@
       Channel::MiniReflectTypeTable()));
 
   FlatbufferDetachedBuffer<Channel> bad_channel2(JsonToFlatbuffer(
-R"channel({
+      R"channel({
   "name": "/test",
   "type": "aos.examples.Ping",
   "source_node": "bar",
   "destination_nodes": [
-    "baz"
+    {
+      "name": "baz"
+    }
   ]
 })channel",
       Channel::MiniReflectTypeTable()));
 
   FlatbufferDetachedBuffer<Node> node(JsonToFlatbuffer(
-R"node({
+      R"node({
   "name": "foo"
 })node",
       Node::MiniReflectTypeTable()));
@@ -274,7 +280,293 @@
       ChannelIsReadableOnNode(&bad_channel2.message(), &node.message()));
 }
 
+// Tests that our message-is-logged helpers work as intended.
+TEST_F(ConfigurationTest, ChannelMessageIsLoggedOnNode) {
+  FlatbufferDetachedBuffer<Channel> logged_on_self_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "destination_nodes": [
+    {
+      "name": "baz"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
 
+  FlatbufferDetachedBuffer<Channel> not_logged_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "NOT_LOGGED",
+  "destination_nodes": [
+    {
+      "name": "baz",
+      "timestamp_logger": "LOCAL_LOGGER"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_remote_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "REMOTE_LOGGER",
+  "logger_node": "baz",
+  "destination_nodes": [
+    {
+      "name": "baz"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_separate_logger_node_channel(
+      JsonToFlatbuffer(
+          R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "REMOTE_LOGGER",
+  "logger_node": "foo",
+  "destination_nodes": [
+    {
+      "name": "baz"
+    }
+  ]
+})channel",
+          Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_both_channel(
+      JsonToFlatbuffer(
+          R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "LOCAL_AND_REMOTE_LOGGER",
+  "logger_node": "baz",
+  "destination_nodes": [
+    {
+      "name": "baz"
+    }
+  ]
+})channel",
+          Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> foo_node(JsonToFlatbuffer(
+      R"node({
+  "name": "foo"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> bar_node(JsonToFlatbuffer(
+      R"node({
+  "name": "bar"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> baz_node(JsonToFlatbuffer(
+      R"node({
+  "name": "baz"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  // Local logger.
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&logged_on_self_channel.message(),
+                                            &foo_node.message()));
+  EXPECT_TRUE(ChannelMessageIsLoggedOnNode(&logged_on_self_channel.message(),
+                                           &bar_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&logged_on_self_channel.message(),
+                                            &baz_node.message()));
+
+  // No logger.
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&not_logged_channel.message(),
+                                            &foo_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&not_logged_channel.message(),
+                                           &bar_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&not_logged_channel.message(),
+                                            &baz_node.message()));
+
+  // Remote logger.
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&logged_on_remote_channel.message(),
+                                            &foo_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&logged_on_remote_channel.message(),
+                                            &bar_node.message()));
+  EXPECT_TRUE(ChannelMessageIsLoggedOnNode(&logged_on_remote_channel.message(),
+                                           &baz_node.message()));
+
+  // Separate logger.
+  EXPECT_TRUE(ChannelMessageIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &foo_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &bar_node.message()));
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &baz_node.message()));
+
+  // Logged in multiple places.
+  EXPECT_FALSE(ChannelMessageIsLoggedOnNode(&logged_on_both_channel.message(),
+                                            &foo_node.message()));
+  EXPECT_TRUE(ChannelMessageIsLoggedOnNode(&logged_on_both_channel.message(),
+                                           &bar_node.message()));
+  EXPECT_TRUE(ChannelMessageIsLoggedOnNode(&logged_on_both_channel.message(),
+                                           &baz_node.message()));
+}
+
+// Tests that our forwarding-timestamp-is-logged helpers work as intended.
+TEST_F(ConfigurationTest, ConnectionDeliveryTimeIsLoggedOnNode) {
+  FlatbufferDetachedBuffer<Channel> logged_on_self_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "REMOTE_LOGGER",
+  "logger_node": "baz",
+  "destination_nodes": [
+    {
+      "name": "baz"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> not_logged_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "NOT_LOGGED",
+  "destination_nodes": [
+    {
+      "name": "baz",
+      "timestamp_logger": "NOT_LOGGED"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_remote_channel(JsonToFlatbuffer(
+      R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "destination_nodes": [
+    {
+      "name": "baz",
+      "timestamp_logger": "REMOTE_LOGGER",
+      "timestamp_logger_node": "bar"
+    }
+  ]
+})channel",
+      Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_separate_logger_node_channel(
+      JsonToFlatbuffer(
+          R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "logger": "REMOTE_LOGGER",
+  "logger_node": "foo",
+  "destination_nodes": [
+    {
+      "name": "baz",
+      "timestamp_logger": "REMOTE_LOGGER",
+      "timestamp_logger_node": "foo"
+    }
+  ]
+})channel",
+          Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Channel> logged_on_both_channel(
+      JsonToFlatbuffer(
+          R"channel({
+  "name": "/test",
+  "type": "aos.examples.Ping",
+  "source_node": "bar",
+  "destination_nodes": [
+    {
+      "name": "baz",
+      "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+      "timestamp_logger_node": "bar"
+    }
+  ]
+})channel",
+          Channel::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> foo_node(JsonToFlatbuffer(
+      R"node({
+  "name": "foo"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> bar_node(JsonToFlatbuffer(
+      R"node({
+  "name": "bar"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  FlatbufferDetachedBuffer<Node> baz_node(JsonToFlatbuffer(
+      R"node({
+  "name": "baz"
+})node",
+      Node::MiniReflectTypeTable()));
+
+  // Local logger.
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_self_channel.message(), &baz_node.message(),
+      &foo_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_self_channel.message(), &baz_node.message(),
+      &bar_node.message()));
+  EXPECT_TRUE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_self_channel.message(), &baz_node.message(),
+      &baz_node.message()));
+
+  // No logger.
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &not_logged_channel.message(), &baz_node.message(), &foo_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &not_logged_channel.message(), &baz_node.message(), &bar_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &not_logged_channel.message(), &baz_node.message(), &baz_node.message()));
+
+  // Remote logger.
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_remote_channel.message(), &baz_node.message(),
+      &foo_node.message()));
+  EXPECT_TRUE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_remote_channel.message(), &baz_node.message(),
+      &bar_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_remote_channel.message(), &baz_node.message(),
+      &baz_node.message()));
+
+  // Separate logger.
+  EXPECT_TRUE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &baz_node.message(),
+      &foo_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &baz_node.message(),
+      &bar_node.message()));
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_separate_logger_node_channel.message(), &baz_node.message(),
+      &baz_node.message()));
+
+  // Logged on both the node and a remote node.
+  EXPECT_FALSE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_both_channel.message(), &baz_node.message(),
+      &foo_node.message()));
+  EXPECT_TRUE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_both_channel.message(), &baz_node.message(),
+      &bar_node.message()));
+  EXPECT_TRUE(ConnectionDeliveryTimeIsLoggedOnNode(
+      &logged_on_both_channel.message(), &baz_node.message(),
+      &baz_node.message()));
+}
 }  // namespace testing
 }  // namespace configuration
 }  // namespace aos
diff --git a/aos/events/BUILD b/aos/events/BUILD
index dfad4ff..e40ab99 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -115,6 +115,16 @@
     deps = [":config"],
 )
 
+aos_config(
+    name = "multinode_pingpong_config",
+    src = "multinode_pingpong.json",
+    flatbuffers = [
+        ":ping_fbs",
+        ":pong_fbs",
+    ],
+    deps = [":config"],
+)
+
 cc_library(
     name = "pong_lib",
     srcs = [
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 94e25c2..80d8798 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -65,7 +65,10 @@
 cc_test(
     name = "logger_test",
     srcs = ["logger_test.cc"],
-    data = ["//aos/events:pingpong_config.json"],
+    data = [
+        "//aos/events:multinode_pingpong_config.json",
+        "//aos/events:pingpong_config.json",
+    ],
     deps = [
         ":logger",
         "//aos/events:ping_lib",
diff --git a/aos/events/logging/log_cat.cc b/aos/events/logging/log_cat.cc
index a53d337..45f0811 100644
--- a/aos/events/logging/log_cat.cc
+++ b/aos/events/logging/log_cat.cc
@@ -28,7 +28,8 @@
   aos::InitGoogle(&argc, &argv);
 
   aos::logger::LogReader reader(FLAGS_logfile);
-  aos::SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  aos::SimulatedEventLoopFactory log_reader_factory(reader.configuration(),
+                                                    reader.node());
   reader.Register(&log_reader_factory);
 
   std::unique_ptr<aos::EventLoop> printer_event_loop =
@@ -44,6 +45,10 @@
     const flatbuffers::string_view type = channel->type()->string_view();
     if (name.find(FLAGS_name) != std::string::npos &&
         type.find(FLAGS_type) != std::string::npos) {
+      if (!aos::configuration::ChannelIsReadableOnNode(
+              channel, printer_event_loop->node())) {
+        continue;
+      }
       LOG(INFO) << "Listening on " << name << " " << type;
 
       CHECK_NOTNULL(channel->schema());
@@ -53,14 +58,27 @@
             // unnecessary cruft from glog and to allow the user to readily
             // redirect just the logged output independent of any debugging
             // information on stderr.
-            std::cout << context.realtime_event_time << " ("
-                      << context.monotonic_event_time << ") "
-                      << channel->name()->c_str() << ' '
-                      << channel->type()->c_str() << ": "
-                      << aos::FlatbufferToJson(
-                             channel->schema(),
-                             static_cast<const uint8_t *>(message))
-                      << '\n';
+            if (context.monotonic_remote_time != context.monotonic_event_time) {
+              std::cout << context.realtime_remote_time << " ("
+                        << context.monotonic_remote_time << ") delivered "
+                        << context.realtime_event_time << " ("
+                        << context.monotonic_event_time << ") "
+                        << channel->name()->c_str() << ' '
+                        << channel->type()->c_str() << ": "
+                        << aos::FlatbufferToJson(
+                               channel->schema(),
+                               static_cast<const uint8_t *>(message))
+                        << '\n';
+            } else {
+              std::cout << context.realtime_event_time << " ("
+                        << context.monotonic_event_time << ") "
+                        << channel->name()->c_str() << ' '
+                        << channel->type()->c_str() << ": "
+                        << aos::FlatbufferToJson(
+                               channel->schema(),
+                               static_cast<const uint8_t *>(message))
+                        << '\n';
+            }
           });
       found_channel = true;
     }
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index e35fe5b..b247944 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -18,6 +18,9 @@
 
 DEFINE_int32(flush_size, 1000000,
              "Number of outstanding bytes to allow before flushing to disk.");
+DEFINE_bool(skip_missing_forwarding_entries, false,
+            "If true, drop any forwarding entries with missing data.  If "
+            "false, CHECK.");
 
 namespace aos {
 namespace logger {
@@ -86,7 +89,44 @@
       polling_period_(polling_period) {
   for (const Channel *channel : *event_loop_->configuration()->channels()) {
     FetcherStruct fs;
-    fs.fetcher = event_loop->MakeRawFetcher(channel);
+    const bool is_readable =
+        configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
+    const bool log_message = configuration::ChannelMessageIsLoggedOnNode(
+                                 channel, event_loop_->node()) &&
+                             is_readable;
+
+    const bool log_delivery_times =
+        (event_loop_->node() == nullptr)
+            ? false
+            : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
+                  channel, event_loop_->node(), event_loop_->node());
+
+    if (log_message || log_delivery_times) {
+      fs.fetcher = event_loop->MakeRawFetcher(channel);
+      VLOG(1) << "Logging channel "
+              << configuration::CleanedChannelToString(channel);
+
+      if (log_delivery_times) {
+        if (log_message) {
+          VLOG(1) << "  Logging message and delivery times";
+          fs.log_type = LogType::kLogMessageAndDeliveryTime;
+        } else {
+          VLOG(1) << "  Logging delivery times only";
+          fs.log_type = LogType::kLogDeliveryTimeOnly;
+        }
+      } else {
+        // We don't have a particularly great use case right now for logging a
+        // forwarded message while either not logging the delivery times, or
+        // logging them on another node.  Fail rather than produce bad results.
+        CHECK(configuration::ChannelIsSendableOnNode(channel,
+                                                     event_loop_->node()))
+            << ": Logger only knows how to log remote messages with "
+               "forwarding timestamps.";
+        VLOG(1) << "  Logging message only";
+        fs.log_type = LogType::kLogMessage;
+      }
+    }
+
     fs.written = false;
     fetchers_.emplace_back(std::move(fs));
   }
@@ -99,7 +139,9 @@
     // so we can capture the latest message on each channel.  This lets us have
     // non periodic messages with configuration that now get logged.
     for (FetcherStruct &f : fetchers_) {
-      f.written = !f.fetcher->Fetch();
+      if (f.fetcher.get() != nullptr) {
+        f.written = !f.fetcher->Fetch();
+      }
     }
 
     // We need to pick a point in time to declare the log file "started".  This
@@ -122,10 +164,21 @@
       flatbuffers::Offset<flatbuffers::String> string_offset =
           fbb.CreateString(network::GetHostname());
 
+      flatbuffers::Offset<Node> node_offset;
+      if (event_loop_->node() != nullptr) {
+        node_offset = CopyFlatBuffer(event_loop_->node(), &fbb);
+      }
+      LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node());
+
       aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
 
       log_file_header_builder.add_name(string_offset);
 
+      // Only add the node if we are running in a multinode configuration.
+      if (event_loop_->node() != nullptr) {
+        log_file_header_builder.add_node(node_offset);
+      }
+
       log_file_header_builder.add_configuration(configuration_offset);
       // The worst case theoretical out of order is the polling period times 2.
       // One message could get logged right after the boundary, but be for right
@@ -157,20 +210,46 @@
 
 flatbuffers::Offset<MessageHeader> PackMessage(
     flatbuffers::FlatBufferBuilder *fbb, const Context &context,
-    int channel_index) {
-  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset =
-      fbb->CreateVector(static_cast<uint8_t *>(context.data), context.size);
+    int channel_index, LogType log_type) {
+  flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data_offset;
+
+  switch (log_type) {
+    case LogType::kLogMessage:
+    case LogType::kLogMessageAndDeliveryTime:
+      data_offset =
+          fbb->CreateVector(static_cast<uint8_t *>(context.data), context.size);
+      break;
+
+    case LogType::kLogDeliveryTimeOnly:
+      break;
+  }
 
   MessageHeader::Builder message_header_builder(*fbb);
   message_header_builder.add_channel_index(channel_index);
+  message_header_builder.add_queue_index(context.queue_index);
   message_header_builder.add_monotonic_sent_time(
       context.monotonic_event_time.time_since_epoch().count());
   message_header_builder.add_realtime_sent_time(
       context.realtime_event_time.time_since_epoch().count());
 
-  message_header_builder.add_queue_index(context.queue_index);
+  switch (log_type) {
+    case LogType::kLogMessage:
+      message_header_builder.add_data(data_offset);
+      break;
 
-  message_header_builder.add_data(data_offset);
+    case LogType::kLogMessageAndDeliveryTime:
+      message_header_builder.add_data(data_offset);
+      [[fallthrough]];
+
+    case LogType::kLogDeliveryTimeOnly:
+      message_header_builder.add_monotonic_remote_time(
+          context.monotonic_remote_time.time_since_epoch().count());
+      message_header_builder.add_realtime_remote_time(
+          context.realtime_remote_time.time_since_epoch().count());
+      message_header_builder.add_remote_queue_index(context.remote_queue_index);
+      break;
+  }
+
   return message_header_builder.Finish();
 }
 
@@ -188,51 +267,46 @@
     size_t channel_index = 0;
     // Write each channel to disk, one at a time.
     for (FetcherStruct &f : fetchers_) {
-      while (true) {
-        if (f.fetcher.get() == nullptr) {
-          if (!f.fetcher->FetchNext()) {
-            VLOG(1) << "No new data on "
-                    << FlatbufferToJson(f.fetcher->channel());
-            break;
-          } else {
-            f.written = false;
+      // Skip any channels which we aren't supposed to log.
+      if (f.fetcher.get() != nullptr) {
+        while (true) {
+          if (f.written) {
+            if (!f.fetcher->FetchNext()) {
+              VLOG(2) << "No new data on "
+                      << configuration::CleanedChannelToString(
+                             f.fetcher->channel());
+              break;
+            } else {
+              f.written = false;
+            }
           }
-        }
 
-        if (f.written) {
-          if (!f.fetcher->FetchNext()) {
-            VLOG(1) << "No new data on "
-                    << FlatbufferToJson(f.fetcher->channel());
-            break;
+          CHECK(!f.written);
+
+          // TODO(james): Write tests to exercise this logic.
+          if (f.fetcher->context().monotonic_event_time <
+              last_synchronized_time_) {
+            // Write!
+            flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
+                                               max_header_size_);
+            fbb.ForceDefaults(1);
+
+            fbb.FinishSizePrefixed(PackMessage(&fbb, f.fetcher->context(),
+                                               channel_index, f.log_type));
+
+            VLOG(2) << "Writing data for channel "
+                    << configuration::CleanedChannelToString(
+                           f.fetcher->channel());
+
+            max_header_size_ = std::max(
+                max_header_size_, fbb.GetSize() - f.fetcher->context().size);
+            writer_->QueueSizedFlatbuffer(&fbb);
+
+            f.written = true;
           } else {
-            f.written = false;
+            break;
           }
         }
-
-        CHECK(!f.written);
-
-        // TODO(james): Write tests to exercise this logic.
-        if (f.fetcher->context().monotonic_event_time <
-            last_synchronized_time_) {
-          // Write!
-          flatbuffers::FlatBufferBuilder fbb(f.fetcher->context().size +
-                                             max_header_size_);
-          fbb.ForceDefaults(1);
-
-          fbb.FinishSizePrefixed(
-              PackMessage(&fbb, f.fetcher->context(), channel_index));
-
-          VLOG(1) << "Writing data for channel "
-                  << FlatbufferToJson(f.fetcher->channel());
-
-          max_header_size_ = std::max(
-              max_header_size_, fbb.GetSize() - f.fetcher->context().size);
-          writer_->QueueSizedFlatbuffer(&fbb);
-
-          f.written = true;
-        } else {
-          break;
-        }
       }
 
       ++channel_index;
@@ -269,6 +343,10 @@
   QueueMessages();
 }
 
+LogReader::~LogReader() {
+  CHECK(!event_loop_unique_ptr_) << "Did you remember to call Deregister?";
+}
+
 bool LogReader::ReadBlock() {
   if (end_of_file_) {
     return false;
@@ -373,11 +451,32 @@
   queue_data_time_ = newest_timestamp_ - max_out_of_order_duration_;
 }
 
-const Configuration *LogReader::configuration() {
+const Configuration *LogReader::configuration() const {
   return flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
       ->configuration();
 }
 
+const Node *LogReader::node() const {
+  if (configuration()->has_nodes()) {
+    CHECK(flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
+              ->has_node());
+    CHECK(flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
+              ->node()
+              ->has_name());
+    return configuration::GetNode(
+        configuration(),
+        flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
+            ->node()
+            ->name()
+            ->string_view());
+  } else {
+    CHECK(
+        !flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
+             ->has_node());
+    return nullptr;
+  }
+}
+
 monotonic_clock::time_point LogReader::monotonic_start_time() {
   return monotonic_clock::time_point(std::chrono::nanoseconds(
       flatbuffers::GetSizePrefixedRoot<LogFileHeader>(configuration_.data())
@@ -434,20 +533,32 @@
 
     FlatbufferVector<MessageHeader> front = std::move(channel.front());
 
-    CHECK(front.message().data() != nullptr);
+    if (oldest_channel_index.first > monotonic_start_time() ||
+        event_loop_factory_ != nullptr) {
+      if (!FLAGS_skip_missing_forwarding_entries ||
+          front.message().data() != nullptr) {
+        CHECK(front.message().data() != nullptr)
+            << ": Got a message without data.  Forwarding entry which was not "
+               "matched?  Use --skip_missing_forwarding_entries to ignore "
+               "this.";
 
-    if (oldest_channel_index.first > monotonic_start_time()) {
-      // If we have access to the factory, use it to fix the realtime time.
-      if (event_loop_factory_ != nullptr) {
-        event_loop_factory_->SetRealtimeOffset(
+        // If we have access to the factory, use it to fix the realtime time.
+        if (event_loop_factory_ != nullptr) {
+          event_loop_factory_->SetRealtimeOffset(
+              monotonic_clock::time_point(
+                  chrono::nanoseconds(front.message().monotonic_sent_time())),
+              realtime_clock::time_point(
+                  chrono::nanoseconds(front.message().realtime_sent_time())));
+        }
+
+        channel.raw_sender->Send(
+            front.message().data()->Data(), front.message().data()->size(),
             monotonic_clock::time_point(
-                chrono::nanoseconds(front.message().monotonic_sent_time())),
+                chrono::nanoseconds(front.message().monotonic_remote_time())),
             realtime_clock::time_point(
-                chrono::nanoseconds(front.message().realtime_sent_time())));
+                chrono::nanoseconds(front.message().realtime_remote_time())),
+            front.message().remote_queue_index());
       }
-
-      channel.raw_sender->Send(front.message().data()->Data(),
-                               front.message().data()->size());
     } else {
       LOG(WARNING) << "Not sending data from before the start of the log file. "
                    << oldest_channel_index.first.time_since_epoch().count()
diff --git a/aos/events/logging/logger.fbs b/aos/events/logging/logger.fbs
index 3e89ff3..f1af17e 100644
--- a/aos/events/logging/logger.fbs
+++ b/aos/events/logging/logger.fbs
@@ -31,7 +31,8 @@
   // Name of the device which this log file is for.
   name:string;
 
-  // TODO(austin): Node!
+  // The current node, if known and running in a multi-node configuration.
+  node:Node;
 }
 
 // Table holding a message.
@@ -39,18 +40,28 @@
   // Index into the channel datastructure in the log file header.  This
   // provides the data type.
   channel_index:uint;
-  // Time this message was sent on the monotonic clock in nanoseconds.
+  // Time this message was sent on the monotonic clock in nanoseconds on this
+  // node.
   monotonic_sent_time:long;
-  // Time this message was sent on the realtime clock in nanoseconds.
+  // Time this message was sent on the realtime clock in nanoseconds on this
+  // node.
   realtime_sent_time:long;
   // Index into the ipc queue of this message.  This should start with 0 and
   // always monotonically increment if no messages were ever lost.  It will
   // wrap at a multiple of the queue size.
   queue_index:uint;
-  // TODO(austin): Node.
 
   // TODO(austin): Format?  Compressed?
 
   // The nested flatbuffer.
   data:[ubyte];
+
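+  // The remote fields below are only filled in for messages which were
+  // forwarded from another node; otherwise they are left at the sentinel
+  // defaults below (int64 min and uint32 max).
+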
+  // Time this message was sent on the monotonic clock of the remote node in
+  // nanoseconds.
+  monotonic_remote_time:long = -9223372036854775808;
+  // Time this message was sent on the realtime clock of the remote node in
+  // nanoseconds.
+  realtime_remote_time:long = -9223372036854775808;
+  // Queue index of this message on the remote node.
+  remote_queue_index:uint = 4294967295;
 }
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index 497dabb..d44a885 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -46,10 +46,17 @@
   std::vector<struct iovec> iovec_;
 };
 
-// Packes a message pointed to by the context into a MessageHeader.
-flatbuffers::Offset<MessageHeader> PackMessage(
-    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
-    int channel_index);
+enum class LogType : uint8_t {
+  // The message originated on this node and should be logged here.
+  kLogMessage,
+  // The message originated on another node, but only the delivery times are
+  // logged here.
+  kLogDeliveryTimeOnly,
+  // The message originated on another node. Log it and the delivery times
+  // together.  The message_gateway is responsible for logging any messages
+  // which didn't get delivered.
+  kLogMessageAndDeliveryTime
+};
 
 // Logs all channels available in the event loop to disk every 100 ms.
 // Start by logging one message per channel to capture any state and
@@ -73,6 +80,8 @@
   struct FetcherStruct {
     std::unique_ptr<RawFetcher> fetcher;
     bool written = false;
+
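+    // Whether to log the message data, only the delivery times, or both.
+    // See LogType above.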
+    LogType log_type;
   };
 
   std::vector<FetcherStruct> fetchers_;
@@ -89,10 +98,16 @@
   size_t max_header_size_ = 0;
 };
 
+// Packs a message pointed to by the context into a MessageHeader.
+flatbuffers::Offset<MessageHeader> PackMessage(
+    flatbuffers::FlatBufferBuilder *fbb, const Context &context,
+    int channel_index, LogType log_type);
+
 // Replays all the channels in the logfile to the event loop.
 class LogReader {
  public:
   LogReader(absl::string_view filename);
+  ~LogReader();
 
   // Registers the timer and senders used to resend the messages from the log
   // file.
@@ -106,7 +121,10 @@
   // TODO(austin): Remap channels?
 
   // Returns the configuration from the log file.
-  const Configuration *configuration();
+  const Configuration *configuration() const;
+
+  // Returns the node that this log file was created on.
+  const Node *node() const;
 
   // Returns the starting timestamp for the log file.
   monotonic_clock::time_point monotonic_start_time();
@@ -125,8 +143,8 @@
   // will have to read more data from disk.
   bool MessageAvailable();
 
-  // Returns a span with the data for a message from the log file, excluding the
-  // size.
+  // Returns a span with the data for a message from the log file, excluding
+  // the size.
   absl::Span<const uint8_t> ReadMessage();
 
   // Queues at least max_out_of_order_duration_ messages into channels_.
@@ -144,16 +162,16 @@
   // buffer, then into sender), but none of it is all that expensive.  We can
   // optimize if it is slow later.
   //
-  // As we place the elements in the sorted list of times, keep doing this until
-  // we read a message that is newer than the threshold.
+  // As we place the elements in the sorted list of times, keep doing this
+  // until we read a message that is newer than the threshold.
   //
   // Then repeat.  Keep filling up the sorted list with 256 KB chunks (need a
   // small state machine so we can resume), and keep pulling messages back out
   // and sending.
   //
-  // For sorting, we want to use the fact that each channel is sorted, and then
-  // merge sort the channels.  Have a vector of deques, and then hold a sorted
-  // list of pointers to those.
+  // For sorting, we want to use the fact that each channel is sorted, and
+  // then merge sort the channels.  Have a vector of deques, and then hold a
+  // sorted list of pointers to those.
   //
   // TODO(austin): Multithreaded read at some point.  Gotta go faster!
   // Especially if we start compressing.
@@ -183,8 +201,8 @@
     }
   };
 
-  // Minimum amount of data to queue up for sorting before we are guarenteed to
-  // not see data out of order.
+  // Minimum amount of data to queue up for sorting before we are guaranteed
+  // to not see data out of order.
   std::chrono::nanoseconds max_out_of_order_duration_;
 
   // File descriptor for the log file.
@@ -195,8 +213,8 @@
   EventLoop *event_loop_ = nullptr;
   TimerHandler *timer_handler_;
 
-  // Vector to read into.  This uses an allocator which doesn't zero initialize
-  // the memory.
+  // Vector to read into.  This uses an allocator which doesn't zero
+  // initialize the memory.
   std::vector<uint8_t, DefaultInitAllocator<uint8_t>> data_;
 
   // Amount of data consumed already in data_.
@@ -223,8 +241,8 @@
   // timestamp.
   std::pair<monotonic_clock::time_point, int> PopOldestChannel();
 
-  // Datastructure to hold the list of messages, cached timestamp for the oldest
-  // message, and sender to send with.
+  // Datastructure to hold the list of messages, cached timestamp for the
+  // oldest message, and sender to send with.
   struct ChannelData {
     monotonic_clock::time_point oldest_timestamp = monotonic_clock::min_time;
     std::deque<FlatbufferVector<MessageHeader>> data;
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 9aae936..91190e7 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -62,12 +62,16 @@
   LogReader reader(logfile);
 
   LOG(INFO) << "Config " << FlatbufferToJson(reader.configuration());
+  EXPECT_EQ(reader.node(), nullptr);
 
   SimulatedEventLoopFactory log_reader_factory(reader.configuration());
 
-  // This sends out the fetched messages and advances time to the start of the log file.
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
   reader.Register(&log_reader_factory);
 
+  EXPECT_EQ(log_reader_factory.node(), nullptr);
+
   std::unique_ptr<EventLoop> test_event_loop =
       log_reader_factory.MakeEventLoop("log_reader");
 
@@ -136,6 +140,125 @@
   }
 }
 
+class MultinodeLoggerTest : public ::testing::Test {
+ public:
+  MultinodeLoggerTest()
+      : config_(aos::configuration::ReadConfig(
+            "aos/events/multinode_pingpong_config.json")),
+        event_loop_factory_(&config_.message(), "pi1"),
+        ping_event_loop_(event_loop_factory_.MakeEventLoop("ping")),
+        ping_(ping_event_loop_.get()) {}
+
+  // Config and factory.
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
+  SimulatedEventLoopFactory event_loop_factory_;
+
+  // Event loop and app for Ping
+  std::unique_ptr<EventLoop> ping_event_loop_;
+  Ping ping_;
+};
+
+// Tests that we can startup at all in a multinode configuration.
+TEST_F(MultinodeLoggerTest, MultiNode) {
+  constexpr chrono::seconds kTimeOffset = chrono::seconds(10000);
+  constexpr uint32_t kQueueIndexOffset = 1024;
+  const ::std::string tmpdir(getenv("TEST_TMPDIR"));
+  const ::std::string logfile = tmpdir + "/multi_logfile.bfbs";
+  // Remove it.
+  unlink(logfile.c_str());
+
+  LOG(INFO) << "Logging data to " << logfile;
+
+  {
+    std::unique_ptr<EventLoop> pong_event_loop =
+        event_loop_factory_.MakeEventLoop("pong");
+
+    std::unique_ptr<aos::RawSender> pong_sender(
+        pong_event_loop->MakeRawSender(aos::configuration::GetChannel(
+            pong_event_loop->configuration(), "/test", "aos.examples.Pong",
+            pong_event_loop->name(), pong_event_loop->node())));
+
+    // Ok, let's fake a remote node.  We use the fancy raw sender Send
+    // method that message_gateway will use to do that.
+    int pong_count = 0;
+    pong_event_loop->MakeWatcher(
+        "/test", [&pong_event_loop, &pong_count, &pong_sender,
+                  kTimeOffset](const examples::Ping &ping) {
+          flatbuffers::FlatBufferBuilder fbb;
+          examples::Pong::Builder pong_builder(fbb);
+          pong_builder.add_value(ping.value());
+          pong_builder.add_initial_send_time(ping.send_time());
+          fbb.Finish(pong_builder.Finish());
+
+          pong_sender->Send(fbb.GetBufferPointer(), fbb.GetSize(),
+                            pong_event_loop->monotonic_now() + kTimeOffset,
+                            pong_event_loop->realtime_now() + kTimeOffset,
+                            kQueueIndexOffset + pong_count);
+          ++pong_count;
+        });
+
+    DetachedBufferWriter writer(logfile);
+    std::unique_ptr<EventLoop> logger_event_loop =
+        event_loop_factory_.MakeEventLoop("logger");
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    Logger logger(&writer, logger_event_loop.get(),
+                  std::chrono::milliseconds(100));
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader reader(logfile);
+  ASSERT_NE(reader.node(), nullptr);
+  EXPECT_EQ(reader.node()->name()->string_view(), "pi1");
+
+  // TODO(austin): Also replay as pi2 or pi3 and make sure we see the pong
+  // messages.  This won't work yet until the log reading code gets
+  // significantly better.
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration(),
+                                               reader.node());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  std::unique_ptr<EventLoop> test_event_loop =
+      log_reader_factory.MakeEventLoop("test");
+
+  int ping_count = 10;
+  int pong_count = 10;
+
+  // Confirm that the ping value matches.
+  test_event_loop->MakeWatcher("/test",
+                               [&ping_count](const examples::Ping &ping) {
+                                 EXPECT_EQ(ping.value(), ping_count + 1);
+                                 ++ping_count;
+                               });
+  // Confirm that the ping and pong counts both match, and the value also
+  // matches.
+  test_event_loop->MakeWatcher(
+      "/test", [&test_event_loop, &ping_count, &pong_count,
+                kTimeOffset](const examples::Pong &pong) {
+        EXPECT_EQ(test_event_loop->context().remote_queue_index,
+                  pong_count + kQueueIndexOffset);
+        EXPECT_EQ(test_event_loop->context().monotonic_remote_time,
+                  test_event_loop->monotonic_now() + kTimeOffset);
+        EXPECT_EQ(test_event_loop->context().realtime_remote_time,
+                  test_event_loop->realtime_now() + kTimeOffset);
+
+        EXPECT_EQ(pong.value(), pong_count + 1);
+        ++pong_count;
+        EXPECT_EQ(ping_count, pong_count);
+      });
+
+  log_reader_factory.RunFor(std::chrono::seconds(100));
+  EXPECT_EQ(ping_count, 2010);
+  EXPECT_EQ(pong_count, 2010);
+
+  reader.Deregister();
+}
+
 }  // namespace testing
 }  // namespace logger
 }  // namespace aos
diff --git a/aos/events/multinode_pingpong.json b/aos/events/multinode_pingpong.json
new file mode 100644
index 0000000..f0e532e
--- /dev/null
+++ b/aos/events/multinode_pingpong.json
@@ -0,0 +1,161 @@
+{
+  "channels": [
+    {
+      "name": "/aos/pi1",
+      "type": "aos.timing.Report",
+      "source_node": "pi1",
+      "frequency": 50,
+      "num_senders": 20,
+      "max_size": 2048
+    },
+    {
+      "name": "/aos/pi2",
+      "type": "aos.timing.Report",
+      "source_node": "pi2",
+      "frequency": 50,
+      "num_senders": 20,
+      "max_size": 2048
+    },
+    {
+      "name": "/aos/pi3",
+      "type": "aos.timing.Report",
+      "source_node": "pi3",
+      "frequency": 50,
+      "num_senders": 20,
+      "max_size": 2048
+    },
+    {
+      "name": "/test",
+      "type": "aos.examples.Ping",
+      "source_node": "pi1",
+      "destination_nodes": [
+        {
+          "name": "pi2",
+          "priority": 1,
+          "timestamp_logger": "REMOTE_LOGGER",
+          "timestamp_logger_node": "pi1"
+        }
+      ]
+    },
+    {
+      "name": "/test",
+      "type": "aos.examples.Pong",
+      "source_node": "pi2",
+      "logger": "LOCAL_AND_REMOTE_LOGGER",
+      "logger_node": "pi1",
+      "destination_nodes": [
+        {
+          "name": "pi1",
+          "priority": 1,
+          "timestamp_logger": "REMOTE_LOGGER",
+          "timestamp_logger_node": "pi1"
+        }
+      ]
+    },
+    {
+      "name": "/test2",
+      "type": "aos.examples.Ping",
+      "source_node": "pi1",
+      "destination_nodes": [
+        {
+          "name": "pi3",
+          "priority": 1,
+          "timestamp_logger": "REMOTE_LOGGER",
+          "timestamp_logger_node": "pi1"
+        }
+      ]
+    },
+    {
+      "name": "/test2",
+      "type": "aos.examples.Pong",
+      "source_node": "pi3",
+      "logger": "LOCAL_AND_REMOTE_LOGGER",
+      "logger_node": "pi1",
+      "destination_nodes": [
+        {
+          "name": "pi1",
+          "priority": 1,
+          "timestamp_logger": "REMOTE_LOGGER",
+          "timestamp_logger_node": "pi1"
+        }
+      ]
+    }
+  ],
+  "maps": [
+    {
+      "match": {
+        "name": "/aos",
+        "type": "aos.timing.Report",
+        "source_node": "pi1"
+      },
+      "rename": {
+        "name": "/aos/pi1"
+      }
+    },
+    {
+      "match": {
+        "name": "/aos",
+        "type": "aos.timing.Report",
+        "source_node": "pi2"
+      },
+      "rename": {
+        "name": "/aos/pi2"
+      }
+    },
+    {
+      "match": {
+        "name": "/aos",
+        "type": "aos.timing.Report",
+        "source_node": "pi3"
+      },
+      "rename": {
+        "name": "/aos/pi3"
+      }
+    }
+  ],
+  "nodes": [
+    {
+      "name": "pi1",
+      "hostname": "raspberrypi",
+      "port": 9971
+    },
+    {
+      "name": "pi2",
+      "hostname": "raspberrypi2",
+      "port": 9971
+    },
+    {
+      "name": "pi3",
+      "hostname": "raspberrypi3",
+      "port": 9971
+    }
+  ],
+  "applications": [
+    {
+      "name": "ping2",
+      "maps": [
+        {
+          "match": {
+            "name": "/test"
+          },
+          "rename": {
+            "name": "/test2"
+          }
+        }
+      ]
+    },
+    {
+      "name": "pong2",
+      "maps": [
+        {
+          "match": {
+            "name": "/test"
+          },
+          "rename": {
+            "name": "/test2"
+          }
+        }
+      ]
+    }
+  ]
+}
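For reference, a minimal sketch of how a process on one of these nodes could load
this configuration and look up the forwarded "/test" channel, using the same
ReadConfig()/GetChannel() calls exercised by the test above. It assumes
aos/events/multinode_pingpong_config.json (the path the test reads) is the
generated form of this file; illustrative only, not part of the change.

    #include "aos/configuration.h"
    #include "aos/events/shm_event_loop.h"
    #include "aos/init.h"
    #include "glog/logging.h"

    int main(int argc, char **argv) {
      aos::InitGoogle(&argc, &argv);
      aos::FlatbufferDetachedBuffer<aos::Configuration> config =
          aos::configuration::ReadConfig(
              "aos/events/multinode_pingpong_config.json");
      aos::ShmEventLoop event_loop(&config.message());
      // "/test" carries aos.examples.Ping from pi1 to pi2, so this resolves to
      // that channel for whichever node this process runs as.
      const aos::Channel *channel = aos::configuration::GetChannel(
          event_loop.configuration(), "/test", "aos.examples.Ping",
          event_loop.name(), event_loop.node());
      CHECK(channel != nullptr) << ": /test is not readable on this node.";
      return 0;
    }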
diff --git a/aos/events/ping.cc b/aos/events/ping.cc
index 5a79635..f829b41 100644
--- a/aos/events/ping.cc
+++ b/aos/events/ping.cc
@@ -6,11 +6,13 @@
 #include "gflags/gflags.h"
 #include "glog/logging.h"
 
+DEFINE_string(config, "aos/events/pingpong_config.json", "Path to the config.");
+
 int main(int argc, char **argv) {
   aos::InitGoogle(&argc, &argv);
 
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig("aos/events/pingpong_config.json");
+      aos::configuration::ReadConfig(FLAGS_config);
 
   aos::ShmEventLoop event_loop(&config.message());
 
diff --git a/aos/events/ping_lib.cc b/aos/events/ping_lib.cc
index 4bbf580..9bc2446 100644
--- a/aos/events/ping_lib.cc
+++ b/aos/events/ping_lib.cc
@@ -1,13 +1,12 @@
 #include "aos/events/ping_lib.h"
 
-#include "aos/events/pong_generated.h"
 #include "aos/events/ping_generated.h"
+#include "aos/events/pong_generated.h"
 #include "aos/json_to_flatbuffer.h"
 #include "gflags/gflags.h"
 #include "glog/logging.h"
 
 DEFINE_int32(sleep_ms, 10, "Time to sleep between pings");
-DEFINE_bool(phased_loop, false, "If true, use a phased loop");
 
 namespace aos {
 
@@ -15,25 +14,16 @@
 
 Ping::Ping(EventLoop *event_loop)
     : event_loop_(event_loop),
-      sender_(event_loop_->MakeSender<examples::Ping>("/test")),
-      pong_fetcher_(event_loop_->MakeFetcher<examples::Pong>("/test")) {
-  if (FLAGS_phased_loop) {
-    phased_loop_handle_ = event_loop_->AddPhasedLoop(
-        [this](int) { SendPing(); }, chrono::milliseconds(FLAGS_sleep_ms));
-    phased_loop_handle_->set_name("ping");
-  } else {
-    timer_handle_ = event_loop_->AddTimer([this]() { SendPing(); });
-    timer_handle_->set_name("ping");
-  }
+      sender_(event_loop_->MakeSender<examples::Ping>("/test")) {
+  timer_handle_ = event_loop_->AddTimer([this]() { SendPing(); });
+  timer_handle_->set_name("ping");
 
   event_loop_->MakeWatcher(
       "/test", [this](const examples::Pong &pong) { HandlePong(pong); });
 
   event_loop_->OnRun([this]() {
-    if (!FLAGS_phased_loop) {
-      timer_handle_->Setup(event_loop_->monotonic_now(),
-                           chrono::milliseconds(FLAGS_sleep_ms));
-    }
+    timer_handle_->Setup(event_loop_->monotonic_now(),
+                         chrono::milliseconds(FLAGS_sleep_ms));
   });
 
   event_loop_->SetRuntimeRealtimePriority(5);
@@ -41,17 +31,16 @@
 
 void Ping::SendPing() {
   ++count_;
-  aos::Sender<examples::Ping>::Builder msg = sender_.MakeBuilder();
-  examples::Ping::Builder builder = msg.MakeBuilder<examples::Ping>();
-  builder.add_value(count_);
-  builder.add_send_time(
+  aos::Sender<examples::Ping>::Builder builder = sender_.MakeBuilder();
+  examples::Ping::Builder ping_builder = builder.MakeBuilder<examples::Ping>();
+  ping_builder.add_value(count_);
+  ping_builder.add_send_time(
       event_loop_->monotonic_now().time_since_epoch().count());
-  CHECK(msg.Send(builder.Finish()));
+  CHECK(builder.Send(ping_builder.Finish()));
   VLOG(2) << "Sending ping";
 }
 
 void Ping::HandlePong(const examples::Pong &pong) {
-  pong_fetcher_.Fetch();
   const aos::monotonic_clock::time_point monotonic_send_time(
       chrono::nanoseconds(pong.initial_send_time()));
   const aos::monotonic_clock::time_point monotonic_now =
@@ -60,11 +49,19 @@
   const chrono::nanoseconds round_trip_time =
       monotonic_now - monotonic_send_time;
 
+  if (last_pong_value_ + 1 != pong.value()) {
+    LOG(WARNING) << "Pong message lost";
+  }
+
   if (pong.value() == count_) {
     VLOG(1) << "Elapsed time " << round_trip_time.count() << " ns "
             << FlatbufferToJson(&pong);
   } else {
-    VLOG(1) << "Missmatched pong message";
+    LOG(WARNING) << "Mismatched pong message, got " << FlatbufferToJson(&pong)
+                 << " expected " << count_;
   }
+
+  last_pong_value_ = pong.value();
 }
+
 }  // namespace aos
diff --git a/aos/events/ping_lib.h b/aos/events/ping_lib.h
index 6be75ed..e14c3f2 100644
--- a/aos/events/ping_lib.h
+++ b/aos/events/ping_lib.h
@@ -25,10 +25,10 @@
   aos::Sender<examples::Ping> sender_;
   // Timer handle which sends the Ping message.
   aos::TimerHandler *timer_handle_;
-  aos::PhasedLoopHandler *phased_loop_handle_;
-  aos::Fetcher<examples::Pong> pong_fetcher_;
   // Number of pings sent.
   int count_ = 0;
+  // Last pong value received so we can detect missed pongs.
+  int last_pong_value_ = 0;
 };
 
 }  // namespace aos
diff --git a/aos/events/pong.cc b/aos/events/pong.cc
index 339d4d7..581d04f 100644
--- a/aos/events/pong.cc
+++ b/aos/events/pong.cc
@@ -6,13 +6,15 @@
 #include "aos/init.h"
 #include "glog/logging.h"
 
+DEFINE_string(config, "aos/events/pingpong_config.json", "Path to the config.");
+
 int main(int argc, char **argv) {
   FLAGS_logtostderr = true;
   google::InitGoogleLogging(argv[0]);
   ::gflags::ParseCommandLineFlags(&argc, &argv, true);
 
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig("aos/events/pingpong_config.json");
+      aos::configuration::ReadConfig(FLAGS_config);
 
   ::aos::ShmEventLoop event_loop(&config.message());
 
diff --git a/aos/events/pong_lib.cc b/aos/events/pong_lib.cc
index 5ff38b9..9d7731d 100644
--- a/aos/events/pong_lib.cc
+++ b/aos/events/pong_lib.cc
@@ -1,8 +1,8 @@
 #include "aos/events/pong_lib.h"
 
 #include "aos/events/event_loop.h"
-#include "aos/events/pong_generated.h"
 #include "aos/events/ping_generated.h"
+#include "aos/events/pong_generated.h"
 #include "glog/logging.h"
 
 namespace aos {
@@ -11,11 +11,12 @@
     : event_loop_(event_loop),
       sender_(event_loop_->MakeSender<examples::Pong>("/test")) {
   event_loop_->MakeWatcher("/test", [this](const examples::Ping &ping) {
-    aos::Sender<examples::Pong>::Builder msg = sender_.MakeBuilder();
-    examples::Pong::Builder builder = msg.MakeBuilder<examples::Pong>();
-    builder.add_value(ping.value());
-    builder.add_initial_send_time(ping.send_time());
-    CHECK(msg.Send(builder.Finish()));
+    aos::Sender<examples::Pong>::Builder builder = sender_.MakeBuilder();
+    examples::Pong::Builder pong_builder =
+        builder.MakeBuilder<examples::Pong>();
+    pong_builder.add_value(ping.value());
+    pong_builder.add_initial_send_time(ping.send_time());
+    CHECK(builder.Send(pong_builder.Finish()));
   });
 
   event_loop_->SetRuntimeRealtimePriority(5);
diff --git a/aos/events/shm_event_loop.cc b/aos/events/shm_event_loop.cc
index a8e227e..3ced933 100644
--- a/aos/events/shm_event_loop.cc
+++ b/aos/events/shm_event_loop.cc
@@ -28,6 +28,10 @@
 
 namespace aos {
 
+void SetShmBase(const std::string_view base) {
+  FLAGS_shm_base = std::string(base) + "/dev/shm/aos";
+}
+
 std::string ShmFolder(const Channel *channel) {
   CHECK(channel->has_name());
   CHECK_EQ(channel->name()->string_view()[0], '/');
@@ -155,7 +159,11 @@
 ShmEventLoop::ShmEventLoop(const Configuration *configuration)
     : EventLoop(configuration),
       name_(Filename(program_invocation_name)),
-      node_(MaybeMyNode(configuration)) {}
+      node_(MaybeMyNode(configuration)) {
+  if (configuration->has_nodes()) {
+    CHECK(node_ != nullptr) << ": Couldn't find node in config.";
+  }
+}
 
 namespace internal {
 
@@ -165,7 +173,7 @@
       : channel_(channel),
         lockless_queue_memory_(
             channel,
-            chrono::duration_cast<chrono::seconds>(chrono::nanoseconds(
+            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                 event_loop->configuration()->channel_storage_duration()))),
         lockless_queue_(lockless_queue_memory_.memory(),
                         lockless_queue_memory_.config()),
@@ -350,7 +358,7 @@
       : RawSender(event_loop, channel),
         lockless_queue_memory_(
             channel,
-            chrono::duration_cast<chrono::seconds>(chrono::nanoseconds(
+            chrono::ceil<chrono::seconds>(chrono::nanoseconds(
                 event_loop->configuration()->channel_storage_duration()))),
         lockless_queue_(lockless_queue_memory_.memory(),
                         lockless_queue_memory_.config()),
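The duration_cast to chrono::ceil change above rounds the configured channel
storage duration up to whole seconds instead of truncating it, so the queue
always covers at least the configured duration. A standalone illustration with
plain std::chrono (not AOS code):

    #include <chrono>
    #include <iostream>

    int main() {
      using namespace std::chrono;
      // E.g. a channel_storage_duration of 1.5 seconds, expressed in ns.
      const nanoseconds storage = milliseconds(1500);
      std::cout << duration_cast<seconds>(storage).count() << "\n";  // prints 1
      std::cout << ceil<seconds>(storage).count() << "\n";           // prints 2
      return 0;
    }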
diff --git a/aos/events/simulated_event_loop.cc b/aos/events/simulated_event_loop.cc
index a2740e4..1dfb3af 100644
--- a/aos/events/simulated_event_loop.cc
+++ b/aos/events/simulated_event_loop.cc
@@ -739,13 +739,25 @@
 
 SimulatedEventLoopFactory::SimulatedEventLoopFactory(
     const Configuration *configuration, std::string_view node_name)
-    : configuration_(CHECK_NOTNULL(configuration)),
-      node_(configuration::GetNode(configuration, node_name)) {
-  CHECK(configuration_->has_nodes())
-      << ": Got a configuration with no nodes and node \"" << node_name
-      << "\" was selected.";
-  CHECK(node_ != nullptr) << ": Can't find node \"" << node_name
-                          << "\" in the configuration.";
+    : SimulatedEventLoopFactory(
+          configuration, configuration::GetNode(configuration, node_name)) {}
+
+SimulatedEventLoopFactory::SimulatedEventLoopFactory(
+    const Configuration *configuration, const Node *node)
+    : configuration_(CHECK_NOTNULL(configuration)), node_(node) {
+  if (node != nullptr) {
+    CHECK(configuration_->has_nodes())
+        << ": Got a configuration with no nodes and node \""
+        << node->name()->string_view() << "\" was selected.";
+    bool found = false;
+    for (const Node *config_node : *configuration_->nodes()) {
+      if (config_node == node_) {
+        found = true;
+        break;
+      }
+    }
+    CHECK(found) << ": node must be a pointer in the configuration.";
+  }
 }
 
 SimulatedEventLoopFactory::~SimulatedEventLoopFactory() {}
diff --git a/aos/events/simulated_event_loop.h b/aos/events/simulated_event_loop.h
index fee3ef0..db0b840 100644
--- a/aos/events/simulated_event_loop.h
+++ b/aos/events/simulated_event_loop.h
@@ -54,6 +54,8 @@
   SimulatedEventLoopFactory(const Configuration *configuration);
   SimulatedEventLoopFactory(const Configuration *configuration,
                             std::string_view node_name);
+  SimulatedEventLoopFactory(const Configuration *configuration,
+                            const Node *node);
   ~SimulatedEventLoopFactory();
 
   ::std::unique_ptr<EventLoop> MakeEventLoop(std::string_view name);
diff --git a/aos/flatbuffers.h b/aos/flatbuffers.h
index 1050417..630741f 100644
--- a/aos/flatbuffers.h
+++ b/aos/flatbuffers.h
@@ -162,6 +162,9 @@
   FlatbufferVector(const Flatbuffer<T> &other)
       : data_(other.data(), other.data() + other.size()) {}
 
+  // Move constructor.
+  FlatbufferVector(Flatbuffer<T> &&other) : data_(std::move(other.data())) {}
+
   // Copies the data from the other flatbuffer.
   FlatbufferVector &operator=(const Flatbuffer<T> &other) {
     data_ = std::vector<uint8_t>(other.data(), other.data() + other.size());
diff --git a/aos/logging/log_displayer.cc b/aos/logging/log_displayer.cc
index 24bebbd..d3e5728 100644
--- a/aos/logging/log_displayer.cc
+++ b/aos/logging/log_displayer.cc
@@ -59,6 +59,36 @@
     "To view the statuses of the shooter hall effects in realtime:\n"
     "\t`log_displayer -f -n shooter -l DEBUG | grep .Position`\n";
 
+std::string GetRootDirectory() {
+  ssize_t size = 0;
+  std::unique_ptr<char[]> retu;
+  while (true) {
+    size += 256;
+    retu.reset(new char[size]);
+
+    ssize_t ret = readlink("/proc/self/exe", retu.get(), size);
+    if (ret < 0) {
+      if (ret != -1) {
+        LOG(WARNING) << "it returned " << ret << ", not -1";
+      }
+
+      PLOG(FATAL) << "readlink(\"/proc/self/exe\", " << retu.get() << ", " << size
+                  << ") failed";
+    }
+    if (ret < size) {
+      void *last_slash = memrchr(retu.get(), '/', ret);
+      if (last_slash == NULL) {
+        retu.get()[ret] = '\0';
+        LOG(FATAL) << "couldn't find a '/' in \"" << retu.get() << "\"";
+      }
+      *static_cast<char *>(last_slash) = '\0';
+      LOG(INFO) << "got a root dir of \"" << retu.get() << "\"";
+      return std::string(retu.get());
+    }
+  }
+}
+
+
 void PrintHelpAndExit() {
   fprintf(stderr, "Usage: %s %s", program_invocation_name, kArgsHelp);
   fprintf(stderr, "\nExample usages:\n\n%s", kExampleUsages);
@@ -66,14 +96,15 @@
   // Get the possible executables from start_list.txt.
   FILE *start_list = fopen("start_list.txt", "r");
   if (!start_list) {
-    ::std::string path(::aos::configuration::GetRootDirectory());
+    ::std::string path(GetRootDirectory());
     path += "/start_list.txt";
     start_list = fopen(path.c_str(), "r");
     if (!start_list) {
-      printf("\nCannot open start_list.txt. This means that the\n"
-      "possible arguments for the -n option cannot be shown. log_displayer\n"
-      "looks for start_list.txt in the current working directory and in\n"
-      "%s.\n\n", ::aos::configuration::GetRootDirectory());
+      printf(
+          "\nCannot open start_list.txt. This means that the\npossible "
+          "arguments for the -n option cannot be shown. log_displayer\nlooks "
+          "for start_list.txt in the current working directory and in\n%s.\n\n",
+          path.c_str());
       AOS_PLOG(FATAL, "Unable to open start_list.txt");
     }
   }
diff --git a/aos/logging/log_namer.cc b/aos/logging/log_namer.cc
index d4584ef..22db82a 100644
--- a/aos/logging/log_namer.cc
+++ b/aos/logging/log_namer.cc
@@ -16,6 +16,15 @@
 #include "aos/configuration.h"
 #include "glog/logging.h"
 
+DEFINE_string(logging_folder,
+#ifdef AOS_ARCHITECTURE_arm_frc
+              "",
+#else
+              "./logs",
+#endif
+              "The folder to log to.  If empty, search for the /media/sd*1/ "
+              "folder and place logs there.");
+
 namespace aos {
 namespace logging {
 namespace {
@@ -66,7 +75,6 @@
             << previous << ").";
 }
 
-#ifdef AOS_ARCHITECTURE_arm_frc
 bool FoundThumbDrive(const char *path) {
   FILE *mnt_fp = setmntent("/etc/mtab", "r");
   if (mnt_fp == nullptr) {
@@ -101,31 +109,35 @@
   }
   return false;
 }
-#endif
+
 }  // namespace
 
 std::string GetLogName(const char *basename) {
-#ifdef AOS_ARCHITECTURE_arm_frc
-  char folder[128];
-  {
-    char dev_name[8];
-    while (!FindDevice(dev_name, sizeof(dev_name))) {
-      LOG(INFO) <<  "Waiting for a device";
-      sleep(5);
+  if (FLAGS_logging_folder.empty()) {
+    char folder[128];
+    {
+      char dev_name[8];
+      while (!FindDevice(dev_name, sizeof(dev_name))) {
+        LOG(INFO) << "Waiting for a device";
+        sleep(5);
+      }
+      snprintf(folder, sizeof(folder), "/media/%s1", dev_name);
+      while (!FoundThumbDrive(folder)) {
+        LOG(INFO) << "Waiting for" << folder;
+        sleep(1);
+      }
+      snprintf(folder, sizeof(folder), "/media/%s1/", dev_name);
     }
-    snprintf(folder, sizeof(folder), "/media/%s1", dev_name);
-    while (!FoundThumbDrive(folder)) {
-      LOG(INFO) << "Waiting for" << folder;
-      sleep(1);
-    }
-    snprintf(folder, sizeof(folder), "/media/%s1/", dev_name);
-  }
 
-  if (access(folder, F_OK) == -1) {
-#else
-  const char *folder = configuration::GetLoggingDirectory();
+    if (access(folder, F_OK) == -1) {
+      LOG(FATAL) << "folder '" << folder
+                 << "' does not exist. please create it.";
+    }
+
+    FLAGS_logging_folder = folder;
+  }
+  const char *folder = FLAGS_logging_folder.c_str();
   if (access(folder, R_OK | W_OK) == -1) {
-#endif
     LOG(FATAL) << "folder '" << folder << "' does not exist. please create it.";
   }
   LOG(INFO) << "logging to folder '" << folder << "'";
diff --git a/aos/testdata/invalid_destination_node.json b/aos/testdata/invalid_destination_node.json
index 95dc6d3..1367d13 100644
--- a/aos/testdata/invalid_destination_node.json
+++ b/aos/testdata/invalid_destination_node.json
@@ -5,8 +5,12 @@
       "type": ".aos.bar",
       "source_node": "roborio",
       "destination_nodes": [
-        "dest",
-        "trojan_horse"
+        {
+          "name": "dest"
+        },
+        {
+          "name": "trojan_horse"
+        }
       ]
     }
   ],
diff --git a/aos/testdata/self_forward.json b/aos/testdata/self_forward.json
index 101b018..0e88cdf 100644
--- a/aos/testdata/self_forward.json
+++ b/aos/testdata/self_forward.json
@@ -5,7 +5,9 @@
       "type": ".aos.bar",
       "source_node": "roborio",
       "destination_nodes": [
-        "roborio"
+        {
+          "name": "roborio"
+        }
       ]
     }
   ],
diff --git a/aos/testing/gtest_main.cc b/aos/testing/gtest_main.cc
index 52159c3..adb516c 100644
--- a/aos/testing/gtest_main.cc
+++ b/aos/testing/gtest_main.cc
@@ -11,6 +11,8 @@
               "Print all log messages to FILE instead of standard output.");
 
 namespace aos {
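+// Declared weak so that test binaries which don't link in the shared-memory
+// event loop still build; the null check below detects whether a definition
+// was actually linked in before calling it.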
+void SetShmBase(const std::string_view base) __attribute__((weak));
+
 namespace testing {
 
 // Actually declared/defined in //aos/testing:test_logging.
@@ -42,5 +44,14 @@
     }
   }
 
+  // Point shared memory away from /dev/shm if we are testing.  We don't care
+  // about RT in this case, so if it is backed by disk, we are fine.
+  if (::aos::SetShmBase) {
+    const char *tmpdir_c_str = getenv("TEST_TMPDIR");
+    if (tmpdir_c_str != nullptr) {
+      aos::SetShmBase(tmpdir_c_str);
+    }
+  }
+
   return RUN_ALL_TESTS();
 }
diff --git a/aos/unique_malloc_ptr.h b/aos/unique_malloc_ptr.h
index bd2c37c..eb92f11 100644
--- a/aos/unique_malloc_ptr.h
+++ b/aos/unique_malloc_ptr.h
@@ -1,8 +1,11 @@
+#ifndef AOS_UNIQUE_MALLOC_PTR_H_
+#define AOS_UNIQUE_MALLOC_PTR_H_
+
 #include <memory>
 
 namespace aos {
 
-namespace {
+namespace internal {
 
 // Written as a functor so that it doesn't have to get passed to
 // std::unique_ptr's constructor as an argument.
@@ -19,26 +22,30 @@
 template<typename T>
 void free_type(T *ptr) { ::free(reinterpret_cast<void *>(ptr)); }
 
-}  // namespace
+}  // namespace internal
 
 // A std::unique_ptr that should get freed with a C-style free function
 // (free(2) by default).
-template<typename T, void(*function)(T *) = free_type<T>>
-class unique_c_ptr : public std::unique_ptr<T, const_wrap<T, function>> {
+template <typename T, void (*function)(T *) = internal::free_type<T>>
+class unique_c_ptr
+    : public std::unique_ptr<T, internal::const_wrap<T, function>> {
  public:
-  unique_c_ptr(T *value) : std::unique_ptr<T, const_wrap<T, function>>(value) {}
+  unique_c_ptr(T *value)
+      : std::unique_ptr<T, internal::const_wrap<T, function>>(value) {}
 
   // perfect forwarding of these 2 to make unique_ptr work
-  template<typename... Args>
-  unique_c_ptr(Args&&... args)
-    : std::unique_ptr<T, const_wrap<T, function>>(std::forward<Args>(args)...) {
-  }
+  template <typename... Args>
+  unique_c_ptr(Args &&... args)
+      : std::unique_ptr<T, internal::const_wrap<T, function>>(
+            std::forward<Args>(args)...) {}
   template<typename... Args>
   unique_c_ptr<T, function> &operator=(Args&&... args) {
-    std::unique_ptr<T, const_wrap<T, function>>::operator=(
+    std::unique_ptr<T, internal::const_wrap<T, function>>::operator=(
         std::forward<Args>(args)...);
     return *this;
   }
 };
 
 }  // namespace aos
+
+#endif  // AOS_UNIQUE_MALLOC_PTR_H_
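A minimal usage sketch for the now-guarded header (illustrative, assuming a
malloc()-backed string from strdup()): the default deleter releases the pointer
with free() when it leaves scope.

    #include <cstring>

    #include "aos/unique_malloc_ptr.h"

    int main() {
      // strdup() allocates with malloc(), so the default free_type deleter is
      // the right way to release it.
      aos::unique_c_ptr<char> name(strdup("aos"));
      return std::strlen(name.get()) == 3 ? 0 : 1;
    }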
diff --git a/build_tests/BUILD b/build_tests/BUILD
index 076e18c..a4c5829 100644
--- a/build_tests/BUILD
+++ b/build_tests/BUILD
@@ -26,6 +26,24 @@
     ],
 )
 
+emcc_binary(
+    name = "plotter.html",
+    srcs = ["webgl2_plot_test.cc"],
+    html_shell = "minimal_shell.html",
+    linkopts = [
+        "-s",
+        "USE_WEBGL2=1",
+        "-s",
+        "FULL_ES3=1",
+        "-s",
+        "TOTAL_MEMORY=" + repr(256 * 1024 * 1024),
+    ],
+    deps = [
+        "//frc971/analysis/plotting:webgl2_animator",
+        "//frc971/analysis/plotting:webgl2_plotter",
+    ],
+)
+
 cc_test(
     name = "gflags_build_test",
     size = "small",
diff --git a/build_tests/minimal_shell.html b/build_tests/minimal_shell.html
index 6158571..c9d2b86 100644
--- a/build_tests/minimal_shell.html
+++ b/build_tests/minimal_shell.html
@@ -56,7 +56,21 @@
     </div>
     <!--The width and height values in the canvas specify the pixel width/height for the WebGL canvas.
         The actual on-screen size is controlled by the stylesheet.-->
-    <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" width=3000 height=2000></canvas>
+    <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" width=1200 height=600></canvas>
+
+    <div>
+      General help information:<br>
+      <ul>
+        <li>Double-click to reset zoom.</li>
+        <li>Left-click to print mouse position (within plot) to console.</li>
+        <li>Ctrl-Z to undo zoom actions.</li>
+        <li>Right-click and drag to pan.</li>
+        <li>Left-click and drag zooms to the dragged area. If you press Escape while dragging, it will cancel the zoom.</li>
+        <li>Scrolling up/down zooms in and out.</li>
+        <li>Holding down "x" or "y" will restrict any
+            movement to the x or y axis respectively.</li>
+      </ul>
+    </div>
 
     <script type='text/javascript'>
       var statusElement = document.getElementById('status');
diff --git a/build_tests/webgl2_plot_test.cc b/build_tests/webgl2_plot_test.cc
new file mode 100644
index 0000000..2baefa9
--- /dev/null
+++ b/build_tests/webgl2_plot_test.cc
@@ -0,0 +1,35 @@
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+
+#include <cmath>
+#include <cstdlib>
+#include <iostream>
+
+#include "frc971/analysis/plotting/webgl2_plotter.h"
+#include "frc971/analysis/plotting/webgl2_animator.h"
+
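+// Returns a pseudo-random float uniformly distributed in [0, 1].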
+float rand1() {
+  return static_cast<float>(rand()) / RAND_MAX;
+}
+
+int main() {
+  // Note that the animation_state must last until Redraw stops being called,
+  // which we cannot provide any bound on. As such, we don't currently destroy
+  // the memory until the webpage is closed.
+  frc971::plotting::Animator *animation_state =
+      new frc971::plotting::Animator("#canvas");
+  // Generate a bunch of sine-wave lines with random colors and evenly spaced
+  // x-values, each offset in phase by the line index so the individual lines
+  // are easy to tell apart. If that's unclear, then try running this and
+  // seeing what it looks like.
+  constexpr size_t kNLines = 30;
+  for (size_t jj = 0; jj < kNLines; ++jj) {
+    frc971::plotting::Line *line = animation_state->plotter()->AddLine();
+    // Randomly generate a color to use; each of r/g/b are between 0 and 1.
+    line->SetColor({.r = rand1(), .g = rand1(), .b = rand1()});
+    std::vector<Eigen::Vector2d> points;
+    constexpr size_t kNPoints = 100000;
+    for (size_t ii = 0; ii < kNPoints; ++ii) {
+      const float x = static_cast<float>(ii) / kNPoints;
+      points.emplace_back(x, std::sin(x + jj));
+    }
+    line->SetPoints(points);
+  }
+}
diff --git a/debian/BUILD b/debian/BUILD
index 0cb12e7..9b7ad4d 100644
--- a/debian/BUILD
+++ b/debian/BUILD
@@ -55,7 +55,7 @@
     srcs = [
         "matplotlib_init.patch",
     ],
-    visibility = ["@matplotlib//:__pkg__"],
+    visibility = ["@matplotlib_repo//:__pkg__"],
 )
 
 filegroup(
@@ -182,6 +182,7 @@
         "python-matplotlib",
         "python-tk",
         "python3-matplotlib",
+        "python3-tk",
     ],
 )
 
diff --git a/debian/matplotlib.BUILD b/debian/matplotlib.BUILD
index 5f89ba9..af2c4b7 100644
--- a/debian/matplotlib.BUILD
+++ b/debian/matplotlib.BUILD
@@ -1,124 +1,8 @@
-genrule(
-    name = "patch_init",
-    srcs = [
-        "usr/lib/python2.7/dist-packages/matplotlib/__init__.py",
-        "@//debian:matplotlib_patches",
-    ],
-    outs = ["matplotlib/__init__.py"],
-    cmd = " && ".join([
-        "cp $(location usr/lib/python2.7/dist-packages/matplotlib/__init__.py) $@",
-        "readonly PATCH=\"$$(readlink -f $(location @patch))\"",
-        "readonly FILE=\"$$(readlink -f $(location @//debian:matplotlib_patches))\"",
-        "(cd $(@D) && \"$${PATCH}\" -p1 < \"$${FILE}\") > /dev/null",
-    ]),
-    tools = [
-        "@patch",
-    ],
-)
+load("@//debian:matplotlib.bzl", "build_matplotlib")
 
-_src_files = glob(
-    include = ["usr/lib/python2.7/dist-packages/**/*.py"],
-    exclude = [
-        "usr/lib/python2.7/dist-packages/matplotlib/__init__.py",
-    ],
-)
+build_matplotlib("3", "3.5")
 
-_data_files = glob([
-    "usr/share/matplotlib/mpl-data/**",
-    "usr/share/tcltk/**",
-])
-
-_src_copied = ["/".join(f.split("/")[4:]) for f in _src_files]
-
-_builtin_so_files = glob([
-    "usr/lib/python2.7/dist-packages/**/*.x86_64-linux-gnu.so",
-    "usr/lib/python2.7/lib-dynload/*.so",
-])
-
-_system_so_files = glob([
-    "usr/lib/**/*.so*",
-    "lib/x86_64-linux-gnu/**/*.so*",
-])
-
-_builtin_so_copied = ["/".join(f.split("/")[4:]) for f in _builtin_so_files]
-
-_system_so_copied = ["rpathed/" + f for f in _system_so_files]
-
-_builtin_rpaths = [":".join([
-    "\\$$ORIGIN/%s" % rel,
-    "\\$$ORIGIN/%s/rpathed/usr/lib/x86_64-linux-gnu" % rel,
-    "\\$$ORIGIN/%s/rpathed/usr/lib" % rel,
-    "\\$$ORIGIN/%s/rpathed/lib/x86_64-linux-gnu" % rel,
-]) for rel in ["/".join([".." for _ in so.split("/")[1:]]) for so in _builtin_so_copied]]
-
-_system_rpaths = [":".join([
-    "\\$$ORIGIN/%s/rpathed/usr/lib/x86_64-linux-gnu" % rel,
-    "\\$$ORIGIN/%s/rpathed/lib/x86_64-linux-gnu" % rel,
-]) for rel in ["/".join([".." for _ in so.split("/")[1:]]) for so in _system_so_copied]]
-
-genrule(
-    name = "run_patchelf_builtin",
-    srcs = _builtin_so_files,
-    outs = _builtin_so_copied,
-    cmd = "\n".join(
-        [
-            "cp $(location %s) $(location %s)" % (src, dest)
-            for src, dest in zip(_builtin_so_files, _builtin_so_copied)
-        ] +
-        ["$(location @patchelf) --set-rpath %s $(location %s)" % (rpath, so) for rpath, so in zip(_builtin_rpaths, _builtin_so_copied)],
-    ),
-    tools = [
-        "@patchelf",
-    ],
-)
-
-genrule(
-    name = "run_patchelf_system",
-    srcs = _system_so_files,
-    outs = _system_so_copied,
-    cmd = "\n".join(
-        [
-            "cp $(location %s) $(location %s)" % (src, dest)
-            for src, dest in zip(_system_so_files, _system_so_copied)
-        ] +
-        ["$(location @patchelf) --set-rpath %s $(location %s)" % (rpath, so) for rpath, so in zip(_system_rpaths, _system_so_copied)],
-    ),
-    tools = [
-        "@patchelf",
-    ],
-)
-
-genrule(
-    name = "copy_files",
-    srcs = _src_files,
-    outs = _src_copied,
-    cmd = " && ".join(["cp $(location %s) $(location %s)" % (src, dest) for src, dest in zip(
-        _src_files,
-        _src_copied,
-    )]),
-)
-
-genrule(
-    name = "create_rc",
-    outs = ["usr/share/matplotlib/mpl-data/matplotlibrc"],
-    cmd = "\n".join([
-        "cat > $@ << END",
-        # This is necessary to make matplotlib actually plot things to the
-        # screen by default.
-        "backend      : TkAgg",
-        "END",
-    ]),
-)
-
-py_library(
-    name = "matplotlib",
-    srcs = _src_copied + [
-        "matplotlib/__init__.py",
-    ],
-    data = _data_files + _builtin_so_copied + _system_so_copied + [
-        ":usr/share/matplotlib/mpl-data/matplotlibrc",
-    ],
-    imports = ["usr/lib/python2.7/dist-packages"],
-    restricted_to = ["@//tools:k8"],
-    visibility = ["//visibility:public"],
+build_matplotlib(
+    "2.7",
+    copy_shared_files = False,
 )
diff --git a/debian/matplotlib.bzl b/debian/matplotlib.bzl
index b1687d6..a2c6683 100644
--- a/debian/matplotlib.bzl
+++ b/debian/matplotlib.bzl
@@ -90,6 +90,7 @@
     "python3-matplotlib_2.0.0+dfsg1-2_amd64.deb": "8f5d3509d4f5451468c6de44fc8dfe391c3df4120079adc01ab5f13ff4194f5a",
     "python3-pyparsing_2.1.10+dfsg1-1_all.deb": "ee8d7f04f841248127e81b3d356d37e623ed29da284b28c7d2b8a5b34f0eebba",
     "python3-six_1.10.0-3_all.deb": "597005e64cf70e4be97170a47c33287f70a1c87a2979d47a434c10c9201af3ca",
+    "python3-tk_3.5.3-1_amd64.deb": "67489a1c86a9e501dbe2989cd72b5b2c70511fe3829af3567a009271b61fdbb5",
     "python3-tz_2016.7-0.3_all.deb": "5f1c7db456aac5fe9b0ea66d7413c12660c7652ae382c640f71c517a05d39551",
     "shared-mime-info_1.8-1+deb9u1_amd64.deb": "d6591f13ee1200c4f0b5581c2299eb7b8097a6b04742dc333e34a7bb7ba47532",
     "tk8.6-blt2.5_2.5.3+dfsg-3_amd64.deb": "88587a928e2bd692650d98c1483b67f1dee1fed57730077c895e689462af1569",
@@ -97,3 +98,148 @@
     "tzdata_2019c-0+deb9u1_all.deb": "80c9809dafc62ec741cbf3024130253de6047af31a10f0c86bb17f2d12ad10d5",
     "ucf_3.0036_all.deb": "796a65e765d6045007175531d512c720f4eb04e7f3326b79b848bc6123947225",
 }
+
+def build_matplotlib(version, tkinter_py_version = None, copy_shared_files = True):
+  """Creates a py_library rule for matplotlib for the given python version.
+
+  See debian/matplotlib.BUILD for the usage.
+
+  All the rules generated by this will be suffixed by version. Only one
+  instance of this macro should set copy_shared_files, since that instance
+  generates the files that are shared between Python versions.
+
+  tkinter_py_version is used because, for the Python3 instance, some files
+  are in folders named python3 and some are in folders named python3.5...
+
+  Both version numbers should be strings.
+  """
+  if tkinter_py_version == None:
+    tkinter_py_version = version
+
+  native.genrule(
+      name = "patch_init" + version,
+      srcs = [
+          "usr/lib/python" + version + "/dist-packages/matplotlib/__init__.py",
+          "@//debian:matplotlib_patches",
+      ],
+      outs = [version + "/matplotlib/__init__.py"],
+      cmd = " && ".join([
+          "cp $(location usr/lib/python" + version + "/dist-packages/matplotlib/__init__.py) $@",
+          "readonly PATCH=\"$$(readlink -f $(location @patch))\"",
+          "readonly FILE=\"$$(readlink -f $(location @//debian:matplotlib_patches))\"",
+          "(cd $(@D) && \"$${PATCH}\" -p1 < \"$${FILE}\") > /dev/null",
+      ]),
+      tools = [
+          "@patch",
+      ],
+  )
+
+  _src_files = native.glob(
+      include = ["usr/lib/python" + version + "/dist-packages/**/*.py"],
+      exclude = [
+          "usr/lib/python" + version + "/dist-packages/matplotlib/__init__.py",
+      ],
+  )
+
+  _data_files = native.glob([
+      "usr/share/matplotlib/mpl-data/**",
+      "usr/share/tcltk/**",
+  ])
+
+  _src_copied = ["/".join([version] + f.split("/")[4:]) for f in _src_files]
+
+  _builtin_so_files = native.glob([
+      "usr/lib/python" + version + "/dist-packages/**/*x86_64-linux-gnu.so",
+      "usr/lib/python" + tkinter_py_version + "/lib-dynload/*.so",
+  ])
+
+  _system_so_files = native.glob([
+      "usr/lib/**/*.so*",
+      "lib/x86_64-linux-gnu/**/*.so*",
+  ])
+
+  _builtin_so_copied = ["/".join([version] + f.split("/")[4:]) for f in _builtin_so_files]
+
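+  # Shared objects get copied under this prefix and, in the genrules below,
+  # have their RPATHs rewritten to $ORIGIN-relative paths so that each Python
+  # version resolves its own copies inside the runfiles tree.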
+  rpath_prefix = "rpathed" + version + "/"
+
+  _system_so_copied = [rpath_prefix + f for f in _system_so_files]
+
+  _builtin_rpaths = [":".join([
+      "\\$$ORIGIN/%s" % rel,
+      "\\$$ORIGIN/%s/%s/usr/lib/x86_64-linux-gnu" % (rel, rpath_prefix),
+      "\\$$ORIGIN/%s/%s/usr/lib" % (rel, rpath_prefix),
+      "\\$$ORIGIN/%s/%s/lib/x86_64-linux-gnu" % (rel, rpath_prefix),
+  ]) for rel in ["/".join([".." for _ in so.split("/")[1:]]) for so in _builtin_so_copied]]
+
+  _system_rpaths = [":".join([
+      "\\$$ORIGIN/%s/%s/usr/lib/x86_64-linux-gnu" % (rel, rpath_prefix),
+      "\\$$ORIGIN/%s/%s/lib/x86_64-linux-gnu" % (rel, rpath_prefix),
+  ]) for rel in ["/".join([".." for _ in so.split("/")[1:]]) for so in _system_so_copied]]
+
+  native.genrule(
+      name = "run_patchelf_builtin" + version,
+      srcs = _builtin_so_files,
+      outs = _builtin_so_copied,
+      cmd = "\n".join(
+          [
+              "cp $(location %s) $(location %s)" % (src, dest)
+              for src, dest in zip(_builtin_so_files, _builtin_so_copied)
+          ] +
+          ["$(location @patchelf) --set-rpath %s $(location %s)" % (rpath, so) for rpath, so in zip(_builtin_rpaths, _builtin_so_copied)],
+      ),
+      tools = [
+          "@patchelf",
+      ],
+  )
+
+  native.genrule(
+      name = "run_patchelf_system" + version,
+      srcs = _system_so_files,
+      outs = _system_so_copied,
+      cmd = "\n".join(
+          [
+              "cp $(location %s) $(location %s)" % (src, dest)
+              for src, dest in zip(_system_so_files, _system_so_copied)
+          ] +
+          ["$(location @patchelf) --set-rpath %s $(location %s)" % (rpath, so) for rpath, so in zip(_system_rpaths, _system_so_copied)],
+      ),
+      tools = [
+          "@patchelf",
+      ],
+  )
+
+  native.genrule(
+      name = "copy_files" + version,
+      srcs = _src_files,
+      outs = _src_copied,
+      cmd = " && ".join(["cp $(location %s) $(location %s)" % (src, dest) for src, dest in zip(
+          _src_files,
+          _src_copied,
+      )]),
+  )
+
+  if copy_shared_files:
+    native.genrule(
+        name = "create_rc" + version,
+        outs = ["usr/share/matplotlib/mpl-data/matplotlibrc"],
+        cmd = "\n".join([
+            "cat > $@ << END",
+            # This is necessary to make matplotlib actually plot things to the
+            # screen by default.
+            "backend      : TkAgg",
+            "END",
+        ]),
+    )
+
+  native.py_library(
+      name = "matplotlib" + version,
+      srcs = _src_copied + [
+          version + "/matplotlib/__init__.py",
+      ],
+      data = _data_files + _builtin_so_copied + _system_so_copied + [
+          ":usr/share/matplotlib/mpl-data/matplotlibrc",
+      ],
+      imports = ["usr/lib/python" + version + "/dist-packages", version, "."],
+      restricted_to = ["@//tools:k8"],
+      visibility = ["//visibility:public"],
+  )
diff --git a/debian/matplotlib_init.patch b/debian/matplotlib_init.patch
index 0106b64..e547108 100644
--- a/debian/matplotlib_init.patch
+++ b/debian/matplotlib_init.patch
@@ -18,7 +18,7 @@
 +os.environ['MATPLOTLIBDATA'] = \
 +        os.path.join( \
 +            _matplotlib_base,
-+            "usr", "share", "matplotlib", "mpl-data")
++            "..", "usr", "share", "matplotlib", "mpl-data")
 +# Avoid reading /etc/matplotlib in all cases. Matplotlib is pretty happy to
 +# escape the sandbox by using absolute paths.
 +os.environ['MATPLOTLIBRC'] = os.environ['MATPLOTLIBDATA']
@@ -29,7 +29,7 @@
 +
 +# Tell Tcl where to find the sandboxed version. Otherwise, it will try using
 +# one from the host system, even if that's an incompatible version.
-+os.environ['TCL_LIBRARY'] = os.path.join(_matplotlib_base, 'usr', 'share',
++os.environ['TCL_LIBRARY'] = os.path.join(_matplotlib_base, '..', 'usr', 'share',
 +                                         'tcltk', 'tcl8.6')
 +
  try:
diff --git a/debian/python.BUILD b/debian/python.BUILD
index 666f2d8..5e5d810 100644
--- a/debian/python.BUILD
+++ b/debian/python.BUILD
@@ -1,16 +1,20 @@
 package(default_visibility = ["@//debian:__pkg__"])
 
 cc_library(
-    name = "python3.4_lib",
-    hdrs = glob(["usr/include/python3.4m/**/*.h"]),
+    name = "python3.5_lib",
+    srcs = [
+        "usr/lib/x86_64-linux-gnu/libpython3.5m.so",
+    ],
+    hdrs = glob(["usr/include/**/*.h"]),
     includes = [
-        "usr/include/python3.4m/",
+        "usr/include/",
+        "usr/include/python3.5m/",
     ],
     visibility = ["//visibility:public"],
 )
 
 cc_library(
-    name = "python3.4_f2py",
+    name = "python3.5_f2py",
     srcs = [
         "usr/lib/python3/dist-packages/numpy/f2py/src/fortranobject.c",
     ],
@@ -26,7 +30,7 @@
     ],
     visibility = ["//visibility:public"],
     deps = [
-        ":python3.4_lib",
+        ":python3.5_lib",
     ],
 )
 
diff --git a/frc971/analysis/BUILD b/frc971/analysis/BUILD
index b45031f..2686bcd 100644
--- a/frc971/analysis/BUILD
+++ b/frc971/analysis/BUILD
@@ -1,5 +1,7 @@
 package(default_visibility = ["//visibility:public"])
 
+load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
+
 py_binary(
     name = "plot_action",
     srcs = [
@@ -12,7 +14,7 @@
     restricted_to = ["//tools:k8"],
     deps = [
         ":python_init",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -21,3 +23,59 @@
     srcs = ["__init__.py"],
     deps = ["//frc971:python_init"],
 )
+
+cc_binary(
+    name = "py_log_reader.so",
+    srcs = ["py_log_reader.cc"],
+    linkshared = True,
+    restricted_to = ["//tools:k8"],
+    deps = [
+        "//aos:configuration",
+        "//aos:json_to_flatbuffer",
+        "//aos/events:shm_event_loop",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:logger",
+        "@com_github_google_glog//:glog",
+        "@python_repo//:python3.5_lib",
+    ],
+)
+
+py_test(
+    name = "log_reader_test",
+    srcs = ["log_reader_test.py"],
+    data = [
+        ":py_log_reader.so",
+        "@sample_logfile//file",
+    ],
+    restricted_to = ["//tools:k8"],
+    deps = ["//aos:configuration_fbs_python"],
+)
+
+py_proto_library(
+    name = "plot_config_proto",
+    srcs = ["plot_config.proto"],
+)
+
+py_binary(
+    name = "plot",
+    srcs = ["plot.py"],
+    data = [
+        ":py_log_reader.so",
+    ] + glob(["plot_configs/**"]),
+    restricted_to = ["//tools:k8"],
+    deps = [
+        ":plot_config_proto",
+        ":python_init",
+        "@matplotlib_repo//:matplotlib3",
+    ],
+)
+
+py_test(
+    name = "plot_test",
+    srcs = ["plot_test.py"],
+    data = [
+        "@sample_logfile//file",
+    ],
+    restricted_to = ["//tools:k8"],
+    deps = [":plot"],
+)
diff --git a/frc971/analysis/log_reader_test.py b/frc971/analysis/log_reader_test.py
new file mode 100644
index 0000000..3389d00
--- /dev/null
+++ b/frc971/analysis/log_reader_test.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python3
+import json
+import unittest
+
+from aos.Configuration import Configuration
+from frc971.analysis.py_log_reader import LogReader
+
+
+class LogReaderTest(unittest.TestCase):
+    def setUp(self):
+        self.reader = LogReader("external/sample_logfile/file/log.fbs")
+        # A list of all the channels in the logfile--this is used to confirm that
+        # we did indeed read the config correctly.
+        self.all_channels = [
+            ("/aos", "aos.JoystickState"), ("/aos", "aos.RobotState"),
+            ("/aos", "aos.timing.Report"), ("/aos", "frc971.PDPValues"),
+            ("/aos",
+             "frc971.wpilib.PneumaticsToLog"), ("/autonomous",
+                                                "aos.common.actions.Status"),
+            ("/autonomous", "frc971.autonomous.AutonomousMode"),
+            ("/autonomous", "frc971.autonomous.Goal"), ("/camera",
+                                                        "y2019.CameraLog"),
+            ("/camera", "y2019.control_loops.drivetrain.CameraFrame"),
+            ("/drivetrain",
+             "frc971.IMUValues"), ("/drivetrain",
+                                   "frc971.control_loops.drivetrain.Goal"),
+            ("/drivetrain",
+             "frc971.control_loops.drivetrain.LocalizerControl"),
+            ("/drivetrain", "frc971.control_loops.drivetrain.Output"),
+            ("/drivetrain", "frc971.control_loops.drivetrain.Position"),
+            ("/drivetrain", "frc971.control_loops.drivetrain.Status"),
+            ("/drivetrain", "frc971.sensors.GyroReading"),
+            ("/drivetrain",
+             "y2019.control_loops.drivetrain.TargetSelectorHint"),
+            ("/superstructure",
+             "y2019.StatusLight"), ("/superstructure",
+                                    "y2019.control_loops.superstructure.Goal"),
+            ("/superstructure", "y2019.control_loops.superstructure.Output"),
+            ("/superstructure", "y2019.control_loops.superstructure.Position"),
+            ("/superstructure", "y2019.control_loops.superstructure.Status")
+        ]
+        # A channel that is known to have data on it which we will use for testing.
+        self.test_channel = ("/aos", "aos.timing.Report")
+        # A non-existent channel
+        self.bad_channel = ("/aos", "aos.timing.FooBar")
+
+    def test_do_nothing(self):
+        """Tests that we sanely handle doing nothing.
+
+        A previous iteration of the log reader seg faulted when doing this."""
+        pass
+
+    def test_read_config(self):
+        """Tests that we can read the configuration from the logfile."""
+        config_bytes = self.reader.configuration()
+        config = Configuration.GetRootAsConfiguration(config_bytes, 0)
+
+        channel_set = set(self.all_channels)
+        for ii in range(config.ChannelsLength()):
+            channel = config.Channels(ii)
+            # Will raise KeyError if the channel does not exist
+            channel_set.remove((channel.Name().decode("utf-8"),
+                                channel.Type().decode("utf-8")))
+
+        self.assertEqual(0, len(channel_set))
+
+    def test_empty_process(self):
+        """Tests running process() without subscribing to anything succeeds."""
+        self.reader.process()
+        for channel in self.all_channels:
+            with self.assertRaises(ValueError) as context:
+                self.reader.get_data_for_channel(channel[0], channel[1])
+
+    def test_subscribe(self):
+        """Tests that we can subscribe to a channel and get data out."""
+        name = self.test_channel[0]
+        message_type = self.test_channel[1]
+        self.assertTrue(self.reader.subscribe(name, message_type))
+        self.reader.process()
+        data = self.reader.get_data_for_channel(name, message_type)
+        self.assertLess(100, len(data))
+        last_monotonic_time = 0
+        for entry in data:
+            monotonic_time = entry[0]
+            realtime_time = entry[1]
+            json_data = entry[2].replace('nan', '"nan"')
+            self.assertLess(last_monotonic_time, monotonic_time)
+            # Sanity check that the realtime times are in the correct range.
+            self.assertLess(1500000000e9, realtime_time)
+            self.assertGreater(2000000000e9, realtime_time)
+            parsed_json = json.loads(json_data)
+            self.assertIn("name", parsed_json)
+
+            last_monotonic_time = monotonic_time
+
+    def test_bad_subscribe(self):
+        """Tests that we return false when subscribing to a non-existent channel."""
+        self.assertFalse(
+            self.reader.subscribe(self.bad_channel[0], self.bad_channel[1]),
+            self.bad_channel)
+
+    def test_subscribe_after_process(self):
+        """Tests that an exception is thrown if we subscribe after calling process()."""
+        self.reader.process()
+        for channel in self.all_channels:
+            with self.assertRaises(RuntimeError) as context:
+                self.reader.subscribe(channel[0], channel[1])
+
+    def test_get_data_before_process(self):
+        """Tests that an exception is thrown if we retrieve data before calling process()."""
+        for channel in self.all_channels:
+            with self.assertRaises(RuntimeError) as context:
+                self.reader.get_data_for_channel(channel[0], channel[1])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/frc971/analysis/plot.py b/frc971/analysis/plot.py
new file mode 100644
index 0000000..4367c08
--- /dev/null
+++ b/frc971/analysis/plot.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python3
+# Sample usage:
+# bazel run -c opt //frc971/analysis:plot  -- --logfile /tmp/log.fbs --config gyro.pb
+import argparse
+import json
+import os.path
+from pathlib import Path
+import sys
+
+from frc971.analysis.py_log_reader import LogReader
+from frc971.analysis.plot_config_pb2 import PlotConfig, Signal
+from google.protobuf import text_format
+
+import matplotlib
+from matplotlib import pyplot as plt
+
+
+class Plotter:
+    def __init__(self, plot_config: PlotConfig, reader: LogReader):
+        self.config = plot_config
+        self.reader = reader
+        # Data streams, indexed by alias.
+        self.data = {}
+
+    def process_logfile(self):
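+        """Subscribes to every aliased channel in the config and reads the log.
+
+        After this runs, self.data maps each channel alias to a list of
+        (monotonic_time, realtime_time, parsed_json) tuples.
+        """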
+        aliases = set()
+        for channel in self.config.channel:
+            if channel.alias in aliases:
+                raise ValueError("Duplicate alias " + channel.alias)
+            aliases.add(channel.alias)
+            if not self.reader.subscribe(channel.name, channel.type):
+                raise ValueError("No such channel with name " + channel.name +
+                                 " and type " + channel.type)
+
+        self.reader.process()
+
+        for channel in self.config.channel:
+            self.data[channel.alias] = []
+            for message in self.reader.get_data_for_channel(
+                    channel.name, channel.type):
+                valid_json = message[2].replace('nan', '"nan"')
+                parsed_json = json.loads(valid_json)
+                self.data[channel.alias].append((message[0], message[1],
+                                                 parsed_json))
+
+    def plot_signal(self, axes: matplotlib.axes.Axes, signal: Signal):
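+        """Plots a single signal onto the given axes.
+
+        Walks the dotted field path (e.g. "foo.bar") through the parsed JSON of
+        each message on the signal's channel; timestamps are converted from
+        nanoseconds to seconds for the x axis.
+        """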
+        if signal.channel not in self.data:
+            raise ValueError("No channel alias " + signal.channel)
+        field_path = signal.field.split('.')
+        monotonic_time = []
+        signal_data = []
+        for entry in self.data[signal.channel]:
+            monotonic_time.append(entry[0] * 1e-9)
+            value = entry[2]
+            for name in field_path:
+                value = value[name]
+            # Catch NaNs and convert them to floats.
+            value = float(value)
+            signal_data.append(value)
+        label_name = signal.channel + "." + signal.field
+        axes.plot(monotonic_time, signal_data, label=label_name)
+
+    def plot(self):
+        for figure_config in self.config.figure:
+            fig = plt.figure()
+            num_subplots = len(figure_config.axes)
+            for ii in range(num_subplots):
+                axes = fig.add_subplot(num_subplots, 1, ii + 1)
+                axes_config = figure_config.axes[ii]
+                for signal in axes_config.signal:
+                    self.plot_signal(axes, signal)
+                axes.legend()
+                axes.set_xlabel("Monotonic Time (sec)")
+                if axes_config.HasField("ylabel"):
+                    axes.set_ylabel(axes_config.ylabel)
+
+
+def main(argv):
+    parser = argparse.ArgumentParser(
+        description="Plot data from an aos logfile.")
+    parser.add_argument(
+        "--logfile",
+        type=str,
+        required=True,
+        help="Path to the logfile to parse.")
+    parser.add_argument(
+        "--config",
+        type=str,
+        required=True,
+        help="Name of the plot config to use.")
+    parser.add_argument(
+        "--config_dir",
+        type=str,
+        default="frc971/analysis/plot_configs",
+        help="Directory to look for plot configs in.")
+    args = parser.parse_args(argv[1:])
+
+    if not os.path.isdir(args.config_dir):
+        print(args.config_dir + " is not a directory.")
+        return 1
+    config_path = os.path.join(args.config_dir, args.config)
+    if not os.path.isfile(config_path):
+        print(config_path +
+              " does not exist or is not a file--available configs are")
+        for file_name in os.listdir(args.config_dir):
+            print(os.path.basename(file_name))
+        return 1
+
+    config = PlotConfig()
+    with open(config_path) as config_file:
+        text_format.Merge(config_file.read(), config)
+
+    if not os.path.isfile(args.logfile):
+        print(args.logfile + " is not a file.")
+        return 1
+
+    reader = LogReader(args.logfile)
+
+    plotter = Plotter(config, reader)
+    plotter.process_logfile()
+    plotter.plot()
+    plt.show()
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/frc971/analysis/plot_config.proto b/frc971/analysis/plot_config.proto
new file mode 100644
index 0000000..9aaea89
--- /dev/null
+++ b/frc971/analysis/plot_config.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+
+package frc971.analysis;
+
+// Specification of a Channel to pull from the logfile. The name and type will
+// be the full name/type of the channel to pull from the logfile. The alias is
+// a shorter, easier-to-type name that the rest of the config will use to refer
+// to the channel.
+message Channel {
+  optional string name = 1;
+  optional string type = 2;
+  optional string alias = 3;
+}
+
+// A specification for a single signal within a Channel.
+message Signal {
+  // Alias for the channel to pull the signal from--this should match an alias
+  // specified in one of the Channels.
+  optional string channel = 1;
+  // Specification of the field to plot. Currently, this only supports simple
+  // submessages, using dots. To access, e.g., the "bar" member of the "foo"
+  // submessage, field would be "foo.bar". This does not currently support
+  // working with repeated fields.
+  optional string field = 2;
+}
+
+// Message representing a single pyplot Axes, with specifications for exactly
+// which signals to show in the supplied subplot.
+message Axes {
+  repeated Signal signal = 1;
+  optional string ylabel = 2;
+}
+
+// Message representing a single pyplot figure.
+message Figure {
+  repeated Axes axes = 1;
+}
+
+// This configuration specifies what to plot when reading from a logfile.
+message PlotConfig {
+  // List of channels and their aliases to use in the plot.
+  repeated Channel channel = 1;
+  // Figures to plot.
+  repeated Figure figure = 2;
+}
diff --git a/frc971/analysis/plot_configs/gyro.pb b/frc971/analysis/plot_configs/gyro.pb
new file mode 100644
index 0000000..e398a2e
--- /dev/null
+++ b/frc971/analysis/plot_configs/gyro.pb
@@ -0,0 +1,38 @@
+channel {
+  name: "/drivetrain"
+  type: "frc971.IMUValues"
+  alias: "IMU"
+}
+
+figure {
+  axes {
+    signal {
+      channel: "IMU"
+      field: "gyro_x"
+    }
+    signal {
+      channel: "IMU"
+      field: "gyro_y"
+    }
+    signal {
+      channel: "IMU"
+      field: "gyro_z"
+    }
+    ylabel: "rad / sec"
+  }
+  axes {
+    signal {
+      channel: "IMU"
+      field: "accelerometer_x"
+    }
+    signal {
+      channel: "IMU"
+      field: "accelerometer_y"
+    }
+    signal {
+      channel: "IMU"
+      field: "accelerometer_z"
+    }
+    ylabel: "g"
+  }
+}
diff --git a/frc971/analysis/plot_test.py b/frc971/analysis/plot_test.py
new file mode 100644
index 0000000..1c57ae2
--- /dev/null
+++ b/frc971/analysis/plot_test.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python3
+import unittest
+
+import matplotlib
+# Use a non-interactive backend so that the test can actually run...
+matplotlib.use('Agg')
+
+import frc971.analysis.plot
+
+
+class PlotterTest(unittest.TestCase):
+    def test_plotter(self):
+        """Basic test that makes sure that we can run the test without crashing."""
+        self.assertEqual(0,
+                         frc971.analysis.plot.main([
+                             "binary", "--logfile",
+                             "external/sample_logfile/file/log.fbs",
+                             "--config", "gyro.pb"
+                         ]))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/frc971/analysis/plotting/BUILD b/frc971/analysis/plotting/BUILD
new file mode 100644
index 0000000..eb817ba
--- /dev/null
+++ b/frc971/analysis/plotting/BUILD
@@ -0,0 +1,30 @@
+cc_library(
+    name = "webgl2_plotter",
+    srcs = ["webgl2_plotter.cc"],
+    hdrs = ["webgl2_plotter.h"],
+    linkopts = [
+        "-s",
+        "USE_WEBGL2=1",
+        "-s",
+        "FULL_ES3=1",
+    ],
+    restricted_to = ["//tools:web"],
+    visibility = ["//visibility:public"],
+    deps = ["@org_tuxfamily_eigen//:eigen"],
+)
+
+cc_library(
+    name = "webgl2_animator",
+    srcs = ["webgl2_animator.cc"],
+    hdrs = ["webgl2_animator.h"],
+    linkopts = [
+        "-s",
+        "USE_WEBGL2=1",
+    ],
+    restricted_to = ["//tools:web"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":webgl2_plotter",
+        "@org_tuxfamily_eigen//:eigen",
+    ],
+)
diff --git a/frc971/analysis/plotting/webgl2_animator.cc b/frc971/analysis/plotting/webgl2_animator.cc
new file mode 100644
index 0000000..148245e
--- /dev/null
+++ b/frc971/analysis/plotting/webgl2_animator.cc
@@ -0,0 +1,283 @@
+#include "frc971/analysis/plotting/webgl2_animator.h"
+
+namespace frc971 {
+namespace plotting {
+
+namespace {
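+// Describes a mouse button in terms of the DOM MouseEvent encoding: `button`
+// reports transitions (0 = left, 1 = middle, 2 = right), while `buttons` is a
+// bitmask of currently held buttons (bit 0 = left, bit 1 = right,
+// bit 2 = middle), hence the two separate indices.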
+struct Button {
+  bool IsTransition(const EmscriptenMouseEvent &mouse_event) {
+    return mouse_event.button == transition_number_;
+  }
+  bool IsPressed(const EmscriptenMouseEvent &mouse_event) {
+    return mouse_event.buttons & (1 << pressed_index_);
+  }
+  const size_t transition_number_;
+  const size_t pressed_index_;
+};
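+// The constants below follow the DOM MouseEvent encoding: MouseEvent.button
+// reports 0/1/2 for left/middle/right, while the MouseEvent.buttons bitmask
+// uses bit 0 for left, bit 1 for right, and bit 2 for middle.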
+constexpr Button kLeftButton() { return {0, 0}; }
+//constexpr Button kMiddleButton() { return {1, 2}; }
+constexpr Button kRightButton() { return {2, 1}; }
+
+constexpr Button kPanButton() { return kLeftButton(); }
+constexpr Button kZoomButton() { return kRightButton(); }
+}  // namespace
+
+Animator::Animator(const char *canvas_target) : plotter_(canvas_target) {
+  // TODO(james): Write a proper CHECK macro or figure out how to import glog.
+  // Importing glog is a bit of a pain, since it seems to assume all sorts of
+  // things that don't really apply on the web.
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_get_canvas_element_size(canvas_target, &canvas_width_,
+                                            &canvas_height_));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_mousemove_callback("#canvas", this, true,
+                                           &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_click_callback("#canvas", this, true,
+                                       &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_mousedown_callback("#canvas", this, true,
+                                           &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_mouseup_callback("#canvas", this, true,
+                                         &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_mouseleave_callback("#canvas", this, true,
+                                            &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_mouseenter_callback("#canvas", this, true,
+                                            &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_dblclick_callback("#canvas", this, true,
+                                          &Animator::MouseCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_wheel_callback("#canvas", this, true,
+                                       &Animator::WheelCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_keypress_callback("#document", this, true,
+                                          &Animator::KeyboardCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_keydown_callback("#document", this, true,
+                                         &Animator::KeyboardCallback));
+  assert(EMSCRIPTEN_RESULT_SUCCESS ==
+         emscripten_set_keyup_callback("#document", this, true,
+                                       &Animator::KeyboardCallback));
+  emscripten_request_animation_frame_loop(&Animator::Redraw, this);
+}
+
+Eigen::Vector2d Animator::MouseCanvasLocation(
+    const EmscriptenMouseEvent &mouse_event) {
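+  // Map the mouse position from canvas pixels into WebGL clip space, i.e.
+  // [-1, 1] on each axis, flipping Y so that up on the screen is +Y.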
+  return {mouse_event.canvasX * 2.0 / canvas_width_ - 1.0,
+          -mouse_event.canvasY * 2.0 / canvas_height_ + 1.0};
+}
+
+Eigen::Vector2d Animator::CanvasToPlotLocation(
+    const Eigen::Vector2d &canvas_loc) {
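+  // Invert the canvas = plot * scale + offset transform that the vertex
+  // shader applies.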
+  return (canvas_loc - plotter_.GetOffset()).cwiseQuotient(plotter_.GetScale());
+}
+
+void Animator::PrintZoom() {
+  const Eigen::Vector2d upper_right = CanvasToPlotLocation({1.0, 1.0});
+  const Eigen::Vector2d lower_left = CanvasToPlotLocation({-1.0, -1.0});
+  printf("X range is [%f, %f]; Y range is [%f, %f]\n", lower_left.x(),
+         upper_right.x(), lower_left.y(), upper_right.y());
+}
+
+void Animator::PrintPosition(const EmscriptenMouseEvent &mouse_event) {
+  const Eigen::Vector2d mouse_pos =
+      CanvasToPlotLocation(MouseCanvasLocation(mouse_event));
+  printf("Mouse position: (%f, %f)\n", mouse_pos.x(), mouse_pos.y());
+}
+
+void Animator::HandleMouseUp(const EmscriptenMouseEvent &mouse_event) {
+  if (!kZoomButton().IsTransition(mouse_event)) {
+    return;
+  }
+  // We aborted the zoom early for some reason and so shouldn't execute on it:
+  if (!doing_rectangle_zoom_) {
+    return;
+  }
+  const Eigen::Vector2d mouse_up_location = MouseCanvasLocation(mouse_event);
+  doing_rectangle_zoom_ = false;
+  plotter_.ClearZoomRectangle();
+  // The user probably didn't mean to zoom on that click...
+  if ((mouse_up_location - mouse_down_location_).cwiseAbs().minCoeff() < 1e-3) {
+    return;
+  }
+  const Eigen::Vector2d mouse_up_plot_location =
+      CanvasToPlotLocation(mouse_up_location);
+  const Eigen::Vector2d mouse_down_plot_location =
+      CanvasToPlotLocation(mouse_down_location_);
+  SetZoomCorners(mouse_down_plot_location, mouse_up_plot_location);
+}
+
+void Animator::HandleMouseDown(const EmscriptenMouseEvent &mouse_event) {
+  if (kZoomButton().IsTransition(mouse_event)) {
+    mouse_down_location_ = MouseCanvasLocation(mouse_event);
+    doing_rectangle_zoom_ = true;
+  } else if (kPanButton().IsTransition(mouse_event)) {
+    last_pan_mouse_location_ = MouseCanvasLocation(mouse_event);
+  }
+}
+
+void Animator::HandleMouseMove(const EmscriptenMouseEvent &mouse_event) {
+  const Eigen::Vector2d mouse_location = MouseCanvasLocation(mouse_event);
+  if (kPanButton().IsPressed(mouse_event)) {
+    SetFilteredZoom(plotter_.GetScale(), plotter_.GetOffset() + mouse_location -
+                                            last_pan_mouse_location_);
+    last_pan_mouse_location_ = mouse_location;
+  }
+  if (doing_rectangle_zoom_) {
+    Eigen::Vector2d c1 = CanvasToPlotLocation(mouse_down_location_);
+    Eigen::Vector2d c2 = CanvasToPlotLocation(mouse_location);
+    const Eigen::Vector2d upper_right = CanvasToPlotLocation({1.0, 1.0});
+    const Eigen::Vector2d bottom_left = CanvasToPlotLocation({-1.0, -1.0});
+    if (x_pressed_ && !y_pressed_) {
+      c1.y() = upper_right.y();
+      c2.y() = bottom_left.y();
+    }
+    if (y_pressed_ && !x_pressed_) {
+      c1.x() = upper_right.x();
+      c2.x() = bottom_left.x();
+    }
+    plotter_.SetZoomRectangle(c1, c2);
+  }
+}
+
+void Animator::HandleMouseEnter(const EmscriptenMouseEvent &mouse_event) {
+  // If the zoom button was released while the mouse was off the canvas and we
+  // were mid-zoom, finish the rectangle zoom immediately.
+  if (doing_rectangle_zoom_ && !kZoomButton().IsPressed(mouse_event)) {
+    plotter_.ClearZoomRectangle();
+    doing_rectangle_zoom_ = false;
+    // Round the current mouse location to the nearest of the four corners.
+    // This ensures that a zoom that ends with the user going off the edge of
+    // the screen actually goes right up to the edge of the canvas.
+    // Technically, cwiseSign will return zero if the mouse-enter event fires
+    // with the mouse exactly at the center of the canvas, but that seems
+    // unlikely.
+    const Eigen::Vector2d canvas_corner =
+        MouseCanvasLocation(mouse_event).cwiseSign();
+    SetZoomCorners(CanvasToPlotLocation(mouse_down_location_),
+                   CanvasToPlotLocation(canvas_corner));
+  }
+}
+
+void Animator::SetZoomCorners(const Eigen::Vector2d &c1,
+                              const Eigen::Vector2d &c2) {
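+  // Pick scale/offset so that canvas = plot * scale + offset maps the
+  // rectangle spanned by c1 and c2 onto the full [-1, 1] canvas. Per axis,
+  // scale = 2 / |c2 - c1| and offset = 1 - scale * max(c1, c2).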
+  const Eigen::Vector2d scale = ((c2 - c1).cwiseAbs() / 2.0).cwiseInverse();
+  const Eigen::Vector2d offset =
+      Eigen::Vector2d::Ones() - scale.cwiseProduct(c2.cwiseMax(c1));
+  SetFilteredZoom(scale, offset);
+}
+
+void Animator::SetFilteredZoom(Eigen::Vector2d scale, Eigen::Vector2d offset) {
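+  // While only one of the "x"/"y" keys is held, restrict the update to that
+  // axis by keeping the other axis' scale and offset unchanged.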
+  if (!x_pressed_ && y_pressed_) {
+    scale.x() = plotter_.GetScale().x();
+    offset.x() = plotter_.GetOffset().x();
+  }
+  if (!y_pressed_ && x_pressed_) {
+    scale.y() = plotter_.GetScale().y();
+    offset.y() = plotter_.GetOffset().y();
+  }
+  plotter_.RecordState();
+  plotter_.SetScale(scale);
+  plotter_.SetOffset(offset);
+  PrintZoom();
+}
+
+void Animator::ResetView() {
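+  // Zoom back out so that every point of every line is visible.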
+  SetZoomCorners(plotter_.MinValues(), plotter_.MaxValues());
+}
+
+int Animator::Redraw(double time_ms, void *data) {
+  Animator *state = reinterpret_cast<Animator *>(data);
+  state->plotter_.Redraw();
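+  // Return a nonzero value so that emscripten keeps the animation-frame loop
+  // running.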
+  return 1;
+}
+
+int Animator::KeyboardCallback(int event_type,
+                               const EmscriptenKeyboardEvent *key_event,
+                               void *data) {
+  Animator *state = reinterpret_cast<Animator *>(data);
+  const bool key_is_pressed = event_type == EMSCRIPTEN_EVENT_KEYDOWN;
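+  // Compare one byte past the key name so the trailing NUL must match too;
+  // this keeps "x" from matching longer key values such as "xy".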
+  if (strncmp(key_event->key, "x", 2) == 0) {
+    state->x_pressed_ = key_is_pressed;
+    return true;
+  } else if (strncmp(key_event->key, "y", 2) == 0) {
+    state->y_pressed_ = key_is_pressed;
+    return true;
+  } else if (strncmp(key_event->key, "z", 2) == 0 && key_event->ctrlKey) {
+    if (event_type == EMSCRIPTEN_EVENT_KEYUP) {
+      state->plotter_.Undo();
+      state->PrintZoom();
+    }
+    return true;
+  } else if (strncmp(key_event->key, "Escape", 7) == 0) {
+    state->doing_rectangle_zoom_ = false;
+    state->plotter_.ClearZoomRectangle();
+    return true;
+  }
+  return false;
+}
+
+int Animator::WheelCallback(int event_type,
+                            const EmscriptenWheelEvent *wheel_event,
+                            void *data) {
+  Animator *state = reinterpret_cast<Animator *>(data);
+  assert(event_type == EMSCRIPTEN_EVENT_WHEEL);
+  if (wheel_event->deltaMode == DOM_DELTA_PIXEL) {
+    const Eigen::Vector2d canvas_pos =
+        state->MouseCanvasLocation(wheel_event->mouse);
+    constexpr double kWheelTuningScalar = 3.0;
+    const double zoom =
+        -kWheelTuningScalar * wheel_event->deltaY / state->canvas_height_;
+    double zoom_scalar = 1.0 + std::abs(zoom);
+    if (zoom < 0) {
+      zoom_scalar = 1.0 / zoom_scalar;
+    }
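+    // Zoom about the cursor: combining the new scale with
+    // offset' = (1 - zoom_scalar) * canvas_pos + zoom_scalar * offset keeps
+    // the plot point under the mouse at the same canvas position.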
+    const Eigen::Vector2d scale = state->plotter_.GetScale() * zoom_scalar;
+    const Eigen::Vector2d offset = (1.0 - zoom_scalar) * canvas_pos +
+                                   zoom_scalar * state->plotter_.GetOffset();
+    state->SetFilteredZoom(scale, offset);
+    return true;
+  }
+  return false;
+}
+
+int Animator::MouseCallback(int event_type,
+                            const EmscriptenMouseEvent *mouse_event,
+                            void *data) {
+  Animator *state = reinterpret_cast<Animator *>(data);
+  switch (event_type) {
+    case EMSCRIPTEN_EVENT_CLICK:
+      state->PrintZoom();
+      state->PrintPosition(*mouse_event);
+      return true;
+      break;
+    case EMSCRIPTEN_EVENT_DBLCLICK:
+      state->ResetView();
+      return true;
+      break;
+    case EMSCRIPTEN_EVENT_MOUSEDOWN:
+      state->HandleMouseDown(*mouse_event);
+      return true;
+      break;
+    case EMSCRIPTEN_EVENT_MOUSEUP:
+      state->HandleMouseUp(*mouse_event);
+      return true;
+      break;
+    case EMSCRIPTEN_EVENT_MOUSEMOVE:
+      state->HandleMouseMove(*mouse_event);
+      return true;
+      break;
+    case EMSCRIPTEN_EVENT_MOUSEENTER:
+      state->HandleMouseEnter(*mouse_event);
+      return true;
+      break;
+  }
+  return false;
+}
+
+}  // namespace plotting
+}  // namespace frc971
diff --git a/frc971/analysis/plotting/webgl2_animator.h b/frc971/analysis/plotting/webgl2_animator.h
new file mode 100644
index 0000000..4674bc0
--- /dev/null
+++ b/frc971/analysis/plotting/webgl2_animator.h
@@ -0,0 +1,79 @@
+#ifndef FRC971_ANALYSIS_PLOTTING_WEBGL2_ANIMATOR_H_
+#define FRC971_ANALYSIS_PLOTTING_WEBGL2_ANIMATOR_H_
+
+#include <Eigen/Dense>
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+
+#include "frc971/analysis/plotting/webgl2_plotter.h"
+
+namespace frc971 {
+namespace plotting {
+
+// TODO(james): Write some tests for this class. It shouldn't be too hard to
+// abstract out all the direct emscripten calls. Mostly it's just some
+// initialization at the moment.
+class Animator {
+ public:
+  Animator(const char *canvas_target);
+
+  Plotter *plotter() { return &plotter_; }
+
+ private:
+  Eigen::Vector2d MouseCanvasLocation(const EmscriptenMouseEvent &mouse_event);
+
+  Eigen::Vector2d CanvasToPlotLocation(const Eigen::Vector2d &canvas_loc);
+
+  void PrintZoom();
+
+  void PrintPosition(const EmscriptenMouseEvent &mouse_event);
+
+  void HandleMouseUp(const EmscriptenMouseEvent &mouse_event);
+
+  void HandleMouseDown(const EmscriptenMouseEvent &mouse_event);
+
+  void HandleMouseMove(const EmscriptenMouseEvent &mouse_event);
+
+  void HandleMouseEnter(const EmscriptenMouseEvent &mouse_event);
+
+  void SetZoomCorners(const Eigen::Vector2d &c1, const Eigen::Vector2d &c2);
+
+  void SetFilteredZoom(Eigen::Vector2d scale, Eigen::Vector2d offset);
+
+  void ResetView();
+
+  static int Redraw(double time_ms, void *data);
+  static int KeyboardCallback(int event_type,
+                              const EmscriptenKeyboardEvent *key_event,
+                              void *data);
+  static int WheelCallback(int event_type,
+                           const EmscriptenWheelEvent *wheel_event, void *data);
+  static int MouseCallback(int event_type,
+                           const EmscriptenMouseEvent *mouse_event, void *data);
+
+  int canvas_width_ = 0;
+  int canvas_height_ = 0;
+
+  // Location, in canvas coordinates, of the mouse-down event that started the
+  // current rectangle zoom.
+  Eigen::Vector2d mouse_down_location_{0, 0};
+
+  // True if the user is currently dragging their mouse to zoom to a rectangle.
+  // This is used to (a) determine whether we should subsequently execute the
+  // zoom when the user releases the mouse and (b) to know when to draw a
+  // rectangle indicating where the user is zooming to.
+  bool doing_rectangle_zoom_ = false;
+
+  // The last location of the mouse when panning, so that we can calculate
+  // exactly how much the mouse has moved since the last mouse-move callback.
+  Eigen::Vector2d last_pan_mouse_location_{0, 0};
+
+  // Whether the "x" or "y" key is currently pressed on the keyboard.
+  bool x_pressed_ = false;
+  bool y_pressed_ = false;
+
+  WebglCanvasPlotter plotter_;
+};
+
+}  // namespace plotting
+}  // namespace frc971
+#endif  // FRC971_ANALYSIS_PLOTTING_WEBGL2_ANIMATOR_H_
diff --git a/frc971/analysis/plotting/webgl2_plotter.cc b/frc971/analysis/plotting/webgl2_plotter.cc
new file mode 100644
index 0000000..b10a1de
--- /dev/null
+++ b/frc971/analysis/plotting/webgl2_plotter.cc
@@ -0,0 +1,277 @@
+#include "frc971/analysis/plotting/webgl2_plotter.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <iostream>
+
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+
+namespace frc971 {
+namespace plotting {
+
+namespace {
+// Shader and program construction taken from examples at
+// https://github.com/emscripten-core/emscripten/blob/incoming/tests/webgl2_draw_packed_triangle.c
+GLuint compile_shader(GLenum shaderType, const char *src) {
+  GLuint shader = glCreateShader(shaderType);
+  glShaderSource(shader, 1, &src, nullptr);
+  glCompileShader(shader);
+
+  GLint isCompiled = 0;
+  glGetShaderiv(shader, GL_COMPILE_STATUS, &isCompiled);
+  if (!isCompiled) {
+    GLint maxLength = 0;
+    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength);
+    char *buf = (char *)malloc(maxLength + 1);
+    glGetShaderInfoLog(shader, maxLength, &maxLength, buf);
+    printf("%s\n", buf);
+    free(buf);
+    return 0;
+  }
+
+  return shader;
+}
+
+GLuint create_program(GLuint vertexShader, GLuint fragmentShader,
+                      GLuint attribute_location) {
+  GLuint program = glCreateProgram();
+  glAttachShader(program, vertexShader);
+  glAttachShader(program, fragmentShader);
+  glBindAttribLocation(program, attribute_location, "apos");
+  glLinkProgram(program);
+  return program;
+}
+
+GLuint CreateLineProgram(GLuint attribute_location) {
+  // Create a shader program which will take in:
+  // - A series of points to plot
+  // - Scale/offset parameters for choosing how to zoom/pan
+  // - Point size/color information
+  //
+  // The vertex shader then takes in the series of points (apos) and
+  // transforms the points by the scale/offset to determine their on-screen
+  // position. We aren't doing anything funny with 3D or perspective, so we
+  // leave the z and w components untouched.
+  //
+  // We set the color of the line in the fragment shader.
+  const char vertex_shader[] =
+    "#version 100\n"
+    "attribute vec2 apos;"
+    "uniform vec2 scale;"
+    "uniform vec2 offset;"
+    "uniform float point_size;"
+    "void main() {"
+      "gl_Position.xy = apos.xy * scale.xy + offset.xy;"
+      "gl_Position.z = 0.0;"
+      "gl_Position.w = 1.0;"
+      "gl_PointSize = point_size;"
+    "}";
+  GLuint vs = compile_shader(GL_VERTEX_SHADER, vertex_shader);
+
+  const char fragment_shader[] =
+    "#version 100\n"
+    "precision lowp float;"
+    "uniform vec4 color;"
+    "void main() {"
+      "gl_FragColor = color;"
+    "}";
+  GLuint fs = compile_shader(GL_FRAGMENT_SHADER, fragment_shader);
+  return create_program(vs, fs, attribute_location);
+}
+
+const Eigen::Vector2d Vector2dInfinity() {
+  return Eigen::Vector2d::Ones() * std::numeric_limits<double>::infinity();
+}
+
+}  // namespace
+
+class WebglLine : public Line {
+ public:
+  WebglLine(GLuint program, size_t buffer_size = 1000000)
+      : color_uniform_location_(glGetUniformLocation(program, "color")),
+        point_size_uniform_location_(
+            glGetUniformLocation(program, "point_size")),
+        line_size_(0) {}
+  virtual ~WebglLine() {}
+  void SetPoints(const std::vector<Eigen::Vector2d> &pts) override {
+    updated_ = true;
+    max_values_ = -Vector2dInfinity();
+    min_values_ = Vector2dInfinity();
+    line_size_ = 0;
+    buffer_.clear();
+    for (const auto &pt : pts) {
+      buffer_.push_back(pt.x());
+      buffer_.push_back(pt.y());
+      max_values_ = max_values_.cwiseMax(pt);
+      min_values_ = min_values_.cwiseMin(pt);
+      ++line_size_;
+    }
+  }
+  void Draw() override {
+    updated_ = false;
+    if (buffer_.empty()) {
+      return;
+    }
+    // TODO(james): Flushing and rewriting the buffer on every line draw seems
+    // like it should be inefficient, but in practice it is fine for the
+    // amounts of data that we are dealing with (a few tens of MB of copies at
+    // most is not that expensive).
+    glBufferData(GL_ARRAY_BUFFER, buffer_.size() * sizeof(float),
+                 buffer_.data(), GL_STATIC_DRAW);
+    glUniform4f(color_uniform_location_, color_.r, color_.g, color_.b, 1.0);
+    glUniform1f(point_size_uniform_location_, point_size_);
+    if (point_size_ != 0) {
+      glDrawArrays(GL_POINTS, 0, line_size_);
+    }
+    if (line_width_ != 0) {
+      glDrawArrays(GL_LINE_STRIP, 0, line_size_);
+    }
+    assert(GL_NO_ERROR == glGetError() && "glDrawArray failed");
+  }
+  void SetColor(const Color &color) override {
+    updated_ = true;
+    color_ = color;
+  }
+
+  Eigen::Vector2d MaxValues() const override { return max_values_; }
+  Eigen::Vector2d MinValues() const override { return min_values_; }
+
+  void SetLineWidth(const float width) override {
+    updated_ = true;
+    line_width_ = width;
+  }
+  void SetPointSize(const float point_size) override {
+    updated_ = true;
+    point_size_ = point_size;
+  }
+
+  bool HasUpdate() override { return updated_; }
+
+ private:
+  const GLuint color_uniform_location_;
+  const GLuint point_size_uniform_location_;
+  std::vector<float> buffer_;
+  size_t line_size_;
+  Color color_;
+  Eigen::Vector2d max_values_ = -Vector2dInfinity();
+  Eigen::Vector2d min_values_ = Vector2dInfinity();
+  float line_width_ = 1.0;
+  float point_size_ = 3.0;
+  bool updated_ = true;
+};
+
+WebglCanvasPlotter::WebglCanvasPlotter(const std::string &canvas_id,
+                                       GLuint attribute_location) {
+  EmscriptenWebGLContextAttributes attr;
+  emscripten_webgl_init_context_attributes(&attr);
+  assert(attr.antialias && "Antialiasing should be enabled by default.");
+  attr.majorVersion = 2;
+  EMSCRIPTEN_WEBGL_CONTEXT_HANDLE ctx =
+      emscripten_webgl_create_context("#canvas", &attr);
+  assert(ctx && "Failed to create WebGL2 context");
+  emscripten_webgl_make_context_current(ctx);
+
+  program_ = CreateLineProgram(attribute_location);
+  scale_uniform_location_ = glGetUniformLocation(program_, "scale");
+  offset_uniform_location_ = glGetUniformLocation(program_, "offset");
+
+  glGenBuffers(1, &gl_buffer_);
+  glUseProgram(program_);
+  glBindBuffer(GL_ARRAY_BUFFER, gl_buffer_);
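+  // Each vertex is two tightly packed floats (x, y), so the stride is
+  // 2 * sizeof(float) = 8 bytes.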
+  glVertexAttribPointer(attribute_location, 2, GL_FLOAT, GL_FALSE, 8, 0);
+  glEnableVertexAttribArray(attribute_location);
+
+  zoom_rectangle_ = std::make_unique<WebglLine>(program_);
+  zoom_rectangle_->SetColor({.r = 1.0, .g = 1.0, .b = 1.0});
+  zoom_rectangle_->SetLineWidth(2.0);
+  zoom_rectangle_->SetPointSize(0.0);
+}
+
+Line *WebglCanvasPlotter::AddLine() {
+  lines_.push_back(std::make_unique<WebglLine>(program_));
+  return lines_.back().get();
+}
+void WebglCanvasPlotter::Undo() {
+  if (old_scales_.empty() || old_offsets_.empty()) {
+    return;
+  }
+  scale_ = old_scales_.back();
+  old_scales_.pop_back();
+  offset_ = old_offsets_.back();
+  old_offsets_.pop_back();
+}
+
+void WebglCanvasPlotter::RecordState() {
+  old_scales_.push_back(scale_);
+  old_offsets_.push_back(offset_);
+}
+
+void WebglCanvasPlotter::SetScale(const Eigen::Vector2d &scale) {
+  scale_ = scale;
+}
+
+Eigen::Vector2d WebglCanvasPlotter::GetScale() const {
+  return scale_;
+}
+
+void WebglCanvasPlotter::SetOffset(const Eigen::Vector2d &offset) {
+  offset_ = offset;
+}
+
+Eigen::Vector2d WebglCanvasPlotter::GetOffset() const {
+  return offset_;
+}
+
+void WebglCanvasPlotter::Redraw() {
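+  // Skip the GL work entirely when neither the view transform nor any line's
+  // data has changed since the last frame.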
+  const bool scaling_update = last_scale_ != scale_ || last_offset_ != offset_;
+  bool data_update = zoom_rectangle_->HasUpdate();
+  for (const auto &line : lines_) {
+    data_update = line->HasUpdate() || data_update;
+  }
+  if (!scaling_update && !data_update) {
+    return;
+  }
+  glUseProgram(program_);
+  glBindBuffer(GL_ARRAY_BUFFER, gl_buffer_);
+  glUniform2f(scale_uniform_location_, scale_.x(), scale_.y());
+  glUniform2f(offset_uniform_location_, offset_.x(), offset_.y());
+  for (const auto &line : lines_) {
+    line->Draw();
+  }
+  zoom_rectangle_->Draw();
+  last_scale_ = scale_;
+  last_offset_ = offset_;
+}
+
+Eigen::Vector2d WebglCanvasPlotter::MaxValues() const {
+  Eigen::Vector2d max = -Vector2dInfinity();
+  for (const auto &line : lines_) {
+    max = max.cwiseMax(line->MaxValues());
+  }
+  return max;
+}
+Eigen::Vector2d WebglCanvasPlotter::MinValues() const {
+  Eigen::Vector2d min = Vector2dInfinity();
+  for (const auto &line : lines_) {
+    min = min.cwiseMin(line->MinValues());
+  }
+  return min;
+}
+
+void WebglCanvasPlotter::ClearZoomRectangle() {
+  zoom_rectangle_->SetPoints({});
+}
+
+
+void WebglCanvasPlotter::SetZoomRectangle(const Eigen::Vector2d &corner1,
+                                          const Eigen::Vector2d &corner2) {
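+  // Trace the rectangle as a closed line strip that ends back at corner1.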
+  zoom_rectangle_->SetPoints({corner1,
+                              {corner1.x(), corner2.y()},
+                              corner2,
+                              {corner2.x(), corner1.y()},
+                              corner1});
+}
+
+}  // namespace plotting
+}  // namespace frc971
diff --git a/frc971/analysis/plotting/webgl2_plotter.h b/frc971/analysis/plotting/webgl2_plotter.h
new file mode 100644
index 0000000..6da086c
--- /dev/null
+++ b/frc971/analysis/plotting/webgl2_plotter.h
@@ -0,0 +1,88 @@
+#ifndef FRC971_ANALYSIS_PLOTTING_WEBGL2_PLOTTER_H_
+#define FRC971_ANALYSIS_PLOTTING_WEBGL2_PLOTTER_H_
+
+#include <vector>
+
+#include <Eigen/Dense>
+#define GL_GLEXT_PROTOTYPES
+#include <GLES3/gl3.h>
+#include <GLES3/gl2ext.h>
+#include <GLES3/gl32.h>
+
+namespace frc971 {
+namespace plotting {
+
+struct Color {
+  float r;
+  float g;
+  float b;
+};
+
+class Line {
+ public:
+  virtual ~Line() {}
+  virtual void SetPoints(const std::vector<Eigen::Vector2d> &pts) = 0;
+  virtual void SetColor(const Color &color) = 0;
+  virtual void Draw() = 0;
+  virtual Eigen::Vector2d MaxValues() const = 0;
+  virtual Eigen::Vector2d MinValues() const = 0;
+  virtual void SetLineWidth(const float width) = 0;
+  virtual void SetPointSize(const float point_size) = 0;
+  virtual bool HasUpdate() = 0;
+};
+
+// TODO(james): Actually do something with this interface; originally, I'd meant
+// to look at writing some tests, but right now it's just extra boilerplate.
+class Plotter {
+ public:
+  virtual Line *AddLine() = 0;
+  virtual void SetScale(const Eigen::Vector2d &scale) = 0;
+  virtual Eigen::Vector2d GetScale() const = 0;
+  virtual void SetOffset(const Eigen::Vector2d &offset) = 0;
+  virtual Eigen::Vector2d GetOffset() const = 0;
+  virtual void Redraw() = 0;
+  virtual Eigen::Vector2d MaxValues() const = 0;
+  virtual Eigen::Vector2d MinValues() const = 0;
+  virtual void ClearZoomRectangle() = 0;
+  virtual void SetZoomRectangle(const Eigen::Vector2d &corner1,
+                                const Eigen::Vector2d &corner2) = 0;
+  virtual void RecordState() = 0;
+  virtual void Undo() = 0;
+};
+
+class WebglCanvasPlotter : public Plotter {
+ public:
+  WebglCanvasPlotter(const std::string &canvas_id,
+                     GLuint attribute_location = 0);
+  Line *AddLine() override;
+  void SetScale(const Eigen::Vector2d &scale) override;
+  Eigen::Vector2d GetScale() const override;
+  void SetOffset(const Eigen::Vector2d &offset) override;
+  Eigen::Vector2d GetOffset() const override;
+  void Redraw() override;
+  Eigen::Vector2d MaxValues() const override;
+  Eigen::Vector2d MinValues() const override;
+  void ClearZoomRectangle() override;
+  void SetZoomRectangle(const Eigen::Vector2d &corner1,
+                        const Eigen::Vector2d &corner2) override;
+  void RecordState() override;
+  void Undo() override;
+
+ private:
+  std::vector<std::unique_ptr<Line>> lines_;
+  std::unique_ptr<Line> zoom_rectangle_;
+  Eigen::Vector2d scale_{1.0, 1.0};
+  Eigen::Vector2d offset_{0.0, 0.0};
+  std::vector<Eigen::Vector2d> old_scales_;
+  std::vector<Eigen::Vector2d> old_offsets_;
+  Eigen::Vector2d last_scale_{1.0, 1.0};
+  Eigen::Vector2d last_offset_{0.0, 0.0};
+  GLuint program_;
+  GLuint scale_uniform_location_;
+  GLuint offset_uniform_location_;
+  GLuint gl_buffer_;
+};
+
+}  // namespace plotting
+}  // namespace frc971
+#endif  // FRC971_ANALYSIS_PLOTTING_WEBGL2_PLOTTER_H_
diff --git a/frc971/analysis/py_log_reader.cc b/frc971/analysis/py_log_reader.cc
new file mode 100644
index 0000000..5a6fce4
--- /dev/null
+++ b/frc971/analysis/py_log_reader.cc
@@ -0,0 +1,283 @@
+// This file provides a Python module for reading logfiles. See
+// log_reader_test.py for usage.
+//
+// This reader works by having the user specify exactly which channels they
+// want data for. We then process the logfile and store all the data on those
+// channels as lists of timestamps + JSON message data. The user can then use
+// an accessor method (get_data_for_channel) to retrieve the cached data.
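+//
+// A rough usage sketch (the channel name/type below are placeholders; see
+// log_reader_test.py for the authoritative example):
+//   reader = LogReader("path/to/logfile")
+//   reader.subscribe("/some/channel", "some.message.Type")
+//   reader.process()
+//   data = reader.get_data_for_channel("/some/channel", "some.message.Type")
+//   # data is a list of (monotonic_nsec, realtime_nsec, json_string) tuples.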
+
+// The Python documentation recommends defining PY_SSIZE_T_CLEAN before
+// including Python.h.
+#define PY_SSIZE_T_CLEAN
+// Note that Python.h needs to be included before anything else.
+#include <Python.h>
+
+#include <memory>
+
+#include "aos/configuration.h"
+#include "aos/events/logging/logger.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/json_to_flatbuffer.h"
+
+namespace frc971 {
+namespace analysis {
+namespace {
+
+// All the data corresponding to a single message.
+struct MessageData {
+  aos::monotonic_clock::time_point monotonic_sent_time;
+  aos::realtime_clock::time_point realtime_sent_time;
+  // JSON representation of the message.
+  std::string json_data;
+};
+
+// Data corresponding to an entire channel.
+struct ChannelData {
+  std::string name;
+  std::string type;
+  // Each message published on the channel, in order by monotonic time.
+  std::vector<MessageData> messages;
+};
+
+// All the objects that we need for managing reading a logfile.
+struct LogReaderTools {
+  std::unique_ptr<aos::logger::LogReader> reader;
+  std::unique_ptr<aos::SimulatedEventLoopFactory> event_loop_factory;
+  // Event loop to use for subscribing to channels.
+  std::unique_ptr<aos::EventLoop> event_loop;
+  std::vector<ChannelData> channel_data;
+  // Whether we have called process() on the reader yet.
+  bool processed = false;
+};
+
+struct LogReaderType {
+  PyObject_HEAD;
+  LogReaderTools *tools = nullptr;
+};
+
+void LogReader_dealloc(LogReaderType *self) {
+  LogReaderTools *tools = self->tools;
+  if (!tools->processed) {
+    tools->reader->Deregister();
+  }
+  delete tools;
+  Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+PyObject *LogReader_new(PyTypeObject *type, PyObject * /*args*/,
+                        PyObject * /*kwds*/) {
+  LogReaderType *self;
+  self = (LogReaderType *)type->tp_alloc(type, 0);
+  if (self != nullptr) {
+    self->tools = new LogReaderTools();
+    if (self->tools == nullptr) {
+      return nullptr;
+    }
+  }
+  return (PyObject *)self;
+}
+
+int LogReader_init(LogReaderType *self, PyObject *args, PyObject *kwds) {
+  const char *kwlist[] = {"log_file_name", nullptr};
+
+  const char *log_file_name;
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "s", const_cast<char **>(kwlist),
+                                   &log_file_name)) {
+    return -1;
+  }
+
+  LogReaderTools *tools = CHECK_NOTNULL(self->tools);
+  tools->reader = std::make_unique<aos::logger::LogReader>(log_file_name);
+  tools->event_loop_factory = std::make_unique<aos::SimulatedEventLoopFactory>(
+      tools->reader->configuration());
+  tools->reader->Register(tools->event_loop_factory.get());
+
+  tools->event_loop = tools->event_loop_factory->MakeEventLoop("data_fetcher");
+  tools->event_loop->SkipTimingReport();
+
+  return 0;
+}
+
+PyObject *LogReader_get_data_for_channel(LogReaderType *self, PyObject *args,
+                                         PyObject *kwds) {
+  const char *kwlist[] = {"name", "type", nullptr};
+
+  const char *name;
+  const char *type;
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "ss",
+                                   const_cast<char **>(kwlist), &name, &type)) {
+    return nullptr;
+  }
+
+  LogReaderTools *tools = CHECK_NOTNULL(self->tools);
+
+  if (!tools->processed) {
+    PyErr_SetString(PyExc_RuntimeError,
+                    "Called get_data_for_channel before calling process().");
+    return nullptr;
+  }
+
+  for (const auto &channel : tools->channel_data) {
+    if (channel.name == name && channel.type == type) {
+      PyObject *list = PyList_New(channel.messages.size());
+      for (size_t ii = 0; ii < channel.messages.size(); ++ii) {
+        const auto &message = channel.messages[ii];
+        PyObject *monotonic_time = PyLong_FromLongLong(
+            std::chrono::duration_cast<std::chrono::nanoseconds>(
+                message.monotonic_sent_time.time_since_epoch())
+                .count());
+        PyObject *realtime_time = PyLong_FromLongLong(
+            std::chrono::duration_cast<std::chrono::nanoseconds>(
+                message.realtime_sent_time.time_since_epoch())
+                .count());
+        PyObject *json_data = PyUnicode_FromStringAndSize(
+            message.json_data.data(), message.json_data.size());
+        PyObject *entry =
+            PyTuple_Pack(3, monotonic_time, realtime_time, json_data);
+        if (PyList_SetItem(list, ii, entry) != 0) {
+          return nullptr;
+        }
+      }
+      return list;
+    }
+  }
+  PyErr_SetString(PyExc_ValueError,
+                  "The provided channel was never subscribed to.");
+  return nullptr;
+}
+
+PyObject *LogReader_subscribe(LogReaderType *self, PyObject *args,
+                              PyObject *kwds) {
+  const char *kwlist[] = {"name", "type", nullptr};
+
+  const char *name;
+  const char *type;
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "ss",
+                                   const_cast<char **>(kwlist), &name, &type)) {
+    return nullptr;
+  }
+
+  LogReaderTools *tools = CHECK_NOTNULL(self->tools);
+
+  if (tools->processed) {
+    PyErr_SetString(PyExc_RuntimeError,
+                    "Called subscribe after calling process().");
+    return nullptr;
+  }
+
+  const aos::Channel *const channel = aos::configuration::GetChannel(
+      tools->reader->configuration(), name, type, "", nullptr);
+  if (channel == nullptr) {
+    // Use Py_RETURN_FALSE so the shared False singleton gets its refcount
+    // incremented before being handed back to Python.
+    Py_RETURN_FALSE;
+  }
+  const int index = tools->channel_data.size();
+  tools->channel_data.push_back(
+      {.name = name, .type = type, .messages = {}});
+  tools->event_loop->MakeRawWatcher(
+      channel, [channel, index, tools](const aos::Context &context,
+                                       const void *message) {
+        tools->channel_data[index].messages.push_back(
+            {.monotonic_sent_time = context.monotonic_event_time,
+             .realtime_sent_time = context.realtime_event_time,
+             .json_data = aos::FlatbufferToJson(
+                 channel->schema(), static_cast<const uint8_t *>(message))});
+      });
+  Py_RETURN_TRUE;
+}
+
+static PyObject *LogReader_process(LogReaderType *self,
+                                   PyObject *Py_UNUSED(ignored)) {
+  LogReaderTools *tools = CHECK_NOTNULL(self->tools);
+
+  if (tools->processed) {
+    PyErr_SetString(PyExc_RuntimeError, "process() may only be called once.");
+    return nullptr;
+  }
+
+  tools->processed = true;
+
+  tools->event_loop_factory->Run();
+  tools->reader->Deregister();
+
+  Py_RETURN_NONE;
+}
+
+static PyObject *LogReader_configuration(LogReaderType *self,
+                                         PyObject *Py_UNUSED(ignored)) {
+  LogReaderTools *tools = CHECK_NOTNULL(self->tools);
+
+  // I have no clue if the Configuration that we get from the log reader is in a
+  // contiguous chunk of memory, and I'm too lazy to either figure it out or
+  // figure out how to extract the actual data buffer + offset.
+  // Instead, copy the flatbuffer and return a copy of the new buffer.
+  aos::FlatbufferDetachedBuffer<aos::Configuration> buffer =
+      aos::CopyFlatBuffer(tools->reader->configuration());
+
+  return PyBytes_FromStringAndSize(
+      reinterpret_cast<const char *>(buffer.data()), buffer.size());
+}
+
+static PyMethodDef LogReader_methods[] = {
+    {"configuration", (PyCFunction)LogReader_configuration, METH_NOARGS,
+     "Return a bytes buffer for the Configuration of the logfile."},
+    {"process", (PyCFunction)LogReader_process, METH_NOARGS,
+     "Processes the logfile and all the subscribed to channels."},
+    {"subscribe", (PyCFunction)LogReader_subscribe,
+     METH_VARARGS | METH_KEYWORDS,
+     "Attempts to subscribe to the provided channel name + type. Returns True "
+     "if successful."},
+    {"get_data_for_channel", (PyCFunction)LogReader_get_data_for_channel,
+     METH_VARARGS | METH_KEYWORDS,
+     "Returns the logged data for a given channel. Raises an exception if you "
+     "did not subscribe to the provided channel. Returned data is a list of "
+     "tuples where each tuple is of the form (monotonic_nsec, realtime_nsec, "
+     "json_message_data)."},
+    {nullptr, 0, 0, nullptr} /* Sentinel */
+};
+
+static PyTypeObject LogReaderType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    .tp_name = "py_log_reader.LogReader",
+    .tp_doc = "LogReader objects",
+    .tp_basicsize = sizeof(LogReaderType),
+    .tp_itemsize = 0,
+    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+    .tp_new = LogReader_new,
+    .tp_init = (initproc)LogReader_init,
+    .tp_dealloc = (destructor)LogReader_dealloc,
+    .tp_methods = LogReader_methods,
+};
+
+static PyModuleDef log_reader_module = {
+    PyModuleDef_HEAD_INIT,
+    .m_name = "py_log_reader",
+    .m_doc = "Example module that creates an extension type.",
+    .m_size = -1,
+};
+
+PyObject *InitModule() {
+  PyObject *m;
+  if (PyType_Ready(&LogReaderType) < 0) return nullptr;
+
+  m = PyModule_Create(&log_reader_module);
+  if (m == nullptr) return nullptr;
+
+  Py_INCREF(&LogReaderType);
+  if (PyModule_AddObject(m, "LogReader", (PyObject *)&LogReaderType) < 0) {
+    Py_DECREF(&LogReaderType);
+    Py_DECREF(m);
+    return nullptr;
+  }
+
+  return m;
+}
+
+}  // namespace
+}  // namespace analysis
+}  // namespace frc971
+
+PyMODINIT_FUNC PyInit_py_log_reader(void) {
+  return frc971::analysis::InitModule();
+}
diff --git a/frc971/control_loops/python/BUILD b/frc971/control_loops/python/BUILD
index b3aa2c0..50764c7 100644
--- a/frc971/control_loops/python/BUILD
+++ b/frc971/control_loops/python/BUILD
@@ -11,7 +11,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -59,7 +59,7 @@
     deps = [
         ":controls",
         ":python_init",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -72,7 +72,7 @@
     deps = [
         ":controls",
         ":python_init",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -133,7 +133,7 @@
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
         "//y2016/control_loops/python:polydrivetrain_lib",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -146,7 +146,7 @@
         ":controls",
         "//aos/util:py_trapezoid_profile",
         "//frc971/control_loops:python_init",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -159,7 +159,7 @@
         ":controls",
         "//aos/util:py_trapezoid_profile",
         "//frc971/control_loops:python_init",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
diff --git a/motors/python/BUILD b/motors/python/BUILD
index d5d74ae..5eafc3a 100644
--- a/motors/python/BUILD
+++ b/motors/python/BUILD
@@ -13,7 +13,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
diff --git a/third_party/eigen/BUILD b/third_party/eigen/BUILD
index 54c5cd1..f3036f2 100644
--- a/third_party/eigen/BUILD
+++ b/third_party/eigen/BUILD
@@ -20,7 +20,7 @@
     ) + ["unsupported/Eigen/MatrixFunctions"] + glob([
         "unsupported/Eigen/src/MatrixFunctions/*.h",
     ]),
-    compatible_with = mcu_cpus,
+    compatible_with = mcu_cpus + ["@//tools:web"],
     includes = ["."],
     visibility = ["//visibility:public"],
 )
diff --git a/y2014/control_loops/python/BUILD b/y2014/control_loops/python/BUILD
index 5216eb9..cba8cea 100644
--- a/y2014/control_loops/python/BUILD
+++ b/y2014/control_loops/python/BUILD
@@ -60,7 +60,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -76,7 +76,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -92,7 +92,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
diff --git a/y2016/control_loops/python/BUILD b/y2016/control_loops/python/BUILD
index c49a250..65964fd 100644
--- a/y2016/control_loops/python/BUILD
+++ b/y2016/control_loops/python/BUILD
@@ -61,7 +61,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -94,7 +94,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -111,7 +111,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -140,7 +140,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -157,7 +157,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
diff --git a/y2017/control_loops/python/BUILD b/y2017/control_loops/python/BUILD
index 50f5584..6afb242 100644
--- a/y2017/control_loops/python/BUILD
+++ b/y2017/control_loops/python/BUILD
@@ -58,7 +58,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -106,7 +106,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -123,7 +123,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
@@ -168,7 +168,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )
 
diff --git a/y2018/control_loops/python/BUILD b/y2018/control_loops/python/BUILD
index 5690b08..ba34d86 100644
--- a/y2018/control_loops/python/BUILD
+++ b/y2018/control_loops/python/BUILD
@@ -94,7 +94,7 @@
         "//external:python-gflags",
         "//external:python-glog",
         "//frc971/control_loops/python:controls",
-        "@matplotlib",
+        "@matplotlib_repo//:matplotlib2.7",
     ],
 )