Merge "Add a help message to ArmUI"
diff --git a/WORKSPACE b/WORKSPACE
index 9bb5f38..698c066 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -769,9 +769,9 @@
     deps = ["@//third_party/allwpilib/wpimath"],
 )
 """,
-    sha256 = "7ffc54bf40814a5c101ea3159af15215f15087298cfc2ae65826f987ccf65499",
+    sha256 = "decff0a28fa4a167696cc2e1122b6a5acd2fef01d3bfd356d8cad25bb487a191",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.5/api-cpp-23.0.5-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.10/api-cpp-23.0.10-headers.zip",
     ],
 )
 
@@ -793,9 +793,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "1e8a487cb538388de437d04985512533a9dea79e6c56ee0f319c5eb80260fcab",
+    sha256 = "00aea02c583d109056e2716e73b7d70e84d5c56a6daebd1dc9f4612c430894f8",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.5/api-cpp-23.0.5-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/api-cpp/23.0.10/api-cpp-23.0.10-linuxathena.zip",
     ],
 )
 
@@ -808,9 +808,9 @@
     hdrs = glob(['ctre/**/*.h', 'ctre/**/*.hpp']),
 )
 """,
-    sha256 = "51c52dfce4c2491887a7b7380e2f17e93a4092b6ac9f60d716738447a8ebedd7",
+    sha256 = "3d503df97b711c150c0b50238f644c528e55d5b82418c8e3970c79faa14b749c",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.5/tools-23.0.5-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.10/tools-23.0.10-headers.zip",
     ],
 )
 
@@ -832,9 +832,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "9fb137321745c1eff63bdcfe486806afb46ede11ea4d4c59461320845698cc1e",
+    sha256 = "4ada1ed9e11c208da9e8a8e8a648a0fe426e6717121ebc2f1392ae3ddc7f2b8c",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.5/tools-23.0.5-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenixpro/tools/23.0.10/tools-23.0.10-linuxathena.zip",
     ],
 )
 
@@ -847,9 +847,9 @@
     hdrs = glob(['ctre/phoenix/**/*.h']),
 )
 """,
-    sha256 = "93cc41c53e98bbcd5db7b0631ab95a7de7744527d5847d2e795e6c8acec46bf8",
+    sha256 = "0f38d570949a4e8833aa6ab5a9fa0caf232344d96674d1e4ae342c63a47bdf2a",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/api-cpp/5.30.2/api-cpp-5.30.2-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/api-cpp/5.30.4/api-cpp-5.30.4-headers.zip",
     ],
 )
 
@@ -871,9 +871,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "63889beeeaac8bbef2573d23f1a9500b6382d28ab91c78f3605b6b624c27d68e",
+    sha256 = "1ba6c3a17a644bb7f9643faf5ba6cc6d20e43991fbfffb58c8f0d3e780f3a2bc",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/api-cpp/5.30.2/api-cpp-5.30.2-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/api-cpp/5.30.4/api-cpp-5.30.4-linuxathena.zip",
     ],
 )
 
@@ -886,9 +886,9 @@
     hdrs = glob(['ctre/phoenix/**/*.h']),
 )
 """,
-    sha256 = "d41dd70aa4397cba934292e636c90511e571a56971f696348851fcd3bb88894d",
+    sha256 = "c6be4d8472dabe57889ca14deee22487a6ae964f7e21ad4b7adfa2d524980614",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/cci/5.30.2/cci-5.30.2-headers.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/cci/5.30.4/cci-5.30.4-headers.zip",
     ],
 )
 
@@ -910,9 +910,9 @@
     target_compatible_with = ['@//tools/platforms/hardware:roborio'],
 )
 """,
-    sha256 = "b01f78b74ffcf01f48636dca894942e801ec6eac3daadcea7d65c4b74a80a056",
+    sha256 = "e4f31ac2a08360f2d5061cdf4d288f95379f2286fcd6736def384723d2d69f24",
     urls = [
-        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/cci/5.30.2/cci-5.30.2-linuxathena.zip",
+        "https://maven.ctr-electronics.com/release/com/ctre/phoenix/cci/5.30.4/cci-5.30.4-linuxathena.zip",
     ],
 )
 
diff --git a/aos/aos_cli_utils.cc b/aos/aos_cli_utils.cc
index a4ea95e..bd54e38 100644
--- a/aos/aos_cli_utils.cc
+++ b/aos/aos_cli_utils.cc
@@ -17,6 +17,11 @@
 DEFINE_bool(
     _bash_autocomplete, false,
     "Internal use: Outputs channel list for use with autocomplete script.");
+
+DEFINE_bool(_zsh_compatability, false,
+            "Internal use: Force completion to complete either channels or "
+            "message_types; zsh doesn't handle spaces well.");
+
 DEFINE_string(_bash_autocomplete_word, "",
               "Internal use: Current word being autocompleted");
 
@@ -195,7 +200,9 @@
           // Otherwise, since the message type is populated yet not being edited,
           // the user must be editing the channel name alone, in which case only
           // suggest channel names, not pairs.
-          if (message_type.empty()) {
+          // If the _zsh_compatability flag is set, don't return
+          // pairs of values.
+          if (!FLAGS__zsh_compatability && message_type.empty()) {
             std::cout << '\'' << channel->name()->c_str() << ' '
                       << channel->type()->c_str() << "' ";
           } else {
diff --git a/aos/events/BUILD b/aos/events/BUILD
index ad3258c..08e3998 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -396,6 +396,7 @@
 cc_test(
     name = "shm_event_loop_test",
     srcs = ["shm_event_loop_test.cc"],
+    flaky = True,
     shard_count = 24,
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 364cabb..07a751c 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -55,6 +55,7 @@
         "log_replayer.cc",
     ],
     target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
     deps = [
         ":log_reader",
         ":log_reader_utils",
@@ -505,6 +506,30 @@
     ],
 )
 
+cc_library(
+    name = "multinode_logger_test_lib",
+    testonly = True,
+    srcs = [
+        "multinode_logger_test_lib.cc",
+    ],
+    hdrs = ["multinode_logger_test_lib.h"],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":log_reader",
+        ":log_writer",
+        ":snappy_encoder",
+        "//aos/events:message_counter",
+        "//aos/events:ping_lib",
+        "//aos/events:pong_lib",
+        "//aos/events:simulated_event_loop",
+        "//aos/network:testing_time_converter",
+        "//aos/testing:googletest",
+        "//aos/testing:path",
+        "//aos/testing:tmpdir",
+    ],
+)
+
 aos_config(
     name = "multinode_pingpong_split_config",
     src = "multinode_pingpong_split.json",
@@ -617,8 +642,8 @@
 )
 
 cc_test(
-    name = "logger_test",
-    srcs = ["logger_test.cc"],
+    name = "multinode_logger_test",
+    srcs = ["multinode_logger_test.cc"],
     copts = select({
         "//tools:cpu_k8": ["-DLZMA=1"],
         "//tools:cpu_arm64": ["-DLZMA=1"],
@@ -636,6 +661,24 @@
     shard_count = 10,
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
+        ":multinode_logger_test_lib",
+    ],
+)
+
+cc_test(
+    name = "logger_test",
+    srcs = ["logger_test.cc"],
+    copts = select({
+        "//tools:cpu_k8": ["-DLZMA=1"],
+        "//tools:cpu_arm64": ["-DLZMA=1"],
+        "//conditions:default": [],
+    }),
+    data = [
+        "//aos/events:pingpong_config",
+    ],
+    shard_count = 10,
+    target_compatible_with = ["@platforms//os:linux"],
+    deps = [
         ":log_reader",
         ":log_writer",
         ":snappy_encoder",
diff --git a/aos/events/logging/log_reader.cc b/aos/events/logging/log_reader.cc
index d3ce16b..fb63e79 100644
--- a/aos/events/logging/log_reader.cc
+++ b/aos/events/logging/log_reader.cc
@@ -617,8 +617,8 @@
         filtered_parts.size() == 0u
             ? nullptr
             : std::make_unique<TimestampMapper>(std::move(filtered_parts)),
-        filters_.get(), node, State::ThreadedBuffering::kNo,
-        MaybeMakeReplayChannelIndicies(node));
+        filters_.get(), std::bind(&LogReader::NoticeRealtimeEnd, this), node,
+        State::ThreadedBuffering::kNo, MaybeMakeReplayChannelIndicies(node));
     State *state = states_[node_index].get();
     state->SetNodeEventLoopFactory(
         event_loop_factory_->GetNodeEventLoopFactory(node),
@@ -808,8 +808,8 @@
         filtered_parts.size() == 0u
             ? nullptr
             : std::make_unique<TimestampMapper>(std::move(filtered_parts)),
-        filters_.get(), node, State::ThreadedBuffering::kYes,
-        MaybeMakeReplayChannelIndicies(node));
+        filters_.get(), std::bind(&LogReader::NoticeRealtimeEnd, this), node,
+        State::ThreadedBuffering::kYes, MaybeMakeReplayChannelIndicies(node));
     State *state = states_[node_index].get();
 
     state->SetChannelCount(logged_configuration()->channels()->size());
@@ -964,7 +964,7 @@
       VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
       if (exit_on_finish_ && live_nodes_ == 0 &&
           event_loop_factory_ != nullptr) {
-        CHECK_NOTNULL(event_loop_factory_)->Exit();
+        event_loop_factory_->Exit();
       }
       return;
     }
@@ -1233,6 +1233,7 @@
     }
     if (end_time_ != realtime_clock::max_time) {
       state->SetEndTimeFlag(end_time_);
+      ++live_nodes_with_realtime_time_end_;
     }
     event_loop->OnRun([state]() {
       BootTimestamp next_time = state->SingleThreadedOldestMessageTime();
@@ -1745,9 +1746,11 @@
 LogReader::State::State(
     std::unique_ptr<TimestampMapper> timestamp_mapper,
     message_bridge::MultiNodeNoncausalOffsetEstimator *multinode_filters,
-    const Node *node, LogReader::State::ThreadedBuffering threading,
+    std::function<void()> notice_realtime_end, const Node *node,
+    LogReader::State::ThreadedBuffering threading,
     std::unique_ptr<const ReplayChannelIndicies> replay_channel_indicies)
     : timestamp_mapper_(std::move(timestamp_mapper)),
+      notice_realtime_end_(notice_realtime_end),
       node_(node),
       multinode_filters_(multinode_filters),
       threading_(threading),
@@ -2347,6 +2350,8 @@
   if (!stopped_ && started_) {
     RunOnEnd();
     SetFoundLastMessage(true);
+    CHECK(notice_realtime_end_);
+    notice_realtime_end_();
   }
 }
 
@@ -2372,5 +2377,14 @@
   event_loop_factory_->SetRealtimeReplayRate(replay_rate);
 }
 
+void LogReader::NoticeRealtimeEnd() {
+  CHECK_GE(live_nodes_with_realtime_time_end_, 1u);
+  --live_nodes_with_realtime_time_end_;
+  if (live_nodes_with_realtime_time_end_ == 0 && exit_on_finish() &&
+      event_loop_factory_ != nullptr) {
+    event_loop_factory_->Exit();
+  }
+}
+
 }  // namespace logger
 }  // namespace aos
diff --git a/aos/events/logging/log_reader.h b/aos/events/logging/log_reader.h
index 0d50fb9..fd5a935 100644
--- a/aos/events/logging/log_reader.h
+++ b/aos/events/logging/log_reader.h
@@ -260,6 +260,7 @@
   void set_exit_on_finish(bool exit_on_finish) {
     exit_on_finish_ = exit_on_finish;
   }
+  bool exit_on_finish() const { return exit_on_finish_; }
 
   // Sets the realtime replay rate. A value of 1.0 will cause the scheduler to
   // try to play events in realtime. 0.5 will run at half speed. Use infinity
@@ -289,6 +290,10 @@
                : logged_configuration()->nodes()->size();
   }
 
+  // Handles when an individual node hits the realtime end time, exiting the
+  // entire event loop once all nodes are stopped.
+  void NoticeRealtimeEnd();
+
   const std::vector<LogFile> log_files_;
 
   // Class to manage sending RemoteMessages on the provided node after the
@@ -343,7 +348,8 @@
     enum class ThreadedBuffering { kYes, kNo };
     State(std::unique_ptr<TimestampMapper> timestamp_mapper,
           message_bridge::MultiNodeNoncausalOffsetEstimator *multinode_filters,
-          const Node *node, ThreadedBuffering threading,
+          std::function<void()> notice_realtime_end, const Node *node,
+          ThreadedBuffering threading,
           std::unique_ptr<const ReplayChannelIndicies> replay_channel_indicies);
 
     // Connects up the timestamp mappers.
@@ -667,6 +673,9 @@
     NodeEventLoopFactory *node_event_loop_factory_ = nullptr;
     SimulatedEventLoopFactory *event_loop_factory_ = nullptr;
 
+    // Callback for when this node hits its realtime end time.
+    std::function<void()> notice_realtime_end_;
+
     std::unique_ptr<EventLoop> event_loop_unique_ptr_;
     // Event loop.
     const Node *node_ = nullptr;
@@ -785,6 +794,10 @@
   // when to exit.
   size_t live_nodes_ = 0;
 
+  // Similar counter to live_nodes_, but for tracking which individual nodes are
+  // running and have yet to hit the realtime end time, if any.
+  size_t live_nodes_with_realtime_time_end_ = 0;
+
   const Configuration *remapped_configuration_ = nullptr;
   const Configuration *replay_configuration_ = nullptr;
 
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index cdf080b..80b3a4a 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -27,30 +27,14 @@
 namespace logger {
 namespace testing {
 
-using aos::testing::ArtifactPath;
-
 namespace chrono = std::chrono;
 using aos::message_bridge::RemoteMessage;
+using aos::testing::ArtifactPath;
 using aos::testing::MessageCounter;
 
 constexpr std::string_view kSingleConfigSha256(
     "bbe1b563139273b23a5405eebc2f2740cefcda5f96681acd0a84b8ff9ab93ea4");
 
-std::vector<std::vector<std::string>> ToLogReaderVector(
-    const std::vector<LogFile> &log_files) {
-  std::vector<std::vector<std::string>> result;
-  for (const LogFile &log_file : log_files) {
-    for (const LogParts &log_parts : log_file.parts) {
-      std::vector<std::string> parts;
-      for (const std::string &part : log_parts.parts) {
-        parts.emplace_back(part);
-      }
-      result.emplace_back(std::move(parts));
-    }
-  }
-  return result;
-}
-
 class LoggerTest : public ::testing::Test {
  public:
   LoggerTest()
@@ -538,4250 +522,6 @@
   EXPECT_EQ(replay_count, sent_messages);
 }
 
-struct CompressionParams {
-  std::string_view extension;
-  std::function<std::unique_ptr<DataEncoder>(size_t max_message_size)>
-      encoder_factory;
-};
-
-std::ostream &operator<<(std::ostream &ostream,
-                         const CompressionParams &params) {
-  ostream << "\"" << params.extension << "\"";
-  return ostream;
-}
-
-std::vector<CompressionParams> SupportedCompressionAlgorithms() {
-  return {{"",
-           [](size_t max_message_size) {
-             return std::make_unique<DummyEncoder>(max_message_size);
-           }},
-          {SnappyDecoder::kExtension,
-           [](size_t max_message_size) {
-             return std::make_unique<SnappyEncoder>(max_message_size, 32768);
-           }},
-#ifdef LZMA
-          {LzmaDecoder::kExtension,
-           [](size_t max_message_size) {
-             return std::make_unique<LzmaEncoder>(max_message_size, 3);
-           }}
-#endif  // LZMA
-  };
-}
-
-// Parameters to run all the tests with.
-struct ConfigParams {
-  // The config file to use.
-  std::string config;
-  // If true, the RemoteMessage channel should be shared between all the remote
-  // channels.  If false, there will be 1 RemoteMessage channel per remote
-  // channel.
-  bool shared;
-  // sha256 of the config.
-  std::string_view sha256;
-  // sha256 of the relogged config
-  std::string_view relogged_sha256;
-};
-
-std::ostream &operator<<(std::ostream &ostream, const ConfigParams &params) {
-  ostream << "{config: \"" << params.config << "\", shared: " << params.shared
-          << ", sha256: \"" << params.sha256 << "\", relogged_sha256: \""
-          << params.relogged_sha256 << "\"}";
-  return ostream;
-}
-
-struct LoggerState {
-  static LoggerState MakeLogger(NodeEventLoopFactory *node,
-                                SimulatedEventLoopFactory *factory,
-                                CompressionParams params,
-                                const Configuration *configuration = nullptr) {
-    if (configuration == nullptr) {
-      configuration = factory->configuration();
-    }
-    return {node->MakeEventLoop("logger"),
-            {},
-            configuration,
-            configuration::GetNode(configuration, node->node()),
-            nullptr,
-            params};
-  }
-
-  void StartLogger(std::string logfile_base) {
-    CHECK(!logfile_base.empty());
-
-    logger = std::make_unique<Logger>(event_loop.get(), configuration);
-    logger->set_polling_period(std::chrono::milliseconds(100));
-    logger->set_name(
-        absl::StrCat("name_prefix_", event_loop->node()->name()->str()));
-    logger->set_logger_sha1(
-        absl::StrCat("logger_sha1_", event_loop->node()->name()->str()));
-    logger->set_logger_version(
-        absl::StrCat("logger_version_", event_loop->node()->name()->str()));
-    event_loop->OnRun([this, logfile_base]() {
-      std::unique_ptr<MultiNodeLogNamer> namer =
-          std::make_unique<MultiNodeLogNamer>(logfile_base, configuration,
-                                              event_loop.get(), node);
-      namer->set_extension(params.extension);
-      namer->set_encoder_factory(params.encoder_factory);
-      log_namer = namer.get();
-
-      logger->StartLogging(std::move(namer));
-    });
-  }
-
-  std::unique_ptr<EventLoop> event_loop;
-  std::unique_ptr<Logger> logger;
-  const Configuration *configuration;
-  const Node *node;
-  MultiNodeLogNamer *log_namer;
-  CompressionParams params;
-
-  void AppendAllFilenames(std::vector<std::string> *filenames) {
-    for (const std::string &file : log_namer->all_filenames()) {
-      const std::string_view separator =
-          log_namer->base_name().back() == '/' ? "" : "_";
-      filenames->emplace_back(
-          absl::StrCat(log_namer->base_name(), separator, file));
-    }
-  }
-
-  ~LoggerState() {
-    if (logger) {
-      std::vector<std::string> filenames;
-      AppendAllFilenames(&filenames);
-      std::sort(filenames.begin(), filenames.end());
-      for (const std::string &file : filenames) {
-        LOG(INFO) << "Wrote to " << file;
-        auto x = ReadHeader(file);
-        if (x) {
-          VLOG(1) << aos::FlatbufferToJson(x.value());
-        }
-      }
-    }
-  }
-};
-
-std::vector<std::pair<std::vector<realtime_clock::time_point>,
-                      std::vector<realtime_clock::time_point>>>
-ConfirmReadable(
-    const std::vector<std::string> &files,
-    realtime_clock::time_point start_time = realtime_clock::min_time,
-    realtime_clock::time_point end_time = realtime_clock::max_time) {
-  {
-    LogReader reader(SortParts(files));
-
-    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-    reader.Register(&log_reader_factory);
-
-    log_reader_factory.Run();
-
-    reader.Deregister();
-  }
-  {
-    std::vector<std::pair<std::vector<realtime_clock::time_point>,
-                          std::vector<realtime_clock::time_point>>>
-        result;
-    LogReader reader(SortParts(files));
-
-    reader.SetStartTime(start_time);
-    reader.SetEndTime(end_time);
-
-    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-    reader.RegisterWithoutStarting(&log_reader_factory);
-    result.resize(
-        configuration::NodesCount(log_reader_factory.configuration()));
-    if (configuration::MultiNode(log_reader_factory.configuration())) {
-      size_t i = 0;
-      for (const aos::Node *node :
-           *log_reader_factory.configuration()->nodes()) {
-        LOG(INFO) << "Registering start";
-        reader.OnStart(node, [node, &log_reader_factory, &result,
-                              node_index = i]() {
-          LOG(INFO) << "Starting " << node->name()->string_view();
-          result[node_index].first.push_back(
-              log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
-        });
-        reader.OnEnd(node, [node, &log_reader_factory, &result,
-                            node_index = i]() {
-          LOG(INFO) << "Ending " << node->name()->string_view();
-          result[node_index].second.push_back(
-              log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
-        });
-        ++i;
-      }
-    } else {
-      reader.OnStart([&log_reader_factory, &result]() {
-        LOG(INFO) << "Starting";
-        result[0].first.push_back(
-            log_reader_factory.GetNodeEventLoopFactory(nullptr)
-                ->realtime_now());
-      });
-      reader.OnEnd([&log_reader_factory, &result]() {
-        LOG(INFO) << "Ending";
-        result[0].second.push_back(
-            log_reader_factory.GetNodeEventLoopFactory(nullptr)
-                ->realtime_now());
-      });
-    }
-
-    log_reader_factory.Run();
-
-    reader.Deregister();
-
-    for (auto x : result) {
-      for (auto y : x.first) {
-        VLOG(1) << "Start " << y;
-      }
-      for (auto y : x.second) {
-        VLOG(1) << "End " << y;
-      }
-    }
-    return result;
-  }
-}
-
-class MultinodeLoggerTest : public ::testing::TestWithParam<
-                                std::tuple<ConfigParams, CompressionParams>> {
- public:
-  MultinodeLoggerTest()
-      : config_(aos::configuration::ReadConfig(ArtifactPath(absl::StrCat(
-            "aos/events/logging/", std::get<0>(GetParam()).config)))),
-        time_converter_(configuration::NodesCount(&config_.message())),
-        event_loop_factory_(&config_.message()),
-        pi1_(event_loop_factory_.GetNodeEventLoopFactory("pi1")),
-        pi1_index_(configuration::GetNodeIndex(
-            event_loop_factory_.configuration(), pi1_->node())),
-        pi2_(event_loop_factory_.GetNodeEventLoopFactory("pi2")),
-        pi2_index_(configuration::GetNodeIndex(
-            event_loop_factory_.configuration(), pi2_->node())),
-        tmp_dir_(aos::testing::TestTmpDir()),
-        logfile_base1_(tmp_dir_ + "/multi_logfile1"),
-        logfile_base2_(tmp_dir_ + "/multi_logfile2"),
-        pi1_reboot_logfiles_(MakePi1RebootLogfiles()),
-        logfiles_(MakeLogFiles(logfile_base1_, logfile_base2_)),
-        pi1_single_direction_logfiles_(MakePi1SingleDirectionLogfiles()),
-        structured_logfiles_(StructureLogFiles()) {
-    LOG(INFO) << "Config " << std::get<0>(GetParam()).config;
-    event_loop_factory_.SetTimeConverter(&time_converter_);
-
-    // Go through and remove the logfiles if they already exist.
-    for (const auto &file : logfiles_) {
-      unlink(file.c_str());
-      unlink((file + ".xz").c_str());
-    }
-
-    for (const auto &file : MakeLogFiles(tmp_dir_ + "/relogged1",
-                                         tmp_dir_ + "/relogged2", 3, 3, true)) {
-      unlink(file.c_str());
-    }
-
-    for (const auto &file : pi1_reboot_logfiles_) {
-      unlink(file.c_str());
-    }
-
-    LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
-              << " and " << logfiles_[2];
-
-    pi1_->OnStartup([this]() { pi1_->AlwaysStart<Ping>("ping"); });
-    pi2_->OnStartup([this]() { pi2_->AlwaysStart<Pong>("pong"); });
-  }
-
-  bool shared() const { return std::get<0>(GetParam()).shared; }
-
-  std::vector<std::string> MakeLogFiles(std::string logfile_base1,
-                                        std::string logfile_base2,
-                                        size_t pi1_data_count = 3,
-                                        size_t pi2_data_count = 3,
-                                        bool relogged_config = false) {
-    std::string_view sha256 = relogged_config
-                                  ? std::get<0>(GetParam()).relogged_sha256
-                                  : std::get<0>(GetParam()).sha256;
-    std::vector<std::string> result;
-    result.emplace_back(absl::StrCat(logfile_base1, "_", sha256, Extension()));
-    result.emplace_back(absl::StrCat(logfile_base2, "_", sha256, Extension()));
-    for (size_t i = 0; i < pi1_data_count; ++i) {
-      result.emplace_back(
-          absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
-    }
-    result.emplace_back(logfile_base1 +
-                        "_pi2_data/test/aos.examples.Pong.part0" + Extension());
-    result.emplace_back(logfile_base1 +
-                        "_pi2_data/test/aos.examples.Pong.part1" + Extension());
-    for (size_t i = 0; i < pi2_data_count; ++i) {
-      result.emplace_back(
-          absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
-    }
-    result.emplace_back(logfile_base2 +
-                        "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part0" +
-                        Extension());
-    result.emplace_back(logfile_base2 +
-                        "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part1" +
-                        Extension());
-    result.emplace_back(logfile_base1 +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
-                        Extension());
-    result.emplace_back(logfile_base1 +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
-                        Extension());
-    if (shared()) {
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/"
-                          "aos.message_bridge.RemoteMessage.part2" +
-                          Extension());
-      result.emplace_back(logfile_base2 +
-                          "_timestamps/pi2/aos/remote_timestamps/pi1/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base2 +
-                          "_timestamps/pi2/aos/remote_timestamps/pi1/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-    } else {
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-      result.emplace_back(logfile_base2 +
-                          "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base2 +
-                          "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base1 +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-    }
-
-    return result;
-  }
-
-  std::vector<std::string> MakePi1RebootLogfiles() {
-    std::vector<std::string> result;
-    result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
-    result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
-    result.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
-    result.emplace_back(logfile_base1_ + "_pi1_data.part3" + Extension());
-    result.emplace_back(logfile_base1_ + "_pi1_data.part4" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/test/aos.examples.Pong.part0" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/test/aos.examples.Pong.part1" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/test/aos.examples.Pong.part2" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/test/aos.examples.Pong.part3" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
-                        Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
-                        Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part2" +
-                        Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part3" +
-                        Extension());
-    result.emplace_back(absl::StrCat(
-        logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
-    if (shared()) {
-      for (size_t i = 0; i < 6; ++i) {
-        result.emplace_back(
-            absl::StrCat(logfile_base1_,
-                         "_timestamps/pi1/aos/remote_timestamps/pi2/"
-                         "aos.message_bridge.RemoteMessage.part",
-                         i, Extension()));
-      }
-    } else {
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part2" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                          "aos-message_bridge-Timestamp/"
-                          "aos.message_bridge.RemoteMessage.part3" +
-                          Extension());
-
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part0" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part1" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part2" +
-                          Extension());
-      result.emplace_back(logfile_base1_ +
-                          "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
-                          "aos-examples-Ping/"
-                          "aos.message_bridge.RemoteMessage.part3" +
-                          Extension());
-    }
-    return result;
-  }
-
-  std::vector<std::string> MakePi1SingleDirectionLogfiles() {
-    std::vector<std::string> result;
-    result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
-    result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
-    result.emplace_back(logfile_base1_ +
-                        "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
-                        Extension());
-    result.emplace_back(absl::StrCat(
-        logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
-    return result;
-  }
-
-  std::vector<std::string> MakePi1DeadNodeLogfiles() {
-    std::vector<std::string> result;
-    result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
-    result.emplace_back(absl::StrCat(
-        logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
-    return result;
-  }
-
-  std::vector<std::vector<std::string>> StructureLogFiles() {
-    std::vector<std::vector<std::string>> result{
-        std::vector<std::string>{logfiles_[2], logfiles_[3], logfiles_[4]},
-        std::vector<std::string>{logfiles_[5], logfiles_[6]},
-        std::vector<std::string>{logfiles_[7], logfiles_[8], logfiles_[9]},
-        std::vector<std::string>{logfiles_[10], logfiles_[11]},
-        std::vector<std::string>{logfiles_[12], logfiles_[13]}};
-
-    if (shared()) {
-      result.emplace_back(std::vector<std::string>{logfiles_[14], logfiles_[15],
-                                                   logfiles_[16]});
-      result.emplace_back(
-          std::vector<std::string>{logfiles_[17], logfiles_[18]});
-    } else {
-      result.emplace_back(
-          std::vector<std::string>{logfiles_[14], logfiles_[15]});
-      result.emplace_back(
-          std::vector<std::string>{logfiles_[16], logfiles_[17]});
-      result.emplace_back(
-          std::vector<std::string>{logfiles_[18], logfiles_[19]});
-    }
-
-    return result;
-  }
-
-  std::string Extension() {
-    return absl::StrCat(".bfbs", std::get<1>(GetParam()).extension);
-  }
-
-  LoggerState MakeLogger(NodeEventLoopFactory *node,
-                         SimulatedEventLoopFactory *factory = nullptr,
-                         const Configuration *configuration = nullptr) {
-    if (factory == nullptr) {
-      factory = &event_loop_factory_;
-    }
-    return LoggerState::MakeLogger(node, factory, std::get<1>(GetParam()),
-                                   configuration);
-  }
-
-  void StartLogger(LoggerState *logger, std::string logfile_base = "") {
-    if (logfile_base.empty()) {
-      if (logger->event_loop->node()->name()->string_view() == "pi1") {
-        logfile_base = logfile_base1_;
-      } else {
-        logfile_base = logfile_base2_;
-      }
-    }
-    logger->StartLogger(logfile_base);
-  }
-
-  void VerifyParts(const std::vector<LogFile> &sorted_parts,
-                   const std::vector<std::string> &corrupted_parts = {}) {
-    EXPECT_EQ(sorted_parts.size(), 2u);
-
-    // Count up the number of UUIDs and make sure they are what we expect as a
-    // sanity check.
-    std::set<std::string> log_event_uuids;
-    std::set<std::string> parts_uuids;
-    std::set<std::string> both_uuids;
-
-    size_t missing_rt_count = 0;
-
-    std::vector<std::string> logger_nodes;
-    for (const LogFile &log_file : sorted_parts) {
-      EXPECT_FALSE(log_file.log_event_uuid.empty());
-      log_event_uuids.insert(log_file.log_event_uuid);
-      logger_nodes.emplace_back(log_file.logger_node);
-      both_uuids.insert(log_file.log_event_uuid);
-      EXPECT_TRUE(log_file.config);
-      EXPECT_EQ(log_file.name,
-                absl::StrCat("name_prefix_", log_file.logger_node));
-      EXPECT_EQ(log_file.logger_sha1,
-                absl::StrCat("logger_sha1_", log_file.logger_node));
-      EXPECT_EQ(log_file.logger_version,
-                absl::StrCat("logger_version_", log_file.logger_node));
-
-      for (const LogParts &part : log_file.parts) {
-        EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
-            << ": " << part;
-        missing_rt_count +=
-            part.realtime_start_time == aos::realtime_clock::min_time;
-
-        EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
-                    log_event_uuids.end());
-        EXPECT_NE(part.node, "");
-        EXPECT_TRUE(log_file.config);
-        parts_uuids.insert(part.parts_uuid);
-        both_uuids.insert(part.parts_uuid);
-      }
-    }
-
-    // We won't have RT timestamps for 5 or 6 log files.  We don't log the RT
-    // start time on remote nodes because we don't know it and would be
-    // guessing.  And the log reader can actually do a better job.  The number
-    // depends on if we have the remote timestamps split across 2 files, or just
-    // across 1, depending on if we are using a split or combined timestamp
-    // channel config.
-    EXPECT_EQ(missing_rt_count, shared() ? 5u : 6u);
-
-    EXPECT_EQ(log_event_uuids.size(), 2u);
-    EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
-    EXPECT_EQ(log_event_uuids.size() + parts_uuids.size(), both_uuids.size());
-
-    // Test that each list of parts is in order.  Don't worry about the ordering
-    // between part file lists though.
-    // (inner vectors all need to be in order, but outer one doesn't matter).
-    ASSERT_THAT(ToLogReaderVector(sorted_parts),
-                ::testing::UnorderedElementsAreArray(structured_logfiles_));
-
-    EXPECT_THAT(logger_nodes, ::testing::UnorderedElementsAre("pi1", "pi2"));
-
-    EXPECT_NE(sorted_parts[0].realtime_start_time,
-              aos::realtime_clock::min_time);
-    EXPECT_NE(sorted_parts[1].realtime_start_time,
-              aos::realtime_clock::min_time);
-
-    EXPECT_NE(sorted_parts[0].monotonic_start_time,
-              aos::monotonic_clock::min_time);
-    EXPECT_NE(sorted_parts[1].monotonic_start_time,
-              aos::monotonic_clock::min_time);
-
-    EXPECT_THAT(sorted_parts[0].corrupted, ::testing::Eq(corrupted_parts));
-    EXPECT_THAT(sorted_parts[1].corrupted, ::testing::Eq(corrupted_parts));
-  }
-
-  void AddExtension(std::string_view extension) {
-    std::transform(logfiles_.begin(), logfiles_.end(), logfiles_.begin(),
-                   [extension](const std::string &in) {
-                     return absl::StrCat(in, extension);
-                   });
-
-    std::transform(structured_logfiles_.begin(), structured_logfiles_.end(),
-                   structured_logfiles_.begin(),
-                   [extension](std::vector<std::string> in) {
-                     std::transform(in.begin(), in.end(), in.begin(),
-                                    [extension](const std::string &in_str) {
-                                      return absl::StrCat(in_str, extension);
-                                    });
-                     return in;
-                   });
-  }
-
-  // Config and factory.
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
-  message_bridge::TestingTimeConverter time_converter_;
-  SimulatedEventLoopFactory event_loop_factory_;
-
-  NodeEventLoopFactory *const pi1_;
-  const size_t pi1_index_;
-  NodeEventLoopFactory *const pi2_;
-  const size_t pi2_index_;
-
-  std::string tmp_dir_;
-  std::string logfile_base1_;
-  std::string logfile_base2_;
-  std::vector<std::string> pi1_reboot_logfiles_;
-  std::vector<std::string> logfiles_;
-  std::vector<std::string> pi1_single_direction_logfiles_;
-
-  std::vector<std::vector<std::string>> structured_logfiles_;
-};
-
-// Counts the number of messages on a channel.  Returns (channel name, channel
-// type, count) for every message matching matcher()
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
-    std::shared_ptr<const aos::Configuration> config, std::string_view filename,
-    std::function<bool(const UnpackedMessageHeader *)> matcher) {
-  MessageReader message_reader(filename);
-  std::vector<int> counts(config->channels()->size(), 0);
-
-  while (true) {
-    std::shared_ptr<UnpackedMessageHeader> msg = message_reader.ReadMessage();
-    if (!msg) {
-      break;
-    }
-
-    if (matcher(msg.get())) {
-      counts[msg->channel_index]++;
-    }
-  }
-
-  std::vector<std::tuple<std::string, std::string, int>> result;
-  int channel = 0;
-  for (size_t i = 0; i < counts.size(); ++i) {
-    if (counts[i] != 0) {
-      const Channel *channel = config->channels()->Get(i);
-      result.push_back(std::make_tuple(channel->name()->str(),
-                                       channel->type()->str(), counts[i]));
-    }
-    ++channel;
-  }
-
-  return result;
-}
-
-// Counts the number of messages (channel, count) for all data messages.
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
-    std::shared_ptr<const aos::Configuration> config,
-    std::string_view filename) {
-  return CountChannelsMatching(
-      config, filename, [](const UnpackedMessageHeader *msg) {
-        if (msg->span.data() != nullptr) {
-          CHECK(!msg->monotonic_remote_time.has_value());
-          CHECK(!msg->realtime_remote_time.has_value());
-          CHECK(!msg->remote_queue_index.has_value());
-          return true;
-        }
-        return false;
-      });
-}
-
-// Counts the number of messages (channel, count) for all timestamp messages.
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
-    std::shared_ptr<const aos::Configuration> config,
-    std::string_view filename) {
-  return CountChannelsMatching(
-      config, filename, [](const UnpackedMessageHeader *msg) {
-        if (msg->span.data() == nullptr) {
-          CHECK(msg->monotonic_remote_time.has_value());
-          CHECK(msg->realtime_remote_time.has_value());
-          CHECK(msg->remote_queue_index.has_value());
-          return true;
-        }
-        return false;
-      });
-}
-
-// Tests that we can write and read simple multi-node log files.
-TEST_P(MultinodeLoggerTest, SimpleMultiNode) {
-  std::vector<std::string> actual_filenames;
-  time_converter_.StartEqual();
-
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-    pi1_logger.AppendAllFilenames(&actual_filenames);
-    pi2_logger.AppendAllFilenames(&actual_filenames);
-  }
-
-  ASSERT_THAT(actual_filenames,
-              ::testing::UnorderedElementsAreArray(logfiles_));
-
-  {
-    std::set<std::string> logfile_uuids;
-    std::set<std::string> parts_uuids;
-    // Confirm that we have the expected number of UUIDs for both the logfile
-    // UUIDs and parts UUIDs.
-    std::vector<SizePrefixedFlatbufferVector<LogFileHeader>> log_header;
-    for (std::string_view f : logfiles_) {
-      log_header.emplace_back(ReadHeader(f).value());
-      if (!log_header.back().message().has_configuration()) {
-        logfile_uuids.insert(
-            log_header.back().message().log_event_uuid()->str());
-        parts_uuids.insert(log_header.back().message().parts_uuid()->str());
-      }
-    }
-
-    EXPECT_EQ(logfile_uuids.size(), 2u);
-    if (shared()) {
-      EXPECT_EQ(parts_uuids.size(), 7u);
-    } else {
-      EXPECT_EQ(parts_uuids.size(), 8u);
-    }
-
-    // And confirm everything is on the correct node.
-    EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi1");
-    EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi1");
-    EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi1");
-
-    EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
-    EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi2");
-
-    EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi2");
-    EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi2");
-    EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi2");
-
-    EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi1");
-    EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi1");
-
-    EXPECT_EQ(log_header[12].message().node()->name()->string_view(), "pi2");
-    EXPECT_EQ(log_header[13].message().node()->name()->string_view(), "pi2");
-
-    if (shared()) {
-      EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
-      EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
-      EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi2");
-
-      EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
-      EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi1");
-    } else {
-      EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
-      EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
-
-      EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi1");
-      EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
-
-      EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi2");
-      EXPECT_EQ(log_header[19].message().node()->name()->string_view(), "pi2");
-    }
-
-    // And the parts index matches.
-    EXPECT_EQ(log_header[2].message().parts_index(), 0);
-    EXPECT_EQ(log_header[3].message().parts_index(), 1);
-    EXPECT_EQ(log_header[4].message().parts_index(), 2);
-
-    EXPECT_EQ(log_header[5].message().parts_index(), 0);
-    EXPECT_EQ(log_header[6].message().parts_index(), 1);
-
-    EXPECT_EQ(log_header[7].message().parts_index(), 0);
-    EXPECT_EQ(log_header[8].message().parts_index(), 1);
-    EXPECT_EQ(log_header[9].message().parts_index(), 2);
-
-    EXPECT_EQ(log_header[10].message().parts_index(), 0);
-    EXPECT_EQ(log_header[11].message().parts_index(), 1);
-
-    EXPECT_EQ(log_header[12].message().parts_index(), 0);
-    EXPECT_EQ(log_header[13].message().parts_index(), 1);
-
-    if (shared()) {
-      EXPECT_EQ(log_header[14].message().parts_index(), 0);
-      EXPECT_EQ(log_header[15].message().parts_index(), 1);
-      EXPECT_EQ(log_header[16].message().parts_index(), 2);
-
-      EXPECT_EQ(log_header[17].message().parts_index(), 0);
-      EXPECT_EQ(log_header[18].message().parts_index(), 1);
-    } else {
-      EXPECT_EQ(log_header[14].message().parts_index(), 0);
-      EXPECT_EQ(log_header[15].message().parts_index(), 1);
-
-      EXPECT_EQ(log_header[16].message().parts_index(), 0);
-      EXPECT_EQ(log_header[17].message().parts_index(), 1);
-
-      EXPECT_EQ(log_header[18].message().parts_index(), 0);
-      EXPECT_EQ(log_header[19].message().parts_index(), 1);
-    }
-  }
-
-  const std::vector<LogFile> sorted_log_files = SortParts(logfiles_);
-  {
-    using ::testing::UnorderedElementsAre;
-    std::shared_ptr<const aos::Configuration> config =
-        sorted_log_files[0].config;
-
-    // Timing reports, pings
-    EXPECT_THAT(CountChannelsData(config, logfiles_[2]),
-                UnorderedElementsAre(
-                    std::make_tuple("/pi1/aos",
-                                    "aos.message_bridge.ServerStatistics", 1),
-                    std::make_tuple("/test", "aos.examples.Ping", 1)))
-        << " : " << logfiles_[2];
-    {
-      std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
-          std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 1),
-          std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
-                          1)};
-      if (!std::get<0>(GetParam()).shared) {
-        channel_counts.push_back(
-            std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                            "aos-message_bridge-Timestamp",
-                            "aos.message_bridge.RemoteMessage", 1));
-      }
-      EXPECT_THAT(CountChannelsData(config, logfiles_[3]),
-                  ::testing::UnorderedElementsAreArray(channel_counts))
-          << " : " << logfiles_[3];
-    }
-    {
-      std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
-          std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 199),
-          std::make_tuple("/pi1/aos", "aos.message_bridge.ServerStatistics",
-                          20),
-          std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
-                          199),
-          std::make_tuple("/pi1/aos", "aos.timing.Report", 40),
-          std::make_tuple("/test", "aos.examples.Ping", 2000)};
-      if (!std::get<0>(GetParam()).shared) {
-        channel_counts.push_back(
-            std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-                            "aos-message_bridge-Timestamp",
-                            "aos.message_bridge.RemoteMessage", 199));
-      }
-      EXPECT_THAT(CountChannelsData(config, logfiles_[4]),
-                  ::testing::UnorderedElementsAreArray(channel_counts))
-          << " : " << logfiles_[4];
-    }
-    // Timestamps for pong
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[2]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[2];
-    EXPECT_THAT(
-        CountChannelsTimestamp(config, logfiles_[3]),
-        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
-        << " : " << logfiles_[3];
-    EXPECT_THAT(
-        CountChannelsTimestamp(config, logfiles_[4]),
-        UnorderedElementsAre(
-            std::make_tuple("/test", "aos.examples.Pong", 2000),
-            std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200)))
-        << " : " << logfiles_[4];
-
-    // Pong data.
-    EXPECT_THAT(
-        CountChannelsData(config, logfiles_[5]),
-        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 91)))
-        << " : " << logfiles_[5];
-    EXPECT_THAT(CountChannelsData(config, logfiles_[6]),
-                UnorderedElementsAre(
-                    std::make_tuple("/test", "aos.examples.Pong", 1910)))
-        << " : " << logfiles_[6];
-
-    // No timestamps
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[5]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[5];
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[6]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[6];
-
-    // Timing reports and pongs.
-    EXPECT_THAT(CountChannelsData(config, logfiles_[7]),
-                UnorderedElementsAre(std::make_tuple(
-                    "/pi2/aos", "aos.message_bridge.ServerStatistics", 1)))
-        << " : " << logfiles_[7];
-    EXPECT_THAT(
-        CountChannelsData(config, logfiles_[8]),
-        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
-        << " : " << logfiles_[8];
-    EXPECT_THAT(
-        CountChannelsData(config, logfiles_[9]),
-        UnorderedElementsAre(
-            std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200),
-            std::make_tuple("/pi2/aos", "aos.message_bridge.ServerStatistics",
-                            20),
-            std::make_tuple("/pi2/aos", "aos.message_bridge.ClientStatistics",
-                            200),
-            std::make_tuple("/pi2/aos", "aos.timing.Report", 40),
-            std::make_tuple("/test", "aos.examples.Pong", 2000)))
-        << " : " << logfiles_[9];
-    // And ping timestamps.
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[7]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[7];
-    EXPECT_THAT(
-        CountChannelsTimestamp(config, logfiles_[8]),
-        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Ping", 1)))
-        << " : " << logfiles_[8];
-    EXPECT_THAT(
-        CountChannelsTimestamp(config, logfiles_[9]),
-        UnorderedElementsAre(
-            std::make_tuple("/test", "aos.examples.Ping", 2000),
-            std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200)))
-        << " : " << logfiles_[9];
-
-    // And then test that the remotely logged timestamp data files only have
-    // timestamps in them.
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[10]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[10];
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[11]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[11];
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[12]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[12];
-    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[13]),
-                UnorderedElementsAre())
-        << " : " << logfiles_[13];
-
-    EXPECT_THAT(CountChannelsData(config, logfiles_[10]),
-                UnorderedElementsAre(std::make_tuple(
-                    "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
-        << " : " << logfiles_[10];
-    EXPECT_THAT(CountChannelsData(config, logfiles_[11]),
-                UnorderedElementsAre(std::make_tuple(
-                    "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
-        << " : " << logfiles_[11];
-
-    EXPECT_THAT(CountChannelsData(config, logfiles_[12]),
-                UnorderedElementsAre(std::make_tuple(
-                    "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
-        << " : " << logfiles_[12];
-    EXPECT_THAT(CountChannelsData(config, logfiles_[13]),
-                UnorderedElementsAre(std::make_tuple(
-                    "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
-        << " : " << logfiles_[13];
-
-    // Timestamps from pi2 on pi1, and the other way.
-    if (shared()) {
-      EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[14];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[15];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[16];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[17];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[18];
-
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
-                  UnorderedElementsAre(
-                      std::make_tuple("/test", "aos.examples.Ping", 1)))
-          << " : " << logfiles_[14];
-      EXPECT_THAT(
-          CountChannelsTimestamp(config, logfiles_[15]),
-          UnorderedElementsAre(
-              std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 9),
-              std::make_tuple("/test", "aos.examples.Ping", 90)))
-          << " : " << logfiles_[15];
-      EXPECT_THAT(
-          CountChannelsTimestamp(config, logfiles_[16]),
-          UnorderedElementsAre(
-              std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 191),
-              std::make_tuple("/test", "aos.examples.Ping", 1910)))
-          << " : " << logfiles_[16];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
-          << " : " << logfiles_[17];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
-          << " : " << logfiles_[18];
-    } else {
-      EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[14];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[15];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[16];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[17];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[18];
-      EXPECT_THAT(CountChannelsData(config, logfiles_[19]),
-                  UnorderedElementsAre())
-          << " : " << logfiles_[19];
-
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
-          << " : " << logfiles_[14];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[15]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
-          << " : " << logfiles_[15];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[16]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
-          << " : " << logfiles_[16];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
-                  UnorderedElementsAre(std::make_tuple(
-                      "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
-          << " : " << logfiles_[17];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
-                  UnorderedElementsAre(
-                      std::make_tuple("/test", "aos.examples.Ping", 91)))
-          << " : " << logfiles_[18];
-      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[19]),
-                  UnorderedElementsAre(
-                      std::make_tuple("/test", "aos.examples.Ping", 1910)))
-          << " : " << logfiles_[19];
-    }
-  }
-
-  LogReader reader(sorted_log_files);
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  // This sends out the fetched messages and advances time to the start of the
-  // log file.
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
-  LOG(INFO) << "now pi1 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
-  LOG(INFO) << "now pi2 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
-  EXPECT_THAT(reader.LoggedNodes(),
-              ::testing::ElementsAre(
-                  configuration::GetNode(reader.logged_configuration(), pi1),
-                  configuration::GetNode(reader.logged_configuration(), pi2)));
-
-  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-
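-  // The loggers start partway into the run, so roughly 10 ping/pong exchanges
-  // (at the 10 ms period used here) predate the log.  Start the counters there
-  // so the replayed values line up.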
-  int pi1_ping_count = 10;
-  int pi2_ping_count = 10;
-  int pi1_pong_count = 10;
-  int pi2_pong_count = 10;
-
-  // Confirm that the ping value matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping) << " at "
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
-        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
-                  pi1_ping_count * chrono::milliseconds(10) +
-                      monotonic_clock::epoch());
-        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
-                  pi1_ping_count * chrono::milliseconds(10) +
-                      realtime_clock::epoch());
-        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
-                  pi1_event_loop->context().monotonic_event_time);
-        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
-                  pi1_event_loop->context().realtime_event_time);
-
-        ++pi1_ping_count;
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping) << " at "
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
-        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
-                  pi2_ping_count * chrono::milliseconds(10) +
-                      monotonic_clock::epoch());
-        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
-                  pi2_ping_count * chrono::milliseconds(10) +
-                      realtime_clock::epoch());
-        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time +
-                      chrono::microseconds(150),
-                  pi2_event_loop->context().monotonic_event_time);
-        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time +
-                      chrono::microseconds(150),
-                  pi2_event_loop->context().realtime_event_time);
-        ++pi2_ping_count;
-      });
-
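-  // The remote queue index counts pongs from channel startup, while
-  // pi1_pong_count starts at 10; kQueueIndexOffset bridges that constant
-  // difference for this test setup.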
-  constexpr ssize_t kQueueIndexOffset = -9;
-  // Confirm that the ping and pong counts both match, and the value also
-  // matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_event_loop, &pi1_ping_count,
-                &pi1_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pi1_event_loop->context().remote_queue_index,
-                  pi1_pong_count + kQueueIndexOffset);
-        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
-                  chrono::microseconds(200) +
-                      pi1_pong_count * chrono::milliseconds(10) +
-                      monotonic_clock::epoch());
-        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
-                  chrono::microseconds(200) +
-                      pi1_pong_count * chrono::milliseconds(10) +
-                      realtime_clock::epoch());
-
-        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time +
-                      chrono::microseconds(150),
-                  pi1_event_loop->context().monotonic_event_time);
-        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time +
-                      chrono::microseconds(150),
-                  pi1_event_loop->context().realtime_event_time);
-
-        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
-        ++pi1_pong_count;
-        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_event_loop, &pi2_ping_count,
-                &pi2_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pi2_event_loop->context().remote_queue_index,
-                  pi2_pong_count + kQueueIndexOffset);
-
-        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
-                  chrono::microseconds(200) +
-                      pi2_pong_count * chrono::milliseconds(10) +
-                      monotonic_clock::epoch());
-        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
-                  chrono::microseconds(200) +
-                      pi2_pong_count * chrono::milliseconds(10) +
-                      realtime_clock::epoch());
-
-        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
-                  pi2_event_loop->context().monotonic_event_time);
-        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
-                  pi2_event_loop->context().realtime_event_time);
-
-        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
-        ++pi2_pong_count;
-        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
-      });
-
-  log_reader_factory.Run();
-  EXPECT_EQ(pi1_ping_count, 2010);
-  EXPECT_EQ(pi2_ping_count, 2010);
-  EXPECT_EQ(pi1_pong_count, 2010);
-  EXPECT_EQ(pi2_pong_count, 2010);
-
-  reader.Deregister();
-}
-
-typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
-
-// Test that if we feed the replay a mismatched node list, we die in the
-// LogReader constructor.
-TEST_P(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
-  time_converter_.StartEqual();
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  // Test that, if we add an additional node to the replay config, the logger
-  // complains about the mismatch in the number of nodes.
-  FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
-      configuration::MergeWithConfig(&config_.message(), R"({
-          "nodes": [
-            {
-              "name": "extra-node"
-            }
-          ]
-        }
-      )");
-
-  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
-  EXPECT_DEATH(LogReader(sorted_parts, &extra_nodes_config.message()),
-               "Log file and replay config need to have matching nodes lists.");
-}
-
-// Tests that we can read log files that don't start at the same monotonic
-// time.
-TEST_P(MultinodeLoggerTest, StaggeredStart) {
-  time_converter_.StartEqual();
-  std::vector<std::string> actual_filenames;
-
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(200));
-
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-    pi1_logger.AppendAllFilenames(&actual_filenames);
-    pi2_logger.AppendAllFilenames(&actual_filenames);
-  }
-
-  // Since we delay starting pi2, it already knows about all the timestamps so
-  // we don't end up with extra parts.
-  LogReader reader(SortParts(actual_filenames));
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  // This sends out the fetched messages and advances time to the start of the
-  // log file.
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  EXPECT_THAT(reader.LoggedNodes(),
-              ::testing::ElementsAre(
-                  configuration::GetNode(reader.logged_configuration(), pi1),
-                  configuration::GetNode(reader.logged_configuration(), pi2)));
-
-  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-
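-  // Both loggers are running by ~295 ms (95 ms + 200 ms), so roughly 30
-  // ping/pong exchanges at the 10 ms period predate the replayed span.  Start
-  // the counters there.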
-  int pi1_ping_count = 30;
-  int pi2_ping_count = 30;
-  int pi1_pong_count = 30;
-  int pi2_pong_count = 30;
-
-  // Confirm that the ping value matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
-
-        ++pi1_ping_count;
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
-        ++pi2_ping_count;
-      });
-
-  // Confirm that the ping and pong counts both match, and the value also
-  // matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_event_loop, &pi1_ping_count,
-                &pi1_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
-        ++pi1_pong_count;
-        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_event_loop, &pi2_ping_count,
-                &pi2_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
-        ++pi2_pong_count;
-        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
-      });
-
-  log_reader_factory.Run();
-  EXPECT_EQ(pi1_ping_count, 2030);
-  EXPECT_EQ(pi2_ping_count, 2030);
-  EXPECT_EQ(pi1_pong_count, 2030);
-  EXPECT_EQ(pi2_pong_count, 2030);
-
-  reader.Deregister();
-}
-
-// Tests that we can read log files where the monotonic clocks drift and don't
-// match correctly.  While we are here, also test that logs with different
-// ending times are readable.
-TEST_P(MultinodeLoggerTest, MismatchedClocks) {
-  // TODO(austin): Negate...
-  const chrono::nanoseconds initial_pi2_offset = chrono::seconds(1000);
-
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + initial_pi2_offset});
-  // Wait for 95 ms (~0.1 seconds - 1/2 of the ping/pong period), and set the
-  // skew to 200 us/s (200 ns of drift per 1 ms of elapsed time).
-  const chrono::nanoseconds startup_sleep1 = time_converter_.AddMonotonic(
-      {chrono::milliseconds(95),
-       chrono::milliseconds(95) - chrono::nanoseconds(200) * 95});
-  // Run another 200 ms to have one logger start first.
-  const chrono::nanoseconds startup_sleep2 = time_converter_.AddMonotonic(
-      {chrono::milliseconds(200), chrono::milliseconds(200)});
-  // Slew one way then the other at the same 200 us/s slew rate.  Make sure we
-  // go far enough to cause problems if this isn't accounted for.
-  const chrono::nanoseconds logger_run1 = time_converter_.AddMonotonic(
-      {chrono::milliseconds(20000),
-       chrono::milliseconds(20000) - chrono::nanoseconds(200) * 20000});
-  const chrono::nanoseconds logger_run2 = time_converter_.AddMonotonic(
-      {chrono::milliseconds(40000),
-       chrono::milliseconds(40000) + chrono::nanoseconds(200) * 40000});
-  const chrono::nanoseconds logger_run3 = time_converter_.AddMonotonic(
-      {chrono::milliseconds(400), chrono::milliseconds(400)});
-
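-  // pi2's logger runs from ~95 ms to the end of the test; pi1's logger only
-  // covers logger_run1 and logger_run2, during which pi2's clock slews slow
-  // and then fast relative to pi1 at 200 us/s.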
-  {
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    LOG(INFO) << "pi2 times: " << pi2_->monotonic_now() << " "
-              << pi2_->realtime_now() << " distributed "
-              << pi2_->ToDistributedClock(pi2_->monotonic_now());
-
-    LOG(INFO) << "pi2_ times: " << pi2_->monotonic_now() << " "
-              << pi2_->realtime_now() << " distributed "
-              << pi2_->ToDistributedClock(pi2_->monotonic_now());
-
-    event_loop_factory_.RunFor(startup_sleep1);
-
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(startup_sleep2);
-
-    {
-      // Run pi1's logger for only part of the time.
-      LoggerState pi1_logger = MakeLogger(pi1_);
-
-      StartLogger(&pi1_logger);
-      event_loop_factory_.RunFor(logger_run1);
-
-      // Make sure we slewed time far enough so that the difference is greater
-      // than the network delay.  This confirms that if we sort incorrectly, it
-      // would show in the results.
-      EXPECT_LT(
-          (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
-          -event_loop_factory_.send_delay() -
-              event_loop_factory_.network_delay());
-
-      event_loop_factory_.RunFor(logger_run2);
-
-      // And now check that we went far enough the other way to make sure we
-      // cover both problems.
-      EXPECT_GT(
-          (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
-          event_loop_factory_.send_delay() +
-              event_loop_factory_.network_delay());
-    }
-
-    // And log a bit more on pi2.
-    event_loop_factory_.RunFor(logger_run3);
-  }
-
-  LogReader reader(
-      SortParts(MakeLogFiles(logfile_base1_, logfile_base2_, 3, 2)));
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  // This sends out the fetched messages and advances time to the start of the
-  // log file.
-  reader.Register(&log_reader_factory);
-
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
-  LOG(INFO) << "now pi1 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
-  LOG(INFO) << "now pi2 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
-  LOG(INFO) << "Done registering (pi1) "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now()
-            << " "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->realtime_now();
-  LOG(INFO) << "Done registering (pi2) "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now()
-            << " "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->realtime_now();
-
-  EXPECT_THAT(reader.LoggedNodes(),
-              ::testing::ElementsAre(
-                  configuration::GetNode(reader.logged_configuration(), pi1),
-                  configuration::GetNode(reader.logged_configuration(), pi2)));
-
-  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-
-  int pi1_ping_count = 30;
-  int pi2_ping_count = 30;
-  int pi1_pong_count = 30;
-  int pi2_pong_count = 30;
-
-  // Confirm that the ping value matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
-
-        ++pi1_ping_count;
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
-        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
-        ++pi2_ping_count;
-      });
-
-  // Confirm that the ping and pong counts both match, and the value also
-  // matches.
-  pi1_event_loop->MakeWatcher(
-      "/test", [&pi1_event_loop, &pi1_ping_count,
-                &pi1_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
-                << pi1_event_loop->context().monotonic_remote_time << " -> "
-                << pi1_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
-        ++pi1_pong_count;
-        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
-      });
-  pi2_event_loop->MakeWatcher(
-      "/test", [&pi2_event_loop, &pi2_ping_count,
-                &pi2_pong_count](const examples::Pong &pong) {
-        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
-                << pi2_event_loop->context().monotonic_remote_time << " -> "
-                << pi2_event_loop->context().monotonic_event_time;
-
-        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
-        ++pi2_pong_count;
-        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
-      });
-
-  log_reader_factory.Run();
-  EXPECT_EQ(pi1_ping_count, 6030);
-  EXPECT_EQ(pi2_ping_count, 6030);
-  EXPECT_EQ(pi1_pong_count, 6030);
-  EXPECT_EQ(pi2_pong_count, 6030);
-
-  reader.Deregister();
-}
-
-// Tests that we can sort a bunch of parts into the pre-determined sorted parts.
-TEST_P(MultinodeLoggerTest, SortParts) {
-  time_converter_.StartEqual();
-  // Make a bunch of parts.
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(2000));
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
-  VerifyParts(sorted_parts);
-}
-
-// Tests that we can sort a bunch of parts with an empty part.  We should ignore
-// it and remove it from the sorted list.
-TEST_P(MultinodeLoggerTest, SortEmptyParts) {
-  time_converter_.StartEqual();
-  // Make a bunch of parts.
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(2000));
-  }
-
-  // TODO(austin): Should we flip out if the file can't be opened?
-  const std::string kEmptyFile("foobarinvalidfiledoesnotexist" + Extension());
-
-  aos::util::WriteStringToFileOrDie(kEmptyFile, "");
-  logfiles_.emplace_back(kEmptyFile);
-
-  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
-  VerifyParts(sorted_parts, {kEmptyFile});
-}
-
-// Tests that we can sort a bunch of parts when the end of one file is
-// missing.  We should use the part we can read.
-TEST_P(MultinodeLoggerTest, SortTruncatedParts) {
-  std::vector<std::string> actual_filenames;
-  time_converter_.StartEqual();
-  // Make a bunch of parts.
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(2000));
-
-    pi1_logger.AppendAllFilenames(&actual_filenames);
-    pi2_logger.AppendAllFilenames(&actual_filenames);
-  }
-
-  ASSERT_THAT(actual_filenames,
-              ::testing::UnorderedElementsAreArray(logfiles_));
-
-  // Strip off the end of one of the files.  Pick one with a lot of data.
-  // For snappy, it needs enough data to span >1 chunk of compressed data so
-  // that we don't corrupt the entire log part.
-  ::std::string compressed_contents =
-      aos::util::ReadFileToStringOrDie(logfiles_[4]);
-
-  aos::util::WriteStringToFileOrDie(
-      logfiles_[4],
-      compressed_contents.substr(0, compressed_contents.size() - 100));
-
-  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
-  VerifyParts(sorted_parts);
-}
-
-// Tests that if we remap a remapped channel, it shows up correctly.
-TEST_P(MultinodeLoggerTest, RemapLoggedChannel) {
-  time_converter_.StartEqual();
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  LogReader reader(SortParts(logfiles_));
-
-  // Remap just on pi1.
-  reader.RemapLoggedChannel<aos::timing::Report>(
-      "/aos", configuration::GetNode(reader.configuration(), "pi1"));
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  std::vector<const Channel *> remapped_channels = reader.RemappedChannels();
-  // Note: An extra channel gets remapped automatically due to a timestamp
-  // channel being LOCAL_LOGGER'd.
-  ASSERT_EQ(remapped_channels.size(), std::get<0>(GetParam()).shared ? 1u : 2u);
-  EXPECT_EQ(remapped_channels[0]->name()->string_view(), "/original/pi1/aos");
-  EXPECT_EQ(remapped_channels[0]->type()->string_view(), "aos.timing.Report");
-  if (!std::get<0>(GetParam()).shared) {
-    EXPECT_EQ(remapped_channels[1]->name()->string_view(),
-              "/original/pi1/aos/remote_timestamps/pi2/pi1/aos/"
-              "aos-message_bridge-Timestamp");
-    EXPECT_EQ(remapped_channels[1]->type()->string_view(),
-              "aos.message_bridge.RemoteMessage");
-  }
-
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  // Confirm we can read the data on the remapped channel, just for pi1. Nothing
-  // else should have moved.
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  pi1_event_loop->SkipTimingReport();
-  std::unique_ptr<EventLoop> full_pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  full_pi1_event_loop->SkipTimingReport();
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-  pi2_event_loop->SkipTimingReport();
-
-  MessageCounter<aos::timing::Report> pi1_timing_report(pi1_event_loop.get(),
-                                                        "/aos");
-  MessageCounter<aos::timing::Report> full_pi1_timing_report(
-      full_pi1_event_loop.get(), "/pi1/aos");
-  MessageCounter<aos::timing::Report> pi1_original_timing_report(
-      pi1_event_loop.get(), "/original/aos");
-  MessageCounter<aos::timing::Report> full_pi1_original_timing_report(
-      full_pi1_event_loop.get(), "/original/pi1/aos");
-  MessageCounter<aos::timing::Report> pi2_timing_report(pi2_event_loop.get(),
-                                                        "/aos");
-
-  log_reader_factory.Run();
-
-  EXPECT_EQ(pi1_timing_report.count(), 0u);
-  EXPECT_EQ(full_pi1_timing_report.count(), 0u);
-  EXPECT_NE(pi1_original_timing_report.count(), 0u);
-  EXPECT_NE(full_pi1_original_timing_report.count(), 0u);
-  EXPECT_NE(pi2_timing_report.count(), 0u);
-
-  reader.Deregister();
-}
-
-// Tests that we can remap a forwarded channel as well.
-TEST_P(MultinodeLoggerTest, RemapForwardedLoggedChannel) {
-  time_converter_.StartEqual();
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  LogReader reader(SortParts(logfiles_));
-
-  reader.RemapLoggedChannel<examples::Ping>("/test");
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  // Confirm we can read the data on the remapped channel on both pi1 and pi2.
-  // Nothing else should have moved.
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  pi1_event_loop->SkipTimingReport();
-  std::unique_ptr<EventLoop> full_pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  full_pi1_event_loop->SkipTimingReport();
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-  pi2_event_loop->SkipTimingReport();
-
-  MessageCounter<examples::Ping> pi1_ping(pi1_event_loop.get(), "/test");
-  MessageCounter<examples::Ping> pi2_ping(pi2_event_loop.get(), "/test");
-  MessageCounter<examples::Ping> pi1_original_ping(pi1_event_loop.get(),
-                                                   "/original/test");
-  MessageCounter<examples::Ping> pi2_original_ping(pi2_event_loop.get(),
-                                                   "/original/test");
-
-  std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
-      pi1_original_ping_timestamp;
-  std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
-      pi1_ping_timestamp;
-  if (!shared()) {
-    pi1_original_ping_timestamp =
-        std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
-            pi1_event_loop.get(),
-            "/pi1/aos/remote_timestamps/pi2/original/test/aos-examples-Ping");
-    pi1_ping_timestamp =
-        std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
-            pi1_event_loop.get(),
-            "/pi1/aos/remote_timestamps/pi2/test/aos-examples-Ping");
-  }
-
-  log_reader_factory.Run();
-
-  EXPECT_EQ(pi1_ping.count(), 0u);
-  EXPECT_EQ(pi2_ping.count(), 0u);
-  EXPECT_NE(pi1_original_ping.count(), 0u);
-  EXPECT_NE(pi2_original_ping.count(), 0u);
-  if (!shared()) {
-    EXPECT_NE(pi1_original_ping_timestamp->count(), 0u);
-    EXPECT_EQ(pi1_ping_timestamp->count(), 0u);
-  }
-
-  reader.Deregister();
-}
-
-// Tests that we observe all the same events in log replay (for a given node)
-// whether we just register an event loop for that node or if we register a full
-// event loop factory.
-TEST_P(MultinodeLoggerTest, SingleNodeReplay) {
-  time_converter_.StartEqual();
-  constexpr chrono::milliseconds kStartupDelay(95);
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(kStartupDelay);
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  LogReader full_reader(SortParts(logfiles_));
-  LogReader single_node_reader(SortParts(logfiles_));
-
-  SimulatedEventLoopFactory full_factory(full_reader.configuration());
-  SimulatedEventLoopFactory single_node_factory(
-      single_node_reader.configuration());
-  single_node_factory.SkipTimingReport();
-  single_node_factory.DisableStatistics();
-  std::unique_ptr<EventLoop> replay_event_loop =
-      single_node_factory.GetNodeEventLoopFactory("pi1")->MakeEventLoop(
-          "log_reader");
-
-  full_reader.Register(&full_factory);
-  single_node_reader.Register(replay_event_loop.get());
-
-  const Node *full_pi1 =
-      configuration::GetNode(full_factory.configuration(), "pi1");
-
-  // Record every message observed on pi1 (fetched at startup or delivered
-  // live) when replaying with the full event loop factory.
-  std::unique_ptr<EventLoop> full_event_loop =
-      full_factory.MakeEventLoop("test", full_pi1);
-  full_event_loop->SkipTimingReport();
-  full_event_loop->SkipAosLog();
-  // Maps are indexed on channel index.
-  // observed_messages: {channel_index: [(message_sent_time, was_fetched),...]}
-  std::map<size_t, std::vector<std::pair<monotonic_clock::time_point, bool>>>
-      observed_messages;
-  std::map<size_t, std::unique_ptr<RawFetcher>> fetchers;
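-  // For each channel readable on pi1, record the message fetched at startup
-  // (marked true) and every message delivered live afterwards (marked false).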
-  for (size_t ii = 0; ii < full_event_loop->configuration()->channels()->size();
-       ++ii) {
-    const Channel *channel =
-        full_event_loop->configuration()->channels()->Get(ii);
-    // We currently don't support replaying remote timestamp channels in
-    // realtime replay (unless the remote timestamp channel was logged, i.e.
-    // not NOT_LOGGED, in which case it gets auto-remapped and replayed on a
-    // /original channel).
-    if (channel->name()->string_view().find("remote_timestamp") !=
-            std::string_view::npos &&
-        channel->name()->string_view().find("/original") ==
-            std::string_view::npos) {
-      continue;
-    }
-    if (configuration::ChannelIsReadableOnNode(channel, full_pi1)) {
-      observed_messages[ii] = {};
-      fetchers[ii] = full_event_loop->MakeRawFetcher(channel);
-      full_event_loop->OnRun([ii, &observed_messages, &fetchers]() {
-        if (fetchers[ii]->Fetch()) {
-          observed_messages[ii].push_back(std::make_pair(
-              fetchers[ii]->context().monotonic_event_time, true));
-        }
-      });
-      full_event_loop->MakeRawNoArgWatcher(
-          channel, [ii, &observed_messages](const Context &context) {
-            observed_messages[ii].push_back(
-                std::make_pair(context.monotonic_event_time, false));
-          });
-    }
-  }
-
-  full_factory.Run();
-  fetchers.clear();
-  full_reader.Deregister();
-
-  const Node *single_node_pi1 =
-      configuration::GetNode(single_node_factory.configuration(), "pi1");
-  std::map<size_t, std::unique_ptr<RawFetcher>> single_node_fetchers;
-
-  std::unique_ptr<EventLoop> single_node_event_loop =
-      single_node_factory.MakeEventLoop("test", single_node_pi1);
-  single_node_event_loop->SkipTimingReport();
-  single_node_event_loop->SkipAosLog();
-  for (size_t ii = 0;
-       ii < single_node_event_loop->configuration()->channels()->size(); ++ii) {
-    const Channel *channel =
-        single_node_event_loop->configuration()->channels()->Get(ii);
-    single_node_factory.DisableForwarding(channel);
-    if (configuration::ChannelIsReadableOnNode(channel, single_node_pi1)) {
-      single_node_fetchers[ii] =
-          single_node_event_loop->MakeRawFetcher(channel);
-      single_node_event_loop->OnRun([channel, ii, &single_node_fetchers]() {
-        EXPECT_FALSE(single_node_fetchers[ii]->Fetch())
-            << "Single EventLoop replay doesn't support pre-loading fetchers. "
-            << configuration::StrippedChannelToString(channel);
-      });
-      single_node_event_loop->MakeRawNoArgWatcher(
-          channel, [ii, &observed_messages, channel,
-                    kStartupDelay](const Context &context) {
-            if (observed_messages[ii].empty()) {
-              FAIL() << "Observed extra message at "
-                     << context.monotonic_event_time << " on "
-                     << configuration::StrippedChannelToString(channel);
-              return;
-            }
-            const std::pair<monotonic_clock::time_point, bool> &message =
-                observed_messages[ii].front();
-            if (message.second) {
-              EXPECT_LE(message.first,
-                        context.monotonic_event_time + kStartupDelay)
-                  << "Mismatched message times " << context.monotonic_event_time
-                  << " and " << message.first << " on "
-                  << configuration::StrippedChannelToString(channel);
-            } else {
-              EXPECT_EQ(message.first,
-                        context.monotonic_event_time + kStartupDelay)
-                  << "Mismatched message times " << context.monotonic_event_time
-                  << " and " << message.first << " on "
-                  << configuration::StrippedChannelToString(channel);
-            }
-            observed_messages[ii].erase(observed_messages[ii].begin());
-          });
-    }
-  }
-
-  single_node_factory.Run();
-
-  single_node_fetchers.clear();
-
-  single_node_reader.Deregister();
-
-  for (const auto &pair : observed_messages) {
-    EXPECT_TRUE(pair.second.empty())
-        << "Missed " << pair.second.size() << " messages on "
-        << configuration::StrippedChannelToString(
-               single_node_event_loop->configuration()->channels()->Get(
-                   pair.first));
-  }
-}
-
-// Tests that we properly recreate forwarded timestamps when replaying a log.
-// This should be enough that we can then re-run the logger and get a valid log
-// back.
-TEST_P(MultinodeLoggerTest, MessageHeader) {
-  time_converter_.StartEqual();
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  LogReader reader(SortParts(logfiles_));
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  // This sends out the fetched messages and advances time to the start of the
-  // log file.
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
-  LOG(INFO) << "now pi1 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
-  LOG(INFO) << "now pi2 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
-  EXPECT_THAT(reader.LoggedNodes(),
-              ::testing::ElementsAre(
-                  configuration::GetNode(reader.logged_configuration(), pi1),
-                  configuration::GetNode(reader.logged_configuration(), pi2)));
-
-  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
-  std::unique_ptr<EventLoop> pi1_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi1);
-  std::unique_ptr<EventLoop> pi2_event_loop =
-      log_reader_factory.MakeEventLoop("test", pi2);
-
-  aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi1_fetcher =
-      pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
-  aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi2_fetcher =
-      pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
-
-  aos::Fetcher<examples::Ping> ping_on_pi1_fetcher =
-      pi1_event_loop->MakeFetcher<examples::Ping>("/test");
-  aos::Fetcher<examples::Ping> ping_on_pi2_fetcher =
-      pi2_event_loop->MakeFetcher<examples::Ping>("/test");
-
-  aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi2_fetcher =
-      pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
-  aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi1_fetcher =
-      pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
-
-  aos::Fetcher<examples::Pong> pong_on_pi2_fetcher =
-      pi2_event_loop->MakeFetcher<examples::Pong>("/test");
-  aos::Fetcher<examples::Pong> pong_on_pi1_fetcher =
-      pi1_event_loop->MakeFetcher<examples::Pong>("/test");
-
-  const size_t pi1_timestamp_channel = configuration::ChannelIndex(
-      pi1_event_loop->configuration(), pi1_timestamp_on_pi1_fetcher.channel());
-  const size_t ping_timestamp_channel = configuration::ChannelIndex(
-      pi2_event_loop->configuration(), ping_on_pi2_fetcher.channel());
-
-  const size_t pi2_timestamp_channel = configuration::ChannelIndex(
-      pi2_event_loop->configuration(), pi2_timestamp_on_pi2_fetcher.channel());
-  const size_t pong_timestamp_channel = configuration::ChannelIndex(
-      pi1_event_loop->configuration(), pong_on_pi1_fetcher.channel());
-
-  const chrono::nanoseconds network_delay = event_loop_factory_.network_delay();
-  const chrono::nanoseconds send_delay = event_loop_factory_.send_delay();
-
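-  // With shared remote timestamp channels, all timestamps for messages
-  // forwarded to pi2 land on the single /aos/remote_timestamps/pi2 channel, so
-  // no one channel index applies (hence -1).  With split channels, each
-  // forwarded channel has its own remote timestamp channel, so the expected
-  // index can be pinned down.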
-  for (std::pair<int, std::string> channel :
-       shared()
-           ? std::vector<
-                 std::pair<int, std::string>>{{-1,
-                                               "/aos/remote_timestamps/pi2"}}
-           : std::vector<std::pair<int, std::string>>{
-                 {pi1_timestamp_channel,
-                  "/aos/remote_timestamps/pi2/pi1/aos/"
-                  "aos-message_bridge-Timestamp"},
-                 {ping_timestamp_channel,
-                  "/aos/remote_timestamps/pi2/test/aos-examples-Ping"}}) {
-    pi1_event_loop->MakeWatcher(
-        channel.second,
-        [&pi1_event_loop, &pi2_event_loop, pi1_timestamp_channel,
-         ping_timestamp_channel, &pi1_timestamp_on_pi1_fetcher,
-         &pi1_timestamp_on_pi2_fetcher, &ping_on_pi1_fetcher,
-         &ping_on_pi2_fetcher, network_delay, send_delay,
-         channel_index = channel.first](const RemoteMessage &header) {
-          const aos::monotonic_clock::time_point header_monotonic_sent_time(
-              chrono::nanoseconds(header.monotonic_sent_time()));
-          const aos::realtime_clock::time_point header_realtime_sent_time(
-              chrono::nanoseconds(header.realtime_sent_time()));
-          const aos::monotonic_clock::time_point header_monotonic_remote_time(
-              chrono::nanoseconds(header.monotonic_remote_time()));
-          const aos::realtime_clock::time_point header_realtime_remote_time(
-              chrono::nanoseconds(header.realtime_remote_time()));
-
-          if (channel_index != -1) {
-            ASSERT_EQ(channel_index, header.channel_index());
-          }
-
-          const Context *pi1_context = nullptr;
-          const Context *pi2_context = nullptr;
-
-          if (header.channel_index() == pi1_timestamp_channel) {
-            ASSERT_TRUE(pi1_timestamp_on_pi1_fetcher.FetchNext());
-            ASSERT_TRUE(pi1_timestamp_on_pi2_fetcher.FetchNext());
-            pi1_context = &pi1_timestamp_on_pi1_fetcher.context();
-            pi2_context = &pi1_timestamp_on_pi2_fetcher.context();
-          } else if (header.channel_index() == ping_timestamp_channel) {
-            ASSERT_TRUE(ping_on_pi1_fetcher.FetchNext());
-            ASSERT_TRUE(ping_on_pi2_fetcher.FetchNext());
-            pi1_context = &ping_on_pi1_fetcher.context();
-            pi2_context = &ping_on_pi2_fetcher.context();
-          } else {
-            LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
-                       << configuration::CleanedChannelToString(
-                              pi1_event_loop->configuration()->channels()->Get(
-                                  header.channel_index()));
-          }
-
-          ASSERT_TRUE(header.has_boot_uuid());
-          EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
-                    pi2_event_loop->boot_uuid());
-
-          EXPECT_EQ(pi1_context->queue_index, header.remote_queue_index());
-          EXPECT_EQ(pi2_context->remote_queue_index,
-                    header.remote_queue_index());
-          EXPECT_EQ(pi2_context->queue_index, header.queue_index());
-
-          EXPECT_EQ(pi2_context->monotonic_event_time,
-                    header_monotonic_sent_time);
-          EXPECT_EQ(pi2_context->realtime_event_time,
-                    header_realtime_sent_time);
-          EXPECT_EQ(pi2_context->realtime_remote_time,
-                    header_realtime_remote_time);
-          EXPECT_EQ(pi2_context->monotonic_remote_time,
-                    header_monotonic_remote_time);
-
-          EXPECT_EQ(pi1_context->realtime_event_time,
-                    header_realtime_remote_time);
-          EXPECT_EQ(pi1_context->monotonic_event_time,
-                    header_monotonic_remote_time);
-
-          // Time estimation isn't perfect, but we know the clocks were
-          // identical when logged, so we know when this should have come back:
-          // one network delay each way plus the send delay.  Confirm we got it
-          // when we expected.
-          EXPECT_EQ(pi1_event_loop->context().monotonic_event_time,
-                    pi1_context->monotonic_event_time + 2 * network_delay +
-                        send_delay);
-        });
-  }
-  for (std::pair<int, std::string> channel :
-       shared()
-           ? std::vector<
-                 std::pair<int, std::string>>{{-1,
-                                               "/aos/remote_timestamps/pi1"}}
-           : std::vector<std::pair<int, std::string>>{
-                 {pi2_timestamp_channel,
-                  "/aos/remote_timestamps/pi1/pi2/aos/"
-                  "aos-message_bridge-Timestamp"}}) {
-    pi2_event_loop->MakeWatcher(
-        channel.second,
-        [&pi2_event_loop, &pi1_event_loop, pi2_timestamp_channel,
-         pong_timestamp_channel, &pi2_timestamp_on_pi2_fetcher,
-         &pi2_timestamp_on_pi1_fetcher, &pong_on_pi2_fetcher,
-         &pong_on_pi1_fetcher, network_delay, send_delay,
-         channel_index = channel.first](const RemoteMessage &header) {
-          const aos::monotonic_clock::time_point header_monotonic_sent_time(
-              chrono::nanoseconds(header.monotonic_sent_time()));
-          const aos::realtime_clock::time_point header_realtime_sent_time(
-              chrono::nanoseconds(header.realtime_sent_time()));
-          const aos::monotonic_clock::time_point header_monotonic_remote_time(
-              chrono::nanoseconds(header.monotonic_remote_time()));
-          const aos::realtime_clock::time_point header_realtime_remote_time(
-              chrono::nanoseconds(header.realtime_remote_time()));
-
-          if (channel_index != -1) {
-            ASSERT_EQ(channel_index, header.channel_index());
-          }
-
-          const Context *pi2_context = nullptr;
-          const Context *pi1_context = nullptr;
-
-          if (header.channel_index() == pi2_timestamp_channel) {
-            ASSERT_TRUE(pi2_timestamp_on_pi2_fetcher.FetchNext());
-            ASSERT_TRUE(pi2_timestamp_on_pi1_fetcher.FetchNext());
-            pi2_context = &pi2_timestamp_on_pi2_fetcher.context();
-            pi1_context = &pi2_timestamp_on_pi1_fetcher.context();
-          } else if (header.channel_index() == pong_timestamp_channel) {
-            ASSERT_TRUE(pong_on_pi2_fetcher.FetchNext());
-            ASSERT_TRUE(pong_on_pi1_fetcher.FetchNext());
-            pi2_context = &pong_on_pi2_fetcher.context();
-            pi1_context = &pong_on_pi1_fetcher.context();
-          } else {
-            LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
-                       << configuration::CleanedChannelToString(
-                              pi2_event_loop->configuration()->channels()->Get(
-                                  header.channel_index()));
-          }
-
-          ASSERT_TRUE(header.has_boot_uuid());
-          EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
-                    pi1_event_loop->boot_uuid());
-
-          EXPECT_EQ(pi2_context->queue_index, header.remote_queue_index());
-          EXPECT_EQ(pi1_context->remote_queue_index,
-                    header.remote_queue_index());
-          EXPECT_EQ(pi1_context->queue_index, header.queue_index());
-
-          EXPECT_EQ(pi1_context->monotonic_event_time,
-                    header_monotonic_sent_time);
-          EXPECT_EQ(pi1_context->realtime_event_time,
-                    header_realtime_sent_time);
-          EXPECT_EQ(pi1_context->realtime_remote_time,
-                    header_realtime_remote_time);
-          EXPECT_EQ(pi1_context->monotonic_remote_time,
-                    header_monotonic_remote_time);
-
-          EXPECT_EQ(pi2_context->realtime_event_time,
-                    header_realtime_remote_time);
-          EXPECT_EQ(pi2_context->monotonic_event_time,
-                    header_monotonic_remote_time);
-
-          // Time estimation isn't perfect, but we know the clocks were
-          // identical when logged, so we know when this should have come back:
-          // one network delay each way plus the send delay.  Confirm we got it
-          // when we expected.
-          EXPECT_EQ(pi2_event_loop->context().monotonic_event_time,
-                    pi2_context->monotonic_event_time + 2 * network_delay +
-                        send_delay);
-        });
-  }
-
-  // And confirm we can re-create a log again, while checking the contents.
-  {
-    LoggerState pi1_logger = MakeLogger(
-        log_reader_factory.GetNodeEventLoopFactory("pi1"), &log_reader_factory);
-    LoggerState pi2_logger = MakeLogger(
-        log_reader_factory.GetNodeEventLoopFactory("pi2"), &log_reader_factory);
-
-    StartLogger(&pi1_logger, tmp_dir_ + "/relogged1");
-    StartLogger(&pi2_logger, tmp_dir_ + "/relogged2");
-
-    log_reader_factory.Run();
-  }
-
-  reader.Deregister();
-
-  // And verify that we can run the LogReader over the relogged files without
-  // hitting any fatal errors.
-  {
-    LogReader relogged_reader(SortParts(MakeLogFiles(
-        tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2", 3, 3, true)));
-    relogged_reader.Register();
-
-    relogged_reader.event_loop_factory()->Run();
-  }
-  // And confirm that we can read the logged file using the reader's
-  // configuration.
-  {
-    LogReader relogged_reader(
-        SortParts(MakeLogFiles(tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2",
-                               3, 3, true)),
-        reader.configuration());
-    relogged_reader.Register();
-
-    relogged_reader.event_loop_factory()->Run();
-  }
-}
-
-// Tests that we properly populate and extract the logger_start time by setting
-// up a clock difference between 2 nodes and looking at the resulting parts.
-TEST_P(MultinodeLoggerTest, LoggerStartTime) {
-  std::vector<std::string> actual_filenames;
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
-    pi1_logger.AppendAllFilenames(&actual_filenames);
-    pi2_logger.AppendAllFilenames(&actual_filenames);
-  }
-
-  ASSERT_THAT(actual_filenames,
-              ::testing::UnorderedElementsAreArray(logfiles_));
-
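-  // Parts logged by a node for itself have no meaningful logger start time
-  // (min_time).  Parts logged on behalf of the other node should have a logger
-  // start time offset from the part start time by the 1000 s clock difference
-  // injected above.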
-  for (const LogFile &log_file : SortParts(logfiles_)) {
-    for (const LogParts &log_part : log_file.parts) {
-      if (log_part.node == log_file.logger_node) {
-        EXPECT_EQ(log_part.logger_monotonic_start_time,
-                  aos::monotonic_clock::min_time);
-        EXPECT_EQ(log_part.logger_realtime_start_time,
-                  aos::realtime_clock::min_time);
-      } else {
-        const chrono::seconds offset = log_file.logger_node == "pi1"
-                                           ? -chrono::seconds(1000)
-                                           : chrono::seconds(1000);
-        EXPECT_EQ(log_part.logger_monotonic_start_time,
-                  log_part.monotonic_start_time + offset);
-        EXPECT_EQ(log_part.logger_realtime_start_time,
-                  log_file.realtime_start_time +
-                      (log_part.logger_monotonic_start_time -
-                       log_file.monotonic_start_time));
-      }
-    }
-  }
-}
-
-// Test that renaming the log base renames the folder.
-TEST_P(MultinodeLoggerTest, LoggerRenameFolder) {
-  util::UnlinkRecursive(tmp_dir_ + "/renamefolder");
-  util::UnlinkRecursive(tmp_dir_ + "/new-good");
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  logfile_base1_ = tmp_dir_ + "/renamefolder/multi_logfile1";
-  logfile_base2_ = tmp_dir_ + "/renamefolder/multi_logfile2";
-  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
-  LoggerState pi1_logger = MakeLogger(pi1_);
-  LoggerState pi2_logger = MakeLogger(pi2_);
-
-  StartLogger(&pi1_logger);
-  StartLogger(&pi2_logger);
-
-  event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  logfile_base1_ = tmp_dir_ + "/new-good/multi_logfile1";
-  logfile_base2_ = tmp_dir_ + "/new-good/multi_logfile2";
-  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
-  ASSERT_TRUE(pi1_logger.logger->RenameLogBase(logfile_base1_));
-  ASSERT_TRUE(pi2_logger.logger->RenameLogBase(logfile_base2_));
-  for (auto &file : logfiles_) {
-    struct stat s;
-    EXPECT_EQ(0, stat(file.c_str(), &s));
-  }
-}
-
-// Test that renaming the file base (not just the folder) dies.
-TEST_P(MultinodeLoggerDeathTest, LoggerRenameFile) {
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  util::UnlinkRecursive(tmp_dir_ + "/renamefile");
-  logfile_base1_ = tmp_dir_ + "/renamefile/multi_logfile1";
-  logfile_base2_ = tmp_dir_ + "/renamefile/multi_logfile2";
-  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
-  LoggerState pi1_logger = MakeLogger(pi1_);
-  StartLogger(&pi1_logger);
-  event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  logfile_base1_ = tmp_dir_ + "/new-renamefile/new_multi_logfile1";
-  EXPECT_DEATH({ pi1_logger.logger->RenameLogBase(logfile_base1_); },
-               "Rename of file base from");
-}
-
-// TODO(austin): We can write a test which recreates a logfile and confirms that
-// we get it back.  That is the ultimate test.
-
-// Tests that we properly handle a remote node rebooting while the logger is
-// running, and that the resulting log parts record the expected boot UUIDs and
-// start times.
-TEST_P(MultinodeLoggerTest, RemoteReboot) {
-  std::vector<std::string> actual_filenames;
-
-  const UUID pi1_boot0 = UUID::Random();
-  const UUID pi2_boot0 = UUID::Random();
-  const UUID pi2_boot1 = UUID::Random();
-  {
-    CHECK_EQ(pi1_index_, 0u);
-    CHECK_EQ(pi2_index_, 1u);
-
-    time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
-    time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
-    time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
-
-    time_converter_.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch()});
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
-    time_converter_.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp::epoch() + reboot_time,
-         BootTimestamp{
-             .boot = 1,
-             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
-  }
-
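-  // pi2 reboots 10.1 s into the run (distributed time), and its second boot
-  // comes up with its monotonic clock already at 1.323 s.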
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
-              pi1_boot0);
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
-              pi2_boot0);
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
-    VLOG(1) << "Reboot now!";
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
-              pi1_boot0);
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
-              pi2_boot1);
-
-    pi1_logger.AppendAllFilenames(&actual_filenames);
-  }
-
-  std::sort(actual_filenames.begin(), actual_filenames.end());
-  std::sort(pi1_reboot_logfiles_.begin(), pi1_reboot_logfiles_.end());
-  ASSERT_THAT(actual_filenames,
-              ::testing::UnorderedElementsAreArray(pi1_reboot_logfiles_));
-
-  // Confirm that our new oldest timestamps properly update as we reboot and
-  // rotate.
-  for (const std::string &file : pi1_reboot_logfiles_) {
-    std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
-        ReadHeader(file);
-    CHECK(log_header);
-    if (log_header->message().has_configuration()) {
-      continue;
-    }
-
-    const monotonic_clock::time_point monotonic_start_time =
-        monotonic_clock::time_point(
-            chrono::nanoseconds(log_header->message().monotonic_start_time()));
-    const UUID source_node_boot_uuid = UUID::FromString(
-        log_header->message().source_node_boot_uuid()->string_view());
-
-    if (log_header->message().node()->name()->string_view() != "pi1") {
-      // The remote message channel should rotate later and have more parts.
-      // This is only true on the log files with shared remote messages.
-      //
-      // TODO(austin): I'm not the most thrilled with this test pattern...  It
-      // feels brittle in a different way.
-      if (file.find("aos.message_bridge.RemoteMessage") == std::string::npos ||
-          !shared()) {
-        switch (log_header->message().parts_index()) {
-          case 0:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 1:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            ASSERT_EQ(monotonic_start_time,
-                      monotonic_clock::epoch() + chrono::seconds(1));
-            break;
-          case 2:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
-            break;
-          case 3:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
-                                                chrono::nanoseconds(2322999462))
-                << " on " << file;
-            break;
-          default:
-            FAIL();
-            break;
-        }
-      } else {
-        switch (log_header->message().parts_index()) {
-          case 0:
-          case 1:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 2:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            ASSERT_EQ(monotonic_start_time,
-                      monotonic_clock::epoch() + chrono::seconds(1));
-            break;
-          case 3:
-          case 4:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
-            break;
-          case 5:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
-                                                chrono::nanoseconds(2322999462))
-                << " on " << file;
-            break;
-          default:
-            FAIL();
-            break;
-        }
-      }
-      continue;
-    }
-    SCOPED_TRACE(file);
-    SCOPED_TRACE(aos::FlatbufferToJson(
-        *log_header, {.multi_line = true, .max_vector_size = 100}));
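-    // The oldest timestamp vectors have one entry per node.  Entry 0 is pi1's
-    // own slot and should stay unset (max_time); entry 1 (pi2) is pulled out
-    // and checked against the expected values for each parts_index below.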
-    ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
-    ASSERT_EQ(
-        log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
-    EXPECT_EQ(
-        log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
-        monotonic_clock::max_time.time_since_epoch().count());
-    ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
-    ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
-              2u);
-    EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_remote_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_remote_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    EXPECT_EQ(log_header->message()
-                  .oldest_remote_unreliable_monotonic_timestamps()
-                  ->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_local_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_local_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    EXPECT_EQ(log_header->message()
-                  .oldest_local_unreliable_monotonic_timestamps()
-                  ->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-
-    const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
-        monotonic_clock::time_point(chrono::nanoseconds(
-            log_header->message().oldest_remote_monotonic_timestamps()->Get(
-                1)));
-    const monotonic_clock::time_point oldest_local_monotonic_timestamps =
-        monotonic_clock::time_point(chrono::nanoseconds(
-            log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
-    const monotonic_clock::time_point
-        oldest_remote_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_remote_unreliable_monotonic_timestamps()
-                    ->Get(1)));
-    const monotonic_clock::time_point
-        oldest_local_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_local_unreliable_monotonic_timestamps()
-                    ->Get(1)));
-    const monotonic_clock::time_point
-        oldest_remote_reliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_remote_reliable_monotonic_timestamps()
-                    ->Get(1)));
-    const monotonic_clock::time_point
-        oldest_local_reliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_local_reliable_monotonic_timestamps()
-                    ->Get(1)));
-    const monotonic_clock::time_point
-        oldest_logger_remote_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_logger_remote_unreliable_monotonic_timestamps()
-                    ->Get(0)));
-    const monotonic_clock::time_point
-        oldest_logger_local_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_logger_local_unreliable_monotonic_timestamps()
-                    ->Get(0)));
-    EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
-              monotonic_clock::max_time);
-    EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
-              monotonic_clock::max_time);
-    switch (log_header->message().parts_index()) {
-      case 0:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        break;
-      case 1:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90200)));
-        EXPECT_EQ(oldest_local_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90350)));
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90200)));
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90350)));
-        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        break;
-      case 2:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90200)))
-            << file;
-        EXPECT_EQ(oldest_local_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90350)))
-            << file;
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90200)))
-            << file;
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(90350)))
-            << file;
-        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(100000)))
-            << file;
-        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(100150)))
-            << file;
-        break;
-      case 3:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::milliseconds(1323) +
-                                              chrono::microseconds(200)));
-        EXPECT_EQ(oldest_local_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(10100350)));
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::milliseconds(1323) +
-                                              chrono::microseconds(200)));
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(10100350)));
-        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time)
-            << file;
-        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                  monotonic_clock::max_time)
-            << file;
-        break;
-      case 4:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::milliseconds(1323) +
-                                              chrono::microseconds(200)));
-        EXPECT_EQ(oldest_local_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(10100350)));
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::milliseconds(1323) +
-                                              chrono::microseconds(200)));
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(10100350)));
-        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(1423000)))
-            << file;
-        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                  monotonic_clock::time_point(chrono::microseconds(10200150)))
-            << file;
-        break;
-      default:
-        FAIL();
-        break;
-    }
-  }
-
-  // Confirm that we refuse to replay logs with missing boot uuids.
-  {
-    LogReader reader(SortParts(pi1_reboot_logfiles_));
-
-    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-    log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-    // This sends out the fetched messages and advances time to the start of
-    // the log file.
-    reader.Register(&log_reader_factory);
-
-    log_reader_factory.Run();
-
-    reader.Deregister();
-  }
-}
-
-// Tests that we can sort a log which only has timestamps from the remote
-// because the local message_bridge_client failed to connect.
-TEST_P(MultinodeLoggerTest, RemoteRebootOnlyTimestamps) {
-  const UUID pi1_boot0 = UUID::Random();
-  const UUID pi2_boot0 = UUID::Random();
-  const UUID pi2_boot1 = UUID::Random();
-  {
-    CHECK_EQ(pi1_index_, 0u);
-    CHECK_EQ(pi2_index_, 1u);
-
-    time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
-    time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
-    time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
-
-    time_converter_.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch()});
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
-    time_converter_.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp::epoch() + reboot_time,
-         BootTimestamp{
-             .boot = 1,
-             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
-  }
-  pi2_->Disconnect(pi1_->node());
-
-  std::vector<std::string> filenames;
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
-              pi1_boot0);
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
-              pi2_boot0);
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
-    VLOG(1) << "Reboot now!";
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
-              pi1_boot0);
-    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
-              pi2_boot1);
-    pi1_logger.AppendAllFilenames(&filenames);
-  }
-
-  std::sort(filenames.begin(), filenames.end());
-
-  // Confirm that our new oldest timestamps properly update as we reboot and
-  // rotate.
-  size_t timestamp_file_count = 0;
-  for (const std::string &file : filenames) {
-    std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
-        ReadHeader(file);
-    CHECK(log_header);
-
-    if (log_header->message().has_configuration()) {
-      continue;
-    }
-
-    const monotonic_clock::time_point monotonic_start_time =
-        monotonic_clock::time_point(
-            chrono::nanoseconds(log_header->message().monotonic_start_time()));
-    const UUID source_node_boot_uuid = UUID::FromString(
-        log_header->message().source_node_boot_uuid()->string_view());
-
-    ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
-    ASSERT_EQ(
-        log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
-    ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
-    ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
-              2u);
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_remote_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_remote_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_local_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_local_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_remote_reliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_remote_reliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    ASSERT_TRUE(
-        log_header->message().has_oldest_local_reliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_local_reliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-
-    ASSERT_TRUE(
-        log_header->message()
-            .has_oldest_logger_remote_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_logger_remote_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-    ASSERT_TRUE(log_header->message()
-                    .has_oldest_logger_local_unreliable_monotonic_timestamps());
-    ASSERT_EQ(log_header->message()
-                  .oldest_logger_local_unreliable_monotonic_timestamps()
-                  ->size(),
-              2u);
-
-    if (log_header->message().node()->name()->string_view() != "pi1") {
-      ASSERT_TRUE(file.find("aos.message_bridge.RemoteMessage") !=
-                  std::string::npos);
-
-      const std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
-          ReadNthMessage(file, 0);
-      CHECK(msg);
-
-      EXPECT_TRUE(msg->message().has_monotonic_sent_time());
-      EXPECT_TRUE(msg->message().has_monotonic_remote_time());
-
-      const monotonic_clock::time_point
-          expected_oldest_local_monotonic_timestamps(
-              chrono::nanoseconds(msg->message().monotonic_sent_time()));
-      const monotonic_clock::time_point
-          expected_oldest_remote_monotonic_timestamps(
-              chrono::nanoseconds(msg->message().monotonic_remote_time()));
-      const monotonic_clock::time_point
-          expected_oldest_timestamp_monotonic_timestamps(
-              chrono::nanoseconds(msg->message().monotonic_timestamp_time()));
-
-      EXPECT_NE(expected_oldest_local_monotonic_timestamps,
-                monotonic_clock::min_time);
-      EXPECT_NE(expected_oldest_remote_monotonic_timestamps,
-                monotonic_clock::min_time);
-      EXPECT_NE(expected_oldest_timestamp_monotonic_timestamps,
-                monotonic_clock::min_time);
-
-      ++timestamp_file_count;
-      // Since the log file is from the perspective of the other node, the
-      // oldest remote/local timestamps below are read from index 0 (the pi1
-      // slot).
-      const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
-          monotonic_clock::time_point(chrono::nanoseconds(
-              log_header->message().oldest_remote_monotonic_timestamps()->Get(
-                  0)));
-      const monotonic_clock::time_point oldest_local_monotonic_timestamps =
-          monotonic_clock::time_point(chrono::nanoseconds(
-              log_header->message().oldest_local_monotonic_timestamps()->Get(
-                  0)));
-      const monotonic_clock::time_point
-          oldest_remote_unreliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_remote_unreliable_monotonic_timestamps()
-                      ->Get(0)));
-      const monotonic_clock::time_point
-          oldest_local_unreliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_local_unreliable_monotonic_timestamps()
-                      ->Get(0)));
-      const monotonic_clock::time_point
-          oldest_remote_reliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_remote_reliable_monotonic_timestamps()
-                      ->Get(0)));
-      const monotonic_clock::time_point
-          oldest_local_reliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_local_reliable_monotonic_timestamps()
-                      ->Get(0)));
-      const monotonic_clock::time_point
-          oldest_logger_remote_unreliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_logger_remote_unreliable_monotonic_timestamps()
-                      ->Get(1)));
-      const monotonic_clock::time_point
-          oldest_logger_local_unreliable_monotonic_timestamps =
-              monotonic_clock::time_point(chrono::nanoseconds(
-                  log_header->message()
-                      .oldest_logger_local_unreliable_monotonic_timestamps()
-                      ->Get(1)));
-
-      const Channel *channel =
-          event_loop_factory_.configuration()->channels()->Get(
-              msg->message().channel_index());
-      const Connection *connection = configuration::ConnectionToNode(
-          channel, configuration::GetNode(
-                       event_loop_factory_.configuration(),
-                       log_header->message().node()->name()->string_view()));
-
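-      // A time_to_live of 0 marks the connection as reliable, which determines
-      // whether the reliable or unreliable oldest-timestamp fields are
-      // expected to be populated below.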
-      const bool reliable = connection->time_to_live() == 0;
-
-      SCOPED_TRACE(file);
-      SCOPED_TRACE(aos::FlatbufferToJson(
-          *log_header, {.multi_line = true, .max_vector_size = 100}));
-
-      if (shared()) {
-        // Confirm that the oldest timestamps match what we expect.  Based on
-        // what we are doing, we know that the oldest time is the first
-        // message's time.
-        //
-        // This makes the test robust to both the split and combined config
-        // tests.
-        switch (log_header->message().parts_index()) {
-          case 0:
-            EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                      expected_oldest_remote_monotonic_timestamps);
-            EXPECT_EQ(oldest_local_monotonic_timestamps,
-                      expected_oldest_local_monotonic_timestamps);
-            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
-                      expected_oldest_local_monotonic_timestamps)
-                << file;
-            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
-                      expected_oldest_timestamp_monotonic_timestamps)
-                << file;
-
-            if (reliable) {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-            } else {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-            }
-            break;
-          case 1:
-            EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                      monotonic_clock::epoch() + chrono::nanoseconds(90000000));
-            EXPECT_EQ(oldest_local_monotonic_timestamps,
-                      monotonic_clock::epoch() + chrono::nanoseconds(90150000));
-            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
-                      monotonic_clock::epoch() + chrono::nanoseconds(90150000));
-            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
-                      monotonic_clock::epoch() + chrono::nanoseconds(90250000));
-            if (reliable) {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-              EXPECT_EQ(
-                  oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(90000000));
-              EXPECT_EQ(
-                  oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(90150000));
-            } else {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-            }
-            break;
-          case 2:
-            EXPECT_EQ(
-                oldest_remote_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
-            EXPECT_EQ(
-                oldest_local_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
-            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
-                      expected_oldest_local_monotonic_timestamps)
-                << file;
-            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
-                      expected_oldest_timestamp_monotonic_timestamps)
-                << file;
-            if (reliable) {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-            } else {
-              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-            }
-            break;
-
-          case 3:
-            EXPECT_EQ(
-                oldest_remote_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
-            EXPECT_EQ(
-                oldest_local_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
-            EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                      expected_oldest_remote_monotonic_timestamps);
-            EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                      expected_oldest_local_monotonic_timestamps);
-            EXPECT_EQ(
-                oldest_logger_remote_unreliable_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
-            EXPECT_EQ(
-                oldest_logger_local_unreliable_monotonic_timestamps,
-                monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
-            break;
-          default:
-            FAIL();
-            break;
-        }
-
-        switch (log_header->message().parts_index()) {
-          case 0:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 1:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 2:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 3:
-            if (shared()) {
-              EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-              EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-              break;
-            }
-            [[fallthrough]];
-          default:
-            FAIL();
-            break;
-        }
-      } else {
-        switch (log_header->message().parts_index()) {
-          case 0:
-            if (reliable) {
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(
-                  oldest_logger_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(100150000))
-                  << file;
-              EXPECT_EQ(
-                  oldest_logger_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(100250000))
-                  << file;
-            } else {
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-              EXPECT_EQ(
-                  oldest_logger_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(90150000))
-                  << file;
-              EXPECT_EQ(
-                  oldest_logger_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(90250000))
-                  << file;
-            }
-            break;
-          case 1:
-            if (reliable) {
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        monotonic_clock::max_time);
-              EXPECT_EQ(
-                  oldest_logger_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
-              EXPECT_EQ(
-                  oldest_logger_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
-            } else {
-              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                        expected_oldest_remote_monotonic_timestamps);
-              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                        expected_oldest_local_monotonic_timestamps);
-              EXPECT_EQ(
-                  oldest_logger_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(1323150000));
-              EXPECT_EQ(
-                  oldest_logger_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::epoch() + chrono::nanoseconds(10100250000));
-            }
-            break;
-          default:
-            FAIL();
-            break;
-        }
-
-        switch (log_header->message().parts_index()) {
-          case 0:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          case 1:
-            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
-            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
-            break;
-          default:
-            FAIL();
-            break;
-        }
-      }
-
-      continue;
-    }
-    EXPECT_EQ(
-        log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
-        monotonic_clock::max_time.time_since_epoch().count());
-    EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-    EXPECT_EQ(log_header->message()
-                  .oldest_remote_unreliable_monotonic_timestamps()
-                  ->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-    EXPECT_EQ(log_header->message()
-                  .oldest_local_unreliable_monotonic_timestamps()
-                  ->Get(0),
-              monotonic_clock::max_time.time_since_epoch().count());
-
-    const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
-        monotonic_clock::time_point(chrono::nanoseconds(
-            log_header->message().oldest_remote_monotonic_timestamps()->Get(
-                1)));
-    const monotonic_clock::time_point oldest_local_monotonic_timestamps =
-        monotonic_clock::time_point(chrono::nanoseconds(
-            log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
-    const monotonic_clock::time_point
-        oldest_remote_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_remote_unreliable_monotonic_timestamps()
-                    ->Get(1)));
-    const monotonic_clock::time_point
-        oldest_local_unreliable_monotonic_timestamps =
-            monotonic_clock::time_point(chrono::nanoseconds(
-                log_header->message()
-                    .oldest_local_unreliable_monotonic_timestamps()
-                    ->Get(1)));
-    switch (log_header->message().parts_index()) {
-      case 0:
-        EXPECT_EQ(oldest_remote_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
-        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
-                  monotonic_clock::max_time);
-        break;
-      default:
-        FAIL();
-        break;
-    }
-  }
-
-  // Both the shared and split configurations produce four timestamp files.
-  EXPECT_EQ(timestamp_file_count, 4u);
-
-  // Confirm that we can actually sort the resulting log and read it.
-  {
-    LogReader reader(SortParts(filenames));
-
-    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-    log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-    // This sends out the fetched messages and advances time to the start of
-    // the log file.
-    reader.Register(&log_reader_factory);
-
-    log_reader_factory.Run();
-
-    reader.Deregister();
-  }
-}
-
-// Tests that we properly handle one direction of message_bridge being
-// unavailable.
-TEST_P(MultinodeLoggerTest, OneDirectionWithNegativeSlope) {
-  pi1_->Disconnect(pi2_->node());
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-
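-  // The second segment advances pi1's clock by 10 seconds but pi2's by only
-  // 9.999 seconds, so pi2's clock runs with a slightly negative slope relative
-  // to pi1.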
-  time_converter_.AddMonotonic(
-      {chrono::milliseconds(10000),
-       chrono::milliseconds(10000) - chrono::milliseconds(1)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  }
-
-  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
-  // to confirm the right thing happened.
-  ConfirmReadable(pi1_single_direction_logfiles_);
-}
-
-// Tests that we properly handle one direction of message_bridge being
-// unavailable.
-TEST_P(MultinodeLoggerTest, OneDirectionWithPositiveSlope) {
-  pi1_->Disconnect(pi2_->node());
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(500)});
-
-  time_converter_.AddMonotonic(
-      {chrono::milliseconds(10000),
-       chrono::milliseconds(10000) + chrono::milliseconds(1)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  }
-
-  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
-  // to confirm the right thing happened.
-  ConfirmReadable(pi1_single_direction_logfiles_);
-}
-
-// Tests that we explode, with a better error than an out-of-order error, if
-// someone passes in a part file twice.
-TEST_P(MultinodeLoggerTest, DuplicateLogFiles) {
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  }
-
-  std::vector<std::string> duplicates;
-  for (const std::string &f : pi1_single_direction_logfiles_) {
-    duplicates.emplace_back(f);
-    duplicates.emplace_back(f);
-  }
-  EXPECT_DEATH({ SortParts(duplicates); }, "Found duplicate parts in");
-}
-
-// Tests that we explode if someone loses a part out of the middle of a log.
-TEST_P(MultinodeLoggerTest, MissingPartsFromMiddle) {
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
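-    // Rotate the log roughly every 5 simulated seconds so the run below
-    // produces multiple part files to drop from.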
-    aos::monotonic_clock::time_point last_rotation_time =
-        pi1_logger.event_loop->monotonic_now();
-    pi1_logger.logger->set_on_logged_period([&] {
-      const auto now = pi1_logger.event_loop->monotonic_now();
-      if (now > last_rotation_time + std::chrono::seconds(5)) {
-        pi1_logger.logger->Rotate();
-        last_rotation_time = now;
-      }
-    });
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  }
-
-  std::vector<std::string> missing_parts;
-
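-  // Include part0 and part2, but deliberately leave out part1 to simulate
-  // losing a part from the middle of the log.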
-  missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
-  missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
-  missing_parts.emplace_back(absl::StrCat(
-      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
-
-  EXPECT_DEATH({ SortParts(missing_parts); },
-               "Broken log, missing part files between");
-}
-
-// Tests that we properly handle a dead node.  Do this by just disconnecting it
-// and only using one node's logs.
-TEST_P(MultinodeLoggerTest, DeadNode) {
-  pi1_->Disconnect(pi2_->node());
-  pi2_->Disconnect(pi1_->node());
-  time_converter_.AddMonotonic(
-      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(10000));
-  }
-
-  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
-  // to confirm the right thing happened.
-  ConfirmReadable(MakePi1DeadNodeLogfiles());
-}
-
-constexpr std::string_view kCombinedConfigSha1(
-    "5d73fe35bacaa59d24f8f0c1a806fe10b783b0fcc80809ee30a9db824e82538b");
-constexpr std::string_view kSplitConfigSha1(
-    "f25e8f6f90d61f41c41517e652300566228b077e44cd86f1af2af4a9bed31ad4");
-constexpr std::string_view kReloggedSplitConfigSha1(
-    "f1fabd629bdf8735c3d81bc791d7a454e8e636951c26cba6426545cbc97f911f");
-
-INSTANTIATE_TEST_SUITE_P(
-    All, MultinodeLoggerTest,
-    ::testing::Combine(
-        ::testing::Values(
-            ConfigParams{"multinode_pingpong_combined_config.json", true,
-                         kCombinedConfigSha1, kCombinedConfigSha1},
-            ConfigParams{"multinode_pingpong_split_config.json", false,
-                         kSplitConfigSha1, kReloggedSplitConfigSha1}),
-        ::testing::ValuesIn(SupportedCompressionAlgorithms())));
-
-INSTANTIATE_TEST_SUITE_P(
-    All, MultinodeLoggerDeathTest,
-    ::testing::Combine(
-        ::testing::Values(
-            ConfigParams{"multinode_pingpong_combined_config.json", true,
-                         kCombinedConfigSha1, kCombinedConfigSha1},
-            ConfigParams{"multinode_pingpong_split_config.json", false,
-                         kSplitConfigSha1, kReloggedSplitConfigSha1}),
-        ::testing::ValuesIn(SupportedCompressionAlgorithms())));
-
-// Tests that we can relog with a different config.  This makes most sense when
-// you are trying to edit a log and want to use channel renaming + the original
-// config in the new log.
-TEST_P(MultinodeLoggerTest, LogDifferentConfig) {
-  time_converter_.StartEqual();
-  {
-    LoggerState pi1_logger = MakeLogger(pi1_);
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(95));
-
-    StartLogger(&pi1_logger);
-    StartLogger(&pi2_logger);
-
-    event_loop_factory_.RunFor(chrono::milliseconds(20000));
-  }
-
-  LogReader reader(SortParts(logfiles_));
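-  // Remapping the logged Ping channel gives the reader a configuration which
-  // differs from the one the log was recorded with, which is the case this
-  // test is exercising.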
-  reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
-
-  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
-  log_reader_factory.set_send_delay(chrono::microseconds(0));
-
-  // This sends out the fetched messages and advances time to the start of the
-  // log file.
-  reader.Register(&log_reader_factory);
-
-  const Node *pi1 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi1");
-  const Node *pi2 =
-      configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
-  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
-  LOG(INFO) << "now pi1 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
-  LOG(INFO) << "now pi2 "
-            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
-  EXPECT_THAT(reader.LoggedNodes(),
-              ::testing::ElementsAre(
-                  configuration::GetNode(reader.logged_configuration(), pi1),
-                  configuration::GetNode(reader.logged_configuration(), pi2)));
-
-  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
-  // And confirm we can re-create a log again, while checking the contents.
-  std::vector<std::string> log_files;
-  {
-    LoggerState pi1_logger =
-        MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi1"),
-                   &log_reader_factory, reader.logged_configuration());
-    LoggerState pi2_logger =
-        MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi2"),
-                   &log_reader_factory, reader.logged_configuration());
-
-    pi1_logger.StartLogger(tmp_dir_ + "/relogged1");
-    pi2_logger.StartLogger(tmp_dir_ + "/relogged2");
-
-    log_reader_factory.Run();
-
-    for (auto &x : pi1_logger.log_namer->all_filenames()) {
-      log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged1_", x));
-    }
-    for (auto &x : pi2_logger.log_namer->all_filenames()) {
-      log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged2_", x));
-    }
-  }
-
-  reader.Deregister();
-
-  // And verify that we can run the LogReader over the relogged files without
-  // hitting any fatal errors.
-  {
-    LogReader relogged_reader(SortParts(log_files));
-    relogged_reader.Register();
-
-    relogged_reader.event_loop_factory()->Run();
-  }
-}
-
-// Tests that we properly replay a log where the start time for a node is before
-// any data on the node.  This can happen if the logger starts before data is
-// published.  While the scenario below is a bit convoluted, we have seen logs
-// like this generated out in the wild.
-TEST(MultinodeRebootLoggerTest, StartTimeBeforeData) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split3_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  NodeEventLoopFactory *const pi3 =
-      event_loop_factory.GetNodeEventLoopFactory("pi3");
-  const size_t pi3_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi3->node());
-
-  const std::string kLogfile1_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile1/";
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  const std::string kLogfile2_2 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
-  const std::string kLogfile3_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile3/";
-  util::UnlinkRecursive(kLogfile1_1);
-  util::UnlinkRecursive(kLogfile2_1);
-  util::UnlinkRecursive(kLogfile2_2);
-  util::UnlinkRecursive(kLogfile3_1);
-  const UUID pi1_boot0 = UUID::Random();
-  const UUID pi2_boot0 = UUID::Random();
-  const UUID pi2_boot1 = UUID::Random();
-  const UUID pi3_boot0 = UUID::Random();
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-    CHECK_EQ(pi3_index, 2u);
-
-    time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
-    time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
-    time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
-    time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch(),
-         BootTimestamp::epoch()});
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(20000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp::epoch() + reboot_time,
-         BootTimestamp{
-             .boot = 1,
-             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  // Make everything perfectly quiet.
-  event_loop_factory.SkipTimingReport();
-  event_loop_factory.DisableStatistics();
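-  // With timing reports and statistics off, nothing publishes until data is
-  // explicitly turned on below, which is what leaves the logger start time
-  // before any data.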
-
-  std::vector<std::string> filenames;
-  {
-    LoggerState pi1_logger = LoggerState::MakeLogger(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    LoggerState pi3_logger = LoggerState::MakeLogger(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    {
-      // And now start the logger.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-      event_loop_factory.RunFor(chrono::milliseconds(1000));
-
-      pi1_logger.StartLogger(kLogfile1_1);
-      pi3_logger.StartLogger(kLogfile3_1);
-      pi2_logger.StartLogger(kLogfile2_1);
-
-      event_loop_factory.RunFor(chrono::milliseconds(10000));
-
-      // Now that we've got a start time in the past, turn on data.
-      event_loop_factory.EnableStatistics();
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-
-      pi2->AlwaysStart<Pong>("pong");
-
-      event_loop_factory.RunFor(chrono::milliseconds(3000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-
-      // Stop logging on pi2 before rebooting and completely shut off all
-      // messages on pi2.
-      pi2->DisableStatistics();
-      pi1->Disconnect(pi2->node());
-      pi2->Disconnect(pi1->node());
-    }
-    event_loop_factory.RunFor(chrono::milliseconds(7000));
-    // pi2 now reboots.
-    {
-      event_loop_factory.RunFor(chrono::milliseconds(1000));
-
-      // Start logging again on pi2 after it is up.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-      pi2_logger.StartLogger(kLogfile2_2);
-
-      event_loop_factory.RunFor(chrono::milliseconds(10000));
-      // And, now that we have a start time in the log, turn data back on.
-      pi2->EnableStatistics();
-      pi1->Connect(pi2->node());
-      pi2->Connect(pi1->node());
-
-      pi2->AlwaysStart<Pong>("pong");
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-
-      event_loop_factory.RunFor(chrono::milliseconds(3000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-    }
-
-    pi1_logger.AppendAllFilenames(&filenames);
-    pi3_logger.AppendAllFilenames(&filenames);
-  }
-
-  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
-  // to confirm the right thing happened.
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  auto result = ConfirmReadable(filenames);
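-  // The ranges checked below are per node: result[0] (pi1) and result[2] (pi3)
-  // each cover a single boot, while result[1] (pi2) rebooted and reports a
-  // start/end pair for each of its two boots.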
-  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch() +
-                                                      chrono::seconds(1)));
-  EXPECT_THAT(result[0].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(34990350)));
-
-  EXPECT_THAT(result[1].first,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::seconds(1),
-                  realtime_clock::epoch() + chrono::microseconds(3323000)));
-  EXPECT_THAT(result[1].second,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::microseconds(13990200),
-                  realtime_clock::epoch() + chrono::microseconds(16313200)));
-
-  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch() +
-                                                      chrono::seconds(1)));
-  EXPECT_THAT(result[2].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(34900150)));
-}
-
-// Tests that local data which lands before remote data after a reboot is
-// properly replayed.  While solving the timestamp problem, we only trigger a
-// reboot in the timestamp interpolation function once we actually have a point
-// in the function.  This originally only happened when a point passed the
-// noncausal filter.  At the start of time for the second boot, if we aren't
-// careful, we will have messages which need to be published at times before
-// the boot.  This happens when a local message is in the log before a
-// forwarded message, so there is no point in the interpolation function yet.
-// This delays the reboot.  So, we need to recreate that situation and make
-// sure it doesn't come back.
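-//
-// The sequence below: start logging everywhere, disconnect pi2, reboot it at 5
-// seconds (its second boot starts 100 seconds later on its own clock),
-// generate local-only ping/pong traffic, restart the pi2 logger, and only then
-// reconnect pi1 and pi2.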
-TEST(MultinodeRebootLoggerTest,
-     LocalMessageBeforeRemoteBeforeStartAfterReboot) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split3_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  NodeEventLoopFactory *const pi3 =
-      event_loop_factory.GetNodeEventLoopFactory("pi3");
-  const size_t pi3_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi3->node());
-
-  const std::string kLogfile1_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile1/";
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  const std::string kLogfile2_2 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
-  const std::string kLogfile3_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile3/";
-  util::UnlinkRecursive(kLogfile1_1);
-  util::UnlinkRecursive(kLogfile2_1);
-  util::UnlinkRecursive(kLogfile2_2);
-  util::UnlinkRecursive(kLogfile3_1);
-  const UUID pi1_boot0 = UUID::Random();
-  const UUID pi2_boot0 = UUID::Random();
-  const UUID pi2_boot1 = UUID::Random();
-  const UUID pi3_boot0 = UUID::Random();
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-    CHECK_EQ(pi3_index, 2u);
-
-    time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
-    time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
-    time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
-    time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch(),
-         BootTimestamp::epoch()});
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp::epoch() + reboot_time,
-         BootTimestamp{.boot = 1,
-                       .time = monotonic_clock::epoch() + reboot_time +
-                               chrono::seconds(100)},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  std::vector<std::string> filenames;
-  {
-    LoggerState pi1_logger = LoggerState::MakeLogger(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    LoggerState pi3_logger = LoggerState::MakeLogger(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    {
-      // And now start the logger.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-      pi1_logger.StartLogger(kLogfile1_1);
-      pi3_logger.StartLogger(kLogfile3_1);
-      pi2_logger.StartLogger(kLogfile2_1);
-
-      event_loop_factory.RunFor(chrono::milliseconds(1005));
-
-      // Now that we've got a start time in the past, turn on data.
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-
-      pi2->AlwaysStart<Pong>("pong");
-
-      event_loop_factory.RunFor(chrono::milliseconds(3000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-
-      // Disable any remote messages on pi2.
-      pi1->Disconnect(pi2->node());
-      pi2->Disconnect(pi1->node());
-    }
-    event_loop_factory.RunFor(chrono::milliseconds(995));
-    // pi2 now reboots at 5 seconds.
-    {
-      event_loop_factory.RunFor(chrono::milliseconds(1000));
-
-      // Make local stuff happen before we start logging and connect the remote.
-      pi2->AlwaysStart<Pong>("pong");
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-      event_loop_factory.RunFor(chrono::milliseconds(1005));
-
-      // Start logging again on pi2 after it is up.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-      pi2_logger.StartLogger(kLogfile2_2);
-
-      // And allow remote messages now that we have some local ones.
-      pi1->Connect(pi2->node());
-      pi2->Connect(pi1->node());
-
-      event_loop_factory.RunFor(chrono::milliseconds(1000));
-
-      event_loop_factory.RunFor(chrono::milliseconds(3000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-    }
-
-    pi1_logger.AppendAllFilenames(&filenames);
-    pi3_logger.AppendAllFilenames(&filenames);
-  }
-
-  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
-  // to confirm the right thing happened.
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  auto result = ConfirmReadable(filenames);
-
-  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
-  EXPECT_THAT(result[0].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(11000350)));
-
-  EXPECT_THAT(result[1].first,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch(),
-                  realtime_clock::epoch() + chrono::microseconds(107005000)));
-  EXPECT_THAT(result[1].second,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::microseconds(4000150),
-                  realtime_clock::epoch() + chrono::microseconds(111000200)));
-
-  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
-  EXPECT_THAT(result[2].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(11000150)));
-
-  auto start_stop_result = ConfirmReadable(
-      filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
-      realtime_clock::epoch() + chrono::milliseconds(3000));
-
-  EXPECT_THAT(
-      start_stop_result[0].first,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
-  EXPECT_THAT(
-      start_stop_result[0].second,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
-  EXPECT_THAT(
-      start_stop_result[1].first,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
-  EXPECT_THAT(
-      start_stop_result[1].second,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
-  EXPECT_THAT(
-      start_stop_result[2].first,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
-  EXPECT_THAT(
-      start_stop_result[2].second,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
-}
-
-// Tests that setting the start and stop flags across a reboot works as
-// expected.
-TEST(MultinodeRebootLoggerTest, RebootStartStopTimes) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split3_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  NodeEventLoopFactory *const pi3 =
-      event_loop_factory.GetNodeEventLoopFactory("pi3");
-  const size_t pi3_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi3->node());
-
-  const std::string kLogfile1_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile1/";
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  const std::string kLogfile2_2 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
-  const std::string kLogfile3_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile3/";
-  util::UnlinkRecursive(kLogfile1_1);
-  util::UnlinkRecursive(kLogfile2_1);
-  util::UnlinkRecursive(kLogfile2_2);
-  util::UnlinkRecursive(kLogfile3_1);
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-    CHECK_EQ(pi3_index, 2u);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch(),
-         BootTimestamp::epoch()});
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp::epoch() + reboot_time,
-         BootTimestamp{.boot = 1,
-                       .time = monotonic_clock::epoch() + reboot_time},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  std::vector<std::string> filenames;
-  {
-    LoggerState pi1_logger = LoggerState::MakeLogger(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    LoggerState pi3_logger = LoggerState::MakeLogger(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    {
-      // And now start the logger.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-      pi1_logger.StartLogger(kLogfile1_1);
-      pi3_logger.StartLogger(kLogfile3_1);
-      pi2_logger.StartLogger(kLogfile2_1);
-
-      event_loop_factory.RunFor(chrono::milliseconds(1005));
-
-      // Now that we've got a start time in the past, turn on data.
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-
-      pi2->AlwaysStart<Pong>("pong");
-
-      event_loop_factory.RunFor(chrono::milliseconds(3000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-    }
-    event_loop_factory.RunFor(chrono::milliseconds(995));
-    // pi2 now reboots at 5 seconds.
-    {
-      event_loop_factory.RunFor(chrono::milliseconds(1000));
-
-      // Make local stuff happen before we start logging and connect the remote.
-      pi2->AlwaysStart<Pong>("pong");
-      std::unique_ptr<aos::EventLoop> ping_event_loop =
-          pi1->MakeEventLoop("ping");
-      Ping ping(ping_event_loop.get());
-      event_loop_factory.RunFor(chrono::milliseconds(5));
-
-      // Start logging again on pi2 after it is up.
-      LoggerState pi2_logger = LoggerState::MakeLogger(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-      pi2_logger.StartLogger(kLogfile2_2);
-
-      event_loop_factory.RunFor(chrono::milliseconds(5000));
-
-      pi2_logger.AppendAllFilenames(&filenames);
-    }
-
-    pi1_logger.AppendAllFilenames(&filenames);
-    pi3_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  auto result = ConfirmReadable(filenames);
-
-  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
-  EXPECT_THAT(result[0].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(11000350)));
-
-  EXPECT_THAT(result[1].first,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch(),
-                  realtime_clock::epoch() + chrono::microseconds(6005000)));
-  EXPECT_THAT(result[1].second,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::microseconds(4900150),
-                  realtime_clock::epoch() + chrono::microseconds(11000200)));
-
-  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
-  EXPECT_THAT(result[2].second,
-              ::testing::ElementsAre(realtime_clock::epoch() +
-                                     chrono::microseconds(11000150)));
-
-  // Confirm we observed the correct start and stop times.  We should see the
-  // reboot here.
-  auto start_stop_result = ConfirmReadable(
-      filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
-      realtime_clock::epoch() + chrono::milliseconds(8000));
-
-  EXPECT_THAT(
-      start_stop_result[0].first,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
-  EXPECT_THAT(
-      start_stop_result[0].second,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
-  EXPECT_THAT(start_stop_result[1].first,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::seconds(2),
-                  realtime_clock::epoch() + chrono::microseconds(6005000)));
-  EXPECT_THAT(start_stop_result[1].second,
-              ::testing::ElementsAre(
-                  realtime_clock::epoch() + chrono::microseconds(4900150),
-                  realtime_clock::epoch() + chrono::seconds(8)));
-  EXPECT_THAT(
-      start_stop_result[2].first,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
-  EXPECT_THAT(
-      start_stop_result[2].second,
-      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
-}
-
-// Tests that we properly handle one direction being down.
-TEST(MissingDirectionTest, OneDirection) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split4_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  std::vector<std::string> filenames;
-
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  const std::string kLogfile1_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile1.1/";
-  util::UnlinkRecursive(kLogfile2_1);
-  util::UnlinkRecursive(kLogfile1_1);
-
-  pi2->Disconnect(pi1->node());
-
-  pi1->AlwaysStart<Ping>("ping");
-  pi2->AlwaysStart<Pong>("pong");
-
-  {
-    LoggerState pi2_logger = LoggerState::MakeLogger(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-    event_loop_factory.RunFor(chrono::milliseconds(95));
-
-    pi2_logger.StartLogger(kLogfile2_1);
-
-    event_loop_factory.RunFor(chrono::milliseconds(6000));
-
-    pi2->Connect(pi1->node());
-
-    LoggerState pi1_logger = LoggerState::MakeLogger(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    pi1_logger.StartLogger(kLogfile1_1);
-
-    event_loop_factory.RunFor(chrono::milliseconds(5000));
-    pi1_logger.AppendAllFilenames(&filenames);
-    pi2_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle only one direction ever existing after a
-// reboot.
-TEST(MissingDirectionTest, OneDirectionAfterReboot) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split4_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  std::vector<std::string> filenames;
-
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  util::UnlinkRecursive(kLogfile2_1);
-
-  pi1->AlwaysStart<Ping>("ping");
-
-  // Pi1 sends to pi2.  Reboot pi1, but don't let pi2 connect to pi1.  This
-  // makes it such that we will only get timestamps from pi1 -> pi2 on the
-  // second boot.
-  {
-    LoggerState pi2_logger = LoggerState::MakeLogger(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-    event_loop_factory.RunFor(chrono::milliseconds(95));
-
-    pi2_logger.StartLogger(kLogfile2_1);
-
-    event_loop_factory.RunFor(chrono::milliseconds(4000));
-
-    pi2->Disconnect(pi1->node());
-
-    event_loop_factory.RunFor(chrono::milliseconds(1000));
-    pi1->AlwaysStart<Ping>("ping");
-
-    event_loop_factory.RunFor(chrono::milliseconds(5000));
-    pi2_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle only one direction ever existing after a reboot
-// with only reliable data.
-TEST(MissingDirectionTest, OneDirectionAfterRebootReliable) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_split4_reliable_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  const size_t pi1_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi1->node());
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  const size_t pi2_index = configuration::GetNodeIndex(
-      event_loop_factory.configuration(), pi2->node());
-  std::vector<std::string> filenames;
-
-  {
-    CHECK_EQ(pi1_index, 0u);
-    CHECK_EQ(pi2_index, 1u);
-
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
-    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch() + reboot_time,
-        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
-         BootTimestamp::epoch() + reboot_time});
-  }
-
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  util::UnlinkRecursive(kLogfile2_1);
-
-  pi1->AlwaysStart<Ping>("ping");
-
-  // Pi1 sends to pi2.  Reboot pi1, but don't let pi2 connect to pi1.  This
-  // makes it such that we will only get timestamps from pi1 -> pi2 on the
-  // second boot.
-  {
-    LoggerState pi2_logger = LoggerState::MakeLogger(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
-    event_loop_factory.RunFor(chrono::milliseconds(95));
-
-    pi2_logger.StartLogger(kLogfile2_1);
-
-    event_loop_factory.RunFor(chrono::milliseconds(4000));
-
-    pi2->Disconnect(pi1->node());
-
-    event_loop_factory.RunFor(chrono::milliseconds(1000));
-    pi1->AlwaysStart<Ping>("ping");
-
-    event_loop_factory.RunFor(chrono::milliseconds(5000));
-    pi2_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle what used to be a time violation in one
-// direction.  This can occur when one direction goes down after sending some
-// data, but the other keeps working.  The down direction ends up resolving to a
-// straight line in the noncausal filter, where the direction which is still up
-// can cross that line.  Really, time progressed along just fine but we assumed
-// that the offset was a line when it could have deviated by up to 1ms/second.
-TEST_P(MultinodeLoggerTest, OneDirectionTimeDrift) {
-  std::vector<std::string> filenames;
-
-  CHECK_EQ(pi1_index_, 0u);
-  CHECK_EQ(pi2_index_, 1u);
-
-  time_converter_.AddNextTimestamp(
-      distributed_clock::epoch(),
-      {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
-  const chrono::nanoseconds before_disconnect_duration =
-      time_converter_.AddMonotonic(
-          {chrono::milliseconds(1000), chrono::milliseconds(1000)});
-
-  const chrono::nanoseconds test_duration =
-      time_converter_.AddMonotonic(
-          {chrono::milliseconds(1000), chrono::milliseconds(1000)}) +
-      time_converter_.AddMonotonic(
-          {chrono::milliseconds(10000),
-           chrono::milliseconds(10000) - chrono::milliseconds(5)}) +
-      time_converter_.AddMonotonic(
-          {chrono::milliseconds(10000),
-           chrono::milliseconds(10000) + chrono::milliseconds(5)});
-
-  const std::string kLogfile =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  util::UnlinkRecursive(kLogfile);
-
-  {
-    LoggerState pi2_logger = MakeLogger(pi2_);
-    pi2_logger.StartLogger(kLogfile);
-    event_loop_factory_.RunFor(before_disconnect_duration);
-
-    pi2_->Disconnect(pi1_->node());
-
-    event_loop_factory_.RunFor(test_duration);
-    pi2_->Connect(pi1_->node());
-
-    event_loop_factory_.RunFor(chrono::milliseconds(5000));
-    pi2_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  ConfirmReadable(filenames);
-}
-
-// Tests that we can replay a logfile that has timestamps such that at least one
-// node's epoch is at a positive distributed_clock (and thus will have to be
-// booted after the other node(s)).
-TEST_P(MultinodeLoggerTest, StartOneNodeBeforeOther) {
-  std::vector<std::string> filenames;
-
-  CHECK_EQ(pi1_index_, 0u);
-  CHECK_EQ(pi2_index_, 1u);
-
-  time_converter_.AddNextTimestamp(
-      distributed_clock::epoch(),
-      {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
-  const chrono::nanoseconds before_reboot_duration = chrono::milliseconds(1000);
-  time_converter_.RebootAt(
-      0, distributed_clock::time_point(before_reboot_duration));
-
-  const chrono::nanoseconds test_duration = time_converter_.AddMonotonic(
-      {chrono::milliseconds(10000), chrono::milliseconds(10000)});
-
-  const std::string kLogfile =
-      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
-  util::UnlinkRecursive(kLogfile);
-
-  pi2_->Disconnect(pi1_->node());
-  pi1_->Disconnect(pi2_->node());
-
-  {
-    LoggerState pi2_logger = MakeLogger(pi2_);
-
-    pi2_logger.StartLogger(kLogfile);
-    event_loop_factory_.RunFor(before_reboot_duration);
-
-    pi2_->Connect(pi1_->node());
-    pi1_->Connect(pi2_->node());
-
-    event_loop_factory_.RunFor(test_duration);
-
-    pi2_logger.AppendAllFilenames(&filenames);
-  }
-
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  ConfirmReadable(filenames);
-
-  {
-    LogReader reader(sorted_parts);
-    SimulatedEventLoopFactory replay_factory(reader.configuration());
-    reader.RegisterWithoutStarting(&replay_factory);
-
-    NodeEventLoopFactory *const replay_node =
-        reader.event_loop_factory()->GetNodeEventLoopFactory("pi1");
-
-    std::unique_ptr<EventLoop> test_event_loop =
-        replay_node->MakeEventLoop("test_reader");
-    replay_node->OnStartup([replay_node]() {
-      // Check that we didn't boot until at least t=0.
-      CHECK_LE(monotonic_clock::epoch(), replay_node->monotonic_now());
-    });
-    test_event_loop->OnRun([&test_event_loop]() {
-      // Check that we didn't boot until at least t=0.
-      EXPECT_LE(monotonic_clock::epoch(), test_event_loop->monotonic_now());
-    });
-    reader.event_loop_factory()->Run();
-    reader.Deregister();
-  }
-}
-
-// Tests that when we have a loop without all the logs at all points in time, we
-// can sort it properly.
-TEST(MultinodeLoggerLoopTest, Loop) {
-  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
-      aos::configuration::ReadConfig(ArtifactPath(
-          "aos/events/logging/multinode_pingpong_triangle_split_config.json"));
-  message_bridge::TestingTimeConverter time_converter(
-      configuration::NodesCount(&config.message()));
-  SimulatedEventLoopFactory event_loop_factory(&config.message());
-  event_loop_factory.SetTimeConverter(&time_converter);
-
-  NodeEventLoopFactory *const pi1 =
-      event_loop_factory.GetNodeEventLoopFactory("pi1");
-  NodeEventLoopFactory *const pi2 =
-      event_loop_factory.GetNodeEventLoopFactory("pi2");
-  NodeEventLoopFactory *const pi3 =
-      event_loop_factory.GetNodeEventLoopFactory("pi3");
-
-  const std::string kLogfile1_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile1/";
-  const std::string kLogfile2_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile2/";
-  const std::string kLogfile3_1 =
-      aos::testing::TestTmpDir() + "/multi_logfile3/";
-  util::UnlinkRecursive(kLogfile1_1);
-  util::UnlinkRecursive(kLogfile2_1);
-  util::UnlinkRecursive(kLogfile3_1);
-
-  {
-    // Make pi1 boot before everything else.
-    time_converter.AddNextTimestamp(
-        distributed_clock::epoch(),
-        {BootTimestamp::epoch(),
-         BootTimestamp::epoch() - chrono::milliseconds(100),
-         BootTimestamp::epoch() - chrono::milliseconds(300)});
-  }
-
-  // We want to set up a situation such that 2 of the 3 legs of the loop are very
-  // confident about time being X, and the third leg is pulling the average off
-  // to one side.
-  //
-  // It's easiest to visualize this in timestamp_plotter.
-
-  std::vector<std::string> filenames;
-  {
-    // Have pi1 send out a reliable message at startup.  This sets up a long
-    // forwarding time message at the start to bias time.
-    std::unique_ptr<EventLoop> pi1_event_loop = pi1->MakeEventLoop("ping");
-    {
-      aos::Sender<examples::Ping> ping_sender =
-          pi1_event_loop->MakeSender<examples::Ping>("/reliable");
-
-      aos::Sender<examples::Ping>::Builder builder = ping_sender.MakeBuilder();
-      examples::Ping::Builder ping_builder =
-          builder.MakeBuilder<examples::Ping>();
-      CHECK_EQ(builder.Send(ping_builder.Finish()), RawSender::Error::kOk);
-    }
-
-    // Wait a while so there's enough data to let the worst case be rather off.
-    event_loop_factory.RunFor(chrono::seconds(1000));
-
-    // Now start a receiving node first.  This sets up 2 tight bounds between 2
-    // of the nodes.
-    LoggerState pi2_logger = LoggerState::MakeLogger(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    pi2_logger.StartLogger(kLogfile2_1);
-
-    event_loop_factory.RunFor(chrono::seconds(100));
-
-    // And now start the third leg.
-    LoggerState pi3_logger = LoggerState::MakeLogger(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    pi3_logger.StartLogger(kLogfile3_1);
-
-    LoggerState pi1_logger = LoggerState::MakeLogger(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-    pi1_logger.StartLogger(kLogfile1_1);
-
-    event_loop_factory.RunFor(chrono::seconds(100));
-
-    pi1_logger.AppendAllFilenames(&filenames);
-    pi2_logger.AppendAllFilenames(&filenames);
-    pi3_logger.AppendAllFilenames(&filenames);
-  }
-
-  // Make sure we can read this.
-  const std::vector<LogFile> sorted_parts = SortParts(filenames);
-  auto result = ConfirmReadable(filenames);
-}
-
 }  // namespace testing
 }  // namespace logger
 }  // namespace aos
diff --git a/aos/events/logging/multinode_logger_test.cc b/aos/events/logging/multinode_logger_test.cc
new file mode 100644
index 0000000..bc1f5b8
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test.cc
@@ -0,0 +1,3594 @@
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/logging/multinode_logger_test_lib.h"
+#include "aos/events/message_counter.h"
+#include "aos/events/ping_lib.h"
+#include "aos/events/pong_lib.h"
+#include "aos/network/remote_message_generated.h"
+#include "aos/network/timestamp_generated.h"
+#include "aos/testing/tmpdir.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+namespace chrono = std::chrono;
+using aos::message_bridge::RemoteMessage;
+using aos::testing::ArtifactPath;
+using aos::testing::MessageCounter;
+
+constexpr std::string_view kCombinedConfigSha1(
+    "5d73fe35bacaa59d24f8f0c1a806fe10b783b0fcc80809ee30a9db824e82538b");
+constexpr std::string_view kSplitConfigSha1(
+    "f25e8f6f90d61f41c41517e652300566228b077e44cd86f1af2af4a9bed31ad4");
+constexpr std::string_view kReloggedSplitConfigSha1(
+    "f1fabd629bdf8735c3d81bc791d7a454e8e636951c26cba6426545cbc97f911f");
+
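+// Each test below runs against both the combined and split configs, crossed
+// with every supported compression algorithm.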
+INSTANTIATE_TEST_SUITE_P(
+    All, MultinodeLoggerTest,
+    ::testing::Combine(
+        ::testing::Values(
+            ConfigParams{"multinode_pingpong_combined_config.json", true,
+                         kCombinedConfigSha1, kCombinedConfigSha1},
+            ConfigParams{"multinode_pingpong_split_config.json", false,
+                         kSplitConfigSha1, kReloggedSplitConfigSha1}),
+        ::testing::ValuesIn(SupportedCompressionAlgorithms())));
+
+INSTANTIATE_TEST_SUITE_P(
+    All, MultinodeLoggerDeathTest,
+    ::testing::Combine(
+        ::testing::Values(
+            ConfigParams{"multinode_pingpong_combined_config.json", true,
+                         kCombinedConfigSha1, kCombinedConfigSha1},
+            ConfigParams{"multinode_pingpong_split_config.json", false,
+                         kSplitConfigSha1, kReloggedSplitConfigSha1}),
+        ::testing::ValuesIn(SupportedCompressionAlgorithms())));
+
+// Tests that we can write and read simple multi-node log files.
+TEST_P(MultinodeLoggerTest, SimpleMultiNode) {
+  std::vector<std::string> actual_filenames;
+  time_converter_.StartEqual();
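+  // Start both nodes with identical clocks.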
+
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+    pi1_logger.AppendAllFilenames(&actual_filenames);
+    pi2_logger.AppendAllFilenames(&actual_filenames);
+  }
+
+  ASSERT_THAT(actual_filenames,
+              ::testing::UnorderedElementsAreArray(logfiles_));
+
+  {
+    std::set<std::string> logfile_uuids;
+    std::set<std::string> parts_uuids;
+    // Confirm that we have the expected number of UUIDs for both the logfile
+    // UUIDs and parts UUIDs.
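+    // Parts whose headers embed the full configuration (presumably the
+    // standalone config headers) are skipped from the counts.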
+    std::vector<SizePrefixedFlatbufferVector<LogFileHeader>> log_header;
+    for (std::string_view f : logfiles_) {
+      log_header.emplace_back(ReadHeader(f).value());
+      if (!log_header.back().message().has_configuration()) {
+        logfile_uuids.insert(
+            log_header.back().message().log_event_uuid()->str());
+        parts_uuids.insert(log_header.back().message().parts_uuid()->str());
+      }
+    }
+
+    EXPECT_EQ(logfile_uuids.size(), 2u);
+    if (shared()) {
+      EXPECT_EQ(parts_uuids.size(), 7u);
+    } else {
+      EXPECT_EQ(parts_uuids.size(), 8u);
+    }
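+    // With the split (non-shared) config, remote timestamps are logged on
+    // per-channel remote_timestamps channels, which is presumably where the
+    // extra parts UUID (and the extra log file below) comes from.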
+
+    // And confirm everything is on the correct node.
+    EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi1");
+
+    EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi2");
+
+    EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi2");
+
+    EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi1");
+
+    EXPECT_EQ(log_header[12].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[13].message().node()->name()->string_view(), "pi2");
+
+    if (shared()) {
+      EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
+      EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
+      EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi2");
+
+      EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
+      EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi1");
+    } else {
+      EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
+      EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
+
+      EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi1");
+      EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
+
+      EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi2");
+      EXPECT_EQ(log_header[19].message().node()->name()->string_view(), "pi2");
+    }
+
+    // And the parts index matches.
+    EXPECT_EQ(log_header[2].message().parts_index(), 0);
+    EXPECT_EQ(log_header[3].message().parts_index(), 1);
+    EXPECT_EQ(log_header[4].message().parts_index(), 2);
+
+    EXPECT_EQ(log_header[5].message().parts_index(), 0);
+    EXPECT_EQ(log_header[6].message().parts_index(), 1);
+
+    EXPECT_EQ(log_header[7].message().parts_index(), 0);
+    EXPECT_EQ(log_header[8].message().parts_index(), 1);
+    EXPECT_EQ(log_header[9].message().parts_index(), 2);
+
+    EXPECT_EQ(log_header[10].message().parts_index(), 0);
+    EXPECT_EQ(log_header[11].message().parts_index(), 1);
+
+    EXPECT_EQ(log_header[12].message().parts_index(), 0);
+    EXPECT_EQ(log_header[13].message().parts_index(), 1);
+
+    if (shared()) {
+      EXPECT_EQ(log_header[14].message().parts_index(), 0);
+      EXPECT_EQ(log_header[15].message().parts_index(), 1);
+      EXPECT_EQ(log_header[16].message().parts_index(), 2);
+
+      EXPECT_EQ(log_header[17].message().parts_index(), 0);
+      EXPECT_EQ(log_header[18].message().parts_index(), 1);
+    } else {
+      EXPECT_EQ(log_header[14].message().parts_index(), 0);
+      EXPECT_EQ(log_header[15].message().parts_index(), 1);
+
+      EXPECT_EQ(log_header[16].message().parts_index(), 0);
+      EXPECT_EQ(log_header[17].message().parts_index(), 1);
+
+      EXPECT_EQ(log_header[18].message().parts_index(), 0);
+      EXPECT_EQ(log_header[19].message().parts_index(), 1);
+    }
+  }
+
+  const std::vector<LogFile> sorted_log_files = SortParts(logfiles_);
+  {
+    using ::testing::UnorderedElementsAre;
+    std::shared_ptr<const aos::Configuration> config =
+        sorted_log_files[0].config;
+
+    // Timing reports, pings
+    EXPECT_THAT(CountChannelsData(config, logfiles_[2]),
+                UnorderedElementsAre(
+                    std::make_tuple("/pi1/aos",
+                                    "aos.message_bridge.ServerStatistics", 1),
+                    std::make_tuple("/test", "aos.examples.Ping", 1)))
+        << " : " << logfiles_[2];
+    {
+      std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
+          std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 1),
+          std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
+                          1)};
+      if (!std::get<0>(GetParam()).shared) {
+        channel_counts.push_back(
+            std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                            "aos-message_bridge-Timestamp",
+                            "aos.message_bridge.RemoteMessage", 1));
+      }
+      EXPECT_THAT(CountChannelsData(config, logfiles_[3]),
+                  ::testing::UnorderedElementsAreArray(channel_counts))
+          << " : " << logfiles_[3];
+    }
+    {
+      std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
+          std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 199),
+          std::make_tuple("/pi1/aos", "aos.message_bridge.ServerStatistics",
+                          20),
+          std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
+                          199),
+          std::make_tuple("/pi1/aos", "aos.timing.Report", 40),
+          std::make_tuple("/test", "aos.examples.Ping", 2000)};
+      if (!std::get<0>(GetParam()).shared) {
+        channel_counts.push_back(
+            std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                            "aos-message_bridge-Timestamp",
+                            "aos.message_bridge.RemoteMessage", 199));
+      }
+      EXPECT_THAT(CountChannelsData(config, logfiles_[4]),
+                  ::testing::UnorderedElementsAreArray(channel_counts))
+          << " : " << logfiles_[4];
+    }
+    // Timestamps for pong
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[2]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[2];
+    EXPECT_THAT(
+        CountChannelsTimestamp(config, logfiles_[3]),
+        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
+        << " : " << logfiles_[3];
+    EXPECT_THAT(
+        CountChannelsTimestamp(config, logfiles_[4]),
+        UnorderedElementsAre(
+            std::make_tuple("/test", "aos.examples.Pong", 2000),
+            std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200)))
+        << " : " << logfiles_[4];
+
+    // Pong data.
+    EXPECT_THAT(
+        CountChannelsData(config, logfiles_[5]),
+        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 91)))
+        << " : " << logfiles_[5];
+    EXPECT_THAT(CountChannelsData(config, logfiles_[6]),
+                UnorderedElementsAre(
+                    std::make_tuple("/test", "aos.examples.Pong", 1910)))
+        << " : " << logfiles_[6];
+
+    // No timestamps
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[5]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[5];
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[6]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[6];
+
+    // Timing reports and pongs.
+    EXPECT_THAT(CountChannelsData(config, logfiles_[7]),
+                UnorderedElementsAre(std::make_tuple(
+                    "/pi2/aos", "aos.message_bridge.ServerStatistics", 1)))
+        << " : " << logfiles_[7];
+    EXPECT_THAT(
+        CountChannelsData(config, logfiles_[8]),
+        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
+        << " : " << logfiles_[8];
+    EXPECT_THAT(
+        CountChannelsData(config, logfiles_[9]),
+        UnorderedElementsAre(
+            std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200),
+            std::make_tuple("/pi2/aos", "aos.message_bridge.ServerStatistics",
+                            20),
+            std::make_tuple("/pi2/aos", "aos.message_bridge.ClientStatistics",
+                            200),
+            std::make_tuple("/pi2/aos", "aos.timing.Report", 40),
+            std::make_tuple("/test", "aos.examples.Pong", 2000)))
+        << " : " << logfiles_[9];
+    // And ping timestamps.
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[7]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[7];
+    EXPECT_THAT(
+        CountChannelsTimestamp(config, logfiles_[8]),
+        UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Ping", 1)))
+        << " : " << logfiles_[8];
+    EXPECT_THAT(
+        CountChannelsTimestamp(config, logfiles_[9]),
+        UnorderedElementsAre(
+            std::make_tuple("/test", "aos.examples.Ping", 2000),
+            std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200)))
+        << " : " << logfiles_[9];
+
+    // And then test that the remotely logged timestamp data files only have
+    // timestamps in them.
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[10]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[10];
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[11]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[11];
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[12]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[12];
+    EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[13]),
+                UnorderedElementsAre())
+        << " : " << logfiles_[13];
+
+    EXPECT_THAT(CountChannelsData(config, logfiles_[10]),
+                UnorderedElementsAre(std::make_tuple(
+                    "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
+        << " : " << logfiles_[10];
+    EXPECT_THAT(CountChannelsData(config, logfiles_[11]),
+                UnorderedElementsAre(std::make_tuple(
+                    "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
+        << " : " << logfiles_[11];
+
+    EXPECT_THAT(CountChannelsData(config, logfiles_[12]),
+                UnorderedElementsAre(std::make_tuple(
+                    "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+        << " : " << logfiles_[12];
+    EXPECT_THAT(CountChannelsData(config, logfiles_[13]),
+                UnorderedElementsAre(std::make_tuple(
+                    "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+        << " : " << logfiles_[13];
+
+    // Timestamps from pi2 on pi1, and the other way.
+    if (shared()) {
+      EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[14];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[15];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[16];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[17];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[18];
+
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
+                  UnorderedElementsAre(
+                      std::make_tuple("/test", "aos.examples.Ping", 1)))
+          << " : " << logfiles_[14];
+      EXPECT_THAT(
+          CountChannelsTimestamp(config, logfiles_[15]),
+          UnorderedElementsAre(
+              std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 9),
+              std::make_tuple("/test", "aos.examples.Ping", 90)))
+          << " : " << logfiles_[15];
+      EXPECT_THAT(
+          CountChannelsTimestamp(config, logfiles_[16]),
+          UnorderedElementsAre(
+              std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 191),
+              std::make_tuple("/test", "aos.examples.Ping", 1910)))
+          << " : " << logfiles_[16];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+          << " : " << logfiles_[17];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+          << " : " << logfiles_[18];
+    } else {
+      EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[14];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[15];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[16];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[17];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[18];
+      EXPECT_THAT(CountChannelsData(config, logfiles_[19]),
+                  UnorderedElementsAre())
+          << " : " << logfiles_[19];
+
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
+          << " : " << logfiles_[14];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[15]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
+          << " : " << logfiles_[15];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[16]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+          << " : " << logfiles_[16];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
+                  UnorderedElementsAre(std::make_tuple(
+                      "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+          << " : " << logfiles_[17];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
+                  UnorderedElementsAre(
+                      std::make_tuple("/test", "aos.examples.Ping", 91)))
+          << " : " << logfiles_[18];
+      EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[19]),
+                  UnorderedElementsAre(
+                      std::make_tuple("/test", "aos.examples.Ping", 1910)))
+          << " : " << logfiles_[19];
+    }
+  }
+
+  LogReader reader(sorted_log_files);
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
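+  // Zero the send delay so replayed messages land at exactly their logged
+  // times; the timestamp checks in the watchers below are exact.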
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+  LOG(INFO) << "now pi1 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+  LOG(INFO) << "now pi2 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+  EXPECT_THAT(reader.LoggedNodes(),
+              ::testing::ElementsAre(
+                  configuration::GetNode(reader.logged_configuration(), pi1),
+                  configuration::GetNode(reader.logged_configuration(), pi2)));
+
+  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+
+  int pi1_ping_count = 10;
+  int pi2_ping_count = 10;
+  int pi1_pong_count = 10;
+  int pi2_pong_count = 10;
+
+  // Confirm that the ping value matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping) << " at "
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+                  pi1_ping_count * chrono::milliseconds(10) +
+                      monotonic_clock::epoch());
+        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+                  pi1_ping_count * chrono::milliseconds(10) +
+                      realtime_clock::epoch());
+        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+                  pi1_event_loop->context().monotonic_event_time);
+        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+                  pi1_event_loop->context().realtime_event_time);
+
+        ++pi1_ping_count;
+      });
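+  // The 150 us offsets below are the forwarding latency baked into the log
+  // (presumably the logging-time send + network delay).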
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping) << " at "
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+                  pi2_ping_count * chrono::milliseconds(10) +
+                      monotonic_clock::epoch());
+        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+                  pi2_ping_count * chrono::milliseconds(10) +
+                      realtime_clock::epoch());
+        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time +
+                      chrono::microseconds(150),
+                  pi2_event_loop->context().monotonic_event_time);
+        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time +
+                      chrono::microseconds(150),
+                  pi2_event_loop->context().realtime_event_time);
+        ++pi2_ping_count;
+      });
+
+  constexpr ssize_t kQueueIndexOffset = -9;
+  // Confirm that the ping and pong counts both match, and the value also
+  // matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_event_loop, &pi1_ping_count,
+                &pi1_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pi1_event_loop->context().remote_queue_index,
+                  pi1_pong_count + kQueueIndexOffset);
+        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+                  chrono::microseconds(200) +
+                      pi1_pong_count * chrono::milliseconds(10) +
+                      monotonic_clock::epoch());
+        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+                  chrono::microseconds(200) +
+                      pi1_pong_count * chrono::milliseconds(10) +
+                      realtime_clock::epoch());
+
+        EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time +
+                      chrono::microseconds(150),
+                  pi1_event_loop->context().monotonic_event_time);
+        EXPECT_EQ(pi1_event_loop->context().realtime_remote_time +
+                      chrono::microseconds(150),
+                  pi1_event_loop->context().realtime_event_time);
+
+        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+        ++pi1_pong_count;
+        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+      });
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_event_loop, &pi2_ping_count,
+                &pi2_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pi2_event_loop->context().remote_queue_index,
+                  pi2_pong_count + kQueueIndexOffset);
+
+        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+                  chrono::microseconds(200) +
+                      pi2_pong_count * chrono::milliseconds(10) +
+                      monotonic_clock::epoch());
+        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+                  chrono::microseconds(200) +
+                      pi2_pong_count * chrono::milliseconds(10) +
+                      realtime_clock::epoch());
+
+        EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+                  pi2_event_loop->context().monotonic_event_time);
+        EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+                  pi2_event_loop->context().realtime_event_time);
+
+        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+        ++pi2_pong_count;
+        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+      });
+
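+  // Pings go out every 10 ms, so 20 seconds of logging yields 2000 of each
+  // message on top of the starting counts of 10.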
+  log_reader_factory.Run();
+  EXPECT_EQ(pi1_ping_count, 2010);
+  EXPECT_EQ(pi2_ping_count, 2010);
+  EXPECT_EQ(pi1_pong_count, 2010);
+  EXPECT_EQ(pi2_pong_count, 2010);
+
+  reader.Deregister();
+}
+
+// Test that if we feed the replay a mismatched node list, we die in the
+// LogReader constructor.
+TEST_P(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
+  time_converter_.StartEqual();
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  // Test that, if we add an additional node to the replay config, the logger
+  // complains about the mismatch in the number of nodes.
+  FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
+      configuration::MergeWithConfig(&config_.message(), R"({
+          "nodes": [
+            {
+              "name": "extra-node"
+            }
+          ]
+        }
+      )");
+
+  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+  EXPECT_DEATH(LogReader(sorted_parts, &extra_nodes_config.message()),
+               "Log file and replay config need to have matching nodes lists.");
+}
+
+// Tests that we can read log files that don't start at the same monotonic
+// time.
+TEST_P(MultinodeLoggerTest, StaggeredStart) {
+  time_converter_.StartEqual();
+  std::vector<std::string> actual_filenames;
+
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(200));
+
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+    pi1_logger.AppendAllFilenames(&actual_filenames);
+    pi2_logger.AppendAllFilenames(&actual_filenames);
+  }
+
+  // Since we delay starting pi2, it already knows about all the timestamps so
+  // we don't end up with extra parts.
+  LogReader reader(SortParts(actual_filenames));
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  EXPECT_THAT(reader.LoggedNodes(),
+              ::testing::ElementsAre(
+                  configuration::GetNode(reader.logged_configuration(), pi1),
+                  configuration::GetNode(reader.logged_configuration(), pi2)));
+
+  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+
+  int pi1_ping_count = 30;
+  int pi2_ping_count = 30;
+  int pi1_pong_count = 30;
+  int pi2_pong_count = 30;
+
+  // Confirm that the ping value matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+
+        ++pi1_ping_count;
+      });
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+        ++pi2_ping_count;
+      });
+
+  // Confirm that the ping and pong counts both match, and the value also
+  // matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_event_loop, &pi1_ping_count,
+                &pi1_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+        ++pi1_pong_count;
+        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+      });
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_event_loop, &pi2_ping_count,
+                &pi2_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+        ++pi2_pong_count;
+        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+      });
+
+  log_reader_factory.Run();
+  EXPECT_EQ(pi1_ping_count, 2030);
+  EXPECT_EQ(pi2_ping_count, 2030);
+  EXPECT_EQ(pi1_pong_count, 2030);
+  EXPECT_EQ(pi2_pong_count, 2030);
+
+  reader.Deregister();
+}
+
+// Tests that we can read log files where the monotonic clocks drift and don't
+// match correctly.  While we are here, also test that logs with different
+// ending times are readable.
+TEST_P(MultinodeLoggerTest, MismatchedClocks) {
+  // TODO(austin): Negate...
+  const chrono::nanoseconds initial_pi2_offset = chrono::seconds(1000);
+
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + initial_pi2_offset});
+  // Wait for 95 ms (~0.1 seconds - 1/2 of the ping/pong period), and set the
+  // skew to be 200 us/s.
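+  // (200 ns of skew per 1 ms of elapsed time works out to 200 us per second.)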
+  const chrono::nanoseconds startup_sleep1 = time_converter_.AddMonotonic(
+      {chrono::milliseconds(95),
+       chrono::milliseconds(95) - chrono::nanoseconds(200) * 95});
+  // Run another 200 ms to have one logger start first.
+  const chrono::nanoseconds startup_sleep2 = time_converter_.AddMonotonic(
+      {chrono::milliseconds(200), chrono::milliseconds(200)});
+  // Slew one way then the other at the same 200 us/s slew rate.  Make sure we
+  // go far enough to cause problems if this isn't accounted for.
+  const chrono::nanoseconds logger_run1 = time_converter_.AddMonotonic(
+      {chrono::milliseconds(20000),
+       chrono::milliseconds(20000) - chrono::nanoseconds(200) * 20000});
+  const chrono::nanoseconds logger_run2 = time_converter_.AddMonotonic(
+      {chrono::milliseconds(40000),
+       chrono::milliseconds(40000) + chrono::nanoseconds(200) * 40000});
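+  // By the end of these runs the accumulated skew is several milliseconds,
+  // well beyond the simulated network delay checked against below.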
+  const chrono::nanoseconds logger_run3 = time_converter_.AddMonotonic(
+      {chrono::milliseconds(400), chrono::milliseconds(400)});
+
+  {
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    LOG(INFO) << "pi2 times: " << pi2_->monotonic_now() << " "
+              << pi2_->realtime_now() << " distributed "
+              << pi2_->ToDistributedClock(pi2_->monotonic_now());
+
+    LOG(INFO) << "pi2_ times: " << pi2_->monotonic_now() << " "
+              << pi2_->realtime_now() << " distributed "
+              << pi2_->ToDistributedClock(pi2_->monotonic_now());
+
+    event_loop_factory_.RunFor(startup_sleep1);
+
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(startup_sleep2);
+
+    {
+      // Run pi1's logger for only part of the time.
+      LoggerState pi1_logger = MakeLogger(pi1_);
+
+      StartLogger(&pi1_logger);
+      event_loop_factory_.RunFor(logger_run1);
+
+      // Make sure we slewed time far enough so that the difference is greater
+      // than the network delay.  This confirms that if we sort incorrectly, it
+      // would show in the results.
+      EXPECT_LT(
+          (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
+          -event_loop_factory_.send_delay() -
+              event_loop_factory_.network_delay());
+
+      event_loop_factory_.RunFor(logger_run2);
+
+      // And now check that we went far enough the other way to make sure we
+      // cover both problems.
+      EXPECT_GT(
+          (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
+          event_loop_factory_.send_delay() +
+              event_loop_factory_.network_delay());
+    }
+
+    // And log a bit more on pi2.
+    event_loop_factory_.RunFor(logger_run3);
+  }
+
+  LogReader reader(
+      SortParts(MakeLogFiles(logfile_base1_, logfile_base2_, 3, 2)));
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+  LOG(INFO) << "now pi1 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+  LOG(INFO) << "now pi2 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+  LOG(INFO) << "Done registering (pi1) "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now()
+            << " "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->realtime_now();
+  LOG(INFO) << "Done registering (pi2) "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now()
+            << " "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->realtime_now();
+
+  EXPECT_THAT(reader.LoggedNodes(),
+              ::testing::ElementsAre(
+                  configuration::GetNode(reader.logged_configuration(), pi1),
+                  configuration::GetNode(reader.logged_configuration(), pi2)));
+
+  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+
+  int pi1_ping_count = 30;
+  int pi2_ping_count = 30;
+  int pi1_pong_count = 30;
+  int pi2_pong_count = 30;
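+  // The loggers only started a few hundred milliseconds into the run, so by
+  // the time the replayed log begins roughly 30 ping/pong exchanges (at the
+  // 10 ms period implied by the 95 ms startup sleep above) have already gone
+  // by; seed the counters so the first replayed value lines up.  Another
+  // roughly 60 s of logged data follows, which is where the ~6030 totals
+  // checked at the end come from.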
+
+  // Confirm that the ping value matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+
+        ++pi1_ping_count;
+      });
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+        VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+        EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+        ++pi2_ping_count;
+      });
+
+  // Confirm that the ping and pong counts both match, and the value also
+  // matches.
+  pi1_event_loop->MakeWatcher(
+      "/test", [&pi1_event_loop, &pi1_ping_count,
+                &pi1_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+                << pi1_event_loop->context().monotonic_remote_time << " -> "
+                << pi1_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+        ++pi1_pong_count;
+        EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+      });
+  pi2_event_loop->MakeWatcher(
+      "/test", [&pi2_event_loop, &pi2_ping_count,
+                &pi2_pong_count](const examples::Pong &pong) {
+        VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+                << pi2_event_loop->context().monotonic_remote_time << " -> "
+                << pi2_event_loop->context().monotonic_event_time;
+
+        EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+        ++pi2_pong_count;
+        EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+      });
+
+  log_reader_factory.Run();
+  EXPECT_EQ(pi1_ping_count, 6030);
+  EXPECT_EQ(pi2_ping_count, 6030);
+  EXPECT_EQ(pi1_pong_count, 6030);
+  EXPECT_EQ(pi2_pong_count, 6030);
+
+  reader.Deregister();
+}
+
+// Tests that we can sort a bunch of parts into the pre-determined sorted parts.
+TEST_P(MultinodeLoggerTest, SortParts) {
+  time_converter_.StartEqual();
+  // Make a bunch of parts.
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(2000));
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+  VerifyParts(sorted_parts);
+}
+
+// Tests that we can sort a bunch of parts with an empty part.  We should ignore
+// it and remove it from the sorted list.
+TEST_P(MultinodeLoggerTest, SortEmptyParts) {
+  time_converter_.StartEqual();
+  // Make a bunch of parts.
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(2000));
+  }
+
+  // TODO(austin): Should we flip out if the file can't open?
+  const std::string kEmptyFile("foobarinvalidfiledoesnotexist" + Extension());
+
+  aos::util::WriteStringToFileOrDie(kEmptyFile, "");
+  logfiles_.emplace_back(kEmptyFile);
+
+  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+  VerifyParts(sorted_parts, {kEmptyFile});
+}
+
+// Tests that we can sort a bunch of parts when the end of one file is
+// missing.  We should use the portion of the part we can still read.
+TEST_P(MultinodeLoggerTest, SortTruncatedParts) {
+  std::vector<std::string> actual_filenames;
+  time_converter_.StartEqual();
+  // Make a bunch of parts.
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(2000));
+
+    pi1_logger.AppendAllFilenames(&actual_filenames);
+    pi2_logger.AppendAllFilenames(&actual_filenames);
+  }
+
+  ASSERT_THAT(actual_filenames,
+              ::testing::UnorderedElementsAreArray(logfiles_));
+
+  // Strip off the end of one of the files.  Pick one with a lot of data.  For
+  // snappy, it needs enough data to span more than one chunk of compressed
+  // data so that we don't corrupt the entire log part.
+  ::std::string compressed_contents =
+      aos::util::ReadFileToStringOrDie(logfiles_[4]);
+
+  aos::util::WriteStringToFileOrDie(
+      logfiles_[4],
+      compressed_contents.substr(0, compressed_contents.size() - 100));
+
+  const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+  VerifyParts(sorted_parts);
+}
+
+// Tests that if we remap a logged channel, it shows up correctly.
+TEST_P(MultinodeLoggerTest, RemapLoggedChannel) {
+  time_converter_.StartEqual();
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader reader(SortParts(logfiles_));
+
+  // Remap just on pi1.
+  reader.RemapLoggedChannel<aos::timing::Report>(
+      "/aos", configuration::GetNode(reader.configuration(), "pi1"));
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  std::vector<const Channel *> remapped_channels = reader.RemappedChannels();
+  // Note: An extra channel gets remapped automatically due to a timestamp
+  // channel being LOCAL_LOGGER'd.
+  ASSERT_EQ(remapped_channels.size(), std::get<0>(GetParam()).shared ? 1u : 2u);
+  EXPECT_EQ(remapped_channels[0]->name()->string_view(), "/original/pi1/aos");
+  EXPECT_EQ(remapped_channels[0]->type()->string_view(), "aos.timing.Report");
+  if (!std::get<0>(GetParam()).shared) {
+    EXPECT_EQ(remapped_channels[1]->name()->string_view(),
+              "/original/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+              "aos-message_bridge-Timestamp");
+    EXPECT_EQ(remapped_channels[1]->type()->string_view(),
+              "aos.message_bridge.RemoteMessage");
+  }
+
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  // Confirm we can read the data on the remapped channel, just for pi1. Nothing
+  // else should have moved.
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  pi1_event_loop->SkipTimingReport();
+  std::unique_ptr<EventLoop> full_pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  full_pi1_event_loop->SkipTimingReport();
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+  pi2_event_loop->SkipTimingReport();
+
+  MessageCounter<aos::timing::Report> pi1_timing_report(pi1_event_loop.get(),
+                                                        "/aos");
+  MessageCounter<aos::timing::Report> full_pi1_timing_report(
+      full_pi1_event_loop.get(), "/pi1/aos");
+  MessageCounter<aos::timing::Report> pi1_original_timing_report(
+      pi1_event_loop.get(), "/original/aos");
+  MessageCounter<aos::timing::Report> full_pi1_original_timing_report(
+      full_pi1_event_loop.get(), "/original/pi1/aos");
+  MessageCounter<aos::timing::Report> pi2_timing_report(pi2_event_loop.get(),
+                                                        "/aos");
+
+  log_reader_factory.Run();
+
+  EXPECT_EQ(pi1_timing_report.count(), 0u);
+  EXPECT_EQ(full_pi1_timing_report.count(), 0u);
+  EXPECT_NE(pi1_original_timing_report.count(), 0u);
+  EXPECT_NE(full_pi1_original_timing_report.count(), 0u);
+  EXPECT_NE(pi2_timing_report.count(), 0u);
+
+  reader.Deregister();
+}
+
+// Tests that we can remap a forwarded channel as well.
+TEST_P(MultinodeLoggerTest, RemapForwardedLoggedChannel) {
+  time_converter_.StartEqual();
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader reader(SortParts(logfiles_));
+
+  reader.RemapLoggedChannel<examples::Ping>("/test");
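+  // Remapping without naming a node moves the channel on every node it was
+  // logged on, so the Ping data should replay on /original/test on both pi1
+  // and pi2 (see the MessageCounter checks below).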
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  // Confirm we can read the data on the remapped channel on both nodes.
+  // Nothing else should have moved.
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  pi1_event_loop->SkipTimingReport();
+  std::unique_ptr<EventLoop> full_pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  full_pi1_event_loop->SkipTimingReport();
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+  pi2_event_loop->SkipTimingReport();
+
+  MessageCounter<examples::Ping> pi1_ping(pi1_event_loop.get(), "/test");
+  MessageCounter<examples::Ping> pi2_ping(pi2_event_loop.get(), "/test");
+  MessageCounter<examples::Ping> pi1_original_ping(pi1_event_loop.get(),
+                                                   "/original/test");
+  MessageCounter<examples::Ping> pi2_original_ping(pi2_event_loop.get(),
+                                                   "/original/test");
+
+  std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
+      pi1_original_ping_timestamp;
+  std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
+      pi1_ping_timestamp;
+  if (!shared()) {
+    pi1_original_ping_timestamp =
+        std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
+            pi1_event_loop.get(),
+            "/pi1/aos/remote_timestamps/pi2/original/test/aos-examples-Ping");
+    pi1_ping_timestamp =
+        std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
+            pi1_event_loop.get(),
+            "/pi1/aos/remote_timestamps/pi2/test/aos-examples-Ping");
+  }
+
+  log_reader_factory.Run();
+
+  EXPECT_EQ(pi1_ping.count(), 0u);
+  EXPECT_EQ(pi2_ping.count(), 0u);
+  EXPECT_NE(pi1_original_ping.count(), 0u);
+  EXPECT_NE(pi2_original_ping.count(), 0u);
+  if (!shared()) {
+    EXPECT_NE(pi1_original_ping_timestamp->count(), 0u);
+    EXPECT_EQ(pi1_ping_timestamp->count(), 0u);
+  }
+
+  reader.Deregister();
+}
+
+// Tests that we observe all the same events in log replay (for a given node)
+// whether we just register an event loop for that node or if we register a full
+// event loop factory.
+TEST_P(MultinodeLoggerTest, SingleNodeReplay) {
+  time_converter_.StartEqual();
+  constexpr chrono::milliseconds kStartupDelay(95);
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(kStartupDelay);
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader full_reader(SortParts(logfiles_));
+  LogReader single_node_reader(SortParts(logfiles_));
+
+  SimulatedEventLoopFactory full_factory(full_reader.configuration());
+  SimulatedEventLoopFactory single_node_factory(
+      single_node_reader.configuration());
+  single_node_factory.SkipTimingReport();
+  single_node_factory.DisableStatistics();
+  std::unique_ptr<EventLoop> replay_event_loop =
+      single_node_factory.GetNodeEventLoopFactory("pi1")->MakeEventLoop(
+          "log_reader");
+
+  full_reader.Register(&full_factory);
+  single_node_reader.Register(replay_event_loop.get());
+
+  const Node *full_pi1 =
+      configuration::GetNode(full_factory.configuration(), "pi1");
+
+  // First pass: record every message observed on pi1 during the full-factory
+  // replay so we can compare it against the single-node replay below.
+  std::unique_ptr<EventLoop> full_event_loop =
+      full_factory.MakeEventLoop("test", full_pi1);
+  full_event_loop->SkipTimingReport();
+  full_event_loop->SkipAosLog();
+  // Maps are indexed by channel index.
+  // observed_messages: {channel_index: [(message_sent_time, was_fetched),...]}
+  std::map<size_t, std::vector<std::pair<monotonic_clock::time_point, bool>>>
+      observed_messages;
+  std::map<size_t, std::unique_ptr<RawFetcher>> fetchers;
+  for (size_t ii = 0; ii < full_event_loop->configuration()->channels()->size();
+       ++ii) {
+    const Channel *channel =
+        full_event_loop->configuration()->channels()->Get(ii);
+    // We currently don't support replaying remote timestamp channels in
+    // realtime replay (unless the remote timestamp channel was actually
+    // logged, in which case it gets auto-remapped and replayed on an
+    // /original channel).
+    if (channel->name()->string_view().find("remote_timestamp") !=
+            std::string_view::npos &&
+        channel->name()->string_view().find("/original") ==
+            std::string_view::npos) {
+      continue;
+    }
+    if (configuration::ChannelIsReadableOnNode(channel, full_pi1)) {
+      observed_messages[ii] = {};
+      fetchers[ii] = full_event_loop->MakeRawFetcher(channel);
+      full_event_loop->OnRun([ii, &observed_messages, &fetchers]() {
+        if (fetchers[ii]->Fetch()) {
+          observed_messages[ii].push_back(std::make_pair(
+              fetchers[ii]->context().monotonic_event_time, true));
+        }
+      });
+      full_event_loop->MakeRawNoArgWatcher(
+          channel, [ii, &observed_messages](const Context &context) {
+            observed_messages[ii].push_back(
+                std::make_pair(context.monotonic_event_time, false));
+          });
+    }
+  }
+
+  full_factory.Run();
+  fetchers.clear();
+  full_reader.Deregister();
+
+  const Node *single_node_pi1 =
+      configuration::GetNode(single_node_factory.configuration(), "pi1");
+  std::map<size_t, std::unique_ptr<RawFetcher>> single_node_fetchers;
+
+  std::unique_ptr<EventLoop> single_node_event_loop =
+      single_node_factory.MakeEventLoop("test", single_node_pi1);
+  single_node_event_loop->SkipTimingReport();
+  single_node_event_loop->SkipAosLog();
+  for (size_t ii = 0;
+       ii < single_node_event_loop->configuration()->channels()->size(); ++ii) {
+    const Channel *channel =
+        single_node_event_loop->configuration()->channels()->Get(ii);
+    single_node_factory.DisableForwarding(channel);
+    if (configuration::ChannelIsReadableOnNode(channel, single_node_pi1)) {
+      single_node_fetchers[ii] =
+          single_node_event_loop->MakeRawFetcher(channel);
+      single_node_event_loop->OnRun([channel, ii, &single_node_fetchers]() {
+        EXPECT_FALSE(single_node_fetchers[ii]->Fetch())
+            << "Single EventLoop replay doesn't support pre-loading fetchers. "
+            << configuration::StrippedChannelToString(channel);
+      });
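+      // Compare everything the single-node replay delivers against the record
+      // from the full-factory replay: messages the full replay only saw via a
+      // pre-start fetch get a bound (they may predate the log start), while
+      // messages seen by a watcher must line up exactly, modulo the
+      // kStartupDelay offset between the two replays.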
+      single_node_event_loop->MakeRawNoArgWatcher(
+          channel, [ii, &observed_messages, channel,
+                    kStartupDelay](const Context &context) {
+            if (observed_messages[ii].empty()) {
+              FAIL() << "Observed extra message at "
+                     << context.monotonic_event_time << " on "
+                     << configuration::StrippedChannelToString(channel);
+              return;
+            }
+            const std::pair<monotonic_clock::time_point, bool> &message =
+                observed_messages[ii].front();
+            if (message.second) {
+              EXPECT_LE(message.first,
+                        context.monotonic_event_time + kStartupDelay)
+                  << "Mismatched message times " << context.monotonic_event_time
+                  << " and " << message.first << " on "
+                  << configuration::StrippedChannelToString(channel);
+            } else {
+              EXPECT_EQ(message.first,
+                        context.monotonic_event_time + kStartupDelay)
+                  << "Mismatched message times " << context.monotonic_event_time
+                  << " and " << message.first << " on "
+                  << configuration::StrippedChannelToString(channel);
+            }
+            observed_messages[ii].erase(observed_messages[ii].begin());
+          });
+    }
+  }
+
+  single_node_factory.Run();
+
+  single_node_fetchers.clear();
+
+  single_node_reader.Deregister();
+
+  for (const auto &pair : observed_messages) {
+    EXPECT_TRUE(pair.second.empty())
+        << "Missed " << pair.second.size() << " messages on "
+        << configuration::StrippedChannelToString(
+               single_node_event_loop->configuration()->channels()->Get(
+                   pair.first));
+  }
+}
+
+// Tests that we properly recreate forwarded timestamps when replaying a log.
+// This should be enough that we can then re-run the logger and get a valid log
+// back.
+TEST_P(MultinodeLoggerTest, MessageHeader) {
+  time_converter_.StartEqual();
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader reader(SortParts(logfiles_));
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+  LOG(INFO) << "now pi1 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+  LOG(INFO) << "now pi2 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+  EXPECT_THAT(reader.LoggedNodes(),
+              ::testing::ElementsAre(
+                  configuration::GetNode(reader.logged_configuration(), pi1),
+                  configuration::GetNode(reader.logged_configuration(), pi2)));
+
+  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+  std::unique_ptr<EventLoop> pi1_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi1);
+  std::unique_ptr<EventLoop> pi2_event_loop =
+      log_reader_factory.MakeEventLoop("test", pi2);
+
+  aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi1_fetcher =
+      pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
+  aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi2_fetcher =
+      pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
+
+  aos::Fetcher<examples::Ping> ping_on_pi1_fetcher =
+      pi1_event_loop->MakeFetcher<examples::Ping>("/test");
+  aos::Fetcher<examples::Ping> ping_on_pi2_fetcher =
+      pi2_event_loop->MakeFetcher<examples::Ping>("/test");
+
+  aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi2_fetcher =
+      pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
+  aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi1_fetcher =
+      pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
+
+  aos::Fetcher<examples::Pong> pong_on_pi2_fetcher =
+      pi2_event_loop->MakeFetcher<examples::Pong>("/test");
+  aos::Fetcher<examples::Pong> pong_on_pi1_fetcher =
+      pi1_event_loop->MakeFetcher<examples::Pong>("/test");
+
+  const size_t pi1_timestamp_channel = configuration::ChannelIndex(
+      pi1_event_loop->configuration(), pi1_timestamp_on_pi1_fetcher.channel());
+  const size_t ping_timestamp_channel = configuration::ChannelIndex(
+      pi2_event_loop->configuration(), ping_on_pi2_fetcher.channel());
+
+  const size_t pi2_timestamp_channel = configuration::ChannelIndex(
+      pi2_event_loop->configuration(), pi2_timestamp_on_pi2_fetcher.channel());
+  const size_t pong_timestamp_channel = configuration::ChannelIndex(
+      pi1_event_loop->configuration(), pong_on_pi1_fetcher.channel());
+
+  const chrono::nanoseconds network_delay = event_loop_factory_.network_delay();
+  const chrono::nanoseconds send_delay = event_loop_factory_.send_delay();
+
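+  // With shared remote timestamps, the delivery timestamps for everything pi1
+  // forwards to pi2 ride on a single /aos/remote_timestamps/pi2 channel (a
+  // channel_index of -1 below means "don't check the channel index").  With
+  // split timestamps, each forwarded channel gets its own remote timestamp
+  // channel, so watch each one separately.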
+  for (std::pair<int, std::string> channel :
+       shared()
+           ? std::vector<
+                 std::pair<int, std::string>>{{-1,
+                                               "/aos/remote_timestamps/pi2"}}
+           : std::vector<std::pair<int, std::string>>{
+                 {pi1_timestamp_channel,
+                  "/aos/remote_timestamps/pi2/pi1/aos/"
+                  "aos-message_bridge-Timestamp"},
+                 {ping_timestamp_channel,
+                  "/aos/remote_timestamps/pi2/test/aos-examples-Ping"}}) {
+    pi1_event_loop->MakeWatcher(
+        channel.second,
+        [&pi1_event_loop, &pi2_event_loop, pi1_timestamp_channel,
+         ping_timestamp_channel, &pi1_timestamp_on_pi1_fetcher,
+         &pi1_timestamp_on_pi2_fetcher, &ping_on_pi1_fetcher,
+         &ping_on_pi2_fetcher, network_delay, send_delay,
+         channel_index = channel.first](const RemoteMessage &header) {
+          const aos::monotonic_clock::time_point header_monotonic_sent_time(
+              chrono::nanoseconds(header.monotonic_sent_time()));
+          const aos::realtime_clock::time_point header_realtime_sent_time(
+              chrono::nanoseconds(header.realtime_sent_time()));
+          const aos::monotonic_clock::time_point header_monotonic_remote_time(
+              chrono::nanoseconds(header.monotonic_remote_time()));
+          const aos::realtime_clock::time_point header_realtime_remote_time(
+              chrono::nanoseconds(header.realtime_remote_time()));
+
+          if (channel_index != -1) {
+            ASSERT_EQ(channel_index, header.channel_index());
+          }
+
+          const Context *pi1_context = nullptr;
+          const Context *pi2_context = nullptr;
+
+          if (header.channel_index() == pi1_timestamp_channel) {
+            ASSERT_TRUE(pi1_timestamp_on_pi1_fetcher.FetchNext());
+            ASSERT_TRUE(pi1_timestamp_on_pi2_fetcher.FetchNext());
+            pi1_context = &pi1_timestamp_on_pi1_fetcher.context();
+            pi2_context = &pi1_timestamp_on_pi2_fetcher.context();
+          } else if (header.channel_index() == ping_timestamp_channel) {
+            ASSERT_TRUE(ping_on_pi1_fetcher.FetchNext());
+            ASSERT_TRUE(ping_on_pi2_fetcher.FetchNext());
+            pi1_context = &ping_on_pi1_fetcher.context();
+            pi2_context = &ping_on_pi2_fetcher.context();
+          } else {
+            LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
+                       << configuration::CleanedChannelToString(
+                              pi1_event_loop->configuration()->channels()->Get(
+                                  header.channel_index()));
+          }
+
+          ASSERT_TRUE(header.has_boot_uuid());
+          EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
+                    pi2_event_loop->boot_uuid());
+
+          EXPECT_EQ(pi1_context->queue_index, header.remote_queue_index());
+          EXPECT_EQ(pi2_context->remote_queue_index,
+                    header.remote_queue_index());
+          EXPECT_EQ(pi2_context->queue_index, header.queue_index());
+
+          EXPECT_EQ(pi2_context->monotonic_event_time,
+                    header_monotonic_sent_time);
+          EXPECT_EQ(pi2_context->realtime_event_time,
+                    header_realtime_sent_time);
+          EXPECT_EQ(pi2_context->realtime_remote_time,
+                    header_realtime_remote_time);
+          EXPECT_EQ(pi2_context->monotonic_remote_time,
+                    header_monotonic_remote_time);
+
+          EXPECT_EQ(pi1_context->realtime_event_time,
+                    header_realtime_remote_time);
+          EXPECT_EQ(pi1_context->monotonic_event_time,
+                    header_monotonic_remote_time);
+
+          // Time estimation isn't perfect, but we know the clocks were
+          // identical when logged, so we know when this should have come back.
+          // Confirm we got it when we expected.
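+          // The message goes out to pi2 and the matching remote timestamp
+          // comes back, costing two network delays plus a send delay, which
+          // is the offset checked here.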
+          EXPECT_EQ(pi1_event_loop->context().monotonic_event_time,
+                    pi1_context->monotonic_event_time + 2 * network_delay +
+                        send_delay);
+        });
+  }
+  for (std::pair<int, std::string> channel :
+       shared()
+           ? std::vector<
+                 std::pair<int, std::string>>{{-1,
+                                               "/aos/remote_timestamps/pi1"}}
+           : std::vector<std::pair<int, std::string>>{
+                 {pi2_timestamp_channel,
+                  "/aos/remote_timestamps/pi1/pi2/aos/"
+                  "aos-message_bridge-Timestamp"}}) {
+    pi2_event_loop->MakeWatcher(
+        channel.second,
+        [&pi2_event_loop, &pi1_event_loop, pi2_timestamp_channel,
+         pong_timestamp_channel, &pi2_timestamp_on_pi2_fetcher,
+         &pi2_timestamp_on_pi1_fetcher, &pong_on_pi2_fetcher,
+         &pong_on_pi1_fetcher, network_delay, send_delay,
+         channel_index = channel.first](const RemoteMessage &header) {
+          const aos::monotonic_clock::time_point header_monotonic_sent_time(
+              chrono::nanoseconds(header.monotonic_sent_time()));
+          const aos::realtime_clock::time_point header_realtime_sent_time(
+              chrono::nanoseconds(header.realtime_sent_time()));
+          const aos::monotonic_clock::time_point header_monotonic_remote_time(
+              chrono::nanoseconds(header.monotonic_remote_time()));
+          const aos::realtime_clock::time_point header_realtime_remote_time(
+              chrono::nanoseconds(header.realtime_remote_time()));
+
+          if (channel_index != -1) {
+            ASSERT_EQ(channel_index, header.channel_index());
+          }
+
+          const Context *pi2_context = nullptr;
+          const Context *pi1_context = nullptr;
+
+          if (header.channel_index() == pi2_timestamp_channel) {
+            ASSERT_TRUE(pi2_timestamp_on_pi2_fetcher.FetchNext());
+            ASSERT_TRUE(pi2_timestamp_on_pi1_fetcher.FetchNext());
+            pi2_context = &pi2_timestamp_on_pi2_fetcher.context();
+            pi1_context = &pi2_timestamp_on_pi1_fetcher.context();
+          } else if (header.channel_index() == pong_timestamp_channel) {
+            ASSERT_TRUE(pong_on_pi2_fetcher.FetchNext());
+            ASSERT_TRUE(pong_on_pi1_fetcher.FetchNext());
+            pi2_context = &pong_on_pi2_fetcher.context();
+            pi1_context = &pong_on_pi1_fetcher.context();
+          } else {
+            LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
+                       << configuration::CleanedChannelToString(
+                              pi2_event_loop->configuration()->channels()->Get(
+                                  header.channel_index()));
+          }
+
+          ASSERT_TRUE(header.has_boot_uuid());
+          EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
+                    pi1_event_loop->boot_uuid());
+
+          EXPECT_EQ(pi2_context->queue_index, header.remote_queue_index());
+          EXPECT_EQ(pi1_context->remote_queue_index,
+                    header.remote_queue_index());
+          EXPECT_EQ(pi1_context->queue_index, header.queue_index());
+
+          EXPECT_EQ(pi1_context->monotonic_event_time,
+                    header_monotonic_sent_time);
+          EXPECT_EQ(pi1_context->realtime_event_time,
+                    header_realtime_sent_time);
+          EXPECT_EQ(pi1_context->realtime_remote_time,
+                    header_realtime_remote_time);
+          EXPECT_EQ(pi1_context->monotonic_remote_time,
+                    header_monotonic_remote_time);
+
+          EXPECT_EQ(pi2_context->realtime_event_time,
+                    header_realtime_remote_time);
+          EXPECT_EQ(pi2_context->monotonic_event_time,
+                    header_monotonic_remote_time);
+
+          // Time estimation isn't perfect, but we know the clocks were
+          // identical when logged, so we know when this should have come back.
+          // Confirm we got it when we expected.
+          EXPECT_EQ(pi2_event_loop->context().monotonic_event_time,
+                    pi2_context->monotonic_event_time + 2 * network_delay +
+                        send_delay);
+        });
+  }
+
+  // And confirm we can re-create a log again, while checking the contents.
+  {
+    LoggerState pi1_logger = MakeLogger(
+        log_reader_factory.GetNodeEventLoopFactory("pi1"), &log_reader_factory);
+    LoggerState pi2_logger = MakeLogger(
+        log_reader_factory.GetNodeEventLoopFactory("pi2"), &log_reader_factory);
+
+    StartLogger(&pi1_logger, tmp_dir_ + "/relogged1");
+    StartLogger(&pi2_logger, tmp_dir_ + "/relogged2");
+
+    log_reader_factory.Run();
+  }
+
+  reader.Deregister();
+
+  // And verify that we can run the LogReader over the relogged files without
+  // hitting any fatal errors.
+  {
+    LogReader relogged_reader(SortParts(MakeLogFiles(
+        tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2", 3, 3, true)));
+    relogged_reader.Register();
+
+    relogged_reader.event_loop_factory()->Run();
+  }
+  // And confirm that we can read the logged file using the reader's
+  // configuration.
+  {
+    LogReader relogged_reader(
+        SortParts(MakeLogFiles(tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2",
+                               3, 3, true)),
+        reader.configuration());
+    relogged_reader.Register();
+
+    relogged_reader.event_loop_factory()->Run();
+  }
+}
+
+// Tests that we properly populate and extract the logger_start time by setting
+// up a clock difference between 2 nodes and looking at the resulting parts.
+TEST_P(MultinodeLoggerTest, LoggerStartTime) {
+  std::vector<std::string> actual_filenames;
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
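+  // pi2's monotonic clock starts 1000 seconds ahead of pi1's; that is the
+  // offset the per-part logger_*_start_time checks below should recover, with
+  // the sign depending on which node did the logging.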
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+    pi1_logger.AppendAllFilenames(&actual_filenames);
+    pi2_logger.AppendAllFilenames(&actual_filenames);
+  }
+
+  ASSERT_THAT(actual_filenames,
+              ::testing::UnorderedElementsAreArray(logfiles_));
+
+  for (const LogFile &log_file : SortParts(logfiles_)) {
+    for (const LogParts &log_part : log_file.parts) {
+      if (log_part.node == log_file.logger_node) {
+        EXPECT_EQ(log_part.logger_monotonic_start_time,
+                  aos::monotonic_clock::min_time);
+        EXPECT_EQ(log_part.logger_realtime_start_time,
+                  aos::realtime_clock::min_time);
+      } else {
+        const chrono::seconds offset = log_file.logger_node == "pi1"
+                                           ? -chrono::seconds(1000)
+                                           : chrono::seconds(1000);
+        EXPECT_EQ(log_part.logger_monotonic_start_time,
+                  log_part.monotonic_start_time + offset);
+        EXPECT_EQ(log_part.logger_realtime_start_time,
+                  log_file.realtime_start_time +
+                      (log_part.logger_monotonic_start_time -
+                       log_file.monotonic_start_time));
+      }
+    }
+  }
+}
+
+// Test that renaming the log base renames the folder.
+TEST_P(MultinodeLoggerTest, LoggerRenameFolder) {
+  util::UnlinkRecursive(tmp_dir_ + "/renamefolder");
+  util::UnlinkRecursive(tmp_dir_ + "/new-good");
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+  logfile_base1_ = tmp_dir_ + "/renamefolder/multi_logfile1";
+  logfile_base2_ = tmp_dir_ + "/renamefolder/multi_logfile2";
+  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+  LoggerState pi1_logger = MakeLogger(pi1_);
+  LoggerState pi2_logger = MakeLogger(pi2_);
+
+  StartLogger(&pi1_logger);
+  StartLogger(&pi2_logger);
+
+  event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  logfile_base1_ = tmp_dir_ + "/new-good/multi_logfile1";
+  logfile_base2_ = tmp_dir_ + "/new-good/multi_logfile2";
+  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+  ASSERT_TRUE(pi1_logger.logger->RenameLogBase(logfile_base1_));
+  ASSERT_TRUE(pi2_logger.logger->RenameLogBase(logfile_base2_));
+  for (auto &file : logfiles_) {
+    struct stat s;
+    EXPECT_EQ(0, stat(file.c_str(), &s));
+  }
+}
+
+// Test that renaming the file part of the log base (not just the folder)
+// dies.
+TEST_P(MultinodeLoggerDeathTest, LoggerRenameFile) {
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+  util::UnlinkRecursive(tmp_dir_ + "/renamefile");
+  logfile_base1_ = tmp_dir_ + "/renamefile/multi_logfile1";
+  logfile_base2_ = tmp_dir_ + "/renamefile/multi_logfile2";
+  logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+  LoggerState pi1_logger = MakeLogger(pi1_);
+  StartLogger(&pi1_logger);
+  event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  logfile_base1_ = tmp_dir_ + "/new-renamefile/new_multi_logfile1";
+  EXPECT_DEATH({ pi1_logger.logger->RenameLogBase(logfile_base1_); },
+               "Rename of file base from");
+}
+
+// TODO(austin): We can write a test which recreates a logfile and confirms that
+// we get it back.  That is the ultimate test.
+
+// Tests that we properly handle a remote node rebooting partway through the
+// log: the boot UUIDs and oldest-timestamp headers in the resulting parts
+// should update across the reboot, and the log should still be replayable.
+TEST_P(MultinodeLoggerTest, RemoteReboot) {
+  std::vector<std::string> actual_filenames;
+
+  const UUID pi1_boot0 = UUID::Random();
+  const UUID pi2_boot0 = UUID::Random();
+  const UUID pi2_boot1 = UUID::Random();
+  {
+    CHECK_EQ(pi1_index_, 0u);
+    CHECK_EQ(pi2_index_, 1u);
+
+    time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
+    time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
+    time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
+
+    time_converter_.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch()});
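+    // pi2 reboots 10.1 seconds in on the distributed clock, and its monotonic
+    // clock comes back up at 1.323 seconds into the new boot.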
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
+    time_converter_.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp::epoch() + reboot_time,
+         BootTimestamp{
+             .boot = 1,
+             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
+  }
+
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+              pi1_boot0);
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+              pi2_boot0);
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+    VLOG(1) << "Reboot now!";
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+              pi1_boot0);
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+              pi2_boot1);
+
+    pi1_logger.AppendAllFilenames(&actual_filenames);
+  }
+
+  std::sort(actual_filenames.begin(), actual_filenames.end());
+  std::sort(pi1_reboot_logfiles_.begin(), pi1_reboot_logfiles_.end());
+  ASSERT_THAT(actual_filenames,
+              ::testing::UnorderedElementsAreArray(pi1_reboot_logfiles_));
+
+  // Confirm that our new oldest timestamps properly update as we reboot and
+  // rotate.
+  for (const std::string &file : pi1_reboot_logfiles_) {
+    std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
+        ReadHeader(file);
+    CHECK(log_header);
+    if (log_header->message().has_configuration()) {
+      continue;
+    }
+
+    const monotonic_clock::time_point monotonic_start_time =
+        monotonic_clock::time_point(
+            chrono::nanoseconds(log_header->message().monotonic_start_time()));
+    const UUID source_node_boot_uuid = UUID::FromString(
+        log_header->message().source_node_boot_uuid()->string_view());
+
+    if (log_header->message().node()->name()->string_view() != "pi1") {
+      // The remote message channel should rotate later and have more parts.
+      // This is only true on the log files with shared remote messages.
+      //
+      // TODO(austin): I'm not the most thrilled with this test pattern...  It
+      // feels brittle in a different way.
+      if (file.find("aos.message_bridge.RemoteMessage") == std::string::npos ||
+          !shared()) {
+        switch (log_header->message().parts_index()) {
+          case 0:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 1:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            ASSERT_EQ(monotonic_start_time,
+                      monotonic_clock::epoch() + chrono::seconds(1));
+            break;
+          case 2:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
+            break;
+          case 3:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
+                                                chrono::nanoseconds(2322999462))
+                << " on " << file;
+            break;
+          default:
+            FAIL();
+            break;
+        }
+      } else {
+        switch (log_header->message().parts_index()) {
+          case 0:
+          case 1:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 2:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            ASSERT_EQ(monotonic_start_time,
+                      monotonic_clock::epoch() + chrono::seconds(1));
+            break;
+          case 3:
+          case 4:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
+            break;
+          case 5:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
+                                                chrono::nanoseconds(2322999462))
+                << " on " << file;
+            break;
+          default:
+            FAIL();
+            break;
+        }
+      }
+      continue;
+    }
+    SCOPED_TRACE(file);
+    SCOPED_TRACE(aos::FlatbufferToJson(
+        *log_header, {.multi_line = true, .max_vector_size = 100}));
+    ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
+    ASSERT_EQ(
+        log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
+    EXPECT_EQ(
+        log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
+        monotonic_clock::max_time.time_since_epoch().count());
+    ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
+    ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
+              2u);
+    EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_remote_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_remote_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    EXPECT_EQ(log_header->message()
+                  .oldest_remote_unreliable_monotonic_timestamps()
+                  ->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_local_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_local_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    EXPECT_EQ(log_header->message()
+                  .oldest_local_unreliable_monotonic_timestamps()
+                  ->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+
+    const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+        monotonic_clock::time_point(chrono::nanoseconds(
+            log_header->message().oldest_remote_monotonic_timestamps()->Get(
+                1)));
+    const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+        monotonic_clock::time_point(chrono::nanoseconds(
+            log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
+    const monotonic_clock::time_point
+        oldest_remote_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_remote_unreliable_monotonic_timestamps()
+                    ->Get(1)));
+    const monotonic_clock::time_point
+        oldest_local_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_local_unreliable_monotonic_timestamps()
+                    ->Get(1)));
+    const monotonic_clock::time_point
+        oldest_remote_reliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_remote_reliable_monotonic_timestamps()
+                    ->Get(1)));
+    const monotonic_clock::time_point
+        oldest_local_reliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_local_reliable_monotonic_timestamps()
+                    ->Get(1)));
+    const monotonic_clock::time_point
+        oldest_logger_remote_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_logger_remote_unreliable_monotonic_timestamps()
+                    ->Get(0)));
+    const monotonic_clock::time_point
+        oldest_logger_local_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_logger_local_unreliable_monotonic_timestamps()
+                    ->Get(0)));
+    EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+              monotonic_clock::max_time);
+    EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+              monotonic_clock::max_time);
+    switch (log_header->message().parts_index()) {
+      case 0:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        break;
+      case 1:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90200)));
+        EXPECT_EQ(oldest_local_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90350)));
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90200)));
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90350)));
+        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        break;
+      case 2:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90200)))
+            << file;
+        EXPECT_EQ(oldest_local_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90350)))
+            << file;
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90200)))
+            << file;
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(90350)))
+            << file;
+        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(100000)))
+            << file;
+        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(100150)))
+            << file;
+        break;
+      case 3:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::milliseconds(1323) +
+                                              chrono::microseconds(200)));
+        EXPECT_EQ(oldest_local_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(10100350)));
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::milliseconds(1323) +
+                                              chrono::microseconds(200)));
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(10100350)));
+        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time)
+            << file;
+        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                  monotonic_clock::max_time)
+            << file;
+        break;
+      case 4:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::milliseconds(1323) +
+                                              chrono::microseconds(200)));
+        EXPECT_EQ(oldest_local_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(10100350)));
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::milliseconds(1323) +
+                                              chrono::microseconds(200)));
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(10100350)));
+        EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(1423000)))
+            << file;
+        EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                  monotonic_clock::time_point(chrono::microseconds(10200150)))
+            << file;
+        break;
+      default:
+        FAIL();
+        break;
+    }
+  }
+
+  // Confirm that the resulting log, which has boot UUIDs for both of pi2's
+  // boots, can be replayed.
+  {
+    LogReader reader(SortParts(pi1_reboot_logfiles_));
+
+    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+    log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+    // This sends out the fetched messages and advances time to the start of
+    // the log file.
+    reader.Register(&log_reader_factory);
+
+    log_reader_factory.Run();
+
+    reader.Deregister();
+  }
+}
+
+// Tests that we can sort a log which only has timestamps from the remote
+// because the local message_bridge_client failed to connect.
+TEST_P(MultinodeLoggerTest, RemoteRebootOnlyTimestamps) {
+  const UUID pi1_boot0 = UUID::Random();
+  const UUID pi2_boot0 = UUID::Random();
+  const UUID pi2_boot1 = UUID::Random();
+  {
+    CHECK_EQ(pi1_index_, 0u);
+    CHECK_EQ(pi2_index_, 1u);
+
+    time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
+    time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
+    time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
+
+    time_converter_.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch()});
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
+    time_converter_.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp::epoch() + reboot_time,
+         BootTimestamp{
+             .boot = 1,
+             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
+  }
+  pi2_->Disconnect(pi1_->node());
+
+  std::vector<std::string> filenames;
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+              pi1_boot0);
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+              pi2_boot0);
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+    VLOG(1) << "Reboot now!";
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+              pi1_boot0);
+    EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+              pi2_boot1);
+    pi1_logger.AppendAllFilenames(&filenames);
+  }
+
+  std::sort(filenames.begin(), filenames.end());
+
+  // Confirm that our new oldest timestamps properly update as we reboot and
+  // rotate.
+  size_t timestamp_file_count = 0;
+  for (const std::string &file : filenames) {
+    std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
+        ReadHeader(file);
+    CHECK(log_header);
+
+    if (log_header->message().has_configuration()) {
+      continue;
+    }
+
+    const monotonic_clock::time_point monotonic_start_time =
+        monotonic_clock::time_point(
+            chrono::nanoseconds(log_header->message().monotonic_start_time()));
+    const UUID source_node_boot_uuid = UUID::FromString(
+        log_header->message().source_node_boot_uuid()->string_view());
+
+    ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
+    ASSERT_EQ(
+        log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
+    ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
+    ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
+              2u);
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_remote_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_remote_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_local_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_local_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_remote_reliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_remote_reliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    ASSERT_TRUE(
+        log_header->message().has_oldest_local_reliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_local_reliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+
+    ASSERT_TRUE(
+        log_header->message()
+            .has_oldest_logger_remote_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_logger_remote_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+    ASSERT_TRUE(log_header->message()
+                    .has_oldest_logger_local_unreliable_monotonic_timestamps());
+    ASSERT_EQ(log_header->message()
+                  .oldest_logger_local_unreliable_monotonic_timestamps()
+                  ->size(),
+              2u);
+
+    if (log_header->message().node()->name()->string_view() != "pi1") {
+      ASSERT_TRUE(file.find("aos.message_bridge.RemoteMessage") !=
+                  std::string::npos);
+
+      const std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
+          ReadNthMessage(file, 0);
+      CHECK(msg);
+
+      EXPECT_TRUE(msg->message().has_monotonic_sent_time());
+      EXPECT_TRUE(msg->message().has_monotonic_remote_time());
+
+      const monotonic_clock::time_point
+          expected_oldest_local_monotonic_timestamps(
+              chrono::nanoseconds(msg->message().monotonic_sent_time()));
+      const monotonic_clock::time_point
+          expected_oldest_remote_monotonic_timestamps(
+              chrono::nanoseconds(msg->message().monotonic_remote_time()));
+      const monotonic_clock::time_point
+          expected_oldest_timestamp_monotonic_timestamps(
+              chrono::nanoseconds(msg->message().monotonic_timestamp_time()));
+
+      EXPECT_NE(expected_oldest_local_monotonic_timestamps,
+                monotonic_clock::min_time);
+      EXPECT_NE(expected_oldest_remote_monotonic_timestamps,
+                monotonic_clock::min_time);
+      EXPECT_NE(expected_oldest_timestamp_monotonic_timestamps,
+                monotonic_clock::min_time);
+
+      ++timestamp_file_count;
+      // Since the log file is from the perspective of the other node, pull
+      // the oldest timestamps recorded in its header back out so we can
+      // compare them against the first message below.
+      const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+          monotonic_clock::time_point(chrono::nanoseconds(
+              log_header->message().oldest_remote_monotonic_timestamps()->Get(
+                  0)));
+      const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+          monotonic_clock::time_point(chrono::nanoseconds(
+              log_header->message().oldest_local_monotonic_timestamps()->Get(
+                  0)));
+      const monotonic_clock::time_point
+          oldest_remote_unreliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_remote_unreliable_monotonic_timestamps()
+                      ->Get(0)));
+      const monotonic_clock::time_point
+          oldest_local_unreliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_local_unreliable_monotonic_timestamps()
+                      ->Get(0)));
+      const monotonic_clock::time_point
+          oldest_remote_reliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_remote_reliable_monotonic_timestamps()
+                      ->Get(0)));
+      const monotonic_clock::time_point
+          oldest_local_reliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_local_reliable_monotonic_timestamps()
+                      ->Get(0)));
+      const monotonic_clock::time_point
+          oldest_logger_remote_unreliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_logger_remote_unreliable_monotonic_timestamps()
+                      ->Get(1)));
+      const monotonic_clock::time_point
+          oldest_logger_local_unreliable_monotonic_timestamps =
+              monotonic_clock::time_point(chrono::nanoseconds(
+                  log_header->message()
+                      .oldest_logger_local_unreliable_monotonic_timestamps()
+                      ->Get(1)));
+
+      const Channel *channel =
+          event_loop_factory_.configuration()->channels()->Get(
+              msg->message().channel_index());
+      const Connection *connection = configuration::ConnectionToNode(
+          channel, configuration::GetNode(
+                       event_loop_factory_.configuration(),
+                       log_header->message().node()->name()->string_view()));
+
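+      // A time_to_live of 0 marks the connection as reliable, which changes
+      // which of the oldest timestamp fields we expect to be populated.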
+      const bool reliable = connection->time_to_live() == 0;
+
+      SCOPED_TRACE(file);
+      SCOPED_TRACE(aos::FlatbufferToJson(
+          *log_header, {.multi_line = true, .max_vector_size = 100}));
+
+      if (shared()) {
+        // Confirm that the oldest timestamps match what we expect.  Based on
+        // what we are doing, we know that the oldest time is the first
+        // message's time.
+        //
+        // This makes the test robust to both the split and combined config
+        // tests.
+        switch (log_header->message().parts_index()) {
+          case 0:
+            EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                      expected_oldest_remote_monotonic_timestamps);
+            EXPECT_EQ(oldest_local_monotonic_timestamps,
+                      expected_oldest_local_monotonic_timestamps);
+            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+                      expected_oldest_local_monotonic_timestamps)
+                << file;
+            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+                      expected_oldest_timestamp_monotonic_timestamps)
+                << file;
+
+            if (reliable) {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+            } else {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+            }
+            break;
+          case 1:
+            EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                      monotonic_clock::epoch() + chrono::nanoseconds(90000000));
+            EXPECT_EQ(oldest_local_monotonic_timestamps,
+                      monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+                      monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+                      monotonic_clock::epoch() + chrono::nanoseconds(90250000));
+            if (reliable) {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+              EXPECT_EQ(
+                  oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(90000000));
+              EXPECT_EQ(
+                  oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+            } else {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+            }
+            break;
+          case 2:
+            EXPECT_EQ(
+                oldest_remote_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
+            EXPECT_EQ(
+                oldest_local_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+            EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+                      expected_oldest_local_monotonic_timestamps)
+                << file;
+            EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+                      expected_oldest_timestamp_monotonic_timestamps)
+                << file;
+            if (reliable) {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+            } else {
+              EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+            }
+            break;
+
+          case 3:
+            EXPECT_EQ(
+                oldest_remote_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
+            EXPECT_EQ(
+                oldest_local_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+            EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                      expected_oldest_remote_monotonic_timestamps);
+            EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                      expected_oldest_local_monotonic_timestamps);
+            EXPECT_EQ(
+                oldest_logger_remote_unreliable_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+            EXPECT_EQ(
+                oldest_logger_local_unreliable_monotonic_timestamps,
+                monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
+            break;
+          default:
+            FAIL();
+            break;
+        }
+
+        switch (log_header->message().parts_index()) {
+          case 0:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 1:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 2:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 3:
+            if (shared()) {
+              EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+              EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+              break;
+            }
+            [[fallthrough]];
+          default:
+            FAIL();
+            break;
+        }
+      } else {
+        switch (log_header->message().parts_index()) {
+          case 0:
+            if (reliable) {
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(
+                  oldest_logger_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(100150000))
+                  << file;
+              EXPECT_EQ(
+                  oldest_logger_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(100250000))
+                  << file;
+            } else {
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+              EXPECT_EQ(
+                  oldest_logger_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(90150000))
+                  << file;
+              EXPECT_EQ(
+                  oldest_logger_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(90250000))
+                  << file;
+            }
+            break;
+          case 1:
+            if (reliable) {
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        monotonic_clock::max_time);
+              EXPECT_EQ(
+                  oldest_logger_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+              EXPECT_EQ(
+                  oldest_logger_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
+            } else {
+              EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                        expected_oldest_remote_monotonic_timestamps);
+              EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                        expected_oldest_local_monotonic_timestamps);
+              EXPECT_EQ(
+                  oldest_logger_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(1323150000));
+              EXPECT_EQ(
+                  oldest_logger_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::epoch() + chrono::nanoseconds(10100250000));
+            }
+            break;
+          default:
+            FAIL();
+            break;
+        }
+
+        switch (log_header->message().parts_index()) {
+          case 0:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          case 1:
+            EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+            EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+            break;
+          default:
+            FAIL();
+            break;
+        }
+      }
+
+      continue;
+    }
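+    // The remaining parts were logged for pi1 itself; nothing ever populated
+    // their index 0 oldest timestamp entries, so they should still read
+    // max_time.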
+    EXPECT_EQ(
+        log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
+        monotonic_clock::max_time.time_since_epoch().count());
+    EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+    EXPECT_EQ(log_header->message()
+                  .oldest_remote_unreliable_monotonic_timestamps()
+                  ->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+    EXPECT_EQ(log_header->message()
+                  .oldest_local_unreliable_monotonic_timestamps()
+                  ->Get(0),
+              monotonic_clock::max_time.time_since_epoch().count());
+
+    const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+        monotonic_clock::time_point(chrono::nanoseconds(
+            log_header->message().oldest_remote_monotonic_timestamps()->Get(
+                1)));
+    const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+        monotonic_clock::time_point(chrono::nanoseconds(
+            log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
+    const monotonic_clock::time_point
+        oldest_remote_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_remote_unreliable_monotonic_timestamps()
+                    ->Get(1)));
+    const monotonic_clock::time_point
+        oldest_local_unreliable_monotonic_timestamps =
+            monotonic_clock::time_point(chrono::nanoseconds(
+                log_header->message()
+                    .oldest_local_unreliable_monotonic_timestamps()
+                    ->Get(1)));
+    switch (log_header->message().parts_index()) {
+      case 0:
+        EXPECT_EQ(oldest_remote_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
+        EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+                  monotonic_clock::max_time);
+        break;
+      default:
+        FAIL();
+        break;
+    }
+  }
+
+  // Both the shared and split timestamp configurations should produce the
+  // same number of timestamp files.
+  EXPECT_EQ(timestamp_file_count, 4u);
+
+  // Confirm that we can actually sort the resulting log and read it.
+  {
+    LogReader reader(SortParts(filenames));
+
+    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+    log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+    // This sends out the fetched messages and advances time to the start of
+    // the log file.
+    reader.Register(&log_reader_factory);
+
+    log_reader_factory.Run();
+
+    reader.Deregister();
+  }
+}
+
+// Tests that we properly handle one direction of message_bridge being
+// unavailable.
+TEST_P(MultinodeLoggerTest, OneDirectionWithNegativeSlope) {
+  pi1_->Disconnect(pi2_->node());
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+
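+  // While disconnected, pi2's clock advances 1 ms less than pi1's over the
+  // 10 s window, giving the offset the negative slope named in the test.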
+  time_converter_.AddMonotonic(
+      {chrono::milliseconds(10000),
+       chrono::milliseconds(10000) - chrono::milliseconds(1)});
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  }
+
+  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
+  // to confirm the right thing happened.
+  ConfirmReadable(pi1_single_direction_logfiles_);
+}
+
+// Tests that we properly handle one direction of message_bridge being
+// unavailable.
+TEST_P(MultinodeLoggerTest, OneDirectionWithPositiveSlope) {
+  pi1_->Disconnect(pi2_->node());
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(500)});
+
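+  // Here pi2's clock instead gains 1 ms over the 10 s window, giving the
+  // offset a positive slope.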
+  time_converter_.AddMonotonic(
+      {chrono::milliseconds(10000),
+       chrono::milliseconds(10000) + chrono::milliseconds(1)});
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  }
+
+  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
+  // to confirm the right thing happened.
+  ConfirmReadable(pi1_single_direction_logfiles_);
+}
+
+// Tests that we explode with a better error than an out-of-order error if
+// someone passes in a part file twice.
+TEST_P(MultinodeLoggerTest, DuplicateLogFiles) {
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  }
+
+  std::vector<std::string> duplicates;
+  for (const std::string &f : pi1_single_direction_logfiles_) {
+    duplicates.emplace_back(f);
+    duplicates.emplace_back(f);
+  }
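+  // Every part shows up twice, so SortParts should die with the duplicate
+  // parts error rather than an out-of-order error.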
+  EXPECT_DEATH({ SortParts(duplicates); }, "Found duplicate parts in");
+}
+
+// Tests that we explode if someone loses a part out of the middle of a log.
+TEST_P(MultinodeLoggerTest, MissingPartsFromMiddle) {
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    aos::monotonic_clock::time_point last_rotation_time =
+        pi1_logger.event_loop->monotonic_now();
+    pi1_logger.logger->set_on_logged_period([&] {
+      const auto now = pi1_logger.event_loop->monotonic_now();
+      if (now > last_rotation_time + std::chrono::seconds(5)) {
+        pi1_logger.logger->Rotate();
+        last_rotation_time = now;
+      }
+    });
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  }
+
+  std::vector<std::string> missing_parts;
+
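+  // Keep part0 and part2 (plus the config part) but drop part1, so SortParts
+  // sees a gap in the middle of the log.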
+  missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+  missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
+  missing_parts.emplace_back(absl::StrCat(
+      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+
+  EXPECT_DEATH({ SortParts(missing_parts); },
+               "Broken log, missing part files between");
+}
+
+// Tests that we properly handle a dead node.  Do this by just disconnecting it
+// and only using one node's logs.
+TEST_P(MultinodeLoggerTest, DeadNode) {
+  pi1_->Disconnect(pi2_->node());
+  pi2_->Disconnect(pi1_->node());
+  time_converter_.AddMonotonic(
+      {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(10000));
+  }
+
+  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
+  // to confirm the right thing happened.
+  ConfirmReadable(MakePi1DeadNodeLogfiles());
+}
+
+// Tests that we can relog with a different config.  This makes most sense when
+// you are trying to edit a log and want to use channel renaming + the original
+// config in the new log.
+TEST_P(MultinodeLoggerTest, LogDifferentConfig) {
+  time_converter_.StartEqual();
+  {
+    LoggerState pi1_logger = MakeLogger(pi1_);
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+    StartLogger(&pi1_logger);
+    StartLogger(&pi2_logger);
+
+    event_loop_factory_.RunFor(chrono::milliseconds(20000));
+  }
+
+  LogReader reader(SortParts(logfiles_));
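+  // Rename the logged /test aos.examples.Ping channel with the /original
+  // prefix, exercising the channel renaming described above.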
+  reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
+
+  SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+  log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+  // This sends out the fetched messages and advances time to the start of the
+  // log file.
+  reader.Register(&log_reader_factory);
+
+  const Node *pi1 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi1");
+  const Node *pi2 =
+      configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+  LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+  LOG(INFO) << "now pi1 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+  LOG(INFO) << "now pi2 "
+            << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+  EXPECT_THAT(reader.LoggedNodes(),
+              ::testing::ElementsAre(
+                  configuration::GetNode(reader.logged_configuration(), pi1),
+                  configuration::GetNode(reader.logged_configuration(), pi2)));
+
+  reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+  // And confirm we can re-create a log again, while checking the contents.
+  std::vector<std::string> log_files;
+  {
+    LoggerState pi1_logger =
+        MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi1"),
+                   &log_reader_factory, reader.logged_configuration());
+    LoggerState pi2_logger =
+        MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi2"),
+                   &log_reader_factory, reader.logged_configuration());
+
+    pi1_logger.StartLogger(tmp_dir_ + "/relogged1");
+    pi2_logger.StartLogger(tmp_dir_ + "/relogged2");
+
+    log_reader_factory.Run();
+
+    for (auto &x : pi1_logger.log_namer->all_filenames()) {
+      log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged1_", x));
+    }
+    for (auto &x : pi2_logger.log_namer->all_filenames()) {
+      log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged2_", x));
+    }
+  }
+
+  reader.Deregister();
+
+  // And verify that we can run the LogReader over the relogged files without
+  // hitting any fatal errors.
+  {
+    LogReader relogged_reader(SortParts(log_files));
+    relogged_reader.Register();
+
+    relogged_reader.event_loop_factory()->Run();
+  }
+}
+
+// Tests that we properly replay a log where the start time for a node is before
+// any data on the node.  This can happen if the logger starts before data is
+// published.  While the scenario below is a bit convoluted, we have seen logs
+// like this generated out in the wild.
+TEST(MultinodeRebootLoggerTest, StartTimeBeforeData) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split3_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  NodeEventLoopFactory *const pi3 =
+      event_loop_factory.GetNodeEventLoopFactory("pi3");
+  const size_t pi3_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi3->node());
+
+  const std::string kLogfile1_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile1/";
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  const std::string kLogfile2_2 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+  const std::string kLogfile3_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile3/";
+  util::UnlinkRecursive(kLogfile1_1);
+  util::UnlinkRecursive(kLogfile2_1);
+  util::UnlinkRecursive(kLogfile2_2);
+  util::UnlinkRecursive(kLogfile3_1);
+  const UUID pi1_boot0 = UUID::Random();
+  const UUID pi2_boot0 = UUID::Random();
+  const UUID pi2_boot1 = UUID::Random();
+  const UUID pi3_boot0 = UUID::Random();
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+    CHECK_EQ(pi3_index, 2u);
+
+    time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
+    time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
+    time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
+    time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch(),
+         BootTimestamp::epoch()});
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(20000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp::epoch() + reboot_time,
+         BootTimestamp{
+             .boot = 1,
+             .time = monotonic_clock::epoch() + chrono::milliseconds(1323)},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  // Make everything perfectly quiet.
+  event_loop_factory.SkipTimingReport();
+  event_loop_factory.DisableStatistics();
+
+  std::vector<std::string> filenames;
+  {
+    LoggerState pi1_logger = MakeLoggerState(
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    LoggerState pi3_logger = MakeLoggerState(
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    {
+      // And now start the logger.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+      event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+      pi1_logger.StartLogger(kLogfile1_1);
+      pi3_logger.StartLogger(kLogfile3_1);
+      pi2_logger.StartLogger(kLogfile2_1);
+
+      event_loop_factory.RunFor(chrono::milliseconds(10000));
+
+      // Now that we've got a start time in the past, turn on data.
+      event_loop_factory.EnableStatistics();
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+
+      pi2->AlwaysStart<Pong>("pong");
+
+      event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+
+      // Stop logging on pi2 before rebooting and completely shut off all
+      // messages on pi2.
+      pi2->DisableStatistics();
+      pi1->Disconnect(pi2->node());
+      pi2->Disconnect(pi1->node());
+    }
+    event_loop_factory.RunFor(chrono::milliseconds(7000));
+    // pi2 now reboots.
+    {
+      event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+      // Start logging again on pi2 after it is up.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+      pi2_logger.StartLogger(kLogfile2_2);
+
+      event_loop_factory.RunFor(chrono::milliseconds(10000));
+      // And, now that we have a start time in the log, turn data back on.
+      pi2->EnableStatistics();
+      pi1->Connect(pi2->node());
+      pi2->Connect(pi1->node());
+
+      pi2->AlwaysStart<Pong>("pong");
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+
+      event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+    }
+
+    pi1_logger.AppendAllFilenames(&filenames);
+    pi3_logger.AppendAllFilenames(&filenames);
+  }
+
+  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
+  // to confirm the right thing happened.
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  auto result = ConfirmReadable(filenames);
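+  // result[node] holds the realtime start times (.first) and end times
+  // (.second) observed for each boot of that node.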
+  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch() +
+                                                      chrono::seconds(1)));
+  EXPECT_THAT(result[0].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(34990350)));
+
+  EXPECT_THAT(result[1].first,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::seconds(1),
+                  realtime_clock::epoch() + chrono::microseconds(3323000)));
+  EXPECT_THAT(result[1].second,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::microseconds(13990200),
+                  realtime_clock::epoch() + chrono::microseconds(16313200)));
+
+  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch() +
+                                                      chrono::seconds(1)));
+  EXPECT_THAT(result[2].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(34900150)));
+}
+
+// Tests that local data logged before remote data after a reboot is properly
+// replayed.  When solving the timestamp problem, we only trigger a reboot in
+// the timestamp interpolation function once we actually have a point in that
+// function.  Originally that only happened when a point passed the noncausal
+// filter.  At the start of time for the second boot, if we aren't careful, we
+// will have messages which need to be published at times before the boot.
+// This happens when a local message lands in the log before any forwarded
+// message, so there is no point yet in the interpolation function, which
+// delays the reboot.  So, we need to recreate that situation and make sure it
+// doesn't come back.
+TEST(MultinodeRebootLoggerTest,
+     LocalMessageBeforeRemoteBeforeStartAfterReboot) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split3_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  NodeEventLoopFactory *const pi3 =
+      event_loop_factory.GetNodeEventLoopFactory("pi3");
+  const size_t pi3_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi3->node());
+
+  const std::string kLogfile1_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile1/";
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  const std::string kLogfile2_2 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+  const std::string kLogfile3_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile3/";
+  util::UnlinkRecursive(kLogfile1_1);
+  util::UnlinkRecursive(kLogfile2_1);
+  util::UnlinkRecursive(kLogfile2_2);
+  util::UnlinkRecursive(kLogfile3_1);
+  const UUID pi1_boot0 = UUID::Random();
+  const UUID pi2_boot0 = UUID::Random();
+  const UUID pi2_boot1 = UUID::Random();
+  const UUID pi3_boot0 = UUID::Random();
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+    CHECK_EQ(pi3_index, 2u);
+
+    time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
+    time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
+    time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
+    time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch(),
+         BootTimestamp::epoch()});
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp::epoch() + reboot_time,
+         BootTimestamp{.boot = 1,
+                       .time = monotonic_clock::epoch() + reboot_time +
+                               chrono::seconds(100)},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  std::vector<std::string> filenames;
+  {
+    LoggerState pi1_logger = MakeLoggerState(
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    LoggerState pi3_logger = MakeLoggerState(
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    {
+      // And now start the logger.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+      pi1_logger.StartLogger(kLogfile1_1);
+      pi3_logger.StartLogger(kLogfile3_1);
+      pi2_logger.StartLogger(kLogfile2_1);
+
+      event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+      // Now that we've got a start time in the past, turn on data.
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+
+      pi2->AlwaysStart<Pong>("pong");
+
+      event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+
+      // Disable any remote messages on pi2.
+      pi1->Disconnect(pi2->node());
+      pi2->Disconnect(pi1->node());
+    }
+    event_loop_factory.RunFor(chrono::milliseconds(995));
+    // pi2 now reboots at 5 seconds.
+    {
+      event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+      // Make local stuff happen before we start logging and connect the remote.
+      pi2->AlwaysStart<Pong>("pong");
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+      event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+      // Start logging again on pi2 after it is up.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+      pi2_logger.StartLogger(kLogfile2_2);
+
+      // And allow remote messages now that we have some local ones.
+      pi1->Connect(pi2->node());
+      pi2->Connect(pi1->node());
+
+      event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+      event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+    }
+
+    pi1_logger.AppendAllFilenames(&filenames);
+    pi3_logger.AppendAllFilenames(&filenames);
+  }
+
+  // Confirm that we can parse the result.  LogReader has enough internal CHECKs
+  // to confirm the right thing happened.
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  auto result = ConfirmReadable(filenames);
+
+  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
+  EXPECT_THAT(result[0].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(11000350)));
+
+  EXPECT_THAT(result[1].first,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch(),
+                  realtime_clock::epoch() + chrono::microseconds(107005000)));
+  EXPECT_THAT(result[1].second,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::microseconds(4000150),
+                  realtime_clock::epoch() + chrono::microseconds(111000200)));
+
+  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
+  EXPECT_THAT(result[2].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(11000150)));
+
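+  // Re-read the same log with explicit realtime start and stop times and
+  // confirm each node's boots get clipped to the [2 s, 3 s] window.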
+  auto start_stop_result = ConfirmReadable(
+      filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
+      realtime_clock::epoch() + chrono::milliseconds(3000));
+
+  EXPECT_THAT(
+      start_stop_result[0].first,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+  EXPECT_THAT(
+      start_stop_result[0].second,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+  EXPECT_THAT(
+      start_stop_result[1].first,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+  EXPECT_THAT(
+      start_stop_result[1].second,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+  EXPECT_THAT(
+      start_stop_result[2].first,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+  EXPECT_THAT(
+      start_stop_result[2].second,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+}
+
+// Tests that setting the start and stop flags across a reboot works as
+// expected.
+TEST(MultinodeRebootLoggerTest, RebootStartStopTimes) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split3_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  NodeEventLoopFactory *const pi3 =
+      event_loop_factory.GetNodeEventLoopFactory("pi3");
+  const size_t pi3_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi3->node());
+
+  const std::string kLogfile1_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile1/";
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  const std::string kLogfile2_2 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+  const std::string kLogfile3_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile3/";
+  util::UnlinkRecursive(kLogfile1_1);
+  util::UnlinkRecursive(kLogfile2_1);
+  util::UnlinkRecursive(kLogfile2_2);
+  util::UnlinkRecursive(kLogfile3_1);
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+    CHECK_EQ(pi3_index, 2u);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch(),
+         BootTimestamp::epoch()});
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp::epoch() + reboot_time,
+         BootTimestamp{.boot = 1,
+                       .time = monotonic_clock::epoch() + reboot_time},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  std::vector<std::string> filenames;
+  {
+    LoggerState pi1_logger = MakeLoggerState(
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    LoggerState pi3_logger = MakeLoggerState(
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    {
+      // And now start the logger.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+      pi1_logger.StartLogger(kLogfile1_1);
+      pi3_logger.StartLogger(kLogfile3_1);
+      pi2_logger.StartLogger(kLogfile2_1);
+
+      event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+      // Now that we've got a start time in the past, turn on data.
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+
+      pi2->AlwaysStart<Pong>("pong");
+
+      event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+    }
+    event_loop_factory.RunFor(chrono::milliseconds(995));
+    // pi2 now reboots at 5 seconds.
+    {
+      event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+      // Make local stuff happen before we start logging and connect the remote.
+      pi2->AlwaysStart<Pong>("pong");
+      std::unique_ptr<aos::EventLoop> ping_event_loop =
+          pi1->MakeEventLoop("ping");
+      Ping ping(ping_event_loop.get());
+      event_loop_factory.RunFor(chrono::milliseconds(5));
+
+      // Start logging again on pi2 after it is up.
+      LoggerState pi2_logger = MakeLoggerState(
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+      pi2_logger.StartLogger(kLogfile2_2);
+
+      event_loop_factory.RunFor(chrono::milliseconds(5000));
+
+      pi2_logger.AppendAllFilenames(&filenames);
+    }
+
+    pi1_logger.AppendAllFilenames(&filenames);
+    pi3_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  auto result = ConfirmReadable(filenames);
+
+  EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
+  EXPECT_THAT(result[0].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(11000350)));
+
+  EXPECT_THAT(result[1].first,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch(),
+                  realtime_clock::epoch() + chrono::microseconds(6005000)));
+  EXPECT_THAT(result[1].second,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::microseconds(4900150),
+                  realtime_clock::epoch() + chrono::microseconds(11000200)));
+
+  EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
+  EXPECT_THAT(result[2].second,
+              ::testing::ElementsAre(realtime_clock::epoch() +
+                                     chrono::microseconds(11000150)));
+
+  // Confirm we observed the correct start and stop times.  We should see the
+  // reboot here.
+  auto start_stop_result = ConfirmReadable(
+      filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
+      realtime_clock::epoch() + chrono::milliseconds(8000));
+
+  EXPECT_THAT(
+      start_stop_result[0].first,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+  EXPECT_THAT(
+      start_stop_result[0].second,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
+  EXPECT_THAT(start_stop_result[1].first,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::seconds(2),
+                  realtime_clock::epoch() + chrono::microseconds(6005000)));
+  EXPECT_THAT(start_stop_result[1].second,
+              ::testing::ElementsAre(
+                  realtime_clock::epoch() + chrono::microseconds(4900150),
+                  realtime_clock::epoch() + chrono::seconds(8)));
+  EXPECT_THAT(
+      start_stop_result[2].first,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+  EXPECT_THAT(
+      start_stop_result[2].second,
+      ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
+}
+
+// Tests that we properly handle one direction being down.
+TEST(MissingDirectionTest, OneDirection) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split4_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  std::vector<std::string> filenames;
+
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  const std::string kLogfile1_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile1.1/";
+  util::UnlinkRecursive(kLogfile2_1);
+  util::UnlinkRecursive(kLogfile1_1);
+
+  pi2->Disconnect(pi1->node());
+
+  pi1->AlwaysStart<Ping>("ping");
+  pi2->AlwaysStart<Pong>("pong");
+
+  {
+    LoggerState pi2_logger = MakeLoggerState(
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+    event_loop_factory.RunFor(chrono::milliseconds(95));
+
+    pi2_logger.StartLogger(kLogfile2_1);
+
+    event_loop_factory.RunFor(chrono::milliseconds(6000));
+
+    pi2->Connect(pi1->node());
+
+    LoggerState pi1_logger = MakeLoggerState(
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    pi1_logger.StartLogger(kLogfile1_1);
+
+    event_loop_factory.RunFor(chrono::milliseconds(5000));
+    pi1_logger.AppendAllFilenames(&filenames);
+    pi2_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle only one direction ever existing after a
+// reboot.
+TEST(MissingDirectionTest, OneDirectionAfterReboot) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split4_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  std::vector<std::string> filenames;
+
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  util::UnlinkRecursive(kLogfile2_1);
+
+  pi1->AlwaysStart<Ping>("ping");
+
+  // Pi1 sends to pi2.  Reboot pi1, but don't let pi2 connect to pi1.  This
+  // makes it such that we will only get timestamps from pi1 -> pi2 on the
+  // second boot.
+  {
+    LoggerState pi2_logger = MakeLoggerState(
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+    event_loop_factory.RunFor(chrono::milliseconds(95));
+
+    pi2_logger.StartLogger(kLogfile2_1);
+
+    event_loop_factory.RunFor(chrono::milliseconds(4000));
+
+    pi2->Disconnect(pi1->node());
+
+    event_loop_factory.RunFor(chrono::milliseconds(1000));
+    pi1->AlwaysStart<Ping>("ping");
+
+    event_loop_factory.RunFor(chrono::milliseconds(5000));
+    pi2_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle only one direction ever existing after a reboot
+// with only reliable data.
+TEST(MissingDirectionTest, OneDirectionAfterRebootReliable) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_split4_reliable_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  const size_t pi1_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi1->node());
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  const size_t pi2_index = configuration::GetNodeIndex(
+      event_loop_factory.configuration(), pi2->node());
+  std::vector<std::string> filenames;
+
+  {
+    CHECK_EQ(pi1_index, 0u);
+    CHECK_EQ(pi2_index, 1u);
+
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+    const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch() + reboot_time,
+        {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+         BootTimestamp::epoch() + reboot_time});
+  }
+
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  util::UnlinkRecursive(kLogfile2_1);
+
+  pi1->AlwaysStart<Ping>("ping");
+
+  // Pi1 sends to pi2.  Reboot pi1, but don't let pi2 connect to pi1.  This
+  // makes it such that we will only get timestamps from pi1 -> pi2 on the
+  // second boot.
+  {
+    LoggerState pi2_logger = MakeLoggerState(
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+    event_loop_factory.RunFor(chrono::milliseconds(95));
+
+    pi2_logger.StartLogger(kLogfile2_1);
+
+    event_loop_factory.RunFor(chrono::milliseconds(4000));
+
+    pi2->Disconnect(pi1->node());
+
+    event_loop_factory.RunFor(chrono::milliseconds(1000));
+    pi1->AlwaysStart<Ping>("ping");
+
+    event_loop_factory.RunFor(chrono::milliseconds(5000));
+    pi2_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle what used to be a time violation in one
+// direction.  This can occur when one direction goes down after sending some
+// data, but the other keeps working.  The down direction ends up resolving to a
+// straight line in the noncausal filter, where the direction which is still up
+// can cross that line.  Really, time progressed along just fine but we assumed
+// that the offset was a line when it could have deviated by up to 1ms/second.
+TEST_P(MultinodeLoggerTest, OneDirectionTimeDrift) {
+  std::vector<std::string> filenames;
+
+  CHECK_EQ(pi1_index_, 0u);
+  CHECK_EQ(pi2_index_, 1u);
+
+  time_converter_.AddNextTimestamp(
+      distributed_clock::epoch(),
+      {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+  const chrono::nanoseconds before_disconnect_duration =
+      time_converter_.AddMonotonic(
+          {chrono::milliseconds(1000), chrono::milliseconds(1000)});
+
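+  // test_duration covers 1 s in sync, 10 s with pi2's clock 5 ms slow, and
+  // 10 s with it 5 ms fast, so the real offset wanders across the straight
+  // line assumed while the link is down.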
+  const chrono::nanoseconds test_duration =
+      time_converter_.AddMonotonic(
+          {chrono::milliseconds(1000), chrono::milliseconds(1000)}) +
+      time_converter_.AddMonotonic(
+          {chrono::milliseconds(10000),
+           chrono::milliseconds(10000) - chrono::milliseconds(5)}) +
+      time_converter_.AddMonotonic(
+          {chrono::milliseconds(10000),
+           chrono::milliseconds(10000) + chrono::milliseconds(5)});
+
+  const std::string kLogfile =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  util::UnlinkRecursive(kLogfile);
+
+  {
+    LoggerState pi2_logger = MakeLogger(pi2_);
+    pi2_logger.StartLogger(kLogfile);
+    event_loop_factory_.RunFor(before_disconnect_duration);
+
+    pi2_->Disconnect(pi1_->node());
+
+    event_loop_factory_.RunFor(test_duration);
+    pi2_->Connect(pi1_->node());
+
+    event_loop_factory_.RunFor(chrono::milliseconds(5000));
+    pi2_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  ConfirmReadable(filenames);
+}
+
+// Tests that we can replay a logfile that has timestamps such that at least one
+// node's epoch is at a positive distributed_clock (and thus will have to be
+// booted after the other node(s)).
+TEST_P(MultinodeLoggerTest, StartOneNodeBeforeOther) {
+  std::vector<std::string> filenames;
+
+  CHECK_EQ(pi1_index_, 0u);
+  CHECK_EQ(pi2_index_, 1u);
+
+  time_converter_.AddNextTimestamp(
+      distributed_clock::epoch(),
+      {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
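+  // Reboot pi1 (node index 0) one second in, so its second boot's epoch lands
+  // at a positive distributed time.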
+  const chrono::nanoseconds before_reboot_duration = chrono::milliseconds(1000);
+  time_converter_.RebootAt(
+      0, distributed_clock::time_point(before_reboot_duration));
+
+  const chrono::nanoseconds test_duration = time_converter_.AddMonotonic(
+      {chrono::milliseconds(10000), chrono::milliseconds(10000)});
+
+  const std::string kLogfile =
+      aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+  util::UnlinkRecursive(kLogfile);
+
+  pi2_->Disconnect(pi1_->node());
+  pi1_->Disconnect(pi2_->node());
+
+  {
+    LoggerState pi2_logger = MakeLogger(pi2_);
+
+    pi2_logger.StartLogger(kLogfile);
+    event_loop_factory_.RunFor(before_reboot_duration);
+
+    pi2_->Connect(pi1_->node());
+    pi1_->Connect(pi2_->node());
+
+    event_loop_factory_.RunFor(test_duration);
+
+    pi2_logger.AppendAllFilenames(&filenames);
+  }
+
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  ConfirmReadable(filenames);
+
+  {
+    LogReader reader(sorted_parts);
+    SimulatedEventLoopFactory replay_factory(reader.configuration());
+    reader.RegisterWithoutStarting(&replay_factory);
+
+    NodeEventLoopFactory *const replay_node =
+        reader.event_loop_factory()->GetNodeEventLoopFactory("pi1");
+
+    std::unique_ptr<EventLoop> test_event_loop =
+        replay_node->MakeEventLoop("test_reader");
+    replay_node->OnStartup([replay_node]() {
+      // Check that we didn't boot until at least t=0.
+      CHECK_LE(monotonic_clock::epoch(), replay_node->monotonic_now());
+    });
+    test_event_loop->OnRun([&test_event_loop]() {
+      // Check that we didn't boot until at least t=0.
+      EXPECT_LE(monotonic_clock::epoch(), test_event_loop->monotonic_now());
+    });
+    reader.event_loop_factory()->Run();
+    reader.Deregister();
+  }
+}
+
+// Tests that when we have a loop without all the logs at all points in time, we
+// can sort it properly.
+TEST(MultinodeLoggerLoopTest, Loop) {
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(ArtifactPath(
+          "aos/events/logging/multinode_pingpong_triangle_split_config.json"));
+  message_bridge::TestingTimeConverter time_converter(
+      configuration::NodesCount(&config.message()));
+  SimulatedEventLoopFactory event_loop_factory(&config.message());
+  event_loop_factory.SetTimeConverter(&time_converter);
+
+  NodeEventLoopFactory *const pi1 =
+      event_loop_factory.GetNodeEventLoopFactory("pi1");
+  NodeEventLoopFactory *const pi2 =
+      event_loop_factory.GetNodeEventLoopFactory("pi2");
+  NodeEventLoopFactory *const pi3 =
+      event_loop_factory.GetNodeEventLoopFactory("pi3");
+
+  const std::string kLogfile1_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile1/";
+  const std::string kLogfile2_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile2/";
+  const std::string kLogfile3_1 =
+      aos::testing::TestTmpDir() + "/multi_logfile3/";
+  util::UnlinkRecursive(kLogfile1_1);
+  util::UnlinkRecursive(kLogfile2_1);
+  util::UnlinkRecursive(kLogfile3_1);
+
+  {
+    // Make pi1 boot before everything else.
+    time_converter.AddNextTimestamp(
+        distributed_clock::epoch(),
+        {BootTimestamp::epoch(),
+         BootTimestamp::epoch() - chrono::milliseconds(100),
+         BootTimestamp::epoch() - chrono::milliseconds(300)});
+  }
+
+  // We want to set up a situation such that 2 of the 3 legs of the loop are
+  // very confident about time being X, and the third leg is pulling the
+  // average off to one side.
+  //
+  // It's easiest to visualize this in timestamp_plotter.
+
+  std::vector<std::string> filenames;
+  {
+    // Have pi1 send out a reliable message at startup.  This sets up a
+    // message with a long forwarding time at the start to bias time.
+    std::unique_ptr<EventLoop> pi1_event_loop = pi1->MakeEventLoop("ping");
+    {
+      aos::Sender<examples::Ping> ping_sender =
+          pi1_event_loop->MakeSender<examples::Ping>("/reliable");
+
+      aos::Sender<examples::Ping>::Builder builder = ping_sender.MakeBuilder();
+      examples::Ping::Builder ping_builder =
+          builder.MakeBuilder<examples::Ping>();
+      CHECK_EQ(builder.Send(ping_builder.Finish()), RawSender::Error::kOk);
+    }
+
+    // Wait a while so there's enough data for the worst-case offset to be
+    // rather far off.
+    event_loop_factory.RunFor(chrono::seconds(1000));
+
+    // Now start a receiving node first.  This sets up 2 tight bounds between 2
+    // of the nodes.
+    LoggerState pi2_logger = MakeLoggerState(
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    pi2_logger.StartLogger(kLogfile2_1);
+
+    event_loop_factory.RunFor(chrono::seconds(100));
+
+    // And now start the third leg.
+    LoggerState pi3_logger = MakeLoggerState(
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    pi3_logger.StartLogger(kLogfile3_1);
+
+    LoggerState pi1_logger = MakeLoggerState(
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+    pi1_logger.StartLogger(kLogfile1_1);
+
+    event_loop_factory.RunFor(chrono::seconds(100));
+
+    pi1_logger.AppendAllFilenames(&filenames);
+    pi2_logger.AppendAllFilenames(&filenames);
+    pi3_logger.AppendAllFilenames(&filenames);
+  }
+
+  // Make sure we can read this.
+  const std::vector<LogFile> sorted_parts = SortParts(filenames);
+  auto result = ConfirmReadable(filenames);
+}
+
+}  // namespace testing
+}  // namespace logger
+}  // namespace aos
diff --git a/aos/events/logging/multinode_logger_test_lib.cc b/aos/events/logging/multinode_logger_test_lib.cc
new file mode 100644
index 0000000..0f588c6
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test_lib.cc
@@ -0,0 +1,647 @@
+#include "aos/events/logging/multinode_logger_test_lib.h"
+
+#include "aos/events/event_loop.h"
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/logging/logfile_utils.h"
+#include "aos/events/ping_lib.h"
+#include "aos/events/pong_lib.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/testing/tmpdir.h"
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+using aos::testing::ArtifactPath;
+
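+// Builds a LoggerState for the provided node.  If no configuration is passed
+// in, the factory's configuration is used.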
+LoggerState MakeLoggerState(NodeEventLoopFactory *node,
+                            SimulatedEventLoopFactory *factory,
+                            CompressionParams params,
+                            const Configuration *configuration) {
+  if (configuration == nullptr) {
+    configuration = factory->configuration();
+  }
+  return {node->MakeEventLoop("logger"),
+          {},
+          configuration,
+          configuration::GetNode(configuration, node->node()),
+          nullptr,
+          params};
+}
+
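+// Starts logging to the provided base path.  The MultiNodeLogNamer (with the
+// configured extension and encoder) is created from the event loop's OnRun
+// handler, so logging begins once the event loop is running.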
+void LoggerState::StartLogger(std::string logfile_base) {
+  CHECK(!logfile_base.empty());
+
+  logger = std::make_unique<Logger>(event_loop.get(), configuration);
+  logger->set_polling_period(std::chrono::milliseconds(100));
+  logger->set_name(
+      absl::StrCat("name_prefix_", event_loop->node()->name()->str()));
+  logger->set_logger_sha1(
+      absl::StrCat("logger_sha1_", event_loop->node()->name()->str()));
+  logger->set_logger_version(
+      absl::StrCat("logger_version_", event_loop->node()->name()->str()));
+  event_loop->OnRun([this, logfile_base]() {
+    std::unique_ptr<MultiNodeLogNamer> namer =
+        std::make_unique<MultiNodeLogNamer>(logfile_base, configuration,
+                                            event_loop.get(), node);
+    namer->set_extension(params.extension);
+    namer->set_encoder_factory(params.encoder_factory);
+    log_namer = namer.get();
+
+    logger->StartLogging(std::move(namer));
+  });
+}
+
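+// Appends the full path of every file written by this logger, joining the log
+// namer's base name and each part file name with "_" unless the base name
+// already ends in "/".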
+void LoggerState::AppendAllFilenames(std::vector<std::string> *filenames) {
+  for (const std::string &file : log_namer->all_filenames()) {
+    const std::string_view separator =
+        log_namer->base_name().back() == '/' ? "" : "_";
+    filenames->emplace_back(
+        absl::StrCat(log_namer->base_name(), separator, file));
+  }
+}
+
+LoggerState::~LoggerState() {
+  if (logger) {
+    std::vector<std::string> filenames;
+    AppendAllFilenames(&filenames);
+    std::sort(filenames.begin(), filenames.end());
+    for (const std::string &file : filenames) {
+      LOG(INFO) << "Wrote to " << file;
+      const auto header = ReadHeader(file);
+      if (header) {
+        VLOG(1) << aos::FlatbufferToJson(header.value());
+      }
+    }
+  }
+}
+
+MultinodeLoggerTest::MultinodeLoggerTest()
+    : config_(aos::configuration::ReadConfig(ArtifactPath(absl::StrCat(
+          "aos/events/logging/", std::get<0>(GetParam()).config)))),
+      time_converter_(configuration::NodesCount(&config_.message())),
+      event_loop_factory_(&config_.message()),
+      pi1_(event_loop_factory_.GetNodeEventLoopFactory("pi1")),
+      pi1_index_(configuration::GetNodeIndex(
+          event_loop_factory_.configuration(), pi1_->node())),
+      pi2_(event_loop_factory_.GetNodeEventLoopFactory("pi2")),
+      pi2_index_(configuration::GetNodeIndex(
+          event_loop_factory_.configuration(), pi2_->node())),
+      tmp_dir_(aos::testing::TestTmpDir()),
+      logfile_base1_(tmp_dir_ + "/multi_logfile1"),
+      logfile_base2_(tmp_dir_ + "/multi_logfile2"),
+      pi1_reboot_logfiles_(MakePi1RebootLogfiles()),
+      logfiles_(MakeLogFiles(logfile_base1_, logfile_base2_)),
+      pi1_single_direction_logfiles_(MakePi1SingleDirectionLogfiles()),
+      structured_logfiles_(StructureLogFiles()) {
+  LOG(INFO) << "Config " << std::get<0>(GetParam()).config;
+  event_loop_factory_.SetTimeConverter(&time_converter_);
+
+  // Go through and remove the logfiles if they already exist.
+  for (const auto &file : logfiles_) {
+    unlink(file.c_str());
+    unlink((file + ".xz").c_str());
+  }
+
+  for (const auto &file : MakeLogFiles(tmp_dir_ + "/relogged1",
+                                       tmp_dir_ + "/relogged2", 3, 3, true)) {
+    unlink(file.c_str());
+  }
+
+  for (const auto &file : pi1_reboot_logfiles_) {
+    unlink(file.c_str());
+  }
+
+  LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
+            << " and " << logfiles_[2];
+
+  pi1_->OnStartup([this]() { pi1_->AlwaysStart<Ping>("ping"); });
+  pi2_->OnStartup([this]() { pi2_->AlwaysStart<Pong>("pong"); });
+}
+
+bool MultinodeLoggerTest::shared() const {
+  return std::get<0>(GetParam()).shared;
+}
+
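+// Builds the list of log files that the loggers are expected to produce.  The
+// order matters: StructureLogFiles() indexes into this list by position.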
+std::vector<std::string> MultinodeLoggerTest::MakeLogFiles(
+    std::string logfile_base1, std::string logfile_base2, size_t pi1_data_count,
+    size_t pi2_data_count, bool relogged_config) {
+  std::string_view sha256 = relogged_config
+                                ? std::get<0>(GetParam()).relogged_sha256
+                                : std::get<0>(GetParam()).sha256;
+  std::vector<std::string> result;
+  result.emplace_back(absl::StrCat(logfile_base1, "_", sha256, Extension()));
+  result.emplace_back(absl::StrCat(logfile_base2, "_", sha256, Extension()));
+  for (size_t i = 0; i < pi1_data_count; ++i) {
+    result.emplace_back(
+        absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
+  }
+  result.emplace_back(logfile_base1 + "_pi2_data/test/aos.examples.Pong.part0" +
+                      Extension());
+  result.emplace_back(logfile_base1 + "_pi2_data/test/aos.examples.Pong.part1" +
+                      Extension());
+  for (size_t i = 0; i < pi2_data_count; ++i) {
+    result.emplace_back(
+        absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
+  }
+  result.emplace_back(logfile_base2 +
+                      "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part0" +
+                      Extension());
+  result.emplace_back(logfile_base2 +
+                      "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part1" +
+                      Extension());
+  result.emplace_back(logfile_base1 +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+                      Extension());
+  result.emplace_back(logfile_base1 +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
+                      Extension());
+  if (shared()) {
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/"
+                        "aos.message_bridge.RemoteMessage.part2" +
+                        Extension());
+    result.emplace_back(logfile_base2 +
+                        "_timestamps/pi2/aos/remote_timestamps/pi1/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base2 +
+                        "_timestamps/pi2/aos/remote_timestamps/pi1/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+  } else {
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+    result.emplace_back(logfile_base2 +
+                        "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base2 +
+                        "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1 +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+  }
+
+  return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1RebootLogfiles() {
+  std::vector<std::string> result;
+  result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+  result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
+  result.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
+  result.emplace_back(logfile_base1_ + "_pi1_data.part3" + Extension());
+  result.emplace_back(logfile_base1_ + "_pi1_data.part4" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/test/aos.examples.Pong.part0" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/test/aos.examples.Pong.part1" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/test/aos.examples.Pong.part2" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/test/aos.examples.Pong.part3" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+                      Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
+                      Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part2" +
+                      Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part3" +
+                      Extension());
+  result.emplace_back(absl::StrCat(
+      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  if (shared()) {
+    for (size_t i = 0; i < 6; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base1_,
+                       "_timestamps/pi1/aos/remote_timestamps/pi2/"
+                       "aos.message_bridge.RemoteMessage.part",
+                       i, Extension()));
+    }
+  } else {
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part2" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+                        "aos-message_bridge-Timestamp/"
+                        "aos.message_bridge.RemoteMessage.part3" +
+                        Extension());
+
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part1" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part2" +
+                        Extension());
+    result.emplace_back(logfile_base1_ +
+                        "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+                        "aos-examples-Ping/"
+                        "aos.message_bridge.RemoteMessage.part3" +
+                        Extension());
+  }
+  return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1SingleDirectionLogfiles() {
+  std::vector<std::string> result;
+  result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+  result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
+  result.emplace_back(logfile_base1_ +
+                      "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+                      Extension());
+  result.emplace_back(absl::StrCat(
+      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1DeadNodeLogfiles() {
+  std::vector<std::string> result;
+  result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+  result.emplace_back(absl::StrCat(
+      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  return result;
+}
+
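+// Groups the expected log files into per-parts lists matching what SortParts()
+// should produce, so tests can compare against it directly.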
+std::vector<std::vector<std::string>> MultinodeLoggerTest::StructureLogFiles() {
+  std::vector<std::vector<std::string>> result{
+      std::vector<std::string>{logfiles_[2], logfiles_[3], logfiles_[4]},
+      std::vector<std::string>{logfiles_[5], logfiles_[6]},
+      std::vector<std::string>{logfiles_[7], logfiles_[8], logfiles_[9]},
+      std::vector<std::string>{logfiles_[10], logfiles_[11]},
+      std::vector<std::string>{logfiles_[12], logfiles_[13]}};
+
+  if (shared()) {
+    result.emplace_back(
+        std::vector<std::string>{logfiles_[14], logfiles_[15], logfiles_[16]});
+    result.emplace_back(std::vector<std::string>{logfiles_[17], logfiles_[18]});
+  } else {
+    result.emplace_back(std::vector<std::string>{logfiles_[14], logfiles_[15]});
+    result.emplace_back(std::vector<std::string>{logfiles_[16], logfiles_[17]});
+    result.emplace_back(std::vector<std::string>{logfiles_[18], logfiles_[19]});
+  }
+
+  return result;
+}
+
+std::string MultinodeLoggerTest::Extension() {
+  return absl::StrCat(".bfbs", std::get<1>(GetParam()).extension);
+}
+
+LoggerState MultinodeLoggerTest::MakeLogger(
+    NodeEventLoopFactory *node, SimulatedEventLoopFactory *factory,
+    const Configuration *configuration) {
+  if (factory == nullptr) {
+    factory = &event_loop_factory_;
+  }
+  return MakeLoggerState(node, factory, std::get<1>(GetParam()), configuration);
+}
+
+void MultinodeLoggerTest::StartLogger(LoggerState *logger,
+                                      std::string logfile_base) {
+  if (logfile_base.empty()) {
+    if (logger->event_loop->node()->name()->string_view() == "pi1") {
+      logfile_base = logfile_base1_;
+    } else {
+      logfile_base = logfile_base2_;
+    }
+  }
+  logger->StartLogger(logfile_base);
+}
+
+void MultinodeLoggerTest::VerifyParts(
+    const std::vector<LogFile> &sorted_parts,
+    const std::vector<std::string> &corrupted_parts) {
+  EXPECT_EQ(sorted_parts.size(), 2u);
+
+  // Count up the number of UUIDs and make sure they are what we expect as a
+  // sanity check.
+  std::set<std::string> log_event_uuids;
+  std::set<std::string> parts_uuids;
+  std::set<std::string> both_uuids;
+
+  size_t missing_rt_count = 0;
+
+  std::vector<std::string> logger_nodes;
+  for (const LogFile &log_file : sorted_parts) {
+    EXPECT_FALSE(log_file.log_event_uuid.empty());
+    log_event_uuids.insert(log_file.log_event_uuid);
+    logger_nodes.emplace_back(log_file.logger_node);
+    both_uuids.insert(log_file.log_event_uuid);
+    EXPECT_TRUE(log_file.config);
+    EXPECT_EQ(log_file.name,
+              absl::StrCat("name_prefix_", log_file.logger_node));
+    EXPECT_EQ(log_file.logger_sha1,
+              absl::StrCat("logger_sha1_", log_file.logger_node));
+    EXPECT_EQ(log_file.logger_version,
+              absl::StrCat("logger_version_", log_file.logger_node));
+
+    for (const LogParts &part : log_file.parts) {
+      EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
+          << ": " << part;
+      missing_rt_count +=
+          part.realtime_start_time == aos::realtime_clock::min_time;
+
+      EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
+                  log_event_uuids.end());
+      EXPECT_NE(part.node, "");
+      EXPECT_TRUE(log_file.config);
+      parts_uuids.insert(part.parts_uuid);
+      both_uuids.insert(part.parts_uuid);
+    }
+  }
+
+  // We won't have RT timestamps for 5 or 6 log files.  We don't log the RT
+  // start time on remote nodes because we don't know it and would be guessing,
+  // and the log reader can actually do a better job.  The exact number depends
+  // on whether the remote timestamps are split across 2 files or combined into
+  // 1, i.e. whether we are using a split or combined timestamp channel config.
+
+  EXPECT_EQ(log_event_uuids.size(), 2u);
+  EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
+  EXPECT_EQ(log_event_uuids.size() + parts_uuids.size(), both_uuids.size());
+
+  // Test that each list of parts is in order.  Don't worry about the ordering
+  // between part file lists though.
+  // (inner vectors all need to be in order, but outer one doesn't matter).
+  ASSERT_THAT(ToLogReaderVector(sorted_parts),
+              ::testing::UnorderedElementsAreArray(structured_logfiles_));
+
+  EXPECT_THAT(logger_nodes, ::testing::UnorderedElementsAre("pi1", "pi2"));
+
+  EXPECT_NE(sorted_parts[0].realtime_start_time, aos::realtime_clock::min_time);
+  EXPECT_NE(sorted_parts[1].realtime_start_time, aos::realtime_clock::min_time);
+
+  EXPECT_NE(sorted_parts[0].monotonic_start_time,
+            aos::monotonic_clock::min_time);
+  EXPECT_NE(sorted_parts[1].monotonic_start_time,
+            aos::monotonic_clock::min_time);
+
+  EXPECT_THAT(sorted_parts[0].corrupted, ::testing::Eq(corrupted_parts));
+  EXPECT_THAT(sorted_parts[1].corrupted, ::testing::Eq(corrupted_parts));
+}
+
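+// Appends the provided extension to every expected log file name, both in the
+// flat list and in the structured per-parts list.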
+void MultinodeLoggerTest::AddExtension(std::string_view extension) {
+  std::transform(logfiles_.begin(), logfiles_.end(), logfiles_.begin(),
+                 [extension](const std::string &in) {
+                   return absl::StrCat(in, extension);
+                 });
+
+  std::transform(structured_logfiles_.begin(), structured_logfiles_.end(),
+                 structured_logfiles_.begin(),
+                 [extension](std::vector<std::string> in) {
+                   std::transform(in.begin(), in.end(), in.begin(),
+                                  [extension](const std::string &in_str) {
+                                    return absl::StrCat(in_str, extension);
+                                  });
+                   return in;
+                 });
+}
+
+std::vector<std::vector<std::string>> ToLogReaderVector(
+    const std::vector<LogFile> &log_files) {
+  std::vector<std::vector<std::string>> result;
+  for (const LogFile &log_file : log_files) {
+    for (const LogParts &log_parts : log_file.parts) {
+      std::vector<std::string> parts;
+      for (const std::string &part : log_parts.parts) {
+        parts.emplace_back(part);
+      }
+      result.emplace_back(std::move(parts));
+    }
+  }
+  return result;
+}
+
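+// Returns the compression configurations to run the tests against: no
+// compression, snappy, and (when built with LZMA support) lzma.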
+std::vector<CompressionParams> SupportedCompressionAlgorithms() {
+  return {{"",
+           [](size_t max_message_size) {
+             return std::make_unique<DummyEncoder>(max_message_size);
+           }},
+          {SnappyDecoder::kExtension,
+           [](size_t max_message_size) {
+             return std::make_unique<SnappyEncoder>(max_message_size, 32768);
+           }},
+#ifdef LZMA
+          {LzmaDecoder::kExtension,
+           [](size_t max_message_size) {
+             return std::make_unique<LzmaEncoder>(max_message_size, 3);
+           }}
+#endif  // LZMA
+  };
+}
+
+std::ostream &operator<<(std::ostream &ostream,
+                         const CompressionParams &params) {
+  ostream << "\"" << params.extension << "\"";
+  return ostream;
+}
+
+std::ostream &operator<<(std::ostream &ostream, const ConfigParams &params) {
+  ostream << "{config: \"" << params.config << "\", shared: " << params.shared
+          << ", sha256: \"" << params.sha256 << "\", relogged_sha256: \""
+          << params.relogged_sha256 << "\"}";
+  return ostream;
+}
+
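+// Replays the given log files twice: once end to end to confirm they are
+// readable, and once bounded by [start_time, end_time] while recording the
+// realtime time observed on each node at its start and end events.  Returns
+// one (start times, end times) pair per node.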
+std::vector<std::pair<std::vector<realtime_clock::time_point>,
+                      std::vector<realtime_clock::time_point>>>
+ConfirmReadable(const std::vector<std::string> &files,
+                realtime_clock::time_point start_time,
+                realtime_clock::time_point end_time) {
+  {
+    LogReader reader(SortParts(files));
+
+    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+    reader.Register(&log_reader_factory);
+
+    log_reader_factory.Run();
+
+    reader.Deregister();
+  }
+  {
+    std::vector<std::pair<std::vector<realtime_clock::time_point>,
+                          std::vector<realtime_clock::time_point>>>
+        result;
+    LogReader reader(SortParts(files));
+
+    reader.SetStartTime(start_time);
+    reader.SetEndTime(end_time);
+
+    SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+    reader.RegisterWithoutStarting(&log_reader_factory);
+    result.resize(
+        configuration::NodesCount(log_reader_factory.configuration()));
+    if (configuration::MultiNode(log_reader_factory.configuration())) {
+      size_t i = 0;
+      for (const aos::Node *node :
+           *log_reader_factory.configuration()->nodes()) {
+        LOG(INFO) << "Registering start";
+        reader.OnStart(node, [node, &log_reader_factory, &result,
+                              node_index = i]() {
+          LOG(INFO) << "Starting " << node->name()->string_view();
+          result[node_index].first.push_back(
+              log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
+        });
+        reader.OnEnd(node, [node, &log_reader_factory, &result,
+                            node_index = i]() {
+          LOG(INFO) << "Ending " << node->name()->string_view();
+          result[node_index].second.push_back(
+              log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
+        });
+        ++i;
+      }
+    } else {
+      reader.OnStart([&log_reader_factory, &result]() {
+        LOG(INFO) << "Starting";
+        result[0].first.push_back(
+            log_reader_factory.GetNodeEventLoopFactory(nullptr)
+                ->realtime_now());
+      });
+      reader.OnEnd([&log_reader_factory, &result]() {
+        LOG(INFO) << "Ending";
+        result[0].second.push_back(
+            log_reader_factory.GetNodeEventLoopFactory(nullptr)
+                ->realtime_now());
+      });
+    }
+
+    log_reader_factory.Run();
+
+    reader.Deregister();
+
+    for (const auto &node_times : result) {
+      for (const auto &time : node_times.first) {
+        VLOG(1) << "Start " << time;
+      }
+      for (const auto &time : node_times.second) {
+        VLOG(1) << "End " << time;
+      }
+    }
+    return result;
+  }
+}
+
+// Counts the number of messages on each channel.  Returns (channel name,
+// channel type, count) for every channel with at least one message matching
+// matcher().
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
+    std::shared_ptr<const aos::Configuration> config, std::string_view filename,
+    std::function<bool(const UnpackedMessageHeader *)> matcher) {
+  MessageReader message_reader(filename);
+  std::vector<int> counts(config->channels()->size(), 0);
+
+  while (true) {
+    std::shared_ptr<UnpackedMessageHeader> msg = message_reader.ReadMessage();
+    if (!msg) {
+      break;
+    }
+
+    if (matcher(msg.get())) {
+      counts[msg->channel_index]++;
+    }
+  }
+
+  std::vector<std::tuple<std::string, std::string, int>> result;
+  for (size_t i = 0; i < counts.size(); ++i) {
+    if (counts[i] != 0) {
+      const Channel *channel = config->channels()->Get(i);
+      result.push_back(std::make_tuple(channel->name()->str(),
+                                       channel->type()->str(), counts[i]));
+    }
+  }
+
+  return result;
+}
+
+// Counts the number of messages (channel, count) for all data messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
+    std::shared_ptr<const aos::Configuration> config,
+    std::string_view filename) {
+  return CountChannelsMatching(
+      config, filename, [](const UnpackedMessageHeader *msg) {
+        if (msg->span.data() != nullptr) {
+          CHECK(!msg->monotonic_remote_time.has_value());
+          CHECK(!msg->realtime_remote_time.has_value());
+          CHECK(!msg->remote_queue_index.has_value());
+          return true;
+        }
+        return false;
+      });
+}
+
+// Counts the number of messages (channel, count) for all timestamp messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
+    std::shared_ptr<const aos::Configuration> config,
+    std::string_view filename) {
+  return CountChannelsMatching(
+      config, filename, [](const UnpackedMessageHeader *msg) {
+        if (msg->span.data() == nullptr) {
+          CHECK(msg->monotonic_remote_time.has_value());
+          CHECK(msg->realtime_remote_time.has_value());
+          CHECK(msg->remote_queue_index.has_value());
+          return true;
+        }
+        return false;
+      });
+}
+
+}  // namespace testing
+}  // namespace logger
+}  // namespace aos
diff --git a/aos/events/logging/multinode_logger_test_lib.h b/aos/events/logging/multinode_logger_test_lib.h
new file mode 100644
index 0000000..d3754e4
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test_lib.h
@@ -0,0 +1,148 @@
+#ifndef AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
+#define AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
+
+#include "absl/strings/str_format.h"
+#include "aos/events/event_loop.h"
+#include "aos/events/logging/log_writer.h"
+#include "aos/events/logging/snappy_encoder.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/network/testing_time_converter.h"
+#include "aos/testing/path.h"
+#include "aos/util/file.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+
+#ifdef LZMA
+#include "aos/events/logging/lzma_encoder.h"
+#endif
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+struct CompressionParams {
+  std::string_view extension;
+  std::function<std::unique_ptr<DataEncoder>(size_t max_message_size)>
+      encoder_factory;
+};
+
+// Parameters to run all the tests with.
+struct ConfigParams {
+  // The config file to use.
+  std::string config;
+  // If true, the RemoteMessage channel should be shared between all the remote
+  // channels.  If false, there will be 1 RemoteMessage channel per remote
+  // channel.
+  bool shared;
+  // sha256 of the config.
+  std::string_view sha256;
+  // sha256 of the relogged config.
+  std::string_view relogged_sha256;
+};
+
+struct LoggerState {
+  void StartLogger(std::string logfile_base);
+
+  std::unique_ptr<EventLoop> event_loop;
+  std::unique_ptr<Logger> logger;
+  const Configuration *configuration;
+  const Node *node;
+  MultiNodeLogNamer *log_namer;
+  CompressionParams params;
+
+  void AppendAllFilenames(std::vector<std::string> *filenames);
+
+  ~LoggerState();
+};
+
+LoggerState MakeLoggerState(NodeEventLoopFactory *node,
+                            SimulatedEventLoopFactory *factory,
+                            CompressionParams params,
+                            const Configuration *configuration = nullptr);
+std::vector<std::vector<std::string>> ToLogReaderVector(
+    const std::vector<LogFile> &log_files);
+std::vector<CompressionParams> SupportedCompressionAlgorithms();
+std::ostream &operator<<(std::ostream &ostream,
+                         const CompressionParams &params);
+std::ostream &operator<<(std::ostream &ostream, const ConfigParams &params);
+std::vector<std::pair<std::vector<realtime_clock::time_point>,
+                      std::vector<realtime_clock::time_point>>>
+ConfirmReadable(
+    const std::vector<std::string> &files,
+    realtime_clock::time_point start_time = realtime_clock::min_time,
+    realtime_clock::time_point end_time = realtime_clock::max_time);
+// Counts the number of messages on each channel.  Returns (channel name,
+// channel type, count) for every channel with at least one message matching
+// matcher().
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
+    std::shared_ptr<const aos::Configuration> config, std::string_view filename,
+    std::function<bool(const UnpackedMessageHeader *)> matcher);
+// Counts the number of messages (channel, count) for all data messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
+    std::shared_ptr<const aos::Configuration> config,
+    std::string_view filename);
+// Counts the number of messages (channel, count) for all timestamp messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
+    std::shared_ptr<const aos::Configuration> config,
+    std::string_view filename);
+
+class MultinodeLoggerTest : public ::testing::TestWithParam<
+                                std::tuple<ConfigParams, CompressionParams>> {
+ public:
+  MultinodeLoggerTest();
+
+  bool shared() const;
+
+  std::vector<std::string> MakeLogFiles(std::string logfile_base1,
+                                        std::string logfile_base2,
+                                        size_t pi1_data_count = 3,
+                                        size_t pi2_data_count = 3,
+                                        bool relogged_config = false);
+
+  std::vector<std::string> MakePi1RebootLogfiles();
+
+  std::vector<std::string> MakePi1SingleDirectionLogfiles();
+
+  std::vector<std::string> MakePi1DeadNodeLogfiles();
+
+  std::vector<std::vector<std::string>> StructureLogFiles();
+
+  std::string Extension();
+
+  LoggerState MakeLogger(NodeEventLoopFactory *node,
+                         SimulatedEventLoopFactory *factory = nullptr,
+                         const Configuration *configuration = nullptr);
+
+  void StartLogger(LoggerState *logger, std::string logfile_base = "");
+
+  void VerifyParts(const std::vector<LogFile> &sorted_parts,
+                   const std::vector<std::string> &corrupted_parts = {});
+
+  void AddExtension(std::string_view extension);
+
+  // Config and factory.
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
+  message_bridge::TestingTimeConverter time_converter_;
+  SimulatedEventLoopFactory event_loop_factory_;
+
+  NodeEventLoopFactory *const pi1_;
+  const size_t pi1_index_;
+  NodeEventLoopFactory *const pi2_;
+  const size_t pi2_index_;
+
+  std::string tmp_dir_;
+  std::string logfile_base1_;
+  std::string logfile_base2_;
+  std::vector<std::string> pi1_reboot_logfiles_;
+  std::vector<std::string> logfiles_;
+  std::vector<std::string> pi1_single_direction_logfiles_;
+
+  std::vector<std::vector<std::string>> structured_logfiles_;
+};
+
+typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
+
+}  // namespace testing
+}  // namespace logger
+}  // namespace aos
+
+#endif  //  AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
diff --git a/aos/libc/aos_strsignal_test.cc b/aos/libc/aos_strsignal_test.cc
index 51389af..d854aab 100644
--- a/aos/libc/aos_strsignal_test.cc
+++ b/aos/libc/aos_strsignal_test.cc
@@ -26,13 +26,15 @@
   }
 };
 
+// msan doesn't seem to like strsignal().
+#if !__has_feature(memory_sanitizer)
 // Tests that all the signals give the same result as strsignal(3).
 TEST(StrsignalTest, All) {
   // Sigh, strsignal allocates a buffer that uses pthread local storage.  This
   // interacts poorly with asan.  Spawning a thread causes the storage to get
   // cleaned up before asan checks.
   SignalNameTester t;
-#ifdef AOS_SANITIZER_thread
+#if defined(AOS_SANITIZER_thread)
   // tsan doesn't like this usage of ::std::thread. It looks like
   // <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57507>.
   t();
@@ -41,6 +43,7 @@
   thread.join();
 #endif
 }
+#endif
 
 }  // namespace testing
 }  // namespace libc
diff --git a/aos/network/BUILD b/aos/network/BUILD
index f05a75e..88df785 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -65,11 +65,19 @@
     name = "message_bridge_client_fbs",
     srcs = ["message_bridge_client.fbs"],
     gen_reflections = 1,
-    includes = [
-        ":message_bridge_server_fbs_includes",
-        "//aos:configuration_fbs_includes",
-    ],
     target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        ":message_bridge_server_fbs",
+        "//aos:configuration_fbs",
+    ],
+)
+
+flatbuffer_ts_library(
+    name = "message_bridge_client_ts_fbs",
+    srcs = ["message_bridge_client.fbs"],
+    deps = [
+        ":message_bridge_server_ts_fbs",
+    ],
 )
 
 cc_static_flatbuffer(
@@ -83,10 +91,18 @@
     name = "message_bridge_server_fbs",
     srcs = ["message_bridge_server.fbs"],
     gen_reflections = 1,
-    includes = [
-        "//aos:configuration_fbs_includes",
-    ],
     target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        "//aos:configuration_fbs",
+    ],
+)
+
+flatbuffer_ts_library(
+    name = "message_bridge_server_ts_fbs",
+    srcs = ["message_bridge_server.fbs"],
+    deps = [
+        "//aos:configuration_ts_fbs",
+    ],
 )
 
 cc_static_flatbuffer(
diff --git a/aos/network/sctp_lib.cc b/aos/network/sctp_lib.cc
index 53a28cb..9632d3c 100644
--- a/aos/network/sctp_lib.cc
+++ b/aos/network/sctp_lib.cc
@@ -56,6 +56,7 @@
 struct sockaddr_storage ResolveSocket(std::string_view host, int port,
                                       bool use_ipv6) {
   struct sockaddr_storage result;
+  memset(&result, 0, sizeof(result));
   struct addrinfo *addrinfo_result;
   struct sockaddr_in *t_addr = (struct sockaddr_in *)&result;
   struct sockaddr_in6 *t_addr6 = (struct sockaddr_in6 *)&result;
@@ -282,6 +283,7 @@
 
   // Use the assoc_id for the destination instead of the msg_name.
   struct msghdr outmsg;
+  memset(&outmsg, 0, sizeof(outmsg));
   if (sockaddr_remote) {
     outmsg.msg_name = &*sockaddr_remote;
     outmsg.msg_namelen = sizeof(*sockaddr_remote);
@@ -338,8 +340,13 @@
   CHECK(fd_ != -1);
 
   while (true) {
-    aos::unique_c_ptr<Message> result(
-        reinterpret_cast<Message *>(malloc(sizeof(Message) + max_size_ + 1)));
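+    // aligned_alloc() requires the size to be a multiple of the alignment, so
+    // round the allocation up to the next multiple of alignof(Message).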
+    constexpr size_t kMessageAlign = alignof(Message);
+    const size_t max_message_size =
+        ((sizeof(Message) + max_size_ + 1 + (kMessageAlign - 1)) /
+         kMessageAlign) *
+        kMessageAlign;
+    aos::unique_c_ptr<Message> result(reinterpret_cast<Message *>(
+        aligned_alloc(kMessageAlign, max_message_size)));
 
     struct msghdr inmessage;
     memset(&inmessage, 0, sizeof(struct msghdr));
@@ -489,6 +496,7 @@
 
   // Use the assoc_id for the destination instead of the msg_name.
   struct msghdr outmsg;
+  memset(&outmsg, 0, sizeof(outmsg));
   outmsg.msg_namelen = 0;
 
   outmsg.msg_iovlen = 0;
diff --git a/aos/network/timestamp_channel.cc b/aos/network/timestamp_channel.cc
index f8f525f..52032f5 100644
--- a/aos/network/timestamp_channel.cc
+++ b/aos/network/timestamp_channel.cc
@@ -5,6 +5,10 @@
 DEFINE_bool(combined_timestamp_channel_fallback, true,
             "If true, fall back to using the combined timestamp channel if the "
             "single timestamp channel doesn't exist for a timestamp.");
+DEFINE_bool(check_timestamp_channel_frequencies, true,
+            "If true, include a debug CHECK to ensure that remote timestamp "
+            "channels are configured to have at least as great a frequency as "
+            "the corresponding data channel.");
 
 namespace aos {
 namespace message_bridge {
@@ -109,12 +113,14 @@
 
   const Channel *timestamp_channel = finder.ForChannel(channel, connection);
 
-  // Sanity-check that the timestamp channel can actually support full-rate
-  // messages coming through on the source channel.
-  CHECK_GE(timestamp_channel->frequency(), channel->frequency())
-      << ": Timestamp channel "
-      << configuration::StrippedChannelToString(timestamp_channel)
-      << "'s rate is lower than the source channel.";
+  if (FLAGS_check_timestamp_channel_frequencies) {
+    // Sanity-check that the timestamp channel can actually support full-rate
+    // messages coming through on the source channel.
+    CHECK_GE(timestamp_channel->frequency(), channel->frequency())
+        << ": Timestamp channel "
+        << configuration::StrippedChannelToString(timestamp_channel)
+        << "'s rate is lower than the source channel.";
+  }
 
   {
     auto it = timestamp_loggers_.find(timestamp_channel);
diff --git a/aos/starter/BUILD b/aos/starter/BUILD
index 8a30408..9068caa 100644
--- a/aos/starter/BUILD
+++ b/aos/starter/BUILD
@@ -247,3 +247,16 @@
         "//aos/testing:googletest",
     ],
 )
+
+cc_library(
+    name = "mock_starter",
+    srcs = ["mock_starter.cc"],
+    hdrs = ["mock_starter.h"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//aos/events:simulated_event_loop",
+        "//aos/starter:starter_fbs",
+        "//aos/starter:starter_rpc_fbs",
+        "//aos/starter:starter_rpc_lib",
+    ],
+)
diff --git a/aos/starter/irq_affinity.cc b/aos/starter/irq_affinity.cc
index 08cd5a6..7b74fe1 100644
--- a/aos/starter/irq_affinity.cc
+++ b/aos/starter/irq_affinity.cc
@@ -145,31 +145,10 @@
           &irq_affinity_config)
       : top_(event_loop) {
     if (irq_affinity_config.message().has_kthreads()) {
-      kthreads_.reserve(irq_affinity_config.message().kthreads()->size());
-      for (const starter::KthreadConfig *kthread_config :
-           *irq_affinity_config.message().kthreads()) {
-        LOG(INFO) << "Kthread " << aos::FlatbufferToJson(kthread_config);
-        CHECK(kthread_config->has_name()) << ": Name required";
-        const size_t star_position =
-            kthread_config->name()->string_view().find('*');
-        const bool has_star = star_position != std::string_view::npos;
-
-        kthreads_.push_back(ParsedKThreadConfig{
-            .full_match = !has_star,
-            .prefix = std::string(
-                !has_star ? kthread_config->name()->string_view()
-                          : kthread_config->name()->string_view().substr(
-                                0, star_position)),
-            .postfix = std::string(
-                !has_star ? ""
-                          : kthread_config->name()->string_view().substr(
-                                star_position + 1)),
-            .scheduler = kthread_config->scheduler(),
-            .priority = kthread_config->priority(),
-            .nice = kthread_config->nice(),
-            .affinity = AffinityFromFlatbuffer(kthread_config->affinity()),
-        });
-      }
+      PopulateThreads(irq_affinity_config.message().kthreads(), &kthreads_);
+    }
+    if (irq_affinity_config.message().has_threads()) {
+      PopulateThreads(irq_affinity_config.message().threads(), &threads_);
     }
 
     if (irq_affinity_config.message().has_irqs()) {
@@ -196,6 +175,13 @@
               break;
             }
           }
+        } else {
+          for (const ParsedKThreadConfig &match : threads_) {
+            if (match.Matches(reading.second.name)) {
+              match.ConfigurePid(reading.first);
+              break;
+            }
+          }
         }
       }
 
@@ -220,6 +206,36 @@
   }
 
  private:
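+  // Parses a list of KthreadConfig entries into ParsedKThreadConfig structs.
+  // A single '*' in the name is treated as a wildcard, splitting the name into
+  // a prefix/postfix match; otherwise the name must match exactly.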
+  void PopulateThreads(
+      const flatbuffers::Vector<flatbuffers::Offset<starter::KthreadConfig>>
+          *threads_config,
+      std::vector<ParsedKThreadConfig> *threads) {
+    threads->reserve(threads_config->size());
+    for (const starter::KthreadConfig *kthread_config : *threads_config) {
+      LOG(INFO) << "Kthread " << aos::FlatbufferToJson(kthread_config);
+      CHECK(kthread_config->has_name()) << ": Name required";
+      const size_t star_position =
+          kthread_config->name()->string_view().find('*');
+      const bool has_star = star_position != std::string_view::npos;
+
+      threads->push_back(ParsedKThreadConfig{
+          .full_match = !has_star,
+          .prefix = std::string(
+              !has_star ? kthread_config->name()->string_view()
+                        : kthread_config->name()->string_view().substr(
+                              0, star_position)),
+          .postfix = std::string(
+              !has_star ? ""
+                        : kthread_config->name()->string_view().substr(
+                              star_position + 1)),
+          .scheduler = kthread_config->scheduler(),
+          .priority = kthread_config->priority(),
+          .nice = kthread_config->nice(),
+          .affinity = AffinityFromFlatbuffer(kthread_config->affinity()),
+      });
+    }
+  }
+
   util::Top top_;
 
   // TODO(austin): Publish message with everything in it.
@@ -227,6 +243,7 @@
   // posterity.
 
   std::vector<ParsedKThreadConfig> kthreads_;
+  std::vector<ParsedKThreadConfig> threads_;
   std::vector<ParsedIrqConfig> irqs_;
 
   InterruptsStatus interrupts_status_;
diff --git a/aos/starter/kthread.fbs b/aos/starter/kthread.fbs
index 69f2b0e..c07fce9 100644
--- a/aos/starter/kthread.fbs
+++ b/aos/starter/kthread.fbs
@@ -21,7 +21,10 @@
 
 table IrqAffinityConfig {
   irqs: [IrqConfig] (id: 0);
+  // Kernel threads.
   kthreads: [KthreadConfig] (id: 1);
+  // Normal threads.
+  threads: [KthreadConfig] (id: 2);
 }
 
 root_type IrqAffinityConfig;
diff --git a/aos/starter/mock_starter.cc b/aos/starter/mock_starter.cc
new file mode 100644
index 0000000..1e06758
--- /dev/null
+++ b/aos/starter/mock_starter.cc
@@ -0,0 +1,123 @@
+#include "aos/starter/mock_starter.h"
+
+namespace aos {
+namespace starter {
+
+MockStarter::MockStarter(aos::EventLoop *event_loop)
+    : event_loop_(event_loop),
+      status_sender_(event_loop_->MakeSender<aos::starter::Status>("/aos")) {
+  aos::TimerHandler *send_timer =
+      event_loop_->AddTimer([this]() { SendStatus(); });
+
+  CHECK(aos::configuration::MultiNode(event_loop_->configuration()));
+
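+  // Watch each node's starter RPC channel that is readable from this node;
+  // each command lists the nodes it applies to, so filter on our node's name
+  // below.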
+  for (const aos::Node *node :
+       aos::configuration::GetNodes(event_loop_->configuration())) {
+    const aos::Channel *channel = aos::starter::StarterRpcChannelForNode(
+        event_loop_->configuration(), node);
+    if (aos::configuration::ChannelIsReadableOnNode(channel,
+                                                    event_loop_->node())) {
+      std::string_view channel_name = channel->name()->string_view();
+      event_loop_->MakeWatcher(
+          channel_name, [this](const aos::starter::StarterRpc &command) {
+            for (const flatbuffers::String *node : *command.nodes()) {
+              if (node->string_view() ==
+                  event_loop_->node()->name()->string_view()) {
+                CHECK(statuses_.count(command.name()->str()) > 0)
+                    << "Unable to find " << command.name()->string_view()
+                    << " in our list of applications.";
+                ApplicationStatus &status = statuses_[command.name()->str()];
+                switch (command.command()) {
+                  case aos::starter::Command::START:
+                    if (!status.running) {
+                      VLOG(1) << "Starting " << command.name()->string_view()
+                              << " at " << event_loop_->monotonic_now();
+                      status.running = true;
+                      status.start_time = event_loop_->monotonic_now();
+                      status.id = next_id_++;
+                    }
+                    break;
+                  case aos::starter::Command::STOP:
+                    if (status.running) {
+                      VLOG(1) << "Stopping " << command.name()->string_view()
+                              << " at " << event_loop_->monotonic_now();
+                    }
+                    status.running = false;
+                    break;
+                  case aos::starter::Command::RESTART:
+                    status.running = true;
+                    VLOG(1) << "Restarting " << command.name()->string_view()
+                            << " at " << event_loop_->monotonic_now();
+                    status.start_time = event_loop_->monotonic_now();
+                    status.id = next_id_++;
+                }
+                SendStatus();
+              }
+            }
+          });
+    }
+  }
+
+  event_loop_->OnRun([this, send_timer]() {
+    send_timer->Setup(event_loop_->monotonic_now(), std::chrono::seconds(1));
+
+    for (const aos::Application *application :
+         *event_loop_->configuration()->applications()) {
+      if (aos::configuration::ApplicationShouldStart(
+              event_loop_->configuration(), event_loop_->node(), application)) {
+        statuses_[application->name()->str()] = ApplicationStatus{
+            next_id_++, application->autostart(), event_loop_->monotonic_now()};
+      }
+    }
+  });
+}
+
+void MockStarter::SendStatus() {
+  aos::Sender<aos::starter::Status>::Builder builder =
+      status_sender_.MakeBuilder();
+  std::vector<flatbuffers::Offset<aos::starter::ApplicationStatus>>
+      status_offsets;
+  for (const std::pair<const std::string, ApplicationStatus> &pair :
+       statuses_) {
+    const flatbuffers::Offset<flatbuffers::String> name_offset =
+        builder.fbb()->CreateString(pair.first);
+    aos::starter::ApplicationStatus::Builder status_builder =
+        builder.MakeBuilder<aos::starter::ApplicationStatus>();
+    status_builder.add_name(name_offset);
+    status_builder.add_state(pair.second.running
+                                 ? aos::starter::State::RUNNING
+                                 : aos::starter::State::STOPPED);
+    status_builder.add_last_exit_code(0);
+    status_builder.add_id(pair.second.id);
+    status_builder.add_last_stop_reason(
+        aos::starter::LastStopReason::STOP_REQUESTED);
+    status_builder.add_last_start_time(
+        pair.second.start_time.time_since_epoch().count());
+    if (pair.second.running) {
+      status_builder.add_pid(pair.second.id);
+    }
+    status_offsets.push_back(status_builder.Finish());
+  }
+  const flatbuffers::Offset<
+      flatbuffers::Vector<flatbuffers::Offset<aos::starter::ApplicationStatus>>>
+      statuses_offset = builder.fbb()->CreateVector(status_offsets);
+  aos::starter::Status::Builder status_builder =
+      builder.MakeBuilder<aos::starter::Status>();
+  status_builder.add_statuses(statuses_offset);
+  builder.CheckOk(builder.Send(status_builder.Finish()));
+}
+
+MockStarters::MockStarters(aos::SimulatedEventLoopFactory *event_loop_factory) {
+  CHECK(aos::configuration::MultiNode(event_loop_factory->configuration()));
+  for (const aos::Node *node :
+       aos::configuration::GetNodes(event_loop_factory->configuration())) {
+    event_loops_.emplace_back(
+        event_loop_factory->GetNodeEventLoopFactory(node)->MakeEventLoop(
+            "starterd"));
+    mock_starters_.emplace_back(
+        std::make_unique<MockStarter>(event_loops_.back().get()));
+  }
+}
+
+}  // namespace starter
+}  // namespace aos
diff --git a/aos/starter/mock_starter.h b/aos/starter/mock_starter.h
new file mode 100644
index 0000000..a0c1b76
--- /dev/null
+++ b/aos/starter/mock_starter.h
@@ -0,0 +1,54 @@
+#ifndef AOS_STARTER_MOCK_STARTER_H_
+#define AOS_STARTER_MOCK_STARTER_H_
+
+#include <map>
+
+#include "aos/events/event_loop.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/starter/starter_generated.h"
+#include "aos/starter/starter_rpc_generated.h"
+#include "aos/starter/starterd_lib.h"
+
+namespace aos {
+namespace starter {
+
+// Simple mock of starterd that updates the starter status message to act as
+// though applications are started and stopped when requested.
+// TODO(james.kuszmaul): Consider integrating with SimulatedEventLoopFactory.
+class MockStarter {
+ public:
+  struct ApplicationStatus {
+    int id;
+    bool running;
+    aos::monotonic_clock::time_point start_time;
+  };
+
+  MockStarter(aos::EventLoop *event_loop);
+
+  const aos::Node *node() const { return event_loop_->node(); }
+
+  const std::map<std::string, ApplicationStatus> &statuses() const {
+    return statuses_;
+  }
+
+ private:
+  void SendStatus();
+
+  aos::EventLoop *event_loop_;
+  aos::Sender<aos::starter::Status> status_sender_;
+  std::map<std::string, ApplicationStatus> statuses_;
+  int next_id_ = 0;
+};
+
+// Spins up a MockStarter for each node.
+class MockStarters {
+ public:
+  MockStarters(aos::SimulatedEventLoopFactory *event_loop_factory);
+  const std::vector<std::unique_ptr<MockStarter>> &starters() const {
+    return mock_starters_;
+  }
+
+ private:
+  std::vector<std::unique_ptr<aos::EventLoop>> event_loops_;
+  std::vector<std::unique_ptr<MockStarter>> mock_starters_;
+};
+
+}  // namespace starter
+}  // namespace aos
+
+#endif  // AOS_STARTER_MOCK_STARTER_H_
diff --git a/aos/starter/roborio_irq_config.json b/aos/starter/roborio_irq_config.json
index af9a315..7b1d536 100644
--- a/aos/starter/roborio_irq_config.json
+++ b/aos/starter/roborio_irq_config.json
@@ -30,5 +30,12 @@
       "scheduler": "SCHEDULER_OTHER",
       "nice": -20
     }
+  ],
+  "threads": [
+    {
+      "name": "FRC_NetCommDaem",
+      "scheduler": "SCHEDULER_FIFO",
+      "priority": 15
+    }
   ]
 }
diff --git a/aos/starter/starterd_lib.cc b/aos/starter/starterd_lib.cc
index 485d1f1..b8b7343 100644
--- a/aos/starter/starterd_lib.cc
+++ b/aos/starter/starterd_lib.cc
@@ -84,7 +84,8 @@
     if (aos::configuration::MultiNode(config_msg_)) {
       std::string_view current_node = event_loop_.node()->name()->string_view();
       for (const aos::Application *application : *applications) {
-        CHECK(application->has_nodes());
+        CHECK(application->has_nodes())
+            << ": Missing nodes on " << aos::FlatbufferToJson(application);
         for (const flatbuffers::String *node : *application->nodes()) {
           if (node->string_view() == current_node) {
             AddApplication(application);
diff --git a/aos/util/BUILD b/aos/util/BUILD
index 929b376..2f10a70 100644
--- a/aos/util/BUILD
+++ b/aos/util/BUILD
@@ -346,6 +346,7 @@
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
         "//aos/scoped:scoped_fd",
+        "@com_github_google_flatbuffers//:flatbuffers",
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:span",
diff --git a/aos/util/file.cc b/aos/util/file.cc
index 6c35627..4e2d1cd 100644
--- a/aos/util/file.cc
+++ b/aos/util/file.cc
@@ -165,12 +165,46 @@
   return span;
 }
 
+FileReader::FileReader(std::string_view filename)
+    : file_(open(::std::string(filename).c_str(), O_RDONLY)) {
+  PCHECK(file_.get() != -1) << ": opening " << filename;
+}
+
+absl::Span<char> FileReader::ReadContents(absl::Span<char> buffer) {
+  PCHECK(0 == lseek(file_.get(), 0, SEEK_SET));
+  const ssize_t result = read(file_.get(), buffer.data(), buffer.size());
+  PCHECK(result >= 0);
+  return {buffer.data(), static_cast<size_t>(result)};
+}
+
 FileWriter::FileWriter(std::string_view filename, mode_t permissions)
     : file_(open(::std::string(filename).c_str(), O_WRONLY | O_CREAT | O_TRUNC,
                  permissions)) {
   PCHECK(file_.get() != -1) << ": opening " << filename;
 }
 
+// absl::SimpleAtoi doesn't interpret a leading 0x as hex, which we need here.
+// Instead, we use the flatbuffers API, which unfortunately relies on NUL
+// termination.
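+// For example, a file containing "0x10\n" should parse to 16 here, while
+// absl::SimpleAtoi("0x10", ...) rejects the string entirely.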
+int32_t FileReader::ReadInt32() {
+  // Maximum characters for a 32-bit integer, +1 for the NUL.
+  // Hex is the same size with the leading 0x.
+  std::array<char, 11> buffer;
+  int32_t result;
+  const auto string_span =
+      ReadContents(absl::Span<char>(buffer.data(), buffer.size())
+                       .subspan(0, buffer.size() - 1));
+  // Verify we found the newline.
+  CHECK_EQ(buffer[string_span.size() - 1], '\n');
+  // Truncate the newline.
+  buffer[string_span.size() - 1] = '\0';
+  CHECK(flatbuffers::StringToNumber(buffer.data(), &result))
+      << ": Error parsing string to integer: "
+      << std::string_view(string_span.data(), string_span.size());
+
+  return result;
+}
+
 FileWriter::WriteResult FileWriter::WriteBytes(
     absl::Span<const uint8_t> bytes) {
   size_t size_written = 0;
diff --git a/aos/util/file.h b/aos/util/file.h
index 56479ce..ddd5d47 100644
--- a/aos/util/file.h
+++ b/aos/util/file.h
@@ -6,12 +6,14 @@
 #include <sys/types.h>
 
 #include <memory>
+#include <optional>
 #include <string>
 #include <string_view>
 
 #include "absl/strings/numbers.h"
 #include "absl/types/span.h"
 #include "aos/scoped/scoped_fd.h"
+#include "flatbuffers/util.h"
 #include "glog/logging.h"
 
 namespace aos {
@@ -48,43 +50,38 @@
 // Wrapper to handle reading the contents of a file into a buffer. Meant for
 // situations where the malloc'ing of ReadFileToStringOrDie is inappropriate,
 // but where you still want to read a file.
-template <int kBufferSize = 1024>
 class FileReader {
  public:
-  FileReader(std::string_view filename)
-      : file_(open(::std::string(filename).c_str(), O_RDONLY)) {
-    PCHECK(file_.get() != -1) << ": opening " << filename;
-    memset(buffer_, 0, kBufferSize);
-  }
+  FileReader(std::string_view filename);
-  // Reads the entire contents of the file into the internal buffer and returns
-  // a string_view of it.
+  // Reads the entire contents of the file into the provided buffer and returns
+  // a span pointing at the bytes actually read.
   // Note: The result may not be null-terminated.
-  std::string_view ReadContents() {
-    PCHECK(0 == lseek(file_.get(), 0, SEEK_SET));
-    const ssize_t result = read(file_.get(), buffer_, sizeof(buffer_));
-    PCHECK(result >= 0);
-    return {buffer_, static_cast<size_t>(result)};
+  absl::Span<char> ReadContents(absl::Span<char> buffer);
+  // Reads the contents of a fixed-length file into a fixed-size array.
+  // Returns nullopt if fewer than kSize bytes could be read. Ignores any
+  // bytes beyond kSize.
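+  // For example, ReadString<4>() on a file containing "abcdef" returns
+  // {'a', 'b', 'c', 'd'}, while on a file with fewer than 4 bytes it returns
+  // nullopt.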
+  template <int kSize>
+  std::optional<std::array<char, kSize>> ReadString() {
+    std::array<char, kSize> result;
+    const absl::Span<char> used_span =
+        ReadContents(absl::Span<char>(result.data(), result.size()));
+    if (used_span.size() == kSize) {
+      return result;
+    } else {
+      return std::nullopt;
+    }
   }
-  // Calls ReadContents() and attempts to convert the result into an integer, or
-  // dies trying.
-  int ReadInt() {
-    int result;
-    std::string_view contents = ReadContents();
-    CHECK(absl::SimpleAtoi(contents, &result))
-        << "Failed to parse \"" << contents << "\" as int.";
-    return result;
-  }
+  // Returns the value of the file as an integer. Crashes if the value doesn't
+  // fit in a 32-bit integer or if the file doesn't end in a newline. The value
+  // may start with 0x for a hex value; otherwise it must be base 10.
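+  //
+  // A minimal usage sketch (the path is illustrative):
+  //   FileReader reader("/sys/class/net/eth0/ifindex");
+  //   const int32_t ifindex = reader.ReadInt32();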
+  int32_t ReadInt32();
 
  private:
   aos::ScopedFD file_;
-  char buffer_[kBufferSize];
 };
 
 // Simple interface to allow opening a file for writing and then writing it
 // without any malloc's.
-// TODO(james): It may make sense to add a ReadBytes() interface here that can
-// take a memory buffer to fill, to avoid the templating required by the
-// self-managed buffer of FileReader<>.
 class FileWriter {
  public:
   // The result of an individual call to WriteBytes().
diff --git a/aos/util/file_test.cc b/aos/util/file_test.cc
index 9b0def0..d4382c4 100644
--- a/aos/util/file_test.cc
+++ b/aos/util/file_test.cc
@@ -56,8 +56,20 @@
   FLAGS_die_on_malloc = true;
   RegisterMallocHook();
   aos::ScopedRealtime realtime;
-  EXPECT_EQ("123456789\n", reader.ReadContents());
-  EXPECT_EQ(123456789, reader.ReadInt());
+  {
+    std::array<char, 20> contents;
+    absl::Span<char> read_result =
+        reader.ReadContents({contents.data(), contents.size()});
+    EXPECT_EQ("123456789\n",
+              std::string_view(read_result.data(), read_result.size()));
+  }
+  {
+    std::optional<std::array<char, 10>> read_result = reader.ReadString<10>();
+    ASSERT_TRUE(read_result.has_value());
+    EXPECT_EQ("123456789\n",
+              std::string_view(read_result->data(), read_result->size()));
+  }
+  EXPECT_EQ(123456789, reader.ReadInt32());
 }
 
 // Tests that we can write to a file without malloc'ing.
diff --git a/frc971/analysis/BUILD b/frc971/analysis/BUILD
index 36c450a..1ebf1ba 100644
--- a/frc971/analysis/BUILD
+++ b/frc971/analysis/BUILD
@@ -58,6 +58,7 @@
         "//y2022/control_loops/superstructure:turret_plotter",
         "//y2022/localizer:localizer_plotter",
         "//y2022/vision:vision_plotter",
+        "//y2023/localizer:corrections_plotter",
     ],
 )
 
diff --git a/frc971/analysis/plot_index.ts b/frc971/analysis/plot_index.ts
index ab00b99..1b62e42 100644
--- a/frc971/analysis/plot_index.ts
+++ b/frc971/analysis/plot_index.ts
@@ -54,6 +54,8 @@
     '../../y2022/localizer/localizer_plotter'
 import {plotVision as plot2022Vision} from
     '../../y2022/vision/vision_plotter'
+import {plotVision as plot2023Corrections} from
+    '../../y2023/localizer/corrections_plotter'
 import {plotDemo} from '../../aos/network/www/demo_plot';
 
 const rootDiv = document.createElement('div');
@@ -112,6 +114,7 @@
   ['Spline Debug', new PlotState(plotDiv, plotSpline)],
   ['Down Estimator', new PlotState(plotDiv, plotDownEstimator)],
   ['Robot State', new PlotState(plotDiv, plotRobotState)],
+  ['2023 Vision', new PlotState(plotDiv, plot2023Corrections)],
   ['2020 Finisher', new PlotState(plotDiv, plot2020Finisher)],
   ['2020 Accelerator', new PlotState(plotDiv, plot2020Accelerator)],
   ['2020 Hood', new PlotState(plotDiv, plot2020Hood)],
diff --git a/frc971/autonomous/base_autonomous_actor.cc b/frc971/autonomous/base_autonomous_actor.cc
index ca6cc89..90c1454 100644
--- a/frc971/autonomous/base_autonomous_actor.cc
+++ b/frc971/autonomous/base_autonomous_actor.cc
@@ -177,6 +177,24 @@
   return false;
 }
 
+double BaseAutonomousActor::X() {
+  drivetrain_status_fetcher_.Fetch();
+  CHECK(drivetrain_status_fetcher_.get());
+  return drivetrain_status_fetcher_->x();
+}
+
+double BaseAutonomousActor::Y() {
+  drivetrain_status_fetcher_.Fetch();
+  CHECK(drivetrain_status_fetcher_.get());
+  return drivetrain_status_fetcher_->y();
+}
+
+double BaseAutonomousActor::Theta() {
+  drivetrain_status_fetcher_.Fetch();
+  CHECK(drivetrain_status_fetcher_.get());
+  return drivetrain_status_fetcher_->theta();
+}
+
 bool BaseAutonomousActor::WaitForAboveAngle(double angle) {
   ::aos::time::PhasedLoop phased_loop(frc971::controls::kLoopFrequency,
                                       event_loop()->monotonic_now(),
@@ -412,18 +430,26 @@
     //     when we reach the end of the spline).
     // (b) The spline that we are executing is the correct one.
     // (c) There is less than distance distance remaining.
+    if (base_autonomous_actor_->drivetrain_status_fetcher_->trajectory_logging()
+            ->goal_spline_handle() != spline_handle_) {
+      // Never done if we aren't the active spline.
+      return false;
+    }
+
+    if (base_autonomous_actor_->drivetrain_status_fetcher_->trajectory_logging()
+            ->is_executed()) {
+      return true;
+    }
     return base_autonomous_actor_->drivetrain_status_fetcher_
                ->trajectory_logging()
                ->is_executing() &&
            base_autonomous_actor_->drivetrain_status_fetcher_
                    ->trajectory_logging()
-                   ->goal_spline_handle() == spline_handle_ &&
-           base_autonomous_actor_->drivetrain_status_fetcher_
-                   ->trajectory_logging()
                    ->distance_remaining() < distance;
   }
   return false;
 }
+
 bool BaseAutonomousActor::SplineHandle::WaitForSplineDistanceRemaining(
     double distance) {
   ::aos::time::PhasedLoop phased_loop(
diff --git a/frc971/autonomous/base_autonomous_actor.h b/frc971/autonomous/base_autonomous_actor.h
index a70a440..5562dde 100644
--- a/frc971/autonomous/base_autonomous_actor.h
+++ b/frc971/autonomous/base_autonomous_actor.h
@@ -91,6 +91,11 @@
   // Returns true if the drive has finished.
   bool IsDriveDone();
 
+  // Returns the current x, y, theta of the robot on the field.
+  double X();
+  double Y();
+  double Theta();
+
   void LineFollowAtVelocity(
       double velocity,
       y2019::control_loops::drivetrain::SelectionHint hint =
diff --git a/frc971/control_loops/drivetrain/drivetrain_plotter.ts b/frc971/control_loops/drivetrain/drivetrain_plotter.ts
index 5610e97..a609532 100644
--- a/frc971/control_loops/drivetrain/drivetrain_plotter.ts
+++ b/frc971/control_loops/drivetrain/drivetrain_plotter.ts
@@ -18,6 +18,8 @@
       "frc971.control_loops.drivetrain.Position");
   const status = aosPlotter.addMessageSource(
       '/drivetrain', 'frc971.control_loops.drivetrain.Status');
+  const localizerOutput = aosPlotter.addMessageSource(
+      '/localizer', 'frc971.controls.LocalizerOutput');
   const output = aosPlotter.addMessageSource(
       '/drivetrain', 'frc971.control_loops.drivetrain.Output');
   const gyroReading = aosPlotter.addMessageSource(
@@ -279,11 +281,10 @@
   xPositionPlot.plot.getAxisLabels().setXLabel(TIME);
   xPositionPlot.plot.getAxisLabels().setYLabel('X Position (m)');
 
-  const localizerX = xPositionPlot.addMessageLine(status, ['x']);
-  localizerX.setColor(RED);
-  const splineX =
-      xPositionPlot.addMessageLine(status, ['trajectory_logging', 'x']);
-  splineX.setColor(GREEN);
+  xPositionPlot.addMessageLine(status, ['x']).setColor(RED);
+  xPositionPlot.addMessageLine(status, ['trajectory_logging', 'x'])
+      .setColor(GREEN);
+  xPositionPlot.addMessageLine(localizerOutput, ['x']).setColor(BLUE);
 
   // Absolute Y Position
   const yPositionPlot = aosPlotter.addPlot(element);
@@ -291,11 +292,10 @@
   yPositionPlot.plot.getAxisLabels().setXLabel(TIME);
   yPositionPlot.plot.getAxisLabels().setYLabel('Y Position (m)');
 
-  const localizerY = yPositionPlot.addMessageLine(status, ['y']);
-  localizerY.setColor(RED);
-  const splineY =
-      yPositionPlot.addMessageLine(status, ['trajectory_logging', 'y']);
-  splineY.setColor(GREEN);
+  yPositionPlot.addMessageLine(status, ['y']).setColor(RED);
+  yPositionPlot.addMessageLine(status, ['trajectory_logging', 'y'])
+      .setColor(GREEN);
+  yPositionPlot.addMessageLine(localizerOutput, ['y']).setColor(BLUE);
 
   // Gyro
   const gyroPlot = aosPlotter.addPlot(element);
diff --git a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
index 101c0f7..ea20bd0 100644
--- a/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
+++ b/frc971/control_loops/drivetrain/line_follow_drivetrain.cc
@@ -165,6 +165,8 @@
     const ::Eigen::Matrix<double, 5, 1> &abs_state, double relative_y_offset,
     double velocity_sign) {
   // Calculates the goal angle for the drivetrain given our position.
+  // Note: The goal angle is in target-relative coordinates, since our entire
+  // control loop is written relative to the target.
   // The calculated goal will be such that a point piece_rad to one side of the
   // drivetrain (the side depends on where we approach from and SignedRadii())
   // will end up hitting the plane of the target exactly target_rad from the
diff --git a/frc971/control_loops/drivetrain/localization/puppet_localizer.cc b/frc971/control_loops/drivetrain/localization/puppet_localizer.cc
index 33095f2..77b9fbe 100644
--- a/frc971/control_loops/drivetrain/localization/puppet_localizer.cc
+++ b/frc971/control_loops/drivetrain/localization/puppet_localizer.cc
@@ -91,7 +91,7 @@
         static_cast<float>(localizer_output_fetcher_->y()),
         static_cast<float>(localizer_output_fetcher_->theta())};
     Eigen::Matrix3f R = Eigen::Matrix3f::Zero();
-    R.diagonal() << 0.01, 0.01, 1e-4;
+    R.diagonal() << 1e-4, 1e-4, 1e-6;
     const Input U_correct = ekf_.MostRecentInput();
     observations_.CorrectKnownH(Eigen::Vector3f::Zero(), &U_correct,
                                 Corrector(state_at_capture.value(), Z), R, now);
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.h b/frc971/control_loops/drivetrain/splinedrivetrain.h
index cd712bc..71e5ffe 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.h
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.h
@@ -22,7 +22,7 @@
 
 class SplineDrivetrain {
  public:
-  static constexpr size_t kMaxTrajectories = 5;
+  static constexpr size_t kMaxTrajectories = 6;
   SplineDrivetrain(const DrivetrainConfig<double> &dt_config);
 
   void SetGoal(const ::frc971::control_loops::drivetrain::Goal *goal);
diff --git a/frc971/control_loops/python/drawing_constants.py b/frc971/control_loops/python/drawing_constants.py
index 431abd7..523100e 100644
--- a/frc971/control_loops/python/drawing_constants.py
+++ b/frc971/control_loops/python/drawing_constants.py
@@ -44,7 +44,11 @@
     cr.stroke()
 
 
-def draw_control_points(cr, points, width=10, radius=4, color=palette["BLUE"]):
+def draw_control_points_cross(cr,
+                              points,
+                              width=10,
+                              radius=4,
+                              color=palette["BLUE"]):
     for i in range(0, len(points)):
         draw_px_x(cr, points[i][0], points[i][1], width, color)
         set_color(cr, color)
diff --git a/frc971/control_loops/python/multispline.py b/frc971/control_loops/python/multispline.py
index 5551587..830cfde 100644
--- a/frc971/control_loops/python/multispline.py
+++ b/frc971/control_loops/python/multispline.py
@@ -264,6 +264,8 @@
         best_multispline = None
 
         for multispline_index, multispline in enumerate(multisplines):
+            if not multispline.getLibsplines():
+                continue
             distance_spline = DistanceSpline(multispline.getLibsplines())
 
             # The optimizer finds local minima that often aren't what we want,
diff --git a/frc971/control_loops/python/path_edit.py b/frc971/control_loops/python/path_edit.py
index 86777e5..b5e8c1b 100755
--- a/frc971/control_loops/python/path_edit.py
+++ b/frc971/control_loops/python/path_edit.py
@@ -17,9 +17,10 @@
 from constants import FIELD
 from constants import get_json_folder
 from constants import ROBOT_SIDE_TO_BALL_CENTER, ROBOT_SIDE_TO_HATCH_PANEL, HATCH_PANEL_WIDTH, BALL_RADIUS
-from drawing_constants import set_color, draw_px_cross, draw_px_x, display_text, draw_control_points
+from drawing_constants import set_color, draw_px_cross, draw_px_x, display_text, draw_control_points_cross
 from multispline import Multispline, ControlPointIndex
 import time
+from pathlib import Path
 
 
 class Mode(enum.Enum):
@@ -54,6 +55,7 @@
         self.lasty = 0
         self.drag_start = None
         self.module_path = os.path.dirname(os.path.realpath(sys.argv[0]))
+        self.repository_root = Path(self.module_path, "../../..").resolve()
         self.path_to_export = os.path.join(self.module_path,
                                            'points_for_pathedit.json')
 
@@ -199,38 +201,14 @@
                     draw_px_x(cr, point[0], point[1], self.pxToM(2))
             if len(self.multisplines) != 0 and self.multisplines[0].getSplines(
             ):  #still in testing
+                self.draw_cursor(cr)
                 self.draw_splines(cr)
         elif self.mode == Mode.kEditing:
             if len(self.multisplines) != 0 and self.multisplines[0].getSplines(
             ):
+                self.draw_cursor(cr)
                 self.draw_splines(cr)
-
-                for i, points in enumerate(
-                        self.active_multispline.getSplines()):
-                    points = [np.array([x, y]) for (x, y) in points]
-                    draw_control_points(cr,
-                                        points,
-                                        width=self.pxToM(5),
-                                        radius=self.pxToM(2))
-
-                    p0, p1, p2, p3, p4, p5 = points
-                    first_tangent = p0 + 2.0 * (p1 - p0)
-                    second_tangent = p5 + 2.0 * (p4 - p5)
-                    cr.set_source_rgb(0, 0.5, 0)
-                    cr.move_to(*p0)
-                    cr.set_line_width(self.pxToM(1.0))
-                    cr.line_to(*first_tangent)
-                    cr.move_to(*first_tangent)
-                    cr.line_to(*p2)
-
-                    cr.move_to(*p5)
-                    cr.line_to(*second_tangent)
-
-                    cr.move_to(*second_tangent)
-                    cr.line_to(*p3)
-
-                    cr.stroke()
-                    cr.set_line_width(self.pxToM(2))
+                self.draw_control_points(cr)
 
         set_color(cr, palette["WHITE"])
         cr.paint_with_alpha(0.2)
@@ -238,7 +216,34 @@
         draw_px_cross(cr, self.mousex, self.mousey, self.pxToM(2))
         cr.restore()
 
-    def draw_splines(self, cr):
+    def draw_control_points(self, cr):
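+        """Draws control points and tangent lines for the active multispline."""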
+        for i, points in enumerate(self.active_multispline.getSplines()):
+            points = [np.array([x, y]) for (x, y) in points]
+            draw_control_points_cross(cr,
+                                      points,
+                                      width=self.pxToM(5),
+                                      radius=self.pxToM(2))
+
+            p0, p1, p2, p3, p4, p5 = points
+            first_tangent = p0 + 2.0 * (p1 - p0)
+            second_tangent = p5 + 2.0 * (p4 - p5)
+            cr.set_source_rgb(0, 0.5, 0)
+            cr.move_to(*p0)
+            cr.set_line_width(self.pxToM(1.0))
+            cr.line_to(*first_tangent)
+            cr.move_to(*first_tangent)
+            cr.line_to(*p2)
+
+            cr.move_to(*p5)
+            cr.line_to(*second_tangent)
+
+            cr.move_to(*second_tangent)
+            cr.line_to(*p3)
+
+            cr.stroke()
+            cr.set_line_width(self.pxToM(2))
+
+    def draw_cursor(self, cr):
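+        """Moves the graph cursor to the spline point nearest the mouse."""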
         mouse = np.array((self.mousex, self.mousey))
 
         multispline, result = Multispline.nearest_distance(
@@ -267,6 +272,7 @@
             multispline_index = self.multisplines.index(multispline)
             self.graph.place_cursor(multispline_index, distance=result.x[0])
 
+    def draw_splines(self, cr):
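+        """Draws every multispline, fading out the inactive ones."""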
         for multispline in self.multisplines:
             for i, spline in enumerate(multispline.getLibsplines()):
                 alpha = 1 if multispline == self.active_multispline else 0.2
@@ -284,65 +290,62 @@
                 self.draw_robot_at_point(cr, spline, 1)
 
     def export_json(self, file_name):
-        self.path_to_export = os.path.join(
-            self.module_path,  # position of the python
-            "../../..",  # root of the repository
+        export_folder = Path(
+            self.repository_root,
             get_json_folder(self.field),  # path from the root
-            file_name  # selected file
         )
 
-        # Will export to json file
-        multisplines_object = [
-            multispline.toJsonObject() for multispline in self.multisplines
-        ]
-        print(multisplines_object)
-        with open(self.path_to_export, mode='w') as points_file:
-            json.dump(multisplines_object, points_file)
+        filename = Path(export_folder, file_name)
+
+        # strip suffix
+        filename = filename.with_suffix("")
+        print(file_name, filename)
+
+        print(f"Exporting {len(self.multisplines)} splines")
+        # Export each multispline to its own json file
+        for index, multispline in enumerate(self.multisplines):
+            file = filename.with_suffix(f".{index}.json")
+            print(f"  {file.relative_to(export_folder)}")
+            with open(file, mode='w') as points_file:
+                json.dump(multispline.toJsonObject(), points_file)
 
     def import_json(self, file_name):
-        self.path_to_export = os.path.join(
-            self.module_path,  # position of the python
-            "../../..",  # root of the repository
+        # Abort place mode
+        if self.mode is Mode.kPlacing and len(self.multisplines) > 0 and len(
+                self.multisplines[-1].getSplines()) == 0:
+            self.multisplines.pop()
+            self.mode = Mode.kEditing
+            self.queue_draw()
+
+        import_folder = Path(
+            self.repository_root,
             get_json_folder(self.field),  # path from the root
-            file_name  # selected file
         )
 
-        # import from json file
-        print("LOADING LOAD FROM " + file_name)  # Load takes a few seconds
-        with open(self.path_to_export) as points_file:
-            multisplines_object = json.load(points_file)
+        file_candidates = []
+
+        # try exact match first
+        filename = Path(import_folder, file_name)
+        if filename.exists():
+            file_candidates.append(filename)
+        else:
+            # look for other files with the same stem but different numbers
+            stripped_stem = Path(file_name).with_suffix('').stem
+            file_candidates = list(
+                import_folder.glob(f"{stripped_stem}.*.json"))
+            print([file.stem for file in file_candidates])
+            file_candidates.sort()
+
+        print(f"Found {len(file_candidates)} files")
+        for file in file_candidates:
+            print(f"  {file.relative_to(import_folder)}")
+
+            with open(file) as points_file:
+                self.multisplines.append(
+                    Multispline.fromJsonObject(json.load(points_file)))
 
         self.attempt_append_multisplines()
 
-        # TODO: Export multisplines in different files
-        if type(multisplines_object) is dict:
-            multisplines_object = [multisplines_object]
-        else:
-            self.multisplines = []
-
-        # if people messed with the spline json,
-        # it might not be the right length
-        # so give them a nice error message
-        for multispline_object in multisplines_object:
-            print(multispline_object)
-            try:  # try to salvage as many segments of the spline as possible
-                self.multisplines.append(
-                    Multispline.fromJsonObject(multispline_object))
-            except IndexError:
-                # check if they're both 6+5*(k-1) long
-                expected_length = 6 + 5 * (multispline_object["spline_count"] -
-                                           1)
-                x_len = len(multispline_object["spline_x"])
-                y_len = len(multispline_object["spline_x"])
-                if x_len is not expected_length:
-                    print(
-                        "Error: spline x values were not the expected length; expected {} got {}"
-                        .format(expected_length, x_len))
-                elif y_len is not expected_length:
-                    print(
-                        "Error: spline y values were not the expected length; expected {} got {}"
-                        .format(expected_length, y_len))
-
         print("SPLINES LOADED")
         self.mode = Mode.kEditing
         self.queue_draw()
@@ -361,9 +364,9 @@
         self.control_point_index = None
-        #recalulate graph using new points
+        #recalculate graph using new points
         self.graph.axis.clear()
-        self.graph.queue_draw()
-        #allow placing again
-        self.mode = Mode.kPlacing
+        self.graph.canvas.draw_idle()
+        #go back into viewing mode
+        self.mode = Mode.kViewing
         #redraw entire graph
         self.queue_draw()
 
diff --git a/frc971/control_loops/python/spline_graph.py b/frc971/control_loops/python/spline_graph.py
index ce5efe1..666710e 100755
--- a/frc971/control_loops/python/spline_graph.py
+++ b/frc971/control_loops/python/spline_graph.py
@@ -32,10 +32,12 @@
 
         if self.field.mode != Mode.kViewing:
             self.field.mode = Mode.kViewing
+            self.field.queue_draw()
             Gtk.Button.set_label(self.toggle_view, "Switch to Editing Mode")
 
         else:
             self.field.mode = self.field.previous_mode
+            self.field.queue_draw()
             if self.field.mode == Mode.kEditing:
                 Gtk.Button.set_label(self.toggle_view,
                                      "Switch to Viewing Mode")
diff --git a/frc971/rockpi/contents/etc/systemd/system/tmp.mount b/frc971/rockpi/contents/etc/systemd/system/tmp.mount
new file mode 100644
index 0000000..517dd49
--- /dev/null
+++ b/frc971/rockpi/contents/etc/systemd/system/tmp.mount
@@ -0,0 +1,28 @@
+#  SPDX-License-Identifier: LGPL-2.1-or-later
+#
+#  This file is part of systemd.
+#
+#  systemd is free software; you can redistribute it and/or modify it
+#  under the terms of the GNU Lesser General Public License as published by
+#  the Free Software Foundation; either version 2.1 of the License, or
+#  (at your option) any later version.
+
+[Unit]
+Description=Temporary Directory /tmp
+Documentation=https://systemd.io/TEMPORARY_DIRECTORIES
+Documentation=man:file-hierarchy(7)
+Documentation=https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
+ConditionPathIsSymbolicLink=!/tmp
+DefaultDependencies=no
+Conflicts=umount.target
+Before=local-fs.target umount.target
+After=swap.target
+
+[Mount]
+What=tmpfs
+Where=/tmp
+Type=tmpfs
+Options=mode=1777,nosuid,nodev
+
+[Install]
+WantedBy=local-fs.target
diff --git a/frc971/vision/BUILD b/frc971/vision/BUILD
index 936a441..f3ebecb 100644
--- a/frc971/vision/BUILD
+++ b/frc971/vision/BUILD
@@ -32,6 +32,13 @@
     visibility = ["//visibility:public"],
 )
 
+flatbuffer_ts_library(
+    name = "target_map_ts_fbs",
+    srcs = ["target_map.fbs"],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+)
+
 flatbuffer_py_library(
     name = "calibration_fbs_python",
     srcs = [
diff --git a/frc971/vision/target_map.fbs b/frc971/vision/target_map.fbs
index e635760..de79744 100644
--- a/frc971/vision/target_map.fbs
+++ b/frc971/vision/target_map.fbs
@@ -63,6 +63,13 @@
   // End-of-frame timestamp for the frame with tag detections.
   // (for use case 2.).
   monotonic_timestamp_ns:int64 (id: 2);
+
+  // Number of AprilTag detections rejected (cumulative) because
+  // of low decision margin (affected by lighting).
+  // We do the decision margin rejection in aprilrobotics
+  // so we don't have an excessive amount of random target
+  // detections (for use case 2).
+  rejections:uint64 (id: 3);
 }
 
 root_type TargetMap;
diff --git a/scouting/DriverRank/src/DriverRank.jl b/scouting/DriverRank/src/DriverRank.jl
index 39ac95e..e759fea 100755
--- a/scouting/DriverRank/src/DriverRank.jl
+++ b/scouting/DriverRank/src/DriverRank.jl
@@ -106,7 +106,8 @@
     input_csv::String,
     output_csv::String,
 )
-    df = DataFrame(CSV.File(input_csv))
+    # Force all team numbers to be parsed as strings.
+    df = DataFrame(CSV.File(input_csv, types=String))
 
     rank1 = "Rank 1 (best)"
     rank2 = "Rank 2"
diff --git a/scouting/db/db.go b/scouting/db/db.go
index 75309d9..3791596 100644
--- a/scouting/db/db.go
+++ b/scouting/db/db.go
@@ -7,6 +7,7 @@
 	"gorm.io/gorm"
 	"gorm.io/gorm/clause"
 	"gorm.io/gorm/logger"
+	"strconv"
 )
 
 type Database struct {
@@ -19,7 +20,7 @@
 	CompLevel        string `gorm:"primaryKey"`
 	Alliance         string `gorm:"primaryKey"` // "R" or "B"
 	AlliancePosition int32  `gorm:"primaryKey"` // 1, 2, or 3
-	TeamNumber       int32
+	TeamNumber       string
 }
 
 type Shift struct {
@@ -75,7 +76,9 @@
 	LowConesAuto, MiddleConesAuto, HighConesAuto, ConesDroppedAuto int32
 	LowCubes, MiddleCubes, HighCubes, CubesDropped                 int32
 	LowCones, MiddleCones, HighCones, ConesDropped                 int32
-	AvgCycle                                                       int32
+	AvgCycle                                                       int64
+	DockedAuto, EngagedAuto                                        bool
+	Docked, Engaged                                                bool
 	// The username of the person who collected these statistics.
 	// "unknown" if submitted without logging in.
 	// Empty if the stats have not yet been collected.
@@ -195,7 +198,7 @@
 }
 
 func (database *Database) AddToStats(s Stats) error {
-	matches, err := database.queryMatches(s.TeamNumber)
+	matches, err := database.queryMatches(strconv.Itoa(int(s.TeamNumber)))
 	if err != nil {
 		return err
 	}
@@ -329,13 +332,22 @@
 	return stats2023, result.Error
 }
 
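+// ReturnStats2023ForTeam returns the Stats2023 entries for one team in the
+// given match, set, and comp level.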
+func (database *Database) ReturnStats2023ForTeam(teamNumber string, matchNumber int32, setNumber int32, compLevel string) ([]Stats2023, error) {
+	var stats2023 []Stats2023
+	result := database.
+		Where("team_number = ? AND match_number = ? AND set_number = ? AND comp_level = ?",
+			teamNumber, matchNumber, setNumber, compLevel).
+		Find(&stats2023)
+	return stats2023, result.Error
+}
+
 func (database *Database) ReturnRankings() ([]Ranking, error) {
 	var rankins []Ranking
 	result := database.Find(&rankins)
 	return rankins, result.Error
 }
 
-func (database *Database) queryMatches(teamNumber_ int32) ([]TeamMatch, error) {
+func (database *Database) queryMatches(teamNumber_ string) ([]TeamMatch, error) {
 	var matches []TeamMatch
 	result := database.
 		Where("team_number = $1", teamNumber_).
diff --git a/scouting/db/db_test.go b/scouting/db/db_test.go
index b5e9a38..72c5f86 100644
--- a/scouting/db/db_test.go
+++ b/scouting/db/db_test.go
@@ -6,7 +6,6 @@
 	"os"
 	"os/exec"
 	"reflect"
-	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -75,27 +74,27 @@
 	correct := []TeamMatch{
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 9999,
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "9999",
 		},
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1000,
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1000",
 		},
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 777,
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "777",
 		},
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 0000,
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "0000",
 		},
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 4321,
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "4321",
 		},
 		TeamMatch{
 			MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1234,
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1234",
 		},
 	}
 
@@ -202,17 +201,17 @@
 	}
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1236},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1236"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1001},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1001"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 777},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "777"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 1000},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "1000"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 4321},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "4321"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1234},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1234"},
 	}
 
 	for _, match := range matches {
@@ -246,7 +245,8 @@
 			ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
 			HighCubes: 1, CubesDropped: 0, LowCones: 0,
 			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 0, CollectedBy: "emma",
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "emma",
 		},
 		Stats2023{
 			TeamNumber: "7454", MatchNumber: 3, SetNumber: 1,
@@ -256,7 +256,8 @@
 			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
 			HighCubes: 0, CubesDropped: 1, LowCones: 0,
 			MiddleCones: 0, HighCones: 1, ConesDropped: 0,
-			AvgCycle: 0, CollectedBy: "tyler",
+			AvgCycle: 0, DockedAuto: false, EngagedAuto: false,
+			Docked: true, Engaged: true, CollectedBy: "tyler",
 		},
 		Stats2023{
 			TeamNumber: "4354", MatchNumber: 3, SetNumber: 1,
@@ -266,7 +267,8 @@
 			ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
 			HighCubes: 2, CubesDropped: 1, LowCones: 1,
 			MiddleCones: 1, HighCones: 0, ConesDropped: 1,
-			AvgCycle: 0, CollectedBy: "isaac",
+			AvgCycle: 0, DockedAuto: false, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "isaac",
 		},
 		Stats2023{
 			TeamNumber: "6533", MatchNumber: 3, SetNumber: 1,
@@ -276,7 +278,8 @@
 			ConesDroppedAuto: 0, LowCubes: 0, MiddleCubes: 1,
 			HighCubes: 2, CubesDropped: 1, LowCones: 0,
 			MiddleCones: 1, HighCones: 0, ConesDropped: 0,
-			AvgCycle: 0, CollectedBy: "will",
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: true,
+			Docked: false, Engaged: false, CollectedBy: "will",
 		},
 		Stats2023{
 			TeamNumber: "8354", MatchNumber: 3, SetNumber: 1,
@@ -286,21 +289,22 @@
 			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
 			HighCubes: 0, CubesDropped: 2, LowCones: 1,
 			MiddleCones: 1, HighCones: 0, ConesDropped: 1,
-			AvgCycle: 0, CollectedBy: "unkown",
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+			Docked: true, Engaged: false, CollectedBy: "unkown",
 		},
 	}
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 6344},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
 		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 7454},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "7454"},
 		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 4354},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "4354"},
 		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 6533},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "6533"},
 		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 8354},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "8354"},
 	}
 
 	for _, match := range matches {
@@ -321,6 +325,84 @@
 	}
 }
 
+func TestQueryingStats2023ByTeam(t *testing.T) {
+	fixture := createDatabase(t)
+	defer fixture.TearDown()
+
+	stats := []Stats2023{
+		Stats2023{
+			TeamNumber: "6344", MatchNumber: 3, SetNumber: 1,
+			CompLevel: "qm", StartingQuadrant: 1, LowCubesAuto: 0,
+			MiddleCubesAuto: 1, HighCubesAuto: 0, CubesDroppedAuto: 1,
+			LowConesAuto: 1, MiddleConesAuto: 0, HighConesAuto: 2,
+			ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
+			HighCubes: 1, CubesDropped: 0, LowCones: 0,
+			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "emma",
+		},
+		Stats2023{
+			TeamNumber: "7454", MatchNumber: 4, SetNumber: 1,
+			CompLevel: "qm", StartingQuadrant: 2, LowCubesAuto: 1,
+			MiddleCubesAuto: 2, HighCubesAuto: 2, CubesDroppedAuto: 0,
+			LowConesAuto: 2, MiddleConesAuto: 0, HighConesAuto: 0,
+			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
+			HighCubes: 0, CubesDropped: 1, LowCones: 0,
+			MiddleCones: 0, HighCones: 1, ConesDropped: 0,
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: true,
+			Docked: false, Engaged: false, CollectedBy: "tyler",
+		},
+		Stats2023{
+			TeamNumber: "6344", MatchNumber: 5, SetNumber: 1,
+			CompLevel: "qm", StartingQuadrant: 1, LowCubesAuto: 0,
+			MiddleCubesAuto: 1, HighCubesAuto: 0, CubesDroppedAuto: 1,
+			LowConesAuto: 1, MiddleConesAuto: 0, HighConesAuto: 2,
+			ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
+			HighCubes: 1, CubesDropped: 0, LowCones: 0,
+			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
+			AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+			Docked: true, Engaged: false, CollectedBy: "emma",
+		},
+	}
+
+	matches := []TeamMatch{
+		TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
+		TeamMatch{MatchNumber: 4, SetNumber: 1, CompLevel: "qm",
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "7454"},
+		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "qm",
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
+	}
+
+	for _, match := range matches {
+		err := fixture.db.AddToMatch(match)
+		check(t, err, "Failed to add match")
+	}
+
+	for i := range stats {
+		err := fixture.db.AddToStats2023(stats[i])
+		check(t, err, "Failed to add 2023stats to DB")
+	}
+
+	// Validate that requesting stats for a single team gets us the
+	// expected data.
+	statsFor6344, err := fixture.db.ReturnStats2023ForTeam("6344", 3, 1, "qm")
+	check(t, err, "Failed ReturnStats2023()")
+
+	if !reflect.DeepEqual([]Stats2023{stats[0]}, statsFor6344) {
+		t.Errorf("Got %#v,\nbut expected %#v.", statsFor6344, stats[0])
+	}
+
+	// Validate that requesting team data for a non-existent match returns
+	// nothing.
+	statsForMissing, err := fixture.db.ReturnStats2023ForTeam("6344", 9, 1, "qm")
+	check(t, err, "Failed ReturnStats2023()")
+
+	if !reflect.DeepEqual([]Stats2023{}, statsForMissing) {
+		t.Errorf("Got %#v,\nbut expected %#v.", statsForMissing, []Stats2023{})
+	}
+}
+
 func TestAddDuplicateStats(t *testing.T) {
 	fixture := createDatabase(t)
 	defer fixture.TearDown()
@@ -338,17 +420,17 @@
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1236},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1236"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1001},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1001"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 777},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "777"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 1000},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "1000"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 4321},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "4321"},
 		TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1234},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1234"},
 	}
 
 	for _, match := range matches {
@@ -383,7 +465,8 @@
 			ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 1,
 			HighCubes: 2, CubesDropped: 1, LowCones: 1,
 			MiddleCones: 0, HighCones: 1, ConesDropped: 2,
-			AvgCycle: 58, CollectedBy: "unknown",
+			AvgCycle: 58, DockedAuto: false, EngagedAuto: false,
+			Docked: true, Engaged: true, CollectedBy: "unknown",
 		},
 		Stats2023{
 			TeamNumber: "2314", MatchNumber: 5, SetNumber: 1,
@@ -393,7 +476,8 @@
 			ConesDroppedAuto: 0, LowCubes: 2, MiddleCubes: 0,
 			HighCubes: 1, CubesDropped: 0, LowCones: 0,
 			MiddleCones: 2, HighCones: 1, ConesDropped: 0,
-			AvgCycle: 34, CollectedBy: "simon",
+			AvgCycle: 34, DockedAuto: true, EngagedAuto: true,
+			Docked: true, Engaged: false, CollectedBy: "simon",
 		},
 		Stats2023{
 			TeamNumber: "3242", MatchNumber: 5, SetNumber: 1,
@@ -403,7 +487,8 @@
 			ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 2,
 			MiddleCones: 0, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 50, CollectedBy: "eliza",
+			AvgCycle: 50, DockedAuto: false, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "eliza",
 		},
 		Stats2023{
 			TeamNumber: "1742", MatchNumber: 5, SetNumber: 1,
@@ -413,7 +498,8 @@
 			ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 1,
 			HighCubes: 2, CubesDropped: 1, LowCones: 0,
 			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 49, CollectedBy: "isaac",
+			AvgCycle: 49, DockedAuto: true, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "isaac",
 		},
 		Stats2023{
 			TeamNumber: "2454", MatchNumber: 5, SetNumber: 1,
@@ -423,7 +509,8 @@
 			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 1,
 			MiddleCones: 1, HighCones: 1, ConesDropped: 0,
-			AvgCycle: 70, CollectedBy: "sam",
+			AvgCycle: 70, DockedAuto: true, EngagedAuto: true,
+			Docked: false, Engaged: false, CollectedBy: "sam",
 		},
 	}
 
@@ -436,7 +523,8 @@
 			ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 2,
 			MiddleCones: 0, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 50, CollectedBy: "eliza",
+			AvgCycle: 50, DockedAuto: false, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "eliza",
 		},
 		Stats2023{
 			TeamNumber: "2454", MatchNumber: 5, SetNumber: 1,
@@ -446,31 +534,32 @@
 			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 1,
 			MiddleCones: 1, HighCones: 1, ConesDropped: 0,
-			AvgCycle: 70, CollectedBy: "sam",
+			AvgCycle: 70, DockedAuto: true, EngagedAuto: true,
+			Docked: false, Engaged: false, CollectedBy: "sam",
 		},
 	}
 
 	originalMatches := []TeamMatch{
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1111},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1111"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 2314},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "2314"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 1742},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "1742"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 2454},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "2454"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 3242},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "3242"},
 	}
 
 	// Matches for which we want to delete the stats.
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			TeamNumber: 1111},
+			TeamNumber: "1111"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			TeamNumber: 2314},
+			TeamNumber: "2314"},
 		TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
-			TeamNumber: 1742},
+			TeamNumber: "1742"},
 	}
 
 	for _, match := range originalMatches {
@@ -485,7 +574,7 @@
 	}
 
 	for _, match := range matches {
-		err := fixture.db.DeleteFromStats(match.CompLevel, match.MatchNumber, match.SetNumber, strconv.Itoa(int(match.TeamNumber)))
+		err := fixture.db.DeleteFromStats(match.CompLevel, match.MatchNumber, match.SetNumber, match.TeamNumber)
 		check(t, err, "Failed to delete stat")
 	}
 
@@ -588,17 +677,17 @@
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1234},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1234"},
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 1233},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "1233"},
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 1232},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "1232"},
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 1231},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "1231"},
 		TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1239},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1239"},
 	}
 
 	for _, match := range matches {
@@ -684,15 +773,15 @@
 
 	correct := []TeamMatch{
 		TeamMatch{
-			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 1, TeamNumber: 6835},
+			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 1, TeamNumber: "6835"},
 		TeamMatch{
-			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 4834},
+			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "4834"},
 		TeamMatch{
-			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 9824},
+			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "9824"},
 		TeamMatch{
-			MatchNumber: 7, SetNumber: 2, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: 3732},
+			MatchNumber: 7, SetNumber: 2, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: "3732"},
 		TeamMatch{
-			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: 3732},
+			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: "3732"},
 	}
 
 	for i := 0; i < len(correct); i++ {
@@ -714,11 +803,11 @@
 
 	testDatabase := []TeamMatch{
 		TeamMatch{
-			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 4464},
+			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "4464"},
 		TeamMatch{
-			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 2352},
+			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "2352"},
 		TeamMatch{
-			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 6321},
+			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "6321"},
 	}
 
 	for i := 0; i < len(testDatabase); i++ {
@@ -728,9 +817,9 @@
 
 	correct := []TeamMatch{
 		TeamMatch{
-			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 2352},
+			MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "2352"},
 		TeamMatch{
-			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 6321},
+			MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "6321"},
 	}
 
 	got, err := fixture.db.ReturnMatches()
@@ -865,17 +954,17 @@
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1236},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1236"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 1237},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "1237"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 1238},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "1238"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 1239},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "1239"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1233},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1233"},
 	}
 
 	for _, match := range matches {
@@ -909,7 +998,8 @@
 			ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
 			HighCubes: 1, CubesDropped: 0, LowCones: 2,
 			MiddleCones: 0, HighCones: 2, ConesDropped: 1,
-			AvgCycle: 51, CollectedBy: "isaac",
+			AvgCycle: 51, DockedAuto: true, EngagedAuto: true,
+			Docked: false, Engaged: false, CollectedBy: "isaac",
 		},
 		Stats2023{
 			TeamNumber: "5443", MatchNumber: 2, SetNumber: 1,
@@ -919,7 +1009,8 @@
 			ConesDroppedAuto: 0, LowCubes: 2, MiddleCubes: 2,
 			HighCubes: 1, CubesDropped: 0, LowCones: 1,
 			MiddleCones: 0, HighCones: 2, ConesDropped: 1,
-			AvgCycle: 39, CollectedBy: "jack",
+			AvgCycle: 39, DockedAuto: false, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "jack",
 		},
 		Stats2023{
 			TeamNumber: "5436", MatchNumber: 2, SetNumber: 1,
@@ -929,7 +1020,8 @@
 			ConesDroppedAuto: 1, LowCubes: 2, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 1,
 			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 45, CollectedBy: "martin",
+			AvgCycle: 45, DockedAuto: true, EngagedAuto: false,
+			Docked: false, Engaged: false, CollectedBy: "martin",
 		},
 		Stats2023{
 			TeamNumber: "5643", MatchNumber: 2, SetNumber: 1,
@@ -939,19 +1031,20 @@
 			ConesDroppedAuto: 1, LowCubes: 2, MiddleCubes: 2,
 			HighCubes: 0, CubesDropped: 0, LowCones: 2,
 			MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-			AvgCycle: 34, CollectedBy: "unknown",
+			AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+			Docked: true, Engaged: false, CollectedBy: "unknown",
 		},
 	}
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 2343},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "2343"},
 		TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 5443},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "5443"},
 		TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 5436},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "5436"},
 		TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 5643},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "5643"},
 	}
 
 	for _, match := range matches {
@@ -1004,17 +1097,17 @@
 
 	matches := []TeamMatch{
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+			Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 2, TeamNumber: 1236},
+			Alliance: "R", AlliancePosition: 2, TeamNumber: "1236"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "R", AlliancePosition: 3, TeamNumber: 1237},
+			Alliance: "R", AlliancePosition: 3, TeamNumber: "1237"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 1, TeamNumber: 1238},
+			Alliance: "B", AlliancePosition: 1, TeamNumber: "1238"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 2, TeamNumber: 1239},
+			Alliance: "B", AlliancePosition: 2, TeamNumber: "1239"},
 		TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
-			Alliance: "B", AlliancePosition: 3, TeamNumber: 1233},
+			Alliance: "B", AlliancePosition: 3, TeamNumber: "1233"},
 	}
 
 	for _, match := range matches {
diff --git a/scouting/scraping/scrape.go b/scouting/scraping/scrape.go
index 9cb2336..c6aa6f4 100644
--- a/scouting/scraping/scrape.go
+++ b/scouting/scraping/scrape.go
@@ -78,7 +78,7 @@
 
 	defer resp.Body.Close()
 	if resp.StatusCode != 200 {
-		return nil, errors.New(fmt.Sprint("Got unexpected status code from TBA API request: ", resp.Status))
+		return nil, errors.New(fmt.Sprint("Got unexpected status code from TBA API request ", req.URL, ": ", resp.Status))
 	}
 
 	// Get all bytes from response body.
diff --git a/scouting/webserver/main.go b/scouting/webserver/main.go
index 4752cf4..1b5a002 100644
--- a/scouting/webserver/main.go
+++ b/scouting/webserver/main.go
@@ -9,6 +9,7 @@
 	"log"
 	"os"
 	"os/signal"
+	"path"
 	"strconv"
 	"syscall"
 	"time"
@@ -58,6 +59,14 @@
 	return 8080
 }
 
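+// getDefaultBlueAllianceConfig returns the default path of The Blue Alliance
+// (TBA) JSON config. When run via "bazel run", BUILD_WORKSPACE_DIRECTORY
+// points at the workspace root, so the config is resolved relative to it.
+//
+// A scouting_config.json might look roughly like this (the values, and
+// whether "year" is a number or a string, are illustrative assumptions; see
+// the -tba_config flag help below for the required fields):
+//
+//   {
+//     "api_key": "your-tba-api-key",
+//     "year": 2023,
+//     "event_code": "2023cafr",
+//     "base_url": "https://www.thebluealliance.com"
+//   }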
+func getDefaultBlueAllianceConfig() string {
+	workspaceDirectory := os.Getenv("BUILD_WORKSPACE_DIRECTORY")
+	if workspaceDirectory != "" {
+		return path.Join(workspaceDirectory, "scouting_config.json")
+	}
+	return "scouting_config.json"
+}
+
 func main() {
 	portPtr := flag.Int("port", getDefaultPort(), "The port number to bind to.")
 	dirPtr := flag.String("directory", ".", "The directory to serve at /.")
@@ -71,9 +80,11 @@
 			"-db_config.")
 	dbConnectRetries := flag.Int("db_retries", 5,
 		"The number of seconds to retry connecting to the database on startup.")
-	blueAllianceConfigPtr := flag.String("tba_config", "",
+	blueAllianceConfigPtr := flag.String("tba_config", getDefaultBlueAllianceConfig(),
 		"The path to your The Blue Alliance JSON config. "+
 			"It needs an \"api_key\" field with your TBA API key. "+
+			"It needs a \"year\" field with the event year. "+
+			"It needs an \"event_code\" field with the event code. "+
 			"Optionally, it can have a \"base_url\" field with the TBA API base URL.")
 	flag.Parse()
 
diff --git a/scouting/webserver/match_list/match_list.go b/scouting/webserver/match_list/match_list.go
index 9029438..c5af661 100644
--- a/scouting/webserver/match_list/match_list.go
+++ b/scouting/webserver/match_list/match_list.go
@@ -97,32 +97,32 @@
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "R", AlliancePosition: 1, TeamNumber: red[0],
+				Alliance: "R", AlliancePosition: 1, TeamNumber: strconv.Itoa(int(red[0])),
 			},
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "R", AlliancePosition: 2, TeamNumber: red[1],
+				Alliance: "R", AlliancePosition: 2, TeamNumber: strconv.Itoa(int(red[1])),
 			},
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "R", AlliancePosition: 3, TeamNumber: red[2],
+				Alliance: "R", AlliancePosition: 3, TeamNumber: strconv.Itoa(int(red[2])),
 			},
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "B", AlliancePosition: 1, TeamNumber: blue[0],
+				Alliance: "B", AlliancePosition: 1, TeamNumber: strconv.Itoa(int(blue[0])),
 			},
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "B", AlliancePosition: 2, TeamNumber: blue[1],
+				Alliance: "B", AlliancePosition: 2, TeamNumber: strconv.Itoa(int(blue[1])),
 			},
 			{
 				MatchNumber: int32(match.MatchNumber),
 				SetNumber:   int32(match.SetNumber), CompLevel: match.CompLevel,
-				Alliance: "B", AlliancePosition: 3, TeamNumber: blue[2],
+				Alliance: "B", AlliancePosition: 3, TeamNumber: strconv.Itoa(int(blue[2])),
 			},
 		}
 
diff --git a/scouting/webserver/requests/BUILD b/scouting/webserver/requests/BUILD
index 4c4870c..935d721 100644
--- a/scouting/webserver/requests/BUILD
+++ b/scouting/webserver/requests/BUILD
@@ -60,6 +60,7 @@
         "//scouting/webserver/requests/messages:request_notes_for_team_go_fbs",
         "//scouting/webserver/requests/messages:request_shift_schedule_go_fbs",
         "//scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs",
+        "//scouting/webserver/requests/messages:submit_actions_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_go_fbs",
         "//scouting/webserver/requests/messages:submit_data_scouting_response_go_fbs",
         "//scouting/webserver/requests/messages:submit_driver_ranking_go_fbs",
diff --git a/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs b/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
index d9d36b3..cd6afc6 100644
--- a/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
+++ b/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
@@ -24,13 +24,18 @@
   middle_cones:int (id:16);
   high_cones:int (id:17);
   cones_dropped:int (id:18);
-  avg_cycle:int (id:19);
+  // Time in nanoseconds.
+  avg_cycle:int64 (id:19);
+  docked_auto:bool (id:20);
+  engaged_auto:bool (id:23);
+  docked:bool (id:25);
+  engaged:bool (id:26);
 
-  collected_by:string (id:20);
+  collected_by:string (id:24);
 }
 
 table Request2023DataScoutingResponse {
     stats_list:[Stats2023] (id:0);
 }
 
-root_type Request2023DataScoutingResponse;
\ No newline at end of file
+root_type Request2023DataScoutingResponse;
diff --git a/scouting/webserver/requests/messages/request_all_matches_response.fbs b/scouting/webserver/requests/messages/request_all_matches_response.fbs
index 9d3be62..55da7bb 100644
--- a/scouting/webserver/requests/messages/request_all_matches_response.fbs
+++ b/scouting/webserver/requests/messages/request_all_matches_response.fbs
@@ -1,15 +1,28 @@
 namespace scouting.webserver.requests;
 
+// Specifies, for each team in this match, whether it has been data scouted.
+table ScoutedLevel {
+    r1: bool (id: 0);
+    r2: bool (id: 1);
+    r3: bool (id: 2);
+    b1: bool (id: 3);
+    b2: bool (id: 4);
+    b3: bool (id: 5);
+}
+
 table Match {
     match_number:int (id: 0);
     set_number:int (id: 1);
     comp_level:string (id: 2);
-    r1:int (id: 3);
-    r2:int (id: 4);
-    r3:int (id: 5);
-    b1:int (id: 6);
-    b2:int (id: 7);
-    b3:int (id: 8);
+    r1:string (id: 3);
+    r2:string (id: 4);
+    r3:string (id: 5);
+    b1:string (id: 6);
+    b2:string (id: 7);
+    b3:string (id: 8);
+
+    // Tells you how completely we've data scouted this match.
+    data_scouted: ScoutedLevel (id: 9);
 }
 
 table RequestAllMatchesResponse  {
diff --git a/scouting/webserver/requests/messages/submit_actions.fbs b/scouting/webserver/requests/messages/submit_actions.fbs
index dfb980f..5488a79 100644
--- a/scouting/webserver/requests/messages/submit_actions.fbs
+++ b/scouting/webserver/requests/messages/submit_actions.fbs
@@ -50,12 +50,15 @@
 }
 
 table Action {
-    timestamp:int (id:0);
+    timestamp:int64 (id:0);
     action_taken:ActionType (id:2);
 }
 
 table SubmitActions {
-    actions_list:[Action] (id:0);
+    team_number:string (id: 0);
+    match_number:int (id: 1);
+    set_number:int (id: 2);
+    comp_level:string (id: 3);
+    actions_list:[Action] (id:4);
+    collected_by:string (id: 5);
 }
-
-root_type SubmitActions;
\ No newline at end of file
diff --git a/scouting/webserver/requests/requests.go b/scouting/webserver/requests/requests.go
index 467542a..4f12a4f 100644
--- a/scouting/webserver/requests/requests.go
+++ b/scouting/webserver/requests/requests.go
@@ -79,6 +79,7 @@
 	ReturnAllShifts() ([]db.Shift, error)
 	ReturnStats() ([]db.Stats, error)
 	ReturnStats2023() ([]db.Stats2023, error)
+	ReturnStats2023ForTeam(teamNumber string, matchNumber int32, setNumber int32, compLevel string) ([]db.Stats2023, error)
 	QueryAllShifts(int) ([]db.Shift, error)
 	QueryStats(int) ([]db.Stats, error)
 	QueryNotes(int32) ([]string, error)
@@ -212,6 +213,16 @@
 	db Database
 }
 
+// The database stores one match entry per team, while the webserver still
+// uses the old match structure (one entry per match). The information in
+// this key identifies which database entries belong to which old-style
+// match structure object.
+type MatchAssemblyKey struct {
+	MatchNumber int32
+	SetNumber   int32
+	CompLevel   string
+}
+
 func findIndexInList(list []string, comp_level string) (int, error) {
 	for index, value := range list {
 		if value == comp_level {
@@ -221,6 +232,15 @@
 	return -1, errors.New(fmt.Sprint("Failed to find comp level ", comp_level, " in list ", list))
 }
 
+func (handler requestAllMatchesHandler) teamHasBeenDataScouted(key MatchAssemblyKey, teamNumber string) (bool, error) {
+	stats, err := handler.db.ReturnStats2023ForTeam(
+		teamNumber, key.MatchNumber, key.SetNumber, key.CompLevel)
+	if err != nil {
+		return false, err
+	}
+	return (len(stats) > 0), nil
+}
+
 func (handler requestAllMatchesHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	requestBytes, err := io.ReadAll(req.Body)
 	if err != nil {
@@ -239,35 +259,42 @@
 		return
 	}
 
-	// Change structure of match objects in the database(1 per team) to
-	// the old match structure(1 per match) that the webserver uses.
-	type Key struct {
-		MatchNumber int32
-		SetNumber   int32
-		CompLevel   string
-	}
-
-	assembledMatches := map[Key]request_all_matches_response.MatchT{}
+	assembledMatches := map[MatchAssemblyKey]request_all_matches_response.MatchT{}
 
 	for _, match := range matches {
-		key := Key{match.MatchNumber, match.SetNumber, match.CompLevel}
+		key := MatchAssemblyKey{match.MatchNumber, match.SetNumber, match.CompLevel}
+
+		// Retrieve the converted match structure we have assembled so
+		// far. If we haven't started assembling one yet, then start a
+		// new one.
 		entry, ok := assembledMatches[key]
 		if !ok {
 			entry = request_all_matches_response.MatchT{
 				MatchNumber: match.MatchNumber,
 				SetNumber:   match.SetNumber,
 				CompLevel:   match.CompLevel,
+				DataScouted: &request_all_matches_response.ScoutedLevelT{},
 			}
 		}
+
+		var team *string
+		var dataScoutedTeam *bool
+
+		// Fill in the field for the match that we have in the
+		// database. In the database, each match row only has 1 team
+		// number.
 		switch match.Alliance {
 		case "R":
 			switch match.AlliancePosition {
 			case 1:
-				entry.R1 = match.TeamNumber
+				team = &entry.R1
+				dataScoutedTeam = &entry.DataScouted.R1
 			case 2:
-				entry.R2 = match.TeamNumber
+				team = &entry.R2
+				dataScoutedTeam = &entry.DataScouted.R2
 			case 3:
-				entry.R3 = match.TeamNumber
+				team = &entry.R3
+				dataScoutedTeam = &entry.DataScouted.R3
 			default:
 				respondWithError(w, http.StatusInternalServerError, fmt.Sprint("Unknown red position ", strconv.Itoa(int(match.AlliancePosition)), " in match ", strconv.Itoa(int(match.MatchNumber))))
 				return
@@ -275,11 +302,14 @@
 		case "B":
 			switch match.AlliancePosition {
 			case 1:
-				entry.B1 = match.TeamNumber
+				team = &entry.B1
+				dataScoutedTeam = &entry.DataScouted.B1
 			case 2:
-				entry.B2 = match.TeamNumber
+				team = &entry.B2
+				dataScoutedTeam = &entry.DataScouted.B2
 			case 3:
-				entry.B3 = match.TeamNumber
+				team = &entry.B3
+				dataScoutedTeam = &entry.DataScouted.B3
 			default:
 				respondWithError(w, http.StatusInternalServerError, fmt.Sprint("Unknown blue position ", strconv.Itoa(int(match.AlliancePosition)), " in match ", strconv.Itoa(int(match.MatchNumber))))
 				return
@@ -288,6 +318,21 @@
 			respondWithError(w, http.StatusInternalServerError, fmt.Sprint("Unknown alliance ", match.Alliance, " in match ", strconv.Itoa(int(match.AlliancePosition))))
 			return
 		}
+
+		*team = match.TeamNumber
+
+		// Figure out if this team has been data scouted already.
+		*dataScoutedTeam, err = handler.teamHasBeenDataScouted(key, match.TeamNumber)
+		if err != nil {
+			respondWithError(w, http.StatusInternalServerError, fmt.Sprint(
+				"Failed to determine data scouting status for team ",
+				match.TeamNumber,
+				" in match ",
+				strconv.Itoa(int(match.MatchNumber)),
+				": ", err))
+			return
+		}
+
 		assembledMatches[key] = entry
 	}
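
A side note on the loop above: Go map values are not addressable, so the handler works on a local copy of the assembled entry, fills exactly one alliance slot through a field pointer, and then writes the copy back into the map. Below is a minimal, self-contained sketch of that pattern using simplified, hypothetical types rather than the generated flatbuffer ones.

package main

import "fmt"

// Hypothetical stand-ins for the real key and match types.
type matchKey struct {
	matchNumber, setNumber int32
	compLevel              string
}

type assembledMatch struct {
	r1, r2, r3 string
}

func main() {
	rows := []struct {
		key      matchKey
		position int32
		team     string
	}{
		{matchKey{1, 1, "qm"}, 1, "971"},
		{matchKey{1, 1, "qm"}, 2, "254"},
		{matchKey{1, 1, "qm"}, 3, "1678"},
	}

	assembled := map[matchKey]assembledMatch{}
	for _, row := range rows {
		// Map values are not addressable, so grab a copy of the entry.
		entry := assembled[row.key]

		var slot *string
		switch row.position {
		case 1:
			slot = &entry.r1
		case 2:
			slot = &entry.r2
		case 3:
			slot = &entry.r3
		default:
			continue
		}
		*slot = row.team

		// Write the modified copy back so the update is not lost.
		assembled[row.key] = entry
	}

	fmt.Println(assembled[matchKey{1, 1, "qm"}]) // {971 254 1678}
}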
 
@@ -454,6 +499,122 @@
 	w.Write(builder.FinishedBytes())
 }
 
+func ConvertActionsToStat(submitActions *submit_actions.SubmitActions) (db.Stats2023, error) {
+	overall_time := int64(0)
+	cycles := int64(0)
+	picked_up := false
+	lastPlacedTime := int64(0)
+	stat := db.Stats2023{TeamNumber: string(submitActions.TeamNumber()), MatchNumber: submitActions.MatchNumber(), SetNumber: submitActions.SetNumber(), CompLevel: string(submitActions.CompLevel()),
+		StartingQuadrant: 0, LowCubesAuto: 0, MiddleCubesAuto: 0, HighCubesAuto: 0, CubesDroppedAuto: 0,
+		LowConesAuto: 0, MiddleConesAuto: 0, HighConesAuto: 0, ConesDroppedAuto: 0, LowCubes: 0, MiddleCubes: 0, HighCubes: 0,
+		CubesDropped: 0, LowCones: 0, MiddleCones: 0, HighCones: 0, ConesDropped: 0, AvgCycle: 0, CollectedBy: string(submitActions.CollectedBy()),
+	}
+	// Loop over all actions.
+	for i := 0; i < submitActions.ActionsListLength(); i++ {
+		var action submit_actions.Action
+		if !submitActions.ActionsList(&action, i) {
+			return db.Stats2023{}, errors.New("Failed to parse submit_actions.Action")
+		}
+		actionTable := new(flatbuffers.Table)
+		action_type := action.ActionTakenType()
+		if !action.ActionTaken(actionTable) {
+			return db.Stats2023{}, errors.New(fmt.Sprint("Failed to parse sub-action or sub-action was missing"))
+		}
+		if action_type == submit_actions.ActionTypeStartMatchAction {
+			var startMatchAction submit_actions.StartMatchAction
+			startMatchAction.Init(actionTable.Bytes, actionTable.Pos)
+			stat.StartingQuadrant = startMatchAction.Position()
+		} else if action_type == submit_actions.ActionTypeAutoBalanceAction {
+			var autoBalanceAction submit_actions.AutoBalanceAction
+			autoBalanceAction.Init(actionTable.Bytes, actionTable.Pos)
+			if autoBalanceAction.Docked() {
+				stat.DockedAuto = true
+			}
+			if autoBalanceAction.Engaged() {
+				stat.EngagedAuto = true
+			}
+		} else if action_type == submit_actions.ActionTypePickupObjectAction {
+			var pick_up_action submit_actions.PickupObjectAction
+			pick_up_action.Init(actionTable.Bytes, actionTable.Pos)
+			// Picking up while we already hold something means the
+			// previously held object was dropped.
+			if picked_up {
+				object := pick_up_action.ObjectType().String()
+				auto := pick_up_action.Auto()
+				if object == "kCube" && auto == false {
+					stat.CubesDropped += 1
+				} else if object == "kCube" && auto == true {
+					stat.CubesDroppedAuto += 1
+				} else if object == "kCone" && auto == false {
+					stat.ConesDropped += 1
+				} else if object == "kCone" && auto == true {
+					stat.ConesDroppedAuto += 1
+				}
+			} else {
+				picked_up = true
+			}
+		} else if action_type == submit_actions.ActionTypePlaceObjectAction {
+			var place_action submit_actions.PlaceObjectAction
+			place_action.Init(actionTable.Bytes, actionTable.Pos)
+			if !picked_up {
+				return db.Stats2023{}, errors.New("Got PlaceObjectAction without corresponding PickupObjectAction")
+			}
+			object := place_action.ObjectType()
+			level := place_action.ScoreLevel()
+			auto := place_action.Auto()
+			if object == 0 && level == 0 && auto == true {
+				stat.LowCubesAuto += 1
+			} else if object == 0 && level == 0 && auto == false {
+				stat.LowCubes += 1
+			} else if object == 0 && level == 1 && auto == true {
+				stat.MiddleCubesAuto += 1
+			} else if object == 0 && level == 1 && auto == false {
+				stat.MiddleCubes += 1
+			} else if object == 0 && level == 2 && auto == true {
+				stat.HighCubesAuto += 1
+			} else if object == 0 && level == 2 && auto == false {
+				stat.HighCubes += 1
+			} else if object == 1 && level == 0 && auto == true {
+				stat.LowConesAuto += 1
+			} else if object == 1 && level == 0 && auto == false {
+				stat.LowCones += 1
+			} else if object == 1 && level == 1 && auto == true {
+				stat.MiddleConesAuto += 1
+			} else if object == 1 && level == 1 && auto == false {
+				stat.MiddleCones += 1
+			} else if object == 1 && level == 2 && auto == true {
+				stat.HighConesAuto += 1
+			} else if object == 1 && level == 2 && auto == false {
+				stat.HighCones += 1
+			} else {
+				return db.Stats2023{}, errors.New("Got unknown ObjectType/ScoreLevel/Auto combination")
+			}
+			picked_up = false
+			if lastPlacedTime != int64(0) {
+				// If this is not the first time we place,
+				// start counting cycle time. We define cycle
+				// time as the time between placements.
+				overall_time += int64(action.Timestamp()) - lastPlacedTime
+				cycles += 1
+			}
+			lastPlacedTime = int64(action.Timestamp())
+		} else if action_type == submit_actions.ActionTypeEndMatchAction {
+			var endMatchAction submit_actions.EndMatchAction
+			endMatchAction.Init(actionTable.Bytes, actionTable.Pos)
+			if endMatchAction.Docked() {
+				stat.Docked = true
+			}
+			if endMatchAction.Engaged() {
+				stat.Engaged = true
+			}
+		}
+	}
+	if cycles != 0 {
+		stat.AvgCycle = overall_time / cycles
+	} else {
+		stat.AvgCycle = 0
+	}
+	return stat, nil
+}
+
 // Handles a Request2023DataScouting request.
 type request2023DataScoutingHandler struct {
 	db Database
@@ -502,6 +663,10 @@
 			HighCones:        stat.HighCones,
 			ConesDropped:     stat.ConesDropped,
 			AvgCycle:         stat.AvgCycle,
+			DockedAuto:       stat.DockedAuto,
+			EngagedAuto:      stat.EngagedAuto,
+			Docked:           stat.Docked,
+			Engaged:          stat.Engaged,
 			CollectedBy:      stat.CollectedBy,
 		})
 	}
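
One detail worth spelling out about ConvertActionsToStat: cycle-time bookkeeping only starts at the second placement. Each PlaceObjectAction after the first contributes the time since the previous placement, and AvgCycle is the integer mean over those gaps (0 if there was at most one placement). Here is a standalone sketch of just that arithmetic, using the same hypothetical nanosecond timestamps that appear in the test below.

package main

import "fmt"

// averageCycleNs mirrors the cycle-time accounting above: a "cycle" is the
// gap between consecutive placement timestamps, and the average is integer
// division over the number of gaps.
func averageCycleNs(placementTimestampsNs []int64) int64 {
	overallTime := int64(0)
	cycles := int64(0)
	lastPlacedTime := int64(0)
	for _, timestamp := range placementTimestampsNs {
		if lastPlacedTime != 0 {
			overallTime += timestamp - lastPlacedTime
			cycles++
		}
		lastPlacedTime = timestamp
	}
	if cycles == 0 {
		return 0
	}
	return overallTime / cycles
}

func main() {
	// Placements at 2000 ns and 3100 ns -> a single 1100 ns cycle.
	fmt.Println(averageCycleNs([]int64{2000, 3100})) // prints 1100
}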
diff --git a/scouting/webserver/requests/requests_test.go b/scouting/webserver/requests/requests_test.go
index efd770b..d2d5acc 100644
--- a/scouting/webserver/requests/requests_test.go
+++ b/scouting/webserver/requests/requests_test.go
@@ -23,6 +23,7 @@
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response"
+	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_actions"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting_response"
 	"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_driver_ranking"
@@ -127,75 +128,100 @@
 		matches: []db.TeamMatch{
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 1, TeamNumber: 5,
+				Alliance: "R", AlliancePosition: 1, TeamNumber: "5",
 			},
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 2, TeamNumber: 42,
+				Alliance: "R", AlliancePosition: 2, TeamNumber: "42",
 			},
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 3, TeamNumber: 600,
+				Alliance: "R", AlliancePosition: 3, TeamNumber: "600",
 			},
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 1, TeamNumber: 971,
+				Alliance: "B", AlliancePosition: 1, TeamNumber: "971",
 			},
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 2, TeamNumber: 400,
+				Alliance: "B", AlliancePosition: 2, TeamNumber: "400",
 			},
 			{
 				MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 3, TeamNumber: 200,
+				Alliance: "B", AlliancePosition: 3, TeamNumber: "200",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 1, TeamNumber: 6,
+				Alliance: "R", AlliancePosition: 1, TeamNumber: "6",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 2, TeamNumber: 43,
+				Alliance: "R", AlliancePosition: 2, TeamNumber: "43",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 3, TeamNumber: 601,
+				Alliance: "R", AlliancePosition: 3, TeamNumber: "601",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 1, TeamNumber: 972,
+				Alliance: "B", AlliancePosition: 1, TeamNumber: "972",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 2, TeamNumber: 401,
+				Alliance: "B", AlliancePosition: 2, TeamNumber: "401",
 			},
 			{
 				MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 3, TeamNumber: 201,
+				Alliance: "B", AlliancePosition: 3, TeamNumber: "201",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 1, TeamNumber: 7,
+				Alliance: "R", AlliancePosition: 1, TeamNumber: "7",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 2, TeamNumber: 44,
+				Alliance: "R", AlliancePosition: 2, TeamNumber: "44",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "R", AlliancePosition: 3, TeamNumber: 602,
+				Alliance: "R", AlliancePosition: 3, TeamNumber: "602",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 1, TeamNumber: 973,
+				Alliance: "B", AlliancePosition: 1, TeamNumber: "973",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 2, TeamNumber: 402,
+				Alliance: "B", AlliancePosition: 2, TeamNumber: "402",
 			},
 			{
 				MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
-				Alliance: "B", AlliancePosition: 3, TeamNumber: 202,
+				Alliance: "B", AlliancePosition: 3, TeamNumber: "202",
+			},
+		},
+		// Pretend that we have some data scouting data.
+		stats2023: []db.Stats2023{
+			{
+				TeamNumber: "5", MatchNumber: 1, SetNumber: 1,
+				CompLevel: "qm", StartingQuadrant: 3, LowCubesAuto: 10,
+				MiddleCubesAuto: 1, HighCubesAuto: 1, CubesDroppedAuto: 0,
+				LowConesAuto: 1, MiddleConesAuto: 2, HighConesAuto: 1,
+				ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
+				HighCubes: 2, CubesDropped: 1, LowCones: 1,
+				MiddleCones: 2, HighCones: 0, ConesDropped: 1,
+				AvgCycle: 34, DockedAuto: true, EngagedAuto: true,
+				Docked: false, Engaged: false, CollectedBy: "alex",
+			},
+			{
+				TeamNumber: "973", MatchNumber: 3, SetNumber: 1,
+				CompLevel: "qm", StartingQuadrant: 1, LowCubesAuto: 0,
+				MiddleCubesAuto: 1, HighCubesAuto: 1, CubesDroppedAuto: 2,
+				LowConesAuto: 0, MiddleConesAuto: 0, HighConesAuto: 0,
+				ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
+				HighCubes: 1, CubesDropped: 0, LowCones: 0,
+				MiddleCones: 2, HighCones: 1, ConesDropped: 1,
+				AvgCycle: 53, DockedAuto: true, EngagedAuto: false,
+				Docked: false, Engaged: false, CollectedBy: "bob",
 			},
 		},
 	}
@@ -218,15 +244,28 @@
 			// R1, R2, R3, B1, B2, B3
 			{
 				1, 1, "qm",
-				5, 42, 600, 971, 400, 200,
+				"5", "42", "600", "971", "400", "200",
+				&request_all_matches_response.ScoutedLevelT{
+					// The R1 team has already been data
+					// scouted.
+					true, false, false, false, false, false,
+				},
 			},
 			{
 				2, 1, "qm",
-				6, 43, 601, 972, 401, 201,
+				"6", "43", "601", "972", "401", "201",
+				&request_all_matches_response.ScoutedLevelT{
+					false, false, false, false, false, false,
+				},
 			},
 			{
 				3, 1, "qm",
-				7, 44, 602, 973, 402, 202,
+				"7", "44", "602", "973", "402", "202",
+				&request_all_matches_response.ScoutedLevelT{
+					// The B1 team has already been data
+					// scouted.
+					false, false, false, true, false, false,
+				},
 			},
 		},
 	}
@@ -330,7 +369,8 @@
 				ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
 				HighCubes: 2, CubesDropped: 1, LowCones: 1,
 				MiddleCones: 2, HighCones: 0, ConesDropped: 1,
-				AvgCycle: 34, CollectedBy: "isaac",
+				AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+				Docked: false, Engaged: false, CollectedBy: "isaac",
 			},
 			{
 				TeamNumber: "2343", MatchNumber: 1, SetNumber: 2,
@@ -340,7 +380,8 @@
 				ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
 				HighCubes: 1, CubesDropped: 0, LowCones: 0,
 				MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-				AvgCycle: 53, CollectedBy: "unknown",
+				AvgCycle: 53, DockedAuto: false, EngagedAuto: false,
+				Docked: false, Engaged: false, CollectedBy: "unknown",
 			},
 		},
 	}
@@ -367,7 +408,8 @@
 				ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
 				HighCubes: 2, CubesDropped: 1, LowCones: 1,
 				MiddleCones: 2, HighCones: 0, ConesDropped: 1,
-				AvgCycle: 34, CollectedBy: "isaac",
+				AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+				Docked: false, Engaged: false, CollectedBy: "isaac",
 			},
 			{
 				TeamNumber: "2343", MatchNumber: 1, SetNumber: 2,
@@ -377,7 +419,8 @@
 				ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
 				HighCubes: 1, CubesDropped: 0, LowCones: 0,
 				MiddleCones: 2, HighCones: 1, ConesDropped: 1,
-				AvgCycle: 53, CollectedBy: "unknown",
+				AvgCycle: 53, DockedAuto: false, EngagedAuto: false,
+				Docked: false, Engaged: false, CollectedBy: "unknown",
 			},
 		},
 	}
@@ -391,6 +434,124 @@
 	}
 }
 
+// Validates that we can convert a list of actions into a Stats2023 record.
+func TestConvertActionsToStat(t *testing.T) {
+	builder := flatbuffers.NewBuilder(1024)
+	builder.Finish((&submit_actions.SubmitActionsT{
+		TeamNumber:  "4244",
+		MatchNumber: 3,
+		SetNumber:   1,
+		CompLevel:   "quals",
+		CollectedBy: "katie",
+		ActionsList: []*submit_actions.ActionT{
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypeStartMatchAction,
+					Value: &submit_actions.StartMatchActionT{
+						Position: 1,
+					},
+				},
+				Timestamp: 0,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypePickupObjectAction,
+					Value: &submit_actions.PickupObjectActionT{
+						ObjectType: submit_actions.ObjectTypekCube,
+						Auto:       true,
+					},
+				},
+				Timestamp: 400,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypePickupObjectAction,
+					Value: &submit_actions.PickupObjectActionT{
+						ObjectType: submit_actions.ObjectTypekCube,
+						Auto:       true,
+					},
+				},
+				Timestamp: 800,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypePlaceObjectAction,
+					Value: &submit_actions.PlaceObjectActionT{
+						ObjectType: submit_actions.ObjectTypekCube,
+						ScoreLevel: submit_actions.ScoreLevelkLow,
+						Auto:       true,
+					},
+				},
+				Timestamp: 2000,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypeAutoBalanceAction,
+					Value: &submit_actions.AutoBalanceActionT{
+						Docked:  true,
+						Engaged: true,
+					},
+				},
+				Timestamp: 2400,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypePickupObjectAction,
+					Value: &submit_actions.PickupObjectActionT{
+						ObjectType: submit_actions.ObjectTypekCone,
+						Auto:       false,
+					},
+				},
+				Timestamp: 2800,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypePlaceObjectAction,
+					Value: &submit_actions.PlaceObjectActionT{
+						ObjectType: submit_actions.ObjectTypekCone,
+						ScoreLevel: submit_actions.ScoreLevelkHigh,
+						Auto:       false,
+					},
+				},
+				Timestamp: 3100,
+			},
+			{
+				ActionTaken: &submit_actions.ActionTypeT{
+					Type: submit_actions.ActionTypeEndMatchAction,
+					Value: &submit_actions.EndMatchActionT{
+						Docked:  true,
+						Engaged: false,
+					},
+				},
+				Timestamp: 4000,
+			},
+		},
+	}).Pack(builder))
+
+	submitActions := submit_actions.GetRootAsSubmitActions(builder.FinishedBytes(), 0)
+	response, err := ConvertActionsToStat(submitActions)
+
+	if err != nil {
+		t.Fatal("Failed to convert actions to stats: ", err)
+	}
+
+	expected := db.Stats2023{
+		TeamNumber: "4244", MatchNumber: 3, SetNumber: 1,
+		CompLevel: "quals", StartingQuadrant: 1, LowCubesAuto: 1,
+		MiddleCubesAuto: 0, HighCubesAuto: 0, CubesDroppedAuto: 1,
+		LowConesAuto: 0, MiddleConesAuto: 0, HighConesAuto: 0,
+		ConesDroppedAuto: 0, LowCubes: 0, MiddleCubes: 0,
+		HighCubes: 0, CubesDropped: 0, LowCones: 0,
+		MiddleCones: 0, HighCones: 1, ConesDropped: 0,
+		AvgCycle: 1100, DockedAuto: true, EngagedAuto: true,
+		Docked: true, Engaged: false, CollectedBy: "katie",
+	}
+
+	if expected != response {
+		t.Fatal("Expected ", expected, ", but got ", response)
+	}
+}
+
 func TestSubmitNotes(t *testing.T) {
 	database := MockDatabase{}
 	scoutingServer := server.NewScoutingServer()
@@ -776,6 +937,16 @@
 	return database.stats2023, nil
 }
 
+func (database *MockDatabase) ReturnStats2023ForTeam(teamNumber string, matchNumber int32, setNumber int32, compLevel string) ([]db.Stats2023, error) {
+	var results []db.Stats2023
+	for _, stats := range database.stats2023 {
+		if stats.TeamNumber == teamNumber && stats.MatchNumber == matchNumber && stats.SetNumber == setNumber && stats.CompLevel == compLevel {
+			results = append(results, stats)
+		}
+	}
+	return results, nil
+}
+
 func (database *MockDatabase) QueryStats(int) ([]db.Stats, error) {
 	return []db.Stats{}, nil
 }
diff --git a/scouting/www/entry/entry.component.ts b/scouting/www/entry/entry.component.ts
index 44fc958..aef97f7 100644
--- a/scouting/www/entry/entry.component.ts
+++ b/scouting/www/entry/entry.component.ts
@@ -118,7 +118,6 @@
   matchStartTimestamp: number = 0;
 
   addAction(action: ActionT): void {
-    action.timestamp = Math.floor(Date.now() / 1000);
     if (action.type == 'startMatchAction') {
       // Unix nanosecond timestamp.
       this.matchStartTimestamp = Date.now() * 1e6;
@@ -193,7 +192,7 @@
             StartMatchAction.createStartMatchAction(builder, action.position);
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.StartMatchAction,
             startMatchActionOffset
           );
@@ -208,7 +207,7 @@
             );
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.PickupObjectAction,
             pickupObjectActionOffset
           );
@@ -223,7 +222,7 @@
             );
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.AutoBalanceAction,
             autoBalanceActionOffset
           );
@@ -239,7 +238,7 @@
             );
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.PlaceObjectAction,
             placeObjectActionOffset
           );
@@ -250,7 +249,7 @@
             RobotDeathAction.createRobotDeathAction(builder, action.robotOn);
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.RobotDeathAction,
             robotDeathActionOffset
           );
@@ -264,7 +263,7 @@
           );
           actionOffset = Action.createAction(
             builder,
-            action.timestamp || 0,
+            BigInt(action.timestamp || 0),
             ActionType.EndMatchAction,
             endMatchActionOffset
           );
diff --git a/scouting/www/entry/entry.ng.html b/scouting/www/entry/entry.ng.html
index 49967b9..e06ab18 100644
--- a/scouting/www/entry/entry.ng.html
+++ b/scouting/www/entry/entry.ng.html
@@ -89,6 +89,9 @@
   </div>
 
   <div *ngSwitchCase="'Pickup'" id="PickUp" class="container-fluid">
+    <h6 class="text-muted">
+      Last Action: {{actionList[actionList.length - 1].type}}
+    </h6>
     <div class="d-grid gap-5">
       <button class="btn btn-secondary" (click)="undoLastAction()">UNDO</button>
       <button
@@ -112,20 +115,17 @@
       <!-- 'Balancing' during auto. -->
       <div *ngIf="autoPhase" class="d-grid gap-2">
         <label>
-          <input type="checkbox" (change)="dockedValue = $event.target.value" />
+          <input #docked type="checkbox" />
           Docked
         </label>
         <label>
-          <input
-            type="checkbox"
-            (change)="engagedValue = $event.target.value"
-          />
+          <input #engaged type="checkbox" />
           Engaged
         </label>
         <br />
         <button
           class="btn btn-info"
-          (click)="addAction({type: 'autoBalanceAction', docked: dockedValue, engaged: engagedValue});"
+          (click)="addAction({type: 'autoBalanceAction', docked: docked.checked, engaged: engaged.checked});"
         >
           Submit Balancing
         </button>
@@ -148,6 +148,9 @@
   </div>
 
   <div *ngSwitchCase="'Place'" id="Place" class="container-fluid">
+    <h6 class="text-muted">
+      Last Action: {{actionList[actionList.length - 1].type}}
+    </h6>
     <div class="d-grid gap-5">
       <button class="btn btn-secondary" (click)="undoLastAction()">UNDO</button>
       <button
@@ -177,20 +180,17 @@
       <!-- 'Balancing' during auto. -->
       <div *ngIf="autoPhase" class="d-grid gap-2">
         <label>
-          <input type="checkbox" (change)="dockedValue = $event.target.value" />
+          <input #docked type="checkbox" />
           Docked
         </label>
         <label>
-          <input
-            type="checkbox"
-            (change)="engagedValue = $event.target.value"
-          />
+          <input #engaged type="checkbox" />
           Engaged
         </label>
         <br />
         <button
           class="btn btn-info"
-          (click)="addAction({type: 'autoBalanceAction', docked: dockedValue, engaged: engagedValue});"
+          (click)="addAction({type: 'autoBalanceAction', docked: docked.checked, engaged: engaged.checked});"
         >
           Submit Balancing
         </button>
@@ -213,6 +213,9 @@
   </div>
 
   <div *ngSwitchCase="'Endgame'" id="Endgame" class="container-fluid">
+    <h6 class="text-muted">
+      Last Action: {{actionList[actionList.length - 1].type}}
+    </h6>
     <div class="d-grid gap-5">
       <button class="btn btn-secondary" (click)="undoLastAction()">UNDO</button>
       <button
@@ -222,17 +225,17 @@
         DEAD
       </button>
       <label>
-        <input type="checkbox" (change)="dockedValue = $event.target.value" />
+        <input #docked type="checkbox" />
         Docked
       </label>
       <label>
-        <input type="checkbox" (change)="engagedValue = $event.target.value" />
+        <input #engaged type="checkbox" />
         Engaged
       </label>
       <button
         *ngIf="!autoPhase"
         class="btn btn-info"
-        (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: dockedValue, engaged: engagedValue});"
+        (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: docked.checked, engaged: engaged.checked});"
       >
         End Match
       </button>
@@ -248,23 +251,25 @@
       >
         Revive
       </button>
+      <button
+        class="btn btn-info"
+        (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: docked.checked, engaged: engaged.checked});"
+      >
+        End Match
+      </button>
     </div>
   </div>
 
   <div *ngSwitchCase="'Review and Submit'" id="Review" class="container-fluid">
     <div class="d-grid gap-5">
       <button class="btn btn-secondary" (click)="undoLastAction()">UNDO</button>
-      <button
-        *ngIf="!autoPhase"
-        class="btn btn-warning"
-        (click)="submitActions();"
-      >
-        Submit
-      </button>
+      <button class="btn btn-warning" (click)="submitActions();">Submit</button>
     </div>
   </div>
 
   <div *ngSwitchCase="'Success'" id="Success" class="container-fluid">
     <h2>Successfully submitted data.</h2>
   </div>
+
+  <span class="error_message" role="alert">{{ errorMessage }}</span>
 </ng-container>
diff --git a/scouting/www/match_list/match_list.component.css b/scouting/www/match_list/match_list.component.css
index e7c071c..f77be5e 100644
--- a/scouting/www/match_list/match_list.component.css
+++ b/scouting/www/match_list/match_list.component.css
@@ -6,6 +6,10 @@
   background-color: #dc3545;
 }
 
+button:disabled {
+  background-color: #524143;
+}
+
 .blue {
   background-color: #0d6efd;
 }
@@ -22,3 +26,7 @@
   /* minimum touch target size */
   height: 44px;
 }
+
+div.hidden_row {
+  display: none;
+}
diff --git a/scouting/www/match_list/match_list.component.ts b/scouting/www/match_list/match_list.component.ts
index eb5284f..0deeb11 100644
--- a/scouting/www/match_list/match_list.component.ts
+++ b/scouting/www/match_list/match_list.component.ts
@@ -10,7 +10,7 @@
 import {MatchListRequestor} from '@org_frc971/scouting/www/rpc';
 
 type TeamInMatch = {
-  teamNumber: number;
+  teamNumber: string;
   matchNumber: number;
   setNumber: number;
   compLevel: string;
@@ -26,21 +26,85 @@
   progressMessage: string = '';
   errorMessage: string = '';
   matchList: Match[] = [];
+  hideCompletedMatches: boolean = true;
 
   constructor(private readonly matchListRequestor: MatchListRequestor) {}
 
+  // Returns true if the match is fully scouted. Returns false otherwise.
+  matchIsFullyScouted(match: Match): boolean {
+    const scouted = match.dataScouted();
+    return (
+      scouted.r1() &&
+      scouted.r2() &&
+      scouted.r3() &&
+      scouted.b1() &&
+      scouted.b2() &&
+      scouted.b3()
+    );
+  }
+
+  // Returns true if at least one team in this match has been scouted. Returns
+  // false otherwise.
+  matchIsPartiallyScouted(match: Match): boolean {
+    const scouted = match.dataScouted();
+    return (
+      scouted.r1() ||
+      scouted.r2() ||
+      scouted.r3() ||
+      scouted.b1() ||
+      scouted.b2() ||
+      scouted.b3()
+    );
+  }
+
+  // Returns a class for the row to hide it if all teams in this match have
+  // already been scouted.
+  getRowClass(match: Match): string {
+    if (this.hideCompletedMatches && this.matchIsFullyScouted(match)) {
+      return 'hidden_row';
+    }
+    return '';
+  }
+
   setTeamInMatch(teamInMatch: TeamInMatch) {
     this.selectedTeamEvent.emit(teamInMatch);
   }
 
-  teamsInMatch(match: Match): {teamNumber: number; color: 'red' | 'blue'}[] {
+  teamsInMatch(
+    match: Match
+  ): {teamNumber: string; color: 'red' | 'blue'; disabled: boolean}[] {
+    const scouted = match.dataScouted();
     return [
-      {teamNumber: match.r1(), color: 'red'},
-      {teamNumber: match.r2(), color: 'red'},
-      {teamNumber: match.r3(), color: 'red'},
-      {teamNumber: match.b1(), color: 'blue'},
-      {teamNumber: match.b2(), color: 'blue'},
-      {teamNumber: match.b3(), color: 'blue'},
+      {
+        teamNumber: match.r1(),
+        color: 'red',
+        disabled: this.hideCompletedMatches && scouted.r1(),
+      },
+      {
+        teamNumber: match.r2(),
+        color: 'red',
+        disabled: this.hideCompletedMatches && scouted.r2(),
+      },
+      {
+        teamNumber: match.r3(),
+        color: 'red',
+        disabled: this.hideCompletedMatches && scouted.r3(),
+      },
+      {
+        teamNumber: match.b1(),
+        color: 'blue',
+        disabled: this.hideCompletedMatches && scouted.b1(),
+      },
+      {
+        teamNumber: match.b2(),
+        color: 'blue',
+        disabled: this.hideCompletedMatches && scouted.b2(),
+      },
+      {
+        teamNumber: match.b3(),
+        color: 'blue',
+        disabled: this.hideCompletedMatches && scouted.b3(),
+      },
     ];
   }
 
@@ -64,7 +128,22 @@
   displayMatchNumber(match: Match): string {
     // Only display the set number for eliminations matches.
     const setNumber = match.compLevel() == 'qm' ? '' : `${match.setNumber()}`;
-    return `${this.matchType(match)} ${setNumber} Match ${match.matchNumber()}`;
+    const matchType = this.matchType(match);
+    const mainText = `${matchType} ${setNumber} Match ${match.matchNumber()}`;
+
+    // When showing the full match list (i.e. not hiding completed matches)
+    // it's useful to know if a match has already been scouted or not.
+    const suffix = (() => {
+      if (this.matchIsFullyScouted(match)) {
+        return '(fully scouted)';
+      } else if (this.matchIsPartiallyScouted(match)) {
+        return '(partially scouted)';
+      } else {
+        return '';
+      }
+    })();
+
+    return `${mainText} ${suffix}`;
   }
 
   ngOnInit() {
diff --git a/scouting/www/match_list/match_list.ng.html b/scouting/www/match_list/match_list.ng.html
index e890faf..0ebbe4c 100644
--- a/scouting/www/match_list/match_list.ng.html
+++ b/scouting/www/match_list/match_list.ng.html
@@ -2,8 +2,16 @@
   <h2>Matches</h2>
 </div>
 
+<label>
+  <input type="checkbox" [(ngModel)]="hideCompletedMatches" />
+  Hide completed matches
+</label>
+
 <div class="container-fluid">
-  <div class="row" *ngFor="let match of matchList; index as i">
+  <div
+    *ngFor="let match of matchList; index as i"
+    [ngClass]="'row ' + getRowClass(match)"
+  >
     <span class="badge bg-secondary rounded-left">
       {{ displayMatchNumber(match) }}
     </span>
@@ -18,6 +26,7 @@
             })"
         class="match-item"
         [ngClass]="team.color"
+        [disabled]="team.disabled"
       >
         {{ team.teamNumber }}
       </button>
diff --git a/scouting/www/notes/notes.component.ts b/scouting/www/notes/notes.component.ts
index 21264f2..94a1586 100644
--- a/scouting/www/notes/notes.component.ts
+++ b/scouting/www/notes/notes.component.ts
@@ -111,7 +111,7 @@
   setTeamNumber() {
     let data: Input = {
       teamNumber: this.teamNumberSelection,
-      notesData: 'Auto: \nTeleop: \nEngame: ',
+      notesData: 'Match: \nAuto: \nTeleop: \nEndgame: ',
       keywordsData: {
         goodDriving: false,
         badDriving: false,
diff --git a/y2023/BUILD b/y2023/BUILD
index d897c06..095b856 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -60,6 +60,7 @@
         "//y2023/constants:constants_sender",
         "//y2023/vision:foxglove_image_converter",
         "//aos/network:web_proxy_main",
+        ":joystick_republish",
         "//aos/events/logging:log_cat",
         "//y2023/rockpi:imu_main",
         "//frc971/image_streamer:image_streamer",
@@ -297,6 +298,7 @@
         "//third_party:phoenixpro",
         "//third_party:wpilib",
         "//y2023/control_loops/drivetrain:drivetrain_can_position_fbs",
+        "//y2023/control_loops/superstructure:led_indicator_lib",
         "//y2023/control_loops/superstructure:superstructure_output_fbs",
         "//y2023/control_loops/superstructure:superstructure_position_fbs",
     ],
@@ -326,6 +328,23 @@
     ],
 )
 
+cc_binary(
+    name = "joystick_republish",
+    srcs = [
+        "joystick_republish.cc",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//aos:configuration",
+        "//aos:flatbuffer_merge",
+        "//aos:init",
+        "//aos/events:shm_event_loop",
+        "//frc971/input:joystick_state_fbs",
+        "@com_github_google_glog//:glog",
+    ],
+)
+
 py_library(
     name = "python_init",
     srcs = ["__init__.py"],
diff --git a/y2023/autonomous/BUILD b/y2023/autonomous/BUILD
index 6d6922b..cfcc19c 100644
--- a/y2023/autonomous/BUILD
+++ b/y2023/autonomous/BUILD
@@ -53,9 +53,11 @@
         "//frc971/control_loops:profiled_subsystem_fbs",
         "//frc971/control_loops/drivetrain:drivetrain_config",
         "//frc971/control_loops/drivetrain:localizer_fbs",
+        "//y2023:constants",
         "//y2023/control_loops/drivetrain:drivetrain_base",
         "//y2023/control_loops/superstructure:superstructure_goal_fbs",
         "//y2023/control_loops/superstructure:superstructure_status_fbs",
+        "//y2023/control_loops/superstructure/arm:generated_graph",
     ],
 )
 
diff --git a/y2023/autonomous/auto_splines.cc b/y2023/autonomous/auto_splines.cc
index 0cc98a2..09cf4ea 100644
--- a/y2023/autonomous/auto_splines.cc
+++ b/y2023/autonomous/auto_splines.cc
@@ -4,7 +4,7 @@
 #include "aos/flatbuffer_merge.h"
 
 namespace y2023 {
-namespace actors {
+namespace autonomous {
 
 namespace {
 flatbuffers::Offset<frc971::MultiSpline> FixSpline(
@@ -92,6 +92,66 @@
   return FixSpline(builder, multispline_builder.Finish(), alliance);
 }
 
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::TestSpline(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(test_spline_, builder->fbb()),
+      alliance);
+}
+
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::Spline1(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(spline_1_, builder->fbb()),
+      alliance);
+}
+
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::Spline2(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(spline_2_, builder->fbb()),
+      alliance);
+}
+
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::Spline3(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(spline_3_, builder->fbb()),
+      alliance);
+}
+
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::Spline4(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(spline_4_, builder->fbb()),
+      alliance);
+}
+
+flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::Spline5(
+    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+        *builder,
+    aos::Alliance alliance) {
+  return FixSpline(
+      builder,
+      aos::CopyFlatBuffer<frc971::MultiSpline>(spline_5_, builder->fbb()),
+      alliance);
+}
+
 flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::StraightLine(
     aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
         *builder,
@@ -112,15 +172,5 @@
   return FixSpline(builder, multispline_builder.Finish(), alliance);
 }
 
-flatbuffers::Offset<frc971::MultiSpline> AutonomousSplines::TestSpline(
-    aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
-        *builder,
-    aos::Alliance alliance) {
-  return FixSpline(
-      builder,
-      aos::CopyFlatBuffer<frc971::MultiSpline>(test_spline_, builder->fbb()),
-      alliance);
-}
-
-}  // namespace actors
+}  // namespace autonomous
 }  // namespace y2023
diff --git a/y2023/autonomous/auto_splines.h b/y2023/autonomous/auto_splines.h
index 1280693..2847957 100644
--- a/y2023/autonomous/auto_splines.h
+++ b/y2023/autonomous/auto_splines.h
@@ -1,10 +1,12 @@
-#ifndef Y2023_ACTORS_AUTO_SPLINES_H_
-#define Y2023_ACTORS_AUTO_SPLINES_H_
+#ifndef Y2023_AUTONOMOUS_AUTO_SPLINES_H_
+#define Y2023_AUTONOMOUS_AUTO_SPLINES_H_
 
 #include "aos/events/event_loop.h"
+#include "aos/flatbuffer_merge.h"
 #include "frc971/control_loops/control_loops_generated.h"
 #include "frc971/input/joystick_state_generated.h"
 #include "frc971/control_loops/drivetrain/drivetrain_goal_generated.h"
+#include "frc971/input/joystick_state_generated.h"
 /*
 
   The cooridinate system for the autonomous splines is the same as the spline
@@ -13,13 +15,23 @@
 */
 
 namespace y2023 {
-namespace actors {
+namespace autonomous {
 
 class AutonomousSplines {
  public:
   AutonomousSplines()
       : test_spline_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
-            "splines/test_spline.json")) {}
+            "splines/test_spline.json")),
+        spline_1_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
+            "splines/spline.0.json")),
+        spline_2_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
+            "splines/spline.1.json")),
+        spline_3_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
+            "splines/spline.2.json")),
+        spline_4_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
+            "splines/spline.3.json")),
+        spline_5_(aos::JsonFileToFlatbuffer<frc971::MultiSpline>(
+            "splines/spline.4.json")) {}
   static flatbuffers::Offset<frc971::MultiSpline> BasicSSpline(
       aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
           *builder,
@@ -33,11 +45,37 @@
       aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
           *builder,
       aos::Alliance alliance);
+  flatbuffers::Offset<frc971::MultiSpline> Spline1(
+      aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+          *builder,
+      aos::Alliance alliance);
+  flatbuffers::Offset<frc971::MultiSpline> Spline2(
+      aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+          *builder,
+      aos::Alliance alliance);
+  flatbuffers::Offset<frc971::MultiSpline> Spline3(
+      aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+          *builder,
+      aos::Alliance alliance);
+  flatbuffers::Offset<frc971::MultiSpline> Spline4(
+      aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+          *builder,
+      aos::Alliance alliance);
+  flatbuffers::Offset<frc971::MultiSpline> Spline5(
+      aos::Sender<frc971::control_loops::drivetrain::SplineGoal>::Builder
+          *builder,
+      aos::Alliance alliance);
+
  private:
   aos::FlatbufferDetachedBuffer<frc971::MultiSpline> test_spline_;
+  aos::FlatbufferDetachedBuffer<frc971::MultiSpline> spline_1_;
+  aos::FlatbufferDetachedBuffer<frc971::MultiSpline> spline_2_;
+  aos::FlatbufferDetachedBuffer<frc971::MultiSpline> spline_3_;
+  aos::FlatbufferDetachedBuffer<frc971::MultiSpline> spline_4_;
+  aos::FlatbufferDetachedBuffer<frc971::MultiSpline> spline_5_;
 };
 
-}  // namespace actors
+}  // namespace autonomous
 }  // namespace y2023
 
-#endif  // Y2023_ACTORS_AUTO_SPLINES_H_
+#endif  // Y2023_AUTONOMOUS_AUTO_SPLINES_H_
diff --git a/y2023/autonomous/autonomous_actor.cc b/y2023/autonomous/autonomous_actor.cc
index 8e99af6..77965e4 100644
--- a/y2023/autonomous/autonomous_actor.cc
+++ b/y2023/autonomous/autonomous_actor.cc
@@ -5,13 +5,18 @@
 #include <cmath>
 
 #include "aos/logging/logging.h"
+#include "aos/util/math.h"
 #include "frc971/control_loops/drivetrain/localizer_generated.h"
+#include "y2023/autonomous/auto_splines.h"
+#include "y2023/constants.h"
 #include "y2023/control_loops/drivetrain/drivetrain_base.h"
+#include "y2023/control_loops/superstructure/arm/generated_graph.h"
 
 DEFINE_bool(spline_auto, true, "Run simple test S-spline auto mode.");
+DEFINE_bool(charged_up, true, "If true, run the Charged Up autonomous mode.");
 
 namespace y2023 {
-namespace actors {
+namespace autonomous {
 
 using ::aos::monotonic_clock;
 using ::frc971::ProfileParametersT;
@@ -28,7 +33,9 @@
       joystick_state_fetcher_(
           event_loop->MakeFetcher<aos::JoystickState>("/aos")),
       robot_state_fetcher_(event_loop->MakeFetcher<aos::RobotState>("/aos")),
-      auto_splines_() {
+      auto_splines_(),
+      arm_goal_position_(control_loops::superstructure::arm::NeutralIndex()),
+      points_(control_loops::superstructure::arm::PointList()) {
   replan_timer_ = event_loop->AddTimer([this]() { Replan(); });
 
   event_loop->OnRun([this, event_loop]() {
@@ -51,8 +58,9 @@
           (joystick_state_fetcher_->alliance() != alliance_)) {
         alliance_ = joystick_state_fetcher_->alliance();
         is_planned_ = false;
-        // Only kick the planning out by 2 seconds. If we end up enabled in that
-        // second, then we will kick it out further based on the code below.
+        // Only kick the planning out by 2 seconds. If we end up enabled in
+        // that second, then we will kick it out further based on the code
+        // below.
         replan_timer_->Setup(now + std::chrono::seconds(2));
       }
       if (joystick_state_fetcher_->enabled()) {
@@ -77,6 +85,27 @@
                    SplineDirection::kForward);
 
     starting_position_ = test_spline_->starting_position();
+  } else if (FLAGS_charged_up) {
+    charged_up_splines_ = {
+        PlanSpline(std::bind(&AutonomousSplines::Spline1, &auto_splines_,
+                             std::placeholders::_1, alliance_),
+                   SplineDirection::kBackward),
+        PlanSpline(std::bind(&AutonomousSplines::Spline2, &auto_splines_,
+                             std::placeholders::_1, alliance_),
+                   SplineDirection::kForward),
+        PlanSpline(std::bind(&AutonomousSplines::Spline3, &auto_splines_,
+                             std::placeholders::_1, alliance_),
+                   SplineDirection::kBackward),
+        PlanSpline(std::bind(&AutonomousSplines::Spline4, &auto_splines_,
+                             std::placeholders::_1, alliance_),
+                   SplineDirection::kForward),
+        PlanSpline(std::bind(&AutonomousSplines::Spline5, &auto_splines_,
+                             std::placeholders::_1, alliance_),
+                   SplineDirection::kBackward),
+    };
+
+    starting_position_ = charged_up_splines_.value()[0].starting_position();
+    CHECK(starting_position_);
   }
 
   is_planned_ = true;
@@ -113,8 +142,8 @@
     CHECK(starting_position_);
     SendStartingPosition(starting_position_.value());
   }
-  // Clear this so that we don't accidentally resend things as soon as we replan
-  // later.
+  // Clear this so that we don't accidentally resend things as soon as we
+  // replan later.
   user_indicated_safe_to_reset_ = false;
   is_planned_ = false;
   starting_position_.reset();
@@ -160,5 +189,204 @@
   }
 }
 
-}  // namespace actors
+// Charged Up 3 Game Object Autonomous.
+void AutonomousActor::ChargedUp() {
+  aos::monotonic_clock::time_point start_time = aos::monotonic_clock::now();
+
+  CHECK(charged_up_splines_);
+
+  auto &splines = *charged_up_splines_;
+
+  // Tell the superstructure a cone was preloaded
+  if (!WaitForPreloaded()) return;
+
+  // Place first cone on mid level
+  MidConeScore();
+
+  // Wait until the arm is at the goal to spit
+  if (!WaitForArmGoal()) return;
+  Spit();
+
+  AOS_LOG(
+      INFO, "Placed first cone %lf s\n",
+      aos::time::DurationInSeconds(aos::monotonic_clock::now() - start_time));
+
+  // Drive and intake the cube nearest to the starting zone
+  if (!splines[0].WaitForPlan()) return;
+  splines[0].Start();
+
+  // Move arm into position to pickup a cube and start cube intake
+  PickupCube();
+  IntakeCube();
+
+  if (!splines[0].WaitForSplineDistanceRemaining(0.02)) return;
+
+  // Drive back to grid and place cube on high level
+  if (!splines[1].WaitForPlan()) return;
+  splines[1].Start();
+
+  HighCubeScore();
+
+  if (!splines[1].WaitForSplineDistanceRemaining(0.02)) return;
+
+  if (!WaitForArmGoal()) return;
+  Spit();
+
+  AOS_LOG(
+      INFO, "Placed first cube %lf s\n",
+      aos::time::DurationInSeconds(aos::monotonic_clock::now() - start_time));
+
+  // Drive and intake the cube second nearest to the starting zone
+  if (!splines[2].WaitForPlan()) return;
+  splines[2].Start();
+
+  PickupCube();
+  IntakeCube();
+
+  if (!splines[2].WaitForSplineDistanceRemaining(0.02)) return;
+
+  // Drive back to grid and place object on mid level
+  if (!splines[3].WaitForPlan()) return;
+  splines[3].Start();
+
+  MidCubeScore();
+
+  if (!splines[3].WaitForSplineDistanceRemaining(0.02)) return;
+
+  if (!WaitForArmGoal()) return;
+  Spit();
+
+  AOS_LOG(
+      INFO, "Placed second cube %lf s\n",
+      aos::time::DurationInSeconds(aos::monotonic_clock::now() - start_time));
+
+  // Drive onto charging station
+  if (!splines[4].WaitForPlan()) return;
+  splines[4].Start();
+
+  if (!splines[4].WaitForSplineDistanceRemaining(0.02)) return;
+}
+
+void AutonomousActor::SendSuperstructureGoal() {
+  auto builder = superstructure_goal_sender_.MakeBuilder();
+
+  control_loops::superstructure::Goal::Builder superstructure_builder =
+      builder.MakeBuilder<control_loops::superstructure::Goal>();
+
+  superstructure_builder.add_arm_goal_position(arm_goal_position_);
+  superstructure_builder.add_preloaded_with_cone(preloaded_);
+  superstructure_builder.add_roller_goal(roller_goal_);
+  superstructure_builder.add_wrist(wrist_goal_);
+
+  if (builder.Send(superstructure_builder.Finish()) !=
+      aos::RawSender::Error::kOk) {
+    AOS_LOG(ERROR, "Sending superstructure goal failed.\n");
+  }
+}
+
+[[nodiscard]] bool AutonomousActor::WaitForPreloaded() {
+  set_preloaded(true);
+  SendSuperstructureGoal();
+
+  ::aos::time::PhasedLoop phased_loop(frc971::controls::kLoopFrequency,
+                                      event_loop()->monotonic_now(),
+                                      ActorBase::kLoopOffset);
+
+  bool loaded = false;
+  while (!loaded) {
+    if (ShouldCancel()) {
+      return false;
+    }
+
+    phased_loop.SleepUntilNext();
+    superstructure_status_fetcher_.Fetch();
+    CHECK(superstructure_status_fetcher_.get() != nullptr);
+
+    loaded = (superstructure_status_fetcher_->end_effector_state() ==
+              control_loops::superstructure::EndEffectorState::LOADED);
+  }
+
+  set_preloaded(false);
+  SendSuperstructureGoal();
+
+  return true;
+}
+
+void AutonomousActor::MidConeScore() {
+  set_arm_goal_position(
+      control_loops::superstructure::arm::ScoreFrontMidConeUpIndex());
+  set_wrist_goal(0.05);
+  SendSuperstructureGoal();
+}
+
+void AutonomousActor::HighCubeScore() {
+  set_arm_goal_position(
+      control_loops::superstructure::arm::ScoreFrontHighCubeIndex());
+  set_wrist_goal(0.6);
+  SendSuperstructureGoal();
+}
+
+void AutonomousActor::MidCubeScore() {
+  set_arm_goal_position(
+      control_loops::superstructure::arm::ScoreFrontMidCubeIndex());
+  set_wrist_goal(0.6);
+  SendSuperstructureGoal();
+}
+
+void AutonomousActor::PickupCube() {
+  set_arm_goal_position(
+      control_loops::superstructure::arm::GroundPickupBackCubeIndex());
+  set_wrist_goal(0.6);
+  SendSuperstructureGoal();
+}
+
+void AutonomousActor::Spit() {
+  set_roller_goal(control_loops::superstructure::RollerGoal::SPIT);
+  SendSuperstructureGoal();
+}
+
+void AutonomousActor::IntakeCube() {
+  set_roller_goal(control_loops::superstructure::RollerGoal::INTAKE_CUBE);
+  SendSuperstructureGoal();
+}
+
+[[nodiscard]] bool AutonomousActor::WaitForArmGoal() {
+  constexpr double kEpsTheta = 0.01;
+
+  ::aos::time::PhasedLoop phased_loop(frc971::controls::kLoopFrequency,
+                                      event_loop()->monotonic_now(),
+                                      ActorBase::kLoopOffset);
+
+  bool at_goal = false;
+  while (!at_goal) {
+    if (ShouldCancel()) {
+      return false;
+    }
+
+    phased_loop.SleepUntilNext();
+    superstructure_status_fetcher_.Fetch();
+    CHECK(superstructure_status_fetcher_.get() != nullptr);
+
+    // Check that the arm and wrist have reached the goal position
+    at_goal = (std::abs(points_[arm_goal_position_](0) -
+                        superstructure_status_fetcher_->arm()->theta0()) <
+                   kEpsTheta &&
+               std::abs(points_[arm_goal_position_](1) -
+                        superstructure_status_fetcher_->arm()->theta1()) <
+                   kEpsTheta &&
+               std::abs(points_[arm_goal_position_](2) -
+                        superstructure_status_fetcher_->arm()->theta2()) <
+                   kEpsTheta) &&
+              (std::abs(wrist_goal_ -
+                        superstructure_status_fetcher_->wrist()->position()) <
+               kEpsTheta);
+  }
+
+  set_preloaded(false);
+  SendSuperstructureGoal();
+
+  return true;
+}
+
+}  // namespace autonomous
 }  // namespace y2023
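The ChargedUp() routine repeats one pattern per spline: wait for the plan, start driving, command the superstructure while in motion, then block until the spline is nearly finished before the next step. A minimal sketch of that pattern, written as a hypothetical helper (RunSplineStep() does not exist in this change; the SplineHandle calls do):

    // Sketch only: condenses the per-spline sequence used throughout ChargedUp().
    bool AutonomousActor::RunSplineStep(SplineHandle *spline,
                                        const std::function<void()> &superstructure_action) {
      if (!spline->WaitForPlan()) return false;  // Planning failed or the action was cancelled.
      spline->Start();                           // Begin following the trajectory.
      superstructure_action();                   // e.g. MidCubeScore() or PickupCube() while driving.
      // Block until less than 2 cm of the path remains (or we get cancelled).
      return spline->WaitForSplineDistanceRemaining(0.02);
    }

Each wait call returning false is treated as a cancellation, which is why every step in ChargedUp() early-returns instead of continuing the routine.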
diff --git a/y2023/autonomous/autonomous_actor.h b/y2023/autonomous/autonomous_actor.h
index cf0b458..d13003d 100644
--- a/y2023/autonomous/autonomous_actor.h
+++ b/y2023/autonomous/autonomous_actor.h
@@ -1,5 +1,5 @@
-#ifndef Y2023_ACTORS_AUTONOMOUS_ACTOR_H_
-#define Y2023_ACTORS_AUTONOMOUS_ACTOR_H_
+#ifndef Y2023_AUTONOMOUS_AUTONOMOUS_ACTOR_H_
+#define Y2023_AUTONOMOUS_AUTONOMOUS_ACTOR_H_
 
 #include "aos/actions/actions.h"
 #include "aos/actions/actor.h"
@@ -8,9 +8,11 @@
 #include "frc971/control_loops/drivetrain/drivetrain_config.h"
 #include "frc971/control_loops/drivetrain/localizer_generated.h"
 #include "y2023/autonomous/auto_splines.h"
+#include "y2023/control_loops/superstructure/superstructure_goal_generated.h"
+#include "y2023/control_loops/superstructure/superstructure_status_generated.h"
 
 namespace y2023 {
-namespace actors {
+namespace autonomous {
 
 class AutonomousActor : public ::frc971::autonomous::BaseAutonomousActor {
  public:
@@ -20,11 +22,39 @@
       const ::frc971::autonomous::AutonomousActionParams *params) override;
 
  private:
+  void set_arm_goal_position(uint32_t requested_arm_goal_position) {
+    arm_goal_position_ = requested_arm_goal_position;
+  }
+
+  void set_roller_goal(
+      control_loops::superstructure::RollerGoal requested_roller_goal) {
+    roller_goal_ = requested_roller_goal;
+  }
+
+  void set_wrist_goal(double requested_wrist_goal) {
+    wrist_goal_ = requested_wrist_goal;
+  }
+
+  void set_preloaded(bool preloaded) { preloaded_ = preloaded; }
+
+  void SendSuperstructureGoal();
+  void HighCubeScore();
+  void MidCubeScore();
+  void MidConeScore();
+  void PickupCube();
+  void Spit();
+  void IntakeCube();
+
+  [[nodiscard]] bool WaitForArmGoal();
+
+  [[nodiscard]] bool WaitForPreloaded();
+
   void Reset();
 
   void SendStartingPosition(const Eigen::Vector3d &start);
   void MaybeSendStartingPosition();
   void SplineAuto();
+  void ChargedUp();
   void Replan();
 
   aos::Sender<frc971::control_loops::drivetrain::LocalizerControl>
@@ -32,10 +62,13 @@
   aos::Fetcher<aos::JoystickState> joystick_state_fetcher_;
   aos::Fetcher<aos::RobotState> robot_state_fetcher_;
 
+  double wrist_goal_ = 0.0;
+  control_loops::superstructure::RollerGoal roller_goal_ =
+      control_loops::superstructure::RollerGoal::IDLE;
+
   aos::TimerHandler *replan_timer_;
   aos::TimerHandler *button_poll_;
 
-  std::optional<SplineHandle> test_spline_;
   aos::Alliance alliance_ = aos::Alliance::kInvalid;
   AutonomousSplines auto_splines_;
   bool user_indicated_safe_to_reset_ = false;
@@ -44,9 +77,22 @@
   bool is_planned_ = false;
 
   std::optional<Eigen::Vector3d> starting_position_;
+
+  uint32_t arm_goal_position_;
+  bool preloaded_ = false;
+
+  aos::Sender<control_loops::superstructure::Goal> superstructure_goal_sender_;
+  aos::Fetcher<y2023::control_loops::superstructure::Status>
+      superstructure_status_fetcher_;
+
+  std::optional<SplineHandle> test_spline_;
+  std::optional<std::array<SplineHandle, 5>> charged_up_splines_;
+
+  // List of arm angles from arm::PointsList
+  const ::std::vector<::Eigen::Matrix<double, 3, 1>> points_;
 };
 
-}  // namespace actors
+}  // namespace autonomous
 }  // namespace y2023
 
-#endif  // Y2023_ACTORS_AUTONOMOUS_ACTOR_H_
+#endif  // Y2023_AUTONOMOUS_AUTONOMOUS_ACTOR_H_
diff --git a/y2023/autonomous/autonomous_actor_main.cc b/y2023/autonomous/autonomous_actor_main.cc
index 1ee3c15..08a8960 100644
--- a/y2023/autonomous/autonomous_actor_main.cc
+++ b/y2023/autonomous/autonomous_actor_main.cc
@@ -11,7 +11,7 @@
       aos::configuration::ReadConfig("aos_config.json");
 
   ::aos::ShmEventLoop event_loop(&config.message());
-  ::y2023::actors::AutonomousActor autonomous(&event_loop);
+  ::y2023::autonomous::AutonomousActor autonomous(&event_loop);
 
   event_loop.Run();
 
diff --git a/y2023/autonomous/splines/spline_1.json b/y2023/autonomous/splines/spline.0.json
similarity index 100%
rename from y2023/autonomous/splines/spline_1.json
rename to y2023/autonomous/splines/spline.0.json
diff --git a/y2023/autonomous/splines/spline_2.json b/y2023/autonomous/splines/spline.1.json
similarity index 100%
rename from y2023/autonomous/splines/spline_2.json
rename to y2023/autonomous/splines/spline.1.json
diff --git a/y2023/autonomous/splines/spline_3.json b/y2023/autonomous/splines/spline.2.json
similarity index 100%
rename from y2023/autonomous/splines/spline_3.json
rename to y2023/autonomous/splines/spline.2.json
diff --git a/y2023/autonomous/splines/spline_4.json b/y2023/autonomous/splines/spline.3.json
similarity index 100%
rename from y2023/autonomous/splines/spline_4.json
rename to y2023/autonomous/splines/spline.3.json
diff --git a/y2023/autonomous/splines/spline_5.json b/y2023/autonomous/splines/spline.4.json
similarity index 100%
rename from y2023/autonomous/splines/spline_5.json
rename to y2023/autonomous/splines/spline.4.json
diff --git a/y2023/constants.cc b/y2023/constants.cc
index 4bd2096..f11c68c 100644
--- a/y2023/constants.cc
+++ b/y2023/constants.cc
@@ -83,19 +83,20 @@
       break;
 
     case kCompTeamNumber:
-      arm_proximal->zeroing.measured_absolute_position = 0.138453705930275;
+      arm_proximal->zeroing.measured_absolute_position = 0.153241637089465;
       arm_proximal->potentiometer_offset =
           0.931355973012855 + 8.6743197253382 - 0.101200335326309 -
-          0.0820901660993467 - 0.0703733798337964 - 0.0294645384848748;
+          0.0820901660993467 - 0.0703733798337964 - 0.0294645384848748 -
+          0.577156175549626;
 
-      arm_distal->zeroing.measured_absolute_position = 0.562947209110251;
+      arm_distal->zeroing.measured_absolute_position = 0.119544808434349;
       arm_distal->potentiometer_offset =
           0.436664933370656 + 0.49457213779426 + 6.78213223139724 -
           0.0220711555235029 - 0.0162945074111813 + 0.00630344935527365 -
           0.0164398318919943 - 0.145833494945215 + 0.234878799868491 +
-          0.125924230298394 + 0.147136306208754;
+          0.125924230298394 + 0.147136306208754 - 0.69167546169753;
 
-      roll_joint->zeroing.measured_absolute_position = 0.593975883699743;
+      roll_joint->zeroing.measured_absolute_position = 0.62315534539819;
       roll_joint->potentiometer_offset =
           -(3.87038557084874 - 0.0241774522172967 + 0.0711345168020632 -
             0.866186131631967 - 0.0256788357596952 + 0.18101759154572017 -
@@ -105,7 +106,7 @@
           0.0201047336425017 - 1.0173426655158 - 0.186085272847293 - 0.0317706563397807;
 
       wrist->subsystem_params.zeroing_constants.measured_absolute_position =
-          0.894159203288852;
+          1.00731305518279;
 
       break;
 
@@ -119,20 +120,14 @@
           7.673132586937 - 0.0799284644472573 - 0.0323574039310657 +
           0.0143810684138064 + 0.00945555248207735;
 
-      roll_joint->zeroing.measured_absolute_position = 1.7490367887908;
+      roll_joint->zeroing.measured_absolute_position = 1.85482286175059;
       roll_joint->potentiometer_offset =
           0.624713611895747 + 3.10458504917251 - 0.0966407797407789 +
-          0.0257708772364788 - 0.0395076737853459 - 6.87914956118006;
+          0.0257708772364788 - 0.0395076737853459 - 6.87914956118006 -
+          0.097581301615046;
 
       wrist->subsystem_params.zeroing_constants.measured_absolute_position =
-          0.0227022553749391;
-
-      wrist->subsystem_params.zeroing_constants.one_revolution_distance =
-          M_PI * 2.0 * constants::Values::kPracticeWristEncoderRatio();
-      wrist->subsystem_params.range = Values::kPracticeWristRange();
-      wrist->subsystem_params.zeroing_constants.middle_position =
-          Values::kPracticeWristRange().middle();
-      r.wrist_flipped = false;
+          6.04062267812154;
 
       break;
 
diff --git a/y2023/constants.h b/y2023/constants.h
index 8996597..595a694 100644
--- a/y2023/constants.h
+++ b/y2023/constants.h
@@ -168,7 +168,7 @@
 
   // Game object is spit from end effector for at least this time
   static constexpr std::chrono::milliseconds kExtraSpittingTime() {
-    return std::chrono::seconds(2);
+    return std::chrono::seconds(1);
   }
 
   // if true, tune down all the arm constants for testing.
diff --git a/y2023/control_loops/drivetrain/drivetrain_main.cc b/y2023/control_loops/drivetrain/drivetrain_main.cc
index b13e76f..eebae91 100644
--- a/y2023/control_loops/drivetrain/drivetrain_main.cc
+++ b/y2023/control_loops/drivetrain/drivetrain_main.cc
@@ -15,6 +15,8 @@
   aos::FlatbufferDetachedBuffer<aos::Configuration> config =
       aos::configuration::ReadConfig("aos_config.json");
 
+  frc971::constants::WaitForConstants<y2023::Constants>(&config.message());
+
   aos::ShmEventLoop event_loop(&config.message());
   std::unique_ptr<::frc971::control_loops::drivetrain::PuppetLocalizer>
       localizer = std::make_unique<
diff --git a/y2023/control_loops/drivetrain/target_selector.cc b/y2023/control_loops/drivetrain/target_selector.cc
index 1b70ca1..bc1fb90 100644
--- a/y2023/control_loops/drivetrain/target_selector.cc
+++ b/y2023/control_loops/drivetrain/target_selector.cc
@@ -1,6 +1,5 @@
 #include "y2023/control_loops/drivetrain/target_selector.h"
 
-#include "aos/containers/sized_array.h"
 #include "frc971/shooter_interpolation/interpolation.h"
 #include "y2023/control_loops/superstructure/superstructure_position_generated.h"
 #include "y2023/vision/game_pieces_generated.h"
@@ -53,6 +52,74 @@
   }
 }
 
+aos::SizedArray<const frc971::vision::Position *, 3>
+TargetSelector::PossibleScoringPositions(
+    const TargetSelectorHint *hint, const localizer::HalfField *scoring_map) {
+  aos::SizedArray<const localizer::ScoringGrid *, 3> possible_grids;
+  if (hint->has_grid()) {
+    possible_grids = {[hint, scoring_map]() -> const localizer::ScoringGrid * {
+      switch (hint->grid()) {
+        case GridSelectionHint::LEFT:
+          return scoring_map->left_grid();
+        case GridSelectionHint::MIDDLE:
+          return scoring_map->middle_grid();
+        case GridSelectionHint::RIGHT:
+          return scoring_map->right_grid();
+      }
+      // Make roborio compiler happy...
+      return nullptr;
+    }()};
+  } else {
+    possible_grids = {scoring_map->left_grid(), scoring_map->middle_grid(),
+                      scoring_map->right_grid()};
+  }
+
+  aos::SizedArray<const localizer::ScoringRow *, 3> possible_rows =
+      [possible_grids, hint]() {
+        aos::SizedArray<const localizer::ScoringRow *, 3> rows;
+        for (const localizer::ScoringGrid *grid : possible_grids) {
+          CHECK_NOTNULL(grid);
+          switch (hint->row()) {
+            case RowSelectionHint::BOTTOM:
+              rows.push_back(grid->bottom());
+              break;
+            case RowSelectionHint::MIDDLE:
+              rows.push_back(grid->middle());
+              break;
+            case RowSelectionHint::TOP:
+              rows.push_back(grid->top());
+              break;
+          }
+        }
+        return rows;
+      }();
+  aos::SizedArray<const frc971::vision::Position *, 3> positions;
+  for (const localizer::ScoringRow *row : possible_rows) {
+    CHECK_NOTNULL(row);
+    switch (hint->spot()) {
+      case SpotSelectionHint::LEFT:
+        positions.push_back(row->left_cone());
+        break;
+      case SpotSelectionHint::MIDDLE:
+        positions.push_back(row->cube());
+        break;
+      case SpotSelectionHint::RIGHT:
+        positions.push_back(row->right_cone());
+        break;
+    }
+  }
+  return positions;
+}
+
+aos::SizedArray<const frc971::vision::Position *, 3>
+TargetSelector::PossiblePickupPositions(
+    const localizer::HalfField *scoring_map) {
+  aos::SizedArray<const frc971::vision::Position *, 3> positions;
+  positions.push_back(scoring_map->substation()->left());
+  positions.push_back(scoring_map->substation()->right());
+  return positions;
+}
+
 bool TargetSelector::UpdateSelection(const ::Eigen::Matrix<double, 5, 1> &state,
                                      double /*command_speed*/) {
   UpdateAlliance();
@@ -77,63 +144,11 @@
     }
     last_hint_ = hint_object;
   }
-  aos::SizedArray<const localizer::ScoringGrid *, 3> possible_grids;
-  if (hint_fetcher_->has_grid()) {
-    possible_grids = {[this]() -> const localizer::ScoringGrid * {
-      switch (hint_fetcher_->grid()) {
-        case GridSelectionHint::LEFT:
-          return scoring_map_->left_grid();
-        case GridSelectionHint::MIDDLE:
-          return scoring_map_->middle_grid();
-        case GridSelectionHint::RIGHT:
-          return scoring_map_->right_grid();
-      }
-      // Make roborio compiler happy...
-      return nullptr;
-    }()};
-  } else {
-    possible_grids = {scoring_map_->left_grid(), scoring_map_->middle_grid(),
-                      scoring_map_->right_grid()};
-  }
-
-  aos::SizedArray<const localizer::ScoringRow *, 3> possible_rows =
-      [this, possible_grids]() {
-        aos::SizedArray<const localizer::ScoringRow *, 3> rows;
-        for (const localizer::ScoringGrid *grid : possible_grids) {
-          CHECK_NOTNULL(grid);
-          switch (hint_fetcher_->row()) {
-            case RowSelectionHint::BOTTOM:
-              rows.push_back(grid->bottom());
-              break;
-            case RowSelectionHint::MIDDLE:
-              rows.push_back(grid->middle());
-              break;
-            case RowSelectionHint::TOP:
-              rows.push_back(grid->top());
-              break;
-          }
-        }
-        return rows;
-      }();
-  aos::SizedArray<const frc971::vision::Position *, 3> possible_positions =
-      [this, possible_rows]() {
-        aos::SizedArray<const frc971::vision::Position *, 3> positions;
-        for (const localizer::ScoringRow *row : possible_rows) {
-          CHECK_NOTNULL(row);
-          switch (hint_fetcher_->spot()) {
-            case SpotSelectionHint::LEFT:
-              positions.push_back(row->left_cone());
-              break;
-            case SpotSelectionHint::MIDDLE:
-              positions.push_back(row->cube());
-              break;
-            case SpotSelectionHint::RIGHT:
-              positions.push_back(row->right_cone());
-              break;
-          }
-        }
-        return positions;
-      }();
+  const aos::SizedArray<const frc971::vision::Position *, 3>
+      possible_positions =
+          hint_fetcher_->substation_pickup()
+              ? PossiblePickupPositions(scoring_map_)
+              : PossibleScoringPositions(hint_fetcher_.get(), scoring_map_);
   CHECK_LT(0u, possible_positions.size());
   aos::SizedArray<double, 3> distances;
   std::optional<double> closest_distance;
@@ -156,7 +171,10 @@
   if (!target_pose_.has_value() ||
       distances.at(1) - distances.at(0) > kGridHysteresisDistance) {
     CHECK(closest_position.has_value());
-    target_pose_ = Pose(closest_position.value(), /*theta=*/0.0);
+    // Since all targets on one side of the field face the same direction, we
+    // can just auto-choose orientation based on field side.
+    target_pose_ = Pose(closest_position.value(),
+                        /*theta=*/closest_position->x() > 0.0 ? 0.0 : M_PI);
     if (hint_fetcher_->has_robot_side()) {
       drive_direction_ = hint_fetcher_->robot_side();
     } else {
diff --git a/y2023/control_loops/drivetrain/target_selector.h b/y2023/control_loops/drivetrain/target_selector.h
index 5e7f015..e469ce9 100644
--- a/y2023/control_loops/drivetrain/target_selector.h
+++ b/y2023/control_loops/drivetrain/target_selector.h
@@ -1,5 +1,6 @@
 #ifndef Y2023_CONTROL_LOOPS_DRIVETRAIN_TARGET_SELECTOR_H_
 #define Y2023_CONTROL_LOOPS_DRIVETRAIN_TARGET_SELECTOR_H_
+#include "aos/containers/sized_array.h"
 #include "frc971/constants/constants_sender_lib.h"
 #include "frc971/control_loops/drivetrain/localizer.h"
 #include "frc971/control_loops/pose.h"
@@ -47,6 +48,11 @@
 
  private:
   void UpdateAlliance();
+  static aos::SizedArray<const frc971::vision::Position *, 3>
+  PossibleScoringPositions(const TargetSelectorHint *hint,
+                           const localizer::HalfField *scoring_map);
+  static aos::SizedArray<const frc971::vision::Position *, 3>
+  PossiblePickupPositions(const localizer::HalfField *scoring_map);
   std::optional<Pose> target_pose_;
   aos::Fetcher<aos::JoystickState> joystick_state_fetcher_;
   aos::Fetcher<TargetSelectorHint> hint_fetcher_;
diff --git a/y2023/control_loops/drivetrain/target_selector_hint.fbs b/y2023/control_loops/drivetrain/target_selector_hint.fbs
index 357bc21..ce5fd89 100644
--- a/y2023/control_loops/drivetrain/target_selector_hint.fbs
+++ b/y2023/control_loops/drivetrain/target_selector_hint.fbs
@@ -30,7 +30,8 @@
   row:RowSelectionHint (id: 1);
   spot:SpotSelectionHint (id: 2);
   robot_side:frc971.control_loops.drivetrain.RobotSide = DONT_CARE (id: 3);
-  // TODO: support human player pickup auto-align?
+  // If set, attempt to pick up from the human player station.
+  substation_pickup:bool (id: 4);
 }
 
 root_type TargetSelectorHint;
diff --git a/y2023/control_loops/drivetrain/target_selector_test.cc b/y2023/control_loops/drivetrain/target_selector_test.cc
index c28c14d..21f3fe6 100644
--- a/y2023/control_loops/drivetrain/target_selector_test.cc
+++ b/y2023/control_loops/drivetrain/target_selector_test.cc
@@ -55,6 +55,14 @@
     builder.CheckOk(builder.Send(hint_builder.Finish()));
   }
 
+  void SendSubstationHint() {
+    auto builder = hint_sender_.MakeBuilder();
+    TargetSelectorHint::Builder hint_builder =
+        builder.MakeBuilder<TargetSelectorHint>();
+    hint_builder.add_substation_pickup(true);
+    builder.CheckOk(builder.Send(hint_builder.Finish()));
+  }
+
   const localizer::HalfField *scoring_map() const {
     return constants_fetcher_.constants().scoring_map()->red();
   }
@@ -185,4 +193,29 @@
   EXPECT_EQ(target.y(), middle_pos->y());
 }
 
+// Test that setting substation pickup in the hint causes us to pick up from
+// the substation.
+TEST_F(TargetSelectorTest, SubstationPickup) {
+  SendJoystickState();
+  SendSubstationHint();
+  const frc971::vision::Position *left_pos =
+      scoring_map()->substation()->left();
+  const frc971::vision::Position *right_pos =
+      scoring_map()->substation()->right();
+  Eigen::Matrix<double, 5, 1> left_position;
+  left_position << 0.0, left_pos->y(), 0.0, 0.0, 0.0;
+  Eigen::Matrix<double, 5, 1> right_position;
+  right_position << 0.0, right_pos->y(), 0.0, 0.0, 0.0;
+
+  EXPECT_TRUE(target_selector_.UpdateSelection(left_position, 0.0));
+  Eigen::Vector3d target = target_selector_.TargetPose().abs_pos();
+  EXPECT_EQ(target.x(), left_pos->x());
+  EXPECT_EQ(target.y(), left_pos->y());
+
+  EXPECT_TRUE(target_selector_.UpdateSelection(right_position, 0.0));
+  target = target_selector_.TargetPose().abs_pos();
+  EXPECT_EQ(target.x(), right_pos->x());
+  EXPECT_EQ(target.y(), right_pos->y());
+}
+
 }  // namespace y2023::control_loops::drivetrain
diff --git a/y2023/control_loops/python/drivetrain.py b/y2023/control_loops/python/drivetrain.py
index 05739fc..9d2b006 100644
--- a/y2023/control_loops/python/drivetrain.py
+++ b/y2023/control_loops/python/drivetrain.py
@@ -14,17 +14,17 @@
 
 kDrivetrain = drivetrain.DrivetrainParams(
     J=6.5,
-    mass=58.0,
+    mass=68.0,
     # TODO(austin): Measure radius a bit better.
     robot_radius=0.39,
     wheel_radius=2.5 * 0.0254,
     motor_type=control_loop.Falcon(),
     num_motors=3,
-    G=(14.0 / 54.0) * (22.0 / 56.0),
+    G=(14.0 / 52.0) * (26.0 / 58.0),
     q_pos=0.24,
     q_vel=2.5,
-    efficiency=0.75,
-    has_imu=True,
+    efficiency=0.92,
+    has_imu=False,
     force=True,
     kf_q_voltage=1.0,
     controller_poles=[0.82, 0.82])
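For reference on the magnitude of the gearing change: assuming G is the overall reduction expressed as output revolutions per motor revolution (so a larger G means less reduction), the new value is (14/52) * (26/58) ≈ 0.121 versus the previous (14/54) * (22/56) ≈ 0.102, about 18% higher, i.e. a faster, lower-torque drivetrain, which together with the heavier mass and higher efficiency shifts the gains this generator produces.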
diff --git a/y2023/control_loops/python/graph_paths.py b/y2023/control_loops/python/graph_paths.py
index e8589ef..24b9805 100644
--- a/y2023/control_loops/python/graph_paths.py
+++ b/y2023/control_loops/python/graph_paths.py
@@ -207,7 +207,7 @@
         control1=np.array([5.997741842590495, 1.8354263885166913]),
         control2=np.array([6.141018843972322, 1.0777341552037734]),
         end=points['ScoreFrontHighConeDownTipPlaced'],
-        control_alpha_rolls=[(0.50, 0.0), (.95, np.pi / 2.0)],
+        control_alpha_rolls=[(0.30, 0.0), (.95, np.pi / 2.0)],
     ))
 
 named_segments.append(
@@ -240,7 +240,7 @@
         control1=np.array([3.153228, -0.497009]),
         control2=np.array([2.972776, -1.026820]),
         end=points['GroundPickupBackCube'],
-        control_alpha_rolls=[(0.7, 0.0), (.9, -np.pi / 2.0)],
+        control_alpha_rolls=[(0.4, 0.0), (.9, -np.pi / 2.0)],
     ))
 
 points['GroundPickupFrontCube'] = to_theta_with_circular_index_and_roll(
@@ -329,7 +329,7 @@
     ))
 
 points['HPPickupBackConeUp'] = to_theta_with_circular_index_and_roll(
-    -1.1050539, 1.325, np.pi / 2.0, circular_index=0)
+    -1.1200539, 1.335, np.pi / 2.0, circular_index=0)
 
 named_segments.append(
     ThetaSplineSegment(
@@ -338,11 +338,11 @@
         control1=np.array([2.0, -0.239378]),
         control2=np.array([1.6, -0.626835]),
         end=points['HPPickupBackConeUp'],
-        control_alpha_rolls=[(0.7, 0.0), (.9, np.pi / 2.0)],
+        control_alpha_rolls=[(0.3, 0.0), (.9, np.pi / 2.0)],
     ))
 
 points['HPPickupFrontConeUp'] = np.array(
-    (5.16514378449353, 1.26, -np.pi / 2.0))
+    (5.16514378449353, 1.23, -np.pi / 2.0))
 #        to_theta_with_circular_index_and_roll(
 #    0.265749, 1.28332, -np.pi / 2.0, circular_index=1)
 
@@ -353,7 +353,7 @@
         control1=np.array([4.068204933788692, -0.05440997896697275]),
         control2=np.array([4.861911360838861, -0.03790410600482508]),
         end=points['HPPickupFrontConeUp'],
-        control_alpha_rolls=[(0.7, 0.0), (.9, -np.pi / 2.0)],
+        control_alpha_rolls=[(0.3, 0.0), (.9, -np.pi / 2.0)],
     ))
 
 points['ScoreFrontHighConeUp'] = to_theta_with_circular_index_and_roll(
@@ -366,7 +366,7 @@
         control1=np.array([2.594244, 0.417442]),
         control2=np.array([1.51325, 0.679748]),
         end=points['ScoreFrontHighConeUp'],
-        control_alpha_rolls=[(0.40, 0.0), (.95, -np.pi / 2.0)],
+        control_alpha_rolls=[(0.20, 0.0), (.95, -np.pi / 2.0)],
     ))
 
 points['ScoreFrontMidConeUp'] = to_theta_with_circular_index_and_roll(
@@ -382,6 +382,32 @@
         control_alpha_rolls=[(0.40, 0.0), (.95, -np.pi / 2.0)],
     ))
 
+points['ScoreFrontMidConeUpAuto'] = to_theta_with_circular_index_and_roll(
+    0.58, 0.97, -np.pi / 2.0, circular_index=0)
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="NeutralToScoreFrontMidConeUpAuto",
+        start=points['Neutral'],
+        control1=np.array([2.99620794024176, 0.23620211875551145]),
+        control2=np.array([2.728197531599509, 0.5677148040671784]),
+        end=points['ScoreFrontMidConeUpAuto'],
+        control_alpha_rolls=[(0.20, 0.0), (.90, -np.pi / 2.0)],
+        vmax=10.0,
+        alpha_unitizer=np.matrix(
+            f"{1.0 / 20.0} 0 0; 0 {1.0 / 25.0} 0; 0 0 {1.0 / 100.0}"),
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="ScoreFrontMidConeUpAutoToGroundPickupBackCube",
+        start=points['ScoreFrontMidConeUpAuto'],
+        control1=np.array([3.1869633311848187, 0.2812689595803919]),
+        control2=np.array([2.906100237354555, -0.7760928122326023]),
+        end=points['GroundPickupBackCube'],
+        control_alpha_rolls=[(0.40, 0.0), (0.60, 0.0)],
+    ))
+
 points['ScoreFrontLowCube'] = to_theta_with_circular_index_and_roll(
     0.325603, 0.39, np.pi / 2.0, circular_index=0)
 
@@ -418,6 +444,17 @@
         control_alpha_rolls=[],
     ))
 
+# Auto express spline...
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupBackCubeToScoreFrontMidCube",
+        start=points['ScoreFrontMidCube'],
+        control1=np.array([3.2345111429709847, 0.45338639767112277]),
+        control2=np.array([3.098240119468829, -0.46161157069783254]),
+        end=points['GroundPickupBackCube'],
+        control_alpha_rolls=[(0.40, 0.0), (0.60, 0.0)],
+    ))
+
 points['ScoreFrontHighCube'] = to_theta_with_circular_index_and_roll(
     0.901437, 1.16, np.pi / 2.0, circular_index=0)
 
@@ -559,6 +596,66 @@
         control_alpha_rolls=[],
     ))
 
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupBackConeUpToGroundPickupBackConeDownBase",
+        start=points["GroundPickupBackConeUp"],
+        control1=np.array([2.984750907058771, -1.4175755629898785]),
+        control2=np.array([2.9834020302847164, -1.4959006770007095]),
+        end=points["GroundPickupBackConeDownBase"],
+        control_alpha_rolls=[],
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupBackCubeToGroundPickupBackConeUp",
+        start=points["GroundPickupBackCube"],
+        control1=np.array([2.9814712671305497, -1.4752615794585657]),
+        control2=np.array([2.9814712671305497, -1.4752615794585657]),
+        end=points["GroundPickupBackConeUp"],
+        control_alpha_rolls=[],
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupBackCubeToGroundPickupBackConeDownBase",
+        start=points["GroundPickupBackCube"],
+        control1=np.array([2.9915062872070943, -1.5453319249912183]),
+        control2=np.array([3.0113316601735853, -1.5625220857410058]),
+        end=points["GroundPickupBackConeDownBase"],
+        control_alpha_rolls=[],
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupFrontConeUpToGroundPickupFrontConeDownBase",
+        start=points["GroundPickupFrontConeUp"],
+        control1=np.array([4.178303420953318, 1.7954892324947347]),
+        control2=np.array([4.198694185882847, 1.8611851211658763]),
+        end=points["GroundPickupFrontConeDownBase"],
+        control_alpha_rolls=[],
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupFrontCubeToGroundPickupFrontConeUp",
+        start=points["GroundPickupFrontCube"],
+        control1=np.array([4.152678427672294, 1.755703782155648]),
+        control2=np.array([4.115445030197163, 1.77599054062196]),
+        end=points["GroundPickupFrontConeUp"],
+        control_alpha_rolls=[],
+    ))
+
+named_segments.append(
+    ThetaSplineSegment(
+        name="GroundPickupFrontCubeToGroundPickFrontCubeDownBase",
+        start=points["GroundPickupFrontCube"],
+        control1=np.array([4.126486425254001, 1.838621758570565]),
+        control2=np.array([4.1585708953556, 1.8633805468551703]),
+        end=points["GroundPickupFrontConeDownBase"],
+        control_alpha_rolls=[],
+    ))
+
 front_points = []
 back_points = []
 unnamed_segments = []
diff --git a/y2023/control_loops/superstructure/end_effector.cc b/y2023/control_loops/superstructure/end_effector.cc
index 4d5d43e..444be0d 100644
--- a/y2023/control_loops/superstructure/end_effector.cc
+++ b/y2023/control_loops/superstructure/end_effector.cc
@@ -18,14 +18,29 @@
       beambreak_(false) {}
 
 void EndEffector::RunIteration(
-    const ::aos::monotonic_clock::time_point timestamp, RollerGoal roller_goal,
-    double falcon_current, double cone_position, bool beambreak,
-    double *roller_voltage) {
+    const ::aos::monotonic_clock::time_point timestamp,
+    RollerGoal roller_goal, double falcon_current, double cone_position,
+    bool beambreak, double *roller_voltage, bool preloaded_with_cone) {
   *roller_voltage = 0.0;
 
   constexpr double kMinCurrent = 40.0;
   constexpr double kMaxConePosition = 0.92;
 
+  // If we started off preloaded, skip to the loaded state.
+  // Make sure we weren't already there just in case.
+  if (preloaded_with_cone) {
+    switch (state_) {
+      case EndEffectorState::IDLE:
+      case EndEffectorState::INTAKING:
+        game_piece_ = vision::Class::CONE_UP;
+        state_ = EndEffectorState::LOADED;
+        break;
+      case EndEffectorState::LOADED:
+      case EndEffectorState::SPITTING:
+        break;
+    }
+  }
+
   // Let them switch game pieces
   if (roller_goal == RollerGoal::INTAKE_CONE_UP) {
     game_piece_ = vision::Class::CONE_UP;
@@ -90,8 +105,8 @@
       break;
     case EndEffectorState::LOADED:
       timer_ = timestamp;
-      // If loaded and beam break not triggered, intake
-      if (!beambreak_status) {
+      // If loaded and beam break not triggered and not preloaded, intake
+      if (!beambreak_status && !preloaded_with_cone) {
         state_ = EndEffectorState::INTAKING;
       }
       break;
@@ -110,6 +125,12 @@
         // Finished spitting
         state_ = EndEffectorState::IDLE;
         game_piece_ = vision::Class::NONE;
+      } else if (roller_goal == RollerGoal::INTAKE_CONE_UP ||
+                 roller_goal == RollerGoal::INTAKE_CONE_DOWN ||
+                 roller_goal == RollerGoal::INTAKE_CUBE ||
+                 roller_goal == RollerGoal::INTAKE_LAST) {
+        state_ = EndEffectorState::INTAKING;
+        timer_ = timestamp;
       }
 
       break;
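With the new preloaded_with_cone argument, one RunIteration() call is enough to jump the end effector state machine to LOADED without a beam break, which is what lets autonomous start with a cone already held. A rough sketch of exercising that path (illustrative only; the default construction of EndEffector and the exact namespace usings are assumptions, not taken from this change):

    using y2023::control_loops::superstructure::EndEffector;
    using y2023::control_loops::superstructure::EndEffectorState;
    using y2023::control_loops::superstructure::RollerGoal;

    EndEffector end_effector;  // Assumes EndEffector is default-constructible here.
    double roller_voltage = 0.0;
    end_effector.RunIteration(aos::monotonic_clock::now(), RollerGoal::IDLE,
                              /*falcon_current=*/0.0, /*cone_position=*/0.5,
                              /*beambreak=*/false, &roller_voltage,
                              /*preloaded_with_cone=*/true);
    // The preloaded branch marks the game piece as CONE_UP and the state as LOADED.
    CHECK(end_effector.state() == EndEffectorState::LOADED);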
diff --git a/y2023/control_loops/superstructure/end_effector.h b/y2023/control_loops/superstructure/end_effector.h
index 2d95115..da0ce5e 100644
--- a/y2023/control_loops/superstructure/end_effector.h
+++ b/y2023/control_loops/superstructure/end_effector.h
@@ -25,7 +25,7 @@
   void RunIteration(const ::aos::monotonic_clock::time_point timestamp,
                     RollerGoal roller_goal, double falcon_current,
                     double cone_position, bool beambreak,
-                    double *intake_roller_voltage);
+                    double *intake_roller_voltage, bool preloaded_with_cone);
   EndEffectorState state() const { return state_; }
   vision::Class game_piece() const { return game_piece_; }
   void Reset();
diff --git a/y2023/control_loops/superstructure/led_indicator.cc b/y2023/control_loops/superstructure/led_indicator.cc
index 68e7f14..48bca1b 100644
--- a/y2023/control_loops/superstructure/led_indicator.cc
+++ b/y2023/control_loops/superstructure/led_indicator.cc
@@ -73,6 +73,7 @@
   superstructure_position_fetcher_.Fetch();
   server_statistics_fetcher_.Fetch();
   drivetrain_output_fetcher_.Fetch();
+  drivetrain_status_fetcher_.Fetch();
   client_statistics_fetcher_.Fetch();
   gyro_reading_fetcher_.Fetch();
   localizer_output_fetcher_.Fetch();
@@ -165,7 +166,8 @@
     }
 
     // Check if there is a target that is in sight
-    if (drivetrain_status_fetcher_->line_follow_logging()->have_target()) {
+    if (drivetrain_status_fetcher_.get() != nullptr &&
+        drivetrain_status_fetcher_->line_follow_logging()->have_target()) {
       DisplayLed(255, 165, 0);
       return;
     }
diff --git a/y2023/control_loops/superstructure/superstructure.cc b/y2023/control_loops/superstructure/superstructure.cc
index 0f19a1e..ece0790 100644
--- a/y2023/control_loops/superstructure/superstructure.cc
+++ b/y2023/control_loops/superstructure/superstructure.cc
@@ -88,7 +88,8 @@
           ? position->roller_falcon()->torque_current()
           : 0.0,
       position->cone_position(), position->end_effector_cube_beam_break(),
-      &output_struct.roller_voltage);
+      &output_struct.roller_voltage,
+      unsafe_goal != nullptr ? unsafe_goal->preloaded_with_cone() : false);
 
   if (output) {
     output->CheckOk(output->Send(Output::Pack(*output->fbb(), &output_struct)));
diff --git a/y2023/control_loops/superstructure/superstructure_goal.fbs b/y2023/control_loops/superstructure/superstructure_goal.fbs
index 670351a..7ac7000 100644
--- a/y2023/control_loops/superstructure/superstructure_goal.fbs
+++ b/y2023/control_loops/superstructure/superstructure_goal.fbs
@@ -22,6 +22,9 @@
     wrist:frc971.control_loops.StaticZeroingSingleDOFProfiledSubsystemGoal (id: 2);
 
     roller_goal:RollerGoal (id: 3);
+
+    // If true, we started with the cone loaded and the end effector should skip straight to the LOADED state.
+    preloaded_with_cone:bool (id: 4);
 }
 
 
diff --git a/y2023/control_loops/superstructure/superstructure_lib_test.cc b/y2023/control_loops/superstructure/superstructure_lib_test.cc
index c1c20a3..80e41f9 100644
--- a/y2023/control_loops/superstructure/superstructure_lib_test.cc
+++ b/y2023/control_loops/superstructure/superstructure_lib_test.cc
@@ -446,6 +446,26 @@
   const ::std::vector<::Eigen::Matrix<double, 3, 1>> points_;
 };
 
+// Test that we are able to signal that the cone was preloaded
+TEST_F(SuperstructureTest, Preloaded) {
+  SetEnabled(true);
+  WaitUntilZeroed();
+
+
+  {
+    auto builder = superstructure_goal_sender_.MakeBuilder();
+    Goal::Builder goal_builder = builder.MakeBuilder<Goal>();
+    goal_builder.add_preloaded_with_cone(true);
+    ASSERT_EQ(builder.Send(goal_builder.Finish()), aos::RawSender::Error::kOk);
+  }
+
+  RunFor(dt());
+
+  ASSERT_TRUE(superstructure_status_fetcher_.Fetch());
+  EXPECT_EQ(superstructure_status_fetcher_->end_effector_state(),
+            EndEffectorState::LOADED);
+}
+
 // Tests that the superstructure does nothing when the goal is to remain
 // still.
 TEST_F(SuperstructureTest, DoesNothing) {
diff --git a/y2023/joystick_reader.cc b/y2023/joystick_reader.cc
index 9efd848..efd7537 100644
--- a/y2023/joystick_reader.cc
+++ b/y2023/joystick_reader.cc
@@ -198,6 +198,7 @@
         .wrist_goal = kConeWrist,
         .game_piece = GamePiece::CONE_UP,
         .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
                     {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::BACK,
         .row_hint = RowSelectionHint::BOTTOM,
@@ -207,6 +208,7 @@
         .wrist_goal = kConeWrist,
         .game_piece = GamePiece::CONE_UP,
         .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
                     {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::FRONT,
         .row_hint = RowSelectionHint::BOTTOM,
@@ -226,6 +228,7 @@
         .wrist_goal = kConeWrist,
         .game_piece = GamePiece::CONE_DOWN,
         .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
                     {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::BACK,
         .row_hint = RowSelectionHint::BOTTOM,
@@ -235,6 +238,7 @@
         .wrist_goal = kConeWrist,
         .game_piece = GamePiece::CONE_DOWN,
         .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
                     {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::FRONT,
         .row_hint = RowSelectionHint::BOTTOM,
@@ -318,7 +322,9 @@
         .index = arm::ScoreFrontLowCubeIndex(),
         .wrist_goal = kCubeWrist,
         .game_piece = GamePiece::CUBE,
-        .buttons = {{kLowCube, SpotSelectionHint::MIDDLE}},
+        .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
+                    {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::FRONT,
         .row_hint = RowSelectionHint::BOTTOM,
     },
@@ -326,7 +332,9 @@
         .index = arm::ScoreBackLowCubeIndex(),
         .wrist_goal = kCubeWrist,
         .game_piece = GamePiece::CUBE,
-        .buttons = {{kLowCube, SpotSelectionHint::MIDDLE}},
+        .buttons = {{kLowConeScoreLeft, SpotSelectionHint::LEFT},
+                    {kLowCube, SpotSelectionHint::MIDDLE},
+                    {kLowConeScoreRight, SpotSelectionHint::RIGHT}},
         .side = Side::BACK,
         .row_hint = RowSelectionHint::BOTTOM,
     },
@@ -374,6 +382,8 @@
 
   GamePiece current_game_piece_ = GamePiece::CONE_UP;
 
+  bool has_scored_ = false;
+
   void HandleTeleop(
       const ::frc971::input::driver_station::Data &data) override {
     superstructure_status_fetcher_.Fetch();
@@ -437,6 +447,7 @@
 
     // Ok, no active setpoint.  Search for the right one.
     if (current_setpoint_ == nullptr) {
+      has_scored_ = false;
       const Side current_side =
           data.IsPressed(kBack) ? Side::BACK : Side::FRONT;
       // Search for the active setpoint.
@@ -477,13 +488,16 @@
         // spit.
         if (std::abs(score_wrist_goal.value() -
                      superstructure_status_fetcher_->wrist()->goal_position()) <
-            0.1) {
+                0.1 ||
+            has_scored_) {
           if (place_index.has_value()) {
             arm_goal_position_ = place_index.value();
-            if (arm_goal_position_ ==
-                    superstructure_status_fetcher_->arm()->current_node() &&
-                superstructure_status_fetcher_->arm()->path_distance_to_go() <
-                    0.01) {
+            if ((arm_goal_position_ ==
+                     superstructure_status_fetcher_->arm()->current_node() &&
+                 superstructure_status_fetcher_->arm()->path_distance_to_go() <
+                     0.01) ||
+                has_scored_) {
+              has_scored_ = true;
               roller_goal = RollerGoal::SPIT;
             }
           } else {
@@ -513,7 +527,18 @@
         AOS_LOG(ERROR, "Sending superstructure goal failed.\n");
       }
     }
-    if (placing_row.has_value()) {
+    // TODO(james): Is there a more principled way to detect Human Player
+    // pickup? Probably don't bother fixing it until/unless we add more buttons
+    // that can select human player pickup.
+    if (data.IsPressed(kHPConePickup)) {
+      auto builder = target_selector_hint_sender_.MakeBuilder();
+      auto hint_builder = builder.MakeBuilder<TargetSelectorHint>();
+      hint_builder.add_substation_pickup(true);
+      hint_builder.add_robot_side(CHECK_NOTNULL(current_setpoint_)->side);
+      if (builder.Send(hint_builder.Finish()) != aos::RawSender::Error::kOk) {
+        AOS_LOG(ERROR, "Sending target selector hint failed.\n");
+      }
+    } else if (placing_row.has_value()) {
       auto builder = target_selector_hint_sender_.MakeBuilder();
       auto hint_builder = builder.MakeBuilder<TargetSelectorHint>();
       hint_builder.add_row(placing_row.value());
diff --git a/y2023/joystick_republish.cc b/y2023/joystick_republish.cc
new file mode 100644
index 0000000..9542001
--- /dev/null
+++ b/y2023/joystick_republish.cc
@@ -0,0 +1,33 @@
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include "aos/configuration.h"
+#include "aos/init.h"
+#include "aos/events/shm_event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/init.h"
+#include "frc971/input/joystick_state_generated.h"
+#include "glog/logging.h"
+
+DEFINE_string(config, "aos_config.json", "Config file to use.");
+
+int main(int argc, char *argv[]) {
+  aos::InitGoogle(&argc, &argv);
+
+  aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+  aos::ShmEventLoop event_loop(&config.message());
+
+  aos::Sender<aos::JoystickState> sender(
+      event_loop.MakeSender<aos::JoystickState>("/imu/aos"));
+
+  event_loop.MakeWatcher(
+      "/roborio/aos", [&](const aos::JoystickState &joystick_state) {
+        auto builder = sender.MakeBuilder();
+        flatbuffers::Offset<aos::JoystickState> state_fbs =
+            aos::CopyFlatBuffer(&joystick_state, builder.fbb());
+        builder.CheckOk(builder.Send(state_fbs));
+      });
+
+  event_loop.Run();
+  return 0;
+}
diff --git a/y2023/localizer/BUILD b/y2023/localizer/BUILD
index d97d4a3..7795462 100644
--- a/y2023/localizer/BUILD
+++ b/y2023/localizer/BUILD
@@ -1,5 +1,6 @@
 load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
 load("@com_github_google_flatbuffers//:typescript.bzl", "flatbuffer_ts_library")
+load("//tools/build_rules:js.bzl", "ts_project")
 
 flatbuffer_cc_library(
     name = "status_fbs",
@@ -176,3 +177,35 @@
         "//y2023/control_loops/drivetrain:drivetrain_base",
     ],
 )
+
+ts_project(
+    name = "corrections_plotter",
+    srcs = ["corrections_plotter.ts"],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":visualization_ts_fbs",
+        "//aos/network/www:aos_plotter",
+        "//aos/network/www:colors",
+        "//aos/network/www:proxy",
+    ],
+)
+
+cc_binary(
+    name = "localizer_replay",
+    srcs = ["localizer_replay.cc"],
+    data = [
+        "//y2023:aos_config",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    deps = [
+        ":localizer",
+        "//aos:configuration",
+        "//aos:init",
+        "//aos:json_to_flatbuffer",
+        "//aos/events:simulated_event_loop",
+        "//aos/events/logging:log_reader",
+        "//aos/events/logging:log_writer",
+        "//y2023/control_loops/drivetrain:drivetrain_base",
+    ],
+)
diff --git a/y2023/localizer/corrections_plotter.ts b/y2023/localizer/corrections_plotter.ts
new file mode 100644
index 0000000..a114f3f
--- /dev/null
+++ b/y2023/localizer/corrections_plotter.ts
@@ -0,0 +1,140 @@
+import {ByteBuffer} from 'flatbuffers';
+import {AosPlotter} from '../../aos/network/www/aos_plotter';
+import {MessageHandler, TimestampedMessage} from '../../aos/network/www/aos_plotter';
+import {BLUE, BROWN, CYAN, GREEN, PINK, RED, WHITE} from '../../aos/network/www/colors';
+import {Connection} from '../../aos/network/www/proxy';
+import {Table} from '../../aos/network/www/reflection';
+import {Schema} from 'flatbuffers_reflection/reflection_generated';
+import {Visualization, TargetEstimateDebug} from './visualization_generated';
+
+
+const TIME = AosPlotter.TIME;
+// magenta, yellow, cyan, black
+const PI_COLORS = [[255, 0, 255], [255, 255, 0], [0, 255, 255], [0, 0, 0]];
+
+class VisionMessageHandler extends MessageHandler {
+  constructor(private readonly schema: Schema) {
+    super(schema);
+  }
+
+  private readScalar(table: Table, fieldName: string): number|BigInt|null {
+    return this.parser.readScalar(table, fieldName);
+  }
+
+  addMessage(data: Uint8Array, time: number): void {
+    const message = Visualization.getRootAsVisualization(new ByteBuffer(data));
+    for (let ii = 0; ii < message.targetsLength(); ++ii) {
+      const target = message.targets(ii);
+      const time = Number(target.imageMonotonicTimestampNs()) * 1e-9;
+      if (time == 0) {
+        console.log('Dropping message without populated time?');
+        continue;
+      }
+      const table = Table.getNamedTable(
+          target.bb, this.schema, 'y2023.localizer.TargetEstimateDebug', target.bb_pos);
+      this.messages.push(new TimestampedMessage(table, time));
+    }
+  }
+}
+
+export function plotVision(conn: Connection, element: Element): void {
+  const aosPlotter = new AosPlotter(conn);
+
+  const targets = [];
+  for (const pi of ['pi1', 'pi2', 'pi3', 'pi4']) {
+    targets.push(aosPlotter.addRawMessageSource(
+        '/' + pi + '/camera', 'y2023.localizer.Visualization',
+        new VisionMessageHandler(
+            conn.getSchema('y2023.localizer.Visualization'))));
+  }
+  const localizerStatus = aosPlotter.addMessageSource(
+      '/localizer', 'y2023.localizer.Status');
+  const localizerOutput = aosPlotter.addMessageSource(
+      '/localizer', 'frc971.controls.LocalizerOutput');
+
+  const rejectionPlot = aosPlotter.addPlot(element);
+  rejectionPlot.plot.getAxisLabels().setTitle('Rejection Reasons');
+  rejectionPlot.plot.getAxisLabels().setXLabel(TIME);
+  rejectionPlot.plot.getAxisLabels().setYLabel('[bool, enum]');
+
+  rejectionPlot
+      .addMessageLine(localizerStatus, ['statistics[]', 'total_accepted'])
+      .setDrawLine(false)
+      .setColor(BLUE);
+  rejectionPlot
+      .addMessageLine(localizerStatus, ['statistics[]', 'total_candidates'])
+      .setDrawLine(false)
+      .setColor(RED);
+  for (let ii = 0; ii < targets.length; ++ii) {
+    rejectionPlot.addMessageLine(targets[ii], ['rejection_reason'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+
+  const xPlot = aosPlotter.addPlot(element);
+  xPlot.plot.getAxisLabels().setTitle('X Position');
+  xPlot.plot.getAxisLabels().setXLabel(TIME);
+  xPlot.plot.getAxisLabels().setYLabel('[m]');
+
+  for (let ii = 0; ii < targets.length; ++ii) {
+    xPlot.addMessageLine(targets[ii], ['implied_robot_x'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+  xPlot.addMessageLine(localizerOutput, ['x'])
+      .setDrawLine(false)
+      .setColor(BLUE);
+
+  const correctionXPlot = aosPlotter.addPlot(element);
+  correctionXPlot.plot.getAxisLabels().setTitle('X Corrections');
+  correctionXPlot.plot.getAxisLabels().setXLabel(TIME);
+  correctionXPlot.plot.getAxisLabels().setYLabel('[m]');
+
+  for (let ii = 0; ii < targets.length; ++ii) {
+    correctionXPlot.addMessageLine(targets[ii], ['correction_x'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+
+  const yPlot = aosPlotter.addPlot(element);
+  yPlot.plot.getAxisLabels().setTitle('Y Position');
+  yPlot.plot.getAxisLabels().setXLabel(TIME);
+  yPlot.plot.getAxisLabels().setYLabel('[m]');
+
+  for (let ii = 0; ii < targets.length; ++ii) {
+    yPlot.addMessageLine(targets[ii], ['implied_robot_y'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+  yPlot.addMessageLine(localizerOutput, ['y'])
+      .setDrawLine(false)
+      .setColor(BLUE);
+
+  const correctionYPlot = aosPlotter.addPlot(element);
+  correctionYPlot.plot.getAxisLabels().setTitle('Y Corrections');
+  correctionYPlot.plot.getAxisLabels().setXLabel(TIME);
+  correctionYPlot.plot.getAxisLabels().setYLabel('[m]');
+
+  for (let ii = 0; ii < targets.length; ++ii) {
+    correctionYPlot.addMessageLine(targets[ii], ['correction_y'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+
+  const aprilTagPlot = aosPlotter.addPlot(element);
+  aprilTagPlot.plot.getAxisLabels().setTitle('April Tag IDs');
+  aprilTagPlot.plot.getAxisLabels().setXLabel(TIME);
+  aprilTagPlot.plot.getAxisLabels().setYLabel('[id]');
+
+  for (let ii = 0; ii < targets.length; ++ii) {
+    aprilTagPlot.addMessageLine(targets[ii], ['april_tag'])
+        .setDrawLine(false)
+        .setColor(PI_COLORS[ii])
+        .setLabel('pi' + (ii + 1));
+  }
+}
diff --git a/y2023/localizer/localizer.cc b/y2023/localizer/localizer.cc
index ffc1df3..32f5604 100644
--- a/y2023/localizer/localizer.cc
+++ b/y2023/localizer/localizer.cc
@@ -12,6 +12,13 @@
 DEFINE_double(distortion_noise_scalar, 1.0,
               "Scale the target pose distortion factor by this when computing "
               "the noise.");
+DEFINE_double(
+    max_implied_yaw_error, 30.0,
+    "Reject target poses that imply a robot yaw of more than this many degrees "
+    "off from our estimate.");
+DEFINE_double(max_distance_to_target, 6.0,
+              "Reject target poses that have a 3d distance of more than this "
+              "many meters.");
 
 namespace y2023::localizer {
 namespace {
@@ -241,8 +248,13 @@
   builder.add_camera(camera_index);
   builder.add_image_age_sec(aos::time::DurationInSeconds(
       event_loop_->monotonic_now() - capture_time));
+  builder.add_image_monotonic_timestamp_ns(
+      std::chrono::duration_cast<std::chrono::nanoseconds>(
+          capture_time.time_since_epoch())
+          .count());
 
   const uint64_t target_id = target.id();
+  builder.add_april_tag(target_id);
   VLOG(2) << aos::FlatbufferToJson(&target);
   if (target_poses_.count(target_id) == 0) {
     VLOG(1) << "Rejecting target due to invalid ID " << target_id;
@@ -282,11 +294,21 @@
   builder.add_implied_robot_y(Z(Corrector::kY));
   builder.add_implied_robot_theta(Z(Corrector::kTheta));
 
+  Eigen::AngleAxisd rvec_camera_target(
+      Eigen::Affine3d(H_camera_target).rotation());
+  // Use y angle (around vertical axis) to compute skew
+  double skew = rvec_camera_target.axis().y() * rvec_camera_target.angle();
+  builder.add_skew(skew);
+
+  double distance_to_target =
+      Eigen::Affine3d(H_camera_target).translation().norm();
+
   // TODO(james): Tune this. Also, gain schedule for auto mode?
   Eigen::Matrix<double, 3, 1> noises(1.0, 1.0, 0.5);
   noises /= 4.0;
   // Scale noise by the distortion factor for this detection
-  noises *= (1.0 + FLAGS_distortion_noise_scalar * target.distortion_factor());
+  noises *= (1.0 + FLAGS_distortion_noise_scalar * target.distortion_factor() *
+                       std::exp(distance_to_target));
 
   Eigen::Matrix3d R = Eigen::Matrix3d::Zero();
   R.diagonal() = noises.cwiseAbs2();
@@ -309,8 +331,22 @@
                        &builder);
   }
 
+  double theta_at_capture = state_at_capture.value()(StateIdx::kTheta);
+  double camera_implied_theta = Z(Corrector::kTheta);
+  constexpr double kDegToRad = M_PI / 180.0;
+
+  if (std::abs(camera_implied_theta - theta_at_capture) >
+      FLAGS_max_implied_yaw_error * kDegToRad) {
+    return RejectImage(camera_index, RejectionReason::HIGH_IMPLIED_YAW_ERROR,
+                       &builder);
+  } else if (distance_to_target > FLAGS_max_distance_to_target) {
+    return RejectImage(camera_index, RejectionReason::HIGH_DISTANCE_TO_TARGET,
+                       &builder);
+  }
+
   const Input U = ekf_.MostRecentInput();
   VLOG(1) << "previous state " << ekf_.X_hat().topRows<3>().transpose();
+  const State prior_state = ekf_.X_hat();
   // For the correction step, instead of passing in the measurement directly,
   // we pass in (0, 0, 0) as the measurement and then for the expected
   // measurement (Zhat) we calculate the error between the pose implied by
@@ -322,6 +358,12 @@
   ++total_accepted_targets_;
   ++cameras_.at(camera_index).total_accepted_targets;
   VLOG(1) << "new state " << ekf_.X_hat().topRows<3>().transpose();
+  builder.add_correction_x(ekf_.X_hat()(StateIdx::kX) -
+                           prior_state(StateIdx::kX));
+  builder.add_correction_y(ekf_.X_hat()(StateIdx::kY) -
+                           prior_state(StateIdx::kY));
+  builder.add_correction_theta(ekf_.X_hat()(StateIdx::kTheta) -
+                               prior_state(StateIdx::kTheta));
   builder.add_accepted(true);
   return builder.Finish();
 }
@@ -363,21 +405,22 @@
 }
 
 flatbuffers::Offset<frc971::control_loops::drivetrain::LocalizerState>
-Localizer::PopulateState(flatbuffers::FlatBufferBuilder *fbb) const {
+Localizer::PopulateState(const State &X_hat,
+                         flatbuffers::FlatBufferBuilder *fbb) {
   frc971::control_loops::drivetrain::LocalizerState::Builder builder(*fbb);
-  builder.add_x(ekf_.X_hat(StateIdx::kX));
-  builder.add_y(ekf_.X_hat(StateIdx::kY));
-  builder.add_theta(ekf_.X_hat(StateIdx::kTheta));
-  builder.add_left_velocity(ekf_.X_hat(StateIdx::kLeftVelocity));
-  builder.add_right_velocity(ekf_.X_hat(StateIdx::kRightVelocity));
-  builder.add_left_encoder(ekf_.X_hat(StateIdx::kLeftEncoder));
-  builder.add_right_encoder(ekf_.X_hat(StateIdx::kRightEncoder));
-  builder.add_left_voltage_error(ekf_.X_hat(StateIdx::kLeftVoltageError));
-  builder.add_right_voltage_error(ekf_.X_hat(StateIdx::kRightVoltageError));
-  builder.add_angular_error(ekf_.X_hat(StateIdx::kAngularError));
+  builder.add_x(X_hat(StateIdx::kX));
+  builder.add_y(X_hat(StateIdx::kY));
+  builder.add_theta(X_hat(StateIdx::kTheta));
+  builder.add_left_velocity(X_hat(StateIdx::kLeftVelocity));
+  builder.add_right_velocity(X_hat(StateIdx::kRightVelocity));
+  builder.add_left_encoder(X_hat(StateIdx::kLeftEncoder));
+  builder.add_right_encoder(X_hat(StateIdx::kRightEncoder));
+  builder.add_left_voltage_error(X_hat(StateIdx::kLeftVoltageError));
+  builder.add_right_voltage_error(X_hat(StateIdx::kRightVoltageError));
+  builder.add_angular_error(X_hat(StateIdx::kAngularError));
   builder.add_longitudinal_velocity_offset(
-      ekf_.X_hat(StateIdx::kLongitudinalVelocityOffset));
-  builder.add_lateral_velocity(ekf_.X_hat(StateIdx::kLateralVelocity));
+      X_hat(StateIdx::kLongitudinalVelocityOffset));
+  builder.add_lateral_velocity(X_hat(StateIdx::kLateralVelocity));
   return builder.Finish();
 }
 
@@ -423,7 +466,7 @@
   auto down_estimator_offset =
       down_estimator_.PopulateStatus(builder.fbb(), t_);
   auto imu_offset = PopulateImu(builder.fbb());
-  auto state_offset = PopulateState(builder.fbb());
+  auto state_offset = PopulateState(ekf_.X_hat(), builder.fbb());
   Status::Builder status_builder = builder.MakeBuilder<Status>();
   status_builder.add_state(state_offset);
   status_builder.add_down_estimator(down_estimator_offset);
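Numerically, the gating and noise changes above work like this: with the default flags a detection is rejected outright if the camera-implied robot yaw is more than 30 degrees from the current estimate or the tag is more than 6 m away, and otherwise its measurement noise is inflated by (1 + distortion_noise_scalar * distortion_factor * exp(distance)). A standalone sketch of that arithmetic (not part of the localizer code; the sample numbers are made up):

    #include <cmath>
    #include <cstdio>

    int main() {
      constexpr double kMaxImpliedYawErrorDeg = 30.0;  // --max_implied_yaw_error default
      constexpr double kMaxDistanceToTarget = 6.0;     // --max_distance_to_target default
      constexpr double kDistortionNoiseScalar = 1.0;   // --distortion_noise_scalar default

      const double distance_to_target = 3.0;       // meters from camera to tag
      const double distortion_factor = 0.2;         // reported with the target estimate
      const double implied_yaw_error_deg = 10.0;    // |camera-implied theta - EKF theta|

      const bool rejected = implied_yaw_error_deg > kMaxImpliedYawErrorDeg ||
                            distance_to_target > kMaxDistanceToTarget;
      const double noise_scale = 1.0 + kDistortionNoiseScalar * distortion_factor *
                                           std::exp(distance_to_target);
      // For these numbers: accepted, with the base noise scaled by roughly 5x,
      // so distant or heavily distorted detections pull the estimate much less.
      std::printf("rejected=%d noise_scale=%.2f\n", rejected, noise_scale);
      return 0;
    }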
diff --git a/y2023/localizer/localizer.h b/y2023/localizer/localizer.h
index ba5b1cf..accb0c1 100644
--- a/y2023/localizer/localizer.h
+++ b/y2023/localizer/localizer.h
@@ -83,8 +83,8 @@
       TargetEstimateDebug::Builder *builder);
 
   void SendOutput();
-  flatbuffers::Offset<frc971::control_loops::drivetrain::LocalizerState>
-  PopulateState(flatbuffers::FlatBufferBuilder *fbb) const;
+  static flatbuffers::Offset<frc971::control_loops::drivetrain::LocalizerState>
+  PopulateState(const State &X_hat, flatbuffers::FlatBufferBuilder *fbb);
   flatbuffers::Offset<ImuStatus> PopulateImu(
       flatbuffers::FlatBufferBuilder *fbb) const;
   void SendStatus();
diff --git a/y2023/localizer/localizer_replay.cc b/y2023/localizer/localizer_replay.cc
new file mode 100644
index 0000000..7176f4e
--- /dev/null
+++ b/y2023/localizer/localizer_replay.cc
@@ -0,0 +1,100 @@
+#include "aos/configuration.h"
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/logging/log_writer.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/init.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/network/team_number.h"
+#include "y2023/localizer/localizer.h"
+#include "gflags/gflags.h"
+#include "y2023/control_loops/drivetrain/drivetrain_base.h"
+
+DEFINE_string(config, "y2023/aos_config.json",
+              "Name of the config file to replay using.");
+DEFINE_int32(team, 9971, "Team number to use for logfile replay.");
+DEFINE_string(output_folder, "/tmp/replayed",
+              "Name of the folder to write replayed logs to.");
+
+class LoggerState {
+ public:
+  LoggerState(aos::logger::LogReader *reader, const aos::Node *node)
+      : event_loop_(
+            reader->event_loop_factory()->MakeEventLoop("logger", node)),
+        namer_(std::make_unique<aos::logger::MultiNodeLogNamer>(
+            absl::StrCat(FLAGS_output_folder, "/", node->name()->string_view(),
+                         "/"),
+            event_loop_.get())),
+        logger_(std::make_unique<aos::logger::Logger>(event_loop_.get())) {
+    event_loop_->SkipTimingReport();
+    event_loop_->SkipAosLog();
+    event_loop_->OnRun([this]() { logger_->StartLogging(std::move(namer_)); });
+  }
+
+ private:
+  std::unique_ptr<aos::EventLoop> event_loop_;
+  std::unique_ptr<aos::logger::LogNamer> namer_;
+  std::unique_ptr<aos::logger::Logger> logger_;
+};
+
+int main(int argc, char **argv) {
+  aos::InitGoogle(&argc, &argv);
+
+  aos::network::OverrideTeamNumber(FLAGS_team);
+
+  const aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+      aos::configuration::ReadConfig(FLAGS_config);
+
+  // find logfiles
+  std::vector<std::string> unsorted_logfiles =
+      aos::logger::FindLogs(argc, argv);
+
+  // sort logfiles
+  const std::vector<aos::logger::LogFile> logfiles =
+      aos::logger::SortParts(unsorted_logfiles);
+
+  // open logfiles
+  aos::logger::LogReader reader(logfiles, &config.message());
+
+  reader.RemapLoggedChannel("/localizer",
+                            "y2023.localizer.Status");
+  for (const auto pi : {"pi1", "pi2", "pi3", "pi4"}) {
+    reader.RemapLoggedChannel(absl::StrCat("/", pi, "/camera"),
+                              "y2023.localizer.Visualization");
+  }
+  reader.RemapLoggedChannel("/localizer",
+                            "frc971.controls.LocalizerOutput");
+
+  auto factory =
+      std::make_unique<aos::SimulatedEventLoopFactory>(reader.configuration());
+
+  reader.Register(factory.get());
+
+  std::vector<std::unique_ptr<LoggerState>> loggers;
+  // List of nodes to create loggers for (note: currently just the imu node;
+  // this code was refactored to allow easily adding new loggers to
+  // accommodate debugging and potential future changes).
+  const std::vector<std::string> nodes_to_log = {"imu"};
+  for (const std::string &node : nodes_to_log) {
+    loggers.emplace_back(std::make_unique<LoggerState>(
+        &reader, aos::configuration::GetNode(reader.configuration(), node)));
+  }
+
+  const aos::Node *node = nullptr;
+  if (aos::configuration::MultiNode(reader.configuration())) {
+    node = aos::configuration::GetNode(reader.configuration(), "imu");
+  }
+
+  std::unique_ptr<aos::EventLoop> localizer_event_loop =
+      reader.event_loop_factory()->MakeEventLoop("localizer", node);
+  localizer_event_loop->SkipTimingReport();
+
+  y2023::localizer::Localizer localizer(
+      localizer_event_loop.get(),
+      y2023::control_loops::drivetrain::GetDrivetrainConfig());
+
+  reader.event_loop_factory()->Run();
+
+  reader.Deregister();
+
+  return 0;
+}
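
The replay binary above follows the standard AOS log-replay flow: sort the logfile parts, open them with a LogReader, remap the already-logged output channels (Status, Visualization, LocalizerOutput) so the replayed localizer can publish fresh copies, register a SimulatedEventLoopFactory, attach the localizer on the imu node, and run. A condensed sketch of that flow, with the channel remapping and per-node loggers omitted for brevity, is:

    #include <memory>
    #include <string>
    #include <vector>

    #include "aos/configuration.h"
    #include "aos/events/logging/log_reader.h"
    #include "aos/events/simulated_event_loop.h"
    #include "y2023/control_loops/drivetrain/drivetrain_base.h"
    #include "y2023/localizer/localizer.h"

    // Condensed replay flow, mirroring localizer_replay.cc above; channel
    // remapping and output logging are omitted.
    void ReplayLocalizer(const std::vector<std::string> &unsorted_logfiles,
                         const aos::Configuration *config) {
      aos::logger::LogReader reader(aos::logger::SortParts(unsorted_logfiles),
                                    config);
      auto factory = std::make_unique<aos::SimulatedEventLoopFactory>(
          reader.configuration());
      reader.Register(factory.get());

      const aos::Node *imu =
          aos::configuration::GetNode(reader.configuration(), "imu");
      std::unique_ptr<aos::EventLoop> event_loop =
          reader.event_loop_factory()->MakeEventLoop("localizer", imu);
      y2023::localizer::Localizer localizer(
          event_loop.get(),
          y2023::control_loops::drivetrain::GetDrivetrainConfig());

      reader.event_loop_factory()->Run();
      reader.Deregister();
    }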
diff --git a/y2023/localizer/localizer_test.cc b/y2023/localizer/localizer_test.cc
index 57b9dd0..7248b33 100644
--- a/y2023/localizer/localizer_test.cc
+++ b/y2023/localizer/localizer_test.cc
@@ -15,6 +15,7 @@
 DEFINE_string(output_folder, "",
               "If set, logs all channels to the provided logfile.");
 DECLARE_bool(die_on_malloc);
+DECLARE_double(max_distance_to_target);
 
 namespace y2023::localizer::testing {
 
@@ -73,6 +74,7 @@
                 ->MakeFetcher<frc971::controls::LocalizerOutput>("/localizer")),
         status_fetcher_(
             imu_test_event_loop_->MakeFetcher<Status>("/localizer")) {
+    FLAGS_max_distance_to_target = 100.0;
     FLAGS_die_on_malloc = true;
     {
       aos::TimerHandler *timer = roborio_test_event_loop_->AddTimer([this]() {
@@ -123,7 +125,7 @@
             const frc971::control_loops::Pose robot_pose(
                 {drivetrain_plant_.GetPosition().x(),
                  drivetrain_plant_.GetPosition().y(), 0.0},
-                drivetrain_plant_.state()(2, 0));
+                drivetrain_plant_.state()(2, 0) + implied_yaw_error_);
 
             const Eigen::Matrix<double, 4, 4> H_field_camera =
                 robot_pose.AsTransformationMatrix() * H_robot_camera;
@@ -275,6 +277,7 @@
 
   uint64_t send_target_id_ = kTargetId;
   double pose_error_ = 1e-7;
+  double implied_yaw_error_ = 0.0;
 
   gflags::FlagSaver flag_saver_;
 };
@@ -483,4 +486,25 @@
             status_fetcher_->statistics()->Get(0)->total_candidates());
 }
 
+// Tests that we correctly reject a detection with a high implied yaw error.
+TEST_F(LocalizerTest, HighImpliedYawError) {
+  output_voltages_ << 0.0, 0.0;
+  send_targets_ = true;
+  implied_yaw_error_ = 31.0 * M_PI / 180.0;
+
+  event_loop_factory_.RunFor(std::chrono::seconds(4));
+  CHECK(status_fetcher_.Fetch());
+  ASSERT_TRUE(status_fetcher_->has_statistics());
+  ASSERT_EQ(4u /* number of cameras */, status_fetcher_->statistics()->size());
+  ASSERT_EQ(0, status_fetcher_->statistics()->Get(0)->total_accepted());
+  ASSERT_LT(10, status_fetcher_->statistics()->Get(0)->total_candidates());
+  ASSERT_EQ(
+      status_fetcher_->statistics()
+          ->Get(0)
+          ->rejection_reasons()
+          ->Get(static_cast<size_t>(RejectionReason::HIGH_IMPLIED_YAW_ERROR))
+          ->count(),
+      status_fetcher_->statistics()->Get(0)->total_candidates());
+}
+
 }  // namespace y2023::localizer::testing
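
The HighImpliedYawError test above injects a 31 degree offset into the simulated robot yaw used to generate target detections and expects every candidate to be rejected with HIGH_IMPLIED_YAW_ERROR. Conceptually, the localizer derives the robot yaw implied by each camera detection and compares it against the EKF's current yaw estimate. A minimal sketch of that comparison follows; the 30 degree cutoff and the helper name are assumptions for illustration, not the localizer's actual flag:

    #include <cmath>

    // Sketch only: reject a detection whose implied robot yaw differs from the
    // EKF yaw estimate by more than a threshold. The 30 degree cutoff is an
    // assumption chosen so the 31 degree error injected above gets rejected.
    bool ImpliedYawAcceptable(double implied_yaw, double ekf_yaw) {
      constexpr double kMaxImpliedYawError = 30.0 * M_PI / 180.0;  // assumed
      // Wrap the difference into [-pi, pi] before comparing magnitudes.
      const double error = std::remainder(implied_yaw - ekf_yaw, 2.0 * M_PI);
      return std::abs(error) < kMaxImpliedYawError;
    }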
diff --git a/y2023/localizer/status.fbs b/y2023/localizer/status.fbs
index 1b5c6f6..ded492e 100644
--- a/y2023/localizer/status.fbs
+++ b/y2023/localizer/status.fbs
@@ -17,6 +17,11 @@
   NO_SUCH_TARGET = 3,
   // Pose estimation error was higher than any normal detection.
   HIGH_POSE_ERROR = 4,
+  // Pose estimate implied a robot yaw far off from our estimate.
+  HIGH_IMPLIED_YAW_ERROR = 5,
+  // Pose estimate had a large distance to the target; we don't trust
+  // estimates from very far away.
+  HIGH_DISTANCE_TO_TARGET = 6,
 }
 
 table RejectionCount {
diff --git a/y2023/localizer/visualization.fbs b/y2023/localizer/visualization.fbs
index 2785872..8fa03f0 100644
--- a/y2023/localizer/visualization.fbs
+++ b/y2023/localizer/visualization.fbs
@@ -15,6 +15,18 @@
   // Image age (more human-readable than trying to interpret raw nanosecond
   // values).
   image_age_sec:double (id: 9);
+  // Time at which the image was captured.
+  image_monotonic_timestamp_ns:uint64 (id: 10);
+  // April tag ID used for this image detection.
+  april_tag:uint (id: 11);
+  // If the image was accepted, the total correction that occurred as a result.
+  // Each value is equal to the value after the correction minus the value
+  // before it.
+  correction_x: double (id: 12);
+  correction_y: double (id: 13);
+  correction_theta: double (id: 14);
+  // The angle between the camera axis and target normal.
+  skew:double (id: 15);
 }
 
 table Visualization {
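
The new correction_x, correction_y, and correction_theta fields follow the "value after the correction minus the value before" convention described in the comment above. A trivial illustration of that convention (the struct and function names here are hypothetical, not part of the flatbuffer API):

    // Hypothetical helper illustrating the convention above: each correction
    // field is the post-correction value minus the pre-correction value.
    struct StateSample {
      double x, y, theta;
    };

    StateSample CorrectionDelta(const StateSample &before,
                                const StateSample &after) {
      return {after.x - before.x, after.y - before.y,
              after.theta - before.theta};
    }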
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index bd1c5fd..95ad541 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -11,7 +11,7 @@
 DEFINE_int32(pixel_border, 10,
              "Size of image border within which to reject detected corners");
 DEFINE_double(
-    max_expected_distortion, 0.0005,
+    max_expected_distortion, 0.314,
     "Maximum expected value for unscaled distortion factors. Will scale "
     "distortion factors so that this value (and a higher distortion) maps to "
     "1.0.");
@@ -36,7 +36,8 @@
       target_map_sender_(
           event_loop->MakeSender<frc971::vision::TargetMap>("/camera")),
       image_annotations_sender_(
-          event_loop->MakeSender<foxglove::ImageAnnotations>("/camera")) {
+          event_loop->MakeSender<foxglove::ImageAnnotations>("/camera")),
+      rejections_(0) {
   tag_family_ = tag16h5_create();
   tag_detector_ = apriltag_detector_create();
 
@@ -89,17 +90,18 @@
                                         aos::monotonic_clock::time_point eof) {
   image_size_ = image_grayscale.size();
 
-  std::vector<Detection> detections = DetectTags(image_grayscale, eof);
+  DetectionResult result = DetectTags(image_grayscale, eof);
 
   auto builder = target_map_sender_.MakeBuilder();
   std::vector<flatbuffers::Offset<frc971::vision::TargetPoseFbs>> target_poses;
-  for (const auto &detection : detections) {
+  for (const auto &detection : result.detections) {
     target_poses.emplace_back(BuildTargetPose(detection, builder.fbb()));
   }
   const auto target_poses_offset = builder.fbb()->CreateVector(target_poses);
   auto target_map_builder = builder.MakeBuilder<frc971::vision::TargetMap>();
   target_map_builder.add_target_poses(target_poses_offset);
   target_map_builder.add_monotonic_timestamp_ns(eof.time_since_epoch().count());
+  target_map_builder.add_rejections(result.rejections);
   builder.CheckOk(builder.Send(target_map_builder.Finish()));
 }
 
@@ -158,11 +160,11 @@
   }
   avg_distance /= corners.size();
 
-  // Normalize avg_distance by dividing by the image size, and then the maximum
-  // expected distortion
+  // Normalize avg_distance by dividing by the image diagonal, and then by
+  // the maximum expected distortion.
   double distortion_factor =
       avg_distance /
-      static_cast<double>(image_size_.width * image_size_.height);
+      cv::norm(cv::Point2d(image_size_.width, image_size_.height));
   return std::min(distortion_factor / FLAGS_max_expected_distortion, 1.0);
 }
 
@@ -177,7 +179,7 @@
   return corner_points;
 }
 
-std::vector<AprilRoboticsDetector::Detection> AprilRoboticsDetector::DetectTags(
+AprilRoboticsDetector::DetectionResult AprilRoboticsDetector::DetectTags(
     cv::Mat image, aos::monotonic_clock::time_point eof) {
   const aos::monotonic_clock::time_point start_time =
       aos::monotonic_clock::now();
@@ -273,6 +275,8 @@
                                      .pose = pose,
                                      .pose_error = pose_error,
                                      .distortion_factor = distortion_factor});
+    } else {
+      rejections_++;
     }
   }
 
@@ -292,7 +296,7 @@
   VLOG(1) << "Took " << chrono::duration<double>(end_time - start_time).count()
           << " seconds to detect overall";
 
-  return results;
+  return {.detections = results, .rejections = rejections_};
 }
 
 }  // namespace vision
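
Two behavioral changes above are easy to miss: the distortion factor is now normalized by the image diagonal (cv::norm of the (width, height) point) instead of by width * height, and detections that fail the acceptance check in DetectTags are counted and published via the new rejections field on TargetMap. A hedged sketch of the updated normalization, using the 0.314 default from the flag above:

    #include <algorithm>
    #include <cmath>

    // Sketch of the normalization above: the average corner displacement is
    // divided by the image diagonal, then scaled so that
    // max_expected_distortion (and anything beyond it) maps to 1.0.
    double DistortionFactor(double avg_corner_distance, int width, int height,
                            double max_expected_distortion = 0.314) {
      const double diagonal =
          std::hypot(static_cast<double>(width), static_cast<double>(height));
      return std::min(avg_corner_distance / diagonal / max_expected_distortion,
                      1.0);
    }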
diff --git a/y2023/vision/aprilrobotics.h b/y2023/vision/aprilrobotics.h
index bf9265b..fab2d30 100644
--- a/y2023/vision/aprilrobotics.h
+++ b/y2023/vision/aprilrobotics.h
@@ -31,6 +31,11 @@
     double distortion_factor;
   };
 
+  struct DetectionResult {
+    std::vector<Detection> detections;
+    size_t rejections;
+  };
+
   AprilRoboticsDetector(aos::EventLoop *event_loop,
                         std::string_view channel_name);
   ~AprilRoboticsDetector();
@@ -43,8 +48,8 @@
   // Helper function to store detection points in vector of Point2f's
   std::vector<cv::Point2f> MakeCornerVector(const apriltag_detection_t *det);
 
-  std::vector<Detection> DetectTags(cv::Mat image,
-                                    aos::monotonic_clock::time_point eof);
+  DetectionResult DetectTags(cv::Mat image,
+                             aos::monotonic_clock::time_point eof);
 
   const std::optional<cv::Mat> extrinsics() const { return extrinsics_; }
   const cv::Mat intrinsics() const { return intrinsics_; }
@@ -78,6 +83,8 @@
   frc971::vision::ImageCallback image_callback_;
   aos::Sender<frc971::vision::TargetMap> target_map_sender_;
   aos::Sender<foxglove::ImageAnnotations> image_annotations_sender_;
+
+  size_t rejections_;
 };
 
 }  // namespace vision
diff --git a/y2023/vision/camera_reader.cc b/y2023/vision/camera_reader.cc
index 7de3eb5..b526086 100644
--- a/y2023/vision/camera_reader.cc
+++ b/y2023/vision/camera_reader.cc
@@ -12,6 +12,7 @@
 
 DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
 DEFINE_bool(lowlight_camera, true, "Switch to use imx462 image sensor.");
+DEFINE_int32(gain, 200, "Analogue gain to use for the lowlight image sensor.");
 
 DEFINE_double(red, 1.252, "Red gain");
 DEFINE_double(green, 1, "Green gain");
@@ -117,7 +118,7 @@
                                           rkisp1_selfpath->device(),
                                           camera->device());
   if (FLAGS_lowlight_camera) {
-    v4l2_reader_selfpath.SetGainExt(100);
+    v4l2_reader_selfpath.SetGainExt(FLAGS_gain);
     v4l2_reader_selfpath.SetVerticalBlanking(1000);
     v4l2_reader_selfpath.SetExposure(FLAGS_exposure);
   } else {
diff --git a/y2023/vision/image_logger.cc b/y2023/vision/image_logger.cc
index b87cec0..d90f9c2 100644
--- a/y2023/vision/image_logger.cc
+++ b/y2023/vision/image_logger.cc
@@ -62,7 +62,7 @@
   });
 
   event_loop.MakeWatcher(
-      "/roborio/aos", [&](const aos::JoystickState &joystick_state) {
+      "/imu/aos", [&](const aos::JoystickState &joystick_state) {
         const auto timestamp = event_loop.context().monotonic_event_time;
         // Store the last time we got disabled
         if (enabled && !joystick_state.enabled()) {
diff --git a/y2023/wpilib_interface.cc b/y2023/wpilib_interface.cc
index 033d3d7..cd52cfd 100644
--- a/y2023/wpilib_interface.cc
+++ b/y2023/wpilib_interface.cc
@@ -56,6 +56,7 @@
 #include "y2023/can_configuration_generated.h"
 #include "y2023/constants.h"
 #include "y2023/control_loops/drivetrain/drivetrain_can_position_generated.h"
+#include "y2023/control_loops/superstructure/led_indicator.h"
 #include "y2023/control_loops/superstructure/superstructure_output_generated.h"
 #include "y2023/control_loops/superstructure/superstructure_position_generated.h"
 
@@ -1026,6 +1027,8 @@
 
     AddLoop(&can_output_event_loop);
 
+    // Thread 6
+    // Set up the superstructure output writer.
     ::aos::ShmEventLoop output_event_loop(&config.message());
     output_event_loop.set_name("PWMOutputWriter");
     SuperstructureWriter superstructure_writer(&output_event_loop);
@@ -1041,6 +1044,14 @@
 
     AddLoop(&output_event_loop);
 
+    // Thread 7
+    // Set up the led_indicator loop.
+    ::aos::ShmEventLoop led_indicator_event_loop(&config.message());
+    led_indicator_event_loop.set_name("LedIndicator");
+    control_loops::superstructure::LedIndicator led_indicator(
+        &led_indicator_event_loop);
+    AddLoop(&led_indicator_event_loop);
+
     RunLoops();
   }
 };
diff --git a/y2023/www/BUILD b/y2023/www/BUILD
index 63089b5..404247e 100644
--- a/y2023/www/BUILD
+++ b/y2023/www/BUILD
@@ -28,11 +28,14 @@
     target_compatible_with = ["@platforms//os:linux"],
     deps = [
         "//aos/network:connect_ts_fbs",
+        "//aos/network:message_bridge_client_ts_fbs",
+        "//aos/network:message_bridge_server_ts_fbs",
         "//aos/network:web_proxy_ts_fbs",
         "//aos/network/www:proxy",
         "//frc971/control_loops:control_loops_ts_fbs",
         "//frc971/control_loops/drivetrain:drivetrain_status_ts_fbs",
         "//frc971/control_loops/drivetrain/localization:localizer_output_ts_fbs",
+        "//frc971/vision:target_map_ts_fbs",
         "//y2023/control_loops/superstructure:superstructure_status_ts_fbs",
         "//y2023/localizer:status_ts_fbs",
         "//y2023/localizer:visualization_ts_fbs",
diff --git a/y2023/www/field.html b/y2023/www/field.html
index a63c6f5..cc89bb9 100644
--- a/y2023/www/field.html
+++ b/y2023/www/field.html
@@ -36,66 +36,76 @@
       </table>
 
       <table>
-	      <tr>
-		      <th colspan="2">Superstructure</th>
-		</tr>
-		<tr>
-			<td>End Effector State</td>
-			<td id="end_effector_state"> NA </td>
-		</tr>
-		<tr>
-			<td>Wrist</td>
-			<td id="wrist"> NA </td>
-		</tr>
-	</table>
-	<table>
-		<tr>
-			<th colspan="2">Game Piece</th>
-		</tr>
-		<tr>
-			<td>Game Piece Held</td>
-			<td id="game_piece"> NA </td>
-		</tr>
-	</table>
+        <tr>
+          <th colspan="2">Superstructure</th>
+    </tr>
+    <tr>
+      <td>End Effector State</td>
+      <td id="end_effector_state"> NA </td>
+    </tr>
+    <tr>
+      <td>Wrist</td>
+      <td id="wrist"> NA </td>
+    </tr>
+  </table>
+  <table>
+    <tr>
+      <th colspan="2">Game Piece</th>
+    </tr>
+    <tr>
+      <td>Game Piece Held</td>
+      <td id="game_piece"> NA </td>
+    </tr>
+  </table>
 
-	<table>
-		<tr>
-			<th colspan="2">Arm</th>
-		</tr>
-		<tr>
-			<td>State</td>
-			<td id="arm_state"> NA </td>
-		</tr>
-		<tr>
-			<td>X</td>
-			<td id="arm_x"> NA </td>
-		</tr>
-		<tr>
-			<td>Y</td>
-			<td id="arm_y"> NA </td>
-		</tr>
-		<tr>
-			<td>Circular Index</td>
-			<td id="arm_circular_index"> NA </td>
-		</tr>
-		<tr>
-			<td>Roll</td>
-			<td id="arm_roll"> NA </td>
-		</tr>
-		<tr>
-			<td>Proximal</td>
-			<td id="arm_proximal"> NA </td>
-		</tr>
-		<tr>
-			<td>Distal</td>
-			<td id="arm_distal"> NA </td>
-		</tr>
-	</table>
-	<h3>Zeroing Faults:</h3>
-	<p id="zeroing_faults"> NA </p>
-    </div>
+  <table>
+    <tr>
+      <th colspan="2">Arm</th>
+    </tr>
+    <tr>
+      <td>State</td>
+      <td id="arm_state"> NA </td>
+    </tr>
+    <tr>
+      <td>X</td>
+      <td id="arm_x"> NA </td>
+    </tr>
+    <tr>
+      <td>Y</td>
+      <td id="arm_y"> NA </td>
+    </tr>
+    <tr>
+      <td>Circular Index</td>
+      <td id="arm_circular_index"> NA </td>
+    </tr>
+    <tr>
+      <td>Roll</td>
+      <td id="arm_roll"> NA </td>
+    </tr>
+    <tr>
+      <td>Proximal</td>
+      <td id="arm_proximal"> NA </td>
+    </tr>
+    <tr>
+      <td>Distal</td>
+      <td id="arm_distal"> NA </td>
+    </tr>
+  </table>
+
+  <h3>Zeroing Faults:</h3>
+  <p id="zeroing_faults"> NA </p>
+  </div>
+  <div id="middle_readouts">
     <div id="vision_readouts">
     </div>
+    <div id="message_bridge_status">
+      <div>
+        <div>Node</div>
+        <div>Client</div>
+        <div>Server</div>
+      </div>
+    </div>
+  </div>
   </body>
 </html>
 
diff --git a/y2023/www/field_handler.ts b/y2023/www/field_handler.ts
index 65b9a20..2f62c7d 100644
--- a/y2023/www/field_handler.ts
+++ b/y2023/www/field_handler.ts
@@ -1,12 +1,15 @@
-import {ByteBuffer} from 'flatbuffers';
-import {Connection} from '../../aos/network/www/proxy';
-import {LocalizerOutput} from '../../frc971/control_loops/drivetrain/localization/localizer_output_generated';
-import {RejectionReason} from '../localizer/status_generated';
-import {Status as DrivetrainStatus} from '../../frc971/control_loops/drivetrain/drivetrain_status_generated';
-import {Status as SuperstructureStatus, EndEffectorState, ArmState, ArmStatus} from '../control_loops/superstructure/superstructure_status_generated'
+import {ByteBuffer} from 'flatbuffers'
+import {ClientStatistics} from '../../aos/network/message_bridge_client_generated'
+import {ServerStatistics, State as ConnectionState} from '../../aos/network/message_bridge_server_generated'
+import {Connection} from '../../aos/network/www/proxy'
+import {ZeroingError} from '../../frc971/control_loops/control_loops_generated'
+import {Status as DrivetrainStatus} from '../../frc971/control_loops/drivetrain/drivetrain_status_generated'
+import {LocalizerOutput} from '../../frc971/control_loops/drivetrain/localization/localizer_output_generated'
+import {TargetMap} from '../../frc971/vision/target_map_generated'
+import {ArmState, ArmStatus, EndEffectorState, Status as SuperstructureStatus} from '../control_loops/superstructure/superstructure_status_generated'
+import {RejectionReason} from '../localizer/status_generated'
+import {TargetEstimateDebug, Visualization} from '../localizer/visualization_generated'
 import {Class} from '../vision/game_pieces_generated'
-import {ZeroingError} from '../../frc971/control_loops/control_loops_generated';
-import {Visualization, TargetEstimateDebug} from '../localizer/visualization_generated';
 
 import {FIELD_LENGTH, FIELD_WIDTH, FT_TO_M, IN_TO_M} from './constants';
 
@@ -35,36 +38,54 @@
       (document.getElementById('theta') as HTMLElement);
   private imagesAcceptedCounter: HTMLElement =
       (document.getElementById('images_accepted') as HTMLElement);
-  private rejectionReasonCells: HTMLElement[] = [];
+  // HTML elements for rejection reasons for individual pis. Indices
+  // correspond to the RejectionReason enum values; the final row accounts for
+  // images rejected by the aprilrobotics detector instead of the localizer.
+  private rejectionReasonCells: HTMLElement[][] = [];
+  private messageBridgeDiv: HTMLElement =
+      (document.getElementById('message_bridge_status') as HTMLElement);
+  private clientStatuses = new Map<string, HTMLElement>();
+  private serverStatuses = new Map<string, HTMLElement>();
   private fieldImage: HTMLImageElement = new Image();
   private endEffectorState: HTMLElement =
-	  (document.getElementById('end_effector_state') as HTMLElement);
+      (document.getElementById('end_effector_state') as HTMLElement);
   private wrist: HTMLElement =
-	  (document.getElementById('wrist') as HTMLElement);
+      (document.getElementById('wrist') as HTMLElement);
   private armState: HTMLElement =
-	  (document.getElementById('arm_state') as HTMLElement);
+      (document.getElementById('arm_state') as HTMLElement);
   private gamePiece: HTMLElement =
-	  (document.getElementById('game_piece') as HTMLElement);
-  private armX: HTMLElement =
-	  (document.getElementById('arm_x') as HTMLElement);
-  private armY: HTMLElement =
-	  (document.getElementById('arm_y') as HTMLElement);
+      (document.getElementById('game_piece') as HTMLElement);
+  private armX: HTMLElement = (document.getElementById('arm_x') as HTMLElement);
+  private armY: HTMLElement = (document.getElementById('arm_y') as HTMLElement);
   private circularIndex: HTMLElement =
-	  (document.getElementById('arm_circular_index') as HTMLElement);
+      (document.getElementById('arm_circular_index') as HTMLElement);
   private roll: HTMLElement =
-	  (document.getElementById('arm_roll') as HTMLElement);
+      (document.getElementById('arm_roll') as HTMLElement);
   private proximal: HTMLElement =
-	  (document.getElementById('arm_proximal') as HTMLElement);
+      (document.getElementById('arm_proximal') as HTMLElement);
   private distal: HTMLElement =
-	  (document.getElementById('arm_distal') as HTMLElement);
+      (document.getElementById('arm_distal') as HTMLElement);
   private zeroingFaults: HTMLElement =
-	  (document.getElementById('zeroing_faults') as HTMLElement);_
-
+      (document.getElementById('zeroing_faults') as HTMLElement);
   constructor(private readonly connection: Connection) {
     (document.getElementById('field') as HTMLElement).appendChild(this.canvas);
 
-    this.fieldImage.src = "2023.png";
+    this.fieldImage.src = '2023.png';
 
+    // Construct a table header.
+    {
+      const row = document.createElement('div');
+      const nameCell = document.createElement('div');
+      nameCell.innerHTML = 'Rejection Reason';
+      row.appendChild(nameCell);
+      for (const pi of PIS) {
+        const nodeCell = document.createElement('div');
+        nodeCell.innerHTML = pi;
+        row.appendChild(nodeCell);
+      }
+      document.getElementById('vision_readouts').appendChild(row);
+    }
     for (const value in RejectionReason) {
       // Typescript generates an iterator that produces both numbers and
       // strings... don't do anything on the string iterations.
@@ -75,10 +96,31 @@
       const nameCell = document.createElement('div');
       nameCell.innerHTML = RejectionReason[value];
       row.appendChild(nameCell);
-      const valueCell = document.createElement('div');
-      valueCell.innerHTML = 'NA';
-      this.rejectionReasonCells.push(valueCell);
-      row.appendChild(valueCell);
+      this.rejectionReasonCells.push([]);
+      for (const pi of PIS) {
+        const valueCell = document.createElement('div');
+        valueCell.innerHTML = 'NA';
+        this.rejectionReasonCells[this.rejectionReasonCells.length - 1].push(
+            valueCell);
+        row.appendChild(valueCell);
+      }
+      document.getElementById('vision_readouts').appendChild(row);
+    }
+
+    // Add rejection reason row for aprilrobotics rejections.
+    {
+      const row = document.createElement('div');
+      const nameCell = document.createElement('div');
+      nameCell.innerHTML = 'Rejected by aprilrobotics';
+      row.appendChild(nameCell);
+      this.rejectionReasonCells.push([]);
+      for (const pi of PIS) {
+        const valueCell = document.createElement('div');
+        valueCell.innerHTML = 'NA';
+        this.rejectionReasonCells[this.rejectionReasonCells.length - 1].push(
+            valueCell);
+        row.appendChild(valueCell);
+      }
       document.getElementById('vision_readouts').appendChild(row);
     }
 
@@ -94,28 +136,39 @@
       // matches.
       for (const pi in PIS) {
         this.connection.addReliableHandler(
-            '/' + PIS[pi] + '/camera', "y2023.localizer.Visualization",
+            '/' + PIS[pi] + '/camera', 'y2023.localizer.Visualization',
             (data) => {
-              this.handleLocalizerDebug(pi, data);
+              this.handleLocalizerDebug(Number(pi), data);
+            });
+      }
+      for (const pi in PIS) {
+        // Make unreliable to reduce network spam.
+        this.connection.addHandler(
+            '/' + PIS[pi] + '/camera', 'frc971.vision.TargetMap', (data) => {
+              this.handlePiTargetMap(pi, data);
             });
       }
       this.connection.addHandler(
-          '/drivetrain', "frc971.control_loops.drivetrain.Status", (data) => {
+          '/drivetrain', 'frc971.control_loops.drivetrain.Status', (data) => {
             this.handleDrivetrainStatus(data);
           });
       this.connection.addHandler(
-               '/localizer', "frc971.controls.LocalizerOutput", (data) => {
+          '/localizer', 'frc971.controls.LocalizerOutput', (data) => {
             this.handleLocalizerOutput(data);
           });
-	this.connection.addHandler(
-		'/superstructure', "y2023.control_loops.superstructure.Status",
-		(data) => {
-			this.handleSuperstructureStatus(data)
-		});
+      this.connection.addHandler(
+          '/superstructure', 'y2023.control_loops.superstructure.Status',
+          (data) => {this.handleSuperstructureStatus(data)});
+      this.connection.addHandler(
+          '/aos', 'aos.message_bridge.ServerStatistics',
+          (data) => {this.handleServerStatistics(data)});
+      this.connection.addHandler(
+          '/aos', 'aos.message_bridge.ClientStatistics',
+          (data) => {this.handleClientStatistics(data)});
     });
   }
 
-  private handleLocalizerDebug(pi: string, data: Uint8Array): void {
+  private handleLocalizerDebug(pi: number, data: Uint8Array): void {
     const now = Date.now() / 1000.0;
 
     const fbBuffer = new ByteBuffer(data);
@@ -125,11 +178,11 @@
     const debug = this.localizerImageMatches.get(now);
 
     if (debug.statistics()) {
-      if (debug.statistics().rejectionReasonsLength() ==
+      if ((debug.statistics().rejectionReasonsLength() + 1) ==
           this.rejectionReasonCells.length) {
         for (let ii = 0; ii < debug.statistics().rejectionReasonsLength();
              ++ii) {
-          this.rejectionReasonCells[ii].innerHTML =
+          this.rejectionReasonCells[ii][pi].innerHTML =
               debug.statistics().rejectionReasons(ii).count().toString();
         }
       } else {
@@ -138,6 +191,13 @@
     }
   }
 
+  private handlePiTargetMap(pi: string, data: Uint8Array): void {
+    const fbBuffer = new ByteBuffer(data);
+    const targetMap = TargetMap.getRootAsTargetMap(fbBuffer);
+    this.rejectionReasonCells[this.rejectionReasonCells.length - 1][pi]
+        .innerHTML = targetMap.rejections().toString();
+  }
+
   private handleLocalizerOutput(data: Uint8Array): void {
     const fbBuffer = new ByteBuffer(data);
     this.localizerOutput = LocalizerOutput.getRootAsLocalizerOutput(fbBuffer);
@@ -149,8 +209,69 @@
   }
 
   private handleSuperstructureStatus(data: Uint8Array): void {
-	  const fbBuffer = new ByteBuffer(data);
-	  this.superstructureStatus = SuperstructureStatus.getRootAsStatus(fbBuffer);
+    const fbBuffer = new ByteBuffer(data);
+    this.superstructureStatus = SuperstructureStatus.getRootAsStatus(fbBuffer);
+  }
+
+  private populateNodeConnections(nodeName: string): void {
+    const row = document.createElement('div');
+    this.messageBridgeDiv.appendChild(row);
+    const nodeDiv = document.createElement('div');
+    nodeDiv.innerHTML = nodeName;
+    row.appendChild(nodeDiv);
+    const clientDiv = document.createElement('div');
+    clientDiv.innerHTML = 'N/A';
+    row.appendChild(clientDiv);
+    const serverDiv = document.createElement('div');
+    serverDiv.innerHTML = 'N/A';
+    row.appendChild(serverDiv);
+    this.serverStatuses.set(nodeName, serverDiv);
+    this.clientStatuses.set(nodeName, clientDiv);
+  }
+
+  private setCurrentNodeState(element: HTMLElement, state: ConnectionState):
+      void {
+    if (state === ConnectionState.CONNECTED) {
+      element.innerHTML = ConnectionState[state];
+      element.classList.remove('faulted');
+      element.classList.add('connected');
+    } else {
+      element.innerHTML = ConnectionState[state];
+      element.classList.remove('connected');
+      element.classList.add('faulted');
+    }
+  }
+
+  private handleServerStatistics(data: Uint8Array): void {
+    const fbBuffer = new ByteBuffer(data);
+    const serverStatistics =
+        ServerStatistics.getRootAsServerStatistics(fbBuffer);
+
+    for (let ii = 0; ii < serverStatistics.connectionsLength(); ++ii) {
+      const connection = serverStatistics.connections(ii);
+      const nodeName = connection.node().name();
+      if (!this.serverStatuses.has(nodeName)) {
+        this.populateNodeConnections(nodeName);
+      }
+      this.setCurrentNodeState(
+          this.serverStatuses.get(nodeName), connection.state());
+    }
+  }
+
+  private handleClientStatistics(data: Uint8Array): void {
+    const fbBuffer = new ByteBuffer(data);
+    const clientStatistics =
+        ClientStatistics.getRootAsClientStatistics(fbBuffer);
+
+    for (let ii = 0; ii < clientStatistics.connectionsLength(); ++ii) {
+      const connection = clientStatistics.connections(ii);
+      const nodeName = connection.node().name();
+      if (!this.clientStatuses.has(nodeName)) {
+        this.populateNodeConnections(nodeName);
+      }
+      this.setCurrentNodeState(
+          this.clientStatuses.get(nodeName), connection.state());
+    }
   }
 
   drawField(): void {
@@ -163,8 +284,8 @@
     ctx.restore();
   }
 
-  drawCamera(
-      x: number, y: number, theta: number, color: string = 'blue'): void {
+  drawCamera(x: number, y: number, theta: number, color: string = 'blue'):
+      void {
     const ctx = this.canvas.getContext('2d');
     ctx.save();
     ctx.translate(x, y);
@@ -182,8 +303,8 @@
   }
 
   drawRobot(
-      x: number, y: number, theta: number,
-      color: string = 'blue', dashed: boolean = false): void {
+      x: number, y: number, theta: number, color: string = 'blue',
+      dashed: boolean = false): void {
     const ctx = this.canvas.getContext('2d');
     ctx.save();
     ctx.translate(x, y);
@@ -216,22 +337,22 @@
   }
 
   setEstopped(div: HTMLElement): void {
-	  div.innerHTML = 'estopped';
-	  div.classList.add('faulted');
-	  div.classList.remove('zeroing');
-	  div.classList.remove('near');
+    div.innerHTML = 'estopped';
+    div.classList.add('faulted');
+    div.classList.remove('zeroing');
+    div.classList.remove('near');
   }
 
   setTargetValue(
-	  div: HTMLElement, target: number, val: number, tolerance: number): void {
-	  div.innerHTML = val.toFixed(4);
-	  div.classList.remove('faulted');
-	  div.classList.remove('zeroing');
-	  if (Math.abs(target - val) < tolerance) {
-		  div.classList.add('near');
-	  } else {
-		  div.classList.remove('near');
-	  }
+      div: HTMLElement, target: number, val: number, tolerance: number): void {
+    div.innerHTML = val.toFixed(4);
+    div.classList.remove('faulted');
+    div.classList.remove('zeroing');
+    if (Math.abs(target - val) < tolerance) {
+      div.classList.add('near');
+    } else {
+      div.classList.remove('near');
+    }
   }
 
   setValue(div: HTMLElement, val: number): void {
@@ -249,60 +370,92 @@
     const now = Date.now() / 1000.0;
 
     if (this.superstructureStatus) {
-	    this.endEffectorState.innerHTML =
-		    EndEffectorState[this.superstructureStatus.endEffectorState()];
-	    if (!this.superstructureStatus.wrist() ||
-		!this.superstructureStatus.wrist().zeroed()) {
-		    this.setZeroing(this.wrist);
-	    } else if (this.superstructureStatus.wrist().estopped()) {
-		    this.setEstopped(this.wrist);
-	    } else {
-		    this.setTargetValue(
-		    	this.wrist,
-		    	this.superstructureStatus.wrist().unprofiledGoalPosition(),
-		    	this.superstructureStatus.wrist().estimatorState().position(),
-		    	1e-3);
-	    }
-	    this.armState.innerHTML =
-		    ArmState[this.superstructureStatus.arm().state()];
-	    this.gamePiece.innerHTML =
-		    Class[this.superstructureStatus.gamePiece()];
-	    this.armX.innerHTML =
-		    this.superstructureStatus.arm().armX().toFixed(2);
-	    this.armY.innerHTML =
-		    this.superstructureStatus.arm().armY().toFixed(2);
-	    this.circularIndex.innerHTML =
-		    this.superstructureStatus.arm().armCircularIndex().toFixed(0);
-	    this.roll.innerHTML =
-		    this.superstructureStatus.arm().rollJointEstimatorState().position().toFixed(2);
-	    this.proximal.innerHTML =
-		    this.superstructureStatus.arm().proximalEstimatorState().position().toFixed(2);
-	    this.distal.innerHTML =
-		    this.superstructureStatus.arm().distalEstimatorState().position().toFixed(2);
-	    let zeroingErrors: string = "Roll Joint Errors:"+'<br/>';
-	    for (let i = 0; i < this.superstructureStatus.arm().rollJointEstimatorState().errors.length; i++) {
-	    	zeroingErrors += ZeroingError[this.superstructureStatus.arm().rollJointEstimatorState().errors(i)]+'<br/>';
-	    }
-      zeroingErrors += '<br/>'+"Proximal Joint Errors:"+'<br/>';
-	    for (let i = 0; i < this.superstructureStatus.arm().proximalEstimatorState().errors.length; i++) {
-        zeroingErrors += ZeroingError[this.superstructureStatus.arm().proximalEstimatorState().errors(i)]+'<br/>';
-	    }
-      zeroingErrors += '<br/>'+"Distal Joint Errors:"+'<br/>';
-	    for (let i = 0; i < this.superstructureStatus.arm().distalEstimatorState().errors.length; i++) {
-        zeroingErrors += ZeroingError[this.superstructureStatus.arm().distalEstimatorState().errors(i)]+'<br/>';
-	    }
-      zeroingErrors += '<br/>'+"Wrist Errors:"+'<br/>';
-	    for (let i = 0; i < this.superstructureStatus.wrist().estimatorState().errors.length; i++) {
-        zeroingErrors += ZeroingError[this.superstructureStatus.wrist().estimatorState().errors(i)]+'<br/>';
-	    }
-	    this.zeroingFaults.innerHTML = zeroingErrors;
+      this.endEffectorState.innerHTML =
+          EndEffectorState[this.superstructureStatus.endEffectorState()];
+      if (!this.superstructureStatus.wrist() ||
+          !this.superstructureStatus.wrist().zeroed()) {
+        this.setZeroing(this.wrist);
+      } else if (this.superstructureStatus.wrist().estopped()) {
+        this.setEstopped(this.wrist);
+      } else {
+        this.setTargetValue(
+            this.wrist,
+            this.superstructureStatus.wrist().unprofiledGoalPosition(),
+            this.superstructureStatus.wrist().estimatorState().position(),
+            1e-3);
+      }
+      this.armState.innerHTML =
+          ArmState[this.superstructureStatus.arm().state()];
+      this.gamePiece.innerHTML = Class[this.superstructureStatus.gamePiece()];
+      this.armX.innerHTML = this.superstructureStatus.arm().armX().toFixed(2);
+      this.armY.innerHTML = this.superstructureStatus.arm().armY().toFixed(2);
+      this.circularIndex.innerHTML =
+          this.superstructureStatus.arm().armCircularIndex().toFixed(0);
+      this.roll.innerHTML = this.superstructureStatus.arm()
+                                .rollJointEstimatorState()
+                                .position()
+                                .toFixed(2);
+      this.proximal.innerHTML = this.superstructureStatus.arm()
+                                    .proximalEstimatorState()
+                                    .position()
+                                    .toFixed(2);
+      this.distal.innerHTML = this.superstructureStatus.arm()
+                                  .distalEstimatorState()
+                                  .position()
+                                  .toFixed(2);
+      let zeroingErrors: string = 'Roll Joint Errors:' +
+          '<br/>';
+      for (let i = 0; i < this.superstructureStatus.arm()
+                              .rollJointEstimatorState()
+                              .errors.length;
+           i++) {
+        zeroingErrors += ZeroingError[this.superstructureStatus.arm()
+                                          .rollJointEstimatorState()
+                                          .errors(i)] +
+            '<br/>';
+      }
+      zeroingErrors += '<br/>' +
+          'Proximal Joint Errors:' +
+          '<br/>';
+      for (let i = 0; i < this.superstructureStatus.arm()
+                              .proximalEstimatorState()
+                              .errors.length;
+           i++) {
+        zeroingErrors += ZeroingError[this.superstructureStatus.arm()
+                                          .proximalEstimatorState()
+                                          .errors(i)] +
+            '<br/>';
+      }
+      zeroingErrors += '<br/>' +
+          'Distal Joint Errors:' +
+          '<br/>';
+      for (let i = 0; i <
+           this.superstructureStatus.arm().distalEstimatorState().errors.length;
+           i++) {
+        zeroingErrors += ZeroingError[this.superstructureStatus.arm()
+                                          .distalEstimatorState()
+                                          .errors(i)] +
+            '<br/>';
+      }
+      zeroingErrors += '<br/>' +
+          'Wrist Errors:' +
+          '<br/>';
+      for (let i = 0;
+           i < this.superstructureStatus.wrist().estimatorState().errors.length;
+           i++) {
+        zeroingErrors += ZeroingError[this.superstructureStatus.wrist()
+                                          .estimatorState()
+                                          .errors(i)] +
+            '<br/>';
+      }
+      this.zeroingFaults.innerHTML = zeroingErrors;
     }
 
     if (this.drivetrainStatus && this.drivetrainStatus.trajectoryLogging()) {
       this.drawRobot(
           this.drivetrainStatus.trajectoryLogging().x(),
           this.drivetrainStatus.trajectoryLogging().y(),
-          this.drivetrainStatus.trajectoryLogging().theta(), "#000000FF",
+          this.drivetrainStatus.trajectoryLogging().theta(), '#000000FF',
           false);
     }
 
diff --git a/y2023/www/styles.css b/y2023/www/styles.css
index c486115..c2c44d2 100644
--- a/y2023/www/styles.css
+++ b/y2023/www/styles.css
@@ -7,37 +7,19 @@
   display: inline-block
 }
 
-#targets,
 #readouts,
-#vision_readouts {
+#middle_readouts
+{
   display: inline-block;
   vertical-align: top;
   float: right;
 }
 
+
 #legend {
   display: inline-block;
 }
 
-#outer_target {
-  border: 1px solid black;
-  width: 140px;
-  background-color: white;
-}
-
-#inner_target {
-  width: 60px;
-  height: 60px;
-  margin: 40px;
-  border: 1px solid black;
-  background-color: white;
-}
-
-#outer_target.targetted,
-#inner_target.targetted {
-  background-color: green;
-}
-
 table, th, td {
   border: 1px solid black;
   border-collapse: collapse;
@@ -54,7 +36,7 @@
   width: 150px;
 }
 
-.near {
+.connected, .near {
   background-color: LightGreen;
   border-radius: 10px;
 }
@@ -79,3 +61,14 @@
   padding: 5px;
   text-align: right;
 }
+
+#message_bridge_status > div {
+  display: table-row;
+  padding: 5px;
+}
+
+#message_bridge_status > div > div {
+  display: table-cell;
+  padding: 5px;
+  text-align: right;
+}
diff --git a/y2023/y2023_imu.json b/y2023/y2023_imu.json
index 96134f8..416552b 100644
--- a/y2023/y2023_imu.json
+++ b/y2023/y2023_imu.json
@@ -2,6 +2,108 @@
   "channels": [
     {
       "name": "/imu/aos",
+      "type": "aos.JoystickState",
+      "source_node": "imu",
+      "frequency": 100,
+      "logger": "LOCAL_AND_REMOTE_LOGGER",
+      "logger_nodes": [
+        "imu"
+      ],
+      "destination_nodes": [
+        {
+          "name": "logger",
+          "priority": 5,
+          "time_to_live": 50000000,
+          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+          "timestamp_logger_nodes": [
+            "imu"
+          ]
+        },
+        {
+          "name": "pi1",
+          "priority": 5,
+          "time_to_live": 50000000,
+          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+          "timestamp_logger_nodes": [
+            "imu"
+          ]
+        },
+        {
+          "name": "pi2",
+          "priority": 5,
+          "time_to_live": 50000000,
+          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+          "timestamp_logger_nodes": [
+            "imu"
+          ]
+        },
+        {
+          "name": "pi3",
+          "priority": 5,
+          "time_to_live": 50000000,
+          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+          "timestamp_logger_nodes": [
+            "imu"
+          ]
+        },
+        {
+          "name": "pi4",
+          "priority": 5,
+          "time_to_live": 50000000,
+          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+          "timestamp_logger_nodes": [
+            "imu"
+          ]
+        }
+      ]
+    },
+    {
+      "name": "/imu/aos/remote_timestamps/logger/imu/aos/aos-JoystickState",
+      "type": "aos.message_bridge.RemoteMessage",
+      "source_node": "imu",
+      "logger": "NOT_LOGGED",
+      "frequency": 300,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
+      "name": "/imu/aos/remote_timestamps/pi1/imu/aos/aos-JoystickState",
+      "type": "aos.message_bridge.RemoteMessage",
+      "source_node": "imu",
+      "logger": "NOT_LOGGED",
+      "frequency": 300,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
+      "name": "/imu/aos/remote_timestamps/pi2/imu/aos/aos-JoystickState",
+      "type": "aos.message_bridge.RemoteMessage",
+      "source_node": "imu",
+      "logger": "NOT_LOGGED",
+      "frequency": 300,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
+      "name": "/imu/aos/remote_timestamps/pi3/imu/aos/aos-JoystickState",
+      "type": "aos.message_bridge.RemoteMessage",
+      "source_node": "imu",
+      "logger": "NOT_LOGGED",
+      "frequency": 300,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
+      "name": "/imu/aos/remote_timestamps/pi4/imu/aos/aos-JoystickState",
+      "type": "aos.message_bridge.RemoteMessage",
+      "source_node": "imu",
+      "logger": "NOT_LOGGED",
+      "frequency": 300,
+      "num_senders": 2,
+      "max_size": 200
+    },
+    {
+      "name": "/imu/aos",
       "type": "aos.timing.Report",
       "source_node": "imu",
       "frequency": 50,
@@ -305,7 +407,7 @@
       "type": "y2023.localizer.Status",
       "source_node": "imu",
       "frequency": 2200,
-      "max_size": 1000,
+      "max_size": 1504,
       "num_senders": 2
     },
     {
@@ -373,6 +475,14 @@
       ]
     },
     {
+      "name": "joystick_republish",
+      "executable_name": "joystick_republish",
+      "user": "pi",
+      "nodes": [
+        "imu"
+      ]
+    },
+    {
       "name": "message_bridge_server",
       "executable_name": "message_bridge_server",
       "user": "pi",
@@ -383,7 +493,12 @@
     {
       "name": "localizer_logger",
       "executable_name": "logger_main",
-      "args": ["--logging_folder", "", "--snappy_compress"],
+      "args": [
+        "--logging_folder",
+        "",
+        "--snappy_compress",
+        "--rotate_every", "30.0"
+      ],
       "user": "pi",
       "nodes": [
         "imu"
@@ -392,6 +507,10 @@
     {
       "name": "web_proxy",
       "executable_name": "web_proxy_main",
+      "args": [
+        "--min_ice_port=5800",
+        "--max_ice_port=5810"
+      ],
       "user": "pi",
       "nodes": [
         "imu"
@@ -450,6 +569,18 @@
     },
     {
       "name": "roborio"
+    },
+    {
+      "name": "pi1"
+    },
+    {
+      "name": "pi2"
+    },
+    {
+      "name": "pi3"
+    },
+    {
+      "name": "pi4"
     }
   ]
 }
diff --git a/y2023/y2023_pi_template.json b/y2023/y2023_pi_template.json
index 113e48f..8d9fdaa 100644
--- a/y2023/y2023_pi_template.json
+++ b/y2023/y2023_pi_template.json
@@ -377,6 +377,10 @@
       "name": "web_proxy",
       "executable_name": "web_proxy_main",
       "user": "pi",
+      "args": [
+        "--min_ice_port=5800",
+        "--max_ice_port=5810"
+      ],
       "nodes": [
         "pi{{ NUM }}"
       ]
@@ -397,7 +401,7 @@
         "--logging_folder",
         "",
         "--rotate_every",
-        "60.0",
+        "30.0",
         "--direct",
         "--flush_size=4194304"
       ],
diff --git a/y2023/y2023_roborio.json b/y2023/y2023_roborio.json
index f3697ac..3114ac9 100644
--- a/y2023/y2023_roborio.json
+++ b/y2023/y2023_roborio.json
@@ -6,7 +6,7 @@
       "source_node": "roborio",
       "frequency": 100,
       "logger": "LOCAL_AND_REMOTE_LOGGER",
-      "logger_nodes" : [
+      "logger_nodes": [
         "imu",
         "logger"
       ],
@@ -19,51 +19,6 @@
           "timestamp_logger_nodes": [
             "roborio"
           ]
-        },
-        {
-          "name": "logger",
-          "priority": 5,
-          "time_to_live": 50000000,
-          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
-          "timestamp_logger_nodes": [
-            "roborio"
-          ]
-        },
-        {
-          "name": "pi1",
-          "priority": 5,
-          "time_to_live": 50000000,
-          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
-          "timestamp_logger_nodes": [
-            "roborio"
-          ]
-        },
-        {
-          "name": "pi2",
-          "priority": 5,
-          "time_to_live": 50000000,
-          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
-          "timestamp_logger_nodes": [
-            "roborio"
-          ]
-        },
-        {
-          "name": "pi3",
-          "priority": 5,
-          "time_to_live": 50000000,
-          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
-          "timestamp_logger_nodes": [
-            "roborio"
-          ]
-        },
-        {
-          "name": "pi4",
-          "priority": 5,
-          "time_to_live": 50000000,
-          "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
-          "timestamp_logger_nodes": [
-            "roborio"
-          ]
         }
       ]
     },
@@ -77,51 +32,6 @@
       "max_size": 200
     },
     {
-      "name": "/roborio/aos/remote_timestamps/logger/roborio/aos/aos-JoystickState",
-      "type": "aos.message_bridge.RemoteMessage",
-      "source_node": "roborio",
-      "logger": "NOT_LOGGED",
-      "frequency": 300,
-      "num_senders": 2,
-      "max_size": 200
-    },
-    {
-      "name": "/roborio/aos/remote_timestamps/pi1/roborio/aos/aos-JoystickState",
-      "type": "aos.message_bridge.RemoteMessage",
-      "source_node": "roborio",
-      "logger": "NOT_LOGGED",
-      "frequency": 300,
-      "num_senders": 2,
-      "max_size": 200
-    },
-    {
-      "name": "/roborio/aos/remote_timestamps/pi2/roborio/aos/aos-JoystickState",
-      "type": "aos.message_bridge.RemoteMessage",
-      "source_node": "roborio",
-      "logger": "NOT_LOGGED",
-      "frequency": 300,
-      "num_senders": 2,
-      "max_size": 200
-    },
-    {
-      "name": "/roborio/aos/remote_timestamps/pi3/roborio/aos/aos-JoystickState",
-      "type": "aos.message_bridge.RemoteMessage",
-      "source_node": "roborio",
-      "logger": "NOT_LOGGED",
-      "frequency": 300,
-      "num_senders": 2,
-      "max_size": 200
-    },
-    {
-      "name": "/roborio/aos/remote_timestamps/pi4/roborio/aos/aos-JoystickState",
-      "type": "aos.message_bridge.RemoteMessage",
-      "source_node": "roborio",
-      "logger": "NOT_LOGGED",
-      "frequency": 300,
-      "num_senders": 2,
-      "max_size": 200
-    },
-    {
       "name": "/roborio/aos",
       "type": "aos.RobotState",
       "source_node": "roborio",
@@ -608,7 +518,10 @@
     {
       "name": "roborio_web_proxy",
       "executable_name": "web_proxy_main",
-      "args": ["--min_ice_port=5800", "--max_ice_port=5810"],
+      "args": [
+        "--min_ice_port=5800",
+        "--max_ice_port=5810"
+      ],
       "nodes": [
         "roborio"
       ]
@@ -616,7 +529,9 @@
     {
       "name": "roborio_message_bridge_client",
       "executable_name": "message_bridge_client",
-      "args": ["--rt_priority=16"],
+      "args": [
+        "--rt_priority=16"
+      ],
       "nodes": [
         "roborio"
       ]
@@ -624,7 +539,9 @@
     {
       "name": "roborio_message_bridge_server",
       "executable_name": "message_bridge_server",
-      "args": ["--rt_priority=16"],
+      "args": [
+        "--rt_priority=16"
+      ],
       "nodes": [
         "roborio"
       ]
@@ -632,7 +549,11 @@
     {
       "name": "logger",
       "executable_name": "logger_main",
-      "args": ["--snappy_compress", "--logging_folder=/home/admin/logs/"],
+      "args": [
+        "--snappy_compress",
+        "--logging_folder=/home/admin/logs/",
+        "--rotate_every", "30.0"
+      ],
       "nodes": [
         "roborio"
       ]