Add a LogNamer which puts everything in one big file per node

This lets us test sorting both log files where data and timestamps are
stored together and log files where data and timestamps are split up,
so we maintain compatibility as we improve sorting.
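
With FileStrategy::kCombine, the new MinimalFileMultiNodeLogNamer routes
data, local timestamps, and remote timestamps for a node through one set
of part files instead of the separate _data, _timestamps, and
_timestamps/remote_* files.  Each combined writer declares every
StoredDataType it holds (DATA + TIMESTAMPS for the local node, DATA +
REMOTE_TIMESTAMPS for forwarded channels) so the sorter knows a single
part file may contain multiple types.  For a two-node pi1/pi2 config
like the tests use, a log taken on pi1 ends up with files like:

  <base>_pi1_pi1_all.part0.bfbs
  <base>_pi1_pi2_all.part0.bfbs

(plus any compression extension) instead of:

  <base>_pi1_data.part0.bfbs
  <base>_pi1_timestamps.part0.bfbs
  <base>_timestamps/remote_pi2.part0.bfbs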

Change-Id: I6d28dd95cc5057634e609d2197e1629f4f4a5baa
Signed-off-by: James Kuszmaul <james.kuszmaul@bluerivertech.com>
diff --git a/aos/events/logging/config_remapper_test.cc b/aos/events/logging/config_remapper_test.cc
index 9649f19..89e1c96 100644
--- a/aos/events/logging/config_remapper_test.cc
+++ b/aos/events/logging/config_remapper_test.cc
@@ -27,9 +27,11 @@
     ::testing::Combine(
         ::testing::Values(
             ConfigParams{"multinode_pingpong_combined_config.json", true,
-                         kCombinedConfigSha1(), kCombinedConfigSha1()},
+                         kCombinedConfigSha1(), kCombinedConfigSha1(),
+                         FileStrategy::kCombine},
             ConfigParams{"multinode_pingpong_split_config.json", false,
-                         kSplitConfigSha1(), kReloggedSplitConfigSha1()}),
+                         kSplitConfigSha1(), kReloggedSplitConfigSha1(),
+                         FileStrategy::kCombine}),
         ::testing::ValuesIn(SupportedCompressionAlgorithms())));
 
 // Tests that we can read a config and remap a channel
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index edfcc83..6682ae3 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -745,6 +745,50 @@
   CloseWriter(&writer);
 }
 
+void MultiNodeLogNamer::NoticeNode(const Node *source_node) {
+  if (std::find(nodes_.begin(), nodes_.end(), source_node) == nodes_.end()) {
+    nodes_.emplace_back(source_node);
+  }
+}
+
+NewDataWriter *MultiNodeLogNamer::FindNodeDataWriter(const Node *source_node,
+                                                     size_t max_message_size) {
+  NoticeNode(source_node);
+
+  auto it = node_data_writers_.find(source_node);
+  if (it != node_data_writers_.end()) {
+    it->second.UpdateMaxMessageSize(max_message_size);
+    return &(it->second);
+  }
+  return nullptr;
+}
+
+NewDataWriter *MultiNodeLogNamer::FindNodeTimestampWriter(
+    const Node *source_node, size_t max_message_size) {
+  NoticeNode(source_node);
+
+  auto it = node_timestamp_writers_.find(source_node);
+  if (it != node_timestamp_writers_.end()) {
+    it->second.UpdateMaxMessageSize(max_message_size);
+    return &(it->second);
+  }
+  return nullptr;
+}
+
+NewDataWriter *MultiNodeLogNamer::AddNodeDataWriter(const Node *source_node,
+                                                    NewDataWriter &&writer) {
+  auto result = node_data_writers_.emplace(source_node, std::move(writer));
+  CHECK(result.second);
+  return &(result.first->second);
+}
+
+NewDataWriter *MultiNodeLogNamer::AddNodeTimestampWriter(
+    const Node *source_node, NewDataWriter &&writer) {
+  auto result = node_timestamp_writers_.emplace(source_node, std::move(writer));
+  CHECK(result.second);
+  return &(result.first->second);
+}
+
 NewDataWriter *MultiNodeLogNamer::MakeWriter(const Channel *channel) {
   // See if we can read the data on this node at all.
   const bool is_readable =
@@ -765,39 +809,36 @@
   // log.  It needs to be logged with send timestamps, but be sorted enough
   // to be able to be processed.
 
-  // Track that this node is being logged.
   const Node *source_node =
       configuration::MultiNode(configuration_)
           ? configuration::GetNode(configuration_,
                                    channel->source_node()->string_view())
           : nullptr;
 
-  if (std::find(nodes_.begin(), nodes_.end(), source_node) == nodes_.end()) {
-    nodes_.emplace_back(source_node);
-  }
-
   // If we already have a data writer for the node, then use the same writer for
   // all channels of that node.
-  auto it = node_data_writers_.find(source_node);
-  if (it != node_data_writers_.end()) {
-    it->second.UpdateMaxMessageSize(
-        PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()));
-    return &(it->second);
+  NewDataWriter *result = FindNodeDataWriter(
+      source_node,
+      PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()));
+  if (result != nullptr) {
+    return result;
   }
 
   // If we don't have a data writer for the node, create one.
-  NewDataWriter data_writer(
-      this, source_node, node_,
-      [this, source_node](NewDataWriter *data_writer) {
-        OpenDataWriter(source_node, data_writer);
-      },
-      [this](NewDataWriter *data_writer) { CloseWriter(&data_writer->writer); },
-      PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()),
-      {StoredDataType::DATA});
-
-  auto result = node_data_writers_.emplace(source_node, std::move(data_writer));
-  CHECK(result.second);
-  return &(result.first->second);
+  return AddNodeDataWriter(
+      source_node,
+      NewDataWriter{
+          this,
+          source_node,
+          node_,
+          [this, source_node](NewDataWriter *data_writer) {
+            OpenDataWriter(source_node, data_writer);
+          },
+          [this](NewDataWriter *data_writer) {
+            CloseWriter(&data_writer->writer);
+          },
+          PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()),
+          {StoredDataType::DATA}});
 }
 
 NewDataWriter *MultiNodeLogNamer::MakeForwardedTimestampWriter(
@@ -807,31 +848,29 @@
       configuration::ChannelIsReadableOnNode(channel, this->node());
   CHECK(is_readable) << ": " << configuration::CleanedChannelToString(channel);
 
-  if (std::find(nodes_.begin(), nodes_.end(), node) == nodes_.end()) {
-    nodes_.emplace_back(node);
-  }
-
   CHECK_NE(node, this->node());
 
   // If we have a remote timestamp writer for a particular node, use the same
   // writer for all remote timestamp channels of that node.
-  auto it = node_timestamp_writers_.find(node);
-  if (it != node_timestamp_writers_.end()) {
-    return &(it->second);
+  NewDataWriter *result =
+      FindNodeTimestampWriter(node, PackRemoteMessageSize());
+  if (result != nullptr) {
+    return result;
   }
 
   // If there are no remote timestamp writers for the node, create one.
-  NewDataWriter data_writer(
-      this, configuration::GetNode(configuration_, node), node_,
-      [this](NewDataWriter *data_writer) {
-        OpenForwardedTimestampWriter(node_, data_writer);
-      },
-      [this](NewDataWriter *data_writer) { CloseWriter(&data_writer->writer); },
-      PackRemoteMessageSize(), {StoredDataType::REMOTE_TIMESTAMPS});
-
-  auto result = node_timestamp_writers_.emplace(node, std::move(data_writer));
-  CHECK(result.second);
-  return &(result.first->second);
+  return AddNodeTimestampWriter(
+      node, NewDataWriter{this,
+                          configuration::GetNode(configuration_, node),
+                          node_,
+                          [this](NewDataWriter *data_writer) {
+                            OpenForwardedTimestampWriter(node_, data_writer);
+                          },
+                          [this](NewDataWriter *data_writer) {
+                            CloseWriter(&data_writer->writer);
+                          },
+                          PackRemoteMessageSize(),
+                          {StoredDataType::REMOTE_TIMESTAMPS}});
 }
 
 NewDataWriter *MultiNodeLogNamer::MakeTimestampWriter(const Channel *channel) {
@@ -845,23 +884,24 @@
   }
 
   // There is only one of these.
-  auto it = node_timestamp_writers_.find(this->node());
-  if (it != node_timestamp_writers_.end()) {
-    it->second.UpdateMaxMessageSize(
-        PackMessageSize(LogType::kLogDeliveryTimeOnly, 0));
-    return &(it->second);
+  NewDataWriter *result = FindNodeTimestampWriter(
+      this->node(), PackMessageSize(LogType::kLogDeliveryTimeOnly, 0));
+  if (result != nullptr) {
+    return result;
   }
 
-  NewDataWriter data_writer(
-      this, node_, node_,
-      [this](NewDataWriter *data_writer) { OpenTimestampWriter(data_writer); },
-      [this](NewDataWriter *data_writer) { CloseWriter(&data_writer->writer); },
-      PackMessageSize(LogType::kLogDeliveryTimeOnly, 0),
-      {StoredDataType::TIMESTAMPS});
-
-  auto result = node_timestamp_writers_.emplace(node_, std::move(data_writer));
-  CHECK(result.second);
-  return &(result.first->second);
+  return AddNodeTimestampWriter(
+      node_, NewDataWriter{this,
+                           node_,
+                           node_,
+                           [this](NewDataWriter *data_writer) {
+                             OpenTimestampWriter(data_writer);
+                           },
+                           [this](NewDataWriter *data_writer) {
+                             CloseWriter(&data_writer->writer);
+                           },
+                           PackMessageSize(LogType::kLogDeliveryTimeOnly, 0),
+                           {StoredDataType::TIMESTAMPS}});
 }
 
 WriteCode MultiNodeLogNamer::Close() {
@@ -990,5 +1030,168 @@
   }
 }
 
+NewDataWriter *MinimalFileMultiNodeLogNamer::MakeWriter(
+    const Channel *channel) {
+  // See if we can read the data on this node at all.
+  const bool is_readable =
+      configuration::ChannelIsReadableOnNode(channel, this->node());
+  if (!is_readable) {
+    return nullptr;
+  }
+
+  // Then, see if we are supposed to log the data here.
+  const bool log_message =
+      configuration::ChannelMessageIsLoggedOnNode(channel, this->node());
+
+  if (!log_message) {
+    return nullptr;
+  }
+
+  // Ok, we have data that is being forwarded to us that we are supposed to
+  // log.  It needs to be logged with send timestamps, but be sorted enough
+  // to be able to be processed.
+
+  const Node *source_node =
+      configuration::MultiNode(configuration_)
+          ? configuration::GetNode(configuration_,
+                                   channel->source_node()->string_view())
+          : nullptr;
+
+  // Pick local vs remote log types based on whether this node is the source.
+  if (this->node() == source_node) {
+    // If we already have a data writer for the node, then use the same writer
+    // for all channels of that node.
+    NewDataWriter *result = FindNodeDataWriter(
+        source_node,
+        PackMessageSize(LogType::kLogMessage, channel->max_size()));
+    if (result != nullptr) {
+      return result;
+    }
+
+    return AddNodeDataWriter(
+        source_node,
+        NewDataWriter{
+            this,
+            source_node,
+            node_,
+            [this, source_node](NewDataWriter *data_writer) {
+              OpenNodeWriter(source_node, data_writer);
+            },
+            [this](NewDataWriter *data_writer) {
+              CloseWriter(&data_writer->writer);
+            },
+            PackMessageSize(LogType::kLogMessage, channel->max_size()),
+            {StoredDataType::DATA, StoredDataType::TIMESTAMPS}});
+  } else {
+    // If we already have a data writer for the node, then use the same writer
+    // for all channels of that node.
+    NewDataWriter *result = FindNodeDataWriter(
+        source_node,
+        PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()));
+    if (result != nullptr) {
+      return result;
+    }
+
+    return AddNodeDataWriter(
+        source_node,
+        NewDataWriter{
+            this,
+            source_node,
+            node_,
+            [this, source_node](NewDataWriter *data_writer) {
+              OpenNodeWriter(source_node, data_writer);
+            },
+            [this](NewDataWriter *data_writer) {
+              CloseWriter(&data_writer->writer);
+            },
+            PackMessageSize(LogType::kLogRemoteMessage, channel->max_size()),
+            {StoredDataType::DATA, StoredDataType::REMOTE_TIMESTAMPS}});
+  }
+}
+
+NewDataWriter *MinimalFileMultiNodeLogNamer::MakeTimestampWriter(
+    const Channel *channel) {
+  bool log_delivery_times = false;
+  if (this->node() != nullptr) {
+    log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
+        channel, this->node(), this->node());
+  }
+  if (!log_delivery_times) {
+    return nullptr;
+  }
+
+  // There is only one of these.
+  NewDataWriter *result = FindNodeDataWriter(
+      this->node(), PackMessageSize(LogType::kLogDeliveryTimeOnly, 0));
+  if (result != nullptr) {
+    return result;
+  }
+
+  return AddNodeDataWriter(
+      node_, NewDataWriter{this,
+                           node_,
+                           node_,
+                           [this](NewDataWriter *data_writer) {
+                             OpenNodeWriter(node_, data_writer);
+                           },
+                           [this](NewDataWriter *data_writer) {
+                             CloseWriter(&data_writer->writer);
+                           },
+                           PackMessageSize(LogType::kLogDeliveryTimeOnly, 0),
+                           {StoredDataType::DATA, StoredDataType::TIMESTAMPS}});
+}
+
+NewDataWriter *MinimalFileMultiNodeLogNamer::MakeForwardedTimestampWriter(
+    const Channel *channel, const Node *node) {
+  // See if we can read the data on this node at all.
+  const bool is_readable =
+      configuration::ChannelIsReadableOnNode(channel, this->node());
+  CHECK(is_readable) << ": " << configuration::CleanedChannelToString(channel);
+
+  CHECK_NE(node, this->node());
+
+  // If we have a remote timestamp writer for a particular node, use the same
+  // writer for all remote timestamp channels of that node.
+  NewDataWriter *result = FindNodeDataWriter(node, PackRemoteMessageSize());
+  if (result != nullptr) {
+    return result;
+  }
+
+  // If there are no remote timestamp writers for the node, create one.
+  return AddNodeDataWriter(
+      node,
+      NewDataWriter{this,
+                    configuration::GetNode(configuration_, node),
+                    node_,
+                    [this, node](NewDataWriter *data_writer) {
+                      OpenNodeWriter(node, data_writer);
+                    },
+                    [this](NewDataWriter *data_writer) {
+                      CloseWriter(&data_writer->writer);
+                    },
+                    PackRemoteMessageSize(),
+                    {StoredDataType::DATA, StoredDataType::REMOTE_TIMESTAMPS}});
+}
+
+void MinimalFileMultiNodeLogNamer::OpenNodeWriter(const Node *source_node,
+                                                  NewDataWriter *data_writer) {
+  std::string filename;
+
+  if (node() != nullptr) {
+    filename = absl::StrCat(node()->name()->string_view(), "_");
+  }
+
+  if (source_node != nullptr) {
+    absl::StrAppend(&filename, source_node->name()->string_view(), "_");
+  }
+
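+  // e.g. "pi1_pi2_all.part0.bfbs" plus the configured extension.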
+  absl::StrAppend(&filename, "all.part", data_writer->parts_index(), ".bfbs",
+                  extension_);
+  VLOG(1) << "Going to open " << filename;
+  CreateBufferWriter(filename, data_writer->max_message_size(),
+                     &data_writer->writer);
+}
+
 }  // namespace logger
 }  // namespace aos
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
index ccdfe5a..5374acf 100644
--- a/aos/events/logging/log_namer.h
+++ b/aos/events/logging/log_namer.h
@@ -504,6 +504,24 @@
   LogBackend *log_backend() { return log_backend_.get(); }
   const LogBackend *log_backend() const { return log_backend_.get(); }
 
+  // Returns the data or timestamp writer for the provided node, updating its
+  // max message size, or nullptr if no writer exists for that node yet.
+  NewDataWriter *FindNodeDataWriter(const Node *node, size_t max_message_size);
+  NewDataWriter *FindNodeTimestampWriter(const Node *node,
+                                         size_t max_message_size);
+
+  // Saves the data writer or timestamp writer for the provided node.
+  NewDataWriter *AddNodeDataWriter(const Node *node, NewDataWriter &&writer);
+  NewDataWriter *AddNodeTimestampWriter(const Node *node,
+                                        NewDataWriter &&writer);
+
+  void CloseWriter(std::unique_ptr<DetachedBufferWriter> *writer_pointer);
+
+  void CreateBufferWriter(std::string_view path, size_t max_message_size,
+                          std::unique_ptr<DetachedBufferWriter> *destination);
+
+  std::string extension_;
+
  private:
   // Opens up a writer for timestamps forwarded back.
   void OpenForwardedTimestampWriter(const Node *source_node,
@@ -513,10 +531,8 @@
   void OpenDataWriter(const Node *source_node, NewDataWriter *data_writer);
   void OpenTimestampWriter(NewDataWriter *data_writer);
 
-  void CreateBufferWriter(std::string_view path, size_t max_message_size,
-                          std::unique_ptr<DetachedBufferWriter> *destination);
-
-  void CloseWriter(std::unique_ptr<DetachedBufferWriter> *writer_pointer);
+  // Adds source_node to nodes_ if it isn't already tracked.
+  void NoticeNode(const Node *source_node);
 
   // A version of std::accumulate which operates over all of our DataWriters.
   template <typename T, typename BinaryOperation>
@@ -542,7 +558,6 @@
   std::vector<std::string> all_filenames_;
 
   std::function<std::unique_ptr<DataEncoder>(size_t)> encoder_factory_;
-  std::string extension_;
 
   // Storage for statistics from previously-rotated DetachedBufferWriters.
   std::chrono::nanoseconds max_write_time_ = std::chrono::nanoseconds::zero();
@@ -611,6 +626,30 @@
   }
 };
 
+// Class which dumps all data from each node into a single file per node.  This
+// is mostly interesting for testing.
+class MinimalFileMultiNodeLogNamer : public MultiNodeFilesLogNamer {
+ public:
+  MinimalFileMultiNodeLogNamer(std::string_view base_name,
+                               EventLoop *event_loop)
+      : MultiNodeFilesLogNamer(base_name, event_loop) {}
+  MinimalFileMultiNodeLogNamer(std::string_view base_name,
+                               const Configuration *configuration,
+                               EventLoop *event_loop, const Node *node)
+      : MultiNodeFilesLogNamer(base_name, configuration, event_loop, node) {}
+
+  NewDataWriter *MakeWriter(const Channel *channel) override;
+
+  NewDataWriter *MakeForwardedTimestampWriter(const Channel *channel,
+                                              const Node *node) override;
+
+  NewDataWriter *MakeTimestampWriter(const Channel *channel) override;
+
+ private:
+  // Computes the filename and opens the file backing the data writer.
+  void OpenNodeWriter(const Node *source_node, NewDataWriter *data_writer);
+};
+
 }  // namespace logger
 }  // namespace aos
 
diff --git a/aos/events/logging/log_reader_utils_test.cc b/aos/events/logging/log_reader_utils_test.cc
index b61c9de..047c7b9 100644
--- a/aos/events/logging/log_reader_utils_test.cc
+++ b/aos/events/logging/log_reader_utils_test.cc
@@ -17,7 +17,8 @@
     All, MultinodeLoggerOneConfigTest,
     ::testing::Combine(::testing::Values(ConfigParams{
                            "multinode_pingpong_combined_config.json", true,
-                           kCombinedConfigSha1(), kCombinedConfigSha1()}),
+                           kCombinedConfigSha1(), kCombinedConfigSha1(),
+                           FileStrategy::kCombine}),
                        ::testing::ValuesIn(SupportedCompressionAlgorithms())));
 
 // This test is to check if we are able to get the right channels from a log
@@ -91,7 +92,8 @@
 
   {
     LoggerState pi1_logger = MakeLoggerState(
-        pi1_, &event_loop_factory_, SupportedCompressionAlgorithms()[0]);
+        pi1_, &event_loop_factory_, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi2_->DisableStatistics();
     pi2_->Disconnect(pi1_->node());
     pi1_->Disconnect(pi2_->node());
diff --git a/aos/events/logging/multinode_logger_test.cc b/aos/events/logging/multinode_logger_test.cc
index 290b312..fd930bc 100644
--- a/aos/events/logging/multinode_logger_test.cc
+++ b/aos/events/logging/multinode_logger_test.cc
@@ -26,9 +26,14 @@
     ::testing::Combine(
         ::testing::Values(
             ConfigParams{"multinode_pingpong_combined_config.json", true,
-                         kCombinedConfigSha1(), kCombinedConfigSha1()},
+                         kCombinedConfigSha1(), kCombinedConfigSha1(),
+                         FileStrategy::kKeepSeparate},
             ConfigParams{"multinode_pingpong_split_config.json", false,
-                         kSplitConfigSha1(), kReloggedSplitConfigSha1()}),
+                         kSplitConfigSha1(), kReloggedSplitConfigSha1(),
+                         FileStrategy::kKeepSeparate},
+            ConfigParams{"multinode_pingpong_split_config.json", false,
+                         kSplitConfigSha1(), kReloggedSplitConfigSha1(),
+                         FileStrategy::kCombine}),
         ::testing::ValuesIn(SupportedCompressionAlgorithms())));
 
 INSTANTIATE_TEST_SUITE_P(
@@ -36,13 +41,22 @@
     ::testing::Combine(
         ::testing::Values(
             ConfigParams{"multinode_pingpong_combined_config.json", true,
-                         kCombinedConfigSha1(), kCombinedConfigSha1()},
+                         kCombinedConfigSha1(), kCombinedConfigSha1(),
+                         FileStrategy::kKeepSeparate},
             ConfigParams{"multinode_pingpong_split_config.json", false,
-                         kSplitConfigSha1(), kReloggedSplitConfigSha1()}),
+                         kSplitConfigSha1(), kReloggedSplitConfigSha1(),
+                         FileStrategy::kKeepSeparate},
+            ConfigParams{"multinode_pingpong_split_config.json", false,
+                         kSplitConfigSha1(), kReloggedSplitConfigSha1(),
+                         FileStrategy::kCombine}),
         ::testing::ValuesIn(SupportedCompressionAlgorithms())));
 
 // Tests that we can write and read simple multi-node log files.
 TEST_P(MultinodeLoggerTest, SimpleMultiNode) {
+  if (file_strategy() == FileStrategy::kCombine) {
+    GTEST_SKIP() << "We don't need to test the combined file writer this deep.";
+  }
+
   std::vector<std::string> actual_filenames;
   time_converter_.StartEqual();
 
@@ -1133,6 +1147,10 @@
 // Tests that we can sort a bunch of parts with the end missing off a
 // file.  We should use the part we can read.
 TEST_P(MultinodeLoggerTest, SortTruncatedParts) {
+  if (file_strategy() == FileStrategy::kCombine) {
+    GTEST_SKIP() << "We don't need to test the combined file writer this deep.";
+  }
+
   std::vector<std::string> actual_filenames;
   time_converter_.StartEqual();
   // Make a bunch of parts.
@@ -2085,6 +2103,9 @@
 // This should be enough that we can then re-run the logger and get a valid log
 // back.
 TEST_P(MultinodeLoggerTest, RemoteReboot) {
+  if (file_strategy() == FileStrategy::kCombine) {
+    GTEST_SKIP() << "We don't need to test the combined file writer this deep.";
+  }
   std::vector<std::string> actual_filenames;
 
   const UUID pi1_boot0 = UUID::Random();
@@ -2424,6 +2445,10 @@
 // Tests that we can sort a log which only has timestamps from the remote
 // because the local message_bridge_client failed to connect.
 TEST_P(MultinodeLoggerTest, RemoteRebootOnlyTimestamps) {
+  if (file_strategy() == FileStrategy::kCombine) {
+    GTEST_SKIP() << "We don't need to test the combined file writer this deep.";
+  }
+
   const UUID pi1_boot0 = UUID::Random();
   const UUID pi2_boot0 = UUID::Random();
   const UUID pi2_boot1 = UUID::Random();
@@ -2926,6 +2951,9 @@
 
 // Tests that we explode if someone loses a part out of the middle of a log.
 TEST_P(MultinodeLoggerTest, MissingPartsFromMiddle) {
+  if (file_strategy() == FileStrategy::kCombine) {
+    GTEST_SKIP() << "We don't need to test the combined file writer this deep.";
+  }
   time_converter_.AddMonotonic(
       {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
   {
@@ -3137,13 +3165,16 @@
   std::vector<std::string> filenames;
   {
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     LoggerState pi3_logger = MakeLoggerState(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     {
       // And now start the logger.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
 
       event_loop_factory.RunFor(chrono::milliseconds(1000));
 
@@ -3178,7 +3209,8 @@
 
       // Start logging again on pi2 after it is up.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
       pi2_logger.StartLogger(kLogfile2_2);
 
       event_loop_factory.RunFor(chrono::milliseconds(10000));
@@ -3299,13 +3331,16 @@
   std::vector<std::string> filenames;
   {
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     LoggerState pi3_logger = MakeLoggerState(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     {
       // And now start the logger.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
 
       pi1_logger.StartLogger(kLogfile1_1);
       pi3_logger.StartLogger(kLogfile3_1);
@@ -3343,7 +3378,8 @@
 
       // Start logging again on pi2 after it is up.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
       pi2_logger.StartLogger(kLogfile2_2);
 
       // And allow remote messages now that we have some local ones.
@@ -3462,13 +3498,16 @@
   std::vector<std::string> filenames;
   {
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     LoggerState pi3_logger = MakeLoggerState(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     {
       // And now start the logger.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
 
       pi1_logger.StartLogger(kLogfile1_1);
       pi3_logger.StartLogger(kLogfile3_1);
@@ -3502,7 +3541,8 @@
 
       // Start logging again on pi2 after it is up.
       LoggerState pi2_logger = MakeLoggerState(
-          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+          pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+          FileStrategy::kKeepSeparate);
       pi2_logger.StartLogger(kLogfile2_2);
 
       event_loop_factory.RunFor(chrono::milliseconds(5000));
@@ -3612,7 +3652,8 @@
 
   {
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
 
     event_loop_factory.RunFor(chrono::milliseconds(95));
 
@@ -3623,7 +3664,8 @@
     pi2->Connect(pi1->node());
 
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi1_logger.StartLogger(kLogfile1_1);
 
     event_loop_factory.RunFor(chrono::milliseconds(5000));
@@ -3682,7 +3724,8 @@
   // second boot.
   {
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
 
     event_loop_factory.RunFor(chrono::milliseconds(95));
 
@@ -3751,7 +3794,8 @@
   // second boot.
   {
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
 
     event_loop_factory.RunFor(chrono::milliseconds(95));
 
@@ -3830,7 +3874,8 @@
     event_loop_factory.RunFor(chrono::milliseconds(1000));
 
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
 
     pi2_logger.StartLogger(kLogfile2_1);
 
@@ -3900,7 +3945,8 @@
     event_loop_factory.RunFor(chrono::milliseconds(1000));
 
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
 
     pi2_logger.StartLogger(kLogfile2_1);
 
@@ -4097,18 +4143,21 @@
     // Now start a receiving node first.  This sets up 2 tight bounds between
     // 2 of the nodes.
     LoggerState pi2_logger = MakeLoggerState(
-        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi2_logger.StartLogger(kLogfile2_1);
 
     event_loop_factory.RunFor(chrono::seconds(100));
 
     // And now start the third leg.
     LoggerState pi3_logger = MakeLoggerState(
-        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi3_logger.StartLogger(kLogfile3_1);
 
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi1_logger.StartLogger(kLogfile1_1);
 
     event_loop_factory.RunFor(chrono::seconds(100));
@@ -4258,7 +4307,8 @@
     // Now start a receiving node first.  This sets up 2 tight bounds between
     // 2 of the nodes.
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi1_logger.StartLogger(kLogfile1_1);
 
     std::unique_ptr<EventLoop> pi2_event_loop = pi2->MakeEventLoop("pong");
@@ -4373,7 +4423,8 @@
     // Now start a receiving node first.  This sets up 2 tight bounds between
     // 2 of the nodes.
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi1_logger.StartLogger(kLogfile1_1);
 
     event_loop_factory.RunFor(chrono::seconds(10));
@@ -4462,7 +4513,8 @@
     // Now start a receiving node first.  This sets up 2 tight bounds between
     // 2 of the nodes.
     LoggerState pi1_logger = MakeLoggerState(
-        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+        pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0],
+        FileStrategy::kKeepSeparate);
     pi1_logger.StartLogger(kLogfile1_1);
 
     event_loop_factory.RunFor(chrono::seconds(10));
diff --git a/aos/events/logging/multinode_logger_test_lib.cc b/aos/events/logging/multinode_logger_test_lib.cc
index ddb7007..a4485d4 100644
--- a/aos/events/logging/multinode_logger_test_lib.cc
+++ b/aos/events/logging/multinode_logger_test_lib.cc
@@ -17,6 +17,7 @@
 LoggerState MakeLoggerState(NodeEventLoopFactory *node,
                             SimulatedEventLoopFactory *factory,
                             CompressionParams params,
+                            FileStrategy file_strategy,
                             const Configuration *configuration) {
   if (configuration == nullptr) {
     configuration = factory->configuration();
@@ -26,14 +27,18 @@
           configuration,
           configuration::GetNode(configuration, node->node()),
           nullptr,
-          params};
+          params,
+          file_strategy};
 }
 
 std::unique_ptr<MultiNodeFilesLogNamer> LoggerState::MakeLogNamer(
     std::string logfile_base) {
   std::unique_ptr<MultiNodeFilesLogNamer> namer =
-      std::make_unique<MultiNodeFilesLogNamer>(logfile_base, configuration,
-                                               event_loop.get(), node);
+      file_strategy == FileStrategy::kCombine
+          ? std::make_unique<MinimalFileMultiNodeLogNamer>(
+                logfile_base, configuration, event_loop.get(), node)
+          : std::make_unique<MultiNodeFilesLogNamer>(
+                logfile_base, configuration, event_loop.get(), node);
   namer->set_extension(params.extension);
   namer->set_encoder_factory(params.encoder_factory);
   return namer;
@@ -106,7 +111,8 @@
   event_loop_factory_.SetTimeConverter(&time_converter_);
 
   LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
-            << " and " << logfiles_[2];
+            << " and " << logfiles_[2] << " shared? " << shared()
+            << " combine? " << (file_strategy() == FileStrategy::kCombine);
 
   pi1_->OnStartup([this]() {
     pi1_->AlwaysStart<Ping>("ping");
@@ -122,6 +128,10 @@
   return std::get<0>(GetParam()).shared;
 }
 
+FileStrategy MultinodeLoggerTest::file_strategy() const {
+  return std::get<0>(GetParam()).file_strategy;
+}
+
 std::vector<std::string> MultinodeLoggerTest::MakeLogFiles(
     std::string logfile_base1, std::string logfile_base2, size_t pi1_data_count,
     size_t pi2_data_count, size_t pi1_timestamps_count,
@@ -132,38 +142,60 @@
   std::vector<std::string> result;
   result.emplace_back(absl::StrCat(logfile_base1, "_", sha256, Extension()));
   result.emplace_back(absl::StrCat(logfile_base2, "_", sha256, Extension()));
-  for (size_t i = 0; i < pi1_data_count; ++i) {
-    result.emplace_back(
-        absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
+
+  if (file_strategy() == FileStrategy::kCombine) {
+    for (size_t i = 0; i < pi1_data_count + pi1_timestamps_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base1, "_pi1_pi1_all.part", i, Extension()));
+    }
+    for (size_t i = 0; i < 3; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base1, "_pi1_pi2_all.part", i, Extension()));
+    }
+
+    for (size_t i = 0; i < pi2_data_count + pi2_timestamps_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base2, "_pi2_pi2_all.part", i, Extension()));
+    }
+
+    for (size_t i = 0; i < 3; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base2, "_pi2_pi1_all.part", i, Extension()));
+    }
+  } else {
+    for (size_t i = 0; i < pi1_data_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
+    }
+    for (size_t i = 0; i < pi1_timestamps_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base1, "_pi1_timestamps.part", i, Extension()));
+    }
+    for (size_t i = 0; i < pi2_data_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
+    }
+    for (size_t i = 0; i < pi2_timestamps_count; ++i) {
+      result.emplace_back(
+          absl::StrCat(logfile_base2, "_pi2_timestamps.part", i, Extension()));
+    }
+    result.emplace_back(logfile_base2 + "_data/pi1_data.part0" + Extension());
+    result.emplace_back(logfile_base2 + "_data/pi1_data.part1" + Extension());
+    result.emplace_back(logfile_base1 + "_data/pi2_data.part0" + Extension());
+    result.emplace_back(logfile_base1 + "_data/pi2_data.part1" + Extension());
+    // shared and not shared config types will have the same output since the
+    // data writers are consolidated to per node instead of per channel.
+    result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part0" +
+                        Extension());
+    result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part1" +
+                        Extension());
+    result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part2" +
+                        Extension());
+    result.emplace_back(logfile_base2 + "_timestamps/remote_pi1.part0" +
+                        Extension());
+    result.emplace_back(logfile_base2 + "_timestamps/remote_pi1.part1" +
+                        Extension());
   }
-  for (size_t i = 0; i < pi1_timestamps_count; ++i) {
-    result.emplace_back(
-        absl::StrCat(logfile_base1, "_pi1_timestamps.part", i, Extension()));
-  }
-  for (size_t i = 0; i < pi2_data_count; ++i) {
-    result.emplace_back(
-        absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
-  }
-  for (size_t i = 0; i < pi2_timestamps_count; ++i) {
-    result.emplace_back(
-        absl::StrCat(logfile_base2, "_pi2_timestamps.part", i, Extension()));
-  }
-  result.emplace_back(logfile_base2 + "_data/pi1_data.part0" + Extension());
-  result.emplace_back(logfile_base2 + "_data/pi1_data.part1" + Extension());
-  result.emplace_back(logfile_base1 + "_data/pi2_data.part0" + Extension());
-  result.emplace_back(logfile_base1 + "_data/pi2_data.part1" + Extension());
-  // shared and not shared config types will have the same output since the data
-  // writers are consolidated to per node instead of per channel.
-  result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part0" +
-                      Extension());
-  result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part1" +
-                      Extension());
-  result.emplace_back(logfile_base1 + "_timestamps/remote_pi2.part2" +
-                      Extension());
-  result.emplace_back(logfile_base2 + "_timestamps/remote_pi1.part0" +
-                      Extension());
-  result.emplace_back(logfile_base2 + "_timestamps/remote_pi1.part1" +
-                      Extension());
 
   return result;
 }
@@ -191,26 +223,36 @@
 
 std::vector<std::string> MultinodeLoggerTest::MakePi1DeadNodeLogfiles() {
   std::vector<std::string> result;
-  result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
-  result.emplace_back(absl::StrCat(
-      logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  if (file_strategy() == FileStrategy::kCombine) {
+    result.emplace_back(logfile_base1_ + "_pi1_pi1_all.part0" + Extension());
+    result.emplace_back(absl::StrCat(
+        logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  } else {
+    result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+    result.emplace_back(absl::StrCat(
+        logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+  }
   return result;
 }
 
 std::vector<std::vector<std::string>> MultinodeLoggerTest::StructureLogFiles() {
-  std::vector<std::vector<std::string>> result{
-      std::vector<std::string>{logfiles_[2]},
-      std::vector<std::string>{logfiles_[3], logfiles_[4]},
-      std::vector<std::string>{logfiles_[5]},
-      std::vector<std::string>{logfiles_[6], logfiles_[7]},
-      std::vector<std::string>{logfiles_[8], logfiles_[9]},
-      std::vector<std::string>{logfiles_[10], logfiles_[11]}};
-
-  result.emplace_back(
-      std::vector<std::string>{logfiles_[12], logfiles_[13], logfiles_[14]});
-  result.emplace_back(std::vector<std::string>{logfiles_[15], logfiles_[16]});
-
-  return result;
+  if (file_strategy() == FileStrategy::kCombine) {
+    return std::vector<std::vector<std::string>>{
+        std::vector<std::string>{logfiles_[2], logfiles_[3], logfiles_[4]},
+        std::vector<std::string>{logfiles_[5], logfiles_[6], logfiles_[7]},
+        std::vector<std::string>{logfiles_[8], logfiles_[9], logfiles_[10]},
+        std::vector<std::string>{logfiles_[11], logfiles_[12], logfiles_[13]}};
+  } else {
+    return std::vector<std::vector<std::string>>{
+        std::vector<std::string>{logfiles_[2]},
+        std::vector<std::string>{logfiles_[3], logfiles_[4]},
+        std::vector<std::string>{logfiles_[5]},
+        std::vector<std::string>{logfiles_[6], logfiles_[7]},
+        std::vector<std::string>{logfiles_[8], logfiles_[9]},
+        std::vector<std::string>{logfiles_[10], logfiles_[11]},
+        std::vector<std::string>{logfiles_[12], logfiles_[13], logfiles_[14]},
+        std::vector<std::string>{logfiles_[15], logfiles_[16]}};
+  }
 }
 
 std::string MultinodeLoggerTest::Extension() {
@@ -223,7 +265,8 @@
   if (factory == nullptr) {
     factory = &event_loop_factory_;
   }
-  return MakeLoggerState(node, factory, std::get<1>(GetParam()), configuration);
+  return MakeLoggerState(node, factory, std::get<1>(GetParam()),
+                         file_strategy(), configuration);
 }
 
 void MultinodeLoggerTest::StartLogger(LoggerState *logger,
@@ -286,7 +329,8 @@
   // depends on if we have the remote timestamps split across 2 files, or just
   // across 1, depending on if we are using a split or combined timestamp
   // channel config.
-  EXPECT_EQ(missing_rt_count, shared() ? 4u : 4u);
+  EXPECT_EQ(missing_rt_count,
+            file_strategy() == FileStrategy::kCombine ? 2u : 4u);
 
   EXPECT_EQ(log_event_uuids.size(), 2u);
   EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
diff --git a/aos/events/logging/multinode_logger_test_lib.h b/aos/events/logging/multinode_logger_test_lib.h
index 272c729..ffdceb9 100644
--- a/aos/events/logging/multinode_logger_test_lib.h
+++ b/aos/events/logging/multinode_logger_test_lib.h
@@ -27,6 +27,13 @@
       encoder_factory;
 };
 
+enum class FileStrategy {
+  // Use MinimalFileMultiNodeLogNamer
+  kCombine,
+  // Use MultiNodeFilesLogNamer
+  kKeepSeparate,
+};
+
 // Parameters to run all the tests with.
 struct ConfigParams {
   // The config file to use.
@@ -39,6 +46,8 @@
   std::string_view sha256;
   // sha256 of the relogged config
   std::string_view relogged_sha256;
+  // If kCombine, use MinimalFileMultiNodeLogNamer.
+  FileStrategy file_strategy;
 };
 
 struct LoggerState {
@@ -53,6 +62,7 @@
   const Node *node;
   MultiNodeFilesLogNamer *log_namer;
   CompressionParams params;
+  FileStrategy file_strategy;
 
   void AppendAllFilenames(std::vector<std::string> *filenames);
 
@@ -72,6 +82,7 @@
 LoggerState MakeLoggerState(NodeEventLoopFactory *node,
                             SimulatedEventLoopFactory *factory,
                             CompressionParams params,
+                            FileStrategy file_strategy,
                             const Configuration *configuration = nullptr);
 std::vector<std::vector<std::string>> ToLogReaderVector(
     const std::vector<LogFile> &log_files);
@@ -118,6 +129,7 @@
   MultinodeLoggerTest();
 
   bool shared() const;
+  FileStrategy file_strategy() const;
 
   std::vector<std::string> MakeLogFiles(std::string logfile_base1,
                                         std::string logfile_base2,