Error out if asked to read the same part twice
Before, we would get a cryptic error about time going backwards when we
tried to concatenate the part file with itself. Since we track part
indices and UUIDs everywhere, we can easily detect this case. Do that
and give a more targeted error.
Change-Id: I85df7de72649d671d383e9768c26c10751e6ae4e
Signed-off-by: Austin Schuh <austin.schuh@bluerivertech.com>
diff --git a/aos/events/logging/logfile_sorting.cc b/aos/events/logging/logfile_sorting.cc
index 3f637f8..eedfa62 100644
--- a/aos/events/logging/logfile_sorting.cc
+++ b/aos/events/logging/logfile_sorting.cc
@@ -1238,8 +1238,15 @@
return a.second < b.second;
});
new_parts.parts.reserve(parts.second.parts.size());
- for (std::pair<std::string, int> &p : parts.second.parts) {
- new_parts.parts.emplace_back(std::move(p.first));
+ {
+ int last_parts_index = -1;
+ for (std::pair<std::string, int> &p : parts.second.parts) {
+ CHECK_LT(last_parts_index, p.second)
+ << ": Found duplicate parts in '" << new_parts.parts.back()
+ << "' and '" << p.first << "'";
+ last_parts_index = p.second;
+ new_parts.parts.emplace_back(std::move(p.first));
+ }
}
CHECK(!parts.second.config_sha256.empty());
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 226c084..8d8a2a6 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -2501,6 +2501,30 @@
ConfirmReadable(pi1_single_direction_logfiles_);
}
+// Tests that passing in the same part file twice produces a targeted
+// duplicate-parts error instead of a confusing out-of-order error.
+TEST_P(MultinodeLoggerTest, DuplicateLogFiles) {
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() + chrono::seconds(1000)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ std::vector<std::string> duplicates;
+ for (const std::string &f : pi1_single_direction_logfiles_) {
+ duplicates.emplace_back(f);
+ duplicates.emplace_back(f);
+ }
+ EXPECT_DEATH({ SortParts(duplicates); }, "Found duplicate parts in");
+}
+
// Tests that we properly handle a dead node. Do this by just disconnecting it
// and only using one nodes of logs.
TEST_P(MultinodeLoggerTest, DeadNode) {