Add some more UUIDs to log files
Change-Id: I32a26012bf204de3188ea77255ca5c43bd78f2f5
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index ad2f2d2..ea1f877 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -194,6 +194,7 @@
"//aos/network:team_number",
"//aos/network:timestamp_filter",
"//aos/time",
+ "//aos/util:file",
"//third_party/gmp",
"@com_github_google_flatbuffers//:flatbuffers",
"@com_google_absl//absl/strings",
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 5226154..15b4fd9 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -500,10 +500,10 @@
// Now compare that the headers match.
if (!CompareFlatBuffer(message_reader.raw_log_file_header(),
log_file_header_)) {
- if (message_reader.log_file_header()->has_logger_uuid() &&
- log_file_header_.message().has_logger_uuid() &&
- message_reader.log_file_header()->logger_uuid()->string_view() !=
- log_file_header_.message().logger_uuid()->string_view()) {
+ if (message_reader.log_file_header()->has_log_event_uuid() &&
+ log_file_header_.message().has_log_event_uuid() &&
+ message_reader.log_file_header()->log_event_uuid()->string_view() !=
+ log_file_header_.message().log_event_uuid()->string_view()) {
LOG(FATAL) << "Logger UUIDs don't match between log file chunks "
<< filenames_[0] << " and " << filenames_[i]
<< ", this is not supported.";
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index 859e24c..57d6909 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -16,6 +16,7 @@
#include "aos/flatbuffer_merge.h"
#include "aos/network/team_number.h"
#include "aos/time/time.h"
+#include "aos/util/file.h"
#include "flatbuffers/flatbuffers.h"
#include "third_party/gmp/gmpxx.h"
@@ -39,6 +40,8 @@
std::function<bool(const Channel *)> should_log)
: event_loop_(event_loop),
configuration_(configuration),
+ boot_uuid_(
+ util::ReadFileToStringOrDie("/proc/sys/kernel/random/boot_id")),
name_(network::GetHostname()),
timer_handler_(event_loop_->AddTimer(
[this]() { DoLogData(event_loop_->monotonic_now()); })),
@@ -177,10 +180,12 @@
}
}
-void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer) {
+void Logger::StartLogging(std::unique_ptr<LogNamer> log_namer,
+ std::string_view log_start_uuid) {
CHECK(!log_namer_) << ": Already logging";
log_namer_ = std::move(log_namer);
- uuid_ = UUID::Random();
+ log_event_uuid_ = UUID::Random();
+ log_start_uuid_ = log_start_uuid;
VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());
// We want to do as much work as possible before the initial Fetch. Time
@@ -248,6 +253,9 @@
}
node_state_.clear();
+ log_event_uuid_ = UUID::Zero();
+ log_start_uuid_ = std::string();
+
return std::move(log_namer_);
}
@@ -382,17 +390,28 @@
// TODO(austin): Compress this much more efficiently. There are a bunch of
// duplicated schemas.
- flatbuffers::Offset<aos::Configuration> configuration_offset =
+ const flatbuffers::Offset<aos::Configuration> configuration_offset =
CopyFlatBuffer(configuration_, &fbb);
- flatbuffers::Offset<flatbuffers::String> name_offset =
+ const flatbuffers::Offset<flatbuffers::String> name_offset =
fbb.CreateString(name_);
- CHECK(uuid_ != UUID::Zero());
- flatbuffers::Offset<flatbuffers::String> logger_uuid_offset =
- fbb.CreateString(uuid_.string_view());
+ CHECK(log_event_uuid_ != UUID::Zero());
+ const flatbuffers::Offset<flatbuffers::String> log_event_uuid_offset =
+ fbb.CreateString(log_event_uuid_.string_view());
- flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
+ const flatbuffers::Offset<flatbuffers::String> logger_instance_uuid_offset =
+ fbb.CreateString(logger_instance_uuid_.string_view());
+
+ flatbuffers::Offset<flatbuffers::String> log_start_uuid_offset;
+ if (!log_start_uuid_.empty()) {
+ log_start_uuid_offset = fbb.CreateString(log_start_uuid_);
+ }
+
+ const flatbuffers::Offset<flatbuffers::String> boot_uuid_offset =
+ fbb.CreateString(boot_uuid_);
+
+ const flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
fbb.CreateString("00000000-0000-4000-8000-000000000000");
flatbuffers::Offset<Node> node_offset;
@@ -430,7 +449,12 @@
.count());
}
- log_file_header_builder.add_logger_uuid(logger_uuid_offset);
+ log_file_header_builder.add_log_event_uuid(log_event_uuid_offset);
+ log_file_header_builder.add_logger_instance_uuid(logger_instance_uuid_offset);
+ if (!log_start_uuid_offset.IsNull()) {
+ log_file_header_builder.add_log_start_uuid(log_start_uuid_offset);
+ }
+ log_file_header_builder.add_boot_uuid(boot_uuid_offset);
log_file_header_builder.add_parts_uuid(parts_uuid_offset);
log_file_header_builder.add_parts_index(0);
@@ -587,7 +611,7 @@
std::vector<std::pair<std::string, int>> parts;
};
- // Map holding the logger_uuid -> second map. The second map holds the
+ // Map holding the log_event_uuid -> second map. The second map holds the
// parts_uuid -> list of parts for sorting.
std::map<std::string, std::map<std::string, UnsortedLogParts>> parts_list;
@@ -652,20 +676,22 @@
continue;
}
- CHECK(log_header.message().has_logger_uuid());
+ CHECK(log_header.message().has_log_event_uuid());
CHECK(log_header.message().has_parts_uuid());
CHECK(log_header.message().has_parts_index());
- const std::string logger_uuid = log_header.message().logger_uuid()->str();
+ const std::string log_event_uuid =
+ log_header.message().log_event_uuid()->str();
const std::string parts_uuid = log_header.message().parts_uuid()->str();
int32_t parts_index = log_header.message().parts_index();
- auto log_it = parts_list.find(logger_uuid);
+ auto log_it = parts_list.find(log_event_uuid);
if (log_it == parts_list.end()) {
- log_it = parts_list
- .insert(std::make_pair(
- logger_uuid, std::map<std::string, UnsortedLogParts>()))
- .first;
+ log_it =
+ parts_list
+ .insert(std::make_pair(log_event_uuid,
+ std::map<std::string, UnsortedLogParts>()))
+ .first;
}
auto it = log_it->second.find(parts_uuid);
@@ -724,12 +750,12 @@
for (std::pair<const std::string, std::map<std::string, UnsortedLogParts>>
&logs : parts_list) {
LogFile new_file;
- new_file.logger_uuid = logs.first;
+ new_file.log_event_uuid = logs.first;
for (std::pair<const std::string, UnsortedLogParts> &parts : logs.second) {
LogParts new_parts;
new_parts.monotonic_start_time = parts.second.monotonic_start_time;
new_parts.realtime_start_time = parts.second.realtime_start_time;
- new_parts.logger_uuid = logs.first;
+ new_parts.log_event_uuid = logs.first;
new_parts.parts_uuid = parts.first;
new_parts.node = std::move(parts.second.node);
@@ -751,8 +777,8 @@
std::ostream &operator<<(std::ostream &stream, const LogFile &file) {
stream << "{";
- if (!file.logger_uuid.empty()) {
- stream << "\"logger_uuid\": \"" << file.logger_uuid << "\", ";
+ if (!file.log_event_uuid.empty()) {
+ stream << "\"log_event_uuid\": \"" << file.log_event_uuid << "\", ";
}
stream << "\"parts\": [";
for (size_t i = 0; i < file.parts.size(); ++i) {
@@ -766,8 +792,8 @@
}
std::ostream &operator<<(std::ostream &stream, const LogParts &parts) {
stream << "{";
- if (!parts.logger_uuid.empty()) {
- stream << "\"logger_uuid\": \"" << parts.logger_uuid << "\", ";
+ if (!parts.log_event_uuid.empty()) {
+ stream << "\"log_event_uuid\": \"" << parts.log_event_uuid << "\", ";
}
if (!parts.parts_uuid.empty()) {
stream << "\"parts_uuid\": \"" << parts.parts_uuid << "\", ";
diff --git a/aos/events/logging/logger.fbs b/aos/events/logging/logger.fbs
index c651c35..34c62c6 100644
--- a/aos/events/logging/logger.fbs
+++ b/aos/events/logging/logger.fbs
@@ -48,7 +48,19 @@
// All log files and parts from a single logging event will have
// the same uuid. This should be all the files generated on a single node.
// Used to correlate files recorded together.
- logger_uuid:string (id: 6);
+ log_event_uuid:string (id: 6);
+
+ // All log parts generated by a single Logger instance will have the same
+ // value here.
+ logger_instance_uuid: string (id: 9);
+
+ // All log parts generated on a single node while it is booted will have the
+ // same value here. It also matches the boot ID reported by systemd.
+ boot_uuid: string (id: 10);
+
+ // All log events across all nodes produced by a single high-level start event
+ // will have the same value here.
+ log_start_uuid: string (id: 11);
// Part files which go together all have headers. When creating a log file
// with multiple parts, the logger should stop writing to part n-1 as soon
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index e8856f3..161300c 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -66,12 +66,19 @@
polling_period_ = polling_period;
}
+ std::string_view log_start_uuid() const { return log_start_uuid_; }
+
// Rotates the log file(s), triggering new part files to be written for each
// log file.
void Rotate();
// Starts logging to files with the given naming scheme.
- void StartLogging(std::unique_ptr<LogNamer> log_namer);
+ //
+ // log_start_uuid may be used to tie this log event to other log events across
+ // multiple nodes. The default (empty string) indicates there isn't one
+ // available.
+ void StartLogging(std::unique_ptr<LogNamer> log_namer,
+ std::string_view log_start_uuid = "");
// Stops logging. Ensures any messages through end_time make it into the log.
//
@@ -154,13 +161,17 @@
aos::monotonic_clock::time_point monotonic_start_time,
aos::realtime_clock::time_point realtime_start_time);
- EventLoop *event_loop_;
- UUID uuid_ = UUID::Zero();
- std::unique_ptr<LogNamer> log_namer_;
-
+ EventLoop *const event_loop_;
// The configuration to place at the top of the log file.
const Configuration *const configuration_;
+ UUID log_event_uuid_ = UUID::Zero();
+ const UUID logger_instance_uuid_ = UUID::Random();
+ std::unique_ptr<LogNamer> log_namer_;
+ // Empty indicates no log_start_uuid was provided to StartLogging.
+ std::string log_start_uuid_;
+ const std::string boot_uuid_;
+
// Name to save in the log file. Defaults to hostname.
std::string name_;
@@ -194,7 +205,7 @@
aos::realtime_clock::time_point realtime_start_time;
// UUIDs if available.
- std::string logger_uuid;
+ std::string log_event_uuid;
std::string parts_uuid;
// The node this represents, or empty if we are in a single node world.
@@ -208,7 +219,7 @@
// ordering constraints relative to each other.
struct LogFile {
// The UUID tying them all together (if available)
- std::string logger_uuid;
+ std::string log_event_uuid;
// All the parts, unsorted.
std::vector<LogParts> parts;
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 2095529..108d4de 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -262,8 +262,8 @@
log_header.emplace_back(ReadHeader(f));
}
- EXPECT_EQ(log_header[0].message().logger_uuid()->string_view(),
- log_header[1].message().logger_uuid()->string_view());
+ EXPECT_EQ(log_header[0].message().log_event_uuid()->string_view(),
+ log_header[1].message().log_event_uuid()->string_view());
EXPECT_EQ(log_header[0].message().parts_uuid()->string_view(),
log_header[1].message().parts_uuid()->string_view());
@@ -531,7 +531,7 @@
std::vector<FlatbufferVector<LogFileHeader>> log_header;
for (std::string_view f : logfiles_) {
log_header.emplace_back(ReadHeader(f));
- logfile_uuids.insert(log_header.back().message().logger_uuid()->str());
+ logfile_uuids.insert(log_header.back().message().log_event_uuid()->str());
parts_uuids.insert(log_header.back().message().parts_uuid()->str());
}
@@ -1111,16 +1111,16 @@
// Count up the number of UUIDs and make sure they are what we expect as a
// sanity check.
- std::set<std::string> logger_uuids;
+ std::set<std::string> log_event_uuids;
std::set<std::string> parts_uuids;
std::set<std::string> both_uuids;
size_t missing_rt_count = 0;
for (const LogFile &log_file : sorted_parts) {
- EXPECT_FALSE(log_file.logger_uuid.empty());
- logger_uuids.insert(log_file.logger_uuid);
- both_uuids.insert(log_file.logger_uuid);
+ EXPECT_FALSE(log_file.log_event_uuid.empty());
+ log_event_uuids.insert(log_file.log_event_uuid);
+ both_uuids.insert(log_file.log_event_uuid);
for (const LogParts &part : log_file.parts) {
EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
@@ -1128,7 +1128,8 @@
missing_rt_count +=
part.realtime_start_time == aos::realtime_clock::min_time;
- EXPECT_TRUE(logger_uuids.find(part.logger_uuid) != logger_uuids.end());
+ EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
+ log_event_uuids.end());
EXPECT_NE(part.node, "");
parts_uuids.insert(part.parts_uuid);
both_uuids.insert(part.parts_uuid);
@@ -1140,9 +1141,9 @@
// the log reader can actually do a better job.
EXPECT_EQ(missing_rt_count, 5u);
- EXPECT_EQ(logger_uuids.size(), 2u);
+ EXPECT_EQ(log_event_uuids.size(), 2u);
EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
- EXPECT_EQ(logger_uuids.size() + parts_uuids.size(), both_uuids.size());
+ EXPECT_EQ(log_event_uuids.size() + parts_uuids.size(), both_uuids.size());
// Test that each list of parts is in order. Don't worry about the ordering
// between part file lists though.