Merge "Add UUIDs for both the logger and parts"
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 61df474..5aba387 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -47,6 +47,7 @@
     deps = [
         ":logfile_utils",
         ":logger_fbs",
+        ":uuid",
         "//aos/events:event_loop",
         "//aos/events:simulated_event_loop",
         "//aos/network:message_bridge_server_fbs",
@@ -158,3 +159,18 @@
         "//aos/testing:googletest",
     ],
 )
+
+cc_library(
+    name = "uuid",
+    srcs = ["uuid.cc"],
+    hdrs = ["uuid.h"],
+)
+
+cc_test(
+    name = "uuid_test",
+    srcs = ["uuid_test.cc"],
+    deps = [
+        ":uuid",
+        "//aos/testing:googletest",
+    ],
+)
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 05ee4e0..214fa2b 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -377,12 +377,44 @@
       }
     }
 
+    // We don't have a good way to set the realtime start time on remote
+    // nodes.  Confirm that whether it is set stays consistent across chunks.
+    CHECK_EQ(log_file_header_.mutable_message()->has_realtime_start_time(),
+             message_reader.log_file_header()->has_realtime_start_time());
+
+    // The parts index will *not* match between chunks unless we force it to.
+    // We only want to tolerate the start time and parts differing, so set it.
+    log_file_header_.mutable_message()->mutate_parts_index(
+        message_reader.log_file_header()->parts_index());
+
     // Now compare that the headers match.
-    CHECK(CompareFlatBuffer(message_reader.raw_log_file_header(),
-                            log_file_header_))
-        << ": Header is different between log file chunks " << filenames_[0]
-        << " and " << filenames_[i] << ", this is not supported.";
+    if (!CompareFlatBuffer(message_reader.raw_log_file_header(),
+                           log_file_header_)) {
+      if (message_reader.log_file_header()->has_logger_uuid() &&
+          log_file_header_.message().has_logger_uuid() &&
+          message_reader.log_file_header()->logger_uuid()->string_view() !=
+              log_file_header_.message().logger_uuid()->string_view()) {
+        LOG(FATAL) << "Logger UUIDs don't match between log file chunks "
+                   << filenames_[0] << " and " << filenames_[i]
+                   << ", this is not supported.";
+      }
+      if (message_reader.log_file_header()->has_parts_uuid() &&
+          log_file_header_.message().has_parts_uuid() &&
+          message_reader.log_file_header()->parts_uuid()->string_view() !=
+              log_file_header_.message().parts_uuid()->string_view()) {
+        LOG(FATAL) << "Parts UUIDs don't match between log file chunks "
+                   << filenames_[0] << " and " << filenames_[i]
+                   << ", this is not supported.";
+      }
+
+      LOG(FATAL) << "Header is different between log file chunks "
+                 << filenames_[0] << " and " << filenames_[i]
+                 << ", this is not supported.";
+    }
   }
+  // Restore the parts index to that of the first log file chunk.
+  log_file_header_.mutable_message()->mutate_parts_index(
+      message_reader_->log_file_header()->parts_index());
 
   // Setup per channel state.
   channels_.resize(configuration()->channels()->size());
@@ -429,11 +461,22 @@
   // We can't support the config diverging between two log file headers.  See if
   // they are the same.
   if (next_filename_index_ != 0) {
+    // In order for the headers to compare as identical, they need to have the
+    // same parts_index.  Rewrite the saved header with the new parts_index,
+    // compare, and then restore the original value.
+    const int32_t original_parts_index =
+        log_file_header_.message().parts_index();
+    log_file_header_.mutable_message()->mutate_parts_index(
+        message_reader_->log_file_header()->parts_index());
+
     CHECK(CompareFlatBuffer(message_reader_->raw_log_file_header(),
                             log_file_header_))
         << ": Header is different between log file chunks "
         << filenames_[next_filename_index_] << " and "
         << filenames_[next_filename_index_ - 1] << ", this is not supported.";
+
+    log_file_header_.mutable_message()->mutate_parts_index(
+        original_parts_index);
   }
 
   ++next_filename_index_;
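
For illustration, the header comparison above follows a normalize-compare-restore
pattern: force the one field that is allowed to differ (parts_index) to match,
compare the whole header, then put the original value back.  A minimal standalone
sketch of the same idea, using a plain struct in place of the flatbuffer header
(SimpleHeader and CompareIgnoringPartsIndex are made-up names, not part of this
change):

#include <cstdint>
#include <string>

// Hypothetical stand-in for LogFileHeader with only the fields that matter
// for the comparison.
struct SimpleHeader {
  std::string logger_uuid;
  std::string parts_uuid;
  int32_t parts_index = 0;

  bool operator==(const SimpleHeader &other) const {
    return logger_uuid == other.logger_uuid &&
           parts_uuid == other.parts_uuid &&
           parts_index == other.parts_index;
  }
};

// Chunks of the same log are expected to differ only in parts_index, so take
// a copy of the saved header, overwrite its parts_index with the new chunk's
// value, and compare everything else.
bool CompareIgnoringPartsIndex(SimpleHeader saved, const SimpleHeader &next) {
  saved.parts_index = next.parts_index;
  return saved == next;
}
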
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index 89839ec..85809b1 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -12,6 +12,7 @@
 #include "absl/types/span.h"
 #include "aos/events/event_loop.h"
 #include "aos/events/logging/logger_generated.h"
+#include "aos/events/logging/uuid.h"
 #include "aos/flatbuffer_merge.h"
 #include "aos/network/team_number.h"
 #include "aos/time/time.h"
@@ -34,16 +35,30 @@
 namespace logger {
 namespace chrono = std::chrono;
 
+void LogNamer::UpdateHeader(
+    aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+    const UUID &uuid, int parts_index) {
+  header->mutable_message()->mutate_parts_index(parts_index);
+  CHECK_EQ(uuid.string_view().size(),
+           header->mutable_message()->mutable_parts_uuid()->size());
+  std::copy(uuid.string_view().begin(), uuid.string_view().end(),
+            reinterpret_cast<char *>(
+                header->mutable_message()->mutable_parts_uuid()->Data()));
+}
+
 void MultiNodeLogNamer::WriteHeader(
-    const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &header,
+    aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
     const Node *node) {
   if (node == this->node()) {
-    data_writer_->WriteSizedFlatbuffer(header.full_span());
+    UpdateHeader(header, uuid_, part_number_);
+    data_writer_->WriteSizedFlatbuffer(header->full_span());
   } else {
     for (std::pair<const Channel *const, DataWriter> &data_writer :
          data_writers_) {
       if (node == data_writer.second.node) {
-        data_writer.second.writer->WriteSizedFlatbuffer(header.full_span());
+        UpdateHeader(header, data_writer.second.uuid,
+                     data_writer.second.part_number);
+        data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
       }
     }
   }
@@ -51,18 +66,21 @@
 
 void MultiNodeLogNamer::Rotate(
     const Node *node,
-    const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &header) {
+    aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) {
   if (node == this->node()) {
     ++part_number_;
     *data_writer_ = std::move(*OpenDataWriter());
-    data_writer_->WriteSizedFlatbuffer(header.full_span());
+    UpdateHeader(header, uuid_, part_number_);
+    data_writer_->WriteSizedFlatbuffer(header->full_span());
   } else {
     for (std::pair<const Channel *const, DataWriter> &data_writer :
          data_writers_) {
       if (node == data_writer.second.node) {
         ++data_writer.second.part_number;
         data_writer.second.rotate(data_writer.first, &data_writer.second);
-        data_writer.second.writer->WriteSizedFlatbuffer(header.full_span());
+        UpdateHeader(header, data_writer.second.uuid,
+                     data_writer.second.part_number);
+        data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
       }
     }
   }
@@ -76,6 +94,7 @@
 Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
                std::chrono::milliseconds polling_period)
     : event_loop_(event_loop),
+      uuid_(UUID::Random()),
       log_namer_(std::move(log_namer)),
       timer_handler_(event_loop_->AddTimer([this]() { DoLogData(); })),
       polling_period_(polling_period),
@@ -246,7 +265,7 @@
         configuration::GetNodeIndex(event_loop_->configuration(), node);
     MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
                          realtime_start_time);
-    log_namer_->WriteHeader(node_state_[node_index].log_file_header, node);
+    log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
   }
 }
 
@@ -268,7 +287,7 @@
             node, node_index,
             server_statistics_fetcher_.context().monotonic_event_time,
             server_statistics_fetcher_.context().realtime_event_time)) {
-      log_namer_->Rotate(node, node_state_[node_index].log_file_header);
+      log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
     }
   }
 }
@@ -361,9 +380,15 @@
   flatbuffers::Offset<aos::Configuration> configuration_offset =
       CopyFlatBuffer(event_loop_->configuration(), &fbb);
 
-  flatbuffers::Offset<flatbuffers::String> string_offset =
+  flatbuffers::Offset<flatbuffers::String> name_offset =
       fbb.CreateString(network::GetHostname());
 
+  flatbuffers::Offset<flatbuffers::String> logger_uuid_offset =
+      fbb.CreateString(uuid_.string_view());
+
+  flatbuffers::Offset<flatbuffers::String> parts_uuid_offset =
+      fbb.CreateString("00000000-0000-4000-8000-000000000000");
+
   flatbuffers::Offset<Node> node_offset;
 
   if (configuration::MultiNode(event_loop_->configuration())) {
@@ -372,7 +397,7 @@
 
   aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
 
-  log_file_header_builder.add_name(string_offset);
+  log_file_header_builder.add_name(name_offset);
 
   // Only add the node if we are running in a multinode configuration.
   if (node != nullptr) {
@@ -400,6 +425,11 @@
             .count());
   }
 
+  log_file_header_builder.add_logger_uuid(logger_uuid_offset);
+
+  log_file_header_builder.add_parts_uuid(parts_uuid_offset);
+  log_file_header_builder.add_parts_index(0);
+
   fbb.FinishSizePrefixed(log_file_header_builder.Finish());
   return fbb.Release();
 }
@@ -408,7 +438,7 @@
   for (const Node *node : log_namer_->nodes()) {
     const int node_index =
         configuration::GetNodeIndex(event_loop_->configuration(), node);
-    log_namer_->Rotate(node, node_state_[node_index].log_file_header);
+    log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
   }
 }
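
A design note on the header plumbing above: MakeHeader fills parts_uuid with the
fixed 36-character placeholder "00000000-0000-4000-8000-000000000000", which is
what lets LogNamer::UpdateHeader overwrite the string in place later (the
CHECK_EQ enforces that the lengths match, since a flatbuffer string cannot be
resized once built).  This is also why WriteHeader and Rotate now take the
header by mutable pointer: each writer patches in its own parts_uuid and
parts_index immediately before writing.  A minimal sketch of that same-length,
in-place overwrite, using a plain char array rather than a flatbuffers::String
(the names here are illustrative only):

#include <algorithm>
#include <array>
#include <string_view>

// Stand-in for the 36-byte parts_uuid string already serialized into the
// header buffer.
std::array<char, 36> parts_uuid_storage = {};

// Overwrite the placeholder with a real UUID of exactly the same length.
// Mirrors the size check and std::copy in LogNamer::UpdateHeader.
bool OverwritePartsUuid(std::string_view uuid) {
  if (uuid.size() != parts_uuid_storage.size()) {
    return false;  // The real code CHECKs instead of returning.
  }
  std::copy(uuid.begin(), uuid.end(), parts_uuid_storage.begin());
  return true;
}
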
 
diff --git a/aos/events/logging/logger.fbs b/aos/events/logging/logger.fbs
index 00becfa..2905dfa 100644
--- a/aos/events/logging/logger.fbs
+++ b/aos/events/logging/logger.fbs
@@ -15,9 +15,11 @@
 
 table LogFileHeader {
   // Time this log file started on the monotonic clock in nanoseconds.
-  monotonic_start_time:long = -9223372036854775808;
+  // If this isn't known (the log file is being recorded for another node
+  // whose time offset we don't know), both timestamps will be min_time.
+  monotonic_start_time:int64 = -9223372036854775808;
   // Time this log file started on the realtime clock in nanoseconds.
-  realtime_start_time:long = -9223372036854775808;
+  realtime_start_time:int64 = -9223372036854775808;
 
   // Messages are not written in order to disk.  They will be out of order by
   // at most this duration (in nanoseconds).  If the log reader buffers until
@@ -33,6 +35,36 @@
 
   // The current node, if known and running in a multi-node configuration.
   node:Node;
+
+  // All UUIDs are version 4 (random) UUIDs.
+
+  // A log is made up of a bunch of log files and parts, which build up a
+  // tree.  Every .bfbs file has a LogFileHeader at the start.
+  //
+  //  /-- basename_pi1_data.part0.bfbs, basename_pi1_data.part1.bfbs, etc.
+  // ---- basename_timestamps/pi1/aos/remote_timestamps/pi2/aos.logger.MessageHeader.part0.bfbs, etc.
+  //  \-- basename_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0.bfbs, etc.
+
+  // All log files and parts from a single run of a logger executable will have
+  // the same logger_uuid.  This should be all the files generated on a single
+  // node.  Used to correlate files recorded together.
+  logger_uuid:string;
+
+  // Part files which go together all have headers.  When creating a log file
+  // with multiple parts, the logger should stop writing to part n-1 as soon
+  // as it starts writing to part n, and write messages as though there was
+  // just 1 big file.  Therefore, part files won't be self-standing, since
+  // they won't have data fetched at the beginning.
+
+  // If data is logged before the time offset to the other node can be
+  // established, the start time will be monotonic_clock::min_time, and a new
+  // part file will be created when the start time is known.
+
+  // All the parts which go together have the same parts_uuid.
+  parts_uuid:string;
+  // The parts_index indicates which part this is in the sequence.  The index
+  // starts at 0.
+  parts_index:int32;
 }
 
 // Table holding a message.
@@ -58,12 +90,12 @@
 
   // Time this message was sent on the monotonic clock of the remote node in
   // nanoseconds.
-  monotonic_remote_time:long = -9223372036854775808;
+  monotonic_remote_time:int64 = -9223372036854775808;
   // Time this message was sent on the realtime clock of the remote node in
   // nanoseconds.
-  realtime_remote_time:long = -9223372036854775808;
+  realtime_remote_time:int64 = -9223372036854775808;
   // Queue index of this message on the remote node.
-  remote_queue_index:uint = 4294967295;
+  remote_queue_index:uint32 = 4294967295;
 }
 
 root_type MessageHeader;
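
As a concrete illustration of how these fields relate across the files from one
logger run (the UUID letters below are placeholders, not real values): every
file written by the run carries the same logger_uuid, the parts of each file
share a parts_uuid, and parts_index counts within that set.

  basename_pi1_data.part0.bfbs:
      logger_uuid=A  parts_uuid=B  parts_index=0
  basename_pi1_data.part1.bfbs:
      logger_uuid=A  parts_uuid=B  parts_index=1
  basename_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0.bfbs:
      logger_uuid=A  parts_uuid=C  parts_index=0
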
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index e599091..d4ed786 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -14,6 +14,7 @@
 #include "aos/events/logging/eigen_mpq.h"
 #include "aos/events/logging/logfile_utils.h"
 #include "aos/events/logging/logger_generated.h"
+#include "aos/events/logging/uuid.h"
 #include "aos/events/simulated_event_loop.h"
 #include "aos/network/message_bridge_server_generated.h"
 #include "aos/network/timestamp_filter.h"
@@ -30,7 +31,7 @@
   virtual ~LogNamer() {}
 
   virtual void WriteHeader(
-      const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &header,
+      aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
       const Node *node) = 0;
   virtual DetachedBufferWriter *MakeWriter(const Channel *channel) = 0;
 
@@ -39,13 +40,16 @@
       const Channel *channel, const Node *node) = 0;
   virtual void Rotate(
       const Node *node,
-      const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader>
-          &header) = 0;
+      aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) = 0;
   const std::vector<const Node *> &nodes() const { return nodes_; }
 
   const Node *node() const { return node_; }
 
  protected:
+  void UpdateHeader(
+      aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+      const UUID &uuid, int parts_index);
+
   const Node *const node_;
   std::vector<const Node *> nodes_;
 };
@@ -53,13 +57,17 @@
 class LocalLogNamer : public LogNamer {
  public:
   LocalLogNamer(std::string_view base_name, const Node *node)
-      : LogNamer(node), base_name_(base_name), data_writer_(OpenDataWriter()) {}
+      : LogNamer(node),
+        base_name_(base_name),
+        uuid_(UUID::Random()),
+        data_writer_(OpenDataWriter()) {}
 
   void WriteHeader(
-      const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &header,
+      aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
       const Node *node) override {
     CHECK_EQ(node, this->node());
-    data_writer_->WriteSizedFlatbuffer(header.full_span());
+    UpdateHeader(header, uuid_, part_number_);
+    data_writer_->WriteSizedFlatbuffer(header->full_span());
   }
 
   DetachedBufferWriter *MakeWriter(const Channel *channel) override {
@@ -68,12 +76,13 @@
   }
 
   void Rotate(const Node *node,
-              const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader>
-                  &header) override {
+              aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
+      override {
     CHECK(node == this->node());
     ++part_number_;
     *data_writer_ = std::move(*OpenDataWriter());
-    data_writer_->WriteSizedFlatbuffer(header.full_span());
+    UpdateHeader(header, uuid_, part_number_);
+    data_writer_->WriteSizedFlatbuffer(header->full_span());
   }
 
   DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) override {
@@ -98,6 +107,7 @@
         absl::StrCat(base_name_, ".part", part_number_, ".bfbs"));
   }
   const std::string base_name_;
+  const UUID uuid_;
   size_t part_number_ = 0;
   std::unique_ptr<DetachedBufferWriter> data_writer_;
 };
@@ -111,17 +121,18 @@
       : LogNamer(node),
         base_name_(base_name),
         configuration_(configuration),
+        uuid_(UUID::Random()),
         data_writer_(OpenDataWriter()) {}
 
   // Writes the header to all log files for a specific node.  This function
   // needs to be called after all the writers are created.
   void WriteHeader(
-      const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &header,
+      aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
       const Node *node) override;
 
   void Rotate(const Node *node,
-              const aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader>
-                  &header) override;
+              aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
+      override;
 
   // Makes a data logger for a specific channel.
   DetachedBufferWriter *MakeWriter(const Channel *channel) override {
@@ -221,6 +232,7 @@
     std::unique_ptr<DetachedBufferWriter> writer = nullptr;
     const Node *node;
     size_t part_number = 0;
+    UUID uuid = UUID::Random();
     std::function<void(const Channel *, DataWriter *)> rotate;
   };
 
@@ -258,6 +270,7 @@
 
   const std::string base_name_;
   const Configuration *const configuration_;
+  const UUID uuid_;
 
   size_t part_number_ = 0;
 
@@ -303,6 +316,7 @@
   void LogUntil(monotonic_clock::time_point t);
 
   EventLoop *event_loop_;
+  const UUID uuid_;
   std::unique_ptr<LogNamer> log_namer_;
 
   // Structure to track both a fetcher, and if the data fetched has been
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index ab5f02c..bf062b8 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -125,6 +125,23 @@
     event_loop_factory_.RunFor(chrono::milliseconds(10000));
   }
 
+  {
+    // Confirm that the UUIDs match for both the parts and the logger, and the
+    // parts_index increments.
+    std::vector<FlatbufferVector<LogFileHeader>> log_header;
+    for (std::string_view f : {logfile0, logfile1}) {
+      log_header.emplace_back(ReadHeader(f));
+    }
+
+    EXPECT_EQ(log_header[0].message().logger_uuid()->string_view(),
+              log_header[1].message().logger_uuid()->string_view());
+    EXPECT_EQ(log_header[0].message().parts_uuid()->string_view(),
+              log_header[1].message().parts_uuid()->string_view());
+
+    EXPECT_EQ(log_header[0].message().parts_index(), 0);
+    EXPECT_EQ(log_header[1].message().parts_index(), 1);
+  }
+
   // Even though it doesn't make any difference here, exercise the logic for
   // passing in a separate config.
   LogReader reader(std::vector<std::string>{logfile0, logfile1},
@@ -375,16 +392,50 @@
   }
 
   {
-    // Confirm that the headers are all for the correct nodes.
-    FlatbufferVector<LogFileHeader> logheader1 = ReadHeader(logfiles_[0]);
-    EXPECT_EQ(logheader1.message().node()->name()->string_view(), "pi1");
-    FlatbufferVector<LogFileHeader> logheader2 = ReadHeader(logfiles_[1]);
-    EXPECT_EQ(logheader2.message().node()->name()->string_view(), "pi2");
-    FlatbufferVector<LogFileHeader> logheader3 = ReadHeader(logfiles_[2]);
-    EXPECT_EQ(logheader3.message().node()->name()->string_view(), "pi2");
-    FlatbufferVector<LogFileHeader> logheader4 = ReadHeader(logfiles_[3]);
-    EXPECT_EQ(logheader4.message().node()->name()->string_view(), "pi2");
+    std::set<std::string> logfile_uuids;
+    std::set<std::string> parts_uuids;
+    // Confirm that we have the expected number of distinct logger UUIDs and
+    // parts UUIDs.
+    std::vector<FlatbufferVector<LogFileHeader>> log_header;
+    for (std::string_view f : logfiles_) {
+      log_header.emplace_back(ReadHeader(f));
+      logfile_uuids.insert(log_header.back().message().logger_uuid()->str());
+      parts_uuids.insert(log_header.back().message().parts_uuid()->str());
+    }
 
+    EXPECT_EQ(logfile_uuids.size(), 2u);
+    EXPECT_EQ(parts_uuids.size(), 7u);
+
+    // And confirm everything is on the correct node.
+    EXPECT_EQ(log_header[0].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[1].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi1");
+    EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi2");
+    EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi2");
+
+    // And the parts index matches.
+    EXPECT_EQ(log_header[0].message().parts_index(), 0);
+    EXPECT_EQ(log_header[1].message().parts_index(), 0);
+    EXPECT_EQ(log_header[2].message().parts_index(), 1);
+    EXPECT_EQ(log_header[3].message().parts_index(), 0);
+    EXPECT_EQ(log_header[4].message().parts_index(), 0);
+    EXPECT_EQ(log_header[5].message().parts_index(), 1);
+    EXPECT_EQ(log_header[6].message().parts_index(), 0);
+    EXPECT_EQ(log_header[7].message().parts_index(), 1);
+    EXPECT_EQ(log_header[8].message().parts_index(), 0);
+    EXPECT_EQ(log_header[9].message().parts_index(), 1);
+    EXPECT_EQ(log_header[10].message().parts_index(), 0);
+    EXPECT_EQ(log_header[11].message().parts_index(), 1);
+  }
+
+  {
     using ::testing::UnorderedElementsAre;
 
     // Timing reports, pings
diff --git a/aos/events/logging/uuid.cc b/aos/events/logging/uuid.cc
new file mode 100644
index 0000000..9298c8b
--- /dev/null
+++ b/aos/events/logging/uuid.cc
@@ -0,0 +1,62 @@
+#include "aos/events/logging/uuid.h"
+
+#include <array>
+#include <random>
+#include <string_view>
+
+namespace aos {
+namespace {
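+// Converts a value in [0, 15] to its lowercase hexadecimal character.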
+char ToHex(int val) {
+  if (val < 10) {
+    return val + '0';
+  } else {
+    return val - 10 + 'a';
+  }
+}
+}  // namespace
+
+UUID UUID::Random() {
+  std::random_device rd;
+  std::mt19937 gen(rd());
+
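+  // dis produces any hex nibble; dis2 produces the variant nibble (8-b).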
+  std::uniform_int_distribution<> dis(0, 15);
+  std::uniform_int_distribution<> dis2(8, 11);
+
+  UUID result;
+
+  // UUID4 is implemented per https://www.cryptosys.net/pki/uuid-rfc4122.html
+  int i;
+  for (i = 0; i < 8; i++) {
+    result.data_[i] = ToHex(dis(gen));
+  }
+  result.data_[i] = '-';
+  ++i;
+  for (; i < 13; i++) {
+    result.data_[i] = ToHex(dis(gen));
+  }
+  result.data_[i] = '-';
+  ++i;
+  result.data_[i] = '4';
+  ++i;
+  for (; i < 18; i++) {
+    result.data_[i] = ToHex(dis(gen));
+  }
+  result.data_[i] = '-';
+  ++i;
+  result.data_[i] = ToHex(dis2(gen));
+  ++i;
+  for (; i < 23; i++) {
+    result.data_[i] = ToHex(dis(gen));
+  }
+  result.data_[i] = '-';
+  ++i;
+  for (; i < 36; i++) {
+    result.data_[i] = ToHex(dis(gen));
+  }
+
+  return result;
+}
+
+}  // namespace aos
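
The generator above produces the canonical 8-4-4-4-12 layout: dashes at offsets
8, 13, 18, and 23, a '4' version nibble at offset 14, and a variant nibble in
[8, b] at offset 19.  A minimal sketch of a validator for that layout (this
helper is illustrative and not part of the change):

#include <cctype>
#include <string_view>

// Returns true if uuid looks like xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx: 36
// characters, dashes at offsets 8, 13, 18, and 23, hex digits elsewhere, a
// '4' version nibble, and a variant nibble in [8, b].
bool LooksLikeUuid4(std::string_view uuid) {
  if (uuid.size() != 36) {
    return false;
  }
  for (size_t i = 0; i < uuid.size(); ++i) {
    if (i == 8 || i == 13 || i == 18 || i == 23) {
      if (uuid[i] != '-') {
        return false;
      }
    } else if (!std::isxdigit(static_cast<unsigned char>(uuid[i]))) {
      return false;
    }
  }
  return uuid[14] == '4' && (uuid[19] == '8' || uuid[19] == '9' ||
                             uuid[19] == 'a' || uuid[19] == 'b');
}
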
diff --git a/aos/events/logging/uuid.h b/aos/events/logging/uuid.h
new file mode 100644
index 0000000..b81b811
--- /dev/null
+++ b/aos/events/logging/uuid.h
@@ -0,0 +1,36 @@
+#ifndef AOS_EVENTS_LOGGING_UUID_H_
+#define AOS_EVENTS_LOGGING_UUID_H_
+
+#include <array>
+#include <random>
+#include <string_view>
+
+namespace aos {
+
+// Class to generate and hold a UUID.
+class UUID {
+ public:
+  // Returns a randomly generated UUID.  This is known as a UUID4.
+  static UUID Random();
+
+  std::string_view string_view() const {
+    return std::string_view(data_.data(), data_.size());
+  }
+
+  bool operator==(const UUID &other) const {
+    return other.string_view() == string_view();
+  }
+  bool operator!=(const UUID &other) const {
+    return other.string_view() != string_view();
+  }
+
+ private:
+  UUID() {}
+
+  // Fixed-size storage for the data.  Not null-terminated.
+  std::array<char, 36> data_;
+};
+
+}  // namespace aos
+
+#endif  // AOS_EVENTS_LOGGING_UUID_H_
diff --git a/aos/events/logging/uuid_test.cc b/aos/events/logging/uuid_test.cc
new file mode 100644
index 0000000..d0320de
--- /dev/null
+++ b/aos/events/logging/uuid_test.cc
@@ -0,0 +1,18 @@
+#include "aos/events/logging/uuid.h"
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace testing {
+
+// Tests that random UUIDs are actually random, and we can convert them to a
+// string.  Not very exhaustive, but it is a good smoke test.
+TEST(UUIDTest, GetOne) {
+  LOG(INFO) << UUID::Random().string_view();
+
+  EXPECT_NE(UUID::Random(), UUID::Random());
+}
+
+}  // namespace testing
+}  // namespace aos