Add encoding profiling to aos logger

The diagnostic data logger's compression uses a significant amount
of CPU. To help profile the system, this change records the encode
time for each message.

When profiling is enabled (via Logger::SetProfilingPath), the
encode times are collected in a CSV file along with the event loop
monotonic clock timestamps.
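
For example, turning profiling on looks roughly like this (a minimal
sketch based on the test added below; the output path is
illustrative):

    aos::logger::Logger logger(event_loop.get());
    // Each encoded message appends one CSV row:
    //   channel_name, channel_type, encode_duration_ns,
    //   encoding_start_time_ns, message_time_s
    logger.SetProfilingPath("/tmp/encoding_profile.csv");
    logger.StartLogging(std::move(log_namer));
    // Passing std::nullopt disables profiling again.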

Also add a Python script, plot_logger_profile.py, to graph the
encode times from the profiling output.
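
The script can then be pointed at the profiling CSV, e.g. (paths
match the sketch above):

    bazel run //aos/events/logging:plot_logger_profile -- \
        --log_file_path /tmp/encoding_profile.csv

It prints a per-bin breakdown table and writes an interactive
stacked bar chart to plot.html.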

Change-Id: If1cc19fbffe0ff31f63e5789f610c4ca40a9d47a
Signed-off-by: James Kuszmaul <james.kuszmaul@bluerivertech.com>
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 3b5f57a..5d8c1b0 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -260,6 +260,7 @@
         "//aos:configuration_fbs",
         "//aos:flatbuffers",
         "//aos/containers:resizeable_buffer",
+        "//aos/time",
         "@com_github_google_flatbuffers//:flatbuffers",
         "@com_github_google_glog//:glog",
         "@com_google_absl//absl/types:span",
@@ -434,6 +435,7 @@
         "//aos/events:event_loop",
         "//aos/events:simulated_event_loop",
         "//aos/network:message_bridge_server_fbs",
+        "@com_google_absl//absl/strings",
     ],
 )
 
@@ -1023,3 +1025,33 @@
         "@com_github_google_glog//:glog",
     ],
 )
+
+py_binary(
+    name = "plot_logger_profile",
+    srcs = [
+        "plot_logger_profile.py",
+    ],
+    target_compatible_with = [
+        # TODO(PRO-24640): Remove compatibility.
+        "@platforms//cpu:x86_64",
+    ],
+    visibility = ["//visibility:public"],
+    deps = [
+        "@pip//bokeh",
+        "@pip//numpy",
+        "@pip//tabulate",
+    ],
+)
+
+py_test(
+    name = "plot_logger_profile_test",
+    srcs = [
+        "plot_logger_profile_test.py",
+    ],
+    target_compatible_with = ["@platforms//os:linux"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":plot_logger_profile",
+        "@pip//numpy",
+    ],
+)
diff --git a/aos/events/logging/buffer_encoder.cc b/aos/events/logging/buffer_encoder.cc
index 6aba79a..b352d5d 100644
--- a/aos/events/logging/buffer_encoder.cc
+++ b/aos/events/logging/buffer_encoder.cc
@@ -22,7 +22,8 @@
 
 bool DummyEncoder::HasSpace(size_t request) const { return request <= space(); }
 
-size_t DummyEncoder::Encode(Copier *copy, size_t start_byte) {
+size_t DummyEncoder::Encode(Copier *copy, size_t start_byte,
+                            std::chrono::nanoseconds * /*encode_duration*/) {
   const size_t input_buffer_initial_size = input_buffer_.size();
 
   size_t expected_write_size =
diff --git a/aos/events/logging/buffer_encoder.h b/aos/events/logging/buffer_encoder.h
index a155bb5..db09d32 100644
--- a/aos/events/logging/buffer_encoder.h
+++ b/aos/events/logging/buffer_encoder.h
@@ -7,6 +7,7 @@
 
 #include "aos/containers/resizeable_buffer.h"
 #include "aos/events/logging/logger_generated.h"
+#include "aos/time/time.h"
 
 namespace aos::logger {
 
@@ -65,14 +66,19 @@
 
   // Encodes and enqueues the given data encoder.  Starts at the start byte
   // (which must be a multiple of 8 bytes), and goes as far as it can.  Returns
-  // the amount encoded.
-  virtual size_t Encode(Copier *copy, size_t start_byte) = 0;
+  // the amount encoded. `encode_duration` is optional; when provided, it is
+  // set to the time spent by the encoder during this call.
+  virtual size_t Encode(
+      Copier *copy, size_t start_byte,
+      std::chrono::nanoseconds *encode_duration = nullptr) = 0;
 
   // Finalizes the encoding process. After this, queue_size() represents the
   // full extent of data which will be written to this file.
-  //
-  // Encode may not be called after this method.
-  virtual void Finish() = 0;
+  // This function may invoke the encoder to encode any data remaining in the
+  // queue. `encode_duration` is optional; when provided, it is set to the
+  // time spent by the encoder during this call. Do not call Encode after
+  // calling this method.
+  virtual void Finish(std::chrono::nanoseconds *encode_duration = nullptr) = 0;
 
   // Clears the first n encoded buffers from the queue.
   virtual void Clear(int n) = 0;
@@ -105,8 +111,11 @@
 
   bool HasSpace(size_t request) const final;
   size_t space() const final;
-  size_t Encode(Copier *copy, size_t start_byte) final;
-  void Finish() final {}
+
+  // See base class for comments.
+  size_t Encode(Copier *copy, size_t start_byte,
+                std::chrono::nanoseconds *encode_duration = nullptr) final;
+  void Finish(std::chrono::nanoseconds * /*encode_duration*/ = nullptr) final {}
   void Clear(int n) final;
   absl::Span<const absl::Span<const uint8_t>> queue() final;
   size_t queued_bytes() const final;
diff --git a/aos/events/logging/log_backend.h b/aos/events/logging/log_backend.h
index 91f048c..4eca13f 100644
--- a/aos/events/logging/log_backend.h
+++ b/aos/events/logging/log_backend.h
@@ -30,6 +30,12 @@
   std::chrono::nanoseconds total_write_time() const {
     return total_write_time_;
   }
+
+  // The total time spent encoding.
+  std::chrono::nanoseconds total_encode_duration() const {
+    return total_encode_duration_;
+  }
+
   // The total number of writes which have been performed.
   int total_write_count() const { return total_write_count_; }
   // The total number of messages which have been written.
@@ -45,9 +51,10 @@
     total_write_count_ = 0;
     total_write_messages_ = 0;
     total_write_bytes_ = 0;
+    total_encode_duration_ = std::chrono::nanoseconds::zero();
   }
 
-  void UpdateStats(aos::monotonic_clock::duration duration, ssize_t written,
+  void UpdateStats(std::chrono::nanoseconds duration, ssize_t written,
                    int iovec_size) {
     if (duration > max_write_time_) {
       max_write_time_ = duration;
@@ -60,11 +67,20 @@
     total_write_bytes_ += written;
   }
 
+  // Update our total_encode_duration_ stat. This needs to be a separate
+  // function from UpdateStats because it's called from a different level in the
+  // stack.
+  void UpdateEncodeDuration(std::chrono::nanoseconds duration) {
+    total_encode_duration_ += duration;
+  }
+
  private:
   std::chrono::nanoseconds max_write_time_ = std::chrono::nanoseconds::zero();
   int max_write_time_bytes_ = -1;
   int max_write_time_messages_ = -1;
   std::chrono::nanoseconds total_write_time_ = std::chrono::nanoseconds::zero();
+  std::chrono::nanoseconds total_encode_duration_ =
+      std::chrono::nanoseconds::zero();
   int total_write_count_ = 0;
   int total_write_messages_ = 0;
   int total_write_bytes_ = 0;
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index 3d52070..6c88d89 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -231,13 +231,13 @@
   }
 }
 
-void NewDataWriter::CopyDataMessage(
+std::chrono::nanoseconds NewDataWriter::CopyDataMessage(
     DataEncoder::Copier *coppier, const UUID &source_node_boot_uuid,
     aos::monotonic_clock::time_point now,
     aos::monotonic_clock::time_point message_time) {
   CHECK(allowed_data_types_[static_cast<size_t>(StoredDataType::DATA)])
       << ": Tried to write data on non-data writer.";
-  CopyMessage(coppier, source_node_boot_uuid, now, message_time);
+  return CopyMessage(coppier, source_node_boot_uuid, now, message_time);
 }
 
 void NewDataWriter::CopyTimestampMessage(
@@ -259,10 +259,10 @@
   CopyMessage(coppier, source_node_boot_uuid, now, message_time);
 }
 
-void NewDataWriter::CopyMessage(DataEncoder::Copier *coppier,
-                                const UUID &source_node_boot_uuid,
-                                aos::monotonic_clock::time_point now,
-                                aos::monotonic_clock::time_point message_time) {
+std::chrono::nanoseconds NewDataWriter::CopyMessage(
+    DataEncoder::Copier *coppier, const UUID &source_node_boot_uuid,
+    aos::monotonic_clock::time_point now,
+    aos::monotonic_clock::time_point message_time) {
   // Trigger a reboot if we detect the boot UUID change.
   UpdateBoot(source_node_boot_uuid);
 
@@ -340,7 +340,8 @@
   CHECK(header_written_) << ": Attempting to write message before header to "
                          << writer->name();
   CHECK_LE(coppier->size(), max_message_size_);
-  writer->CopyMessage(coppier, now);
+  std::chrono::nanoseconds encode_duration = writer->CopyMessage(coppier, now);
+  return encode_duration;
 }
 
 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader>
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
index a0d5857..d7c97ae 100644
--- a/aos/events/logging/log_namer.h
+++ b/aos/events/logging/log_namer.h
@@ -76,11 +76,15 @@
                     monotonic_clock::time_point monotonic_timestamp_time =
                         monotonic_clock::min_time);
 
-  // Coppies a message with the provided boot UUID.
-  void CopyDataMessage(DataEncoder::Copier *copier,
-                       const UUID &source_node_boot_uuid,
-                       aos::monotonic_clock::time_point now,
-                       aos::monotonic_clock::time_point message_time);
+  // Copies a message with the provided boot UUID.
+  // Similar to CopyMessage, but also checks that StoredDataType::DATA is
+  // allowed on this writer. Returns the duration of time spent on encoding the
+  // message.
+  std::chrono::nanoseconds CopyDataMessage(
+      DataEncoder::Copier *copier, const UUID &source_node_boot_uuid,
+      aos::monotonic_clock::time_point now,
+      aos::monotonic_clock::time_point message_time);
+
   void CopyTimestampMessage(DataEncoder::Copier *copier,
                             const UUID &source_node_boot_uuid,
                             aos::monotonic_clock::time_point now,
@@ -165,10 +169,12 @@
   // Signals that a node has rebooted.
   void Reboot(const UUID &source_node_boot_uuid);
 
-  void CopyMessage(DataEncoder::Copier *copier,
-                   const UUID &source_node_boot_uuid,
-                   aos::monotonic_clock::time_point now,
-                   aos::monotonic_clock::time_point message_time);
+  // Copies a message with the provided boot UUID.
+  // Returns the duration of time spent on encoding the message.
+  std::chrono::nanoseconds CopyMessage(
+      DataEncoder::Copier *copier, const UUID &source_node_boot_uuid,
+      aos::monotonic_clock::time_point now,
+      aos::monotonic_clock::time_point message_time);
 
   void QueueHeader(
       aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> &&header);
@@ -507,6 +513,16 @@
         });
   }
 
+  std::chrono::nanoseconds total_encode_duration() const {
+    return accumulate_data_writers(
+        total_encode_duration_,
+        [](std::chrono::nanoseconds x, const NewDataWriter &data_writer) {
+          CHECK_NOTNULL(data_writer.writer);
+          return x +
+                 data_writer.writer->WriteStatistics()->total_encode_duration();
+        });
+  }
+
   void ResetStatistics();
 
  protected:
@@ -575,6 +591,8 @@
   int max_write_time_bytes_ = -1;
   int max_write_time_messages_ = -1;
   std::chrono::nanoseconds total_write_time_ = std::chrono::nanoseconds::zero();
+  std::chrono::nanoseconds total_encode_duration_ =
+      std::chrono::nanoseconds::zero();
   int total_write_count_ = 0;
   int total_write_messages_ = 0;
   int total_write_bytes_ = 0;
diff --git a/aos/events/logging/log_writer.cc b/aos/events/logging/log_writer.cc
index c960ab9..ae43851 100644
--- a/aos/events/logging/log_writer.cc
+++ b/aos/events/logging/log_writer.cc
@@ -6,6 +6,9 @@
 #include <map>
 #include <vector>
 
+#include "absl/strings/ascii.h"  // for AsciiStrToLower
+#include "absl/strings/str_cat.h"
+
 #include "aos/configuration.h"
 #include "aos/events/event_loop.h"
 #include "aos/network/message_bridge_server_generated.h"
@@ -713,23 +716,29 @@
             ? f.fetcher->context().source_boot_uuid
             : event_loop_->boot_uuid();
     // Write!
-    const auto start = event_loop_->monotonic_now();
+    const monotonic_clock::time_point start_time = event_loop_->monotonic_now();
 
     ContextDataCopier coppier(f.fetcher->context(), f.logged_channel_index,
                               f.log_type, event_loop_);
 
-    aos::monotonic_clock::time_point message_time =
+    const aos::monotonic_clock::time_point message_time =
         static_cast<int>(node_index_) != f.data_node_index
             ? f.fetcher->context().monotonic_remote_time
             : f.fetcher->context().monotonic_event_time;
-    writer->CopyDataMessage(&coppier, source_node_boot_uuid, start,
-                            message_time);
-    RecordCreateMessageTime(start, coppier.end_time(), f);
+    const std::chrono::nanoseconds encode_duration = writer->CopyDataMessage(
+        &coppier, source_node_boot_uuid, start_time, message_time);
+    RecordCreateMessageTime(start_time, coppier.end_time(), f);
+
+    const Channel *channel = f.fetcher->channel();
 
     VLOG(2) << "Wrote data as node " << FlatbufferToJson(node_)
-            << " for channel "
-            << configuration::CleanedChannelToString(f.fetcher->channel())
+            << " for channel " << configuration::CleanedChannelToString(channel)
             << " to " << writer->name();
+
+    if (profiling_info_.has_value()) {
+      profiling_info_->WriteProfileData(message_time, start_time,
+                                        encode_duration, *channel);
+    }
   }
 }
 
@@ -938,4 +947,66 @@
   }
 }
 
+void Logger::SetProfilingPath(
+    const std::optional<std::filesystem::path> &path) {
+  if (path.has_value()) {
+    profiling_info_.emplace(path.value());
+  } else {
+    profiling_info_.reset();
+  }
+}
+
+void ProfileDataWriter::WriteProfileData(
+    const aos::monotonic_clock::time_point message_time,
+    const aos::monotonic_clock::time_point encoding_start_time,
+    const std::chrono::nanoseconds encode_duration, const Channel &channel) {
+  const int64_t encode_duration_ns =
+      std::chrono::duration_cast<std::chrono::nanoseconds>(encode_duration)
+          .count();
+  const int64_t encoding_start_time_ns =
+      encoding_start_time.time_since_epoch().count();
+  const std::string message_time_s =
+      std::to_string(std::chrono::duration_cast<std::chrono::duration<double>>(
+                         message_time.time_since_epoch())
+                         .count());
+
+  const std::string log_entry =
+      absl::StrCat(channel.name()->string_view(), ",",  // channel name
+                   channel.type()->string_view(), ",",  // channel type
+                   encode_duration_ns, ",",             // encode duration
+                   encoding_start_time_ns, ",",         // encoding start time
+                   message_time_s, "\n"                 // message time
+      );
+  stream_ << log_entry;
+}
+
+ProfileDataWriter::ProfileDataWriter(const std::filesystem::path &path) {
+  CHECK(!path.empty());
+
+  const std::string extension = path.extension().string();
+  const std::string lower_case_extension = absl::AsciiStrToLower(extension);
+
+  // Warn if the path is not a csv file.
+  if (std::filesystem::is_directory(path)) {
+    LOG(WARNING) << "Path for logger profiling output file should be a csv "
+                    "file, not a directory. Received path: "
+                 << path << ".";
+  } else if (lower_case_extension != ".csv") {
+    LOG(WARNING) << "The extension for logger profiling output file should be "
+                    "'.csv'. Received path: "
+                 << extension << ".";
+  }
+
+  stream_.open(path, std::ios::out);
+  CHECK(stream_.is_open()) << ": Failed to open " << path;
+
+  // Write the header that describes the file content and the column names.
+  stream_
+      << "# This file is in csv format and contains profiling data for each "
+         "channel. The column names are: channel_name, channel_type, "
+         "encode_duration_ns, encoding_start_time_ns, message_time_s"
+      << std::endl;
+}
+
 }  // namespace aos::logger
diff --git a/aos/events/logging/log_writer.h b/aos/events/logging/log_writer.h
index 6091d7e..486ed8f 100644
--- a/aos/events/logging/log_writer.h
+++ b/aos/events/logging/log_writer.h
@@ -2,6 +2,7 @@
 #define AOS_EVENTS_LOGGING_LOG_WRITER_H_
 
 #include <chrono>
+#include <fstream>
 #include <string_view>
 #include <vector>
 
@@ -24,6 +25,31 @@
 aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> PackConfiguration(
     const Configuration *const configuration);
 
+// A class to manage the writing of profile data. It will open a file during
+// construction and close it when it goes out of scope.
+class ProfileDataWriter {
+ public:
+  // Opens the output stream at the given path.
+  explicit ProfileDataWriter(const std::filesystem::path &csv_path);
+
+  // Write the profile data to the file as a csv line.
+  void WriteProfileData(
+      const aos::monotonic_clock::time_point message_time,
+      const aos::monotonic_clock::time_point encoding_start_time,
+      const std::chrono::nanoseconds encode_duration, const Channel &channel);
+
+  // A destructor to close the stream if it's open.
+  ~ProfileDataWriter() {
+    if (stream_.is_open()) {
+      stream_.close();
+    }
+  }
+
+ private:
+  // The stream to write profiling data to.
+  std::ofstream stream_;
+};
+
 // Logs all channels available in the event loop to disk every 100 ms.
 // Start by logging one message per channel to capture any state and
 // configuration that is sent rarely on a channel and would affect execution.
@@ -88,6 +114,9 @@
   }
   std::chrono::nanoseconds polling_period() const { return polling_period_; }
 
+  // Sets the path to write profiling data to. nullopt will disable profiling.
+  void SetProfilingPath(const std::optional<std::filesystem::path> &path);
+
   std::optional<UUID> log_start_uuid() const { return log_start_uuid_; }
   UUID logger_instance_uuid() const { return logger_instance_uuid_; }
 
@@ -304,6 +333,12 @@
                                aos::monotonic_clock::time_point end,
                                const FetcherStruct &fetcher);
 
+  // Write an entry to the profile file.
+  void RecordProfileData(aos::monotonic_clock::time_point message_time,
+                         aos::monotonic_clock::time_point encoding_start_time,
+                         std::chrono::nanoseconds encode_duration,
+                         const Channel &channel);
+
   EventLoop *const event_loop_;
   // The configuration to place at the top of the log file.
   const Configuration *const configuration_;
@@ -384,6 +419,9 @@
 
   // Amount of time to run the logger behind now.
   std::chrono::nanoseconds logging_delay_ = std::chrono::nanoseconds(0);
+
+  // Profiling data writer; engaged when profiling is enabled via
+  // SetProfilingPath.
+  std::optional<ProfileDataWriter> profiling_info_;
 };
 
 }  // namespace aos::logger
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 0676577..c01026f 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -172,23 +172,30 @@
   return *this;
 }
 
-void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *copier,
-                                       aos::monotonic_clock::time_point now) {
+std::chrono::nanoseconds DetachedBufferWriter::CopyMessage(
+    DataEncoder::Copier *copier, aos::monotonic_clock::time_point now) {
   if (ran_out_of_space_) {
     // We don't want any later data to be written after space becomes
     // available, so refuse to write anything more once we've dropped data
     // because we ran out of space.
-    return;
+    return std::chrono::nanoseconds::zero();
   }
 
   const size_t message_size = copier->size();
   size_t overall_bytes_written = 0;
 
+  std::chrono::nanoseconds total_encode_duration =
+      std::chrono::nanoseconds::zero();
+
   // Keep writing chunks until we've written it all.  If we end up with a
   // partial write, this means we need to flush to disk.
   do {
+    // Initialize encode_duration for the case that the encoder cannot measure
+    // encode duration for a single message.
+    std::chrono::nanoseconds encode_duration = std::chrono::nanoseconds::zero();
     const size_t bytes_written =
-        encoder_->Encode(copier, overall_bytes_written);
+        encoder_->Encode(copier, overall_bytes_written, &encode_duration);
+
     CHECK(bytes_written != 0);
 
     overall_bytes_written += bytes_written;
@@ -197,16 +204,24 @@
               << message_size << " wrote " << overall_bytes_written;
       Flush(now);
     }
+    total_encode_duration += encode_duration;
   } while (overall_bytes_written < message_size);
 
+  WriteStatistics()->UpdateEncodeDuration(total_encode_duration);
+
   FlushAtThreshold(now);
+  return total_encode_duration;
 }
 
 void DetachedBufferWriter::Close() {
   if (!log_sink_->is_open()) {
     return;
   }
-  encoder_->Finish();
+  // Initialize encode_duration in case the encoder cannot measure the time
+  // spent encoding.
+  std::chrono::nanoseconds encode_duration = std::chrono::nanoseconds::zero();
+  encoder_->Finish(&encode_duration);
+  WriteStatistics()->UpdateEncodeDuration(encode_duration);
   while (encoder_->queue_size() > 0) {
     Flush(monotonic_clock::max_time);
   }
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index d781a20..f9a62b1 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -73,8 +73,9 @@
   // Triggers a flush if there's enough data queued up.
   //
   // Steals the detached buffer from it.
-  void CopyMessage(DataEncoder::Copier *coppier,
-                   aos::monotonic_clock::time_point now);
+  // Returns the duration of time spent on encoding the message.
+  std::chrono::nanoseconds CopyMessage(DataEncoder::Copier *copier,
+                                       aos::monotonic_clock::time_point now);
 
   // Indicates we got ENOSPC when trying to write. After this returns true, no
   // further data is written.
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 7799c57..ea5071e 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -571,4 +571,129 @@
   EXPECT_EQ(replay_count, sent_messages);
 }
 
+// Helper function to verify the contents of the profiling data file.
+void VerifyProfilingData(const std::filesystem::path &profiling_path) {
+  std::ifstream file(profiling_path);
+  ASSERT_TRUE(file.is_open()) << "Failed to open profiling data file.";
+
+  std::string line;
+
+  // Verify that the header is a comment starting with '#'.
+  std::getline(file, line);
+  ASSERT_THAT(line, ::testing::StartsWith("#"));
+
+  // Now, verify the contents of each line.
+  int record_count = 0;
+
+  // Track the total encoding duration.
+  uint64_t total_encoding_duration_ns = 0;
+
+  while (std::getline(file, line)) {
+    std::stringstream line_stream(line);
+    std::string cell;
+
+    // Extract each cell from the CSV line.
+    std::vector<std::string> cells;
+    while (std::getline(line_stream, cell, ',')) {
+      cells.push_back(cell);
+    }
+
+    // Expecting 5 fields: channel_name, channel_type, encode_duration_ns,
+    // encoding_start_time_ns, message_time_s.
+    ASSERT_EQ(cells.size(), 5) << "Incorrect number of fields in the CSV line.";
+
+    // Channel name and type are strings and just need to not be empty.
+    EXPECT_FALSE(cells[0].empty()) << "Channel name is empty.";
+    EXPECT_FALSE(cells[1].empty()) << "Channel type is empty.";
+
+    // Encode duration, encoding start time should be positive numbers.
+    const int64_t encode_duration_ns = std::stoll(cells[2]);
+    const int64_t encoding_start_time_ns = std::stoll(cells[3]);
+
+    ASSERT_GT(encode_duration_ns, 0)
+        << "Encode duration is not positive. Line: " << line;
+    ASSERT_GT(encoding_start_time_ns, 0)
+        << "Encoding start time is not positive. Line: " << line;
+
+    // Message time should be non-negative.
+    const double message_time_s = std::stod(cells[4]);
+    EXPECT_GE(message_time_s, 0) << "Message time is negative";
+    ++record_count;
+    total_encoding_duration_ns += encode_duration_ns;
+  }
+
+  EXPECT_GT(record_count, 0) << "Profiling data file is empty.";
+  LOG(INFO) << "Total encoding duration: " << total_encoding_duration_ns;
+}
+
+// Tests logging many messages with LZMA compression.
+TEST_F(LoggerTest, ManyMessagesLzmaWithProfiling) {
+  const std::string tmpdir = aos::testing::TestTmpDir();
+  const std::string base_name = tmpdir + "/lzma_logfile";
+  const std::string config_sha256 =
+      absl::StrCat(base_name, kSingleConfigSha256, ".bfbs");
+  const std::string logfile = absl::StrCat(base_name, ".part0.xz");
+  const std::string profiling_path =
+      absl::StrCat(tmpdir, "/encoding_profile.csv");
+
+  // Clean up any previous test artifacts.
+  unlink(config_sha256.c_str());
+  unlink(logfile.c_str());
+
+  LOG(INFO) << "Logging data to " << logfile;
+  ping_.set_quiet(true);
+
+  {
+    std::unique_ptr<aos::EventLoop> logger_event_loop =
+        event_loop_factory_.MakeEventLoop("logger");
+
+    std::unique_ptr<aos::EventLoop> ping_spammer_event_loop =
+        event_loop_factory_.MakeEventLoop("ping_spammer");
+    aos::Sender<examples::Ping> ping_sender =
+        ping_spammer_event_loop->MakeSender<examples::Ping>("/test");
+
+    aos::TimerHandler *timer_handler =
+        ping_spammer_event_loop->AddTimer([&ping_sender]() {
+          aos::Sender<examples::Ping>::Builder builder =
+              ping_sender.MakeBuilder();
+          examples::Ping::Builder ping_builder =
+              builder.MakeBuilder<examples::Ping>();
+          CHECK_EQ(builder.Send(ping_builder.Finish()),
+                   aos::RawSender::Error::kOk);
+        });
+
+    // Send a message every 50 microseconds to simulate high throughput.
+    ping_spammer_event_loop->OnRun([&ping_spammer_event_loop, timer_handler]() {
+      timer_handler->Schedule(ping_spammer_event_loop->monotonic_now(),
+                              std::chrono::microseconds(50));
+    });
+
+    aos::logger::Logger logger(logger_event_loop.get());
+    logger.set_separate_config(false);
+    logger.set_polling_period(std::chrono::milliseconds(100));
+
+    // Enable logger profiling.
+    logger.SetProfilingPath(profiling_path);
+
+    std::unique_ptr<aos::logger::MultiNodeFilesLogNamer> log_namer =
+        std::make_unique<aos::logger::MultiNodeFilesLogNamer>(
+            base_name, logger_event_loop->configuration(),
+            logger_event_loop.get(), logger_event_loop->node());
+#ifdef LZMA
+    // Set up LZMA encoder.
+    log_namer->set_encoder_factory([](size_t max_message_size) {
+      return std::make_unique<aos::logger::LzmaEncoder>(max_message_size, 1);
+    });
+#endif
+
+    logger.StartLogging(std::move(log_namer));
+
+    event_loop_factory_.RunFor(std::chrono::seconds(1));
+  }
+
+#ifdef LZMA
+  VerifyProfilingData(profiling_path);
+#endif
+}
+
 }  // namespace aos::logger::testing
diff --git a/aos/events/logging/lzma_encoder.cc b/aos/events/logging/lzma_encoder.cc
index 27d01ab..a757560 100644
--- a/aos/events/logging/lzma_encoder.cc
+++ b/aos/events/logging/lzma_encoder.cc
@@ -95,7 +95,8 @@
 
 LzmaEncoder::~LzmaEncoder() { lzma_end(&stream_); }
 
-size_t LzmaEncoder::Encode(Copier *copy, size_t start_byte) {
+size_t LzmaEncoder::Encode(Copier *copy, size_t start_byte,
+                           std::chrono::nanoseconds *encode_duration) {
   const size_t copy_size = copy->size();
   // LZMA compresses the data as it goes along, copying the compressed results
   // into another buffer.  So, there's no need to store more than one message
@@ -107,13 +108,14 @@
 
   stream_.next_in = input_buffer_.data();
   stream_.avail_in = copy_size;
-
-  RunLzmaCode(LZMA_RUN);
+  RunLzmaCode(LZMA_RUN, encode_duration);
 
   return copy_size - start_byte;
 }
 
-void LzmaEncoder::Finish() { RunLzmaCode(LZMA_FINISH); }
+void LzmaEncoder::Finish(std::chrono::nanoseconds *encode_duration) {
+  RunLzmaCode(LZMA_FINISH, encode_duration);
+}
 
 void LzmaEncoder::Clear(const int n) {
   CHECK_GE(n, 0);
@@ -154,7 +156,8 @@
   return bytes;
 }
 
-void LzmaEncoder::RunLzmaCode(lzma_action action) {
+void LzmaEncoder::RunLzmaCode(lzma_action action,
+                              std::chrono::nanoseconds *encode_duration) {
   CHECK(!finished_);
 
   // This is to keep track of how many bytes resulted from encoding this input
@@ -181,8 +184,20 @@
       last_avail_out = stream_.avail_out;
     }
 
-    // Encode the data.
-    lzma_ret status = lzma_code(&stream_, action);
+    // Declare status, which will be populated by lzma_code.
+    lzma_ret status;
+
+    if (encode_duration == nullptr) {
+      // Perform lzma_code without measuring the time.
+      status = lzma_code(&stream_, action);
+    } else {
+      // Measure the time spent in lzma_code, accumulating across loop
+      // iterations (callers initialize *encode_duration to zero).
+      const monotonic_clock::time_point start_time =
+          aos::monotonic_clock::now();
+      status = lzma_code(&stream_, action);
+      *encode_duration += aos::monotonic_clock::now() - start_time;
+    }
+
     CHECK(LzmaCodeIsOk(status));
     if (action == LZMA_FINISH) {
       if (status == LZMA_STREAM_END) {
diff --git a/aos/events/logging/lzma_encoder.h b/aos/events/logging/lzma_encoder.h
index 93508ca..d0d9280 100644
--- a/aos/events/logging/lzma_encoder.h
+++ b/aos/events/logging/lzma_encoder.h
@@ -39,18 +39,23 @@
     return true;
   }
   size_t space() const final { return input_buffer_.capacity(); }
-  size_t Encode(Copier *copy, size_t start_byte) final;
-  void Finish() final;
+
+  // See base class for comments.
+  size_t Encode(Copier *copy, size_t start_byte,
+                std::chrono::nanoseconds *encode_duration = nullptr) final;
+  void Finish(std::chrono::nanoseconds *encode_duration = nullptr) final;
   void Clear(int n) final;
   absl::Span<const absl::Span<const uint8_t>> queue() final;
   size_t queued_bytes() const final;
   size_t total_bytes() const final { return total_bytes_; }
+
   size_t queue_size() const final { return queue_.size(); }
 
  private:
   static constexpr size_t kEncodedBufferSizeBytes{1024 * 128};
 
-  void RunLzmaCode(lzma_action action);
+  void RunLzmaCode(lzma_action action,
+                   std::chrono::nanoseconds *encode_duration);
 
   lzma_stream stream_;
   uint32_t compression_preset_;
@@ -65,6 +70,9 @@
   // Reset.
   size_t total_bytes_ = 0;
 
+  std::chrono::nanoseconds total_encode_duration_ =
+      std::chrono::nanoseconds::zero();
+
   // Buffer that messages get copied into for encoding.
   ResizeableBuffer input_buffer_;
 
diff --git a/aos/events/logging/plot_logger_profile.py b/aos/events/logging/plot_logger_profile.py
new file mode 100644
index 0000000..125b308
--- /dev/null
+++ b/aos/events/logging/plot_logger_profile.py
@@ -0,0 +1,287 @@
+# Parse log profiling data and produce a graph showing encode times in time bins,
+# with a breakdown per message type.
+
+import argparse
+import csv
+import math
+import os
+import webbrowser
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import numpy as np
+from bokeh.models import ColumnDataSource, Legend, LegendItem
+from bokeh.palettes import Category20
+from bokeh.plotting import figure, output_file
+from tabulate import tabulate
+
+
+@dataclass
+class SeriesDetail:
+    event_loop_times_s: List[float]
+    encode_times_ms: List[float]
+
+
+def parse_csv_file(filepath: Path, max_lines: Optional[int],
+                   start_time: Optional[float],
+                   end_time: Optional[float]) -> Dict[str, SeriesDetail]:
+    """Parses the CSV file to extract needed data, respecting the maximum number of lines if provided."""
+    data_by_type: Dict[str, SeriesDetail] = {}
+
+    with open(filepath, 'r') as file:
+        reader = csv.reader(file)
+        next(reader)  # Skip the header line
+
+        line_count = 0
+        for line in reader:
+            if max_lines is not None and line_count >= max_lines:
+                break
+
+            line_count += 1
+
+            assert len(line) > 0
+
+            # Note that channel_name and encoding_start_time_ns are not yet
+            # used here; for now they make the csv file easier to inspect
+            # directly, and they may be used by this script in the future.
+            channel_name, channel_type, encode_duration_ns, encoding_start_time_ns, message_time_s = line
+
+            # Convert nanoseconds to milliseconds
+            encode_duration_ms = float(encode_duration_ns) * 1e-6
+            message_time_s = float(message_time_s)
+
+            if (start_time is not None and message_time_s < start_time):
+                continue
+            if (end_time is not None and message_time_s > end_time):
+                continue
+
+            if channel_type in data_by_type:
+                data_by_type[channel_type].encode_times_ms.append(
+                    encode_duration_ms)
+                data_by_type[channel_type].event_loop_times_s.append(
+                    message_time_s)
+            else:
+                data_by_type[channel_type] = SeriesDetail(
+                    encode_times_ms=[encode_duration_ms],
+                    event_loop_times_s=[message_time_s])
+
+    return data_by_type
+
+
+@dataclass
+class DataBin:
+    bin_range: str
+    message_encode_times: Dict[str, float]
+
+
+@dataclass
+class BinnedData:
+    bins: List[DataBin] = field(default_factory=list)
+    top_type_names: List[str] = field(default_factory=list)
+
+
+def create_binned_data(data_by_type: Dict[str, SeriesDetail],
+                       num_bins: int = 25,
+                       top_n: int = 5) -> BinnedData:
+    # Calculate total encoding times for each message type across the entire file.
+    total_encode_times: Dict[str, float] = {
+        message_type: sum(detail.encode_times_ms)
+        for message_type, detail in data_by_type.items()
+    }
+
+    # Determine the top N message types based on total encoding times.
+    top_types: List[Tuple[str, float]] = sorted(total_encode_times.items(),
+                                                key=lambda item: item[1],
+                                                reverse=True)[:top_n]
+    print(f"{top_types=}")
+    top_type_names: List[str] = [type_name for type_name, _ in top_types]
+
+    # Find the global minimum and maximum times to establish bin edges.
+    min_time: float = min(detail.event_loop_times_s[0]
+                          for detail in data_by_type.values())
+    max_time: float = max(detail.event_loop_times_s[-1]
+                          for detail in data_by_type.values())
+
+    # Create bins.
+    bins = np.linspace(min_time, max_time, num_bins + 1)
+
+    # Initialize the list of DataBin instances with the correct number of bins.
+    binned_data = BinnedData(top_type_names=top_type_names)
+    for i in range(num_bins):
+        bin_range = f"{bins[i]:.2f} - {bins[i+1]:.2f}"
+        binned_data.bins.append(
+            DataBin(bin_range=bin_range,
+                    message_encode_times={
+                        name: 0
+                        for name in top_type_names + ['other']
+                    }))
+
+    # Populate binned_data with message encode times.
+    for message_type, details in data_by_type.items():
+        binned_indices = np.digitize(details.event_loop_times_s, bins) - 1
+        # Correcting the bin indices that are out of range by being exactly on the maximum edge.
+        binned_indices = np.minimum(binned_indices, num_bins - 1)
+
+        for idx, encode_time in enumerate(details.encode_times_ms):
+            bin_index = binned_indices[idx]
+            current_bin = binned_data.bins[bin_index]
+            if message_type in top_type_names:
+                current_bin.message_encode_times[message_type] += encode_time
+            else:
+                current_bin.message_encode_times['other'] += encode_time
+
+    return binned_data
+
+
+def print_binned_data(binned_data: BinnedData) -> None:
+    # Extend headers for the table by replacing '.' with '\n.' for each message type name and
+    # adding 'Total'.
+    headers = ['Bin Range'] + [
+        key.replace('.', '\n.')
+        for key in binned_data.top_type_names + ['other']
+    ] + ['Total']
+
+    # Initialize the table data list.
+    table_data = []
+
+    # Populate the table data with the values from each DataBin instance and calculate totals.
+    for data_bin in binned_data.bins:
+        # Initialize a row with the bin range.
+        row = [data_bin.bin_range]
+        # Add the total message encode times for each message type.
+        encode_times = [
+            data_bin.message_encode_times[message_type]
+            for message_type in binned_data.top_type_names
+        ]
+        other_time = data_bin.message_encode_times['other']
+        row += encode_times + [other_time]
+        # Calculate the total encode time for the row and append it.
+        total_encode_time = sum(encode_times) + other_time
+        row.append(total_encode_time)
+        # Append the row to the table data.
+        table_data.append(row)
+
+    # Print the table using tabulate with 'grid' format for better structure.
+    print(
+        tabulate(table_data, headers=headers, tablefmt='grid', floatfmt=".2f"))
+
+
+def plot_bar(binned_data: BinnedData) -> None:
+    filename = "plot.html"
+    output_file(filename, title="Message Encode Time Plot Stacked Bar Graph")
+
+    # Adjust width based on bin count for readability.
+    plot_width = max(1200, 50 * len(binned_data.bins))
+
+    p = figure(x_range=[bin.bin_range for bin in binned_data.bins],
+               title='Message Encode Time by Type over Event Loop Time',
+               x_axis_label='Event Loop Time Bins',
+               y_axis_label='Total Message Encode Time (ms)',
+               width=plot_width,
+               height=600,
+               tools="")
+
+    source_data = {'bin_edges': [bin.bin_range for bin in binned_data.bins]}
+    for message_type in binned_data.top_type_names + ['other']:
+        source_data[message_type] = [
+            bin.message_encode_times.get(message_type, 0)
+            for bin in binned_data.bins
+        ]
+
+    source = ColumnDataSource(data=source_data)
+
+    # Calculate totals and sort in descending order.
+    totals = {
+        message_type: sum(source_data[message_type])
+        for message_type in source_data if message_type != 'bin_edges'
+    }
+    sorted_message_types = sorted(totals, key=totals.get, reverse=True)
+
+    # Reverse the list to place larger segments at the top.
+    sorted_message_types.reverse()
+
+    num_types = len(sorted_message_types)
+    if num_types > 20:
+        raise ValueError(
+            f"Number of types ({num_types}) exceeds the number of available colors in Category20."
+        )
+    colors = Category20[20][:num_types]
+
+    # Apply reversed order to stack rendering.
+    renderers = p.vbar_stack(sorted_message_types,
+                             x='bin_edges',
+                             width=0.7,
+                             color=colors,
+                             source=source)
+
+    # Change the orientation of the x-axis labels to a 45-degree angle.
+    p.xaxis.major_label_orientation = math.pi / 4
+
+    # Create a custom legend, maintaining the reversed order for visual consistency.
+    legend_items = [
+        LegendItem(label=mt.replace('.', ' '), renderers=[renderers[i]])
+        for i, mt in enumerate(sorted_message_types)
+    ]
+    legend = Legend(items=legend_items, location=(0, -30))
+    p.add_layout(legend, 'right')
+
+    p.y_range.start = 0
+    p.x_range.range_padding = 0.05
+    p.xgrid.grid_line_color = None
+    p.axis.minor_tick_line_color = None
+    p.outline_line_color = None
+
+    file_path = os.path.realpath(filename)
+    print('\n')
+    print(f"Plot saved to '{file_path}'")
+    webbrowser.open('file://' + file_path)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=
+        'Process log files to extract and plot message encode times.')
+    parser.add_argument('--log_file_path',
+                        type=Path,
+                        help='Path to the log file',
+                        required=True)
+    parser.add_argument(
+        '--max_lines',
+        type=int,
+        default=None,
+        help='Maximum number of lines to read from the log file')
+    parser.add_argument('--num_bins',
+                        type=int,
+                        default=40,
+                        help='Number of bins to use')
+    parser.add_argument('--top_n',
+                        type=int,
+                        default=10,
+                        help='Number of top message types to plot')
+    parser.add_argument('--start_time',
+                        type=float,
+                        default=None,
+                        help='Start time in seconds')
+    parser.add_argument('--end_time',
+                        type=float,
+                        default=None,
+                        help='End time in seconds')
+
+    args = parser.parse_args()
+
+    data_by_type = parse_csv_file(filepath=args.log_file_path,
+                                  max_lines=args.max_lines,
+                                  start_time=args.start_time,
+                                  end_time=args.end_time)
+    binned_data = create_binned_data(data_by_type,
+                                     num_bins=args.num_bins,
+                                     top_n=args.top_n)
+    print_binned_data(binned_data)
+    plot_bar(binned_data)
+    print(f"{os.path.basename(__file__)} Finished.")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/aos/events/logging/plot_logger_profile_test.py b/aos/events/logging/plot_logger_profile_test.py
new file mode 100644
index 0000000..b3e7c70
--- /dev/null
+++ b/aos/events/logging/plot_logger_profile_test.py
@@ -0,0 +1,109 @@
+import unittest
+from unittest.mock import mock_open, patch
+from pathlib import Path
+import numpy as np
+from aos.events.logging.plot_logger_profile import SeriesDetail, parse_csv_file, create_binned_data
+
+# Mock CSV data as a string
+# The actual column names are "channel_name, channel_type, encode_duration_ns, encoding_start_time_ns, message_time_s".
+mock_csv = """# It doesn't matter what's in this comment. The column names are put here for your reference.
+/test/channel/name1,aos.test.channel.name1,1000000,123456789,10
+/test/channel/name2,aos.test.channel.name1,2000000,123456789,20
+/test/channel/name3,aos.test.channel.name2,3000000,123456789,30
+"""
+
+
+class TestLogParser(unittest.TestCase):
+
+    def test_parse_csv_file(self):
+        # Expected result after parsing the mock CSV, using SeriesDetail instances
+        expected_result = {
+            'aos.test.channel.name1':
+            SeriesDetail(encode_times_ms=[1.0, 2.0],
+                         event_loop_times_s=[10.0, 20.0]),
+            'aos.test.channel.name2':
+            SeriesDetail(encode_times_ms=[3.0], event_loop_times_s=[30.0])
+        }
+
+        # Use 'mock_open' to simulate file opening and reading
+        with patch('builtins.open', mock_open(read_data=mock_csv)):
+            # Parse the data assuming the mock file path is 'dummy_path.csv'
+            result = parse_csv_file(Path('dummy_path.csv'),
+                                    max_lines=None,
+                                    start_time=None,
+                                    end_time=None)
+
+            # Check if the parsed data matches the expected result
+            self.assertEqual(result, expected_result)
+
+
+class TestCreateBinnedData(unittest.TestCase):
+
+    def test_create_binned_data_with_other_category(self):
+        # Setup input data with three types
+        data_by_type = {
+            'Type1':
+            SeriesDetail(encode_times_ms=[10.0, 20.0, 30.0],
+                         event_loop_times_s=[13.0, 23.0, 33.0]),
+            'Type2':
+            SeriesDetail(encode_times_ms=[15.0, 25.0, 35.0],
+                         event_loop_times_s=[18.0, 28.0, 38.0]),
+            'Type3':
+            SeriesDetail(encode_times_ms=[5.0, 10.0, 15.0],
+                         event_loop_times_s=[8.0, 19.0, 29.0])
+        }
+
+        # Choose top_n less than the number of types. This will cause Type3's data to go into 'other'
+        top_n = 2
+
+        # Sort the types by total encoding times to determine top types
+        expected_top_types = ['Type2', 'Type1']
+
+        # Three bins spanning the event loop time range [8.0, 38.0], matching
+        # num_bins=3 in the call below; the bin edges are 8.0, 18.0, 28.0, and
+        # 38.0.
+        bins = np.linspace(8, 38, 3 + 1)
+
+        # Expected output for the 3 bins.
+        expected_bins = [
+            {
+                "bin_range": f"{bins[0]:.2f} - {bins[1]:.2f}",
+                "message_encode_times": {
+                    "Type1": 10.0,
+                    "Type2": 0,
+                    "other": 5.0
+                }
+            },
+            {
+                "bin_range": f"{bins[1]:.2f} - {bins[2]:.2f}",
+                "message_encode_times": {
+                    "Type1": 20.0,
+                    "Type2": 15.0,
+                    "other": 10.0
+                }
+            },
+            {
+                "bin_range": f"{bins[2]:.2f} - {bins[3]:.2f}",
+                "message_encode_times": {
+                    "Type1": 30.0,
+                    "Type2": 60.0,
+                    "other": 15.0
+                }
+            },
+        ]
+
+        # Call the function under test
+        result = create_binned_data(data_by_type,
+                                    num_bins=3,
+                                    top_n=len(expected_top_types))
+
+        # Check the top type names and order
+        self.assertEqual(result.top_type_names, expected_top_types)
+
+        # Check each bin's range and encode times
+        for idx, bin_data in enumerate(result.bins):
+            self.assertEqual(bin_data.bin_range,
+                             expected_bins[idx]["bin_range"])
+            self.assertEqual(bin_data.message_encode_times,
+                             expected_bins[idx]["message_encode_times"])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aos/events/logging/snappy_encoder.cc b/aos/events/logging/snappy_encoder.cc
index 0e96092..0160177 100644
--- a/aos/events/logging/snappy_encoder.cc
+++ b/aos/events/logging/snappy_encoder.cc
@@ -41,9 +41,16 @@
   total_bytes_ += queue_.back().size();
 }
 
-void SnappyEncoder::Finish() { EncodeCurrentBuffer(); }
+void SnappyEncoder::Finish(std::chrono::nanoseconds * /*encode_duration*/) {
+  // TODO(Maxwell Gumley): Find a way to measure the encode duration for a
+  // single message.
+  EncodeCurrentBuffer();
+}
 
-size_t SnappyEncoder::Encode(Copier *copy, size_t start_byte) {
+size_t SnappyEncoder::Encode(Copier *copy, size_t start_byte,
+                             std::chrono::nanoseconds * /*encode_duration*/) {
+  // TODO(Maxwell Gumley): Find a way to measure the encode duration for a
+  // single message.
   CHECK_EQ(start_byte, 0u);
   buffer_source_.Append(copy);
 
diff --git a/aos/events/logging/snappy_encoder.h b/aos/events/logging/snappy_encoder.h
index 4b6130c..76100e3 100644
--- a/aos/events/logging/snappy_encoder.h
+++ b/aos/events/logging/snappy_encoder.h
@@ -20,9 +20,11 @@
   explicit SnappyEncoder(size_t max_message_size,
                          size_t chunk_size = 128 * 1024);
 
-  size_t Encode(Copier *copy, size_t start_byte) final;
+  // See base class for comments.
+  size_t Encode(Copier *copy, size_t start_byte,
+                std::chrono::nanoseconds *encode_duration = nullptr) final;
 
-  void Finish() final;
+  void Finish(std::chrono::nanoseconds *encode_duration = nullptr) final;
   void Clear(int n) final;
   absl::Span<const absl::Span<const uint8_t>> queue() final;
   size_t queued_bytes() const final;
diff --git a/tools/python/requirements.lock.txt b/tools/python/requirements.lock.txt
index c265e77..5a4dcf4 100644
--- a/tools/python/requirements.lock.txt
+++ b/tools/python/requirements.lock.txt
@@ -4,6 +4,10 @@
 #
 #    bazel run //tools/python:requirements.update
 #
+bokeh==3.4.1 \
+    --hash=sha256:1e3c502a0a8205338fc74dadbfa321f8a0965441b39501e36796a47b4017b642 \
+    --hash=sha256:d824961e4265367b0750ce58b07e564ad0b83ca64b335521cd3421e9b9f10d89
+    # via -r tools/python/requirements.txt
 certifi==2022.9.24 \
     --hash=sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14 \
     --hash=sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382
@@ -16,77 +20,54 @@
     --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \
     --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48
     # via mkdocs
-contourpy==1.0.6 \
-    --hash=sha256:0236875c5a0784215b49d00ebbe80c5b6b5d5244b3655a36dda88105334dea17 \
-    --hash=sha256:03d1b9c6b44a9e30d554654c72be89af94fab7510b4b9f62356c64c81cec8b7d \
-    --hash=sha256:0537cc1195245bbe24f2913d1f9211b8f04eb203de9044630abd3664c6cc339c \
-    --hash=sha256:06ca79e1efbbe2df795822df2fa173d1a2b38b6e0f047a0ec7903fbca1d1847e \
-    --hash=sha256:08e8d09d96219ace6cb596506fb9b64ea5f270b2fb9121158b976d88871fcfd1 \
-    --hash=sha256:0b1e66346acfb17694d46175a0cea7d9036f12ed0c31dfe86f0f405eedde2bdd \
-    --hash=sha256:0b97454ed5b1368b66ed414c754cba15b9750ce69938fc6153679787402e4cdf \
-    --hash=sha256:0e4854cc02006ad6684ce092bdadab6f0912d131f91c2450ce6dbdea78ee3c0b \
-    --hash=sha256:12a7dc8439544ed05c6553bf026d5e8fa7fad48d63958a95d61698df0e00092b \
-    --hash=sha256:1b1ee48a130da4dd0eb8055bbab34abf3f6262957832fd575e0cab4979a15a41 \
-    --hash=sha256:1c0e1308307a75e07d1f1b5f0f56b5af84538a5e9027109a7bcf6cb47c434e72 \
-    --hash=sha256:1dedf4c64185a216c35eb488e6f433297c660321275734401760dafaeb0ad5c2 \
-    --hash=sha256:208bc904889c910d95aafcf7be9e677726df9ef71e216780170dbb7e37d118fa \
-    --hash=sha256:211dfe2bd43bf5791d23afbe23a7952e8ac8b67591d24be3638cabb648b3a6eb \
-    --hash=sha256:341330ed19074f956cb20877ad8d2ae50e458884bfa6a6df3ae28487cc76c768 \
-    --hash=sha256:344cb3badf6fc7316ad51835f56ac387bdf86c8e1b670904f18f437d70da4183 \
-    --hash=sha256:358f6364e4873f4d73360b35da30066f40387dd3c427a3e5432c6b28dd24a8fa \
-    --hash=sha256:371f6570a81dfdddbb837ba432293a63b4babb942a9eb7aaa699997adfb53278 \
-    --hash=sha256:375d81366afd547b8558c4720337218345148bc2fcffa3a9870cab82b29667f2 \
-    --hash=sha256:3a1917d3941dd58732c449c810fa7ce46cc305ce9325a11261d740118b85e6f3 \
-    --hash=sha256:4081918147fc4c29fad328d5066cfc751da100a1098398742f9f364be63803fc \
-    --hash=sha256:444fb776f58f4906d8d354eb6f6ce59d0a60f7b6a720da6c1ccb839db7c80eb9 \
-    --hash=sha256:46deb310a276cc5c1fd27958e358cce68b1e8a515fa5a574c670a504c3a3fe30 \
-    --hash=sha256:494efed2c761f0f37262815f9e3c4bb9917c5c69806abdee1d1cb6611a7174a0 \
-    --hash=sha256:50627bf76abb6ba291ad08db583161939c2c5fab38c38181b7833423ab9c7de3 \
-    --hash=sha256:5641927cc5ae66155d0c80195dc35726eae060e7defc18b7ab27600f39dd1fe7 \
-    --hash=sha256:5b117d29433fc8393b18a696d794961464e37afb34a6eeb8b2c37b5f4128a83e \
-    --hash=sha256:613c665529899b5d9fade7e5d1760111a0b011231277a0d36c49f0d3d6914bd6 \
-    --hash=sha256:6e459ebb8bb5ee4c22c19cc000174f8059981971a33ce11e17dddf6aca97a142 \
-    --hash=sha256:6f56515e7c6fae4529b731f6c117752247bef9cdad2b12fc5ddf8ca6a50965a5 \
-    --hash=sha256:730c27978a0003b47b359935478b7d63fd8386dbb2dcd36c1e8de88cbfc1e9de \
-    --hash=sha256:75a2e638042118118ab39d337da4c7908c1af74a8464cad59f19fbc5bbafec9b \
-    --hash=sha256:78ced51807ccb2f45d4ea73aca339756d75d021069604c2fccd05390dc3c28eb \
-    --hash=sha256:7ee394502026d68652c2824348a40bf50f31351a668977b51437131a90d777ea \
-    --hash=sha256:8468b40528fa1e15181cccec4198623b55dcd58306f8815a793803f51f6c474a \
-    --hash=sha256:84c593aeff7a0171f639da92cb86d24954bbb61f8a1b530f74eb750a14685832 \
-    --hash=sha256:913bac9d064cff033cf3719e855d4f1db9f1c179e0ecf3ba9fdef21c21c6a16a \
-    --hash=sha256:9447c45df407d3ecb717d837af3b70cfef432138530712263730783b3d016512 \
-    --hash=sha256:9b0e7fe7f949fb719b206548e5cde2518ffb29936afa4303d8a1c4db43dcb675 \
-    --hash=sha256:9bc407a6af672da20da74823443707e38ece8b93a04009dca25856c2d9adadb1 \
-    --hash=sha256:9e8e686a6db92a46111a1ee0ee6f7fbfae4048f0019de207149f43ac1812cf95 \
-    --hash=sha256:9fc4e7973ed0e1fe689435842a6e6b330eb7ccc696080dda9a97b1a1b78e41db \
-    --hash=sha256:a457ee72d9032e86730f62c5eeddf402e732fdf5ca8b13b41772aa8ae13a4563 \
-    --hash=sha256:a628bba09ba72e472bf7b31018b6281fd4cc903f0888049a3724afba13b6e0b8 \
-    --hash=sha256:a79d239fc22c3b8d9d3de492aa0c245533f4f4c7608e5749af866949c0f1b1b9 \
-    --hash=sha256:aa4674cf3fa2bd9c322982644967f01eed0c91bb890f624e0e0daf7a5c3383e9 \
-    --hash=sha256:acd2bd02f1a7adff3a1f33e431eb96ab6d7987b039d2946a9b39fe6fb16a1036 \
-    --hash=sha256:b3b1bd7577c530eaf9d2bc52d1a93fef50ac516a8b1062c3d1b9bcec9ebe329b \
-    --hash=sha256:b48d94386f1994db7c70c76b5808c12e23ed7a4ee13693c2fc5ab109d60243c0 \
-    --hash=sha256:b64f747e92af7da3b85631a55d68c45a2d728b4036b03cdaba4bd94bcc85bd6f \
-    --hash=sha256:b98c820608e2dca6442e786817f646d11057c09a23b68d2b3737e6dcb6e4a49b \
-    --hash=sha256:c1baa49ab9fedbf19d40d93163b7d3e735d9cd8d5efe4cce9907902a6dad391f \
-    --hash=sha256:c38c6536c2d71ca2f7e418acaf5bca30a3af7f2a2fa106083c7d738337848dbe \
-    --hash=sha256:c78bfbc1a7bff053baf7e508449d2765964d67735c909b583204e3240a2aca45 \
-    --hash=sha256:cd2bc0c8f2e8de7dd89a7f1c10b8844e291bca17d359373203ef2e6100819edd \
-    --hash=sha256:d2eff2af97ea0b61381828b1ad6cd249bbd41d280e53aea5cccd7b2b31b8225c \
-    --hash=sha256:d8834c14b8c3dd849005e06703469db9bf96ba2d66a3f88ecc539c9a8982e0ee \
-    --hash=sha256:d912f0154a20a80ea449daada904a7eb6941c83281a9fab95de50529bfc3a1da \
-    --hash=sha256:da1ef35fd79be2926ba80fbb36327463e3656c02526e9b5b4c2b366588b74d9a \
-    --hash=sha256:dbe6fe7a1166b1ddd7b6d887ea6fa8389d3f28b5ed3f73a8f40ece1fc5a3d340 \
-    --hash=sha256:dcd556c8fc37a342dd636d7eef150b1399f823a4462f8c968e11e1ebeabee769 \
-    --hash=sha256:e13b31d1b4b68db60b3b29f8e337908f328c7f05b9add4b1b5c74e0691180109 \
-    --hash=sha256:e1739496c2f0108013629aa095cc32a8c6363444361960c07493818d0dea2da4 \
-    --hash=sha256:e43255a83835a129ef98f75d13d643844d8c646b258bebd11e4a0975203e018f \
-    --hash=sha256:e626cefff8491bce356221c22af5a3ea528b0b41fbabc719c00ae233819ea0bf \
-    --hash=sha256:eadad75bf91897f922e0fb3dca1b322a58b1726a953f98c2e5f0606bd8408621 \
-    --hash=sha256:f33da6b5d19ad1bb5e7ad38bb8ba5c426d2178928bc2b2c44e8823ea0ecb6ff3 \
-    --hash=sha256:f4052a8a4926d4468416fc7d4b2a7b2a3e35f25b39f4061a7e2a3a2748c4fc48 \
-    --hash=sha256:f6ca38dd8d988eca8f07305125dec6f54ac1c518f1aaddcc14d08c01aebb6efc
-    # via matplotlib
+contourpy==1.2.1 \
+    --hash=sha256:00e5388f71c1a0610e6fe56b5c44ab7ba14165cdd6d695429c5cd94021e390b2 \
+    --hash=sha256:10a37ae557aabf2509c79715cd20b62e4c7c28b8cd62dd7d99e5ed3ce28c3fd9 \
+    --hash=sha256:11959f0ce4a6f7b76ec578576a0b61a28bdc0696194b6347ba3f1c53827178b9 \
+    --hash=sha256:187fa1d4c6acc06adb0fae5544c59898ad781409e61a926ac7e84b8f276dcef4 \
+    --hash=sha256:1a07fc092a4088ee952ddae19a2b2a85757b923217b7eed584fdf25f53a6e7ce \
+    --hash=sha256:1cac0a8f71a041aa587410424ad46dfa6a11f6149ceb219ce7dd48f6b02b87a7 \
+    --hash=sha256:1d59e739ab0e3520e62a26c60707cc3ab0365d2f8fecea74bfe4de72dc56388f \
+    --hash=sha256:2855c8b0b55958265e8b5888d6a615ba02883b225f2227461aa9127c578a4922 \
+    --hash=sha256:2e785e0f2ef0d567099b9ff92cbfb958d71c2d5b9259981cd9bee81bd194c9a4 \
+    --hash=sha256:309be79c0a354afff9ff7da4aaed7c3257e77edf6c1b448a779329431ee79d7e \
+    --hash=sha256:39f3ecaf76cd98e802f094e0d4fbc6dc9c45a8d0c4d185f0f6c2234e14e5f75b \
+    --hash=sha256:457499c79fa84593f22454bbd27670227874cd2ff5d6c84e60575c8b50a69619 \
+    --hash=sha256:49e70d111fee47284d9dd867c9bb9a7058a3c617274900780c43e38d90fe1205 \
+    --hash=sha256:4c75507d0a55378240f781599c30e7776674dbaf883a46d1c90f37e563453480 \
+    --hash=sha256:4c863140fafc615c14a4bf4efd0f4425c02230eb8ef02784c9a156461e62c965 \
+    --hash=sha256:4d8908b3bee1c889e547867ca4cdc54e5ab6be6d3e078556814a22457f49423c \
+    --hash=sha256:5b9eb0ca724a241683c9685a484da9d35c872fd42756574a7cfbf58af26677fd \
+    --hash=sha256:6022cecf8f44e36af10bd9118ca71f371078b4c168b6e0fab43d4a889985dbb5 \
+    --hash=sha256:6150ffa5c767bc6332df27157d95442c379b7dce3a38dff89c0f39b63275696f \
+    --hash=sha256:62828cada4a2b850dbef89c81f5a33741898b305db244904de418cc957ff05dc \
+    --hash=sha256:7b4182299f251060996af5249c286bae9361fa8c6a9cda5efc29fe8bfd6062ec \
+    --hash=sha256:94b34f32646ca0414237168d68a9157cb3889f06b096612afdd296003fdd32fd \
+    --hash=sha256:9ce6889abac9a42afd07a562c2d6d4b2b7134f83f18571d859b25624a331c90b \
+    --hash=sha256:9cffe0f850e89d7c0012a1fb8730f75edd4320a0a731ed0c183904fe6ecfc3a9 \
+    --hash=sha256:a12a813949e5066148712a0626895c26b2578874e4cc63160bb007e6df3436fe \
+    --hash=sha256:a1eea9aecf761c661d096d39ed9026574de8adb2ae1c5bd7b33558af884fb2ce \
+    --hash=sha256:a31f94983fecbac95e58388210427d68cd30fe8a36927980fab9c20062645609 \
+    --hash=sha256:ac58bdee53cbeba2ecad824fa8159493f0bf3b8ea4e93feb06c9a465d6c87da8 \
+    --hash=sha256:af3f4485884750dddd9c25cb7e3915d83c2db92488b38ccb77dd594eac84c4a0 \
+    --hash=sha256:b33d2bc4f69caedcd0a275329eb2198f560b325605810895627be5d4b876bf7f \
+    --hash=sha256:b59c0ffceff8d4d3996a45f2bb6f4c207f94684a96bf3d9728dbb77428dd8cb8 \
+    --hash=sha256:bb6834cbd983b19f06908b45bfc2dad6ac9479ae04abe923a275b5f48f1a186b \
+    --hash=sha256:bd3db01f59fdcbce5b22afad19e390260d6d0222f35a1023d9adc5690a889364 \
+    --hash=sha256:bd7c23df857d488f418439686d3b10ae2fbf9bc256cd045b37a8c16575ea1040 \
+    --hash=sha256:c2528d60e398c7c4c799d56f907664673a807635b857df18f7ae64d3e6ce2d9f \
+    --hash=sha256:d31a63bc6e6d87f77d71e1abbd7387ab817a66733734883d1fc0021ed9bfa083 \
+    --hash=sha256:d4492d82b3bc7fbb7e3610747b159869468079fe149ec5c4d771fa1f614a14df \
+    --hash=sha256:ddcb8581510311e13421b1f544403c16e901c4e8f09083c881fab2be80ee31ba \
+    --hash=sha256:e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445 \
+    --hash=sha256:eb3315a8a236ee19b6df481fc5f997436e8ade24a9f03dfdc6bd490fea20c6da \
+    --hash=sha256:ef2b055471c0eb466033760a521efb9d8a32b99ab907fc8358481a1dd29e3bd3 \
+    --hash=sha256:ef5adb9a3b1d0c645ff694f9bca7702ec2c70f4d734f9922ea34de02294fdf72 \
+    --hash=sha256:f32c38afb74bd98ce26de7cc74a67b40afb7b05aae7b42924ea990d51e4dac02 \
+    --hash=sha256:fe0ccca550bb8e5abc22f530ec0466136379c01321fd94f30a22231e8a48d985
+    # via
+    #   bokeh
+    #   matplotlib
 cycler==0.11.0 \
     --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \
     --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f
@@ -118,6 +99,7 @@
     --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
     # via
     #   -r tools/python/requirements.txt
+    #   bokeh
     #   mkdocs
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
@@ -321,10 +303,12 @@
     --hash=sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135
     # via
     #   -r tools/python/requirements.txt
+    #   bokeh
     #   contourpy
     #   matplotlib
     #   opencv-python
     #   osqp
+    #   pandas
     #   qdldl
     #   scipy
     #   shapely
@@ -368,8 +352,40 @@
     --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \
     --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522
     # via
+    #   bokeh
     #   matplotlib
     #   mkdocs
+pandas==2.2.2 \
+    --hash=sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863 \
+    --hash=sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2 \
+    --hash=sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1 \
+    --hash=sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad \
+    --hash=sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db \
+    --hash=sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76 \
+    --hash=sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51 \
+    --hash=sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32 \
+    --hash=sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08 \
+    --hash=sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b \
+    --hash=sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4 \
+    --hash=sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921 \
+    --hash=sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288 \
+    --hash=sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee \
+    --hash=sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0 \
+    --hash=sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24 \
+    --hash=sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99 \
+    --hash=sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151 \
+    --hash=sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd \
+    --hash=sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce \
+    --hash=sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57 \
+    --hash=sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef \
+    --hash=sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54 \
+    --hash=sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a \
+    --hash=sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238 \
+    --hash=sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23 \
+    --hash=sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772 \
+    --hash=sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce \
+    --hash=sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad
+    # via bokeh
 pillow==9.3.0 \
     --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
     --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
@@ -432,7 +448,9 @@
     --hash=sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74 \
     --hash=sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb \
     --hash=sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0
-    # via matplotlib
+    # via
+    #   bokeh
+    #   matplotlib
 pkginfo==1.8.3 \
     --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \
     --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c
@@ -465,11 +483,16 @@
     # via
     #   ghp-import
     #   matplotlib
+    #   pandas
 python-gflags==3.1.2 \
     --hash=sha256:40ae131e899ef68e9e14aa53ca063839c34f6a168afe622217b5b875492a1ee2
     # via
     #   -r tools/python/requirements.txt
     #   glog
+pytz==2024.1 \
+    --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \
+    --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319
+    # via pandas
 pyyaml==6.0 \
     --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
     --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
@@ -512,6 +535,7 @@
     --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
     --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
     # via
+    #   bokeh
     #   mkdocs
     #   pyyaml-env-tag
 pyyaml-env-tag==0.1 \
@@ -616,6 +640,27 @@
     --hash=sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5 \
     --hash=sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8
     # via -r tools/python/requirements.txt
+tabulate==0.9.0 \
+    --hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \
+    --hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f
+    # via -r tools/python/requirements.txt
+tornado==6.4 \
+    --hash=sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0 \
+    --hash=sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63 \
+    --hash=sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263 \
+    --hash=sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052 \
+    --hash=sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f \
+    --hash=sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee \
+    --hash=sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78 \
+    --hash=sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579 \
+    --hash=sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212 \
+    --hash=sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e \
+    --hash=sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2
+    # via bokeh
+tzdata==2024.1 \
+    --hash=sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd \
+    --hash=sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252
+    # via pandas
 urllib3==1.26.13 \
     --hash=sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc \
     --hash=sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8
@@ -651,6 +696,10 @@
     --hash=sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9 \
     --hash=sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658
     # via mkdocs
+xyzservices==2024.4.0 \
+    --hash=sha256:6a04f11487a6fb77d92a98984cd107fbd9157fd5e65f929add9c3d6e604ee88c \
+    --hash=sha256:b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c
+    # via bokeh
 yapf==0.32.0 \
     --hash=sha256:8fea849025584e486fd06d6ba2bed717f396080fd3cc236ba10cb97c4c51cf32 \
     --hash=sha256:a3f5085d37ef7e3e004c4ba9f9b3e40c54ff1901cd111f05145ae313a7c67d1b
diff --git a/tools/python/requirements.txt b/tools/python/requirements.txt
index e758cf5..ddbaef3 100644
--- a/tools/python/requirements.txt
+++ b/tools/python/requirements.txt
@@ -19,3 +19,6 @@
 # TODO(phil): Migrate to absl-py. These are abandoned as far as I can tell.
 python-gflags
 glog
+
+bokeh
+tabulate
diff --git a/tools/python/whl_overrides.json b/tools/python/whl_overrides.json
index a109969..1bf2cf2 100644
--- a/tools/python/whl_overrides.json
+++ b/tools/python/whl_overrides.json
@@ -1,4 +1,8 @@
 {
+    "bokeh==3.4.1": {
+        "sha256": "1e3c502a0a8205338fc74dadbfa321f8a0965441b39501e36796a47b4017b642",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/bokeh-3.4.1-py3-none-any.whl"
+    },
     "certifi==2022.9.24": {
         "sha256": "90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/certifi-2022.9.24-py3-none-any.whl"
@@ -11,9 +15,9 @@
         "sha256": "bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/click-8.1.3-py3-none-any.whl"
     },
-    "contourpy==1.0.6": {
-        "sha256": "1dedf4c64185a216c35eb488e6f433297c660321275734401760dafaeb0ad5c2",
-        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/contourpy-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+    "contourpy==1.2.1": {
+        "sha256": "e1d59258c3c67c865435d8fbeb35f8c59b8bef3d6f46c1f29f6123556af28445",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/contourpy-1.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
     },
     "cycler==0.11.0": {
         "sha256": "3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3",
@@ -87,6 +91,10 @@
         "sha256": "ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/packaging-21.3-py3-none-any.whl"
     },
+    "pandas==2.2.2": {
+        "sha256": "66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+    },
     "pillow==9.3.0": {
         "sha256": "97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl"
@@ -96,8 +104,8 @@
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/pkginfo-1.8.3-py2.py3-none-any.whl"
     },
     "pycairo==1.22.0": {
-        "sha256": "6d8325547b2ee5476d317045ca5824901309cc5444dced73bd7d1262b3e18b83",
-        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/pycairo-1.22.0-cp39-cp39-manylinux_2_31_x86_64.whl"
+        "sha256": "451b9f68e45b9f9cae5069cd6eab44ad339ae55cf177be904c0fab6a55228b85",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/pycairo-1.22.0-cp39-cp39-manylinux_2_34_x86_64.whl"
     },
     "pygobject==3.42.2": {
         "sha256": "c11807320f696b07525b97800570e80a6563a649f2950d66501e13474e5c3a36",
@@ -115,6 +123,10 @@
         "sha256": "e2bd55abd9bb6e3b32026fd6c26a81c3f49979f24162fe73dc48da4fc306e74b",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/python_gflags-3.1.2-py3-none-any.whl"
     },
+    "pytz==2024.1": {
+        "sha256": "328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/pytz-2024.1-py2.py3-none-any.whl"
+    },
     "pyyaml==6.0": {
         "sha256": "40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl"
@@ -147,6 +159,18 @@
         "sha256": "c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/sympy-1.12-py3-none-any.whl"
     },
+    "tabulate==0.9.0": {
+        "sha256": "024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/tabulate-0.9.0-py3-none-any.whl"
+    },
+    "tornado==6.4": {
+        "sha256": "f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl"
+    },
+    "tzdata==2024.1": {
+        "sha256": "9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/tzdata-2024.1-py2.py3-none-any.whl"
+    },
     "urllib3==1.26.13": {
         "sha256": "47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/urllib3-1.26.13-py2.py3-none-any.whl"
@@ -159,6 +183,10 @@
         "sha256": "4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/watchdog-2.1.9-py3-none-manylinux2014_x86_64.whl"
     },
+    "xyzservices==2024.4.0": {
+        "sha256": "b83e48c5b776c9969fffcfff57b03d02b1b1cd6607a9d9c4e7f568b01ef47f4c",
+        "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/xyzservices-2024.4.0-py3-none-any.whl"
+    },
     "yapf==0.32.0": {
         "sha256": "8fea849025584e486fd06d6ba2bed717f396080fd3cc236ba10cb97c4c51cf32",
         "url": "https://software.frc971.org/Build-Dependencies/wheelhouse/yapf-0.32.0-py2.py3-none-any.whl"