Add ssd profiler for tuning SSD write performance
The write strategy for logging images is quite sensitive to the
configuration of the filesystem and to how we write to it. Debugging
that through the logger is awkward, so this tool provides a simpler
way to profile different settings.
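
For example, something like the following writes 1 MiB chunks, syncs
after each block, and prints the measured throughput in MB/s roughly
once a second (flag names as defined in ssd_profiler.cc below):

  ssd_profiler --file=/media/sda1/foo --write_size=1048576 --sync
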
Change-Id: Ia2555a4bfcc052b3e84a99d60cb044563bd76aba
Signed-off-by: Austin Schuh <austin.linux@gmail.com>
diff --git a/y2023/BUILD b/y2023/BUILD
index 0c34ce8..3a5f1bd 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -266,3 +266,15 @@
],
target_compatible_with = ["@platforms//os:linux"],
)
+
+cc_binary(
+ name = "ssd_profiler",
+ srcs = [
+ "ssd_profiler.cc",
+ ],
+ deps = [
+ "//aos:init",
+ "//aos/time",
+ "@com_github_google_glog//:glog",
+ ],
+)
diff --git a/y2023/ssd_profiler.cc b/y2023/ssd_profiler.cc
new file mode 100644
index 0000000..f5f24f5
--- /dev/null
+++ b/y2023/ssd_profiler.cc
@@ -0,0 +1,109 @@
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <chrono>
+#include <cstdint>
+#include <vector>
+
+#include "aos/init.h"
+#include "aos/realtime.h"
+#include "aos/time/time.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+
+namespace chrono = std::chrono;
+
+DEFINE_string(file, "/media/sda1/foo", "File to write to.");
+
+DEFINE_uint32(write_size, 4096, "Size of each chunk to write, in bytes.");
+DEFINE_bool(sync, false, "If true, sync the file after each written block.");
+DEFINE_bool(direct, false, "If true, open the file with O_DIRECT.");
+
+int main(int argc, char **argv) {
+ aos::InitGoogle(&argc, &argv);
+
+ std::vector<uint8_t> data;
+
+  // We want incompressible data.  The easiest way to get it is to grab a
+  // good-sized block from /dev/random and then reuse it.
+ {
+ int random_fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
+ PCHECK(random_fd != -1) << ": Failed to open /dev/random";
+ data.resize(FLAGS_write_size);
+    size_t bytes_read = 0;
+    while (bytes_read < data.size()) {
+      const ssize_t result =
+          read(random_fd, data.data() + bytes_read, data.size() - bytes_read);
+      PCHECK(result > 0) << ": Failed to read from /dev/random";
+      bytes_read += result;
+    }
+
+ PCHECK(close(random_fd) == 0);
+ }
+
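+  // Note: O_DIRECT imposes alignment requirements on the buffer address,
+  // write length, and file offset (typically the logical block size of the
+  // device), so --direct may also require an aligned buffer to work reliably.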
+ int fd = open(
+ FLAGS_file.c_str(),
+ O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL | (FLAGS_direct ? O_DIRECT : 0),
+ 0774);
+  PCHECK(fd != -1) << ": Failed to open " << FLAGS_file;
+
+ const aos::monotonic_clock::time_point start_time =
+ aos::monotonic_clock::now();
+ aos::monotonic_clock::time_point last_time =
+ start_time;
+ size_t last_written_data = 0;
+ size_t written_data = 0;
+
+ while (true) {
+    PCHECK(write(fd, data.data(), data.size()) ==
+           static_cast<ssize_t>(data.size()))
+        << ": Failed after "
+        << chrono::duration<double>(aos::monotonic_clock::now() - start_time)
+               .count() << " seconds";
+
+ // Trigger a flush if asked.
+ if (FLAGS_sync) {
+ const aos::monotonic_clock::time_point monotonic_now =
+ aos::monotonic_clock::now();
+ sync_file_range(fd, written_data, data.size(), SYNC_FILE_RANGE_WRITE);
+
+ // Now, blocking flush the previous page so we don't get too far ahead.
+ // This is Linus' recommendation.
+ if (written_data > 0) {
+ sync_file_range(fd, written_data - data.size(), data.size(),
+ SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
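+        // Drop the pages we just flushed out of the page cache so the
+        // profiler doesn't slowly fill memory with clean pages.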
+ posix_fadvise(fd, written_data - data.size(), data.size(),
+ POSIX_FADV_DONTNEED);
+ }
+      VLOG(1) << "Sync took "
+              << chrono::duration<double>(aos::monotonic_clock::now() -
+                                          monotonic_now)
+                     .count() << " seconds";
+ }
+
+ written_data += data.size();
+
+ const aos::monotonic_clock::time_point monotonic_now =
+ aos::monotonic_clock::now();
+ // Print out MB/s once it has been at least 1 second since last time.
+ if (monotonic_now > last_time + chrono::seconds(1)) {
+ LOG(INFO)
+ << ((written_data - last_written_data) /
+ chrono::duration<double>(monotonic_now - last_time).count() /
+ 1024. / 1024.)
+ << " MB/s";
+ last_time = monotonic_now;
+ last_written_data = written_data;
+ }
+ }
+
+ return 0;
+}