#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <chrono>
#include <cstdint>
#include <vector>

#include "aos/init.h"
#include "aos/realtime.h"
#include "aos/time/time.h"
#include "gflags/gflags.h"
#include "glog/logging.h"

namespace chrono = std::chrono;

DEFINE_string(file, "/media/sda1/foo", "File to write to.");

DEFINE_uint32(write_size, 4096, "Size of hunk to write");
DEFINE_bool(sync, false, "If true, sync the file after each written block.");
DEFINE_bool(direct, false, "If true, O_DIRECT.");

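// Writes incompressible --write_size byte blocks to --file in an infinite
// loop and logs the achieved write rate in MB/s about once per second.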
int main(int argc, char **argv) {
  aos::InitGoogle(&argc, &argv);

  std::vector<uint8_t> data;

  // We want incompressible data. The easiest way to do this is to grab a good
  // sized block from /dev/random, and then reuse it.
  {
    int random_fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
    PCHECK(random_fd != -1) << ": Failed to open /dev/random";
    data.resize(FLAGS_write_size);
    size_t read_so_far = 0;
    while (read_so_far < data.size()) {
      const ssize_t result = read(random_fd, data.data() + read_so_far,
                                  data.size() - read_so_far);
      PCHECK(result > 0) << ": Failed to read from /dev/random";
      read_so_far += result;
    }

    PCHECK(close(random_fd) == 0);
  }

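  // Note: when --direct is set, O_DIRECT typically requires the buffer,
  // offset, and length to be aligned to the device's logical block size.
  // std::vector's default allocator does not guarantee that alignment, so
  // --direct writes may fail with EINVAL on some systems.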
  int fd = open(
      FLAGS_file.c_str(),
      O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL | (FLAGS_direct ? O_DIRECT : 0),
      0774);
  PCHECK(fd != -1);

  const aos::monotonic_clock::time_point start_time =
      aos::monotonic_clock::now();
  aos::monotonic_clock::time_point last_time = start_time;
  size_t last_written_data = 0;
  size_t written_data = 0;

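  // Write blocks forever; a failed write() aborts via PCHECK, reporting the
  // elapsed time since the first write.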
  while (true) {
    PCHECK(write(fd, data.data(), data.size()) ==
           static_cast<ssize_t>(data.size()))
        << ": Failed after "
        << chrono::duration<double>(aos::monotonic_clock::now() - start_time)
               .count();

    // Trigger a flush if asked.
    if (FLAGS_sync) {
      const aos::monotonic_clock::time_point monotonic_now =
          aos::monotonic_clock::now();
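      // Start asynchronous writeback of the block that was just written.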
      sync_file_range(fd, written_data, data.size(), SYNC_FILE_RANGE_WRITE);

      // Now do a blocking flush of the previous block so we don't get too far
      // ahead. This is Linus' recommendation.
      if (written_data > 0) {
        sync_file_range(fd, written_data - data.size(), data.size(),
                        SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
                            SYNC_FILE_RANGE_WAIT_AFTER);
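        // The previous block has been written back at this point, so tell the
        // kernel we no longer need it in the page cache.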
        posix_fadvise(fd, written_data - data.size(), data.size(),
                      POSIX_FADV_DONTNEED);
      }
      VLOG(1) << "Took "
              << chrono::duration<double>(aos::monotonic_clock::now() -
                                          monotonic_now)
                     .count();
    }

    written_data += data.size();

    const aos::monotonic_clock::time_point monotonic_now =
        aos::monotonic_clock::now();
    // Print out MB/s once it has been at least 1 second since last time.
    if (monotonic_now > last_time + chrono::seconds(1)) {
      LOG(INFO)
          << ((written_data - last_written_data) /
              chrono::duration<double>(monotonic_now - last_time).count() /
              1024. / 1024.)
          << " MB/s";
      last_time = monotonic_now;
      last_written_data = written_data;
    }
  }

  return 0;
}