Move over to ABSL logging and flags.
Remove gperftools too, since it depends on gflags.
Here come the fireworks.
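
The change applies the same mechanical pattern in every file; a minimal
before/after sketch (the flag name and function here are illustrative,
not taken from this change):

  // Before: gflags + glog
  #include "gflags/gflags.h"
  #include "glog/logging.h"

  DEFINE_bool(verbose_replay, false, "If true, log every replayed message.");

  void Replay(int count) {
    CHECK_GE(count, 0) << ": negative message count";
    if (FLAGS_verbose_replay) {
      LOG(INFO) << "replaying " << count << " messages";
    }
  }

  // After: absl flags + absl logging (LOG/CHECK spellings stay the same)
  #include "absl/flags/flag.h"
  #include "absl/log/check.h"
  #include "absl/log/log.h"

  ABSL_FLAG(bool, verbose_replay, false,
            "If true, log every replayed message.");

  void Replay(int count) {
    CHECK_GE(count, 0) << ": negative message count";
    if (absl::GetFlag(FLAGS_verbose_replay)) {
      LOG(INFO) << "replaying " << count << " messages";
    }
  }

Cross-file flag references switch from DECLARE_*() to ABSL_DECLARE_FLAG(),
gflags::SetUsageMessage() becomes absl::SetProgramUsageMessage(), and BUILD
deps swap @com_github_gflags_gflags//:gflags and
@com_github_google_glog//:glog for @com_google_absl//absl/flags:flag,
absl/log, and absl/log:check.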
Change-Id: I79cb7bcf60f1047fbfa28bfffc21a0fd692e4b1c
Signed-off-by: Austin Schuh <austin.linux@gmail.com>
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 2bb87b6..7df8823 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -81,9 +81,10 @@
"//aos:init",
"//aos:json_to_flatbuffer",
"//aos/events:shm_event_loop",
- "@com_github_gflags_gflags//:gflags",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -93,7 +94,8 @@
hdrs = ["file_operations.h"],
target_compatible_with = ["@platforms//os:linux"],
deps = [
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
],
)
@@ -119,7 +121,9 @@
":file_operations",
"//aos/time",
"//aos/util:file",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
],
@@ -135,7 +139,8 @@
"//aos/containers:resizeable_buffer",
"//aos/testing:googletest",
"//aos/testing:tmpdir",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -195,9 +200,10 @@
"//aos/events:event_loop",
"//aos/network:remote_message_fbs",
"//aos/util:file",
- "@com_github_gflags_gflags//:gflags",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/types:span",
] + select({
"//tools:cpu_k8": [
@@ -220,8 +226,9 @@
deps = [
":logfile_utils",
"//aos:init",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -265,7 +272,8 @@
"//aos/containers:resizeable_buffer",
"//aos/time",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/types:span",
],
)
@@ -281,7 +289,8 @@
":buffer_encoder",
":buffer_encoder_param_test",
"//aos/testing:googletest",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -302,7 +311,8 @@
"//aos/containers:resizeable_buffer",
"//aos/util:crc32",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/types:span",
"@snappy",
],
@@ -319,7 +329,8 @@
":buffer_encoder_param_test",
":snappy_encoder",
"//aos/testing:googletest",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -340,7 +351,9 @@
"//aos/containers:resizeable_buffer",
"//third_party:lzma",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/types:span",
],
)
@@ -363,7 +376,8 @@
"//aos/containers:resizeable_buffer",
"@aws_sdk//:core",
"@aws_sdk//:s3",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
],
@@ -380,7 +394,9 @@
":buffer_encoder_param_test",
":lzma_encoder",
"//aos/testing:googletest",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -399,7 +415,8 @@
":logger_fbs",
"//aos/testing:googletest",
"//aos/testing:random_seed",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -418,7 +435,8 @@
"//aos/containers:error_list",
"//aos/containers:sized_array",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -546,8 +564,9 @@
"//aos:json_to_flatbuffer",
"//aos:sha256",
"//aos/events:simulated_event_loop",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -564,8 +583,9 @@
"//aos:configuration",
"//aos/events:simulated_event_loop",
"//aos/network:multinode_timestamp_filter",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -583,8 +603,9 @@
"//aos:init",
"//aos/events:simulated_event_loop",
"//aos/network:multinode_timestamp_filter",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -601,8 +622,9 @@
"//aos:init",
"//aos/events:simulated_event_loop",
"//aos/network:multinode_timestamp_filter",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -618,8 +640,9 @@
"//aos:init",
"//aos:json_to_flatbuffer",
"//aos/util:file",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -636,8 +659,9 @@
"//aos:json_to_flatbuffer",
"//aos/events:simulated_event_loop",
"//aos/time",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -660,8 +684,9 @@
"//aos:init",
"//aos/events:shm_event_loop",
"//aos/logging:log_namer",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -1009,8 +1034,9 @@
"//aos:init",
"//aos:json_to_flatbuffer",
"//aos/events/logging:log_reader",
- "@com_github_gflags_gflags//:gflags",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/flags:flag",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
@@ -1025,7 +1051,8 @@
"//aos:init",
"//aos/containers:resizeable_buffer",
"//aos/time",
- "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/log",
+ "@com_google_absl//absl/log:check",
],
)
diff --git a/aos/events/logging/boot_timestamp.h b/aos/events/logging/boot_timestamp.h
index dc5967c..0440ea7 100644
--- a/aos/events/logging/boot_timestamp.h
+++ b/aos/events/logging/boot_timestamp.h
@@ -9,7 +9,8 @@
#include <iostream>
#include <limits>
-#include "glog/logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "aos/time/time.h"
diff --git a/aos/events/logging/buffer_encoder.cc b/aos/events/logging/buffer_encoder.cc
index b352d5d..b6350a5 100644
--- a/aos/events/logging/buffer_encoder.cc
+++ b/aos/events/logging/buffer_encoder.cc
@@ -4,7 +4,8 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "glog/logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "aos/flatbuffers.h"
diff --git a/aos/events/logging/buffer_encoder.h b/aos/events/logging/buffer_encoder.h
index db09d32..5235d7b 100644
--- a/aos/events/logging/buffer_encoder.h
+++ b/aos/events/logging/buffer_encoder.h
@@ -1,9 +1,10 @@
#ifndef AOS_EVENTS_LOGGING_BUFFER_ENCODER_H_
#define AOS_EVENTS_LOGGING_BUFFER_ENCODER_H_
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/types/span.h"
#include "flatbuffers/flatbuffers.h"
-#include "glog/logging.h"
#include "aos/containers/resizeable_buffer.h"
#include "aos/events/logging/logger_generated.h"
diff --git a/aos/events/logging/buffer_encoder_param_test.h b/aos/events/logging/buffer_encoder_param_test.h
index 8b6648d..7a05e98 100644
--- a/aos/events/logging/buffer_encoder_param_test.h
+++ b/aos/events/logging/buffer_encoder_param_test.h
@@ -6,7 +6,8 @@
#include <random>
#include <vector>
-#include "glog/logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "gtest/gtest.h"
#include "aos/events/logging/logfile_utils.h"
diff --git a/aos/events/logging/buffer_encoder_test.cc b/aos/events/logging/buffer_encoder_test.cc
index 127fb4f..69dd7de 100644
--- a/aos/events/logging/buffer_encoder_test.cc
+++ b/aos/events/logging/buffer_encoder_test.cc
@@ -4,7 +4,8 @@
#include <fstream>
#include <string>
-#include "glog/logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
diff --git a/aos/events/logging/config_remapper.cc b/aos/events/logging/config_remapper.cc
index 088d393..763cbc3 100644
--- a/aos/events/logging/config_remapper.cc
+++ b/aos/events/logging/config_remapper.cc
@@ -2,6 +2,8 @@
#include <vector>
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/escaping.h"
#include "flatbuffers/flatbuffers.h"
diff --git a/aos/events/logging/file_operations.cc b/aos/events/logging/file_operations.cc
index d54c8b6..abd9942 100644
--- a/aos/events/logging/file_operations.cc
+++ b/aos/events/logging/file_operations.cc
@@ -3,8 +3,9 @@
#include <algorithm>
#include <ostream>
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/match.h"
-#include "glog/logging.h"
namespace aos::logger::internal {
diff --git a/aos/events/logging/log_backend.cc b/aos/events/logging/log_backend.cc
index f8a6846..d2f0d77 100644
--- a/aos/events/logging/log_backend.cc
+++ b/aos/events/logging/log_backend.cc
@@ -4,19 +4,21 @@
#include <filesystem>
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
-#include "glog/logging.h"
#include "aos/events/logging/file_operations.h"
#include "aos/util/file.h"
-DEFINE_bool(
- sync, false,
+ABSL_FLAG(
+ bool, sync, false,
"If true, sync data to disk as we go so we don't get too far ahead. Also "
"fadvise that we are done with the memory once it hits disk.");
-DEFINE_uint32(queue_reserve, 32, "Pre-reserved size of write queue.");
+ABSL_FLAG(uint32_t, queue_reserve, 32, "Pre-reserved size of write queue.");
namespace aos::logger {
namespace {
@@ -42,7 +44,7 @@
} // namespace
logger::QueueAligner::QueueAligner() {
- aligned_queue_.reserve(FLAGS_queue_reserve);
+ aligned_queue_.reserve(absl::GetFlag(FLAGS_queue_reserve));
}
void logger::QueueAligner::FillAlignedQueue(
@@ -285,7 +287,7 @@
return WriteCode::kOutOfSpace;
}
- if (FLAGS_sync) {
+ if (absl::GetFlag(FLAGS_sync)) {
// Flush asynchronously and force the data out of the cache.
sync_file_range(fd_, total_write_bytes_, written, SYNC_FILE_RANGE_WRITE);
if (last_synced_bytes_ != 0) {
diff --git a/aos/events/logging/log_backend_test.cc b/aos/events/logging/log_backend_test.cc
index 0603b33..fc29979 100644
--- a/aos/events/logging/log_backend_test.cc
+++ b/aos/events/logging/log_backend_test.cc
@@ -5,9 +5,10 @@
#include <fstream>
#include <random>
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
-#include "glog/logging.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
diff --git a/aos/events/logging/log_cat.cc b/aos/events/logging/log_cat.cc
index d7347be..38a9136 100644
--- a/aos/events/logging/log_cat.cc
+++ b/aos/events/logging/log_cat.cc
@@ -6,8 +6,9 @@
#include <string_view>
#include <vector>
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
#include "absl/strings/escaping.h"
-#include "gflags/gflags.h"
#include "aos/aos_cli_utils.h"
#include "aos/configuration.h"
@@ -17,52 +18,52 @@
#include "aos/json_to_flatbuffer.h"
#include "aos/sha256.h"
-DEFINE_string(
- name, "",
+ABSL_FLAG(
+ std::string, name, "",
"Name to match for printing out channels. Empty means no name filter.");
-DEFINE_string(type, "",
- "Channel type to match for printing out channels. Empty means no "
- "type filter.");
-DEFINE_bool(json, false, "If true, print fully valid JSON");
-DEFINE_bool(fetch, false,
- "If true, also print out the messages from before the start of the "
- "log file");
-DEFINE_bool(raw, false,
- "If true, just print the data out unsorted and unparsed");
-DEFINE_string(raw_header, "",
- "If set, the file to read the header from in raw mode");
-DEFINE_bool(distributed_clock, false,
- "If true, print out the distributed time");
-DEFINE_bool(format_raw, true,
- "If true and --raw is specified, print out raw data, but use the "
- "schema to format the data.");
-DEFINE_int64(max_vector_size, 100,
- "If positive, vectors longer than this will not be printed");
-DEFINE_bool(pretty, false,
- "If true, pretty print the messages on multiple lines");
-DEFINE_bool(
- pretty_max, false,
+ABSL_FLAG(std::string, type, "",
+ "Channel type to match for printing out channels. Empty means no "
+ "type filter.");
+ABSL_FLAG(bool, json, false, "If true, print fully valid JSON");
+ABSL_FLAG(bool, fetch, false,
+ "If true, also print out the messages from before the start of the "
+ "log file");
+ABSL_FLAG(bool, raw, false,
+ "If true, just print the data out unsorted and unparsed");
+ABSL_FLAG(std::string, raw_header, "",
+ "If set, the file to read the header from in raw mode");
+ABSL_FLAG(bool, distributed_clock, false,
+ "If true, print out the distributed time");
+ABSL_FLAG(bool, format_raw, true,
+ "If true and --raw is specified, print out raw data, but use the "
+ "schema to format the data.");
+ABSL_FLAG(int64_t, max_vector_size, 100,
+ "If positive, vectors longer than this will not be printed");
+ABSL_FLAG(bool, pretty, false,
+ "If true, pretty print the messages on multiple lines");
+ABSL_FLAG(
+ bool, pretty_max, false,
"If true, expand every field to its own line (expands more than -pretty)");
-DEFINE_bool(print_timestamps, true, "If true, timestamps are printed.");
-DEFINE_bool(print, true,
- "If true, actually print the messages. If false, discard them, "
- "confirming they can be parsed.");
-DEFINE_uint64(
- count, 0,
+ABSL_FLAG(bool, print_timestamps, true, "If true, timestamps are printed.");
+ABSL_FLAG(bool, print, true,
+ "If true, actually print the messages. If false, discard them, "
+ "confirming they can be parsed.");
+ABSL_FLAG(
+ uint64_t, count, 0,
"If >0, log_cat will exit after printing this many messages. This "
"includes messages from before the start of the log if --fetch is set.");
-DEFINE_bool(print_parts_only, false,
- "If true, only print out the results of logfile sorting.");
-DEFINE_bool(channels, false,
- "If true, print out all the configured channels for this log.");
-DEFINE_double(monotonic_start_time, 0.0,
- "If set, only print messages sent at or after this many seconds "
- "after epoch.");
-DEFINE_double(monotonic_end_time, 0.0,
- "If set, only print messages sent at or before this many seconds "
- "after epoch.");
-DEFINE_bool(hex, false,
- "Are integers in the messages printed in hex notation.");
+ABSL_FLAG(bool, print_parts_only, false,
+ "If true, only print out the results of logfile sorting.");
+ABSL_FLAG(bool, channels, false,
+ "If true, print out all the configured channels for this log.");
+ABSL_FLAG(double, monotonic_start_time, 0.0,
+ "If set, only print messages sent at or after this many seconds "
+ "after epoch.");
+ABSL_FLAG(double, monotonic_end_time, 0.0,
+ "If set, only print messages sent at or before this many seconds "
+ "after epoch.");
+ABSL_FLAG(bool, hex, false,
+ "Are integers in the messages printed in hex notation.");
using aos::monotonic_clock;
namespace chrono = std::chrono;
@@ -70,12 +71,14 @@
// Prints out raw log parts to stdout.
int PrintRaw(int argc, char **argv) {
if (argc == 1) {
- CHECK(!FLAGS_raw_header.empty());
- aos::logger::MessageReader raw_header_reader(FLAGS_raw_header);
- std::cout << aos::FlatbufferToJson(raw_header_reader.raw_log_file_header(),
- {.multi_line = FLAGS_pretty,
- .max_vector_size = static_cast<size_t>(
- FLAGS_max_vector_size)})
+ CHECK(!absl::GetFlag(FLAGS_raw_header).empty());
+ aos::logger::MessageReader raw_header_reader(
+ absl::GetFlag(FLAGS_raw_header));
+ std::cout << aos::FlatbufferToJson(
+ raw_header_reader.raw_log_file_header(),
+ {.multi_line = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size = static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
return 0;
}
@@ -108,9 +111,10 @@
maybe_header_data);
if (maybe_header.Verify()) {
std::cout << aos::FlatbufferToJson(
- log_file_header, {.multi_line = FLAGS_pretty,
- .max_vector_size = static_cast<size_t>(
- FLAGS_max_vector_size)})
+ log_file_header,
+ {.multi_line = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size = static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
LOG(WARNING) << "Found duplicate LogFileHeader in " << reader.filename();
log_file_header =
@@ -126,26 +130,26 @@
// And now use the final sha256 to match the raw_header.
std::optional<aos::logger::MessageReader> raw_header_reader;
const aos::logger::LogFileHeader *full_header = &log_file_header.message();
- if (!FLAGS_raw_header.empty()) {
- raw_header_reader.emplace(FLAGS_raw_header);
- std::cout << aos::FlatbufferToJson(full_header,
- {.multi_line = FLAGS_pretty,
- .max_vector_size = static_cast<size_t>(
- FLAGS_max_vector_size)})
+ if (!absl::GetFlag(FLAGS_raw_header).empty()) {
+ raw_header_reader.emplace(absl::GetFlag(FLAGS_raw_header));
+ std::cout << aos::FlatbufferToJson(
+ full_header, {.multi_line = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size = static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
CHECK_EQ(full_header->configuration_sha256()->string_view(),
aos::Sha256(raw_header_reader->raw_log_file_header().span()));
full_header = raw_header_reader->log_file_header();
}
- if (!FLAGS_print) {
+ if (!absl::GetFlag(FLAGS_print)) {
return 0;
}
- std::cout << aos::FlatbufferToJson(full_header,
- {.multi_line = FLAGS_pretty,
- .max_vector_size = static_cast<size_t>(
- FLAGS_max_vector_size)})
+ std::cout << aos::FlatbufferToJson(
+ full_header, {.multi_line = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size = static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
CHECK(full_header->has_configuration())
<< ": Missing configuration! You may want to provide the path to the "
@@ -178,21 +182,25 @@
<< channel->type()->c_str();
}
- if (FLAGS_format_raw && message.message().data() != nullptr) {
+ if (absl::GetFlag(FLAGS_format_raw) &&
+ message.message().data() != nullptr) {
std::cout << aos::configuration::StrippedChannelToString(channel) << " "
- << aos::FlatbufferToJson(message, {.multi_line = FLAGS_pretty,
- .max_vector_size = 4})
+ << aos::FlatbufferToJson(
+ message, {.multi_line = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size = 4})
<< ": "
<< aos::FlatbufferToJson(
channel->schema(), message.message().data()->data(),
- {FLAGS_pretty,
- static_cast<size_t>(FLAGS_max_vector_size)})
+ {absl::GetFlag(FLAGS_pretty),
+ static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
} else {
std::cout << aos::configuration::StrippedChannelToString(channel) << " "
<< aos::FlatbufferToJson(
- message, {FLAGS_pretty,
- static_cast<size_t>(FLAGS_max_vector_size)})
+ message, {absl::GetFlag(FLAGS_pretty),
+ static_cast<size_t>(
+ absl::GetFlag(FLAGS_max_vector_size))})
<< std::endl;
}
}
@@ -219,26 +227,26 @@
event_loop_->configuration()->channels();
const monotonic_clock::time_point start_time =
- (FLAGS_monotonic_start_time == 0.0
+ (absl::GetFlag(FLAGS_monotonic_start_time) == 0.0
? monotonic_clock::min_time
: monotonic_clock::time_point(
std::chrono::duration_cast<monotonic_clock::duration>(
std::chrono::duration<double>(
- FLAGS_monotonic_start_time))));
+ absl::GetFlag(FLAGS_monotonic_start_time)))));
const monotonic_clock::time_point end_time =
- (FLAGS_monotonic_end_time == 0.0
+ (absl::GetFlag(FLAGS_monotonic_end_time) == 0.0
? monotonic_clock::max_time
: monotonic_clock::time_point(
std::chrono::duration_cast<monotonic_clock::duration>(
std::chrono::duration<double>(
- FLAGS_monotonic_end_time))));
+ absl::GetFlag(FLAGS_monotonic_end_time)))));
for (flatbuffers::uoffset_t i = 0; i < channels->size(); i++) {
const aos::Channel *channel = channels->Get(i);
const flatbuffers::string_view name = channel->name()->string_view();
const flatbuffers::string_view type = channel->type()->string_view();
- if (name.find(FLAGS_name) != std::string::npos &&
- type.find(FLAGS_type) != std::string::npos) {
+ if (name.find(absl::GetFlag(FLAGS_name)) != std::string::npos &&
+ type.find(absl::GetFlag(FLAGS_type)) != std::string::npos) {
if (!aos::configuration::ChannelIsReadableOnNode(channel,
event_loop_->node())) {
continue;
@@ -250,14 +258,15 @@
end_time](
const aos::Context &context,
const void * /*message*/) {
- if (!FLAGS_print) {
+ if (!absl::GetFlag(FLAGS_print)) {
return;
}
- if (FLAGS_count > 0 && printer_->message_count() >= FLAGS_count) {
+ if (absl::GetFlag(FLAGS_count) > 0 &&
+ printer_->message_count() >= absl::GetFlag(FLAGS_count)) {
return;
}
- if (!FLAGS_fetch && !started_) {
+ if (!absl::GetFlag(FLAGS_fetch) && !started_) {
return;
}
@@ -267,7 +276,8 @@
}
printer_->PrintMessage(node_name_, node_factory_, channel, context);
- if (FLAGS_count > 0 && printer_->message_count() >= FLAGS_count) {
+ if (absl::GetFlag(FLAGS_count) > 0 &&
+ printer_->message_count() >= absl::GetFlag(FLAGS_count)) {
factory_->Exit();
}
});
@@ -278,7 +288,7 @@
void SetStarted(bool started, aos::monotonic_clock::time_point monotonic_now,
aos::realtime_clock::time_point realtime_now) {
started_ = started;
- if (FLAGS_json) {
+ if (absl::GetFlag(FLAGS_json)) {
return;
}
if (started_) {
@@ -318,7 +328,7 @@
};
int main(int argc, char **argv) {
- gflags::SetUsageMessage(
+ absl::SetProgramUsageMessage(
"Usage:\n"
" log_cat [args] logfile1 logfile2 ...\n"
"\n"
@@ -332,7 +342,7 @@
"the logged data.");
aos::InitGoogle(&argc, &argv);
- if (FLAGS_raw) {
+ if (absl::GetFlag(FLAGS_raw)) {
return PrintRaw(argc, argv);
}
@@ -345,17 +355,17 @@
for (auto &it : logfiles) {
VLOG(1) << it;
- if (FLAGS_print_parts_only) {
+ if (absl::GetFlag(FLAGS_print_parts_only)) {
std::cout << it << std::endl;
}
}
- if (FLAGS_print_parts_only) {
+ if (absl::GetFlag(FLAGS_print_parts_only)) {
return 0;
}
aos::logger::LogReader reader(logfiles);
- if (FLAGS_channels) {
+ if (absl::GetFlag(FLAGS_channels)) {
const aos::Configuration *config = reader.configuration();
for (const aos::Channel *channel : *config->channels()) {
std::cout << channel->name()->c_str() << " " << channel->type()->c_str()
@@ -373,8 +383,8 @@
const aos::Channel *channel = channels->Get(i);
const flatbuffers::string_view name = channel->name()->string_view();
const flatbuffers::string_view type = channel->type()->string_view();
- if (name.find(FLAGS_name) != std::string::npos &&
- type.find(FLAGS_type) != std::string::npos) {
+ if (name.find(absl::GetFlag(FLAGS_name)) != std::string::npos &&
+ type.find(absl::GetFlag(FLAGS_type)) != std::string::npos) {
found_channel = true;
}
}
@@ -385,13 +395,14 @@
aos::Printer printer(
{
- .pretty = FLAGS_pretty,
- .max_vector_size = static_cast<size_t>(FLAGS_max_vector_size),
- .pretty_max = FLAGS_pretty_max,
- .print_timestamps = FLAGS_print_timestamps,
- .json = FLAGS_json,
- .distributed_clock = FLAGS_distributed_clock,
- .hex = FLAGS_hex,
+ .pretty = absl::GetFlag(FLAGS_pretty),
+ .max_vector_size =
+ static_cast<size_t>(absl::GetFlag(FLAGS_max_vector_size)),
+ .pretty_max = absl::GetFlag(FLAGS_pretty_max),
+ .print_timestamps = absl::GetFlag(FLAGS_print_timestamps),
+ .json = absl::GetFlag(FLAGS_json),
+ .distributed_clock = absl::GetFlag(FLAGS_distributed_clock),
+ .hex = absl::GetFlag(FLAGS_hex),
},
false);
diff --git a/aos/events/logging/log_config_extractor.cc b/aos/events/logging/log_config_extractor.cc
index f2df5b3..628bf72 100644
--- a/aos/events/logging/log_config_extractor.cc
+++ b/aos/events/logging/log_config_extractor.cc
@@ -2,9 +2,11 @@
#include <iostream>
#include <vector>
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "flatbuffers/flatbuffers.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
#include "aos/configuration_generated.h"
#include "aos/events/logging/log_reader.h"
@@ -13,19 +15,20 @@
#include "aos/init.h"
#include "aos/json_to_flatbuffer.h"
-DEFINE_string(output_path, "/tmp/",
- "Destination folder for output files. If this flag is not used, "
- "it stores the files in /tmp/.");
-DEFINE_bool(convert_to_json, false,
- "If true, can be used to convert bfbs to json.");
-DEFINE_bool(bfbs, false,
- "If true, write as a binary flatbuffer inside the output_path.");
-DEFINE_bool(json, false, "If true, write as a json inside the output_path.");
-DEFINE_bool(stripped, false,
- "If true, write as a stripped json inside the output_path.");
-DEFINE_bool(quiet, false,
- "If true, do not print configuration to stdout. If false, print "
- "stripped json");
+ABSL_FLAG(std::string, output_path, "/tmp/",
+ "Destination folder for output files. If this flag is not used, "
+ "it stores the files in /tmp/.");
+ABSL_FLAG(bool, convert_to_json, false,
+ "If true, can be used to convert bfbs to json.");
+ABSL_FLAG(bool, bfbs, false,
+ "If true, write as a binary flatbuffer inside the output_path.");
+ABSL_FLAG(bool, json, false,
+ "If true, write as a json inside the output_path.");
+ABSL_FLAG(bool, stripped, false,
+ "If true, write as a stripped json inside the output_path.");
+ABSL_FLAG(bool, quiet, false,
+ "If true, do not print configuration to stdout. If false, print "
+ "stripped json");
namespace aos {
@@ -43,27 +46,27 @@
auto config_flatbuffer = configuration::MergeConfiguration(
RecursiveCopyFlatBuffer(config), schemas);
- if (FLAGS_bfbs) {
+ if (absl::GetFlag(FLAGS_bfbs)) {
WriteFlatbufferToFile(output_path + ".bfbs", config_flatbuffer);
LOG(INFO) << "Done writing bfbs to " << output_path << ".bfbs";
}
- if (FLAGS_json) {
+ if (absl::GetFlag(FLAGS_json)) {
WriteFlatbufferToJson(output_path + ".json", config_flatbuffer);
LOG(INFO) << "Done writing json to " << output_path << ".json";
}
- if (FLAGS_stripped || !FLAGS_quiet) {
+ if (absl::GetFlag(FLAGS_stripped) || !absl::GetFlag(FLAGS_quiet)) {
auto *channels = config_flatbuffer.mutable_message()->mutable_channels();
for (size_t i = 0; i < channels->size(); i++) {
channels->GetMutableObject(i)->clear_schema();
}
- if (FLAGS_stripped) {
+ if (absl::GetFlag(FLAGS_stripped)) {
WriteFlatbufferToJson(output_path + ".stripped.json", config_flatbuffer);
LOG(INFO) << "Done writing stripped json to " << output_path
<< ".stripped.json";
}
- if (!FLAGS_quiet) {
+ if (!absl::GetFlag(FLAGS_quiet)) {
std::cout << FlatbufferToJson(config_flatbuffer) << std::endl;
}
}
@@ -72,7 +75,7 @@
int Main(int argc, char *argv[]) {
CHECK(argc > 1) << "Must provide an argument";
- std::string output_path = FLAGS_output_path;
+ std::string output_path = absl::GetFlag(FLAGS_output_path);
if (output_path.back() != '/') {
output_path += "/";
}
@@ -93,7 +96,7 @@
aos::FlatbufferDetachedBuffer<aos::Configuration> buffer(
aos::JsonToFlatbuffer(stdin_data, aos::ConfigurationTypeTable()));
WriteConfig(&buffer.message(), output_path);
- } else if (FLAGS_convert_to_json) {
+ } else if (absl::GetFlag(FLAGS_convert_to_json)) {
aos::FlatbufferDetachedBuffer config = aos::configuration::ReadConfig(arg);
WriteFlatbufferToJson(output_path + ".json", config);
LOG(INFO) << "Done writing json to " << output_path << ".json";
@@ -109,7 +112,7 @@
} // namespace aos
int main(int argc, char *argv[]) {
- gflags::SetUsageMessage(
+ absl::SetProgramUsageMessage(
"Binary to output the configuration of a log.\n"
"# print config as stripped json to stdout\n"
"# path to log should always be absolute path.\n"
diff --git a/aos/events/logging/log_edit.cc b/aos/events/logging/log_edit.cc
index 4585ab7..33cd120 100644
--- a/aos/events/logging/log_edit.cc
+++ b/aos/events/logging/log_edit.cc
@@ -1,6 +1,7 @@
#include <iostream>
-#include "gflags/gflags.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
#include "aos/configuration.h"
#include "aos/events/logging/log_reader.h"
@@ -9,32 +10,32 @@
#include "aos/json_to_flatbuffer.h"
#include "aos/util/file.h"
-DEFINE_string(logfile, "/tmp/logfile.bfbs",
- "Name of the logfile to read from.");
-DEFINE_bool(
- replace, false,
- "If true, replace the header on the log file with the JSON header.");
-DEFINE_string(
- header, "",
+ABSL_FLAG(std::string, logfile, "/tmp/logfile.bfbs",
+ "Name of the logfile to read from.");
+ABSL_FLAG(bool, replace, false,
+ "If true, replace the header on the log file with the JSON header.");
+ABSL_FLAG(
+ std::string, header, "",
"If provided, this is the path to the JSON with the log file header. If "
"not provided, _header.json will be appended to --logfile.");
-DEFINE_int32(
- max_message_size, 128 * 1024 * 1024,
- "Max size of a message to be written. This sets the buffers inside "
- "the encoders.");
+ABSL_FLAG(int32_t, max_message_size, 128 * 1024 * 1024,
+ "Max size of a message to be written. This sets the buffers inside "
+ "the encoders.");
+ABSL_FLAG(bool, direct, false,
+ "If true, write using O_DIRECT and write 512 byte aligned blocks "
+ "whenever possible.");
-DEFINE_bool(direct, false,
- "If true, write using O_DIRECT and write 512 byte aligned blocks "
- "whenever possible.");
int main(int argc, char **argv) {
- gflags::SetUsageMessage(R"(This tool lets us manipulate log files.)");
+ absl::SetProgramUsageMessage(R"(This tool lets us manipulate log files.)");
aos::InitGoogle(&argc, &argv);
std::string header_json_path =
- FLAGS_header.empty() ? (FLAGS_logfile + "_header.json") : FLAGS_header;
+ absl::GetFlag(FLAGS_header).empty()
+ ? (absl::GetFlag(FLAGS_logfile) + "_header.json")
+ : absl::GetFlag(FLAGS_header);
- if (FLAGS_replace) {
+ if (absl::GetFlag(FLAGS_replace)) {
const ::std::string header_json =
aos::util::ReadFileToStringOrDie(header_json_path);
flatbuffers::FlatBufferBuilder fbb;
@@ -46,16 +47,18 @@
aos::SizePrefixedFlatbufferDetachedBuffer<aos::logger::LogFileHeader>
header(fbb.Release());
- const std::string orig_path = FLAGS_logfile + ".orig";
- PCHECK(rename(FLAGS_logfile.c_str(), orig_path.c_str()) == 0);
+ const std::string orig_path = absl::GetFlag(FLAGS_logfile) + ".orig";
+ PCHECK(rename(absl::GetFlag(FLAGS_logfile).c_str(), orig_path.c_str()) ==
+ 0);
aos::logger::SpanReader span_reader(orig_path);
CHECK(!span_reader.ReadMessage().empty()) << ": Empty header, aborting";
- aos::logger::FileBackend file_backend("/", FLAGS_direct);
+ aos::logger::FileBackend file_backend("/", absl::GetFlag(FLAGS_direct));
aos::logger::DetachedBufferWriter buffer_writer(
- file_backend.RequestFile(FLAGS_logfile),
- std::make_unique<aos::logger::DummyEncoder>(FLAGS_max_message_size));
+ file_backend.RequestFile(absl::GetFlag(FLAGS_logfile)),
+ std::make_unique<aos::logger::DummyEncoder>(
+ absl::GetFlag(FLAGS_max_message_size)));
{
aos::logger::DataEncoder::SpanCopier copier(header.span());
buffer_writer.CopyMessage(&copier, aos::monotonic_clock::min_time);
@@ -73,7 +76,7 @@
}
}
} else {
- aos::logger::MessageReader reader(FLAGS_logfile);
+ aos::logger::MessageReader reader(absl::GetFlag(FLAGS_logfile));
aos::util::WriteStringToFileOrDie(
header_json_path,
aos::FlatbufferToJson(reader.log_file_header(), {.multi_line = true}));
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index c065f68..e8fb524 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -6,9 +6,11 @@
#include <string_view>
#include <vector>
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "flatbuffers/flatbuffers.h"
-#include "glog/logging.h"
#include "aos/containers/error_list.h"
#include "aos/containers/sized_array.h"
@@ -17,7 +19,7 @@
#include "aos/flatbuffer_merge.h"
#include "aos/uuid.h"
-DECLARE_int32(flush_size);
+ABSL_DECLARE_FLAG(int32_t, flush_size);
namespace aos::logger {
@@ -762,7 +764,7 @@
encoder_factory_([](size_t max_message_size) {
// TODO(austin): For slow channels, can we allocate less memory?
return std::make_unique<DummyEncoder>(max_message_size,
- FLAGS_flush_size);
+ absl::GetFlag(FLAGS_flush_size));
}) {}
MultiNodeLogNamer::~MultiNodeLogNamer() {
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
index 2c0982a..dcacd1b 100644
--- a/aos/events/logging/log_namer.h
+++ b/aos/events/logging/log_namer.h
@@ -8,8 +8,9 @@
#include <vector>
#include "absl/container/btree_map.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "flatbuffers/flatbuffers.h"
-#include "glog/logging.h"
#include "aos/events/logging/logfile_utils.h"
#include "aos/events/logging/logger_generated.h"
diff --git a/aos/events/logging/log_reader.cc b/aos/events/logging/log_reader.cc
index 81f8466..ae5f0ef 100644
--- a/aos/events/logging/log_reader.cc
+++ b/aos/events/logging/log_reader.cc
@@ -10,6 +10,7 @@
#include <utility>
#include <vector>
+#include "absl/flags/flag.h"
#include "absl/strings/escaping.h"
#include "absl/types/span.h"
#include "flatbuffers/flatbuffers.h"
@@ -30,47 +31,47 @@
#include "aos/util/file.h"
#include "aos/uuid.h"
-DEFINE_bool(skip_missing_forwarding_entries, false,
- "If true, drop any forwarding entries with missing data. If "
- "false, CHECK.");
+ABSL_FLAG(bool, skip_missing_forwarding_entries, false,
+ "If true, drop any forwarding entries with missing data. If "
+ "false, CHECK.");
-DECLARE_bool(timestamps_to_csv);
-DEFINE_bool(
- enable_timestamp_loading, true,
+ABSL_DECLARE_FLAG(bool, timestamps_to_csv);
+ABSL_FLAG(
+ bool, enable_timestamp_loading, true,
"Enable loading all the timestamps into RAM at startup if they are in "
"separate files. This fixes any timestamp queueing problems for the cost "
"of storing timestamps in RAM only on logs with timestamps logged in "
"separate files from data. Only disable this if you are reading a really "
"long log file and are experiencing memory problems due to loading all the "
"timestamps into RAM.");
-DEFINE_bool(
- force_timestamp_loading, false,
+ABSL_FLAG(
+ bool, force_timestamp_loading, false,
"Force loading all the timestamps into RAM at startup. This fixes any "
"timestamp queueing problems for the cost of storing timestamps in RAM and "
"potentially reading each log twice.");
-DEFINE_bool(skip_order_validation, false,
- "If true, ignore any out of orderness in replay");
+ABSL_FLAG(bool, skip_order_validation, false,
+ "If true, ignore any out of orderness in replay");
-DEFINE_double(
- time_estimation_buffer_seconds, 2.0,
+ABSL_FLAG(
+ double, time_estimation_buffer_seconds, 2.0,
"The time to buffer ahead in the log file to accurately reconstruct time.");
-DEFINE_string(
- start_time, "",
+ABSL_FLAG(
+ std::string, start_time, "",
"If set, start at this point in time in the log on the realtime clock.");
-DEFINE_string(
- end_time, "",
+ABSL_FLAG(
+ std::string, end_time, "",
"If set, end at this point in time in the log on the realtime clock.");
-DEFINE_bool(drop_realtime_messages_before_start, false,
- "If set, will drop any messages sent before the start of the "
- "logfile in realtime replay. Setting this guarantees consistency "
- "in timing with the original logfile, but means that you lose "
- "access to fetched low-frequency messages.");
+ABSL_FLAG(bool, drop_realtime_messages_before_start, false,
+ "If set, will drop any messages sent before the start of the "
+ "logfile in realtime replay. Setting this guarantees consistency "
+ "in timing with the original logfile, but means that you lose "
+ "access to fetched low-frequency messages.");
-DEFINE_double(
- threaded_look_ahead_seconds, 2.0,
+ABSL_FLAG(
+ double, threaded_look_ahead_seconds, 2.0,
"Time, in seconds, to add to look-ahead when using multi-threaded replay. "
"Can validly be zero, but higher values are encouraged for realtime replay "
"in order to prevent the replay from ever having to block on waiting for "
@@ -190,8 +191,8 @@
replay_channels_(replay_channels),
config_remapper_(log_files_.config().get(), replay_configuration_,
replay_channels_) {
- SetStartTime(FLAGS_start_time);
- SetEndTime(FLAGS_end_time);
+ SetStartTime(absl::GetFlag(FLAGS_start_time));
+ SetEndTime(absl::GetFlag(FLAGS_end_time));
{
// Log files container validates that log files shared the same config.
@@ -442,13 +443,13 @@
filters_ =
std::make_unique<message_bridge::MultiNodeNoncausalOffsetEstimator>(
event_loop_factory_->configuration(), logged_configuration(),
- log_files_.boots(), FLAGS_skip_order_validation,
+ log_files_.boots(), absl::GetFlag(FLAGS_skip_order_validation),
timestamp_queue_strategy ==
TimestampQueueStrategy::kQueueTimestampsAtStartup
? chrono::seconds(0)
: chrono::duration_cast<chrono::nanoseconds>(
chrono::duration<double>(
- FLAGS_time_estimation_buffer_seconds)));
+ absl::GetFlag(FLAGS_time_estimation_buffer_seconds))));
std::vector<TimestampMapper *> timestamp_mappers;
for (const Node *node : configuration::GetNodes(configuration())) {
@@ -625,7 +626,7 @@
: monotonic_clock::min_time);
}
- if (FLAGS_timestamps_to_csv) {
+ if (absl::GetFlag(FLAGS_timestamps_to_csv)) {
filters_->Start(event_loop_factory);
}
}
@@ -640,8 +641,8 @@
TimestampQueueStrategy LogReader::ComputeTimestampQueueStrategy() const {
if ((log_files_.TimestampsStoredSeparately() &&
- FLAGS_enable_timestamp_loading) ||
- FLAGS_force_timestamp_loading) {
+ absl::GetFlag(FLAGS_enable_timestamp_loading)) ||
+ absl::GetFlag(FLAGS_force_timestamp_loading)) {
return TimestampQueueStrategy::kQueueTimestampsAtStartup;
} else {
return TimestampQueueStrategy::kQueueTogether;
@@ -658,13 +659,13 @@
filters_ =
std::make_unique<message_bridge::MultiNodeNoncausalOffsetEstimator>(
event_loop->configuration(), logged_configuration(),
- log_files_.boots(), FLAGS_skip_order_validation,
+ log_files_.boots(), absl::GetFlag(FLAGS_skip_order_validation),
timestamp_queue_strategy ==
TimestampQueueStrategy::kQueueTimestampsAtStartup
? chrono::seconds(0)
: chrono::duration_cast<chrono::nanoseconds>(
chrono::duration<double>(
- FLAGS_time_estimation_buffer_seconds)));
+ absl::GetFlag(FLAGS_time_estimation_buffer_seconds))));
std::vector<TimestampMapper *> timestamp_mappers;
for (const Node *node : configuration::GetNodes(configuration())) {
@@ -860,7 +861,7 @@
state->event_loop()->context().monotonic_event_time;
if (event_loop_factory_ != nullptr) {
// Only enforce exact timing in simulation.
- if (!FLAGS_skip_order_validation) {
+ if (!absl::GetFlag(FLAGS_skip_order_validation)) {
CHECK(monotonic_now == timestamped_message.monotonic_event_time.time)
<< ": " << FlatbufferToJson(state->event_loop()->node()) << " Now "
<< monotonic_now << " trying to send "
@@ -884,11 +885,12 @@
state->monotonic_start_time(
timestamped_message.monotonic_event_time.boot) ||
event_loop_factory_ != nullptr ||
- !FLAGS_drop_realtime_messages_before_start) {
+ !absl::GetFlag(FLAGS_drop_realtime_messages_before_start)) {
if (timestamped_message.data != nullptr && !state->found_last_message()) {
if (timestamped_message.monotonic_remote_time !=
BootTimestamp::min_time() &&
- !FLAGS_skip_order_validation && event_loop_factory_ != nullptr) {
+ !absl::GetFlag(FLAGS_skip_order_validation) &&
+ event_loop_factory_ != nullptr) {
// Confirm that the message was sent on the sending node before the
// destination node (this node). As a proxy, do this by making sure
// that time on the source node is past when the message was sent.
@@ -899,7 +901,7 @@
// fix that.
BootTimestamp monotonic_remote_now =
state->monotonic_remote_now(timestamped_message.channel_index);
- if (!FLAGS_skip_order_validation) {
+ if (!absl::GetFlag(FLAGS_skip_order_validation)) {
CHECK_EQ(timestamped_message.monotonic_remote_time.boot,
monotonic_remote_now.boot)
<< state->event_loop()->node()->name()->string_view() << " to "
@@ -974,7 +976,7 @@
state->monotonic_remote_start_time(
timestamped_message.monotonic_remote_time.boot,
timestamped_message.channel_index) &&
- !FLAGS_skip_missing_forwarding_entries)) {
+ !absl::GetFlag(FLAGS_skip_missing_forwarding_entries))) {
if (!state->found_last_message()) {
// We've found a timestamp without data that we expect to have data
// for. This likely means that we are at the end of the log file.
@@ -1130,8 +1132,8 @@
// primed).
state->QueueThreadUntil(
next_time + std::chrono::duration_cast<std::chrono::nanoseconds>(
- std::chrono::duration<double>(
- FLAGS_threaded_look_ahead_seconds)));
+ std::chrono::duration<double>(absl::GetFlag(
+ FLAGS_threaded_look_ahead_seconds))));
state->MaybeSetClockOffset();
state->Schedule(next_time.time);
state->SetUpStartupTimer();
@@ -1787,7 +1789,8 @@
message_queuer_->SetState(
message.value().monotonic_event_time +
std::chrono::duration_cast<std::chrono::nanoseconds>(
- std::chrono::duration<double>(FLAGS_threaded_look_ahead_seconds)));
+ std::chrono::duration<double>(
+ absl::GetFlag(FLAGS_threaded_look_ahead_seconds))));
VLOG(1) << "Popped " << message.value()
<< configuration::CleanedChannelToString(
event_loop_->configuration()->channels()->Get(
@@ -1872,8 +1875,9 @@
TimestampQueueStrategy::kQueueTimestampsAtStartup)
return;
- timestamp_mapper_->QueueFor(chrono::duration_cast<chrono::seconds>(
- chrono::duration<double>(FLAGS_time_estimation_buffer_seconds)));
+ timestamp_mapper_->QueueFor(
+ chrono::duration_cast<chrono::seconds>(chrono::duration<double>(
+ absl::GetFlag(FLAGS_time_estimation_buffer_seconds))));
}
void LogReader::State::Deregister() {
diff --git a/aos/events/logging/log_reader.h b/aos/events/logging/log_reader.h
index 85d4d2c..1334b86 100644
--- a/aos/events/logging/log_reader.h
+++ b/aos/events/logging/log_reader.h
@@ -8,9 +8,10 @@
#include <tuple>
#include <vector>
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "flatbuffers/flatbuffers.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
#include "aos/condition.h"
#include "aos/events/event_loop.h"
diff --git a/aos/events/logging/log_replayer.cc b/aos/events/logging/log_replayer.cc
index 22d8b39..970b828 100644
--- a/aos/events/logging/log_replayer.cc
+++ b/aos/events/logging/log_replayer.cc
@@ -7,9 +7,11 @@
#include <string_view>
#include <vector>
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "flatbuffers/flatbuffers.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
#include "aos/configuration_generated.h"
#include "aos/events/event_loop.h"
@@ -28,30 +30,31 @@
#include "aos/json_to_flatbuffer.h"
#include "aos/util/file.h"
-DEFINE_string(config, "", "If specified, overrides logged configuration.");
-DEFINE_bool(
- plot_timing, true,
+ABSL_FLAG(std::string, config, "",
+ "If specified, overrides logged configuration.");
+ABSL_FLAG(
+ bool, plot_timing, true,
"If set, generates a plot of the replay timing--namely, the errors between "
"when we "
"should've sent messages and when we actually sent replayed messages.");
-DEFINE_bool(skip_sender_channels, true,
- "If set, skips replay of the channels applications replay on");
-DEFINE_bool(skip_replay, false,
- "If set, skips actually running the replay. Useful for writing a "
- "config without running replay");
-DEFINE_bool(
- print_config, false,
+ABSL_FLAG(bool, skip_sender_channels, true,
+ "If set, skips replay of the channels applications replay on");
+ABSL_FLAG(bool, skip_replay, false,
+ "If set, skips actually running the replay. Useful for writing a "
+ "config without running replay");
+ABSL_FLAG(
+ bool, print_config, false,
"If set, prints the config that will be used for replay to stdout as json");
-DEFINE_string(
- replay_config, "",
+ABSL_FLAG(
+ std::string, replay_config, "",
"Path to the configuration used for log replay which includes items such "
"as channels to remap, and applications to target for replay. If not set, "
"log_reader will run on shm event loop. ");
-DEFINE_string(merge_with_config, "",
- "A valid json string to be merged with config. This is used to "
- "add extra applications needed to run only for log_replayer");
-DEFINE_bool(print_stats, true,
- "if set, prints the LogReplayerStats message as JSON to stdout");
+ABSL_FLAG(std::string, merge_with_config, "",
+ "A valid json string to be merged with config. This is used to "
+ "add extra applications needed to run only for log_replayer");
+ABSL_FLAG(bool, print_stats, true,
+ "if set, prints the LogReplayerStats message as JSON to stdout");
namespace aos::logger {
@@ -61,11 +64,11 @@
aos::logger::LogReader config_reader(logfiles);
aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- FLAGS_config.empty()
+ absl::GetFlag(FLAGS_config).empty()
? CopyFlatBuffer<aos::Configuration>(config_reader.configuration())
- : aos::configuration::ReadConfig(FLAGS_config);
+ : aos::configuration::ReadConfig(absl::GetFlag(FLAGS_config));
- if (FLAGS_plot_timing) {
+ if (absl::GetFlag(FLAGS_plot_timing)) {
// Go through the effort to add a ReplayTiming channel to ensure that we
// can capture timing information from the replay.
const aos::Configuration *raw_config = &config.message();
@@ -89,18 +92,19 @@
aos::FlatbufferSpan<reflection::Schema>(aos::LogReplayerStatsSchema()),
aos::configuration::GetMyNode(raw_config), channel_overrides);
- if (!FLAGS_merge_with_config.empty()) {
- config = aos::configuration::MergeWithConfig(&config.message(),
- FLAGS_merge_with_config);
+ if (!absl::GetFlag(FLAGS_merge_with_config).empty()) {
+ config = aos::configuration::MergeWithConfig(
+ &config.message(), absl::GetFlag(FLAGS_merge_with_config));
}
std::optional<aos::FlatbufferDetachedBuffer<ReplayConfig>> replay_config =
- FLAGS_replay_config.empty()
+ absl::GetFlag(FLAGS_replay_config).empty()
? std::nullopt
: std::make_optional(aos::JsonToFlatbuffer<ReplayConfig>(
- aos::util::ReadFileToStringOrDie(FLAGS_replay_config.data())));
+ aos::util::ReadFileToStringOrDie(
+ absl::GetFlag(FLAGS_replay_config).data())));
std::vector<std::pair<std::string, std::string>> message_filter;
- if (FLAGS_skip_sender_channels && replay_config.has_value()) {
+ if (absl::GetFlag(FLAGS_skip_sender_channels) && replay_config.has_value()) {
CHECK(replay_config.value().message().has_active_nodes());
std::vector<const Node *> active_nodes;
for (const auto &node : *replay_config.value().message().active_nodes()) {
@@ -142,12 +146,12 @@
}
}
- if (FLAGS_print_config) {
+ if (absl::GetFlag(FLAGS_print_config)) {
// TODO(Naman): Replace with config writer if it will be cleaner
std::cout << FlatbufferToJson(reader.configuration()) << std::endl;
}
- if (!FLAGS_skip_replay) {
+ if (!absl::GetFlag(FLAGS_skip_replay)) {
aos::ShmEventLoop event_loop(reader.configuration());
event_loop.SkipAosLog();
@@ -198,7 +202,7 @@
reader.OnEnd(event_loop.node(), [&event_loop]() { event_loop.Exit(); });
- if (FLAGS_plot_timing) {
+ if (absl::GetFlag(FLAGS_plot_timing)) {
aos::Sender<aos::timing::ReplayTiming> replay_timing_sender =
event_loop.MakeSender<aos::timing::ReplayTiming>("/timing");
reader.set_timing_accuracy_sender(event_loop.node(),
@@ -209,7 +213,7 @@
reader.Deregister();
- if (FLAGS_print_stats) {
+ if (absl::GetFlag(FLAGS_print_stats)) {
aos::Fetcher<aos::LogReplayerStats> stats_fetcher =
event_loop.MakeFetcher<aos::LogReplayerStats>("/replay");
CHECK(stats_fetcher.Fetch()) << "Failed to fetch LogReplayerStats!";
@@ -223,7 +227,7 @@
} // namespace aos::logger
int main(int argc, char *argv[]) {
- gflags::SetUsageMessage(
+ absl::SetProgramUsageMessage(
R"message(Binary to replay the full contents of a logfile into shared memory.
#replay_config should be set in order to replay a set of nodes, applications and channels
#print config and skip replay, if you only want to print the config and not do log replay
diff --git a/aos/events/logging/log_stats.cc b/aos/events/logging/log_stats.cc
index a45a167..6e6efc9 100644
--- a/aos/events/logging/log_stats.cc
+++ b/aos/events/logging/log_stats.cc
@@ -2,8 +2,9 @@
#include <iostream>
#include <queue>
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
#include "absl/strings/str_format.h"
-#include "gflags/gflags.h"
#include "aos/events/logging/log_reader.h"
#include "aos/events/simulated_event_loop.h"
@@ -11,23 +12,23 @@
#include "aos/json_to_flatbuffer.h"
#include "aos/time/time.h"
-DEFINE_string(
- name, "",
+ABSL_FLAG(
+ std::string, name, "",
"Name to match for printing out channels. Empty means no name filter.");
-DEFINE_string(node, "", "Node to print stats out for.");
+ABSL_FLAG(std::string, node, "", "Node to print stats out for.");
-DEFINE_bool(excessive_size_only, false,
- "Only print channels that have a set max message size that is more "
- "than double of the max message size.");
+ABSL_FLAG(bool, excessive_size_only, false,
+ "Only print channels that have a set max message size that is more "
+ "than double of the max message size.");
-DEFINE_double(
- run_for, 0.0,
+ABSL_FLAG(
+ double, run_for, 0.0,
"If set to a positive value, only process the log for this many seconds. "
"Otherwise, process the log until the end of the log.");
-DEFINE_bool(
- print_repack_size_diffs, false,
+ABSL_FLAG(
+ bool, print_repack_size_diffs, false,
"Analyze how many bytes could be saved in each message when converted to "
"JSON and back. This can be helpful to identify code that is generating "
"inefficiently packed flatbuffer messages.");
@@ -340,7 +341,7 @@
continue;
}
- if (channel->name()->string_view().find(FLAGS_name) ==
+ if (channel->name()->string_view().find(absl::GetFlag(FLAGS_name)) ==
std::string::npos) {
continue;
}
@@ -354,7 +355,7 @@
auto watcher = [this, channel_stats_index](const aos::Context &context) {
this->UpdateStats(context, channel_stats_index);
};
- if (FLAGS_print_repack_size_diffs) {
+ if (absl::GetFlag(FLAGS_print_repack_size_diffs)) {
event_loop_->MakeRawWatcher(
channel, std::bind(watcher, ::std::placeholders::_1));
} else {
@@ -376,7 +377,7 @@
void PrintStats() {
// Print out the stats per channel and for the logfile.
for (size_t i = 0; i != channel_stats_.size(); i++) {
- if (!FLAGS_excessive_size_only ||
+ if (!absl::GetFlag(FLAGS_excessive_size_only) ||
(channel_stats_[i].max_message_size() * 2) <
static_cast<size_t>(channel_stats_[i].channel()->max_size())) {
if (channel_stats_[i].total_num_messages() > 0) {
@@ -390,7 +391,7 @@
std::max(logfile_stats_.logfile_end_time,
channel_stats_[i].channel_end_time());
- if (!FLAGS_excessive_size_only) {
+ if (!absl::GetFlag(FLAGS_excessive_size_only)) {
std::cout << " " << channel_stats_[i].total_num_messages()
<< " msgs, " << channel_stats_[i].avg_messages_per_sec()
<< "hz avg, " << channel_stats_[i].max_messages_per_sec()
@@ -406,7 +407,7 @@
<< channel_stats_[i].Percentile() << ", "
<< channel_stats_[i].AvgLatency();
std::cout << std::endl;
- if (FLAGS_print_repack_size_diffs) {
+ if (absl::GetFlag(FLAGS_print_repack_size_diffs)) {
std::cout << " " << channel_stats_[i].avg_packed_size_reduction()
<< " bytes packed reduction avg, "
<< channel_stats_[i].max_packed_size_reduction()
@@ -441,7 +442,7 @@
};
int main(int argc, char **argv) {
- gflags::SetUsageMessage(
+ absl::SetProgramUsageMessage(
"Usage: \n"
" log_stats [args] logfile1 logfile2 ...\n"
"This program provides statistics on a given log file. Supported "
@@ -470,7 +471,7 @@
const aos::Node *node = nullptr;
if (aos::configuration::MultiNode(reader.configuration())) {
- if (FLAGS_node.empty()) {
+ if (absl::GetFlag(FLAGS_node).empty()) {
LOG(INFO) << "Need a --node specified. The log file has:";
for (const aos::Node *node : reader.LoggedNodes()) {
LOG(INFO) << " " << node->name()->string_view();
@@ -478,7 +479,8 @@
reader.Deregister();
return 1;
} else {
- node = aos::configuration::GetNode(reader.configuration(), FLAGS_node);
+ node = aos::configuration::GetNode(reader.configuration(),
+ absl::GetFlag(FLAGS_node));
}
}
@@ -498,10 +500,10 @@
log_stats_application = nullptr;
});
- if (FLAGS_run_for > 0.0) {
+ if (absl::GetFlag(FLAGS_run_for) > 0.0) {
event_loop_factory.RunFor(
std::chrono::duration_cast<std::chrono::nanoseconds>(
- std::chrono::duration<double>(FLAGS_run_for)));
+ std::chrono::duration<double>(absl::GetFlag(FLAGS_run_for))));
} else {
event_loop_factory.Run();
}
diff --git a/aos/events/logging/logfile_sorting.cc b/aos/events/logging/logfile_sorting.cc
index fdb4eab..756138c 100644
--- a/aos/events/logging/logfile_sorting.cc
+++ b/aos/events/logging/logfile_sorting.cc
@@ -9,6 +9,9 @@
#include <vector>
#include "absl/container/btree_map.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/str_join.h"
#include "aos/containers/error_list.h"
@@ -24,8 +27,8 @@
#include "aos/events/logging/s3_file_operations.h"
#endif
-DEFINE_bool(quiet_sorting, false,
- "If true, sort with minimal messages about truncated files.");
+ABSL_FLAG(bool, quiet_sorting, false,
+ "If true, sort with minimal messages about truncated files.");
namespace aos::logger {
namespace {
@@ -388,7 +391,7 @@
std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
ReadHeader(reader);
if (!log_header) {
- if (!FLAGS_quiet_sorting) {
+ if (!absl::GetFlag(FLAGS_quiet_sorting)) {
LOG(WARNING) << "Skipping " << part.name << " without a header";
}
corrupted.emplace_back(part.name);
@@ -519,7 +522,7 @@
std::optional<SizePrefixedFlatbufferVector<MessageHeader>> first_message =
ReadNthMessage(part.name, 0);
if (!first_message) {
- if (!FLAGS_quiet_sorting) {
+ if (!absl::GetFlag(FLAGS_quiet_sorting)) {
LOG(WARNING) << "Skipping " << part.name << " without any messages";
}
corrupted.emplace_back(part.name);
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 0d10a99..f920681 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -9,10 +9,11 @@
#include <climits>
#include <filesystem>
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/escaping.h"
#include "flatbuffers/flatbuffers.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
#include "aos/configuration.h"
#include "aos/events/logging/snappy_encoder.h"
@@ -34,39 +35,38 @@
#include "aos/events/logging/s3_fetcher.h"
#endif
-DEFINE_int32(flush_size, 128 * 1024,
- "Number of outstanding bytes to allow before flushing to disk.");
-DEFINE_double(
- flush_period, 5.0,
- "Max time to let data sit in the queue before flushing in seconds.");
+ABSL_FLAG(int32_t, flush_size, 128 * 1024,
+ "Number of outstanding bytes to allow before flushing to disk.");
+ABSL_FLAG(double, flush_period, 5.0,
+ "Max time to let data sit in the queue before flushing in seconds.");
-DEFINE_double(
- max_network_delay, 1.0,
+ABSL_FLAG(
+ double, max_network_delay, 1.0,
"Max time to assume a message takes to cross the network before we are "
"willing to drop it from our buffers and assume it didn't make it. "
"Increasing this number can increase memory usage depending on the packet "
"loss of your network or if the timestamps aren't logged for a message.");
-DEFINE_double(
- max_out_of_order, -1,
+ABSL_FLAG(
+ double, max_out_of_order, -1,
"If set, this overrides the max out of order duration for a log file.");
-DEFINE_bool(workaround_double_headers, true,
- "Some old log files have two headers at the beginning. Use the "
- "last header as the actual header.");
+ABSL_FLAG(bool, workaround_double_headers, true,
+ "Some old log files have two headers at the beginning. Use the "
+ "last header as the actual header.");
-DEFINE_bool(crash_on_corrupt_message, true,
- "When true, MessageReader will crash the first time a message "
- "with corrupted format is found. When false, the crash will be "
- "suppressed, and any remaining readable messages will be "
- "evaluated to present verified vs corrupted stats.");
+ABSL_FLAG(bool, crash_on_corrupt_message, true,
+ "When true, MessageReader will crash the first time a message "
+ "with corrupted format is found. When false, the crash will be "
+ "suppressed, and any remaining readable messages will be "
+ "evaluated to present verified vs corrupted stats.");
-DEFINE_bool(ignore_corrupt_messages, false,
- "When true, and crash_on_corrupt_message is false, then any "
- "corrupt message found by MessageReader be silently ignored, "
- "providing access to all uncorrupted messages in a logfile.");
+ABSL_FLAG(bool, ignore_corrupt_messages, false,
+          "When true, and crash_on_corrupt_message is false, then any "
+          "corrupt message found by MessageReader will be silently ignored, "
+          "providing access to all uncorrupted messages in a logfile.");
-DECLARE_bool(quiet_sorting);
+ABSL_DECLARE_FLAG(bool, quiet_sorting);
namespace aos::logger {
namespace {
@@ -280,11 +280,12 @@
// point queueing up any more data in memory. Also flush once we have enough
// data queued up or if it has been long enough.
while (encoder_->space() == 0 ||
- encoder_->queued_bytes() > static_cast<size_t>(FLAGS_flush_size) ||
+ encoder_->queued_bytes() >
+ static_cast<size_t>(absl::GetFlag(FLAGS_flush_size)) ||
encoder_->queue_size() >= IOV_MAX ||
- (now > last_flush_time_ +
- chrono::duration_cast<chrono::nanoseconds>(
- chrono::duration<double>(FLAGS_flush_period)) &&
+ (now > last_flush_time_ + chrono::duration_cast<chrono::nanoseconds>(
+ chrono::duration<double>(absl::GetFlag(
+ FLAGS_flush_period))) &&
encoder_->queued_bytes() != 0)) {
VLOG(1) << "Chose to flush at " << now << ", last " << last_flush_time_
<< " queued bytes " << encoder_->queued_bytes();
@@ -1140,7 +1141,7 @@
part_readers_.clear();
}
if (log_source_ == nullptr) {
- part_readers_.emplace_back(id, FLAGS_quiet_sorting);
+ part_readers_.emplace_back(id, absl::GetFlag(FLAGS_quiet_sorting));
} else {
part_readers_.emplace_back(id, log_source_->GetDecoder(id));
}
@@ -1168,7 +1169,8 @@
// way that it can't happen anymore. We've seen some logs where the body
// parses as a header recently, so the simple solution of always looking is
// failing us.
- if (FLAGS_workaround_double_headers && !result.message().has_logger_sha1()) {
+ if (absl::GetFlag(FLAGS_workaround_double_headers) &&
+ !result.message().has_logger_sha1()) {
while (true) {
absl::Span<const uint8_t> maybe_header_data = span_reader->PeekMessage();
if (maybe_header_data.empty()) {
@@ -1227,8 +1229,10 @@
: span_reader_(std::move(span_reader)),
raw_log_file_header_(
SizePrefixedFlatbufferVector<LogFileHeader>::Empty()) {
- set_crash_on_corrupt_message_flag(FLAGS_crash_on_corrupt_message);
- set_ignore_corrupt_messages_flag(FLAGS_ignore_corrupt_messages);
+ set_crash_on_corrupt_message_flag(
+ absl::GetFlag(FLAGS_crash_on_corrupt_message));
+ set_ignore_corrupt_messages_flag(
+ absl::GetFlag(FLAGS_ignore_corrupt_messages));
std::optional<SizePrefixedFlatbufferVector<LogFileHeader>>
raw_log_file_header = ReadHeader(&span_reader_);
@@ -1244,9 +1248,9 @@
total_verified_before_ = span_reader_.TotalConsumed();
max_out_of_order_duration_ =
- FLAGS_max_out_of_order > 0
+ absl::GetFlag(FLAGS_max_out_of_order) > 0
? chrono::duration_cast<chrono::nanoseconds>(
- chrono::duration<double>(FLAGS_max_out_of_order))
+ chrono::duration<double>(absl::GetFlag(FLAGS_max_out_of_order)))
: chrono::nanoseconds(log_file_header()->max_out_of_order_duration());
VLOG(1) << "Opened " << span_reader_.filename() << " as node "
@@ -2622,7 +2626,8 @@
messages.begin()->timestamp +
node_data.channels[msg->channel_index].time_to_live +
chrono::duration_cast<chrono::nanoseconds>(
- chrono::duration<double>(FLAGS_max_network_delay)) <
+ chrono::duration<double>(
+ absl::GetFlag(FLAGS_max_network_delay))) <
last_popped_message_time_) {
messages.pop_front();
}
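The definition side of this file shows the other half of the pattern: ABSL_FLAG takes the type as an explicit first argument instead of encoding it in the macro name (DEFINE_double becomes ABSL_FLAG(double, ...)), and numeric reads again go through absl::GetFlag(). A hedged sketch with a hypothetical flag, mirroring the flush_period conversion above:

#include <chrono>

#include "absl/flags/flag.h"

// Hypothetical stand-in for flush_period; illustration only.
ABSL_FLAG(double, example_flush_period, 5.0,
          "Max time to let data sit in the queue before flushing, in seconds.");

std::chrono::nanoseconds ExampleFlushPeriod() {
  // A double-seconds flag widened to nanoseconds, the same conversion the
  // DetachedBufferWriter flush check performs.
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
      std::chrono::duration<double>(absl::GetFlag(FLAGS_example_flush_period)));
}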
diff --git a/aos/events/logging/logfile_utils_out_of_space_test_runner.cc b/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
index f6fb499..4247c12 100644
--- a/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
+++ b/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
@@ -2,19 +2,21 @@
#include <array>
-#include "gflags/gflags.h"
-#include "glog/logging.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "aos/events/logging/logfile_utils.h"
#include "aos/init.h"
-DECLARE_int32(flush_size);
-DEFINE_string(tmpfs, "", "tmpfs with the desired size");
+ABSL_DECLARE_FLAG(int32_t, flush_size);
+ABSL_FLAG(std::string, tmpfs, "", "tmpfs with the desired size");
int main(int argc, char **argv) {
aos::InitGoogle(&argc, &argv);
- FLAGS_flush_size = 1;
- CHECK(!FLAGS_tmpfs.empty()) << ": Must specify a tmpfs location";
+ absl::SetFlag(&FLAGS_flush_size, 1);
+ CHECK(!absl::GetFlag(FLAGS_tmpfs).empty())
+ << ": Must specify a tmpfs location";
std::array<uint8_t, 10240> data;
data.fill(0);
@@ -22,7 +24,7 @@
// Don't use odirect
aos::logger::FileBackend file_backend("/", false);
aos::logger::DetachedBufferWriter writer(
- file_backend.RequestFile(FLAGS_tmpfs + "/file"),
+ file_backend.RequestFile(absl::GetFlag(FLAGS_tmpfs) + "/file"),
std::make_unique<aos::logger::DummyEncoder>(data.size()));
for (int i = 0; i < 8; ++i) {
aos::logger::DataEncoder::SpanCopier coppier(data);
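This test runner shows the write side of the migration: a flag owned by another translation unit is declared with ABSL_DECLARE_FLAG and mutated with absl::SetFlag(), which takes a pointer to the flag object instead of assigning to a global. A sketch under those assumptions, with a hypothetical flag name and its definition assumed to live in another file:

#include <cstdint>

#include "absl/flags/declare.h"
#include "absl/flags/flag.h"

// Defined in some other file; declared here so this file can set it.
ABSL_DECLARE_FLAG(int32_t, example_flush_size);

void ForceTinyWritesForTest() {
  // gflags: FLAGS_example_flush_size = 1;
  absl::SetFlag(&FLAGS_example_flush_size, 1);
}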
diff --git a/aos/events/logging/logfile_utils_test.cc b/aos/events/logging/logfile_utils_test.cc
index 87c6f5d..b97579a 100644
--- a/aos/events/logging/logfile_utils_test.cc
+++ b/aos/events/logging/logfile_utils_test.cc
@@ -5,11 +5,12 @@
#include <random>
#include <string>
+#include "absl/flags/flag.h"
+#include "absl/flags/reflection.h"
#include "absl/strings/escaping.h"
#include "external/com_github_google_flatbuffers/src/annotated_binary_text_gen.h"
#include "external/com_github_google_flatbuffers/src/binary_annotator.h"
#include "flatbuffers/reflection_generated.h"
-#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "aos/events/logging/logfile_sorting.h"
@@ -3094,7 +3095,7 @@
}
{
- gflags::FlagSaver fs;
+ absl::FlagSaver fs;
MessageReader reader(logfile);
reader.set_crash_on_corrupt_message_flag(false);
@@ -3116,7 +3117,7 @@
}
{
- gflags::FlagSaver fs;
+ absl::FlagSaver fs;
MessageReader reader(logfile);
reader.set_crash_on_corrupt_message_flag(false);
@@ -3239,6 +3240,30 @@
std::mt19937(::aos::testing::RandomSeed())};
std::vector<uint8_t> data_;
+
+ const aos::FlatbufferDetachedBuffer<Configuration> config_{
+ JsonToFlatbuffer<Configuration>(
+ R"({
+ "channels": [
+ {
+ "name": "/a",
+ "type": "aos.logger.testing.TestMessage"
+ },
+ {
+ "name": "/b",
+ "type": "aos.logger.testing.TestMessage"
+ },
+ {
+ "name": "/c",
+ "type": "aos.logger.testing.TestMessage"
+ },
+ {
+ "name": "/d",
+ "type": "aos.logger.testing.TestMessage"
+ }
+ ]
+}
+)")};
};
// Uses the binary schema to annotate a provided flatbuffer. Returns the
@@ -3268,7 +3293,7 @@
// tested below.
class TimeEventLoop : public EventLoop {
public:
- TimeEventLoop() : EventLoop(nullptr) {}
+ TimeEventLoop(const aos::Configuration *config) : EventLoop(config) {}
aos::monotonic_clock::time_point monotonic_now() const final {
return aos::monotonic_clock::min_time;
@@ -3403,7 +3428,7 @@
// Ok, now we want to confirm that we can build up arbitrary pieces of
// said flatbuffer. Try all of them since it is cheap.
- TimeEventLoop event_loop;
+ TimeEventLoop event_loop(&config_.message());
for (size_t i = 0; i < repacked_message.size(); i += 8) {
for (size_t j = i; j < repacked_message.size(); j += 8) {
std::vector<uint8_t> destination(repacked_message.size(), 67u);
@@ -3475,7 +3500,7 @@
// Ok, now we want to confirm that we can build up arbitrary pieces of said
// flatbuffer. Try all of them since it is cheap.
- TimeEventLoop event_loop;
+ TimeEventLoop event_loop(&config_.message());
for (size_t i = 0; i < repacked_message.size(); i += 8) {
for (size_t j = i; j < repacked_message.size(); j += 8) {
std::vector<uint8_t> destination(repacked_message.size(), 67u);
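The FlagSaver swap in these tests is one-for-one, but the header changes: absl::FlagSaver lives in absl/flags/reflection.h (added above) rather than coming along with the flags header, and it restores every flag it recorded at construction when it is destroyed. A small illustrative test, with a hypothetical flag:

#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"
#include "gtest/gtest.h"

ABSL_FLAG(bool, example_strict, true, "Hypothetical flag for illustration.");

TEST(FlagSaverExample, RestoresValuesAtEndOfScope) {
  {
    absl::FlagSaver saver;
    absl::SetFlag(&FLAGS_example_strict, false);
    EXPECT_FALSE(absl::GetFlag(FLAGS_example_strict));
  }
  // The saver went out of scope, so the original default is back.
  EXPECT_TRUE(absl::GetFlag(FLAGS_example_strict));
}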
diff --git a/aos/events/logging/logger_main.cc b/aos/events/logging/logger_main.cc
index f99fe4c..73b6694 100644
--- a/aos/events/logging/logger_main.cc
+++ b/aos/events/logging/logger_main.cc
@@ -6,41 +6,45 @@
#ifdef LZMA
#include "aos/events/logging/lzma_encoder.h"
#endif
-#include "gflags/gflags.h"
-#include "glog/logging.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "aos/events/logging/snappy_encoder.h"
#include "aos/events/shm_event_loop.h"
#include "aos/init.h"
#include "aos/logging/log_namer.h"
-DEFINE_bool(direct, false,
- "If true, write using O_DIRECT and write 512 byte aligned blocks "
- "whenever possible.");
+ABSL_FLAG(bool, direct, false,
+ "If true, write using O_DIRECT and write 512 byte aligned blocks "
+ "whenever possible.");
-DEFINE_string(config, "aos_config.json", "Config file to use.");
+ABSL_FLAG(std::string, config, "aos_config.json", "Config file to use.");
-DEFINE_bool(skip_renicing, false,
- "If true, skip renicing the logger. This leaves it lower priority "
- "and increases the likelihood of dropping messages and crashing.");
+ABSL_FLAG(bool, skip_renicing, false,
+ "If true, skip renicing the logger. This leaves it lower priority "
+ "and increases the likelihood of dropping messages and crashing.");
-DEFINE_bool(snappy_compress, false, "If true, compress log data using snappy.");
+ABSL_FLAG(bool, snappy_compress, false,
+ "If true, compress log data using snappy.");
#ifdef LZMA
-DEFINE_bool(xz_compress, false, "If true, compress log data using xz.");
+ABSL_FLAG(bool, xz_compress, false, "If true, compress log data using xz.");
#endif
-DEFINE_double(rotate_every, 0.0,
- "If set, rotate the logger after this many seconds");
+ABSL_FLAG(double, rotate_every, 0.0,
+ "If set, rotate the logger after this many seconds");
#ifdef LZMA
-DEFINE_int32(xz_compression_level, 9, "Compression level for the LZMA Encoder");
+ABSL_FLAG(int32_t, xz_compression_level, 9,
+ "Compression level for the LZMA Encoder");
#endif
-DECLARE_int32(flush_size);
+ABSL_DECLARE_FLAG(int32_t, flush_size);
int main(int argc, char *argv[]) {
- gflags::SetUsageMessage(
+ absl::SetProgramUsageMessage(
"This program provides a simple logger binary that logs all SHMEM data "
"directly to a file specified at the command line. It does not manage "
"filenames, so it will just crash if you attempt to overwrite an "
@@ -49,27 +53,28 @@
aos::InitGoogle(&argc, &argv);
aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(FLAGS_config);
+ aos::configuration::ReadConfig(absl::GetFlag(FLAGS_config));
aos::ShmEventLoop event_loop(&config.message());
auto log_namer = std::make_unique<aos::logger::MultiNodeFilesLogNamer>(
&event_loop, std::make_unique<aos::logger::RenamableFileBackend>(
absl::StrCat(aos::logging::GetLogName("fbs_log"), "/"),
- FLAGS_direct));
+ absl::GetFlag(FLAGS_direct)));
- if (FLAGS_snappy_compress) {
+ if (absl::GetFlag(FLAGS_snappy_compress)) {
log_namer->set_extension(aos::logger::SnappyDecoder::kExtension);
log_namer->set_encoder_factory([](size_t max_message_size) {
- return std::make_unique<aos::logger::SnappyEncoder>(max_message_size,
- FLAGS_flush_size);
+ return std::make_unique<aos::logger::SnappyEncoder>(
+ max_message_size, absl::GetFlag(FLAGS_flush_size));
});
#ifdef LZMA
- } else if (FLAGS_xz_compress) {
+ } else if (absl::GetFlag(FLAGS_xz_compress)) {
log_namer->set_extension(aos::logger::LzmaEncoder::kExtension);
log_namer->set_encoder_factory([](size_t max_message_size) {
return std::make_unique<aos::logger::LzmaEncoder>(
- max_message_size, FLAGS_xz_compression_level, FLAGS_flush_size);
+ max_message_size, absl::GetFlag(FLAGS_xz_compression_level),
+ absl::GetFlag(FLAGS_flush_size));
});
#endif
}
@@ -78,10 +83,10 @@
event_loop.monotonic_now();
aos::logger::Logger logger(&event_loop);
- if (FLAGS_rotate_every != 0.0) {
+ if (absl::GetFlag(FLAGS_rotate_every) != 0.0) {
logger.set_on_logged_period([&](aos::monotonic_clock::time_point t) {
- if (t > last_rotation_time +
- std::chrono::duration<double>(FLAGS_rotate_every)) {
+ if (t > last_rotation_time + std::chrono::duration<double>(
+ absl::GetFlag(FLAGS_rotate_every))) {
logger.Rotate();
last_rotation_time = t;
}
@@ -89,7 +94,7 @@
}
event_loop.OnRun([&log_namer, &logger]() {
- if (FLAGS_skip_renicing) {
+ if (absl::GetFlag(FLAGS_skip_renicing)) {
LOG(WARNING) << "Ignoring request to renice to -20 due to "
"--skip_renicing.";
} else {
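logger_main.cc also picks up the usage-message replacement: gflags::SetUsageMessage() becomes absl::SetProgramUsageMessage() from absl/flags/usage.h, called before the arguments are parsed so --help can show it. A stripped-down sketch (the message text here is illustrative, not the real one):

#include "absl/flags/usage.h"
#include "aos/init.h"

int main(int argc, char **argv) {
  absl::SetProgramUsageMessage(
      "Example logger binary; logs shared-memory data to disk.");
  aos::InitGoogle(&argc, &argv);  // Parses flags, so --help sees the message.
  return 0;
}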
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index c810203..bf5e414 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -2,8 +2,9 @@
#include <filesystem>
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/str_format.h"
-#include "glog/logging.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
diff --git a/aos/events/logging/lzma_encoder.cc b/aos/events/logging/lzma_encoder.cc
index 5e9fa7a..f4a075a 100644
--- a/aos/events/logging/lzma_encoder.cc
+++ b/aos/events/logging/lzma_encoder.cc
@@ -1,8 +1,10 @@
#include "aos/events/logging/lzma_encoder.h"
-#include "glog/logging.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
-DEFINE_int32(lzma_threads, 1, "Number of threads to use for encoding");
+ABSL_FLAG(int32_t, lzma_threads, 1, "Number of threads to use for encoding");
namespace aos::logger {
namespace {
@@ -58,14 +60,14 @@
CHECK_LE(compression_preset_, 9u)
<< ": Compression preset must be in the range [0, 9].";
- if (FLAGS_lzma_threads <= 1) {
+ if (absl::GetFlag(FLAGS_lzma_threads) <= 1) {
lzma_ret status =
lzma_easy_encoder(&stream_, compression_preset_, LZMA_CHECK_CRC64);
CHECK(LzmaCodeIsOk(status));
} else {
lzma_mt mt_options;
memset(&mt_options, 0, sizeof(mt_options));
- mt_options.threads = FLAGS_lzma_threads;
+ mt_options.threads = absl::GetFlag(FLAGS_lzma_threads);
mt_options.block_size = block_size;
// Compress for at most 100 ms before relinquishing control back to the main
// thread.
diff --git a/aos/events/logging/lzma_encoder_test.cc b/aos/events/logging/lzma_encoder_test.cc
index 92b4f54..2eb1281 100644
--- a/aos/events/logging/lzma_encoder_test.cc
+++ b/aos/events/logging/lzma_encoder_test.cc
@@ -1,5 +1,7 @@
#include "aos/events/logging/lzma_encoder.h"
+#include "absl/flags/declare.h"
+#include "absl/flags/flag.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -7,14 +9,14 @@
#include "aos/testing/tmpdir.h"
#include "aos/util/file.h"
-DECLARE_int32(lzma_threads);
+ABSL_DECLARE_FLAG(int32_t, lzma_threads);
namespace aos::logger::testing {
INSTANTIATE_TEST_SUITE_P(
MtLzma, BufferEncoderTest,
::testing::Combine(::testing::Values([](size_t max_message_size) {
- FLAGS_lzma_threads = 3;
+ absl::SetFlag(&FLAGS_lzma_threads, 3);
return std::make_unique<LzmaEncoder>(max_message_size,
2, 4096);
}),
@@ -26,7 +28,7 @@
INSTANTIATE_TEST_SUITE_P(
MtLzmaThreaded, BufferEncoderTest,
::testing::Combine(::testing::Values([](size_t max_message_size) {
- FLAGS_lzma_threads = 3;
+ absl::SetFlag(&FLAGS_lzma_threads, 3);
return std::make_unique<LzmaEncoder>(max_message_size,
5, 4096);
}),
@@ -38,7 +40,7 @@
INSTANTIATE_TEST_SUITE_P(
Lzma, BufferEncoderTest,
::testing::Combine(::testing::Values([](size_t max_message_size) {
- FLAGS_lzma_threads = 1;
+ absl::SetFlag(&FLAGS_lzma_threads, 1);
return std::make_unique<LzmaEncoder>(max_message_size,
2, 4096);
}),
@@ -50,7 +52,7 @@
INSTANTIATE_TEST_SUITE_P(
LzmaThreaded, BufferEncoderTest,
::testing::Combine(::testing::Values([](size_t max_message_size) {
- FLAGS_lzma_threads = 1;
+ absl::SetFlag(&FLAGS_lzma_threads, 1);
return std::make_unique<LzmaEncoder>(max_message_size,
5, 4096);
}),
diff --git a/aos/events/logging/multinode_logger_test_lib.cc b/aos/events/logging/multinode_logger_test_lib.cc
index 3bf6207..b48f78a 100644
--- a/aos/events/logging/multinode_logger_test_lib.cc
+++ b/aos/events/logging/multinode_logger_test_lib.cc
@@ -1,5 +1,9 @@
#include "aos/events/logging/multinode_logger_test_lib.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
+
#include "aos/events/event_loop.h"
#include "aos/events/logging/log_reader.h"
#include "aos/events/logging/logfile_utils.h"
@@ -9,7 +13,7 @@
#include "aos/events/simulated_event_loop.h"
#include "aos/testing/tmpdir.h"
-DECLARE_bool(force_timestamp_loading);
+ABSL_DECLARE_FLAG(bool, force_timestamp_loading);
namespace aos::logger::testing {
@@ -111,9 +115,9 @@
pi1_reboot_logfiles_(MakePi1RebootLogfiles()),
logfiles_(MakeLogFiles(logfile_base1_, logfile_base2_)),
structured_logfiles_(StructureLogFiles()) {
- FLAGS_force_timestamp_loading =
- std::get<0>(GetParam()).timestamp_buffering ==
- ForceTimestampBuffering::kForceBufferTimestamps;
+ absl::SetFlag(&FLAGS_force_timestamp_loading,
+ std::get<0>(GetParam()).timestamp_buffering ==
+ ForceTimestampBuffering::kForceBufferTimestamps);
util::UnlinkRecursive(tmp_dir_ + "/logs");
std::filesystem::create_directory(tmp_dir_ + "/logs");
diff --git a/aos/events/logging/multinode_logger_test_lib.h b/aos/events/logging/multinode_logger_test_lib.h
index 3c71632..386339d 100644
--- a/aos/events/logging/multinode_logger_test_lib.h
+++ b/aos/events/logging/multinode_logger_test_lib.h
@@ -1,8 +1,8 @@
#ifndef AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
#define AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
+#include "absl/flags/reflection.h"
#include "absl/strings/str_format.h"
-#include "glog/logging.h"
#include "gmock/gmock.h"
#include "aos/events/event_loop.h"
@@ -172,7 +172,7 @@
void AddExtension(std::string_view extension);
- gflags::FlagSaver flag_saver_;
+ absl::FlagSaver flag_saver_;
// Config and factory.
aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
diff --git a/aos/events/logging/realtime_replay_test.cc b/aos/events/logging/realtime_replay_test.cc
index dd4aaf0..7db9442 100644
--- a/aos/events/logging/realtime_replay_test.cc
+++ b/aos/events/logging/realtime_replay_test.cc
@@ -9,15 +9,14 @@
#include "aos/testing/path.h"
#include "aos/testing/tmpdir.h"
-DECLARE_string(override_hostname);
+ABSL_DECLARE_FLAG(std::string, override_hostname);
namespace aos::logger::testing {
class RealtimeLoggerTest : public ::testing::Test {
protected:
RealtimeLoggerTest()
- : shm_dir_(aos::testing::TestTmpDir() + "/aos"),
- config_file_(
+ : config_file_(
aos::testing::ArtifactPath("aos/events/pingpong_config.json")),
config_(aos::configuration::ReadConfig(config_file_)),
event_loop_factory_(&config_.message()),
@@ -27,17 +26,12 @@
pong_(pong_event_loop_.get()),
tmpdir_(aos::testing::TestTmpDir()),
base_name_(tmpdir_ + "/logfile/") {
- FLAGS_shm_base = shm_dir_;
-
// Nuke the shm and log dirs, to ensure we aren't being affected by any
// preexisting tests.
- aos::util::UnlinkRecursive(shm_dir_);
+ aos::util::UnlinkRecursive(absl::GetFlag(FLAGS_shm_base));
aos::util::UnlinkRecursive(base_name_);
}
- gflags::FlagSaver flag_saver_;
- std::string shm_dir_;
-
const std::string config_file_;
const aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
@@ -54,8 +48,7 @@
class RealtimeMultiNodeLoggerTest : public ::testing::Test {
protected:
RealtimeMultiNodeLoggerTest()
- : shm_dir_(aos::testing::TestTmpDir() + "/aos"),
- config_file_(aos::testing::ArtifactPath(
+ : config_file_(aos::testing::ArtifactPath(
"aos/events/logging/multinode_pingpong_combined_config.json")),
config_(aos::configuration::ReadConfig(config_file_)),
event_loop_factory_(&config_.message()),
@@ -64,17 +57,12 @@
ping_(ping_event_loop_.get()),
tmpdir_(aos::testing::TestTmpDir()),
base_name_(tmpdir_ + "/logfile/") {
- FLAGS_shm_base = shm_dir_;
-
// Nuke the shm and log dirs, to ensure we aren't being affected by any
// preexisting tests.
- aos::util::UnlinkRecursive(shm_dir_);
+ aos::util::UnlinkRecursive(absl::GetFlag(FLAGS_shm_base));
aos::util::UnlinkRecursive(base_name_);
}
- gflags::FlagSaver flag_saver_;
- std::string shm_dir_;
-
const std::string config_file_;
const aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
@@ -178,7 +166,7 @@
// Tests that ReplayChannels causes no messages to be replayed other than what
// is included on a multi node config
TEST_F(RealtimeMultiNodeLoggerTest, ReplayChannelsPingTest) {
- FLAGS_override_hostname = "raspberrypi";
+ absl::SetFlag(&FLAGS_override_hostname, "raspberrypi");
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop(
@@ -223,7 +211,7 @@
// Tests that when remapping a channel included in ReplayChannels messages are
// sent on the remapped channel
TEST_F(RealtimeMultiNodeLoggerTest, RemappedReplayChannelsTest) {
- FLAGS_override_hostname = "raspberrypi";
+ absl::SetFlag(&FLAGS_override_hostname, "raspberrypi");
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop(
@@ -275,7 +263,7 @@
// exist in the log being replayed, and there's no messages on those
// channels as well.
TEST_F(RealtimeMultiNodeLoggerTest, DoesNotExistInReplayChannelsTest) {
- FLAGS_override_hostname = "raspberrypi";
+ absl::SetFlag(&FLAGS_override_hostname, "raspberrypi");
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop(
@@ -335,7 +323,7 @@
// the channel being remapped.
TEST_F(RealtimeMultiNodeLoggerDeathTest,
RemapLoggedChannelNotIncludedInReplayChannels) {
- FLAGS_override_hostname = "raspberrypi";
+ absl::SetFlag(&FLAGS_override_hostname, "raspberrypi");
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop(
diff --git a/aos/events/logging/s3_fetcher.cc b/aos/events/logging/s3_fetcher.cc
index 4207286..557c3da 100644
--- a/aos/events/logging/s3_fetcher.cc
+++ b/aos/events/logging/s3_fetcher.cc
@@ -3,8 +3,9 @@
#include <aws/core/Aws.h>
#include <aws/s3/model/ListObjectsV2Request.h>
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
-#include "glog/logging.h"
// When we first start reading a log folder, we end up reading the first part of
// each file twice. We could speed this up by restructuring the API so all the
diff --git a/aos/events/logging/single_node_merge.cc b/aos/events/logging/single_node_merge.cc
index a34e4c2..01eeb6b 100644
--- a/aos/events/logging/single_node_merge.cc
+++ b/aos/events/logging/single_node_merge.cc
@@ -2,7 +2,7 @@
#include <string>
#include <vector>
-#include "gflags/gflags.h"
+#include "absl/flags/flag.h"
#include "aos/events/logging/logfile_sorting.h"
#include "aos/events/logging/logfile_utils.h"
@@ -13,7 +13,7 @@
// log. It doesn't solve the timestamp problem, but is still quite useful for
// debugging what happened in a log.
-DEFINE_string(node, "", "The node to dump sorted messages for");
+ABSL_FLAG(std::string, node, "", "The node to dump sorted messages for");
namespace aos::logger {
@@ -39,7 +39,7 @@
// Filter the parts relevant to each node when building the mapper.
mappers.emplace_back(std::make_unique<TimestampMapper>(
node_name, log_files, TimestampQueueStrategy::kQueueTogether));
- if (node_name == FLAGS_node) {
+ if (node_name == absl::GetFlag(FLAGS_node)) {
node_mapper = mappers.back().get();
}
} else {
@@ -47,7 +47,8 @@
}
}
- CHECK(node_mapper != nullptr) << ": Failed to find node " << FLAGS_node;
+ CHECK(node_mapper != nullptr)
+ << ": Failed to find node " << absl::GetFlag(FLAGS_node);
// Hook the peers up so data gets matched.
for (std::unique_ptr<TimestampMapper> &mapper1 : mappers) {
@@ -61,7 +62,7 @@
// Now, read all the timestamps for each node. This is simpler than the
// logger on purpose. It loads in *all* the timestamps in 1 go per node,
// ignoring memory usage.
- const Node *node = configuration::GetNode(config, FLAGS_node);
+ const Node *node = configuration::GetNode(config, absl::GetFlag(FLAGS_node));
LOG(INFO) << "Reading all data for " << node->name()->string_view();
const size_t node_index = configuration::GetNodeIndex(config, node);
@@ -74,7 +75,7 @@
if (m == nullptr) {
break;
}
- std::cout << "on " << FLAGS_node << " from "
+ std::cout << "on " << absl::GetFlag(FLAGS_node) << " from "
<< config->nodes()
->Get(configuration::GetNodeIndex(
config, config->channels()
diff --git a/aos/events/logging/ssd_profiler.cc b/aos/events/logging/ssd_profiler.cc
index 093e17b..034ac28 100644
--- a/aos/events/logging/ssd_profiler.cc
+++ b/aos/events/logging/ssd_profiler.cc
@@ -8,8 +8,9 @@
#include <csignal>
#include <filesystem>
-#include "gflags/gflags.h"
-#include "glog/logging.h"
+#include "absl/flags/flag.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "aos/containers/resizeable_buffer.h"
#include "aos/events/logging/log_backend.h"
@@ -19,33 +20,34 @@
namespace chrono = std::chrono;
-DEFINE_string(file, "/media/sda1/foo", "File to write to.");
+ABSL_FLAG(std::string, file, "/media/sda1/foo", "File to write to.");
-DEFINE_uint32(write_size, 4096, "Size of hunk to write");
-DEFINE_bool(cleanup, true, "If true, delete the created file");
-DEFINE_int32(nice, -20,
- "Priority to nice to. Set to 0 to not change the priority.");
-DEFINE_bool(sync, false, "If true, sync the file after each written block.");
-DEFINE_bool(writev, false, "If true, use writev.");
-DEFINE_bool(direct, false, "If true, O_DIRECT.");
-DEFINE_uint32(chunks, 1, "Chunks to write using writev.");
-DEFINE_uint32(chunk_size, 512, "Chunk size to write using writev.");
-DEFINE_uint64(overall_size, 0,
- "If nonzero, write this many bytes and then stop. Must be a "
- "multiple of --write_size");
-DEFINE_bool(rate_limit, false,
- "If true, kick off writes every 100ms to mimic logger write "
- "patterns more correctly.");
-DEFINE_double(write_bandwidth, 120.0,
- "Write speed in MB/s to simulate. This is only used when "
- "--rate_limit is specified.");
+ABSL_FLAG(uint32_t, write_size, 4096, "Size of hunk to write");
+ABSL_FLAG(bool, cleanup, true, "If true, delete the created file");
+ABSL_FLAG(int32_t, nice, -20,
+ "Priority to nice to. Set to 0 to not change the priority.");
+ABSL_FLAG(bool, sync, false,
+ "If true, sync the file after each written block.");
+ABSL_FLAG(bool, writev, false, "If true, use writev.");
+ABSL_FLAG(bool, direct, false, "If true, O_DIRECT.");
+ABSL_FLAG(uint32_t, chunks, 1, "Chunks to write using writev.");
+ABSL_FLAG(uint32_t, chunk_size, 512, "Chunk size to write using writev.");
+ABSL_FLAG(uint64_t, overall_size, 0,
+ "If nonzero, write this many bytes and then stop. Must be a "
+ "multiple of --write_size");
+ABSL_FLAG(bool, rate_limit, false,
+ "If true, kick off writes every 100ms to mimic logger write "
+ "patterns more correctly.");
+ABSL_FLAG(double, write_bandwidth, 120.0,
+ "Write speed in MB/s to simulate. This is only used when "
+ "--rate_limit is specified.");
void trap_sig(int signum) { exit(signum); }
aos::monotonic_clock::time_point start_time = aos::monotonic_clock::min_time;
std::atomic<size_t> written_data = 0;
-void cleanup() {
+void Cleanup() {
LOG(INFO) << "Overall average write speed: "
<< ((written_data) /
chrono::duration<double>(aos::monotonic_clock::now() -
@@ -56,19 +58,20 @@
<< "MB";
// Delete FLAGS_file at shutdown
- PCHECK(std::filesystem::remove(FLAGS_file) != 0) << "Failed to cleanup file";
+ PCHECK(std::filesystem::remove(absl::GetFlag(FLAGS_file)) != 0)
+ << "Failed to cleanup file";
}
int main(int argc, char **argv) {
aos::InitGoogle(&argc, &argv);
// c++ needs bash's trap <fcn> EXIT
// instead we get this mess :(
- if (FLAGS_cleanup) {
+ if (absl::GetFlag(FLAGS_cleanup)) {
std::signal(SIGINT, trap_sig);
std::signal(SIGTERM, trap_sig);
std::signal(SIGKILL, trap_sig);
std::signal(SIGHUP, trap_sig);
- std::atexit(cleanup);
+ std::atexit(Cleanup);
}
aos::AllocatorResizeableBuffer<
aos::AlignedReallocator<aos::logger::FileHandler::kSector>>
@@ -79,7 +82,7 @@
// good sized block from /dev/random, and then reuse it.
int random_fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
PCHECK(random_fd != -1) << ": Failed to open /dev/random";
- data.resize(FLAGS_write_size);
+ data.resize(absl::GetFlag(FLAGS_write_size));
size_t written = 0;
while (written < data.size()) {
const size_t result =
@@ -92,23 +95,24 @@
}
std::vector<struct iovec> iovec;
- if (FLAGS_writev) {
- iovec.resize(FLAGS_chunks);
- CHECK_LE(FLAGS_chunks * FLAGS_chunk_size, FLAGS_write_size);
+ if (absl::GetFlag(FLAGS_writev)) {
+ const size_t chunks = absl::GetFlag(FLAGS_chunks);
+ const size_t chunk_size = absl::GetFlag(FLAGS_chunk_size);
+ iovec.resize(chunks);
+ CHECK_LE(chunks * chunk_size, absl::GetFlag(FLAGS_write_size));
- for (size_t i = 0; i < FLAGS_chunks; ++i) {
- iovec[i].iov_base = &data.at(i * FLAGS_chunk_size);
- iovec[i].iov_len = FLAGS_chunk_size;
+ for (size_t i = 0; i < chunks; ++i) {
+ iovec[i].iov_base = &data.at(i * chunk_size);
+ iovec[i].iov_len = chunk_size;
}
- iovec[FLAGS_chunks - 1].iov_base =
- &data.at((FLAGS_chunks - 1) * FLAGS_chunk_size);
- iovec[FLAGS_chunks - 1].iov_len =
- data.size() - (FLAGS_chunks - 1) * FLAGS_chunk_size;
+ iovec[chunks - 1].iov_base = &data.at((chunks - 1) * chunk_size);
+ iovec[chunks - 1].iov_len = data.size() - (chunks - 1) * chunk_size;
}
- int fd =
- open(FLAGS_file.c_str(),
- O_RDWR | O_CLOEXEC | O_CREAT | (FLAGS_direct ? O_DIRECT : 0), 0774);
+ int fd = open(absl::GetFlag(FLAGS_file).c_str(),
+ O_RDWR | O_CLOEXEC | O_CREAT |
+ (absl::GetFlag(FLAGS_direct) ? O_DIRECT : 0),
+ 0774);
PCHECK(fd != -1);
start_time = aos::monotonic_clock::now();
@@ -121,23 +125,24 @@
// want to write it in cycles of 100ms to simulate the logger.
size_t cycle_written_data = 0;
size_t data_per_cycle = std::numeric_limits<size_t>::max();
- if (FLAGS_rate_limit) {
- data_per_cycle =
- static_cast<size_t>((FLAGS_write_bandwidth * 1024 * 1024) / 10);
+ if (absl::GetFlag(FLAGS_rate_limit)) {
+ data_per_cycle = static_cast<size_t>(
+ (absl::GetFlag(FLAGS_write_bandwidth) * 1024 * 1024) / 10);
}
- if (FLAGS_nice != 0) {
- PCHECK(-1 != setpriority(PRIO_PROCESS, 0, FLAGS_nice))
- << ": Renicing to " << FLAGS_nice << " failed";
+ if (absl::GetFlag(FLAGS_nice) != 0) {
+ PCHECK(-1 != setpriority(PRIO_PROCESS, 0, absl::GetFlag(FLAGS_nice)))
+ << ": Renicing to " << absl::GetFlag(FLAGS_nice) << " failed";
}
while (true) {
// Bail if we have written our limit.
- if (written_data >= FLAGS_overall_size && FLAGS_overall_size != 0) {
+ if (written_data >= absl::GetFlag(FLAGS_overall_size) &&
+ absl::GetFlag(FLAGS_overall_size) != 0) {
break;
}
- if (FLAGS_writev) {
+ if (absl::GetFlag(FLAGS_writev)) {
PCHECK(writev(fd, iovec.data(), iovec.size()) ==
static_cast<ssize_t>(data.size()))
<< ": Failed after "
@@ -152,7 +157,7 @@
}
// Trigger a flush if asked.
- if (FLAGS_sync) {
+ if (absl::GetFlag(FLAGS_sync)) {
const aos::monotonic_clock::time_point monotonic_now =
aos::monotonic_clock::now();
sync_file_range(fd, written_data, data.size(), SYNC_FILE_RANGE_WRITE);
@@ -178,11 +183,12 @@
// Simulate the logger by writing the specified amount of data in periods of
// 100ms.
bool reset_cycle = false;
- if (cycle_written_data > data_per_cycle && FLAGS_rate_limit) {
+ if (cycle_written_data > data_per_cycle &&
+ absl::GetFlag(FLAGS_rate_limit)) {
// Check how much data we should have already written based on
// --write_bandwidth.
const size_t current_target =
- FLAGS_write_bandwidth * 1024 * 1024 *
+ absl::GetFlag(FLAGS_write_bandwidth) * 1024 * 1024 *
chrono::duration<double>(aos::monotonic_clock::now() - start_time)
.count();
const bool caught_up = written_data > current_target;
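One readability note from ssd_profiler.cc: because every flag read is now a function call, values used several times in a row (chunks, chunk_size) get cached in locals once rather than repeating absl::GetFlag() inside each expression. A sketch of that idiom, with hypothetical flag names:

#include <sys/uio.h>

#include <cstdint>
#include <vector>

#include "absl/flags/flag.h"

ABSL_FLAG(uint32_t, example_chunks, 1, "Chunks to write using writev.");
ABSL_FLAG(uint32_t, example_chunk_size, 512, "Chunk size to write using writev.");

std::vector<struct iovec> BuildIovecs(std::vector<uint8_t> &data) {
  // Read each flag once; the locals keep the loop below readable.
  const size_t chunks = absl::GetFlag(FLAGS_example_chunks);
  const size_t chunk_size = absl::GetFlag(FLAGS_example_chunk_size);
  std::vector<struct iovec> iovecs(chunks);
  for (size_t i = 0; i < chunks; ++i) {
    iovecs[i].iov_base = &data.at(i * chunk_size);  // at() bounds-checks.
    iovecs[i].iov_len = chunk_size;
  }
  return iovecs;
}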
diff --git a/aos/events/logging/timestamp_extractor.cc b/aos/events/logging/timestamp_extractor.cc
index 3cf96f6..00555d8 100644
--- a/aos/events/logging/timestamp_extractor.cc
+++ b/aos/events/logging/timestamp_extractor.cc
@@ -2,30 +2,32 @@
#include <string>
#include <vector>
-#include "gflags/gflags.h"
+#include "absl/flags/flag.h"
+#include "absl/flags/usage.h"
#include "aos/events/logging/logfile_sorting.h"
#include "aos/events/logging/logfile_utils.h"
#include "aos/events/logging/logfile_validator.h"
#include "aos/init.h"
-DECLARE_bool(timestamps_to_csv);
-DEFINE_bool(skip_order_validation, false,
- "If true, ignore any out of orderness in replay");
+ABSL_DECLARE_FLAG(bool, timestamps_to_csv);
+ABSL_FLAG(bool, skip_order_validation, false,
+ "If true, ignore any out of orderness in replay");
namespace aos::logger {
int Main(int argc, char **argv) {
const LogFilesContainer log_files(SortParts(FindLogs(argc, argv)));
- CHECK(MultiNodeLogIsReadable(log_files, FLAGS_skip_order_validation));
+ CHECK(MultiNodeLogIsReadable(log_files,
+ absl::GetFlag(FLAGS_skip_order_validation)));
return 0;
}
} // namespace aos::logger
int main(int argc, char **argv) {
- FLAGS_timestamps_to_csv = true;
- gflags::SetUsageMessage(
+ absl::SetFlag(&FLAGS_timestamps_to_csv, true);
+ absl::SetProgramUsageMessage(
"Usage:\n"
" timestamp_extractor [args] logfile1 logfile2 ...\n\nThis program "
"dumps out all the timestamps from a set of log files for plotting. Use "
diff --git a/aos/events/logging/timestamp_plot.cc b/aos/events/logging/timestamp_plot.cc
index 5b5cdc2..82e8031 100644
--- a/aos/events/logging/timestamp_plot.cc
+++ b/aos/events/logging/timestamp_plot.cc
@@ -1,3 +1,4 @@
+#include "absl/flags/flag.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
@@ -7,13 +8,13 @@
using aos::analysis::Plotter;
-DEFINE_bool(all, false, "If true, plot *all* the nodes at once");
-DEFINE_bool(bounds, false, "If true, plot the noncausal bounds too.");
-DEFINE_bool(samples, true, "If true, plot the samples too.");
+ABSL_FLAG(bool, all, false, "If true, plot *all* the nodes at once");
+ABSL_FLAG(bool, bounds, false, "If true, plot the noncausal bounds too.");
+ABSL_FLAG(bool, samples, true, "If true, plot the samples too.");
-DEFINE_string(offsets, "",
- "Offsets to add to the monotonic clock for each node. Use the "
- "format of node=offset,node=offest");
+ABSL_FLAG(std::string, offsets, "",
+          "Offsets to add to the monotonic clock for each node. Use the "
+          "format of node=offset,node=offset");
// Simple C++ application to read the CSV files and use the in process plotter
// to plot them. This smokes the pants off gnuplot in terms of interactivity.
@@ -112,8 +113,9 @@
public:
NodePlotter() : nodes_(Nodes()) {
plotter_.AddFigure("Time");
- if (!FLAGS_offsets.empty()) {
- for (std::string_view nodeoffset : absl::StrSplit(FLAGS_offsets, ',')) {
+ if (!absl::GetFlag(FLAGS_offsets).empty()) {
+ for (std::string_view nodeoffset :
+ absl::StrSplit(absl::GetFlag(FLAGS_offsets), ',')) {
std::vector<std::string_view> node_offset =
absl::StrSplit(nodeoffset, '=');
CHECK_EQ(node_offset.size(), 2u);
@@ -284,7 +286,7 @@
.color = "yellow",
.point_size = 2.0});
- if (FLAGS_samples) {
+ if (absl::GetFlag(FLAGS_samples)) {
plotter_.AddLine(samplefile12.first, samplefile12.second,
Plotter::LineOptions{
.label = absl::StrCat("sample ", node1, " ", node2),
@@ -299,7 +301,7 @@
});
}
- if (FLAGS_bounds) {
+ if (absl::GetFlag(FLAGS_bounds)) {
plotter_.AddLine(
noncausalfile12.first, noncausalfile12.second,
Plotter::LineOptions{.label = absl::StrCat("nc ", node1, " ", node2),
@@ -316,7 +318,7 @@
int Main(int argc, const char *const *argv) {
NodePlotter plotter;
- if (FLAGS_all) {
+ if (absl::GetFlag(FLAGS_all)) {
const std::vector<std::pair<std::string, std::string>> connections =
NodeConnections();
for (std::pair<std::string, std::string> ab : connections) {
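A final detail visible in timestamp_plot.cc: absl::GetFlag() returns the flag's value by copy, so for a std::string flag each call makes a fresh string. Where the value is consulted more than once (as with --offsets above), pulling it into a local first avoids the repeated copies. A sketch, with a hypothetical flag:

#include <string>
#include <vector>

#include "absl/flags/flag.h"
#include "absl/strings/str_split.h"

ABSL_FLAG(std::string, example_offsets, "",
          "Hypothetical comma-separated node=offset list, for illustration.");

std::vector<std::string> ParseOffsets() {
  // Copy the flag value out once instead of calling absl::GetFlag() per use.
  const std::string offsets = absl::GetFlag(FLAGS_example_offsets);
  if (offsets.empty()) {
    return {};
  }
  return absl::StrSplit(offsets, ',');
}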