Merge changes I17adbd9b,I436b1eb9,I4de2d56e,I5e26ff2a
* changes:
Combine figure and main window
Add toolbar to ArmUi
Combine ArmUi and segment selector
Add option to stop editing control points
diff --git a/aos/aos_cli_utils.cc b/aos/aos_cli_utils.cc
index a4ea95e..bd54e38 100644
--- a/aos/aos_cli_utils.cc
+++ b/aos/aos_cli_utils.cc
@@ -17,6 +17,11 @@
DEFINE_bool(
_bash_autocomplete, false,
"Internal use: Outputs channel list for use with autocomplete script.");
+
+DEFINE_bool(_zsh_compatability, false,
+            "Internal use: Force completion to complete either channels or "
+            "message_types; zsh doesn't handle spaces well.");
+
DEFINE_string(_bash_autocomplete_word, "",
"Internal use: Current word being autocompleted");
@@ -195,7 +200,9 @@
       // Otherwise, since the message type is populated yet not being edited,
// the user must be editing the channel name alone, in which case only
// suggest channel names, not pairs.
- if (message_type.empty()) {
+      // If the _zsh_compatability flag is set, don't return pairs of
+      // values.
+ if (!FLAGS__zsh_compatability && message_type.empty()) {
std::cout << '\'' << channel->name()->c_str() << ' '
<< channel->type()->c_str() << "' ";
} else {
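
For reference, the new flag only switches the suggestion format: with --_zsh_compatability set, single values are emitted instead of quoted "channel type" pairs. A minimal standalone sketch of that branching (hypothetical names, not the actual aos_cli_utils code; the single-value fallback in the else branch is an assumption):

#include <iostream>
#include <string>
#include <vector>

struct ChannelEntry {
  std::string name;
  std::string type;
};

// Hypothetical helper mirroring the branch above: quoted "name type" pairs
// let bash complete both words at once, while zsh gets one value at a time.
void PrintSuggestions(const std::vector<ChannelEntry> &channels,
                      bool zsh_compatibility, bool message_type_empty) {
  for (const ChannelEntry &channel : channels) {
    if (!zsh_compatibility && message_type_empty) {
      std::cout << '\'' << channel.name << ' ' << channel.type << "' ";
    } else {
      std::cout << channel.name << ' ';  // Assumed single-value fallback.
    }
  }
  std::cout << std::endl;
}

int main() {
  PrintSuggestions({{"/test", "aos.examples.Ping"}},
                   /*zsh_compatibility=*/true, /*message_type_empty=*/true);
}
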
diff --git a/aos/events/BUILD b/aos/events/BUILD
index ad3258c..08e3998 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -396,6 +396,7 @@
cc_test(
name = "shm_event_loop_test",
srcs = ["shm_event_loop_test.cc"],
+ flaky = True,
shard_count = 24,
target_compatible_with = ["@platforms//os:linux"],
deps = [
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 364cabb..07a751c 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -55,6 +55,7 @@
"log_replayer.cc",
],
target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
deps = [
":log_reader",
":log_reader_utils",
@@ -505,6 +506,30 @@
],
)
+cc_library(
+ name = "multinode_logger_test_lib",
+ testonly = True,
+ srcs = [
+ "multinode_logger_test_lib.cc",
+ ],
+ hdrs = ["multinode_logger_test_lib.h"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":log_reader",
+ ":log_writer",
+ ":snappy_encoder",
+ "//aos/events:message_counter",
+ "//aos/events:ping_lib",
+ "//aos/events:pong_lib",
+ "//aos/events:simulated_event_loop",
+ "//aos/network:testing_time_converter",
+ "//aos/testing:googletest",
+ "//aos/testing:path",
+ "//aos/testing:tmpdir",
+ ],
+)
+
aos_config(
name = "multinode_pingpong_split_config",
src = "multinode_pingpong_split.json",
@@ -617,8 +642,8 @@
)
cc_test(
- name = "logger_test",
- srcs = ["logger_test.cc"],
+ name = "multinode_logger_test",
+ srcs = ["multinode_logger_test.cc"],
copts = select({
"//tools:cpu_k8": ["-DLZMA=1"],
"//tools:cpu_arm64": ["-DLZMA=1"],
@@ -636,6 +661,24 @@
shard_count = 10,
target_compatible_with = ["@platforms//os:linux"],
deps = [
+ ":multinode_logger_test_lib",
+ ],
+)
+
+cc_test(
+ name = "logger_test",
+ srcs = ["logger_test.cc"],
+ copts = select({
+ "//tools:cpu_k8": ["-DLZMA=1"],
+ "//tools:cpu_arm64": ["-DLZMA=1"],
+ "//conditions:default": [],
+ }),
+ data = [
+ "//aos/events:pingpong_config",
+ ],
+ shard_count = 10,
+ target_compatible_with = ["@platforms//os:linux"],
+ deps = [
":log_reader",
":log_writer",
":snappy_encoder",
diff --git a/aos/events/logging/log_reader.cc b/aos/events/logging/log_reader.cc
index d3ce16b..fb63e79 100644
--- a/aos/events/logging/log_reader.cc
+++ b/aos/events/logging/log_reader.cc
@@ -617,8 +617,8 @@
filtered_parts.size() == 0u
? nullptr
: std::make_unique<TimestampMapper>(std::move(filtered_parts)),
- filters_.get(), node, State::ThreadedBuffering::kNo,
- MaybeMakeReplayChannelIndicies(node));
+ filters_.get(), std::bind(&LogReader::NoticeRealtimeEnd, this), node,
+ State::ThreadedBuffering::kNo, MaybeMakeReplayChannelIndicies(node));
State *state = states_[node_index].get();
state->SetNodeEventLoopFactory(
event_loop_factory_->GetNodeEventLoopFactory(node),
@@ -808,8 +808,8 @@
filtered_parts.size() == 0u
? nullptr
: std::make_unique<TimestampMapper>(std::move(filtered_parts)),
- filters_.get(), node, State::ThreadedBuffering::kYes,
- MaybeMakeReplayChannelIndicies(node));
+ filters_.get(), std::bind(&LogReader::NoticeRealtimeEnd, this), node,
+ State::ThreadedBuffering::kYes, MaybeMakeReplayChannelIndicies(node));
State *state = states_[node_index].get();
state->SetChannelCount(logged_configuration()->channels()->size());
@@ -964,7 +964,7 @@
VLOG(1) << MaybeNodeName(state->event_loop()->node()) << "Node down!";
if (exit_on_finish_ && live_nodes_ == 0 &&
event_loop_factory_ != nullptr) {
- CHECK_NOTNULL(event_loop_factory_)->Exit();
+ event_loop_factory_->Exit();
}
return;
}
@@ -1233,6 +1233,7 @@
}
if (end_time_ != realtime_clock::max_time) {
state->SetEndTimeFlag(end_time_);
+ ++live_nodes_with_realtime_time_end_;
}
event_loop->OnRun([state]() {
BootTimestamp next_time = state->SingleThreadedOldestMessageTime();
@@ -1745,9 +1746,11 @@
LogReader::State::State(
std::unique_ptr<TimestampMapper> timestamp_mapper,
message_bridge::MultiNodeNoncausalOffsetEstimator *multinode_filters,
- const Node *node, LogReader::State::ThreadedBuffering threading,
+ std::function<void()> notice_realtime_end, const Node *node,
+ LogReader::State::ThreadedBuffering threading,
std::unique_ptr<const ReplayChannelIndicies> replay_channel_indicies)
: timestamp_mapper_(std::move(timestamp_mapper)),
+ notice_realtime_end_(notice_realtime_end),
node_(node),
multinode_filters_(multinode_filters),
threading_(threading),
@@ -2347,6 +2350,8 @@
if (!stopped_ && started_) {
RunOnEnd();
SetFoundLastMessage(true);
+ CHECK(notice_realtime_end_);
+ notice_realtime_end_();
}
}
@@ -2372,5 +2377,14 @@
event_loop_factory_->SetRealtimeReplayRate(replay_rate);
}
+void LogReader::NoticeRealtimeEnd() {
+ CHECK_GE(live_nodes_with_realtime_time_end_, 1u);
+ --live_nodes_with_realtime_time_end_;
+ if (live_nodes_with_realtime_time_end_ == 0 && exit_on_finish() &&
+ event_loop_factory_ != nullptr) {
+ event_loop_factory_->Exit();
+ }
+}
+
} // namespace logger
} // namespace aos
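
The log_reader.cc change above wires a per-node callback (std::bind(&LogReader::NoticeRealtimeEnd, this)) into each State and pairs it with the live_nodes_with_realtime_time_end_ counter. A simplified sketch of that callback-plus-counter pattern (hypothetical Replay class, not the real LogReader/State API):

#include <cstddef>
#include <cstdio>
#include <functional>

// The owner hands each per-node state a "notice realtime end" callback and
// counts how many nodes still have a realtime end time pending; the replay
// only exits once the last one reports in.
class Replay {
 public:
  // Called once per node that was given an end time (SetEndTimeFlag above).
  std::function<void()> AddNodeWithEndTime() {
    ++live_nodes_with_realtime_end_;
    return [this]() { NoticeRealtimeEnd(); };
  }

 private:
  void NoticeRealtimeEnd() {
    --live_nodes_with_realtime_end_;
    if (live_nodes_with_realtime_end_ == 0) {
      // In LogReader this is where event_loop_factory_->Exit() runs, guarded
      // by exit_on_finish() and a null check.
      std::puts("all nodes hit their realtime end time; exiting replay");
    }
  }

  std::size_t live_nodes_with_realtime_end_ = 0;
};

int main() {
  Replay replay;
  std::function<void()> node1_done = replay.AddNodeWithEndTime();
  std::function<void()> node2_done = replay.AddNodeWithEndTime();
  node1_done();  // One node still running; nothing happens yet.
  node2_done();  // Last node done; the replay would exit here.
}
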
diff --git a/aos/events/logging/log_reader.h b/aos/events/logging/log_reader.h
index 0d50fb9..fd5a935 100644
--- a/aos/events/logging/log_reader.h
+++ b/aos/events/logging/log_reader.h
@@ -260,6 +260,7 @@
void set_exit_on_finish(bool exit_on_finish) {
exit_on_finish_ = exit_on_finish;
}
+ bool exit_on_finish() const { return exit_on_finish_; }
// Sets the realtime replay rate. A value of 1.0 will cause the scheduler to
// try to play events in realtime. 0.5 will run at half speed. Use infinity
@@ -289,6 +290,10 @@
: logged_configuration()->nodes()->size();
}
+  // Handles when an individual node hits the realtime end time, exiting the
+ // entire event loop once all nodes are stopped.
+ void NoticeRealtimeEnd();
+
const std::vector<LogFile> log_files_;
// Class to manage sending RemoteMessages on the provided node after the
@@ -343,7 +348,8 @@
enum class ThreadedBuffering { kYes, kNo };
State(std::unique_ptr<TimestampMapper> timestamp_mapper,
message_bridge::MultiNodeNoncausalOffsetEstimator *multinode_filters,
- const Node *node, ThreadedBuffering threading,
+ std::function<void()> notice_realtime_end, const Node *node,
+ ThreadedBuffering threading,
std::unique_ptr<const ReplayChannelIndicies> replay_channel_indicies);
// Connects up the timestamp mappers.
@@ -667,6 +673,9 @@
NodeEventLoopFactory *node_event_loop_factory_ = nullptr;
SimulatedEventLoopFactory *event_loop_factory_ = nullptr;
+ // Callback for when this node hits its realtime end time.
+ std::function<void()> notice_realtime_end_;
+
std::unique_ptr<EventLoop> event_loop_unique_ptr_;
// Event loop.
const Node *node_ = nullptr;
@@ -785,6 +794,10 @@
// when to exit.
size_t live_nodes_ = 0;
+  // Similar counter to live_nodes_, but tracking how many individual nodes
+  // are still running and have yet to hit their realtime end time, if any.
+ size_t live_nodes_with_realtime_time_end_ = 0;
+
const Configuration *remapped_configuration_ = nullptr;
const Configuration *replay_configuration_ = nullptr;
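
Taken together with the log_reader.cc half of this change, the intent is that a replay with an end time set exits once every live node has reached that realtime end time, rather than only when the log data runs out. A hedged usage sketch (include paths and namespaces assumed from this diff; the file list is a placeholder), patterned on the ConfirmReadable() helper in the tests below:

#include <string>
#include <vector>

#include "aos/events/logging/log_reader.h"
#include "aos/events/simulated_event_loop.h"

// Sketch only: replay a set of log parts up to end_time and rely on the new
// live_nodes_with_realtime_time_end_ bookkeeping to stop the replay once
// every node has reached that realtime end time.
void ReplayUntil(const std::vector<std::string> &files,
                 aos::realtime_clock::time_point end_time) {
  aos::logger::LogReader reader(aos::logger::SortParts(files));
  reader.SetEndTime(end_time);
  reader.set_exit_on_finish(true);

  aos::SimulatedEventLoopFactory log_reader_factory(reader.configuration());
  reader.Register(&log_reader_factory);

  // Run() now returns once the last live node hits end_time
  // (NoticeRealtimeEnd drives event_loop_factory_->Exit()), not only when
  // the log data itself is exhausted.
  log_reader_factory.Run();
  reader.Deregister();
}
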
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index cdf080b..80b3a4a 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -27,30 +27,14 @@
namespace logger {
namespace testing {
-using aos::testing::ArtifactPath;
-
namespace chrono = std::chrono;
using aos::message_bridge::RemoteMessage;
+using aos::testing::ArtifactPath;
using aos::testing::MessageCounter;
constexpr std::string_view kSingleConfigSha256(
"bbe1b563139273b23a5405eebc2f2740cefcda5f96681acd0a84b8ff9ab93ea4");
-std::vector<std::vector<std::string>> ToLogReaderVector(
- const std::vector<LogFile> &log_files) {
- std::vector<std::vector<std::string>> result;
- for (const LogFile &log_file : log_files) {
- for (const LogParts &log_parts : log_file.parts) {
- std::vector<std::string> parts;
- for (const std::string &part : log_parts.parts) {
- parts.emplace_back(part);
- }
- result.emplace_back(std::move(parts));
- }
- }
- return result;
-}
-
class LoggerTest : public ::testing::Test {
public:
LoggerTest()
@@ -538,4250 +522,6 @@
EXPECT_EQ(replay_count, sent_messages);
}
-struct CompressionParams {
- std::string_view extension;
- std::function<std::unique_ptr<DataEncoder>(size_t max_message_size)>
- encoder_factory;
-};
-
-std::ostream &operator<<(std::ostream &ostream,
- const CompressionParams ¶ms) {
- ostream << "\"" << params.extension << "\"";
- return ostream;
-}
-
-std::vector<CompressionParams> SupportedCompressionAlgorithms() {
- return {{"",
- [](size_t max_message_size) {
- return std::make_unique<DummyEncoder>(max_message_size);
- }},
- {SnappyDecoder::kExtension,
- [](size_t max_message_size) {
- return std::make_unique<SnappyEncoder>(max_message_size, 32768);
- }},
-#ifdef LZMA
- {LzmaDecoder::kExtension,
- [](size_t max_message_size) {
- return std::make_unique<LzmaEncoder>(max_message_size, 3);
- }}
-#endif // LZMA
- };
-}
-
-// Parameters to run all the tests with.
-struct ConfigParams {
- // The config file to use.
- std::string config;
- // If true, the RemoteMessage channel should be shared between all the remote
- // channels. If false, there will be 1 RemoteMessage channel per remote
- // channel.
- bool shared;
- // sha256 of the config.
- std::string_view sha256;
- // sha256 of the relogged config
- std::string_view relogged_sha256;
-};
-
-std::ostream &operator<<(std::ostream &ostream, const ConfigParams ¶ms) {
- ostream << "{config: \"" << params.config << "\", shared: " << params.shared
- << ", sha256: \"" << params.sha256 << "\", relogged_sha256: \""
- << params.relogged_sha256 << "\"}";
- return ostream;
-}
-
-struct LoggerState {
- static LoggerState MakeLogger(NodeEventLoopFactory *node,
- SimulatedEventLoopFactory *factory,
- CompressionParams params,
- const Configuration *configuration = nullptr) {
- if (configuration == nullptr) {
- configuration = factory->configuration();
- }
- return {node->MakeEventLoop("logger"),
- {},
- configuration,
- configuration::GetNode(configuration, node->node()),
- nullptr,
- params};
- }
-
- void StartLogger(std::string logfile_base) {
- CHECK(!logfile_base.empty());
-
- logger = std::make_unique<Logger>(event_loop.get(), configuration);
- logger->set_polling_period(std::chrono::milliseconds(100));
- logger->set_name(
- absl::StrCat("name_prefix_", event_loop->node()->name()->str()));
- logger->set_logger_sha1(
- absl::StrCat("logger_sha1_", event_loop->node()->name()->str()));
- logger->set_logger_version(
- absl::StrCat("logger_version_", event_loop->node()->name()->str()));
- event_loop->OnRun([this, logfile_base]() {
- std::unique_ptr<MultiNodeLogNamer> namer =
- std::make_unique<MultiNodeLogNamer>(logfile_base, configuration,
- event_loop.get(), node);
- namer->set_extension(params.extension);
- namer->set_encoder_factory(params.encoder_factory);
- log_namer = namer.get();
-
- logger->StartLogging(std::move(namer));
- });
- }
-
- std::unique_ptr<EventLoop> event_loop;
- std::unique_ptr<Logger> logger;
- const Configuration *configuration;
- const Node *node;
- MultiNodeLogNamer *log_namer;
- CompressionParams params;
-
- void AppendAllFilenames(std::vector<std::string> *filenames) {
- for (const std::string &file : log_namer->all_filenames()) {
- const std::string_view separator =
- log_namer->base_name().back() == '/' ? "" : "_";
- filenames->emplace_back(
- absl::StrCat(log_namer->base_name(), separator, file));
- }
- }
-
- ~LoggerState() {
- if (logger) {
- std::vector<std::string> filenames;
- AppendAllFilenames(&filenames);
- std::sort(filenames.begin(), filenames.end());
- for (const std::string &file : filenames) {
- LOG(INFO) << "Wrote to " << file;
- auto x = ReadHeader(file);
- if (x) {
- VLOG(1) << aos::FlatbufferToJson(x.value());
- }
- }
- }
- }
-};
-
-std::vector<std::pair<std::vector<realtime_clock::time_point>,
- std::vector<realtime_clock::time_point>>>
-ConfirmReadable(
- const std::vector<std::string> &files,
- realtime_clock::time_point start_time = realtime_clock::min_time,
- realtime_clock::time_point end_time = realtime_clock::max_time) {
- {
- LogReader reader(SortParts(files));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- reader.Register(&log_reader_factory);
-
- log_reader_factory.Run();
-
- reader.Deregister();
- }
- {
- std::vector<std::pair<std::vector<realtime_clock::time_point>,
- std::vector<realtime_clock::time_point>>>
- result;
- LogReader reader(SortParts(files));
-
- reader.SetStartTime(start_time);
- reader.SetEndTime(end_time);
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- reader.RegisterWithoutStarting(&log_reader_factory);
- result.resize(
- configuration::NodesCount(log_reader_factory.configuration()));
- if (configuration::MultiNode(log_reader_factory.configuration())) {
- size_t i = 0;
- for (const aos::Node *node :
- *log_reader_factory.configuration()->nodes()) {
- LOG(INFO) << "Registering start";
- reader.OnStart(node, [node, &log_reader_factory, &result,
- node_index = i]() {
- LOG(INFO) << "Starting " << node->name()->string_view();
- result[node_index].first.push_back(
- log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
- });
- reader.OnEnd(node, [node, &log_reader_factory, &result,
- node_index = i]() {
- LOG(INFO) << "Ending " << node->name()->string_view();
- result[node_index].second.push_back(
- log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
- });
- ++i;
- }
- } else {
- reader.OnStart([&log_reader_factory, &result]() {
- LOG(INFO) << "Starting";
- result[0].first.push_back(
- log_reader_factory.GetNodeEventLoopFactory(nullptr)
- ->realtime_now());
- });
- reader.OnEnd([&log_reader_factory, &result]() {
- LOG(INFO) << "Ending";
- result[0].second.push_back(
- log_reader_factory.GetNodeEventLoopFactory(nullptr)
- ->realtime_now());
- });
- }
-
- log_reader_factory.Run();
-
- reader.Deregister();
-
- for (auto x : result) {
- for (auto y : x.first) {
- VLOG(1) << "Start " << y;
- }
- for (auto y : x.second) {
- VLOG(1) << "End " << y;
- }
- }
- return result;
- }
-}
-
-class MultinodeLoggerTest : public ::testing::TestWithParam<
- std::tuple<ConfigParams, CompressionParams>> {
- public:
- MultinodeLoggerTest()
- : config_(aos::configuration::ReadConfig(ArtifactPath(absl::StrCat(
- "aos/events/logging/", std::get<0>(GetParam()).config)))),
- time_converter_(configuration::NodesCount(&config_.message())),
- event_loop_factory_(&config_.message()),
- pi1_(event_loop_factory_.GetNodeEventLoopFactory("pi1")),
- pi1_index_(configuration::GetNodeIndex(
- event_loop_factory_.configuration(), pi1_->node())),
- pi2_(event_loop_factory_.GetNodeEventLoopFactory("pi2")),
- pi2_index_(configuration::GetNodeIndex(
- event_loop_factory_.configuration(), pi2_->node())),
- tmp_dir_(aos::testing::TestTmpDir()),
- logfile_base1_(tmp_dir_ + "/multi_logfile1"),
- logfile_base2_(tmp_dir_ + "/multi_logfile2"),
- pi1_reboot_logfiles_(MakePi1RebootLogfiles()),
- logfiles_(MakeLogFiles(logfile_base1_, logfile_base2_)),
- pi1_single_direction_logfiles_(MakePi1SingleDirectionLogfiles()),
- structured_logfiles_(StructureLogFiles()) {
- LOG(INFO) << "Config " << std::get<0>(GetParam()).config;
- event_loop_factory_.SetTimeConverter(&time_converter_);
-
- // Go through and remove the logfiles if they already exist.
- for (const auto &file : logfiles_) {
- unlink(file.c_str());
- unlink((file + ".xz").c_str());
- }
-
- for (const auto &file : MakeLogFiles(tmp_dir_ + "/relogged1",
- tmp_dir_ + "/relogged2", 3, 3, true)) {
- unlink(file.c_str());
- }
-
- for (const auto &file : pi1_reboot_logfiles_) {
- unlink(file.c_str());
- }
-
- LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
- << " and " << logfiles_[2];
-
- pi1_->OnStartup([this]() { pi1_->AlwaysStart<Ping>("ping"); });
- pi2_->OnStartup([this]() { pi2_->AlwaysStart<Pong>("pong"); });
- }
-
- bool shared() const { return std::get<0>(GetParam()).shared; }
-
- std::vector<std::string> MakeLogFiles(std::string logfile_base1,
- std::string logfile_base2,
- size_t pi1_data_count = 3,
- size_t pi2_data_count = 3,
- bool relogged_config = false) {
- std::string_view sha256 = relogged_config
- ? std::get<0>(GetParam()).relogged_sha256
- : std::get<0>(GetParam()).sha256;
- std::vector<std::string> result;
- result.emplace_back(absl::StrCat(logfile_base1, "_", sha256, Extension()));
- result.emplace_back(absl::StrCat(logfile_base2, "_", sha256, Extension()));
- for (size_t i = 0; i < pi1_data_count; ++i) {
- result.emplace_back(
- absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
- }
- result.emplace_back(logfile_base1 +
- "_pi2_data/test/aos.examples.Pong.part0" + Extension());
- result.emplace_back(logfile_base1 +
- "_pi2_data/test/aos.examples.Pong.part1" + Extension());
- for (size_t i = 0; i < pi2_data_count; ++i) {
- result.emplace_back(
- absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
- }
- result.emplace_back(logfile_base2 +
- "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part0" +
- Extension());
- result.emplace_back(logfile_base2 +
- "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part1" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
- Extension());
- if (shared()) {
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/"
- "aos.message_bridge.RemoteMessage.part2" +
- Extension());
- result.emplace_back(logfile_base2 +
- "_timestamps/pi2/aos/remote_timestamps/pi1/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base2 +
- "_timestamps/pi2/aos/remote_timestamps/pi1/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- } else {
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- result.emplace_back(logfile_base2 +
- "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base2 +
- "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base1 +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- }
-
- return result;
- }
-
- std::vector<std::string> MakePi1RebootLogfiles() {
- std::vector<std::string> result;
- result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
- result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
- result.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
- result.emplace_back(logfile_base1_ + "_pi1_data.part3" + Extension());
- result.emplace_back(logfile_base1_ + "_pi1_data.part4" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/test/aos.examples.Pong.part0" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/test/aos.examples.Pong.part1" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/test/aos.examples.Pong.part2" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/test/aos.examples.Pong.part3" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part2" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part3" +
- Extension());
- result.emplace_back(absl::StrCat(
- logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
- if (shared()) {
- for (size_t i = 0; i < 6; ++i) {
- result.emplace_back(
- absl::StrCat(logfile_base1_,
- "_timestamps/pi1/aos/remote_timestamps/pi2/"
- "aos.message_bridge.RemoteMessage.part",
- i, Extension()));
- }
- } else {
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part2" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part3" +
- Extension());
-
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part0" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part1" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part2" +
- Extension());
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part3" +
- Extension());
- }
- return result;
- }
-
- std::vector<std::string> MakePi1SingleDirectionLogfiles() {
- std::vector<std::string> result;
- result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
- result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
- result.emplace_back(logfile_base1_ +
- "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
- Extension());
- result.emplace_back(absl::StrCat(
- logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
- return result;
- }
-
- std::vector<std::string> MakePi1DeadNodeLogfiles() {
- std::vector<std::string> result;
- result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
- result.emplace_back(absl::StrCat(
- logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
- return result;
- }
-
- std::vector<std::vector<std::string>> StructureLogFiles() {
- std::vector<std::vector<std::string>> result{
- std::vector<std::string>{logfiles_[2], logfiles_[3], logfiles_[4]},
- std::vector<std::string>{logfiles_[5], logfiles_[6]},
- std::vector<std::string>{logfiles_[7], logfiles_[8], logfiles_[9]},
- std::vector<std::string>{logfiles_[10], logfiles_[11]},
- std::vector<std::string>{logfiles_[12], logfiles_[13]}};
-
- if (shared()) {
- result.emplace_back(std::vector<std::string>{logfiles_[14], logfiles_[15],
- logfiles_[16]});
- result.emplace_back(
- std::vector<std::string>{logfiles_[17], logfiles_[18]});
- } else {
- result.emplace_back(
- std::vector<std::string>{logfiles_[14], logfiles_[15]});
- result.emplace_back(
- std::vector<std::string>{logfiles_[16], logfiles_[17]});
- result.emplace_back(
- std::vector<std::string>{logfiles_[18], logfiles_[19]});
- }
-
- return result;
- }
-
- std::string Extension() {
- return absl::StrCat(".bfbs", std::get<1>(GetParam()).extension);
- }
-
- LoggerState MakeLogger(NodeEventLoopFactory *node,
- SimulatedEventLoopFactory *factory = nullptr,
- const Configuration *configuration = nullptr) {
- if (factory == nullptr) {
- factory = &event_loop_factory_;
- }
- return LoggerState::MakeLogger(node, factory, std::get<1>(GetParam()),
- configuration);
- }
-
- void StartLogger(LoggerState *logger, std::string logfile_base = "") {
- if (logfile_base.empty()) {
- if (logger->event_loop->node()->name()->string_view() == "pi1") {
- logfile_base = logfile_base1_;
- } else {
- logfile_base = logfile_base2_;
- }
- }
- logger->StartLogger(logfile_base);
- }
-
- void VerifyParts(const std::vector<LogFile> &sorted_parts,
- const std::vector<std::string> &corrupted_parts = {}) {
- EXPECT_EQ(sorted_parts.size(), 2u);
-
- // Count up the number of UUIDs and make sure they are what we expect as a
- // sanity check.
- std::set<std::string> log_event_uuids;
- std::set<std::string> parts_uuids;
- std::set<std::string> both_uuids;
-
- size_t missing_rt_count = 0;
-
- std::vector<std::string> logger_nodes;
- for (const LogFile &log_file : sorted_parts) {
- EXPECT_FALSE(log_file.log_event_uuid.empty());
- log_event_uuids.insert(log_file.log_event_uuid);
- logger_nodes.emplace_back(log_file.logger_node);
- both_uuids.insert(log_file.log_event_uuid);
- EXPECT_TRUE(log_file.config);
- EXPECT_EQ(log_file.name,
- absl::StrCat("name_prefix_", log_file.logger_node));
- EXPECT_EQ(log_file.logger_sha1,
- absl::StrCat("logger_sha1_", log_file.logger_node));
- EXPECT_EQ(log_file.logger_version,
- absl::StrCat("logger_version_", log_file.logger_node));
-
- for (const LogParts &part : log_file.parts) {
- EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
- << ": " << part;
- missing_rt_count +=
- part.realtime_start_time == aos::realtime_clock::min_time;
-
- EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
- log_event_uuids.end());
- EXPECT_NE(part.node, "");
- EXPECT_TRUE(log_file.config);
- parts_uuids.insert(part.parts_uuid);
- both_uuids.insert(part.parts_uuid);
- }
- }
-
- // We won't have RT timestamps for 5 or 6 log files. We don't log the RT
- // start time on remote nodes because we don't know it and would be
- // guessing. And the log reader can actually do a better job. The number
- // depends on if we have the remote timestamps split across 2 files, or just
- // across 1, depending on if we are using a split or combined timestamp
- // channel config.
- EXPECT_EQ(missing_rt_count, shared() ? 5u : 6u);
-
- EXPECT_EQ(log_event_uuids.size(), 2u);
- EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
- EXPECT_EQ(log_event_uuids.size() + parts_uuids.size(), both_uuids.size());
-
- // Test that each list of parts is in order. Don't worry about the ordering
- // between part file lists though.
- // (inner vectors all need to be in order, but outer one doesn't matter).
- ASSERT_THAT(ToLogReaderVector(sorted_parts),
- ::testing::UnorderedElementsAreArray(structured_logfiles_));
-
- EXPECT_THAT(logger_nodes, ::testing::UnorderedElementsAre("pi1", "pi2"));
-
- EXPECT_NE(sorted_parts[0].realtime_start_time,
- aos::realtime_clock::min_time);
- EXPECT_NE(sorted_parts[1].realtime_start_time,
- aos::realtime_clock::min_time);
-
- EXPECT_NE(sorted_parts[0].monotonic_start_time,
- aos::monotonic_clock::min_time);
- EXPECT_NE(sorted_parts[1].monotonic_start_time,
- aos::monotonic_clock::min_time);
-
- EXPECT_THAT(sorted_parts[0].corrupted, ::testing::Eq(corrupted_parts));
- EXPECT_THAT(sorted_parts[1].corrupted, ::testing::Eq(corrupted_parts));
- }
-
- void AddExtension(std::string_view extension) {
- std::transform(logfiles_.begin(), logfiles_.end(), logfiles_.begin(),
- [extension](const std::string &in) {
- return absl::StrCat(in, extension);
- });
-
- std::transform(structured_logfiles_.begin(), structured_logfiles_.end(),
- structured_logfiles_.begin(),
- [extension](std::vector<std::string> in) {
- std::transform(in.begin(), in.end(), in.begin(),
- [extension](const std::string &in_str) {
- return absl::StrCat(in_str, extension);
- });
- return in;
- });
- }
-
- // Config and factory.
- aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
- message_bridge::TestingTimeConverter time_converter_;
- SimulatedEventLoopFactory event_loop_factory_;
-
- NodeEventLoopFactory *const pi1_;
- const size_t pi1_index_;
- NodeEventLoopFactory *const pi2_;
- const size_t pi2_index_;
-
- std::string tmp_dir_;
- std::string logfile_base1_;
- std::string logfile_base2_;
- std::vector<std::string> pi1_reboot_logfiles_;
- std::vector<std::string> logfiles_;
- std::vector<std::string> pi1_single_direction_logfiles_;
-
- std::vector<std::vector<std::string>> structured_logfiles_;
-};
-
-// Counts the number of messages on a channel. Returns (channel name, channel
-// type, count) for every message matching matcher()
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
- std::shared_ptr<const aos::Configuration> config, std::string_view filename,
- std::function<bool(const UnpackedMessageHeader *)> matcher) {
- MessageReader message_reader(filename);
- std::vector<int> counts(config->channels()->size(), 0);
-
- while (true) {
- std::shared_ptr<UnpackedMessageHeader> msg = message_reader.ReadMessage();
- if (!msg) {
- break;
- }
-
- if (matcher(msg.get())) {
- counts[msg->channel_index]++;
- }
- }
-
- std::vector<std::tuple<std::string, std::string, int>> result;
- int channel = 0;
- for (size_t i = 0; i < counts.size(); ++i) {
- if (counts[i] != 0) {
- const Channel *channel = config->channels()->Get(i);
- result.push_back(std::make_tuple(channel->name()->str(),
- channel->type()->str(), counts[i]));
- }
- ++channel;
- }
-
- return result;
-}
-
-// Counts the number of messages (channel, count) for all data messages.
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
- std::shared_ptr<const aos::Configuration> config,
- std::string_view filename) {
- return CountChannelsMatching(
- config, filename, [](const UnpackedMessageHeader *msg) {
- if (msg->span.data() != nullptr) {
- CHECK(!msg->monotonic_remote_time.has_value());
- CHECK(!msg->realtime_remote_time.has_value());
- CHECK(!msg->remote_queue_index.has_value());
- return true;
- }
- return false;
- });
-}
-
-// Counts the number of messages (channel, count) for all timestamp messages.
-std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
- std::shared_ptr<const aos::Configuration> config,
- std::string_view filename) {
- return CountChannelsMatching(
- config, filename, [](const UnpackedMessageHeader *msg) {
- if (msg->span.data() == nullptr) {
- CHECK(msg->monotonic_remote_time.has_value());
- CHECK(msg->realtime_remote_time.has_value());
- CHECK(msg->remote_queue_index.has_value());
- return true;
- }
- return false;
- });
-}
-
-// Tests that we can write and read simple multi-node log files.
-TEST_P(MultinodeLoggerTest, SimpleMultiNode) {
- std::vector<std::string> actual_filenames;
- time_converter_.StartEqual();
-
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- pi1_logger.AppendAllFilenames(&actual_filenames);
- pi2_logger.AppendAllFilenames(&actual_filenames);
- }
-
- ASSERT_THAT(actual_filenames,
- ::testing::UnorderedElementsAreArray(logfiles_));
-
- {
- std::set<std::string> logfile_uuids;
- std::set<std::string> parts_uuids;
- // Confirm that we have the expected number of UUIDs for both the logfile
- // UUIDs and parts UUIDs.
- std::vector<SizePrefixedFlatbufferVector<LogFileHeader>> log_header;
- for (std::string_view f : logfiles_) {
- log_header.emplace_back(ReadHeader(f).value());
- if (!log_header.back().message().has_configuration()) {
- logfile_uuids.insert(
- log_header.back().message().log_event_uuid()->str());
- parts_uuids.insert(log_header.back().message().parts_uuid()->str());
- }
- }
-
- EXPECT_EQ(logfile_uuids.size(), 2u);
- if (shared()) {
- EXPECT_EQ(parts_uuids.size(), 7u);
- } else {
- EXPECT_EQ(parts_uuids.size(), 8u);
- }
-
- // And confirm everything is on the correct node.
- EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi1");
- EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi1");
- EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi1");
-
- EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi2");
-
- EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi2");
-
- EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi1");
- EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi1");
-
- EXPECT_EQ(log_header[12].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[13].message().node()->name()->string_view(), "pi2");
-
- if (shared()) {
- EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi2");
-
- EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
- EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi1");
- } else {
- EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
-
- EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi1");
- EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
-
- EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi2");
- EXPECT_EQ(log_header[19].message().node()->name()->string_view(), "pi2");
- }
-
- // And the parts index matches.
- EXPECT_EQ(log_header[2].message().parts_index(), 0);
- EXPECT_EQ(log_header[3].message().parts_index(), 1);
- EXPECT_EQ(log_header[4].message().parts_index(), 2);
-
- EXPECT_EQ(log_header[5].message().parts_index(), 0);
- EXPECT_EQ(log_header[6].message().parts_index(), 1);
-
- EXPECT_EQ(log_header[7].message().parts_index(), 0);
- EXPECT_EQ(log_header[8].message().parts_index(), 1);
- EXPECT_EQ(log_header[9].message().parts_index(), 2);
-
- EXPECT_EQ(log_header[10].message().parts_index(), 0);
- EXPECT_EQ(log_header[11].message().parts_index(), 1);
-
- EXPECT_EQ(log_header[12].message().parts_index(), 0);
- EXPECT_EQ(log_header[13].message().parts_index(), 1);
-
- if (shared()) {
- EXPECT_EQ(log_header[14].message().parts_index(), 0);
- EXPECT_EQ(log_header[15].message().parts_index(), 1);
- EXPECT_EQ(log_header[16].message().parts_index(), 2);
-
- EXPECT_EQ(log_header[17].message().parts_index(), 0);
- EXPECT_EQ(log_header[18].message().parts_index(), 1);
- } else {
- EXPECT_EQ(log_header[14].message().parts_index(), 0);
- EXPECT_EQ(log_header[15].message().parts_index(), 1);
-
- EXPECT_EQ(log_header[16].message().parts_index(), 0);
- EXPECT_EQ(log_header[17].message().parts_index(), 1);
-
- EXPECT_EQ(log_header[18].message().parts_index(), 0);
- EXPECT_EQ(log_header[19].message().parts_index(), 1);
- }
- }
-
- const std::vector<LogFile> sorted_log_files = SortParts(logfiles_);
- {
- using ::testing::UnorderedElementsAre;
- std::shared_ptr<const aos::Configuration> config =
- sorted_log_files[0].config;
-
- // Timing reports, pings
- EXPECT_THAT(CountChannelsData(config, logfiles_[2]),
- UnorderedElementsAre(
- std::make_tuple("/pi1/aos",
- "aos.message_bridge.ServerStatistics", 1),
- std::make_tuple("/test", "aos.examples.Ping", 1)))
- << " : " << logfiles_[2];
- {
- std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
- std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 1),
- std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
- 1)};
- if (!std::get<0>(GetParam()).shared) {
- channel_counts.push_back(
- std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp",
- "aos.message_bridge.RemoteMessage", 1));
- }
- EXPECT_THAT(CountChannelsData(config, logfiles_[3]),
- ::testing::UnorderedElementsAreArray(channel_counts))
- << " : " << logfiles_[3];
- }
- {
- std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
- std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 199),
- std::make_tuple("/pi1/aos", "aos.message_bridge.ServerStatistics",
- 20),
- std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
- 199),
- std::make_tuple("/pi1/aos", "aos.timing.Report", 40),
- std::make_tuple("/test", "aos.examples.Ping", 2000)};
- if (!std::get<0>(GetParam()).shared) {
- channel_counts.push_back(
- std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp",
- "aos.message_bridge.RemoteMessage", 199));
- }
- EXPECT_THAT(CountChannelsData(config, logfiles_[4]),
- ::testing::UnorderedElementsAreArray(channel_counts))
- << " : " << logfiles_[4];
- }
- // Timestamps for pong
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[2]),
- UnorderedElementsAre())
- << " : " << logfiles_[2];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[3]),
- UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
- << " : " << logfiles_[3];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[4]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Pong", 2000),
- std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200)))
- << " : " << logfiles_[4];
-
- // Pong data.
- EXPECT_THAT(
- CountChannelsData(config, logfiles_[5]),
- UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 91)))
- << " : " << logfiles_[5];
- EXPECT_THAT(CountChannelsData(config, logfiles_[6]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Pong", 1910)))
- << " : " << logfiles_[6];
-
- // No timestamps
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[5]),
- UnorderedElementsAre())
- << " : " << logfiles_[5];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[6]),
- UnorderedElementsAre())
- << " : " << logfiles_[6];
-
- // Timing reports and pongs.
- EXPECT_THAT(CountChannelsData(config, logfiles_[7]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.ServerStatistics", 1)))
- << " : " << logfiles_[7];
- EXPECT_THAT(
- CountChannelsData(config, logfiles_[8]),
- UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
- << " : " << logfiles_[8];
- EXPECT_THAT(
- CountChannelsData(config, logfiles_[9]),
- UnorderedElementsAre(
- std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200),
- std::make_tuple("/pi2/aos", "aos.message_bridge.ServerStatistics",
- 20),
- std::make_tuple("/pi2/aos", "aos.message_bridge.ClientStatistics",
- 200),
- std::make_tuple("/pi2/aos", "aos.timing.Report", 40),
- std::make_tuple("/test", "aos.examples.Pong", 2000)))
- << " : " << logfiles_[9];
- // And ping timestamps.
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[7]),
- UnorderedElementsAre())
- << " : " << logfiles_[7];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[8]),
- UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Ping", 1)))
- << " : " << logfiles_[8];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[9]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Ping", 2000),
- std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200)))
- << " : " << logfiles_[9];
-
- // And then test that the remotely logged timestamp data files only have
- // timestamps in them.
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[10]),
- UnorderedElementsAre())
- << " : " << logfiles_[10];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[11]),
- UnorderedElementsAre())
- << " : " << logfiles_[11];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[12]),
- UnorderedElementsAre())
- << " : " << logfiles_[12];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[13]),
- UnorderedElementsAre())
- << " : " << logfiles_[13];
-
- EXPECT_THAT(CountChannelsData(config, logfiles_[10]),
- UnorderedElementsAre(std::make_tuple(
- "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
- << " : " << logfiles_[10];
- EXPECT_THAT(CountChannelsData(config, logfiles_[11]),
- UnorderedElementsAre(std::make_tuple(
- "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
- << " : " << logfiles_[11];
-
- EXPECT_THAT(CountChannelsData(config, logfiles_[12]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
- << " : " << logfiles_[12];
- EXPECT_THAT(CountChannelsData(config, logfiles_[13]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
- << " : " << logfiles_[13];
-
- // Timestamps from pi2 on pi1, and the other way.
- if (shared()) {
- EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
- UnorderedElementsAre())
- << " : " << logfiles_[14];
- EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
- UnorderedElementsAre())
- << " : " << logfiles_[15];
- EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
- UnorderedElementsAre())
- << " : " << logfiles_[16];
- EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
- UnorderedElementsAre())
- << " : " << logfiles_[17];
- EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
- UnorderedElementsAre())
- << " : " << logfiles_[18];
-
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Ping", 1)))
- << " : " << logfiles_[14];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[15]),
- UnorderedElementsAre(
- std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 9),
- std::make_tuple("/test", "aos.examples.Ping", 90)))
- << " : " << logfiles_[15];
- EXPECT_THAT(
- CountChannelsTimestamp(config, logfiles_[16]),
- UnorderedElementsAre(
- std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 191),
- std::make_tuple("/test", "aos.examples.Ping", 1910)))
- << " : " << logfiles_[16];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
- << " : " << logfiles_[17];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
- << " : " << logfiles_[18];
- } else {
- EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
- UnorderedElementsAre())
- << " : " << logfiles_[14];
- EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
- UnorderedElementsAre())
- << " : " << logfiles_[15];
- EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
- UnorderedElementsAre())
- << " : " << logfiles_[16];
- EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
- UnorderedElementsAre())
- << " : " << logfiles_[17];
- EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
- UnorderedElementsAre())
- << " : " << logfiles_[18];
- EXPECT_THAT(CountChannelsData(config, logfiles_[19]),
- UnorderedElementsAre())
- << " : " << logfiles_[19];
-
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
- UnorderedElementsAre(std::make_tuple(
- "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
- << " : " << logfiles_[14];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[15]),
- UnorderedElementsAre(std::make_tuple(
- "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
- << " : " << logfiles_[15];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[16]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
- << " : " << logfiles_[16];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
- UnorderedElementsAre(std::make_tuple(
- "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
- << " : " << logfiles_[17];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Ping", 91)))
- << " : " << logfiles_[18];
- EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[19]),
- UnorderedElementsAre(
- std::make_tuple("/test", "aos.examples.Ping", 1910)))
- << " : " << logfiles_[19];
- }
- }
-
- LogReader reader(sorted_log_files);
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of the
- // log file.
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
- LOG(INFO) << "now pi1 "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
- LOG(INFO) << "now pi2 "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
- EXPECT_THAT(reader.LoggedNodes(),
- ::testing::ElementsAre(
- configuration::GetNode(reader.logged_configuration(), pi1),
- configuration::GetNode(reader.logged_configuration(), pi2)));
-
- reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
-
- int pi1_ping_count = 10;
- int pi2_ping_count = 10;
- int pi1_pong_count = 10;
- int pi2_pong_count = 10;
-
- // Confirm that the ping value matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping) << " at "
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi1_ping_count + 1);
- EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
- pi1_ping_count * chrono::milliseconds(10) +
- monotonic_clock::epoch());
- EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
- pi1_ping_count * chrono::milliseconds(10) +
- realtime_clock::epoch());
- EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
- pi1_event_loop->context().monotonic_event_time);
- EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
- pi1_event_loop->context().realtime_event_time);
-
- ++pi1_ping_count;
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping) << " at "
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
- EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
- pi2_ping_count * chrono::milliseconds(10) +
- monotonic_clock::epoch());
- EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
- pi2_ping_count * chrono::milliseconds(10) +
- realtime_clock::epoch());
- EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time +
- chrono::microseconds(150),
- pi2_event_loop->context().monotonic_event_time);
- EXPECT_EQ(pi2_event_loop->context().realtime_remote_time +
- chrono::microseconds(150),
- pi2_event_loop->context().realtime_event_time);
- ++pi2_ping_count;
- });
-
- constexpr ssize_t kQueueIndexOffset = -9;
- // Confirm that the ping and pong counts both match, and the value also
- // matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_event_loop, &pi1_ping_count,
- &pi1_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pi1_event_loop->context().remote_queue_index,
- pi1_pong_count + kQueueIndexOffset);
- EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
- chrono::microseconds(200) +
- pi1_pong_count * chrono::milliseconds(10) +
- monotonic_clock::epoch());
- EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
- chrono::microseconds(200) +
- pi1_pong_count * chrono::milliseconds(10) +
- realtime_clock::epoch());
-
- EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time +
- chrono::microseconds(150),
- pi1_event_loop->context().monotonic_event_time);
- EXPECT_EQ(pi1_event_loop->context().realtime_remote_time +
- chrono::microseconds(150),
- pi1_event_loop->context().realtime_event_time);
-
- EXPECT_EQ(pong.value(), pi1_pong_count + 1);
- ++pi1_pong_count;
- EXPECT_EQ(pi1_ping_count, pi1_pong_count);
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_event_loop, &pi2_ping_count,
- &pi2_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pi2_event_loop->context().remote_queue_index,
- pi2_pong_count + kQueueIndexOffset);
-
- EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
- chrono::microseconds(200) +
- pi2_pong_count * chrono::milliseconds(10) +
- monotonic_clock::epoch());
- EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
- chrono::microseconds(200) +
- pi2_pong_count * chrono::milliseconds(10) +
- realtime_clock::epoch());
-
- EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
- pi2_event_loop->context().monotonic_event_time);
- EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
- pi2_event_loop->context().realtime_event_time);
-
- EXPECT_EQ(pong.value(), pi2_pong_count + 1);
- ++pi2_pong_count;
- EXPECT_EQ(pi2_ping_count, pi2_pong_count);
- });
-
- log_reader_factory.Run();
- EXPECT_EQ(pi1_ping_count, 2010);
- EXPECT_EQ(pi2_ping_count, 2010);
- EXPECT_EQ(pi1_pong_count, 2010);
- EXPECT_EQ(pi2_pong_count, 2010);
-
- reader.Deregister();
-}
-
-typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
-
-// Test that if we feed the replay with a mismatched node list that we die on
-// the LogReader constructor.
-TEST_P(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
- time_converter_.StartEqual();
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- // Test that, if we add an additional node to the replay config that the
- // logger complains about the mismatch in number of nodes.
- FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
- configuration::MergeWithConfig(&config_.message(), R"({
- "nodes": [
- {
- "name": "extra-node"
- }
- ]
- }
- )");
-
- const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
- EXPECT_DEATH(LogReader(sorted_parts, &extra_nodes_config.message()),
- "Log file and replay config need to have matching nodes lists.");
-}
-
-// Tests that we can read log files where they don't start at the same monotonic
-// time.
-TEST_P(MultinodeLoggerTest, StaggeredStart) {
- time_converter_.StartEqual();
- std::vector<std::string> actual_filenames;
-
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(200));
-
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- pi1_logger.AppendAllFilenames(&actual_filenames);
- pi2_logger.AppendAllFilenames(&actual_filenames);
- }
-
- // Since we delay starting pi2, it already knows about all the timestamps so
- // we don't end up with extra parts.
- LogReader reader(SortParts(actual_filenames));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of the
- // log file.
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- EXPECT_THAT(reader.LoggedNodes(),
- ::testing::ElementsAre(
- configuration::GetNode(reader.logged_configuration(), pi1),
- configuration::GetNode(reader.logged_configuration(), pi2)));
-
- reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
-
- int pi1_ping_count = 30;
- int pi2_ping_count = 30;
- int pi1_pong_count = 30;
- int pi2_pong_count = 30;
-
- // Confirm that the ping value matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi1_ping_count + 1);
-
- ++pi1_ping_count;
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
- ++pi2_ping_count;
- });
-
- // Confirm that the ping and pong counts both match, and the value also
- // matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_event_loop, &pi1_ping_count,
- &pi1_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pong.value(), pi1_pong_count + 1);
- ++pi1_pong_count;
- EXPECT_EQ(pi1_ping_count, pi1_pong_count);
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_event_loop, &pi2_ping_count,
- &pi2_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pong.value(), pi2_pong_count + 1);
- ++pi2_pong_count;
- EXPECT_EQ(pi2_ping_count, pi2_pong_count);
- });
-
- log_reader_factory.Run();
- EXPECT_EQ(pi1_ping_count, 2030);
- EXPECT_EQ(pi2_ping_count, 2030);
- EXPECT_EQ(pi1_pong_count, 2030);
- EXPECT_EQ(pi2_pong_count, 2030);
-
- reader.Deregister();
-}
-
-// Tests that we can read log files where the monotonic clocks drift and don't
-// match correctly. While we are here, also test that different ending times
-// also is readable.
-TEST_P(MultinodeLoggerTest, MismatchedClocks) {
- // TODO(austin): Negate...
- const chrono::nanoseconds initial_pi2_offset = chrono::seconds(1000);
-
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + initial_pi2_offset});
- // Wait for 95 ms, (~0.1 seconds - 1/2 of the ping/pong period), and set the
- // skew to be 200 uS/s
- const chrono::nanoseconds startup_sleep1 = time_converter_.AddMonotonic(
- {chrono::milliseconds(95),
- chrono::milliseconds(95) - chrono::nanoseconds(200) * 95});
- // Run another 200 ms to have one logger start first.
- const chrono::nanoseconds startup_sleep2 = time_converter_.AddMonotonic(
- {chrono::milliseconds(200), chrono::milliseconds(200)});
- // Slew one way then the other at the same 200 us/s slew rate. Make sure we
- // go far enough to cause problems if this isn't accounted for.
- const chrono::nanoseconds logger_run1 = time_converter_.AddMonotonic(
- {chrono::milliseconds(20000),
- chrono::milliseconds(20000) - chrono::nanoseconds(200) * 20000});
- const chrono::nanoseconds logger_run2 = time_converter_.AddMonotonic(
- {chrono::milliseconds(40000),
- chrono::milliseconds(40000) + chrono::nanoseconds(200) * 40000});
- const chrono::nanoseconds logger_run3 = time_converter_.AddMonotonic(
- {chrono::milliseconds(400), chrono::milliseconds(400)});
-
- {
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- LOG(INFO) << "pi2 times: " << pi2_->monotonic_now() << " "
- << pi2_->realtime_now() << " distributed "
- << pi2_->ToDistributedClock(pi2_->monotonic_now());
-
- LOG(INFO) << "pi2_ times: " << pi2_->monotonic_now() << " "
- << pi2_->realtime_now() << " distributed "
- << pi2_->ToDistributedClock(pi2_->monotonic_now());
-
- event_loop_factory_.RunFor(startup_sleep1);
-
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(startup_sleep2);
-
- {
- // Run pi1's logger for only part of the time.
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- StartLogger(&pi1_logger);
- event_loop_factory_.RunFor(logger_run1);
-
- // Make sure we slewed time far enough so that the difference is greater
- // than the network delay. This ensures that if we sort incorrectly, it
- // will show up in the results.
- EXPECT_LT(
- (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
- -event_loop_factory_.send_delay() -
- event_loop_factory_.network_delay());
-
- event_loop_factory_.RunFor(logger_run2);
-
- // And now check that we went far enough the other way to make sure we
- // cover both problems.
- EXPECT_GT(
- (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
- event_loop_factory_.send_delay() +
- event_loop_factory_.network_delay());
- }
-
- // And log a bit more on pi2.
- event_loop_factory_.RunFor(logger_run3);
- }
-
- LogReader reader(
- SortParts(MakeLogFiles(logfile_base1_, logfile_base2_, 3, 2)));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- // This sends out the fetched messages and advances time to the start of the
- // log file.
- reader.Register(&log_reader_factory);
-
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
- LOG(INFO) << "now pi1 "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
- LOG(INFO) << "now pi2 "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
- LOG(INFO) << "Done registering (pi1) "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now()
- << " "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->realtime_now();
- LOG(INFO) << "Done registering (pi2) "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now()
- << " "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->realtime_now();
-
- EXPECT_THAT(reader.LoggedNodes(),
- ::testing::ElementsAre(
- configuration::GetNode(reader.logged_configuration(), pi1),
- configuration::GetNode(reader.logged_configuration(), pi2)));
-
- reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
-
- int pi1_ping_count = 30;
- int pi2_ping_count = 30;
- int pi1_pong_count = 30;
- int pi2_pong_count = 30;
-
- // Confirm that the ping value matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi1_ping_count + 1);
-
- ++pi1_ping_count;
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
- VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
- EXPECT_EQ(ping.value(), pi2_ping_count + 1);
-
- ++pi2_ping_count;
- });
-
- // Confirm that the ping and pong counts both match, and the value also
- // matches.
- pi1_event_loop->MakeWatcher(
- "/test", [&pi1_event_loop, &pi1_ping_count,
- &pi1_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
- << pi1_event_loop->context().monotonic_remote_time << " -> "
- << pi1_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pong.value(), pi1_pong_count + 1);
- ++pi1_pong_count;
- EXPECT_EQ(pi1_ping_count, pi1_pong_count);
- });
- pi2_event_loop->MakeWatcher(
- "/test", [&pi2_event_loop, &pi2_ping_count,
- &pi2_pong_count](const examples::Pong &pong) {
- VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
- << pi2_event_loop->context().monotonic_remote_time << " -> "
- << pi2_event_loop->context().monotonic_event_time;
-
- EXPECT_EQ(pong.value(), pi2_pong_count + 1);
- ++pi2_pong_count;
- EXPECT_EQ(pi2_ping_count, pi2_pong_count);
- });
-
- log_reader_factory.Run();
- EXPECT_EQ(pi1_ping_count, 6030);
- EXPECT_EQ(pi2_ping_count, 6030);
- EXPECT_EQ(pi1_pong_count, 6030);
- EXPECT_EQ(pi2_pong_count, 6030);
-
- reader.Deregister();
-}
-
-// Tests that sorting a bunch of parts produces the expected pre-determined
-// sorted parts.
-TEST_P(MultinodeLoggerTest, SortParts) {
- time_converter_.StartEqual();
- // Make a bunch of parts.
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(2000));
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
- VerifyParts(sorted_parts);
-}
-
-// Tests that we can sort a bunch of parts with an empty part. We should ignore
-// it and remove it from the sorted list.
-TEST_P(MultinodeLoggerTest, SortEmptyParts) {
- time_converter_.StartEqual();
- // Make a bunch of parts.
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(2000));
- }
-
- // TODO(austin): Should we flip out if the file can't open?
- const std::string kEmptyFile("foobarinvalidfiledoesnotexist" + Extension());
-
- aos::util::WriteStringToFileOrDie(kEmptyFile, "");
- logfiles_.emplace_back(kEmptyFile);
-
- const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
- VerifyParts(sorted_parts, {kEmptyFile});
-}
-
-// Tests that we can sort a bunch of parts when the end of one file is
-// missing. We should use the part we can read.
-TEST_P(MultinodeLoggerTest, SortTruncatedParts) {
- std::vector<std::string> actual_filenames;
- time_converter_.StartEqual();
- // Make a bunch of parts.
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(2000));
-
- pi1_logger.AppendAllFilenames(&actual_filenames);
- pi2_logger.AppendAllFilenames(&actual_filenames);
- }
-
- ASSERT_THAT(actual_filenames,
- ::testing::UnorderedElementsAreArray(logfiles_));
-
- // Strip off the end of one of the files. Pick one with a lot of data.
- // For snappy, it needs enough data to span more than one chunk of compressed
- // data so that we don't corrupt the entire log part.
- ::std::string compressed_contents =
- aos::util::ReadFileToStringOrDie(logfiles_[4]);
-
- aos::util::WriteStringToFileOrDie(
- logfiles_[4],
- compressed_contents.substr(0, compressed_contents.size() - 100));
-
- const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
- VerifyParts(sorted_parts);
-}
-
-// Tests that if we remap a remapped channel, it shows up correctly.
-TEST_P(MultinodeLoggerTest, RemapLoggedChannel) {
- time_converter_.StartEqual();
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- LogReader reader(SortParts(logfiles_));
-
- // Remap just on pi1.
- reader.RemapLoggedChannel<aos::timing::Report>(
- "/aos", configuration::GetNode(reader.configuration(), "pi1"));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- std::vector<const Channel *> remapped_channels = reader.RemappedChannels();
- // Note: An extra channel gets remapped automatically due to a timestamp
- // channel being LOCAL_LOGGER'd.
- ASSERT_EQ(remapped_channels.size(), std::get<0>(GetParam()).shared ? 1u : 2u);
- EXPECT_EQ(remapped_channels[0]->name()->string_view(), "/original/pi1/aos");
- EXPECT_EQ(remapped_channels[0]->type()->string_view(), "aos.timing.Report");
- if (!std::get<0>(GetParam()).shared) {
- EXPECT_EQ(remapped_channels[1]->name()->string_view(),
- "/original/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp");
- EXPECT_EQ(remapped_channels[1]->type()->string_view(),
- "aos.message_bridge.RemoteMessage");
- }
-
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- // Confirm we can read the data on the remapped channel, just for pi1. Nothing
- // else should have moved.
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- pi1_event_loop->SkipTimingReport();
- std::unique_ptr<EventLoop> full_pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- full_pi1_event_loop->SkipTimingReport();
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
- pi2_event_loop->SkipTimingReport();
-
- MessageCounter<aos::timing::Report> pi1_timing_report(pi1_event_loop.get(),
- "/aos");
- MessageCounter<aos::timing::Report> full_pi1_timing_report(
- full_pi1_event_loop.get(), "/pi1/aos");
- MessageCounter<aos::timing::Report> pi1_original_timing_report(
- pi1_event_loop.get(), "/original/aos");
- MessageCounter<aos::timing::Report> full_pi1_original_timing_report(
- full_pi1_event_loop.get(), "/original/pi1/aos");
- MessageCounter<aos::timing::Report> pi2_timing_report(pi2_event_loop.get(),
- "/aos");
-
- log_reader_factory.Run();
-
- EXPECT_EQ(pi1_timing_report.count(), 0u);
- EXPECT_EQ(full_pi1_timing_report.count(), 0u);
- EXPECT_NE(pi1_original_timing_report.count(), 0u);
- EXPECT_NE(full_pi1_original_timing_report.count(), 0u);
- EXPECT_NE(pi2_timing_report.count(), 0u);
-
- reader.Deregister();
-}
-
-// Tests that we can remap a forwarded channel as well.
-TEST_P(MultinodeLoggerTest, RemapForwardedLoggedChannel) {
- time_converter_.StartEqual();
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- LogReader reader(SortParts(logfiles_));
-
- reader.RemapLoggedChannel<examples::Ping>("/test");
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- // Confirm we can read the data on the remapped channel, just for pi1. Nothing
- // else should have moved.
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- pi1_event_loop->SkipTimingReport();
- std::unique_ptr<EventLoop> full_pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- full_pi1_event_loop->SkipTimingReport();
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
- pi2_event_loop->SkipTimingReport();
-
- MessageCounter<examples::Ping> pi1_ping(pi1_event_loop.get(), "/test");
- MessageCounter<examples::Ping> pi2_ping(pi2_event_loop.get(), "/test");
- MessageCounter<examples::Ping> pi1_original_ping(pi1_event_loop.get(),
- "/original/test");
- MessageCounter<examples::Ping> pi2_original_ping(pi2_event_loop.get(),
- "/original/test");
-
- std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
- pi1_original_ping_timestamp;
- std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
- pi1_ping_timestamp;
- if (!shared()) {
- pi1_original_ping_timestamp =
- std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
- pi1_event_loop.get(),
- "/pi1/aos/remote_timestamps/pi2/original/test/aos-examples-Ping");
- pi1_ping_timestamp =
- std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
- pi1_event_loop.get(),
- "/pi1/aos/remote_timestamps/pi2/test/aos-examples-Ping");
- }
-
- log_reader_factory.Run();
-
- EXPECT_EQ(pi1_ping.count(), 0u);
- EXPECT_EQ(pi2_ping.count(), 0u);
- EXPECT_NE(pi1_original_ping.count(), 0u);
- EXPECT_NE(pi2_original_ping.count(), 0u);
- if (!shared()) {
- EXPECT_NE(pi1_original_ping_timestamp->count(), 0u);
- EXPECT_EQ(pi1_ping_timestamp->count(), 0u);
- }
-
- reader.Deregister();
-}
-
-// Tests that we observe all the same events in log replay (for a given node)
-// whether we just register an event loop for that node or if we register a full
-// event loop factory.
-TEST_P(MultinodeLoggerTest, SingleNodeReplay) {
- time_converter_.StartEqual();
- constexpr chrono::milliseconds kStartupDelay(95);
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(kStartupDelay);
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- LogReader full_reader(SortParts(logfiles_));
- LogReader single_node_reader(SortParts(logfiles_));
-
- SimulatedEventLoopFactory full_factory(full_reader.configuration());
- SimulatedEventLoopFactory single_node_factory(
- single_node_reader.configuration());
- single_node_factory.SkipTimingReport();
- single_node_factory.DisableStatistics();
- std::unique_ptr<EventLoop> replay_event_loop =
- single_node_factory.GetNodeEventLoopFactory("pi1")->MakeEventLoop(
- "log_reader");
-
- full_reader.Register(&full_factory);
- single_node_reader.Register(replay_event_loop.get());
-
- const Node *full_pi1 =
- configuration::GetNode(full_factory.configuration(), "pi1");
-
- // Record every message observed on pi1 in the full replay so that we can
- // compare against the single-node replay below.
- std::unique_ptr<EventLoop> full_event_loop =
- full_factory.MakeEventLoop("test", full_pi1);
- full_event_loop->SkipTimingReport();
- full_event_loop->SkipAosLog();
- // Maps are indexed by channel index.
- // observed_messages: {channel_index: [(message_sent_time, was_fetched),...]}
- std::map<size_t, std::vector<std::pair<monotonic_clock::time_point, bool>>>
- observed_messages;
- std::map<size_t, std::unique_ptr<RawFetcher>> fetchers;
- for (size_t ii = 0; ii < full_event_loop->configuration()->channels()->size();
- ++ii) {
- const Channel *channel =
- full_event_loop->configuration()->channels()->Get(ii);
- // We currently don't support replaying remote timestamp channels in
- // realtime replay (unless the remote timestamp channel was actually logged,
- // i.e. not NOT_LOGGED, in which case it gets auto-remapped and replayed on a
- // /original channel).
- if (channel->name()->string_view().find("remote_timestamp") !=
- std::string_view::npos &&
- channel->name()->string_view().find("/original") ==
- std::string_view::npos) {
- continue;
- }
- if (configuration::ChannelIsReadableOnNode(channel, full_pi1)) {
- observed_messages[ii] = {};
- fetchers[ii] = full_event_loop->MakeRawFetcher(channel);
- full_event_loop->OnRun([ii, &observed_messages, &fetchers]() {
- if (fetchers[ii]->Fetch()) {
- observed_messages[ii].push_back(std::make_pair(
- fetchers[ii]->context().monotonic_event_time, true));
- }
- });
- full_event_loop->MakeRawNoArgWatcher(
- channel, [ii, &observed_messages](const Context &context) {
- observed_messages[ii].push_back(
- std::make_pair(context.monotonic_event_time, false));
- });
- }
- }
-
- full_factory.Run();
- fetchers.clear();
- full_reader.Deregister();
-
- const Node *single_node_pi1 =
- configuration::GetNode(single_node_factory.configuration(), "pi1");
- std::map<size_t, std::unique_ptr<RawFetcher>> single_node_fetchers;
-
- std::unique_ptr<EventLoop> single_node_event_loop =
- single_node_factory.MakeEventLoop("test", single_node_pi1);
- single_node_event_loop->SkipTimingReport();
- single_node_event_loop->SkipAosLog();
- for (size_t ii = 0;
- ii < single_node_event_loop->configuration()->channels()->size(); ++ii) {
- const Channel *channel =
- single_node_event_loop->configuration()->channels()->Get(ii);
- single_node_factory.DisableForwarding(channel);
- if (configuration::ChannelIsReadableOnNode(channel, single_node_pi1)) {
- single_node_fetchers[ii] =
- single_node_event_loop->MakeRawFetcher(channel);
- single_node_event_loop->OnRun([channel, ii, &single_node_fetchers]() {
- EXPECT_FALSE(single_node_fetchers[ii]->Fetch())
- << "Single EventLoop replay doesn't support pre-loading fetchers. "
- << configuration::StrippedChannelToString(channel);
- });
- single_node_event_loop->MakeRawNoArgWatcher(
- channel, [ii, &observed_messages, channel,
- kStartupDelay](const Context &context) {
- if (observed_messages[ii].empty()) {
- FAIL() << "Observed extra message at "
- << context.monotonic_event_time << " on "
- << configuration::StrippedChannelToString(channel);
- return;
- }
- const std::pair<monotonic_clock::time_point, bool> &message =
- observed_messages[ii].front();
- if (message.second) {
- EXPECT_LE(message.first,
- context.monotonic_event_time + kStartupDelay)
- << "Mismatched message times " << context.monotonic_event_time
- << " and " << message.first << " on "
- << configuration::StrippedChannelToString(channel);
- } else {
- EXPECT_EQ(message.first,
- context.monotonic_event_time + kStartupDelay)
- << "Mismatched message times " << context.monotonic_event_time
- << " and " << message.first << " on "
- << configuration::StrippedChannelToString(channel);
- }
- observed_messages[ii].erase(observed_messages[ii].begin());
- });
- }
- }
-
- single_node_factory.Run();
-
- single_node_fetchers.clear();
-
- single_node_reader.Deregister();
-
- for (const auto &pair : observed_messages) {
- EXPECT_TRUE(pair.second.empty())
- << "Missed " << pair.second.size() << " messages on "
- << configuration::StrippedChannelToString(
- single_node_event_loop->configuration()->channels()->Get(
- pair.first));
- }
-}
-
-// Tests that we properly recreate forwarded timestamps when replaying a log.
-// This should be enough that we can then re-run the logger and get a valid log
-// back.
-TEST_P(MultinodeLoggerTest, MessageHeader) {
- time_converter_.StartEqual();
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- LogReader reader(SortParts(logfiles_));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of the
- // log file.
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
- LOG(INFO) << "now pi1 "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
- LOG(INFO) << "now pi2 "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
- EXPECT_THAT(reader.LoggedNodes(),
- ::testing::ElementsAre(
- configuration::GetNode(reader.logged_configuration(), pi1),
- configuration::GetNode(reader.logged_configuration(), pi2)));
-
- reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
- std::unique_ptr<EventLoop> pi1_event_loop =
- log_reader_factory.MakeEventLoop("test", pi1);
- std::unique_ptr<EventLoop> pi2_event_loop =
- log_reader_factory.MakeEventLoop("test", pi2);
-
- aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi1_fetcher =
- pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
- aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi2_fetcher =
- pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
-
- aos::Fetcher<examples::Ping> ping_on_pi1_fetcher =
- pi1_event_loop->MakeFetcher<examples::Ping>("/test");
- aos::Fetcher<examples::Ping> ping_on_pi2_fetcher =
- pi2_event_loop->MakeFetcher<examples::Ping>("/test");
-
- aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi2_fetcher =
- pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
- aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi1_fetcher =
- pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
-
- aos::Fetcher<examples::Pong> pong_on_pi2_fetcher =
- pi2_event_loop->MakeFetcher<examples::Pong>("/test");
- aos::Fetcher<examples::Pong> pong_on_pi1_fetcher =
- pi1_event_loop->MakeFetcher<examples::Pong>("/test");
-
- const size_t pi1_timestamp_channel = configuration::ChannelIndex(
- pi1_event_loop->configuration(), pi1_timestamp_on_pi1_fetcher.channel());
- const size_t ping_timestamp_channel = configuration::ChannelIndex(
- pi2_event_loop->configuration(), ping_on_pi2_fetcher.channel());
-
- const size_t pi2_timestamp_channel = configuration::ChannelIndex(
- pi2_event_loop->configuration(), pi2_timestamp_on_pi2_fetcher.channel());
- const size_t pong_timestamp_channel = configuration::ChannelIndex(
- pi1_event_loop->configuration(), pong_on_pi1_fetcher.channel());
-
- const chrono::nanoseconds network_delay = event_loop_factory_.network_delay();
- const chrono::nanoseconds send_delay = event_loop_factory_.send_delay();
-
- for (std::pair<int, std::string> channel :
- shared()
- ? std::vector<
- std::pair<int, std::string>>{{-1,
- "/aos/remote_timestamps/pi2"}}
- : std::vector<std::pair<int, std::string>>{
- {pi1_timestamp_channel,
- "/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp"},
- {ping_timestamp_channel,
- "/aos/remote_timestamps/pi2/test/aos-examples-Ping"}}) {
- pi1_event_loop->MakeWatcher(
- channel.second,
- [&pi1_event_loop, &pi2_event_loop, pi1_timestamp_channel,
- ping_timestamp_channel, &pi1_timestamp_on_pi1_fetcher,
- &pi1_timestamp_on_pi2_fetcher, &ping_on_pi1_fetcher,
- &ping_on_pi2_fetcher, network_delay, send_delay,
- channel_index = channel.first](const RemoteMessage &header) {
- const aos::monotonic_clock::time_point header_monotonic_sent_time(
- chrono::nanoseconds(header.monotonic_sent_time()));
- const aos::realtime_clock::time_point header_realtime_sent_time(
- chrono::nanoseconds(header.realtime_sent_time()));
- const aos::monotonic_clock::time_point header_monotonic_remote_time(
- chrono::nanoseconds(header.monotonic_remote_time()));
- const aos::realtime_clock::time_point header_realtime_remote_time(
- chrono::nanoseconds(header.realtime_remote_time()));
-
- if (channel_index != -1) {
- ASSERT_EQ(channel_index, header.channel_index());
- }
-
- const Context *pi1_context = nullptr;
- const Context *pi2_context = nullptr;
-
- if (header.channel_index() == pi1_timestamp_channel) {
- ASSERT_TRUE(pi1_timestamp_on_pi1_fetcher.FetchNext());
- ASSERT_TRUE(pi1_timestamp_on_pi2_fetcher.FetchNext());
- pi1_context = &pi1_timestamp_on_pi1_fetcher.context();
- pi2_context = &pi1_timestamp_on_pi2_fetcher.context();
- } else if (header.channel_index() == ping_timestamp_channel) {
- ASSERT_TRUE(ping_on_pi1_fetcher.FetchNext());
- ASSERT_TRUE(ping_on_pi2_fetcher.FetchNext());
- pi1_context = &ping_on_pi1_fetcher.context();
- pi2_context = &ping_on_pi2_fetcher.context();
- } else {
- LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
- << configuration::CleanedChannelToString(
- pi1_event_loop->configuration()->channels()->Get(
- header.channel_index()));
- }
-
- ASSERT_TRUE(header.has_boot_uuid());
- EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
- pi2_event_loop->boot_uuid());
-
- EXPECT_EQ(pi1_context->queue_index, header.remote_queue_index());
- EXPECT_EQ(pi2_context->remote_queue_index,
- header.remote_queue_index());
- EXPECT_EQ(pi2_context->queue_index, header.queue_index());
-
- EXPECT_EQ(pi2_context->monotonic_event_time,
- header_monotonic_sent_time);
- EXPECT_EQ(pi2_context->realtime_event_time,
- header_realtime_sent_time);
- EXPECT_EQ(pi2_context->realtime_remote_time,
- header_realtime_remote_time);
- EXPECT_EQ(pi2_context->monotonic_remote_time,
- header_monotonic_remote_time);
-
- EXPECT_EQ(pi1_context->realtime_event_time,
- header_realtime_remote_time);
- EXPECT_EQ(pi1_context->monotonic_event_time,
- header_monotonic_remote_time);
-
- // Time estimation isn't perfect, but we know the clocks were
- // identical when logged, so we know when this should have come back.
- // Confirm we got it when we expected.
- EXPECT_EQ(pi1_event_loop->context().monotonic_event_time,
- pi1_context->monotonic_event_time + 2 * network_delay +
- send_delay);
- });
- }
- for (std::pair<int, std::string> channel :
- shared()
- ? std::vector<
- std::pair<int, std::string>>{{-1,
- "/aos/remote_timestamps/pi1"}}
- : std::vector<std::pair<int, std::string>>{
- {pi2_timestamp_channel,
- "/aos/remote_timestamps/pi1/pi2/aos/"
- "aos-message_bridge-Timestamp"}}) {
- pi2_event_loop->MakeWatcher(
- channel.second,
- [&pi2_event_loop, &pi1_event_loop, pi2_timestamp_channel,
- pong_timestamp_channel, &pi2_timestamp_on_pi2_fetcher,
- &pi2_timestamp_on_pi1_fetcher, &pong_on_pi2_fetcher,
- &pong_on_pi1_fetcher, network_delay, send_delay,
- channel_index = channel.first](const RemoteMessage &header) {
- const aos::monotonic_clock::time_point header_monotonic_sent_time(
- chrono::nanoseconds(header.monotonic_sent_time()));
- const aos::realtime_clock::time_point header_realtime_sent_time(
- chrono::nanoseconds(header.realtime_sent_time()));
- const aos::monotonic_clock::time_point header_monotonic_remote_time(
- chrono::nanoseconds(header.monotonic_remote_time()));
- const aos::realtime_clock::time_point header_realtime_remote_time(
- chrono::nanoseconds(header.realtime_remote_time()));
-
- if (channel_index != -1) {
- ASSERT_EQ(channel_index, header.channel_index());
- }
-
- const Context *pi2_context = nullptr;
- const Context *pi1_context = nullptr;
-
- if (header.channel_index() == pi2_timestamp_channel) {
- ASSERT_TRUE(pi2_timestamp_on_pi2_fetcher.FetchNext());
- ASSERT_TRUE(pi2_timestamp_on_pi1_fetcher.FetchNext());
- pi2_context = &pi2_timestamp_on_pi2_fetcher.context();
- pi1_context = &pi2_timestamp_on_pi1_fetcher.context();
- } else if (header.channel_index() == pong_timestamp_channel) {
- ASSERT_TRUE(pong_on_pi2_fetcher.FetchNext());
- ASSERT_TRUE(pong_on_pi1_fetcher.FetchNext());
- pi2_context = &pong_on_pi2_fetcher.context();
- pi1_context = &pong_on_pi1_fetcher.context();
- } else {
- LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
- << configuration::CleanedChannelToString(
- pi2_event_loop->configuration()->channels()->Get(
- header.channel_index()));
- }
-
- ASSERT_TRUE(header.has_boot_uuid());
- EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
- pi1_event_loop->boot_uuid());
-
- EXPECT_EQ(pi2_context->queue_index, header.remote_queue_index());
- EXPECT_EQ(pi1_context->remote_queue_index,
- header.remote_queue_index());
- EXPECT_EQ(pi1_context->queue_index, header.queue_index());
-
- EXPECT_EQ(pi1_context->monotonic_event_time,
- header_monotonic_sent_time);
- EXPECT_EQ(pi1_context->realtime_event_time,
- header_realtime_sent_time);
- EXPECT_EQ(pi1_context->realtime_remote_time,
- header_realtime_remote_time);
- EXPECT_EQ(pi1_context->monotonic_remote_time,
- header_monotonic_remote_time);
-
- EXPECT_EQ(pi2_context->realtime_event_time,
- header_realtime_remote_time);
- EXPECT_EQ(pi2_context->monotonic_event_time,
- header_monotonic_remote_time);
-
- // Time estimation isn't perfect, but we know the clocks were
- // identical when logged, so we know when this should have come back.
- // Confirm we got it when we expected.
- EXPECT_EQ(pi2_event_loop->context().monotonic_event_time,
- pi2_context->monotonic_event_time + 2 * network_delay +
- send_delay);
- });
- }
-
- // And confirm we can re-create a log again, while checking the contents.
- {
- LoggerState pi1_logger = MakeLogger(
- log_reader_factory.GetNodeEventLoopFactory("pi1"), &log_reader_factory);
- LoggerState pi2_logger = MakeLogger(
- log_reader_factory.GetNodeEventLoopFactory("pi2"), &log_reader_factory);
-
- StartLogger(&pi1_logger, tmp_dir_ + "/relogged1");
- StartLogger(&pi2_logger, tmp_dir_ + "/relogged2");
-
- log_reader_factory.Run();
- }
-
- reader.Deregister();
-
- // And verify that we can run the LogReader over the relogged files without
- // hitting any fatal errors.
- {
- LogReader relogged_reader(SortParts(MakeLogFiles(
- tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2", 3, 3, true)));
- relogged_reader.Register();
-
- relogged_reader.event_loop_factory()->Run();
- }
- // And confirm that we can read the logged file using the reader's
- // configuration.
- {
- LogReader relogged_reader(
- SortParts(MakeLogFiles(tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2",
- 3, 3, true)),
- reader.configuration());
- relogged_reader.Register();
-
- relogged_reader.event_loop_factory()->Run();
- }
-}
-
-// Tests that we properly populate and extract the logger_start time by setting
-// up a clock difference between 2 nodes and looking at the resulting parts.
-TEST_P(MultinodeLoggerTest, LoggerStartTime) {
- std::vector<std::string> actual_filenames;
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
- pi1_logger.AppendAllFilenames(&actual_filenames);
- pi2_logger.AppendAllFilenames(&actual_filenames);
- }
-
- ASSERT_THAT(actual_filenames,
- ::testing::UnorderedElementsAreArray(logfiles_));
-
- for (const LogFile &log_file : SortParts(logfiles_)) {
- for (const LogParts &log_part : log_file.parts) {
- if (log_part.node == log_file.logger_node) {
- EXPECT_EQ(log_part.logger_monotonic_start_time,
- aos::monotonic_clock::min_time);
- EXPECT_EQ(log_part.logger_realtime_start_time,
- aos::realtime_clock::min_time);
- } else {
- const chrono::seconds offset = log_file.logger_node == "pi1"
- ? -chrono::seconds(1000)
- : chrono::seconds(1000);
- EXPECT_EQ(log_part.logger_monotonic_start_time,
- log_part.monotonic_start_time + offset);
- EXPECT_EQ(log_part.logger_realtime_start_time,
- log_file.realtime_start_time +
- (log_part.logger_monotonic_start_time -
- log_file.monotonic_start_time));
- }
- }
- }
-}
-
-// Test that renaming the base renames the folder.
-TEST_P(MultinodeLoggerTest, LoggerRenameFolder) {
- util::UnlinkRecursive(tmp_dir_ + "/renamefolder");
- util::UnlinkRecursive(tmp_dir_ + "/new-good");
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- logfile_base1_ = tmp_dir_ + "/renamefolder/multi_logfile1";
- logfile_base2_ = tmp_dir_ + "/renamefolder/multi_logfile2";
- logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- logfile_base1_ = tmp_dir_ + "/new-good/multi_logfile1";
- logfile_base2_ = tmp_dir_ + "/new-good/multi_logfile2";
- logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
- ASSERT_TRUE(pi1_logger.logger->RenameLogBase(logfile_base1_));
- ASSERT_TRUE(pi2_logger.logger->RenameLogBase(logfile_base2_));
- for (auto &file : logfiles_) {
- struct stat s;
- EXPECT_EQ(0, stat(file.c_str(), &s));
- }
-}
-
-// Test that renaming the file base (not just the containing folder) dies.
-TEST_P(MultinodeLoggerDeathTest, LoggerRenameFile) {
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- util::UnlinkRecursive(tmp_dir_ + "/renamefile");
- logfile_base1_ = tmp_dir_ + "/renamefile/multi_logfile1";
- logfile_base2_ = tmp_dir_ + "/renamefile/multi_logfile2";
- logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
- LoggerState pi1_logger = MakeLogger(pi1_);
- StartLogger(&pi1_logger);
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- logfile_base1_ = tmp_dir_ + "/new-renamefile/new_multi_logfile1";
- EXPECT_DEATH({ pi1_logger.logger->RenameLogBase(logfile_base1_); },
- "Rename of file base from");
-}
-
-// TODO(austin): We can write a test which recreates a logfile and confirms that
-// we get it back. That is the ultimate test.
-
-// Tests that we properly track the oldest timestamps across a remote reboot
-// while logging, and that the resulting log can still be replayed.
-TEST_P(MultinodeLoggerTest, RemoteReboot) {
- std::vector<std::string> actual_filenames;
-
- const UUID pi1_boot0 = UUID::Random();
- const UUID pi2_boot0 = UUID::Random();
- const UUID pi2_boot1 = UUID::Random();
- {
- CHECK_EQ(pi1_index_, 0u);
- CHECK_EQ(pi2_index_, 1u);
-
- time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
- time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
- time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
-
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
- const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp::epoch() + reboot_time,
- BootTimestamp{
- .boot = 1,
- .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
- }
-
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
- pi1_boot0);
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
- pi2_boot0);
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
- VLOG(1) << "Reboot now!";
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
- pi1_boot0);
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
- pi2_boot1);
-
- pi1_logger.AppendAllFilenames(&actual_filenames);
- }
-
- std::sort(actual_filenames.begin(), actual_filenames.end());
- std::sort(pi1_reboot_logfiles_.begin(), pi1_reboot_logfiles_.end());
- ASSERT_THAT(actual_filenames,
- ::testing::UnorderedElementsAreArray(pi1_reboot_logfiles_));
-
- // Confirm that our new oldest timestamps properly update as we reboot and
- // rotate.
- for (const std::string &file : pi1_reboot_logfiles_) {
- std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
- ReadHeader(file);
- CHECK(log_header);
- if (log_header->message().has_configuration()) {
- continue;
- }
-
- const monotonic_clock::time_point monotonic_start_time =
- monotonic_clock::time_point(
- chrono::nanoseconds(log_header->message().monotonic_start_time()));
- const UUID source_node_boot_uuid = UUID::FromString(
- log_header->message().source_node_boot_uuid()->string_view());
-
- if (log_header->message().node()->name()->string_view() != "pi1") {
- // The remote message channel should rotate later and have more parts.
- // This is only true on the log files with shared remote messages.
- //
- // TODO(austin): I'm not thrilled with this test pattern... It feels
- // brittle in a different way.
- if (file.find("aos.message_bridge.RemoteMessage") == std::string::npos ||
- !shared()) {
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 1:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- ASSERT_EQ(monotonic_start_time,
- monotonic_clock::epoch() + chrono::seconds(1));
- break;
- case 2:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
- break;
- case 3:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
- chrono::nanoseconds(2322999462))
- << " on " << file;
- break;
- default:
- FAIL();
- break;
- }
- } else {
- switch (log_header->message().parts_index()) {
- case 0:
- case 1:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 2:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- ASSERT_EQ(monotonic_start_time,
- monotonic_clock::epoch() + chrono::seconds(1));
- break;
- case 3:
- case 4:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
- break;
- case 5:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
- chrono::nanoseconds(2322999462))
- << " on " << file;
- break;
- default:
- FAIL();
- break;
- }
- }
- continue;
- }
- SCOPED_TRACE(file);
- SCOPED_TRACE(aos::FlatbufferToJson(
- *log_header, {.multi_line = true, .max_vector_size = 100}));
- ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
- ASSERT_EQ(
- log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
- EXPECT_EQ(
- log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
- ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
- 2u);
- EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- ASSERT_TRUE(log_header->message()
- .has_oldest_remote_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
- EXPECT_EQ(log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- ASSERT_TRUE(log_header->message()
- .has_oldest_local_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
- EXPECT_EQ(log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
-
- const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_remote_monotonic_timestamps()->Get(
- 1)));
- const monotonic_clock::time_point oldest_local_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
- const monotonic_clock::time_point
- oldest_remote_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_local_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_remote_reliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_remote_reliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_local_reliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_local_reliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_logger_remote_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_logger_remote_unreliable_monotonic_timestamps()
- ->Get(0)));
- const monotonic_clock::time_point
- oldest_logger_local_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_logger_local_unreliable_monotonic_timestamps()
- ->Get(0)));
- EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- break;
- case 1:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90200)));
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90350)));
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90200)));
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90350)));
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- break;
- case 2:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90200)))
- << file;
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90350)))
- << file;
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90200)))
- << file;
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(90350)))
- << file;
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(100000)))
- << file;
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(100150)))
- << file;
- break;
- case 3:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::time_point(chrono::milliseconds(1323) +
- chrono::microseconds(200)));
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(10100350)));
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::milliseconds(1323) +
- chrono::microseconds(200)));
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(10100350)));
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time)
- << file;
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time)
- << file;
- break;
- case 4:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::time_point(chrono::milliseconds(1323) +
- chrono::microseconds(200)));
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(10100350)));
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::milliseconds(1323) +
- chrono::microseconds(200)));
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(10100350)));
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(1423000)))
- << file;
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::time_point(chrono::microseconds(10200150)))
- << file;
- break;
- default:
- FAIL();
- break;
- }
- }
-
- // Confirm that we can replay logs that span the remote reboot.
- {
- LogReader reader(SortParts(pi1_reboot_logfiles_));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of
- // the log file.
- reader.Register(&log_reader_factory);
-
- log_reader_factory.Run();
-
- reader.Deregister();
- }
-}
-
-// Tests that we can sort a log which only has timestamps from the remote
-// because the local message_bridge_client failed to connect.
-TEST_P(MultinodeLoggerTest, RemoteRebootOnlyTimestamps) {
- const UUID pi1_boot0 = UUID::Random();
- const UUID pi2_boot0 = UUID::Random();
- const UUID pi2_boot1 = UUID::Random();
- {
- CHECK_EQ(pi1_index_, 0u);
- CHECK_EQ(pi2_index_, 1u);
-
- time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
- time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
- time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
-
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
- const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp::epoch() + reboot_time,
- BootTimestamp{
- .boot = 1,
- .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
- }
- pi2_->Disconnect(pi1_->node());
-
- std::vector<std::string> filenames;
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
- pi1_boot0);
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
- pi2_boot0);
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
-
- VLOG(1) << "Reboot now!";
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
- pi1_boot0);
- EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
- pi2_boot1);
- pi1_logger.AppendAllFilenames(&filenames);
- }
-
- std::sort(filenames.begin(), filenames.end());
-
- // Confirm that our new oldest timestamps properly update as we reboot and
- // rotate.
- size_t timestamp_file_count = 0;
- for (const std::string &file : filenames) {
- std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
- ReadHeader(file);
- CHECK(log_header);
-
- if (log_header->message().has_configuration()) {
- continue;
- }
-
- const monotonic_clock::time_point monotonic_start_time =
- monotonic_clock::time_point(
- chrono::nanoseconds(log_header->message().monotonic_start_time()));
- const UUID source_node_boot_uuid = UUID::FromString(
- log_header->message().source_node_boot_uuid()->string_view());
-
- ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
- ASSERT_EQ(
- log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
- ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
- ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
- 2u);
- ASSERT_TRUE(log_header->message()
- .has_oldest_remote_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
- ASSERT_TRUE(log_header->message()
- .has_oldest_local_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
- ASSERT_TRUE(log_header->message()
- .has_oldest_remote_reliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_remote_reliable_monotonic_timestamps()
- ->size(),
- 2u);
- ASSERT_TRUE(
- log_header->message().has_oldest_local_reliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_local_reliable_monotonic_timestamps()
- ->size(),
- 2u);
-
- ASSERT_TRUE(
- log_header->message()
- .has_oldest_logger_remote_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_logger_remote_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
- ASSERT_TRUE(log_header->message()
- .has_oldest_logger_local_unreliable_monotonic_timestamps());
- ASSERT_EQ(log_header->message()
- .oldest_logger_local_unreliable_monotonic_timestamps()
- ->size(),
- 2u);
-
- if (log_header->message().node()->name()->string_view() != "pi1") {
- ASSERT_TRUE(file.find("aos.message_bridge.RemoteMessage") !=
- std::string::npos);
-
- const std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
- ReadNthMessage(file, 0);
- CHECK(msg);
-
- EXPECT_TRUE(msg->message().has_monotonic_sent_time());
- EXPECT_TRUE(msg->message().has_monotonic_remote_time());
-
- const monotonic_clock::time_point
- expected_oldest_local_monotonic_timestamps(
- chrono::nanoseconds(msg->message().monotonic_sent_time()));
- const monotonic_clock::time_point
- expected_oldest_remote_monotonic_timestamps(
- chrono::nanoseconds(msg->message().monotonic_remote_time()));
- const monotonic_clock::time_point
- expected_oldest_timestamp_monotonic_timestamps(
- chrono::nanoseconds(msg->message().monotonic_timestamp_time()));
-
- EXPECT_NE(expected_oldest_local_monotonic_timestamps,
- monotonic_clock::min_time);
- EXPECT_NE(expected_oldest_remote_monotonic_timestamps,
- monotonic_clock::min_time);
- EXPECT_NE(expected_oldest_timestamp_monotonic_timestamps,
- monotonic_clock::min_time);
-
- ++timestamp_file_count;
- // Since the log file is from the perspective of the other node, the indices
- // into the timestamp arrays are swapped relative to the pi1 files.
- const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_remote_monotonic_timestamps()->Get(
- 0)));
- const monotonic_clock::time_point oldest_local_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_local_monotonic_timestamps()->Get(
- 0)));
- const monotonic_clock::time_point
- oldest_remote_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->Get(0)));
- const monotonic_clock::time_point
- oldest_local_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->Get(0)));
- const monotonic_clock::time_point
- oldest_remote_reliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_remote_reliable_monotonic_timestamps()
- ->Get(0)));
- const monotonic_clock::time_point
- oldest_local_reliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_local_reliable_monotonic_timestamps()
- ->Get(0)));
- const monotonic_clock::time_point
- oldest_logger_remote_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_logger_remote_unreliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_logger_local_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_logger_local_unreliable_monotonic_timestamps()
- ->Get(1)));
-
- const Channel *channel =
- event_loop_factory_.configuration()->channels()->Get(
- msg->message().channel_index());
- const Connection *connection = configuration::ConnectionToNode(
- channel, configuration::GetNode(
- event_loop_factory_.configuration(),
- log_header->message().node()->name()->string_view()));
-
- const bool reliable = connection->time_to_live() == 0;
-
- SCOPED_TRACE(file);
- SCOPED_TRACE(aos::FlatbufferToJson(
- *log_header, {.multi_line = true, .max_vector_size = 100}));
-
- if (shared()) {
- // Confirm that the oldest timestamps match what we expect. Based on
- // what we are doing, we know that the oldest time is the first
- // message's time.
- //
- // This makes the test robust to both the split and combined config
- // tests.
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps)
- << file;
- EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
- expected_oldest_timestamp_monotonic_timestamps)
- << file;
-
- if (reliable) {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- } else {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- }
- break;
- case 1:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90000000));
- EXPECT_EQ(oldest_local_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90150000));
- EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90150000));
- EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90250000));
- if (reliable) {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(
- oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90000000));
- EXPECT_EQ(
- oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90150000));
- } else {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- }
- break;
- case 2:
- EXPECT_EQ(
- oldest_remote_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
- EXPECT_EQ(
- oldest_local_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
- EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps)
- << file;
- EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
- expected_oldest_timestamp_monotonic_timestamps)
- << file;
- if (reliable) {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- } else {
- EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- }
- break;
-
- case 3:
- EXPECT_EQ(
- oldest_remote_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
- EXPECT_EQ(
- oldest_local_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(
- oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
- EXPECT_EQ(
- oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
- break;
- default:
- FAIL();
- break;
- }
-
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 1:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 2:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 3:
- if (shared()) {
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- }
- [[fallthrough]];
- default:
- FAIL();
- break;
- }
- } else {
- switch (log_header->message().parts_index()) {
- case 0:
- if (reliable) {
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(
- oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(100150000))
- << file;
- EXPECT_EQ(
- oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(100250000))
- << file;
- } else {
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(
- oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90150000))
- << file;
- EXPECT_EQ(
- oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(90250000))
- << file;
- }
- break;
- case 1:
- if (reliable) {
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(
- oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
- EXPECT_EQ(
- oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
- } else {
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- expected_oldest_remote_monotonic_timestamps);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- expected_oldest_local_monotonic_timestamps);
- EXPECT_EQ(
- oldest_logger_remote_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(1323150000));
- EXPECT_EQ(
- oldest_logger_local_unreliable_monotonic_timestamps,
- monotonic_clock::epoch() + chrono::nanoseconds(10100250000));
- }
- break;
- default:
- FAIL();
- break;
- }
-
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- case 1:
- EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
- EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
- break;
- default:
- FAIL();
- break;
- }
- }
-
- continue;
- }
- EXPECT_EQ(
- log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- EXPECT_EQ(log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
- EXPECT_EQ(log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->Get(0),
- monotonic_clock::max_time.time_since_epoch().count());
-
- const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_remote_monotonic_timestamps()->Get(
- 1)));
- const monotonic_clock::time_point oldest_local_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
- const monotonic_clock::time_point
- oldest_remote_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_remote_unreliable_monotonic_timestamps()
- ->Get(1)));
- const monotonic_clock::time_point
- oldest_local_unreliable_monotonic_timestamps =
- monotonic_clock::time_point(chrono::nanoseconds(
- log_header->message()
- .oldest_local_unreliable_monotonic_timestamps()
- ->Get(1)));
- switch (log_header->message().parts_index()) {
- case 0:
- EXPECT_EQ(oldest_remote_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
- EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
- monotonic_clock::max_time);
- break;
- default:
- FAIL();
- break;
- }
- }
-
-  // Both the shared and non-shared configurations produce the same number of
-  // timestamp files.
-  EXPECT_EQ(timestamp_file_count, 4u);
-
- // Confirm that we can actually sort the resulting log and read it.
- {
- LogReader reader(SortParts(filenames));
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of
- // the log file.
- reader.Register(&log_reader_factory);
-
- log_reader_factory.Run();
-
- reader.Deregister();
- }
-}
-
-// Tests that we properly handle one direction of message_bridge being
-// unavailable.
-TEST_P(MultinodeLoggerTest, OneDirectionWithNegativeSlope) {
- pi1_->Disconnect(pi2_->node());
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
-
- time_converter_.AddMonotonic(
- {chrono::milliseconds(10000),
- chrono::milliseconds(10000) - chrono::milliseconds(1)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- }
-
- // Confirm that we can parse the result. LogReader has enough internal CHECKs
- // to confirm the right thing happened.
- ConfirmReadable(pi1_single_direction_logfiles_);
-}
-
-// Tests that we properly handle one direction of message_bridge being
-// unavailable.
-TEST_P(MultinodeLoggerTest, OneDirectionWithPositiveSlope) {
- pi1_->Disconnect(pi2_->node());
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(500)});
-
- time_converter_.AddMonotonic(
- {chrono::milliseconds(10000),
- chrono::milliseconds(10000) + chrono::milliseconds(1)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- }
-
- // Confirm that we can parse the result. LogReader has enough internal CHECKs
- // to confirm the right thing happened.
- ConfirmReadable(pi1_single_direction_logfiles_);
-}
-
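
The two slope tests above drive the clocks apart by feeding TestingTimeConverter asymmetric steps: pi1 advances 10000 ms while pi2 advances 10000 ms minus (or plus) 1 ms. As a rough, standalone illustration of the implied skew (this snippet is not part of the test library, just the arithmetic behind those AddMonotonic calls), that works out to about 100 ppm, i.e. 0.1 ms of drift per second:

    #include <chrono>
    #include <iostream>

    int main() {
      using std::chrono::milliseconds;
      using std::chrono::nanoseconds;

      // Steps fed to AddMonotonic in the negative-slope test above: pi1
      // advances 10000 ms while pi2 advances 1 ms less over the same
      // distributed interval.
      const nanoseconds pi1_step = milliseconds(10000);
      const nanoseconds pi2_step = milliseconds(10000) - milliseconds(1);

      // Implied skew of pi2's clock relative to pi1's, in parts per million.
      const double skew_ppm =
          1e6 * static_cast<double>((pi2_step - pi1_step).count()) /
          static_cast<double>(pi1_step.count());
      std::cout << "pi2 skew relative to pi1: " << skew_ppm << " ppm\n";  // -100
      return 0;
    }
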
-// Tests that we explode with a better error than an out-of-order error if
-// someone passes in the same part file twice.
-TEST_P(MultinodeLoggerTest, DuplicateLogFiles) {
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- }
-
- std::vector<std::string> duplicates;
- for (const std::string &f : pi1_single_direction_logfiles_) {
- duplicates.emplace_back(f);
- duplicates.emplace_back(f);
- }
- EXPECT_DEATH({ SortParts(duplicates); }, "Found duplicate parts in");
-}
-
-// Tests that we explode if someone loses a part out of the middle of a log.
-TEST_P(MultinodeLoggerTest, MissingPartsFromMiddle) {
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- aos::monotonic_clock::time_point last_rotation_time =
- pi1_logger.event_loop->monotonic_now();
- pi1_logger.logger->set_on_logged_period([&] {
- const auto now = pi1_logger.event_loop->monotonic_now();
- if (now > last_rotation_time + std::chrono::seconds(5)) {
- pi1_logger.logger->Rotate();
- last_rotation_time = now;
- }
- });
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- }
-
- std::vector<std::string> missing_parts;
-
- missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
- missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
- missing_parts.emplace_back(absl::StrCat(
- logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
-
- EXPECT_DEATH({ SortParts(missing_parts); },
- "Broken log, missing part files between");
-}
-
-// Tests that we properly handle a dead node. Do this by just disconnecting it
-// and only using one node's logs.
-TEST_P(MultinodeLoggerTest, DeadNode) {
- pi1_->Disconnect(pi2_->node());
- pi2_->Disconnect(pi1_->node());
- time_converter_.AddMonotonic(
- {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(10000));
- }
-
- // Confirm that we can parse the result. LogReader has enough internal CHECKs
- // to confirm the right thing happened.
- ConfirmReadable(MakePi1DeadNodeLogfiles());
-}
-
-constexpr std::string_view kCombinedConfigSha1(
- "5d73fe35bacaa59d24f8f0c1a806fe10b783b0fcc80809ee30a9db824e82538b");
-constexpr std::string_view kSplitConfigSha1(
- "f25e8f6f90d61f41c41517e652300566228b077e44cd86f1af2af4a9bed31ad4");
-constexpr std::string_view kReloggedSplitConfigSha1(
- "f1fabd629bdf8735c3d81bc791d7a454e8e636951c26cba6426545cbc97f911f");
-
-INSTANTIATE_TEST_SUITE_P(
- All, MultinodeLoggerTest,
- ::testing::Combine(
- ::testing::Values(
- ConfigParams{"multinode_pingpong_combined_config.json", true,
- kCombinedConfigSha1, kCombinedConfigSha1},
- ConfigParams{"multinode_pingpong_split_config.json", false,
- kSplitConfigSha1, kReloggedSplitConfigSha1}),
- ::testing::ValuesIn(SupportedCompressionAlgorithms())));
-
-INSTANTIATE_TEST_SUITE_P(
- All, MultinodeLoggerDeathTest,
- ::testing::Combine(
- ::testing::Values(
- ConfigParams{"multinode_pingpong_combined_config.json", true,
- kCombinedConfigSha1, kCombinedConfigSha1},
- ConfigParams{"multinode_pingpong_split_config.json", false,
- kSplitConfigSha1, kReloggedSplitConfigSha1}),
- ::testing::ValuesIn(SupportedCompressionAlgorithms())));
-
-// Tests that we can relog with a different config. This makes most sense when
-// you are trying to edit a log and want to use channel renaming + the original
-// config in the new log.
-TEST_P(MultinodeLoggerTest, LogDifferentConfig) {
- time_converter_.StartEqual();
- {
- LoggerState pi1_logger = MakeLogger(pi1_);
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- event_loop_factory_.RunFor(chrono::milliseconds(95));
-
- StartLogger(&pi1_logger);
- StartLogger(&pi2_logger);
-
- event_loop_factory_.RunFor(chrono::milliseconds(20000));
- }
-
- LogReader reader(SortParts(logfiles_));
- reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
-
- SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- log_reader_factory.set_send_delay(chrono::microseconds(0));
-
- // This sends out the fetched messages and advances time to the start of the
- // log file.
- reader.Register(&log_reader_factory);
-
- const Node *pi1 =
- configuration::GetNode(log_reader_factory.configuration(), "pi1");
- const Node *pi2 =
- configuration::GetNode(log_reader_factory.configuration(), "pi2");
-
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
- LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
- LOG(INFO) << "now pi1 "
- << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
- LOG(INFO) << "now pi2 "
- << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
-
- EXPECT_THAT(reader.LoggedNodes(),
- ::testing::ElementsAre(
- configuration::GetNode(reader.logged_configuration(), pi1),
- configuration::GetNode(reader.logged_configuration(), pi2)));
-
- reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
-
- // And confirm we can re-create a log again, while checking the contents.
- std::vector<std::string> log_files;
- {
- LoggerState pi1_logger =
- MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi1"),
- &log_reader_factory, reader.logged_configuration());
- LoggerState pi2_logger =
- MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi2"),
- &log_reader_factory, reader.logged_configuration());
-
- pi1_logger.StartLogger(tmp_dir_ + "/relogged1");
- pi2_logger.StartLogger(tmp_dir_ + "/relogged2");
-
- log_reader_factory.Run();
-
- for (auto &x : pi1_logger.log_namer->all_filenames()) {
- log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged1_", x));
- }
- for (auto &x : pi2_logger.log_namer->all_filenames()) {
- log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged2_", x));
- }
- }
-
- reader.Deregister();
-
- // And verify that we can run the LogReader over the relogged files without
- // hitting any fatal errors.
- {
- LogReader relogged_reader(SortParts(log_files));
- relogged_reader.Register();
-
- relogged_reader.event_loop_factory()->Run();
- }
-}
-
-// Tests that we properly replay a log where the start time for a node is before
-// any data on the node. This can happen if the logger starts before data is
-// published. While the scenario below is a bit convoluted, we have seen logs
-// like this generated out in the wild.
-TEST(MultinodeRebootLoggerTest, StartTimeBeforeData) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split3_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- NodeEventLoopFactory *const pi3 =
- event_loop_factory.GetNodeEventLoopFactory("pi3");
- const size_t pi3_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi3->node());
-
- const std::string kLogfile1_1 =
- aos::testing::TestTmpDir() + "/multi_logfile1/";
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- const std::string kLogfile2_2 =
- aos::testing::TestTmpDir() + "/multi_logfile2.2/";
- const std::string kLogfile3_1 =
- aos::testing::TestTmpDir() + "/multi_logfile3/";
- util::UnlinkRecursive(kLogfile1_1);
- util::UnlinkRecursive(kLogfile2_1);
- util::UnlinkRecursive(kLogfile2_2);
- util::UnlinkRecursive(kLogfile3_1);
- const UUID pi1_boot0 = UUID::Random();
- const UUID pi2_boot0 = UUID::Random();
- const UUID pi2_boot1 = UUID::Random();
- const UUID pi3_boot0 = UUID::Random();
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
- CHECK_EQ(pi3_index, 2u);
-
- time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
- time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
- time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
- time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch(),
- BootTimestamp::epoch()});
- const chrono::nanoseconds reboot_time = chrono::milliseconds(20000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp::epoch() + reboot_time,
- BootTimestamp{
- .boot = 1,
- .time = monotonic_clock::epoch() + chrono::milliseconds(1323)},
- BootTimestamp::epoch() + reboot_time});
- }
-
- // Make everything perfectly quiet.
- event_loop_factory.SkipTimingReport();
- event_loop_factory.DisableStatistics();
-
- std::vector<std::string> filenames;
- {
- LoggerState pi1_logger = LoggerState::MakeLogger(
- pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- LoggerState pi3_logger = LoggerState::MakeLogger(
- pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- {
- // And now start the logger.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- event_loop_factory.RunFor(chrono::milliseconds(1000));
-
- pi1_logger.StartLogger(kLogfile1_1);
- pi3_logger.StartLogger(kLogfile3_1);
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(10000));
-
- // Now that we've got a start time in the past, turn on data.
- event_loop_factory.EnableStatistics();
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
-
- pi2->AlwaysStart<Pong>("pong");
-
- event_loop_factory.RunFor(chrono::milliseconds(3000));
-
- pi2_logger.AppendAllFilenames(&filenames);
-
- // Stop logging on pi2 before rebooting and completely shut off all
- // messages on pi2.
- pi2->DisableStatistics();
- pi1->Disconnect(pi2->node());
- pi2->Disconnect(pi1->node());
- }
- event_loop_factory.RunFor(chrono::milliseconds(7000));
- // pi2 now reboots.
- {
- event_loop_factory.RunFor(chrono::milliseconds(1000));
-
- // Start logging again on pi2 after it is up.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi2_logger.StartLogger(kLogfile2_2);
-
- event_loop_factory.RunFor(chrono::milliseconds(10000));
- // And, now that we have a start time in the log, turn data back on.
- pi2->EnableStatistics();
- pi1->Connect(pi2->node());
- pi2->Connect(pi1->node());
-
- pi2->AlwaysStart<Pong>("pong");
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
-
- event_loop_factory.RunFor(chrono::milliseconds(3000));
-
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- pi1_logger.AppendAllFilenames(&filenames);
- pi3_logger.AppendAllFilenames(&filenames);
- }
-
- // Confirm that we can parse the result. LogReader has enough internal CHECKs
- // to confirm the right thing happened.
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- auto result = ConfirmReadable(filenames);
- EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::seconds(1)));
- EXPECT_THAT(result[0].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(34990350)));
-
- EXPECT_THAT(result[1].first,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::seconds(1),
- realtime_clock::epoch() + chrono::microseconds(3323000)));
- EXPECT_THAT(result[1].second,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::microseconds(13990200),
- realtime_clock::epoch() + chrono::microseconds(16313200)));
-
- EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::seconds(1)));
- EXPECT_THAT(result[2].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(34900150)));
-}
-
-// Tests that local data which arrives before remote data after a reboot is
-// properly replayed. We only trigger a reboot in the timestamp interpolation
-// function, while solving the timestamp problem, once we actually have a point
-// in the function. This originally only happened once a point passed the
-// noncausal filter. At the start of time for the second boot, if we aren't
-// careful, we will have messages which need to be published at times before
-// the boot. This happens when a local message is in the log before a forwarded
-// message, so there is no point in the interpolation function, which delays
-// the reboot. So, we need to recreate that situation and make sure it doesn't
-// come back.
-TEST(MultinodeRebootLoggerTest,
- LocalMessageBeforeRemoteBeforeStartAfterReboot) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split3_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- NodeEventLoopFactory *const pi3 =
- event_loop_factory.GetNodeEventLoopFactory("pi3");
- const size_t pi3_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi3->node());
-
- const std::string kLogfile1_1 =
- aos::testing::TestTmpDir() + "/multi_logfile1/";
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- const std::string kLogfile2_2 =
- aos::testing::TestTmpDir() + "/multi_logfile2.2/";
- const std::string kLogfile3_1 =
- aos::testing::TestTmpDir() + "/multi_logfile3/";
- util::UnlinkRecursive(kLogfile1_1);
- util::UnlinkRecursive(kLogfile2_1);
- util::UnlinkRecursive(kLogfile2_2);
- util::UnlinkRecursive(kLogfile3_1);
- const UUID pi1_boot0 = UUID::Random();
- const UUID pi2_boot0 = UUID::Random();
- const UUID pi2_boot1 = UUID::Random();
- const UUID pi3_boot0 = UUID::Random();
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
- CHECK_EQ(pi3_index, 2u);
-
- time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
- time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
- time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
- time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch(),
- BootTimestamp::epoch()});
- const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp::epoch() + reboot_time,
- BootTimestamp{.boot = 1,
- .time = monotonic_clock::epoch() + reboot_time +
- chrono::seconds(100)},
- BootTimestamp::epoch() + reboot_time});
- }
-
- std::vector<std::string> filenames;
- {
- LoggerState pi1_logger = LoggerState::MakeLogger(
- pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- LoggerState pi3_logger = LoggerState::MakeLogger(
- pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- {
- // And now start the logger.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- pi1_logger.StartLogger(kLogfile1_1);
- pi3_logger.StartLogger(kLogfile3_1);
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(1005));
-
- // Now that we've got a start time in the past, turn on data.
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
-
- pi2->AlwaysStart<Pong>("pong");
-
- event_loop_factory.RunFor(chrono::milliseconds(3000));
-
- pi2_logger.AppendAllFilenames(&filenames);
-
- // Disable any remote messages on pi2.
- pi1->Disconnect(pi2->node());
- pi2->Disconnect(pi1->node());
- }
- event_loop_factory.RunFor(chrono::milliseconds(995));
- // pi2 now reboots at 5 seconds.
- {
- event_loop_factory.RunFor(chrono::milliseconds(1000));
-
- // Make local stuff happen before we start logging and connect the remote.
- pi2->AlwaysStart<Pong>("pong");
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
- event_loop_factory.RunFor(chrono::milliseconds(1005));
-
- // Start logging again on pi2 after it is up.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi2_logger.StartLogger(kLogfile2_2);
-
- // And allow remote messages now that we have some local ones.
- pi1->Connect(pi2->node());
- pi2->Connect(pi1->node());
-
- event_loop_factory.RunFor(chrono::milliseconds(1000));
-
- event_loop_factory.RunFor(chrono::milliseconds(3000));
-
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- pi1_logger.AppendAllFilenames(&filenames);
- pi3_logger.AppendAllFilenames(&filenames);
- }
-
- // Confirm that we can parse the result. LogReader has enough internal CHECKs
- // to confirm the right thing happened.
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- auto result = ConfirmReadable(filenames);
-
- EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
- EXPECT_THAT(result[0].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(11000350)));
-
- EXPECT_THAT(result[1].first,
- ::testing::ElementsAre(
- realtime_clock::epoch(),
- realtime_clock::epoch() + chrono::microseconds(107005000)));
- EXPECT_THAT(result[1].second,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::microseconds(4000150),
- realtime_clock::epoch() + chrono::microseconds(111000200)));
-
- EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
- EXPECT_THAT(result[2].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(11000150)));
-
- auto start_stop_result = ConfirmReadable(
- filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
- realtime_clock::epoch() + chrono::milliseconds(3000));
-
- EXPECT_THAT(
- start_stop_result[0].first,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
- EXPECT_THAT(
- start_stop_result[0].second,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
- EXPECT_THAT(
- start_stop_result[1].first,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
- EXPECT_THAT(
- start_stop_result[1].second,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
- EXPECT_THAT(
- start_stop_result[2].first,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
- EXPECT_THAT(
- start_stop_result[2].second,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
-}
-
-// Tests that setting the start and stop flags across a reboot works as
-// expected.
-TEST(MultinodeRebootLoggerTest, RebootStartStopTimes) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split3_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- NodeEventLoopFactory *const pi3 =
- event_loop_factory.GetNodeEventLoopFactory("pi3");
- const size_t pi3_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi3->node());
-
- const std::string kLogfile1_1 =
- aos::testing::TestTmpDir() + "/multi_logfile1/";
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- const std::string kLogfile2_2 =
- aos::testing::TestTmpDir() + "/multi_logfile2.2/";
- const std::string kLogfile3_1 =
- aos::testing::TestTmpDir() + "/multi_logfile3/";
- util::UnlinkRecursive(kLogfile1_1);
- util::UnlinkRecursive(kLogfile2_1);
- util::UnlinkRecursive(kLogfile2_2);
- util::UnlinkRecursive(kLogfile3_1);
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
- CHECK_EQ(pi3_index, 2u);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch(),
- BootTimestamp::epoch()});
- const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp::epoch() + reboot_time,
- BootTimestamp{.boot = 1,
- .time = monotonic_clock::epoch() + reboot_time},
- BootTimestamp::epoch() + reboot_time});
- }
-
- std::vector<std::string> filenames;
- {
- LoggerState pi1_logger = LoggerState::MakeLogger(
- pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- LoggerState pi3_logger = LoggerState::MakeLogger(
- pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- {
- // And now start the logger.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- pi1_logger.StartLogger(kLogfile1_1);
- pi3_logger.StartLogger(kLogfile3_1);
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(1005));
-
- // Now that we've got a start time in the past, turn on data.
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
-
- pi2->AlwaysStart<Pong>("pong");
-
- event_loop_factory.RunFor(chrono::milliseconds(3000));
-
- pi2_logger.AppendAllFilenames(&filenames);
- }
- event_loop_factory.RunFor(chrono::milliseconds(995));
- // pi2 now reboots at 5 seconds.
- {
- event_loop_factory.RunFor(chrono::milliseconds(1000));
-
- // Make local stuff happen before we start logging and connect the remote.
- pi2->AlwaysStart<Pong>("pong");
- std::unique_ptr<aos::EventLoop> ping_event_loop =
- pi1->MakeEventLoop("ping");
- Ping ping(ping_event_loop.get());
- event_loop_factory.RunFor(chrono::milliseconds(5));
-
- // Start logging again on pi2 after it is up.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi2_logger.StartLogger(kLogfile2_2);
-
- event_loop_factory.RunFor(chrono::milliseconds(5000));
-
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- pi1_logger.AppendAllFilenames(&filenames);
- pi3_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- auto result = ConfirmReadable(filenames);
-
- EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
- EXPECT_THAT(result[0].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(11000350)));
-
- EXPECT_THAT(result[1].first,
- ::testing::ElementsAre(
- realtime_clock::epoch(),
- realtime_clock::epoch() + chrono::microseconds(6005000)));
- EXPECT_THAT(result[1].second,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::microseconds(4900150),
- realtime_clock::epoch() + chrono::microseconds(11000200)));
-
- EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
- EXPECT_THAT(result[2].second,
- ::testing::ElementsAre(realtime_clock::epoch() +
- chrono::microseconds(11000150)));
-
- // Confirm we observed the correct start and stop times. We should see the
- // reboot here.
- auto start_stop_result = ConfirmReadable(
- filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
- realtime_clock::epoch() + chrono::milliseconds(8000));
-
- EXPECT_THAT(
- start_stop_result[0].first,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
- EXPECT_THAT(
- start_stop_result[0].second,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
- EXPECT_THAT(start_stop_result[1].first,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::seconds(2),
- realtime_clock::epoch() + chrono::microseconds(6005000)));
- EXPECT_THAT(start_stop_result[1].second,
- ::testing::ElementsAre(
- realtime_clock::epoch() + chrono::microseconds(4900150),
- realtime_clock::epoch() + chrono::seconds(8)));
- EXPECT_THAT(
- start_stop_result[2].first,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
- EXPECT_THAT(
- start_stop_result[2].second,
- ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
-}
-
-// Tests that we properly handle one direction being down.
-TEST(MissingDirectionTest, OneDirection) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split4_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
-
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- std::vector<std::string> filenames;
-
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
- const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
- BootTimestamp::epoch() + reboot_time});
- }
-
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- const std::string kLogfile1_1 =
- aos::testing::TestTmpDir() + "/multi_logfile1.1/";
- util::UnlinkRecursive(kLogfile2_1);
- util::UnlinkRecursive(kLogfile1_1);
-
- pi2->Disconnect(pi1->node());
-
- pi1->AlwaysStart<Ping>("ping");
- pi2->AlwaysStart<Pong>("pong");
-
- {
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- event_loop_factory.RunFor(chrono::milliseconds(95));
-
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(6000));
-
- pi2->Connect(pi1->node());
-
- LoggerState pi1_logger = LoggerState::MakeLogger(
- pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi1_logger.StartLogger(kLogfile1_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(5000));
- pi1_logger.AppendAllFilenames(&filenames);
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle only one direction ever existing after a
-// reboot.
-TEST(MissingDirectionTest, OneDirectionAfterReboot) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split4_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
-
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- std::vector<std::string> filenames;
-
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
- const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
- BootTimestamp::epoch() + reboot_time});
- }
-
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- util::UnlinkRecursive(kLogfile2_1);
-
- pi1->AlwaysStart<Ping>("ping");
-
- // Pi1 sends to pi2. Reboot pi1, but don't let pi2 connect to pi1. This
- // makes it such that we will only get timestamps from pi1 -> pi2 on the
- // second boot.
- {
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- event_loop_factory.RunFor(chrono::milliseconds(95));
-
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(4000));
-
- pi2->Disconnect(pi1->node());
-
- event_loop_factory.RunFor(chrono::milliseconds(1000));
- pi1->AlwaysStart<Ping>("ping");
-
- event_loop_factory.RunFor(chrono::milliseconds(5000));
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle only one direction ever existing after a reboot
-// with only reliable data.
-TEST(MissingDirectionTest, OneDirectionAfterRebootReliable) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_split4_reliable_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
-
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- const size_t pi1_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi1->node());
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- const size_t pi2_index = configuration::GetNodeIndex(
- event_loop_factory.configuration(), pi2->node());
- std::vector<std::string> filenames;
-
- {
- CHECK_EQ(pi1_index, 0u);
- CHECK_EQ(pi2_index, 1u);
-
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
- const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
- time_converter.AddNextTimestamp(
- distributed_clock::epoch() + reboot_time,
- {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
- BootTimestamp::epoch() + reboot_time});
- }
-
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- util::UnlinkRecursive(kLogfile2_1);
-
- pi1->AlwaysStart<Ping>("ping");
-
- // Pi1 sends to pi2. Reboot pi1, but don't let pi2 connect to pi1. This
- // makes it such that we will only get timestamps from pi1 -> pi2 on the
- // second boot.
- {
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
-
- event_loop_factory.RunFor(chrono::milliseconds(95));
-
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::milliseconds(4000));
-
- pi2->Disconnect(pi1->node());
-
- event_loop_factory.RunFor(chrono::milliseconds(1000));
- pi1->AlwaysStart<Ping>("ping");
-
- event_loop_factory.RunFor(chrono::milliseconds(5000));
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- ConfirmReadable(filenames);
-}
-
-// Tests that we properly handle what used to be a time violation in one
-// direction. This can occur when one direction goes down after sending some
-// data, but the other keeps working. The down direction ends up resolving to a
-// straight line in the noncausal filter, where the direction which is still up
-// can cross that line. In reality, time progressed along just fine, but we
-// assumed the offset was a line when it could have deviated by up to
-// 1ms/second.
-TEST_P(MultinodeLoggerTest, OneDirectionTimeDrift) {
- std::vector<std::string> filenames;
-
- CHECK_EQ(pi1_index_, 0u);
- CHECK_EQ(pi2_index_, 1u);
-
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
- const chrono::nanoseconds before_disconnect_duration =
- time_converter_.AddMonotonic(
- {chrono::milliseconds(1000), chrono::milliseconds(1000)});
-
- const chrono::nanoseconds test_duration =
- time_converter_.AddMonotonic(
- {chrono::milliseconds(1000), chrono::milliseconds(1000)}) +
- time_converter_.AddMonotonic(
- {chrono::milliseconds(10000),
- chrono::milliseconds(10000) - chrono::milliseconds(5)}) +
- time_converter_.AddMonotonic(
- {chrono::milliseconds(10000),
- chrono::milliseconds(10000) + chrono::milliseconds(5)});
-
- const std::string kLogfile =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- util::UnlinkRecursive(kLogfile);
-
- {
- LoggerState pi2_logger = MakeLogger(pi2_);
- pi2_logger.StartLogger(kLogfile);
- event_loop_factory_.RunFor(before_disconnect_duration);
-
- pi2_->Disconnect(pi1_->node());
-
- event_loop_factory_.RunFor(test_duration);
- pi2_->Connect(pi1_->node());
-
- event_loop_factory_.RunFor(chrono::milliseconds(5000));
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- ConfirmReadable(filenames);
-}
-
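
The comment above OneDirectionTimeDrift quotes a bound of up to 1 ms of offset deviation per second while one direction of the link is down. As a back-of-the-envelope sketch (the numbers below are that bound and the rough length of the two drift legs in the test, not values read from any log), the bound allows about 20 ms of drift across the disconnected window, while the test itself only applies 5 ms per 10 s leg:

    #include <chrono>
    #include <iostream>

    int main() {
      using namespace std::chrono;

      // Bound quoted in the comment above: the offset may deviate by up to
      // 1 ms for every second that one direction of the link is down.
      constexpr milliseconds kSkewBoundPerSecond{1};
      // Roughly the two 10 s drift legs that run while pi2 is disconnected.
      constexpr seconds kDisconnectedWindow{20};

      const milliseconds worst_case_drift =
          kSkewBoundPerSecond * kDisconnectedWindow.count();
      // The test above only applies 5 ms over each 10 s leg (0.5 ms/s), well
      // inside this bound.
      std::cout << "worst-case offset drift: " << worst_case_drift.count()
                << " ms\n";  // 20 ms
      return 0;
    }
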
-// Tests that we can replay a logfile that has timestamps such that at least one
-// node's epoch is at a positive distributed_clock (and thus will have to be
-// booted after the other node(s)).
-TEST_P(MultinodeLoggerTest, StartOneNodeBeforeOther) {
- std::vector<std::string> filenames;
-
- CHECK_EQ(pi1_index_, 0u);
- CHECK_EQ(pi2_index_, 1u);
-
- time_converter_.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(), BootTimestamp::epoch()});
-
- const chrono::nanoseconds before_reboot_duration = chrono::milliseconds(1000);
- time_converter_.RebootAt(
- 0, distributed_clock::time_point(before_reboot_duration));
-
- const chrono::nanoseconds test_duration = time_converter_.AddMonotonic(
- {chrono::milliseconds(10000), chrono::milliseconds(10000)});
-
- const std::string kLogfile =
- aos::testing::TestTmpDir() + "/multi_logfile2.1/";
- util::UnlinkRecursive(kLogfile);
-
- pi2_->Disconnect(pi1_->node());
- pi1_->Disconnect(pi2_->node());
-
- {
- LoggerState pi2_logger = MakeLogger(pi2_);
-
- pi2_logger.StartLogger(kLogfile);
- event_loop_factory_.RunFor(before_reboot_duration);
-
- pi2_->Connect(pi1_->node());
- pi1_->Connect(pi2_->node());
-
- event_loop_factory_.RunFor(test_duration);
-
- pi2_logger.AppendAllFilenames(&filenames);
- }
-
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- ConfirmReadable(filenames);
-
- {
- LogReader reader(sorted_parts);
- SimulatedEventLoopFactory replay_factory(reader.configuration());
- reader.RegisterWithoutStarting(&replay_factory);
-
- NodeEventLoopFactory *const replay_node =
- reader.event_loop_factory()->GetNodeEventLoopFactory("pi1");
-
- std::unique_ptr<EventLoop> test_event_loop =
- replay_node->MakeEventLoop("test_reader");
- replay_node->OnStartup([replay_node]() {
- // Check that we didn't boot until at least t=0.
- CHECK_LE(monotonic_clock::epoch(), replay_node->monotonic_now());
- });
- test_event_loop->OnRun([&test_event_loop]() {
- // Check that we didn't boot until at least t=0.
- EXPECT_LE(monotonic_clock::epoch(), test_event_loop->monotonic_now());
- });
- reader.event_loop_factory()->Run();
- reader.Deregister();
- }
-}
-
-// Tests that we can sort properly when we have a loop of nodes where not all
-// of the logs exist at all points in time.
-TEST(MultinodeLoggerLoopTest, Loop) {
- aos::FlatbufferDetachedBuffer<aos::Configuration> config =
- aos::configuration::ReadConfig(ArtifactPath(
- "aos/events/logging/multinode_pingpong_triangle_split_config.json"));
- message_bridge::TestingTimeConverter time_converter(
- configuration::NodesCount(&config.message()));
- SimulatedEventLoopFactory event_loop_factory(&config.message());
- event_loop_factory.SetTimeConverter(&time_converter);
-
- NodeEventLoopFactory *const pi1 =
- event_loop_factory.GetNodeEventLoopFactory("pi1");
- NodeEventLoopFactory *const pi2 =
- event_loop_factory.GetNodeEventLoopFactory("pi2");
- NodeEventLoopFactory *const pi3 =
- event_loop_factory.GetNodeEventLoopFactory("pi3");
-
- const std::string kLogfile1_1 =
- aos::testing::TestTmpDir() + "/multi_logfile1/";
- const std::string kLogfile2_1 =
- aos::testing::TestTmpDir() + "/multi_logfile2/";
- const std::string kLogfile3_1 =
- aos::testing::TestTmpDir() + "/multi_logfile3/";
- util::UnlinkRecursive(kLogfile1_1);
- util::UnlinkRecursive(kLogfile2_1);
- util::UnlinkRecursive(kLogfile3_1);
-
- {
- // Make pi1 boot before everything else.
- time_converter.AddNextTimestamp(
- distributed_clock::epoch(),
- {BootTimestamp::epoch(),
- BootTimestamp::epoch() - chrono::milliseconds(100),
- BootTimestamp::epoch() - chrono::milliseconds(300)});
- }
-
-  // We want to set up a situation such that 2 of the 3 legs of the loop are
-  // very confident about time being X, and the third leg is pulling the
-  // average off to one side.
- //
- // It's easiest to visualize this in timestamp_plotter.
-
- std::vector<std::string> filenames;
- {
-    // Have pi1 send out a reliable message at startup. This sets up a message
-    // with a long forwarding time at the start to bias time.
- std::unique_ptr<EventLoop> pi1_event_loop = pi1->MakeEventLoop("ping");
- {
- aos::Sender<examples::Ping> ping_sender =
- pi1_event_loop->MakeSender<examples::Ping>("/reliable");
-
- aos::Sender<examples::Ping>::Builder builder = ping_sender.MakeBuilder();
- examples::Ping::Builder ping_builder =
- builder.MakeBuilder<examples::Ping>();
- CHECK_EQ(builder.Send(ping_builder.Finish()), RawSender::Error::kOk);
- }
-
- // Wait a while so there's enough data to let the worst case be rather off.
- event_loop_factory.RunFor(chrono::seconds(1000));
-
- // Now start a receiving node first. This sets up 2 tight bounds between 2
- // of the nodes.
- LoggerState pi2_logger = LoggerState::MakeLogger(
- pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi2_logger.StartLogger(kLogfile2_1);
-
- event_loop_factory.RunFor(chrono::seconds(100));
-
- // And now start the third leg.
- LoggerState pi3_logger = LoggerState::MakeLogger(
- pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi3_logger.StartLogger(kLogfile3_1);
-
- LoggerState pi1_logger = LoggerState::MakeLogger(
- pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
- pi1_logger.StartLogger(kLogfile1_1);
-
- event_loop_factory.RunFor(chrono::seconds(100));
-
- pi1_logger.AppendAllFilenames(&filenames);
- pi2_logger.AppendAllFilenames(&filenames);
- pi3_logger.AppendAllFilenames(&filenames);
- }
-
- // Make sure we can read this.
- const std::vector<LogFile> sorted_parts = SortParts(filenames);
- auto result = ConfirmReadable(filenames);
-}
-
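
The Loop test above leans on the fact that the three pairwise offsets around the pi1 -> pi2 -> pi3 -> pi1 loop have to be mutually consistent: summed around the loop they should come out near zero, so one poorly constrained leg shows up as a closure error that the solver has to spread somewhere. A toy illustration of that idea with made-up offsets (these numbers are hypothetical, not taken from the log or the solver):

    #include <chrono>
    #include <iostream>

    int main() {
      using namespace std::chrono;

      // Made-up pairwise offset estimates around the three-node loop. Two
      // legs are tightly constrained; the third is biased (here, by the
      // long-forwarding-time reliable message), so the loop does not close.
      const nanoseconds pi1_to_pi2 = milliseconds(100);
      const nanoseconds pi2_to_pi3 = milliseconds(200);
      const nanoseconds pi3_to_pi1 = milliseconds(-295);

      const nanoseconds closure_error = pi1_to_pi2 + pi2_to_pi3 + pi3_to_pi1;
      std::cout << "loop closure error: "
                << duration_cast<milliseconds>(closure_error).count()
                << " ms\n";  // 5 ms
      return 0;
    }
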
} // namespace testing
} // namespace logger
} // namespace aos
diff --git a/aos/events/logging/multinode_logger_test.cc b/aos/events/logging/multinode_logger_test.cc
new file mode 100644
index 0000000..bc1f5b8
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test.cc
@@ -0,0 +1,3594 @@
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/logging/multinode_logger_test_lib.h"
+#include "aos/events/message_counter.h"
+#include "aos/events/ping_lib.h"
+#include "aos/events/pong_lib.h"
+#include "aos/network/remote_message_generated.h"
+#include "aos/network/timestamp_generated.h"
+#include "aos/testing/tmpdir.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+namespace chrono = std::chrono;
+using aos::message_bridge::RemoteMessage;
+using aos::testing::ArtifactPath;
+using aos::testing::MessageCounter;
+
+constexpr std::string_view kCombinedConfigSha1(
+ "5d73fe35bacaa59d24f8f0c1a806fe10b783b0fcc80809ee30a9db824e82538b");
+constexpr std::string_view kSplitConfigSha1(
+ "f25e8f6f90d61f41c41517e652300566228b077e44cd86f1af2af4a9bed31ad4");
+constexpr std::string_view kReloggedSplitConfigSha1(
+ "f1fabd629bdf8735c3d81bc791d7a454e8e636951c26cba6426545cbc97f911f");
+
+INSTANTIATE_TEST_SUITE_P(
+ All, MultinodeLoggerTest,
+ ::testing::Combine(
+ ::testing::Values(
+ ConfigParams{"multinode_pingpong_combined_config.json", true,
+ kCombinedConfigSha1, kCombinedConfigSha1},
+ ConfigParams{"multinode_pingpong_split_config.json", false,
+ kSplitConfigSha1, kReloggedSplitConfigSha1}),
+ ::testing::ValuesIn(SupportedCompressionAlgorithms())));
+
+INSTANTIATE_TEST_SUITE_P(
+ All, MultinodeLoggerDeathTest,
+ ::testing::Combine(
+ ::testing::Values(
+ ConfigParams{"multinode_pingpong_combined_config.json", true,
+ kCombinedConfigSha1, kCombinedConfigSha1},
+ ConfigParams{"multinode_pingpong_split_config.json", false,
+ kSplitConfigSha1, kReloggedSplitConfigSha1}),
+ ::testing::ValuesIn(SupportedCompressionAlgorithms())));
+
+// Tests that we can write and read simple multi-node log files.
+TEST_P(MultinodeLoggerTest, SimpleMultiNode) {
+ std::vector<std::string> actual_filenames;
+ time_converter_.StartEqual();
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ ASSERT_THAT(actual_filenames,
+ ::testing::UnorderedElementsAreArray(logfiles_));
+
+ {
+ std::set<std::string> logfile_uuids;
+ std::set<std::string> parts_uuids;
+ // Confirm that we have the expected number of UUIDs for both the logfile
+ // UUIDs and parts UUIDs.
+ std::vector<SizePrefixedFlatbufferVector<LogFileHeader>> log_header;
+ for (std::string_view f : logfiles_) {
+ log_header.emplace_back(ReadHeader(f).value());
+ if (!log_header.back().message().has_configuration()) {
+ logfile_uuids.insert(
+ log_header.back().message().log_event_uuid()->str());
+ parts_uuids.insert(log_header.back().message().parts_uuid()->str());
+ }
+ }
+
+ EXPECT_EQ(logfile_uuids.size(), 2u);
+ if (shared()) {
+ EXPECT_EQ(parts_uuids.size(), 7u);
+ } else {
+ EXPECT_EQ(parts_uuids.size(), 8u);
+ }
+
+ // And confirm everything is on the correct node.
+ EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi1");
+ EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi1");
+ EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi1");
+
+ EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi2");
+
+ EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi2");
+
+ EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi1");
+ EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi1");
+
+ EXPECT_EQ(log_header[12].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[13].message().node()->name()->string_view(), "pi2");
+
+ if (shared()) {
+ EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi2");
+
+ EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
+ EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi1");
+ } else {
+ EXPECT_EQ(log_header[14].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[15].message().node()->name()->string_view(), "pi2");
+
+ EXPECT_EQ(log_header[16].message().node()->name()->string_view(), "pi1");
+ EXPECT_EQ(log_header[17].message().node()->name()->string_view(), "pi1");
+
+ EXPECT_EQ(log_header[18].message().node()->name()->string_view(), "pi2");
+ EXPECT_EQ(log_header[19].message().node()->name()->string_view(), "pi2");
+ }
+
+ // And the parts index matches.
+ EXPECT_EQ(log_header[2].message().parts_index(), 0);
+ EXPECT_EQ(log_header[3].message().parts_index(), 1);
+ EXPECT_EQ(log_header[4].message().parts_index(), 2);
+
+ EXPECT_EQ(log_header[5].message().parts_index(), 0);
+ EXPECT_EQ(log_header[6].message().parts_index(), 1);
+
+ EXPECT_EQ(log_header[7].message().parts_index(), 0);
+ EXPECT_EQ(log_header[8].message().parts_index(), 1);
+ EXPECT_EQ(log_header[9].message().parts_index(), 2);
+
+ EXPECT_EQ(log_header[10].message().parts_index(), 0);
+ EXPECT_EQ(log_header[11].message().parts_index(), 1);
+
+ EXPECT_EQ(log_header[12].message().parts_index(), 0);
+ EXPECT_EQ(log_header[13].message().parts_index(), 1);
+
+ if (shared()) {
+ EXPECT_EQ(log_header[14].message().parts_index(), 0);
+ EXPECT_EQ(log_header[15].message().parts_index(), 1);
+ EXPECT_EQ(log_header[16].message().parts_index(), 2);
+
+ EXPECT_EQ(log_header[17].message().parts_index(), 0);
+ EXPECT_EQ(log_header[18].message().parts_index(), 1);
+ } else {
+ EXPECT_EQ(log_header[14].message().parts_index(), 0);
+ EXPECT_EQ(log_header[15].message().parts_index(), 1);
+
+ EXPECT_EQ(log_header[16].message().parts_index(), 0);
+ EXPECT_EQ(log_header[17].message().parts_index(), 1);
+
+ EXPECT_EQ(log_header[18].message().parts_index(), 0);
+ EXPECT_EQ(log_header[19].message().parts_index(), 1);
+ }
+ }
+
+ const std::vector<LogFile> sorted_log_files = SortParts(logfiles_);
+ {
+ using ::testing::UnorderedElementsAre;
+ std::shared_ptr<const aos::Configuration> config =
+ sorted_log_files[0].config;
+
+ // Timing reports, pings
+ EXPECT_THAT(CountChannelsData(config, logfiles_[2]),
+ UnorderedElementsAre(
+ std::make_tuple("/pi1/aos",
+ "aos.message_bridge.ServerStatistics", 1),
+ std::make_tuple("/test", "aos.examples.Ping", 1)))
+ << " : " << logfiles_[2];
+ {
+ std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
+ std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 1),
+ std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
+ 1)};
+ if (!std::get<0>(GetParam()).shared) {
+ channel_counts.push_back(
+ std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp",
+ "aos.message_bridge.RemoteMessage", 1));
+ }
+ EXPECT_THAT(CountChannelsData(config, logfiles_[3]),
+ ::testing::UnorderedElementsAreArray(channel_counts))
+ << " : " << logfiles_[3];
+ }
+ {
+ std::vector<std::tuple<std::string, std::string, int>> channel_counts = {
+ std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 199),
+ std::make_tuple("/pi1/aos", "aos.message_bridge.ServerStatistics",
+ 20),
+ std::make_tuple("/pi1/aos", "aos.message_bridge.ClientStatistics",
+ 199),
+ std::make_tuple("/pi1/aos", "aos.timing.Report", 40),
+ std::make_tuple("/test", "aos.examples.Ping", 2000)};
+ if (!std::get<0>(GetParam()).shared) {
+ channel_counts.push_back(
+ std::make_tuple("/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp",
+ "aos.message_bridge.RemoteMessage", 199));
+ }
+ EXPECT_THAT(CountChannelsData(config, logfiles_[4]),
+ ::testing::UnorderedElementsAreArray(channel_counts))
+ << " : " << logfiles_[4];
+ }
+ // Timestamps for pong
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[2]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[2];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[3]),
+ UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
+ << " : " << logfiles_[3];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[4]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Pong", 2000),
+ std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200)))
+ << " : " << logfiles_[4];
+
+ // Pong data.
+ EXPECT_THAT(
+ CountChannelsData(config, logfiles_[5]),
+ UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 91)))
+ << " : " << logfiles_[5];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[6]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Pong", 1910)))
+ << " : " << logfiles_[6];
+
+ // No timestamps
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[5]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[5];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[6]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[6];
+
+ // Timing reports and pongs.
+ EXPECT_THAT(CountChannelsData(config, logfiles_[7]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.ServerStatistics", 1)))
+ << " : " << logfiles_[7];
+ EXPECT_THAT(
+ CountChannelsData(config, logfiles_[8]),
+ UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Pong", 1)))
+ << " : " << logfiles_[8];
+ EXPECT_THAT(
+ CountChannelsData(config, logfiles_[9]),
+ UnorderedElementsAre(
+ std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200),
+ std::make_tuple("/pi2/aos", "aos.message_bridge.ServerStatistics",
+ 20),
+ std::make_tuple("/pi2/aos", "aos.message_bridge.ClientStatistics",
+ 200),
+ std::make_tuple("/pi2/aos", "aos.timing.Report", 40),
+ std::make_tuple("/test", "aos.examples.Pong", 2000)))
+ << " : " << logfiles_[9];
+ // And ping timestamps.
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[7]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[7];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[8]),
+ UnorderedElementsAre(std::make_tuple("/test", "aos.examples.Ping", 1)))
+ << " : " << logfiles_[8];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[9]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Ping", 2000),
+ std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200)))
+ << " : " << logfiles_[9];
+
+ // And then test that the remotely logged timestamp data files only have
+ // timestamps in them.
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[10]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[10];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[11]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[11];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[12]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[12];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[13]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[13];
+
+ EXPECT_THAT(CountChannelsData(config, logfiles_[10]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
+ << " : " << logfiles_[10];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[11]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
+ << " : " << logfiles_[11];
+
+ EXPECT_THAT(CountChannelsData(config, logfiles_[12]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+ << " : " << logfiles_[12];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[13]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+ << " : " << logfiles_[13];
+
+ // Timestamps from pi2 on pi1, and the other way.
+ if (shared()) {
+ EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[14];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[15];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[16];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[17];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[18];
+
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Ping", 1)))
+ << " : " << logfiles_[14];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[15]),
+ UnorderedElementsAre(
+ std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 9),
+ std::make_tuple("/test", "aos.examples.Ping", 90)))
+ << " : " << logfiles_[15];
+ EXPECT_THAT(
+ CountChannelsTimestamp(config, logfiles_[16]),
+ UnorderedElementsAre(
+ std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 191),
+ std::make_tuple("/test", "aos.examples.Ping", 1910)))
+ << " : " << logfiles_[16];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+ << " : " << logfiles_[17];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+ << " : " << logfiles_[18];
+ } else {
+ EXPECT_THAT(CountChannelsData(config, logfiles_[14]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[14];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[15]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[15];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[16]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[16];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[17]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[17];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[18]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[18];
+ EXPECT_THAT(CountChannelsData(config, logfiles_[19]),
+ UnorderedElementsAre())
+ << " : " << logfiles_[19];
+
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[14]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi1/aos", "aos.message_bridge.Timestamp", 9)))
+ << " : " << logfiles_[14];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[15]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi1/aos", "aos.message_bridge.Timestamp", 191)))
+ << " : " << logfiles_[15];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[16]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 9)))
+ << " : " << logfiles_[16];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[17]),
+ UnorderedElementsAre(std::make_tuple(
+ "/pi2/aos", "aos.message_bridge.Timestamp", 191)))
+ << " : " << logfiles_[17];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[18]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Ping", 91)))
+ << " : " << logfiles_[18];
+ EXPECT_THAT(CountChannelsTimestamp(config, logfiles_[19]),
+ UnorderedElementsAre(
+ std::make_tuple("/test", "aos.examples.Ping", 1910)))
+ << " : " << logfiles_[19];
+ }
+ }
+
+ LogReader reader(sorted_log_files);
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+ LOG(INFO) << "now pi1 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+ LOG(INFO) << "now pi2 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_ping_count = 10;
+ int pi2_ping_count = 10;
+ int pi1_pong_count = 10;
+ int pi2_pong_count = 10;
+
+ // Confirm that the ping value matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping) << " at "
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+ EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+ pi1_ping_count * chrono::milliseconds(10) +
+ monotonic_clock::epoch());
+ EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+ pi1_ping_count * chrono::milliseconds(10) +
+ realtime_clock::epoch());
+ EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+ pi1_event_loop->context().monotonic_event_time);
+ EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+ pi1_event_loop->context().realtime_event_time);
+
+ ++pi1_ping_count;
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping) << " at "
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+ EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+ pi2_ping_count * chrono::milliseconds(10) +
+ monotonic_clock::epoch());
+ EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+ pi2_ping_count * chrono::milliseconds(10) +
+ realtime_clock::epoch());
+ EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time +
+ chrono::microseconds(150),
+ pi2_event_loop->context().monotonic_event_time);
+ EXPECT_EQ(pi2_event_loop->context().realtime_remote_time +
+ chrono::microseconds(150),
+ pi2_event_loop->context().realtime_event_time);
+ ++pi2_ping_count;
+ });
+
+ constexpr ssize_t kQueueIndexOffset = -9;
+ // Confirm that the ping and pong counts both match, and the value also
+ // matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_event_loop, &pi1_ping_count,
+ &pi1_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pi1_event_loop->context().remote_queue_index,
+ pi1_pong_count + kQueueIndexOffset);
+ EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
+ chrono::microseconds(200) +
+ pi1_pong_count * chrono::milliseconds(10) +
+ monotonic_clock::epoch());
+ EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
+ chrono::microseconds(200) +
+ pi1_pong_count * chrono::milliseconds(10) +
+ realtime_clock::epoch());
+
+ EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time +
+ chrono::microseconds(150),
+ pi1_event_loop->context().monotonic_event_time);
+ EXPECT_EQ(pi1_event_loop->context().realtime_remote_time +
+ chrono::microseconds(150),
+ pi1_event_loop->context().realtime_event_time);
+
+ EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+ ++pi1_pong_count;
+ EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_event_loop, &pi2_ping_count,
+ &pi2_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pi2_event_loop->context().remote_queue_index,
+ pi2_pong_count + kQueueIndexOffset);
+
+ EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+ chrono::microseconds(200) +
+ pi2_pong_count * chrono::milliseconds(10) +
+ monotonic_clock::epoch());
+ EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+ chrono::microseconds(200) +
+ pi2_pong_count * chrono::milliseconds(10) +
+ realtime_clock::epoch());
+
+ EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
+ pi2_event_loop->context().monotonic_event_time);
+ EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
+ pi2_event_loop->context().realtime_event_time);
+
+ EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+ ++pi2_pong_count;
+ EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+ });
+
+ log_reader_factory.Run();
+ EXPECT_EQ(pi1_ping_count, 2010);
+ EXPECT_EQ(pi2_ping_count, 2010);
+ EXPECT_EQ(pi1_pong_count, 2010);
+ EXPECT_EQ(pi2_pong_count, 2010);
+
+ reader.Deregister();
+}
+
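+// Note: a rough accounting of the totals above, assuming the 10 ms ping
+// period asserted in the watchers: the counters start at 10 to cover traffic
+// sent before the log begins, and 2010 - 10 = 2000 messages at 10 ms each
+// corresponds to the ~20 s of logged data.
+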
+// Tests that if we feed the replay a mismatched node list, we die in the
+// LogReader constructor.
+TEST_P(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+  // Test that, if we add an additional node to the replay config, the log
+  // reader complains about the mismatch in the number of nodes.
+ FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
+ configuration::MergeWithConfig(&config_.message(), R"({
+ "nodes": [
+ {
+ "name": "extra-node"
+ }
+ ]
+ }
+ )");
+
+ const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+ EXPECT_DEATH(LogReader(sorted_parts, &extra_nodes_config.message()),
+ "Log file and replay config need to have matching nodes lists.");
+}
+
+// Tests that we can read log files that don't start at the same monotonic
+// time.
+TEST_P(MultinodeLoggerTest, StaggeredStart) {
+ time_converter_.StartEqual();
+ std::vector<std::string> actual_filenames;
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(200));
+
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ // Since we delay starting pi2, it already knows about all the timestamps so
+ // we don't end up with extra parts.
+ LogReader reader(SortParts(actual_filenames));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_ping_count = 30;
+ int pi2_ping_count = 30;
+ int pi1_pong_count = 30;
+ int pi2_pong_count = 30;
+
+ // Confirm that the ping value matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+
+ ++pi1_ping_count;
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+ ++pi2_ping_count;
+ });
+
+ // Confirm that the ping and pong counts both match, and the value also
+ // matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_event_loop, &pi1_ping_count,
+ &pi1_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+ ++pi1_pong_count;
+ EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_event_loop, &pi2_ping_count,
+ &pi2_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+ ++pi2_pong_count;
+ EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+ });
+
+ log_reader_factory.Run();
+ EXPECT_EQ(pi1_ping_count, 2030);
+ EXPECT_EQ(pi2_ping_count, 2030);
+ EXPECT_EQ(pi1_pong_count, 2030);
+ EXPECT_EQ(pi2_pong_count, 2030);
+
+ reader.Deregister();
+}
+
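+// Note: a rough accounting of the 2030 totals above, assuming the same 10 ms
+// ping period as the earlier tests: the 20 s run with both loggers active
+// produces ~2000 pings, and the counters start at 30 to cover the ~295 ms of
+// traffic sent before the staggered log starts.
+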
+// Tests that we can read log files where the monotonic clocks drift and don't
+// match correctly. While we are here, also test that logs with different
+// ending times are readable.
+TEST_P(MultinodeLoggerTest, MismatchedClocks) {
+ // TODO(austin): Negate...
+ const chrono::nanoseconds initial_pi2_offset = chrono::seconds(1000);
+
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + initial_pi2_offset});
+  // Wait for 95 ms (~0.1 seconds minus 1/2 of the ping/pong period), and set
+  // the skew to 200 us/s.
+ const chrono::nanoseconds startup_sleep1 = time_converter_.AddMonotonic(
+ {chrono::milliseconds(95),
+ chrono::milliseconds(95) - chrono::nanoseconds(200) * 95});
+ // Run another 200 ms to have one logger start first.
+ const chrono::nanoseconds startup_sleep2 = time_converter_.AddMonotonic(
+ {chrono::milliseconds(200), chrono::milliseconds(200)});
+  // Slew one way then the other at the same 200 us/s slew rate. Make sure we
+  // go far enough to cause problems if this isn't accounted for.
+ const chrono::nanoseconds logger_run1 = time_converter_.AddMonotonic(
+ {chrono::milliseconds(20000),
+ chrono::milliseconds(20000) - chrono::nanoseconds(200) * 20000});
+ const chrono::nanoseconds logger_run2 = time_converter_.AddMonotonic(
+ {chrono::milliseconds(40000),
+ chrono::milliseconds(40000) + chrono::nanoseconds(200) * 40000});
+ const chrono::nanoseconds logger_run3 = time_converter_.AddMonotonic(
+ {chrono::milliseconds(400), chrono::milliseconds(400)});
+
+ {
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ LOG(INFO) << "pi2 times: " << pi2_->monotonic_now() << " "
+ << pi2_->realtime_now() << " distributed "
+ << pi2_->ToDistributedClock(pi2_->monotonic_now());
+
+ LOG(INFO) << "pi2_ times: " << pi2_->monotonic_now() << " "
+ << pi2_->realtime_now() << " distributed "
+ << pi2_->ToDistributedClock(pi2_->monotonic_now());
+
+ event_loop_factory_.RunFor(startup_sleep1);
+
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(startup_sleep2);
+
+ {
+ // Run pi1's logger for only part of the time.
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ StartLogger(&pi1_logger);
+ event_loop_factory_.RunFor(logger_run1);
+
+ // Make sure we slewed time far enough so that the difference is greater
+ // than the network delay. This confirms that if we sort incorrectly, it
+ // would show in the results.
+ EXPECT_LT(
+ (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
+ -event_loop_factory_.send_delay() -
+ event_loop_factory_.network_delay());
+
+ event_loop_factory_.RunFor(logger_run2);
+
+ // And now check that we went far enough the other way to make sure we
+ // cover both problems.
+ EXPECT_GT(
+ (pi2_->monotonic_now() - pi1_->monotonic_now()) - initial_pi2_offset,
+ event_loop_factory_.send_delay() +
+ event_loop_factory_.network_delay());
+ }
+
+ // And log a bit more on pi2.
+ event_loop_factory_.RunFor(logger_run3);
+ }
+
+ LogReader reader(
+ SortParts(MakeLogFiles(logfile_base1_, logfile_base2_, 3, 2)));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+ LOG(INFO) << "now pi1 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+ LOG(INFO) << "now pi2 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+ LOG(INFO) << "Done registering (pi1) "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now()
+ << " "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->realtime_now();
+ LOG(INFO) << "Done registering (pi2) "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now()
+ << " "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->realtime_now();
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_ping_count = 30;
+ int pi2_ping_count = 30;
+ int pi1_pong_count = 30;
+ int pi2_pong_count = 30;
+
+ // Confirm that the ping value matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi1_ping_count + 1);
+
+ ++pi1_ping_count;
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
+ VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+ EXPECT_EQ(ping.value(), pi2_ping_count + 1);
+
+ ++pi2_ping_count;
+ });
+
+ // Confirm that the ping and pong counts both match, and the value also
+ // matches.
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_event_loop, &pi1_ping_count,
+ &pi1_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
+ << pi1_event_loop->context().monotonic_remote_time << " -> "
+ << pi1_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pong.value(), pi1_pong_count + 1);
+ ++pi1_pong_count;
+ EXPECT_EQ(pi1_ping_count, pi1_pong_count);
+ });
+ pi2_event_loop->MakeWatcher(
+ "/test", [&pi2_event_loop, &pi2_ping_count,
+ &pi2_pong_count](const examples::Pong &pong) {
+ VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
+ << pi2_event_loop->context().monotonic_remote_time << " -> "
+ << pi2_event_loop->context().monotonic_event_time;
+
+ EXPECT_EQ(pong.value(), pi2_pong_count + 1);
+ ++pi2_pong_count;
+ EXPECT_EQ(pi2_ping_count, pi2_pong_count);
+ });
+
+ log_reader_factory.Run();
+ EXPECT_EQ(pi1_ping_count, 6030);
+ EXPECT_EQ(pi2_ping_count, 6030);
+ EXPECT_EQ(pi1_pong_count, 6030);
+ EXPECT_EQ(pi2_pong_count, 6030);
+
+ reader.Deregister();
+}
+
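+// Note: a rough accounting of the numbers above. The 200 ns-per-1-ms skew is
+// 200 us/s, so logger_run1 (20 s) accumulates ~4 ms of drift, which is meant
+// to exceed send_delay + network_delay as the EXPECT_LT/EXPECT_GT checks
+// confirm; logger_run2 then slews ~8 ms back the other way. The 6030 totals
+// correspond to logger_run1 + logger_run2 = 60 s of traffic at the assumed
+// 10 ms ping period, plus the starting offset of 30.
+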
+// Tests that we can sort a bunch of parts into the predetermined sorted list
+// of parts.
+TEST_P(MultinodeLoggerTest, SortParts) {
+ time_converter_.StartEqual();
+ // Make a bunch of parts.
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(2000));
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+ VerifyParts(sorted_parts);
+}
+
+// Tests that we can sort a bunch of parts with an empty part. We should ignore
+// it and remove it from the sorted list.
+TEST_P(MultinodeLoggerTest, SortEmptyParts) {
+ time_converter_.StartEqual();
+ // Make a bunch of parts.
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(2000));
+ }
+
+ // TODO(austin): Should we flip out if the file can't open?
+ const std::string kEmptyFile("foobarinvalidfiledoesnotexist" + Extension());
+
+ aos::util::WriteStringToFileOrDie(kEmptyFile, "");
+ logfiles_.emplace_back(kEmptyFile);
+
+ const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+ VerifyParts(sorted_parts, {kEmptyFile});
+}
+
+// Tests that we can sort a bunch of parts when the end of one file is
+// missing. We should use the part we can read.
+TEST_P(MultinodeLoggerTest, SortTruncatedParts) {
+ std::vector<std::string> actual_filenames;
+ time_converter_.StartEqual();
+ // Make a bunch of parts.
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(2000));
+
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ ASSERT_THAT(actual_filenames,
+ ::testing::UnorderedElementsAreArray(logfiles_));
+
+ // Strip off the end of one of the files. Pick one with a lot of data.
+ // For snappy, needs to have enough data to be >1 chunk of compressed data so
+ // that we don't corrupt the entire log part.
+ ::std::string compressed_contents =
+ aos::util::ReadFileToStringOrDie(logfiles_[4]);
+
+ aos::util::WriteStringToFileOrDie(
+ logfiles_[4],
+ compressed_contents.substr(0, compressed_contents.size() - 100));
+
+ const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
+ VerifyParts(sorted_parts);
+}
+
+// Tests that if we remap a remapped channel, it shows up correctly.
+TEST_P(MultinodeLoggerTest, RemapLoggedChannel) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader reader(SortParts(logfiles_));
+
+ // Remap just on pi1.
+ reader.RemapLoggedChannel<aos::timing::Report>(
+ "/aos", configuration::GetNode(reader.configuration(), "pi1"));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ std::vector<const Channel *> remapped_channels = reader.RemappedChannels();
+ // Note: An extra channel gets remapped automatically due to a timestamp
+ // channel being LOCAL_LOGGER'd.
+ ASSERT_EQ(remapped_channels.size(), std::get<0>(GetParam()).shared ? 1u : 2u);
+ EXPECT_EQ(remapped_channels[0]->name()->string_view(), "/original/pi1/aos");
+ EXPECT_EQ(remapped_channels[0]->type()->string_view(), "aos.timing.Report");
+ if (!std::get<0>(GetParam()).shared) {
+ EXPECT_EQ(remapped_channels[1]->name()->string_view(),
+ "/original/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp");
+ EXPECT_EQ(remapped_channels[1]->type()->string_view(),
+ "aos.message_bridge.RemoteMessage");
+ }
+
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ // Confirm we can read the data on the remapped channel, just for pi1. Nothing
+ // else should have moved.
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ pi1_event_loop->SkipTimingReport();
+ std::unique_ptr<EventLoop> full_pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ full_pi1_event_loop->SkipTimingReport();
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+ pi2_event_loop->SkipTimingReport();
+
+ MessageCounter<aos::timing::Report> pi1_timing_report(pi1_event_loop.get(),
+ "/aos");
+ MessageCounter<aos::timing::Report> full_pi1_timing_report(
+ full_pi1_event_loop.get(), "/pi1/aos");
+ MessageCounter<aos::timing::Report> pi1_original_timing_report(
+ pi1_event_loop.get(), "/original/aos");
+ MessageCounter<aos::timing::Report> full_pi1_original_timing_report(
+ full_pi1_event_loop.get(), "/original/pi1/aos");
+ MessageCounter<aos::timing::Report> pi2_timing_report(pi2_event_loop.get(),
+ "/aos");
+
+ log_reader_factory.Run();
+
+ EXPECT_EQ(pi1_timing_report.count(), 0u);
+ EXPECT_EQ(full_pi1_timing_report.count(), 0u);
+ EXPECT_NE(pi1_original_timing_report.count(), 0u);
+ EXPECT_NE(full_pi1_original_timing_report.count(), 0u);
+ EXPECT_NE(pi2_timing_report.count(), 0u);
+
+ reader.Deregister();
+}
+
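+// Note: as the expectations above show, RemapLoggedChannel() replays the
+// logged data under a "/original"-prefixed channel name so anything produced
+// live during replay can still use the original name. A minimal usage sketch
+// (illustrative only, mirroring the test above):
+//   LogReader reader(SortParts(logfiles));
+//   reader.RemapLoggedChannel<aos::timing::Report>(
+//       "/aos", configuration::GetNode(reader.configuration(), "pi1"));
+//   // Logged reports for pi1 now show up on "/original/aos".
+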
+// Tests that we can remap a forwarded channel as well.
+TEST_P(MultinodeLoggerTest, RemapForwardedLoggedChannel) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader reader(SortParts(logfiles_));
+
+ reader.RemapLoggedChannel<examples::Ping>("/test");
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+  // Confirm we can read the data on the remapped channel on both nodes.
+  // Nothing else should have moved.
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ pi1_event_loop->SkipTimingReport();
+ std::unique_ptr<EventLoop> full_pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ full_pi1_event_loop->SkipTimingReport();
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+ pi2_event_loop->SkipTimingReport();
+
+ MessageCounter<examples::Ping> pi1_ping(pi1_event_loop.get(), "/test");
+ MessageCounter<examples::Ping> pi2_ping(pi2_event_loop.get(), "/test");
+ MessageCounter<examples::Ping> pi1_original_ping(pi1_event_loop.get(),
+ "/original/test");
+ MessageCounter<examples::Ping> pi2_original_ping(pi2_event_loop.get(),
+ "/original/test");
+
+ std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
+ pi1_original_ping_timestamp;
+ std::unique_ptr<MessageCounter<message_bridge::RemoteMessage>>
+ pi1_ping_timestamp;
+ if (!shared()) {
+ pi1_original_ping_timestamp =
+ std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
+ pi1_event_loop.get(),
+ "/pi1/aos/remote_timestamps/pi2/original/test/aos-examples-Ping");
+ pi1_ping_timestamp =
+ std::make_unique<MessageCounter<message_bridge::RemoteMessage>>(
+ pi1_event_loop.get(),
+ "/pi1/aos/remote_timestamps/pi2/test/aos-examples-Ping");
+ }
+
+ log_reader_factory.Run();
+
+ EXPECT_EQ(pi1_ping.count(), 0u);
+ EXPECT_EQ(pi2_ping.count(), 0u);
+ EXPECT_NE(pi1_original_ping.count(), 0u);
+ EXPECT_NE(pi2_original_ping.count(), 0u);
+ if (!shared()) {
+ EXPECT_NE(pi1_original_ping_timestamp->count(), 0u);
+ EXPECT_EQ(pi1_ping_timestamp->count(), 0u);
+ }
+
+ reader.Deregister();
+}
+
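+// Note: in the non-shared-timestamp configuration, remapping a forwarded
+// channel also renames its per-channel remote timestamp channel, which is why
+// the ".../pi2/original/test/aos-examples-Ping" counter above sees data while
+// the old ".../pi2/test/aos-examples-Ping" name stays empty.
+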
+// Tests that we observe all the same events in log replay (for a given node)
+// whether we just register an event loop for that node or if we register a full
+// event loop factory.
+TEST_P(MultinodeLoggerTest, SingleNodeReplay) {
+ time_converter_.StartEqual();
+ constexpr chrono::milliseconds kStartupDelay(95);
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(kStartupDelay);
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader full_reader(SortParts(logfiles_));
+ LogReader single_node_reader(SortParts(logfiles_));
+
+ SimulatedEventLoopFactory full_factory(full_reader.configuration());
+ SimulatedEventLoopFactory single_node_factory(
+ single_node_reader.configuration());
+ single_node_factory.SkipTimingReport();
+ single_node_factory.DisableStatistics();
+ std::unique_ptr<EventLoop> replay_event_loop =
+ single_node_factory.GetNodeEventLoopFactory("pi1")->MakeEventLoop(
+ "log_reader");
+
+ full_reader.Register(&full_factory);
+ single_node_reader.Register(replay_event_loop.get());
+
+ const Node *full_pi1 =
+ configuration::GetNode(full_factory.configuration(), "pi1");
+
+  // Track every message readable on pi1 when replaying with the full event
+  // loop factory so we can compare against the single-node replay below.
+ std::unique_ptr<EventLoop> full_event_loop =
+ full_factory.MakeEventLoop("test", full_pi1);
+ full_event_loop->SkipTimingReport();
+ full_event_loop->SkipAosLog();
+ // maps are indexed on channel index.
+ // observed_messages: {channel_index: [(message_sent_time, was_fetched),...]}
+ std::map<size_t, std::vector<std::pair<monotonic_clock::time_point, bool>>>
+ observed_messages;
+ std::map<size_t, std::unique_ptr<RawFetcher>> fetchers;
+ for (size_t ii = 0; ii < full_event_loop->configuration()->channels()->size();
+ ++ii) {
+ const Channel *channel =
+ full_event_loop->configuration()->channels()->Get(ii);
+    // We currently don't support replaying remote timestamp channels in
+    // realtime replay (unless the remote timestamp channel was itself logged,
+    // i.e. not marked NOT_LOGGED, in which case it gets auto-remapped and
+    // replayed on a /original channel).
+ if (channel->name()->string_view().find("remote_timestamp") !=
+ std::string_view::npos &&
+ channel->name()->string_view().find("/original") ==
+ std::string_view::npos) {
+ continue;
+ }
+ if (configuration::ChannelIsReadableOnNode(channel, full_pi1)) {
+ observed_messages[ii] = {};
+ fetchers[ii] = full_event_loop->MakeRawFetcher(channel);
+ full_event_loop->OnRun([ii, &observed_messages, &fetchers]() {
+ if (fetchers[ii]->Fetch()) {
+ observed_messages[ii].push_back(std::make_pair(
+ fetchers[ii]->context().monotonic_event_time, true));
+ }
+ });
+ full_event_loop->MakeRawNoArgWatcher(
+ channel, [ii, &observed_messages](const Context &context) {
+ observed_messages[ii].push_back(
+ std::make_pair(context.monotonic_event_time, false));
+ });
+ }
+ }
+
+ full_factory.Run();
+ fetchers.clear();
+ full_reader.Deregister();
+
+ const Node *single_node_pi1 =
+ configuration::GetNode(single_node_factory.configuration(), "pi1");
+ std::map<size_t, std::unique_ptr<RawFetcher>> single_node_fetchers;
+
+ std::unique_ptr<EventLoop> single_node_event_loop =
+ single_node_factory.MakeEventLoop("test", single_node_pi1);
+ single_node_event_loop->SkipTimingReport();
+ single_node_event_loop->SkipAosLog();
+ for (size_t ii = 0;
+ ii < single_node_event_loop->configuration()->channels()->size(); ++ii) {
+ const Channel *channel =
+ single_node_event_loop->configuration()->channels()->Get(ii);
+ single_node_factory.DisableForwarding(channel);
+ if (configuration::ChannelIsReadableOnNode(channel, single_node_pi1)) {
+ single_node_fetchers[ii] =
+ single_node_event_loop->MakeRawFetcher(channel);
+ single_node_event_loop->OnRun([channel, ii, &single_node_fetchers]() {
+ EXPECT_FALSE(single_node_fetchers[ii]->Fetch())
+ << "Single EventLoop replay doesn't support pre-loading fetchers. "
+ << configuration::StrippedChannelToString(channel);
+ });
+ single_node_event_loop->MakeRawNoArgWatcher(
+ channel, [ii, &observed_messages, channel,
+ kStartupDelay](const Context &context) {
+ if (observed_messages[ii].empty()) {
+ FAIL() << "Observed extra message at "
+ << context.monotonic_event_time << " on "
+ << configuration::StrippedChannelToString(channel);
+ return;
+ }
+ const std::pair<monotonic_clock::time_point, bool> &message =
+ observed_messages[ii].front();
+ if (message.second) {
+ EXPECT_LE(message.first,
+ context.monotonic_event_time + kStartupDelay)
+ << "Mismatched message times " << context.monotonic_event_time
+ << " and " << message.first << " on "
+ << configuration::StrippedChannelToString(channel);
+ } else {
+ EXPECT_EQ(message.first,
+ context.monotonic_event_time + kStartupDelay)
+ << "Mismatched message times " << context.monotonic_event_time
+ << " and " << message.first << " on "
+ << configuration::StrippedChannelToString(channel);
+ }
+ observed_messages[ii].erase(observed_messages[ii].begin());
+ });
+ }
+ }
+
+ single_node_factory.Run();
+
+ single_node_fetchers.clear();
+
+ single_node_reader.Deregister();
+
+ for (const auto &pair : observed_messages) {
+ EXPECT_TRUE(pair.second.empty())
+ << "Missed " << pair.second.size() << " messages on "
+ << configuration::StrippedChannelToString(
+ single_node_event_loop->configuration()->channels()->Get(
+ pair.first));
+ }
+}
+
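+// Note: the bookkeeping above separates messages the full-factory replay
+// picked up with a pre-start Fetch() (was_fetched == true, compared with
+// EXPECT_LE) from ones seen live by a watcher (compared exactly). The
+// kStartupDelay shift presumably accounts for the 95 ms the factories ran
+// before logging started, which the bare-EventLoop replay starts after.
+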
+// Tests that we properly recreate forwarded timestamps when replaying a log.
+// This should be enough that we can then re-run the logger and get a valid log
+// back.
+TEST_P(MultinodeLoggerTest, MessageHeader) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader reader(SortParts(logfiles_));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+ LOG(INFO) << "now pi1 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+ LOG(INFO) << "now pi2 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi1_fetcher =
+ pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
+ aos::Fetcher<message_bridge::Timestamp> pi1_timestamp_on_pi2_fetcher =
+ pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi1/aos");
+
+ aos::Fetcher<examples::Ping> ping_on_pi1_fetcher =
+ pi1_event_loop->MakeFetcher<examples::Ping>("/test");
+ aos::Fetcher<examples::Ping> ping_on_pi2_fetcher =
+ pi2_event_loop->MakeFetcher<examples::Ping>("/test");
+
+ aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi2_fetcher =
+ pi2_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
+ aos::Fetcher<message_bridge::Timestamp> pi2_timestamp_on_pi1_fetcher =
+ pi1_event_loop->MakeFetcher<message_bridge::Timestamp>("/pi2/aos");
+
+ aos::Fetcher<examples::Pong> pong_on_pi2_fetcher =
+ pi2_event_loop->MakeFetcher<examples::Pong>("/test");
+ aos::Fetcher<examples::Pong> pong_on_pi1_fetcher =
+ pi1_event_loop->MakeFetcher<examples::Pong>("/test");
+
+ const size_t pi1_timestamp_channel = configuration::ChannelIndex(
+ pi1_event_loop->configuration(), pi1_timestamp_on_pi1_fetcher.channel());
+ const size_t ping_timestamp_channel = configuration::ChannelIndex(
+ pi2_event_loop->configuration(), ping_on_pi2_fetcher.channel());
+
+ const size_t pi2_timestamp_channel = configuration::ChannelIndex(
+ pi2_event_loop->configuration(), pi2_timestamp_on_pi2_fetcher.channel());
+ const size_t pong_timestamp_channel = configuration::ChannelIndex(
+ pi1_event_loop->configuration(), pong_on_pi1_fetcher.channel());
+
+ const chrono::nanoseconds network_delay = event_loop_factory_.network_delay();
+ const chrono::nanoseconds send_delay = event_loop_factory_.send_delay();
+
+ for (std::pair<int, std::string> channel :
+ shared()
+ ? std::vector<
+ std::pair<int, std::string>>{{-1,
+ "/aos/remote_timestamps/pi2"}}
+ : std::vector<std::pair<int, std::string>>{
+ {pi1_timestamp_channel,
+ "/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp"},
+ {ping_timestamp_channel,
+ "/aos/remote_timestamps/pi2/test/aos-examples-Ping"}}) {
+ pi1_event_loop->MakeWatcher(
+ channel.second,
+ [&pi1_event_loop, &pi2_event_loop, pi1_timestamp_channel,
+ ping_timestamp_channel, &pi1_timestamp_on_pi1_fetcher,
+ &pi1_timestamp_on_pi2_fetcher, &ping_on_pi1_fetcher,
+ &ping_on_pi2_fetcher, network_delay, send_delay,
+ channel_index = channel.first](const RemoteMessage &header) {
+ const aos::monotonic_clock::time_point header_monotonic_sent_time(
+ chrono::nanoseconds(header.monotonic_sent_time()));
+ const aos::realtime_clock::time_point header_realtime_sent_time(
+ chrono::nanoseconds(header.realtime_sent_time()));
+ const aos::monotonic_clock::time_point header_monotonic_remote_time(
+ chrono::nanoseconds(header.monotonic_remote_time()));
+ const aos::realtime_clock::time_point header_realtime_remote_time(
+ chrono::nanoseconds(header.realtime_remote_time()));
+
+ if (channel_index != -1) {
+ ASSERT_EQ(channel_index, header.channel_index());
+ }
+
+ const Context *pi1_context = nullptr;
+ const Context *pi2_context = nullptr;
+
+ if (header.channel_index() == pi1_timestamp_channel) {
+ ASSERT_TRUE(pi1_timestamp_on_pi1_fetcher.FetchNext());
+ ASSERT_TRUE(pi1_timestamp_on_pi2_fetcher.FetchNext());
+ pi1_context = &pi1_timestamp_on_pi1_fetcher.context();
+ pi2_context = &pi1_timestamp_on_pi2_fetcher.context();
+ } else if (header.channel_index() == ping_timestamp_channel) {
+ ASSERT_TRUE(ping_on_pi1_fetcher.FetchNext());
+ ASSERT_TRUE(ping_on_pi2_fetcher.FetchNext());
+ pi1_context = &ping_on_pi1_fetcher.context();
+ pi2_context = &ping_on_pi2_fetcher.context();
+ } else {
+ LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
+ << configuration::CleanedChannelToString(
+ pi1_event_loop->configuration()->channels()->Get(
+ header.channel_index()));
+ }
+
+ ASSERT_TRUE(header.has_boot_uuid());
+ EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
+ pi2_event_loop->boot_uuid());
+
+ EXPECT_EQ(pi1_context->queue_index, header.remote_queue_index());
+ EXPECT_EQ(pi2_context->remote_queue_index,
+ header.remote_queue_index());
+ EXPECT_EQ(pi2_context->queue_index, header.queue_index());
+
+ EXPECT_EQ(pi2_context->monotonic_event_time,
+ header_monotonic_sent_time);
+ EXPECT_EQ(pi2_context->realtime_event_time,
+ header_realtime_sent_time);
+ EXPECT_EQ(pi2_context->realtime_remote_time,
+ header_realtime_remote_time);
+ EXPECT_EQ(pi2_context->monotonic_remote_time,
+ header_monotonic_remote_time);
+
+ EXPECT_EQ(pi1_context->realtime_event_time,
+ header_realtime_remote_time);
+ EXPECT_EQ(pi1_context->monotonic_event_time,
+ header_monotonic_remote_time);
+
+ // Time estimation isn't perfect, but we know the clocks were
+ // identical when logged, so we know when this should have come back.
+ // Confirm we got it when we expected.
+ EXPECT_EQ(pi1_event_loop->context().monotonic_event_time,
+ pi1_context->monotonic_event_time + 2 * network_delay +
+ send_delay);
+ });
+ }
+ for (std::pair<int, std::string> channel :
+ shared()
+ ? std::vector<
+ std::pair<int, std::string>>{{-1,
+ "/aos/remote_timestamps/pi1"}}
+ : std::vector<std::pair<int, std::string>>{
+ {pi2_timestamp_channel,
+ "/aos/remote_timestamps/pi1/pi2/aos/"
+ "aos-message_bridge-Timestamp"}}) {
+ pi2_event_loop->MakeWatcher(
+ channel.second,
+ [&pi2_event_loop, &pi1_event_loop, pi2_timestamp_channel,
+ pong_timestamp_channel, &pi2_timestamp_on_pi2_fetcher,
+ &pi2_timestamp_on_pi1_fetcher, &pong_on_pi2_fetcher,
+ &pong_on_pi1_fetcher, network_delay, send_delay,
+ channel_index = channel.first](const RemoteMessage &header) {
+ const aos::monotonic_clock::time_point header_monotonic_sent_time(
+ chrono::nanoseconds(header.monotonic_sent_time()));
+ const aos::realtime_clock::time_point header_realtime_sent_time(
+ chrono::nanoseconds(header.realtime_sent_time()));
+ const aos::monotonic_clock::time_point header_monotonic_remote_time(
+ chrono::nanoseconds(header.monotonic_remote_time()));
+ const aos::realtime_clock::time_point header_realtime_remote_time(
+ chrono::nanoseconds(header.realtime_remote_time()));
+
+ if (channel_index != -1) {
+ ASSERT_EQ(channel_index, header.channel_index());
+ }
+
+ const Context *pi2_context = nullptr;
+ const Context *pi1_context = nullptr;
+
+ if (header.channel_index() == pi2_timestamp_channel) {
+ ASSERT_TRUE(pi2_timestamp_on_pi2_fetcher.FetchNext());
+ ASSERT_TRUE(pi2_timestamp_on_pi1_fetcher.FetchNext());
+ pi2_context = &pi2_timestamp_on_pi2_fetcher.context();
+ pi1_context = &pi2_timestamp_on_pi1_fetcher.context();
+ } else if (header.channel_index() == pong_timestamp_channel) {
+ ASSERT_TRUE(pong_on_pi2_fetcher.FetchNext());
+ ASSERT_TRUE(pong_on_pi1_fetcher.FetchNext());
+ pi2_context = &pong_on_pi2_fetcher.context();
+ pi1_context = &pong_on_pi1_fetcher.context();
+ } else {
+ LOG(FATAL) << "Unknown channel " << FlatbufferToJson(&header) << " "
+ << configuration::CleanedChannelToString(
+ pi2_event_loop->configuration()->channels()->Get(
+ header.channel_index()));
+ }
+
+ ASSERT_TRUE(header.has_boot_uuid());
+ EXPECT_EQ(UUID::FromVector(header.boot_uuid()),
+ pi1_event_loop->boot_uuid());
+
+ EXPECT_EQ(pi2_context->queue_index, header.remote_queue_index());
+ EXPECT_EQ(pi1_context->remote_queue_index,
+ header.remote_queue_index());
+ EXPECT_EQ(pi1_context->queue_index, header.queue_index());
+
+ EXPECT_EQ(pi1_context->monotonic_event_time,
+ header_monotonic_sent_time);
+ EXPECT_EQ(pi1_context->realtime_event_time,
+ header_realtime_sent_time);
+ EXPECT_EQ(pi1_context->realtime_remote_time,
+ header_realtime_remote_time);
+ EXPECT_EQ(pi1_context->monotonic_remote_time,
+ header_monotonic_remote_time);
+
+ EXPECT_EQ(pi2_context->realtime_event_time,
+ header_realtime_remote_time);
+ EXPECT_EQ(pi2_context->monotonic_event_time,
+ header_monotonic_remote_time);
+
+ // Time estimation isn't perfect, but we know the clocks were
+ // identical when logged, so we know when this should have come back.
+ // Confirm we got it when we expected.
+ EXPECT_EQ(pi2_event_loop->context().monotonic_event_time,
+ pi2_context->monotonic_event_time + 2 * network_delay +
+ send_delay);
+ });
+ }
+
+ // And confirm we can re-create a log again, while checking the contents.
+ {
+ LoggerState pi1_logger = MakeLogger(
+ log_reader_factory.GetNodeEventLoopFactory("pi1"), &log_reader_factory);
+ LoggerState pi2_logger = MakeLogger(
+ log_reader_factory.GetNodeEventLoopFactory("pi2"), &log_reader_factory);
+
+ StartLogger(&pi1_logger, tmp_dir_ + "/relogged1");
+ StartLogger(&pi2_logger, tmp_dir_ + "/relogged2");
+
+ log_reader_factory.Run();
+ }
+
+ reader.Deregister();
+
+ // And verify that we can run the LogReader over the relogged files without
+ // hitting any fatal errors.
+ {
+ LogReader relogged_reader(SortParts(MakeLogFiles(
+ tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2", 3, 3, true)));
+ relogged_reader.Register();
+
+ relogged_reader.event_loop_factory()->Run();
+ }
+ // And confirm that we can read the logged file using the reader's
+ // configuration.
+ {
+ LogReader relogged_reader(
+ SortParts(MakeLogFiles(tmp_dir_ + "/relogged1", tmp_dir_ + "/relogged2",
+ 3, 3, true)),
+ reader.configuration());
+ relogged_reader.Register();
+
+ relogged_reader.event_loop_factory()->Run();
+ }
+}
+
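+// Note: the 2 * network_delay + send_delay offset asserted in both watchers
+// presumably models one network hop to deliver the original message, the
+// remote node's send delay for the returned timestamp, and one hop back; with
+// identical clocks the remote timestamp should therefore arrive exactly that
+// long after the original local send.
+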
+// Tests that we properly populate and extract the logger_start time by setting
+// up a clock difference between 2 nodes and looking at the resulting parts.
+TEST_P(MultinodeLoggerTest, LoggerStartTime) {
+ std::vector<std::string> actual_filenames;
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ ASSERT_THAT(actual_filenames,
+ ::testing::UnorderedElementsAreArray(logfiles_));
+
+ for (const LogFile &log_file : SortParts(logfiles_)) {
+ for (const LogParts &log_part : log_file.parts) {
+ if (log_part.node == log_file.logger_node) {
+ EXPECT_EQ(log_part.logger_monotonic_start_time,
+ aos::monotonic_clock::min_time);
+ EXPECT_EQ(log_part.logger_realtime_start_time,
+ aos::realtime_clock::min_time);
+ } else {
+ const chrono::seconds offset = log_file.logger_node == "pi1"
+ ? -chrono::seconds(1000)
+ : chrono::seconds(1000);
+ EXPECT_EQ(log_part.logger_monotonic_start_time,
+ log_part.monotonic_start_time + offset);
+ EXPECT_EQ(log_part.logger_realtime_start_time,
+ log_file.realtime_start_time +
+ (log_part.logger_monotonic_start_time -
+ log_file.monotonic_start_time));
+ }
+ }
+ }
+}
+
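+// Note: with pi2's clock booted 1000 s ahead of pi1's, a part written by one
+// node about the other sees a fixed +/- 1000 s difference between its own
+// start time and the logger's start time, which is exactly the offset the
+// loop above selects based on which node did the logging.
+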
+// Test that renaming the log base renames the folder.
+TEST_P(MultinodeLoggerTest, LoggerRenameFolder) {
+ util::UnlinkRecursive(tmp_dir_ + "/renamefolder");
+ util::UnlinkRecursive(tmp_dir_ + "/new-good");
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ logfile_base1_ = tmp_dir_ + "/renamefolder/multi_logfile1";
+ logfile_base2_ = tmp_dir_ + "/renamefolder/multi_logfile2";
+ logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ logfile_base1_ = tmp_dir_ + "/new-good/multi_logfile1";
+ logfile_base2_ = tmp_dir_ + "/new-good/multi_logfile2";
+ logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+ ASSERT_TRUE(pi1_logger.logger->RenameLogBase(logfile_base1_));
+ ASSERT_TRUE(pi2_logger.logger->RenameLogBase(logfile_base2_));
+ for (auto &file : logfiles_) {
+ struct stat s;
+ EXPECT_EQ(0, stat(file.c_str(), &s));
+ }
+}
+
+// Test that renaming the file base (not just the containing folder) dies.
+TEST_P(MultinodeLoggerDeathTest, LoggerRenameFile) {
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ util::UnlinkRecursive(tmp_dir_ + "/renamefile");
+ logfile_base1_ = tmp_dir_ + "/renamefile/multi_logfile1";
+ logfile_base2_ = tmp_dir_ + "/renamefile/multi_logfile2";
+ logfiles_ = MakeLogFiles(logfile_base1_, logfile_base2_);
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ StartLogger(&pi1_logger);
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ logfile_base1_ = tmp_dir_ + "/new-renamefile/new_multi_logfile1";
+ EXPECT_DEATH({ pi1_logger.logger->RenameLogBase(logfile_base1_); },
+ "Rename of file base from");
+}
+
+// TODO(austin): We can write a test which recreates a logfile and confirms that
+// we get it back. That is the ultimate test.
+
+// Tests that we properly handle a remote node (pi2) rebooting partway through
+// the log, and that the resulting parts and their headers track both boots.
+TEST_P(MultinodeLoggerTest, RemoteReboot) {
+ std::vector<std::string> actual_filenames;
+
+ const UUID pi1_boot0 = UUID::Random();
+ const UUID pi2_boot0 = UUID::Random();
+ const UUID pi2_boot1 = UUID::Random();
+ {
+ CHECK_EQ(pi1_index_, 0u);
+ CHECK_EQ(pi2_index_, 1u);
+
+ time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
+ time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
+ time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
+
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp::epoch() + reboot_time,
+ BootTimestamp{
+ .boot = 1,
+ .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
+ }
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+ pi1_boot0);
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+ pi2_boot0);
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+ VLOG(1) << "Reboot now!";
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+ pi1_boot0);
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+ pi2_boot1);
+
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ std::sort(actual_filenames.begin(), actual_filenames.end());
+ std::sort(pi1_reboot_logfiles_.begin(), pi1_reboot_logfiles_.end());
+ ASSERT_THAT(actual_filenames,
+ ::testing::UnorderedElementsAreArray(pi1_reboot_logfiles_));
+
+ // Confirm that our new oldest timestamps properly update as we reboot and
+ // rotate.
+ for (const std::string &file : pi1_reboot_logfiles_) {
+ std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
+ ReadHeader(file);
+ CHECK(log_header);
+ if (log_header->message().has_configuration()) {
+ continue;
+ }
+
+ const monotonic_clock::time_point monotonic_start_time =
+ monotonic_clock::time_point(
+ chrono::nanoseconds(log_header->message().monotonic_start_time()));
+ const UUID source_node_boot_uuid = UUID::FromString(
+ log_header->message().source_node_boot_uuid()->string_view());
+
+ if (log_header->message().node()->name()->string_view() != "pi1") {
+      // The remote message channel should rotate later and have more parts.
+      // This is only true on the log files with shared remote messages.
+ //
+ // TODO(austin): I'm not the most thrilled with this test pattern... It
+ // feels brittle in a different way.
+ if (file.find("aos.message_bridge.RemoteMessage") == std::string::npos ||
+ !shared()) {
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 1:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ ASSERT_EQ(monotonic_start_time,
+ monotonic_clock::epoch() + chrono::seconds(1));
+ break;
+ case 2:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
+ break;
+ case 3:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
+ chrono::nanoseconds(2322999462))
+ << " on " << file;
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ } else {
+ switch (log_header->message().parts_index()) {
+ case 0:
+ case 1:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 2:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ ASSERT_EQ(monotonic_start_time,
+ monotonic_clock::epoch() + chrono::seconds(1));
+ break;
+ case 3:
+ case 4:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time) << file;
+ break;
+ case 5:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ ASSERT_EQ(monotonic_start_time, monotonic_clock::epoch() +
+ chrono::nanoseconds(2322999462))
+ << " on " << file;
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ }
+ continue;
+ }
+ SCOPED_TRACE(file);
+ SCOPED_TRACE(aos::FlatbufferToJson(
+ *log_header, {.multi_line = true, .max_vector_size = 100}));
+ ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
+ ASSERT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
+ EXPECT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
+ ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
+ 2u);
+ EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_remote_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ EXPECT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_local_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ EXPECT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+
+ const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(
+ 1)));
+ const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
+ const monotonic_clock::time_point
+ oldest_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_remote_reliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_reliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_local_reliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_reliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_logger_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_logger_remote_unreliable_monotonic_timestamps()
+ ->Get(0)));
+ const monotonic_clock::time_point
+ oldest_logger_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_logger_local_unreliable_monotonic_timestamps()
+ ->Get(0)));
+ EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ break;
+ case 1:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)));
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)));
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ break;
+ case 2:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)))
+ << file;
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)))
+ << file;
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)))
+ << file;
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)))
+ << file;
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(100000)))
+ << file;
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(100150)))
+ << file;
+ break;
+ case 3:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::milliseconds(1323) +
+ chrono::microseconds(200)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100350)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::milliseconds(1323) +
+ chrono::microseconds(200)));
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100350)));
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time)
+ << file;
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time)
+ << file;
+ break;
+ case 4:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::milliseconds(1323) +
+ chrono::microseconds(200)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100350)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::milliseconds(1323) +
+ chrono::microseconds(200)));
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100350)));
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(1423000)))
+ << file;
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10200150)))
+ << file;
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ }
+
+  // Confirm that we can sort and replay the resulting log across the reboot.
+ {
+ LogReader reader(SortParts(pi1_reboot_logfiles_));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of
+ // the log file.
+ reader.Register(&log_reader_factory);
+
+ log_reader_factory.Run();
+
+ reader.Deregister();
+ }
+}
+
+// Tests that we can sort a log which only has timestamps from the remote
+// because the local message_bridge_client failed to connect.
+TEST_P(MultinodeLoggerTest, RemoteRebootOnlyTimestamps) {
+ const UUID pi1_boot0 = UUID::Random();
+ const UUID pi2_boot0 = UUID::Random();
+ const UUID pi2_boot1 = UUID::Random();
+ {
+ CHECK_EQ(pi1_index_, 0u);
+ CHECK_EQ(pi2_index_, 1u);
+
+ time_converter_.set_boot_uuid(pi1_index_, 0, pi1_boot0);
+ time_converter_.set_boot_uuid(pi2_index_, 0, pi2_boot0);
+ time_converter_.set_boot_uuid(pi2_index_, 1, pi2_boot1);
+
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(10100);
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp::epoch() + reboot_time,
+ BootTimestamp{
+ .boot = 1,
+ .time = monotonic_clock::epoch() + chrono::milliseconds(1323)}});
+ }
+ pi2_->Disconnect(pi1_->node());
+
+ std::vector<std::string> filenames;
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+ pi1_boot0);
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+ pi2_boot0);
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+
+ VLOG(1) << "Reboot now!";
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi1")->boot_uuid(),
+ pi1_boot0);
+ EXPECT_EQ(event_loop_factory_.GetNodeEventLoopFactory("pi2")->boot_uuid(),
+ pi2_boot1);
+ pi1_logger.AppendAllFilenames(&filenames);
+ }
+
+ std::sort(filenames.begin(), filenames.end());
+
+ // Confirm that our new oldest timestamps properly update as we reboot and
+ // rotate.
+ size_t timestamp_file_count = 0;
+ for (const std::string &file : filenames) {
+ std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
+ ReadHeader(file);
+ CHECK(log_header);
+
+ if (log_header->message().has_configuration()) {
+ continue;
+ }
+
+ const monotonic_clock::time_point monotonic_start_time =
+ monotonic_clock::time_point(
+ chrono::nanoseconds(log_header->message().monotonic_start_time()));
+ const UUID source_node_boot_uuid = UUID::FromString(
+ log_header->message().source_node_boot_uuid()->string_view());
+
+ ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
+ ASSERT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
+ ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
+ ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
+ 2u);
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_remote_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_local_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_remote_reliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_remote_reliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ ASSERT_TRUE(
+ log_header->message().has_oldest_local_reliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_local_reliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+
+ ASSERT_TRUE(
+ log_header->message()
+ .has_oldest_logger_remote_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_logger_remote_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_logger_local_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_logger_local_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+
+ if (log_header->message().node()->name()->string_view() != "pi1") {
+ ASSERT_TRUE(file.find("aos.message_bridge.RemoteMessage") !=
+ std::string::npos);
+
+ const std::optional<SizePrefixedFlatbufferVector<MessageHeader>> msg =
+ ReadNthMessage(file, 0);
+ CHECK(msg);
+
+ EXPECT_TRUE(msg->message().has_monotonic_sent_time());
+ EXPECT_TRUE(msg->message().has_monotonic_remote_time());
+
+ const monotonic_clock::time_point
+ expected_oldest_local_monotonic_timestamps(
+ chrono::nanoseconds(msg->message().monotonic_sent_time()));
+ const monotonic_clock::time_point
+ expected_oldest_remote_monotonic_timestamps(
+ chrono::nanoseconds(msg->message().monotonic_remote_time()));
+ const monotonic_clock::time_point
+ expected_oldest_timestamp_monotonic_timestamps(
+ chrono::nanoseconds(msg->message().monotonic_timestamp_time()));
+
+ EXPECT_NE(expected_oldest_local_monotonic_timestamps,
+ monotonic_clock::min_time);
+ EXPECT_NE(expected_oldest_remote_monotonic_timestamps,
+ monotonic_clock::min_time);
+ EXPECT_NE(expected_oldest_timestamp_monotonic_timestamps,
+ monotonic_clock::min_time);
+
+ ++timestamp_file_count;
+      // Since the log file is from the perspective of the other node, the
+      // oldest timestamps we care about live at index 0, and the logger's own
+      // oldest_logger_* timestamps at index 1.
+ const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(
+ 0)));
+ const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_local_monotonic_timestamps()->Get(
+ 0)));
+ const monotonic_clock::time_point
+ oldest_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(0)));
+ const monotonic_clock::time_point
+ oldest_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(0)));
+ const monotonic_clock::time_point
+ oldest_remote_reliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_reliable_monotonic_timestamps()
+ ->Get(0)));
+ const monotonic_clock::time_point
+ oldest_local_reliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_reliable_monotonic_timestamps()
+ ->Get(0)));
+ const monotonic_clock::time_point
+ oldest_logger_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_logger_remote_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_logger_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_logger_local_unreliable_monotonic_timestamps()
+ ->Get(1)));
+
+ const Channel *channel =
+ event_loop_factory_.configuration()->channels()->Get(
+ msg->message().channel_index());
+ const Connection *connection = configuration::ConnectionToNode(
+ channel, configuration::GetNode(
+ event_loop_factory_.configuration(),
+ log_header->message().node()->name()->string_view()));
+
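+      // Connections with a time_to_live of 0 are reliable; which of the
+      // reliable vs. unreliable oldest-timestamp fields we expect to be
+      // filled in below depends on this.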
+ const bool reliable = connection->time_to_live() == 0;
+
+ SCOPED_TRACE(file);
+ SCOPED_TRACE(aos::FlatbufferToJson(
+ *log_header, {.multi_line = true, .max_vector_size = 100}));
+
+ if (shared()) {
+ // Confirm that the oldest timestamps match what we expect. Based on
+ // what we are doing, we know that the oldest time is the first
+ // message's time.
+ //
+ // This makes the test robust to both the split and combined config
+ // tests.
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps)
+ << file;
+ EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+ expected_oldest_timestamp_monotonic_timestamps)
+ << file;
+
+ if (reliable) {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ } else {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ }
+ break;
+ case 1:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90000000));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+ EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+ EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90250000));
+ if (reliable) {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(
+ oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90000000));
+ EXPECT_EQ(
+ oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90150000));
+ } else {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ }
+ break;
+ case 2:
+ EXPECT_EQ(
+ oldest_remote_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
+ EXPECT_EQ(
+ oldest_local_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+ EXPECT_EQ(oldest_logger_remote_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps)
+ << file;
+ EXPECT_EQ(oldest_logger_local_unreliable_monotonic_timestamps,
+ expected_oldest_timestamp_monotonic_timestamps)
+ << file;
+ if (reliable) {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ } else {
+ EXPECT_EQ(oldest_remote_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_reliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ }
+ break;
+
+ case 3:
+ EXPECT_EQ(
+ oldest_remote_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(10000000000));
+ EXPECT_EQ(
+ oldest_local_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(
+ oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+ EXPECT_EQ(
+ oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
+ break;
+ default:
+ FAIL();
+ break;
+ }
+
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 1:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 2:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 3:
+ if (shared()) {
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ }
+ [[fallthrough]];
+ default:
+ FAIL();
+ break;
+ }
+ } else {
+ switch (log_header->message().parts_index()) {
+ case 0:
+ if (reliable) {
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(
+ oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(100150000))
+ << file;
+ EXPECT_EQ(
+ oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(100250000))
+ << file;
+ } else {
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(
+ oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90150000))
+ << file;
+ EXPECT_EQ(
+ oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(90250000))
+ << file;
+ }
+ break;
+ case 1:
+ if (reliable) {
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(
+ oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(1323100000));
+ EXPECT_EQ(
+ oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(10100200000));
+ } else {
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ expected_oldest_remote_monotonic_timestamps);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ expected_oldest_local_monotonic_timestamps);
+ EXPECT_EQ(
+ oldest_logger_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(1323150000));
+ EXPECT_EQ(
+ oldest_logger_local_unreliable_monotonic_timestamps,
+ monotonic_clock::epoch() + chrono::nanoseconds(10100250000));
+ }
+ break;
+ default:
+ FAIL();
+ break;
+ }
+
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot0);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ case 1:
+ EXPECT_EQ(source_node_boot_uuid, pi2_boot1);
+ EXPECT_EQ(monotonic_start_time, monotonic_clock::min_time);
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ }
+
+ continue;
+ }
+ EXPECT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ EXPECT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ EXPECT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+
+ const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(
+ 1)));
+ const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
+ const monotonic_clock::time_point
+ oldest_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ }
+
+  // Both the shared and split configurations produce 4 timestamp files.
+  EXPECT_EQ(timestamp_file_count, 4u);
+
+ // Confirm that we can actually sort the resulting log and read it.
+ {
+ LogReader reader(SortParts(filenames));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of
+ // the log file.
+ reader.Register(&log_reader_factory);
+
+ log_reader_factory.Run();
+
+ reader.Deregister();
+ }
+}
+
+// Tests that we properly handle one direction of message_bridge being
+// unavailable while the remote clock drifts with a slightly negative slope.
+TEST_P(MultinodeLoggerTest, OneDirectionWithNegativeSlope) {
+ pi1_->Disconnect(pi2_->node());
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(10000),
+ chrono::milliseconds(10000) - chrono::milliseconds(1)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ // Confirm that we can parse the result. LogReader has enough internal CHECKs
+ // to confirm the right thing happened.
+ ConfirmReadable(pi1_single_direction_logfiles_);
+}
+
+// Tests that we properly handle one direction of message_bridge being
+// unavailable while the remote clock drifts with a slightly positive slope.
+TEST_P(MultinodeLoggerTest, OneDirectionWithPositiveSlope) {
+ pi1_->Disconnect(pi2_->node());
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(500)});
+
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(10000),
+ chrono::milliseconds(10000) + chrono::milliseconds(1)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ // Confirm that we can parse the result. LogReader has enough internal CHECKs
+ // to confirm the right thing happened.
+ ConfirmReadable(pi1_single_direction_logfiles_);
+}
+
+// Tests that we explode with a better error than an out-of-order error if
+// someone passes in the same part file twice.
+TEST_P(MultinodeLoggerTest, DuplicateLogFiles) {
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ std::vector<std::string> duplicates;
+ for (const std::string &f : pi1_single_direction_logfiles_) {
+ duplicates.emplace_back(f);
+ duplicates.emplace_back(f);
+ }
+ EXPECT_DEATH({ SortParts(duplicates); }, "Found duplicate parts in");
+}
+
+// Tests that we explode if someone loses a part out of the middle of a log.
+TEST_P(MultinodeLoggerTest, MissingPartsFromMiddle) {
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ aos::monotonic_clock::time_point last_rotation_time =
+ pi1_logger.event_loop->monotonic_now();
+ pi1_logger.logger->set_on_logged_period([&] {
+ const auto now = pi1_logger.event_loop->monotonic_now();
+ if (now > last_rotation_time + std::chrono::seconds(5)) {
+ pi1_logger.logger->Rotate();
+ last_rotation_time = now;
+ }
+ });
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ std::vector<std::string> missing_parts;
+
+ missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+ missing_parts.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
+ missing_parts.emplace_back(absl::StrCat(
+ logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+
+ EXPECT_DEATH({ SortParts(missing_parts); },
+ "Broken log, missing part files between");
+}
+
+// Tests that we properly handle a dead node. Do this by just disconnecting it
+// and only using one node's logs.
+TEST_P(MultinodeLoggerTest, DeadNode) {
+ pi1_->Disconnect(pi2_->node());
+ pi2_->Disconnect(pi1_->node());
+ time_converter_.AddMonotonic(
+ {BootTimestamp::epoch(), BootTimestamp::epoch() + chrono::seconds(1000)});
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(10000));
+ }
+
+ // Confirm that we can parse the result. LogReader has enough internal CHECKs
+ // to confirm the right thing happened.
+ ConfirmReadable(MakePi1DeadNodeLogfiles());
+}
+
+// Tests that we can relog with a different config. This makes most sense when
+// you are trying to edit a log and want to use channel renaming + the original
+// config in the new log.
+TEST_P(MultinodeLoggerTest, LogDifferentConfig) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader reader(SortParts(logfiles_));
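+  // The channel renaming described in the comment above: remap the logged
+  // /test Ping channel under the /original prefix before re-logging.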
+ reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+ LOG(INFO) << "now pi1 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+ LOG(INFO) << "now pi2 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ // And confirm we can re-create a log again, while checking the contents.
+ std::vector<std::string> log_files;
+ {
+ LoggerState pi1_logger =
+ MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi1"),
+ &log_reader_factory, reader.logged_configuration());
+ LoggerState pi2_logger =
+ MakeLogger(log_reader_factory.GetNodeEventLoopFactory("pi2"),
+ &log_reader_factory, reader.logged_configuration());
+
+ pi1_logger.StartLogger(tmp_dir_ + "/relogged1");
+ pi2_logger.StartLogger(tmp_dir_ + "/relogged2");
+
+ log_reader_factory.Run();
+
+ for (auto &x : pi1_logger.log_namer->all_filenames()) {
+ log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged1_", x));
+ }
+ for (auto &x : pi2_logger.log_namer->all_filenames()) {
+ log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged2_", x));
+ }
+ }
+
+ reader.Deregister();
+
+ // And verify that we can run the LogReader over the relogged files without
+ // hitting any fatal errors.
+ {
+ LogReader relogged_reader(SortParts(log_files));
+ relogged_reader.Register();
+
+ relogged_reader.event_loop_factory()->Run();
+ }
+}
+
+// Tests that we properly replay a log where the start time for a node is before
+// any data on the node. This can happen if the logger starts before data is
+// published. While the scenario below is a bit convoluted, we have seen logs
+// like this generated out in the wild.
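+// Below, statistics and the ping/pong applications are left off for the first
+// several seconds of each log, so each logger records a start time well before
+// the first message it actually logs.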
+TEST(MultinodeRebootLoggerTest, StartTimeBeforeData) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split3_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ NodeEventLoopFactory *const pi3 =
+ event_loop_factory.GetNodeEventLoopFactory("pi3");
+ const size_t pi3_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi3->node());
+
+ const std::string kLogfile1_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile1/";
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ const std::string kLogfile2_2 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+ const std::string kLogfile3_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile3/";
+ util::UnlinkRecursive(kLogfile1_1);
+ util::UnlinkRecursive(kLogfile2_1);
+ util::UnlinkRecursive(kLogfile2_2);
+ util::UnlinkRecursive(kLogfile3_1);
+ const UUID pi1_boot0 = UUID::Random();
+ const UUID pi2_boot0 = UUID::Random();
+ const UUID pi2_boot1 = UUID::Random();
+ const UUID pi3_boot0 = UUID::Random();
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+ CHECK_EQ(pi3_index, 2u);
+
+ time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
+ time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
+ time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
+ time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch(),
+ BootTimestamp::epoch()});
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(20000);
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp::epoch() + reboot_time,
+ BootTimestamp{
+ .boot = 1,
+ .time = monotonic_clock::epoch() + chrono::milliseconds(1323)},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ // Make everything perfectly quiet.
+ event_loop_factory.SkipTimingReport();
+ event_loop_factory.DisableStatistics();
+
+ std::vector<std::string> filenames;
+ {
+ LoggerState pi1_logger = MakeLoggerState(
+ pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ LoggerState pi3_logger = MakeLoggerState(
+ pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ {
+ // And now start the logger.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+ pi1_logger.StartLogger(kLogfile1_1);
+ pi3_logger.StartLogger(kLogfile3_1);
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(10000));
+
+ // Now that we've got a start time in the past, turn on data.
+ event_loop_factory.EnableStatistics();
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+
+ pi2->AlwaysStart<Pong>("pong");
+
+ event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+
+ // Stop logging on pi2 before rebooting and completely shut off all
+ // messages on pi2.
+ pi2->DisableStatistics();
+ pi1->Disconnect(pi2->node());
+ pi2->Disconnect(pi1->node());
+ }
+ event_loop_factory.RunFor(chrono::milliseconds(7000));
+ // pi2 now reboots.
+ {
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+ // Start logging again on pi2 after it is up.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi2_logger.StartLogger(kLogfile2_2);
+
+ event_loop_factory.RunFor(chrono::milliseconds(10000));
+ // And, now that we have a start time in the log, turn data back on.
+ pi2->EnableStatistics();
+ pi1->Connect(pi2->node());
+ pi2->Connect(pi1->node());
+
+ pi2->AlwaysStart<Pong>("pong");
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+
+ event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ pi1_logger.AppendAllFilenames(&filenames);
+ pi3_logger.AppendAllFilenames(&filenames);
+ }
+
+ // Confirm that we can parse the result. LogReader has enough internal CHECKs
+ // to confirm the right thing happened.
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ auto result = ConfirmReadable(filenames);
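+  // Per the expectations below, ConfirmReadable returns, for each node, the
+  // realtime start and end times of every boot seen in the log; pi2 (index 1)
+  // spans two boots.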
+ EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::seconds(1)));
+ EXPECT_THAT(result[0].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(34990350)));
+
+ EXPECT_THAT(result[1].first,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::seconds(1),
+ realtime_clock::epoch() + chrono::microseconds(3323000)));
+ EXPECT_THAT(result[1].second,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::microseconds(13990200),
+ realtime_clock::epoch() + chrono::microseconds(16313200)));
+
+ EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::seconds(1)));
+ EXPECT_THAT(result[2].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(34900150)));
+}
+
+// Tests that local data before remote data after reboot is properly replayed.
+// We only trigger a reboot in the timestamp interpolation function, while
+// solving the timestamp problem, when we actually have a point in the
+// function. That originally only happened once a point passed the noncausal
+// filter. At the start of time for the second boot, if we aren't careful, we
+// will have messages which need to be published at times before the boot.
+// This happens when a local message lands in the log before a forwarded
+// message, so there is no point in the interpolation function yet and the
+// reboot is delayed. So, we need to recreate that situation and make sure it
+// doesn't come back.
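+// Roughly, the sequence below: log with pi2 connected, disconnect pi2 and
+// reboot it at 5 seconds, generate local traffic on the new boot before the
+// pi2 logger starts and before the remote connection is restored, then
+// reconnect and keep logging.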
+TEST(MultinodeRebootLoggerTest,
+ LocalMessageBeforeRemoteBeforeStartAfterReboot) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split3_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ NodeEventLoopFactory *const pi3 =
+ event_loop_factory.GetNodeEventLoopFactory("pi3");
+ const size_t pi3_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi3->node());
+
+ const std::string kLogfile1_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile1/";
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ const std::string kLogfile2_2 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+ const std::string kLogfile3_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile3/";
+ util::UnlinkRecursive(kLogfile1_1);
+ util::UnlinkRecursive(kLogfile2_1);
+ util::UnlinkRecursive(kLogfile2_2);
+ util::UnlinkRecursive(kLogfile3_1);
+ const UUID pi1_boot0 = UUID::Random();
+ const UUID pi2_boot0 = UUID::Random();
+ const UUID pi2_boot1 = UUID::Random();
+ const UUID pi3_boot0 = UUID::Random();
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+ CHECK_EQ(pi3_index, 2u);
+
+ time_converter.set_boot_uuid(pi1_index, 0, pi1_boot0);
+ time_converter.set_boot_uuid(pi2_index, 0, pi2_boot0);
+ time_converter.set_boot_uuid(pi2_index, 1, pi2_boot1);
+ time_converter.set_boot_uuid(pi3_index, 0, pi3_boot0);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch(),
+ BootTimestamp::epoch()});
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
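+    // pi2 reboots at 5 seconds on the distributed clock and comes back with
+    // its monotonic clock roughly 100 seconds ahead of where it left off.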
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp::epoch() + reboot_time,
+ BootTimestamp{.boot = 1,
+ .time = monotonic_clock::epoch() + reboot_time +
+ chrono::seconds(100)},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ std::vector<std::string> filenames;
+ {
+ LoggerState pi1_logger = MakeLoggerState(
+ pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ LoggerState pi3_logger = MakeLoggerState(
+ pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ {
+ // And now start the logger.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ pi1_logger.StartLogger(kLogfile1_1);
+ pi3_logger.StartLogger(kLogfile3_1);
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+ // Now that we've got a start time in the past, turn on data.
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+
+ pi2->AlwaysStart<Pong>("pong");
+
+ event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+
+ // Disable any remote messages on pi2.
+ pi1->Disconnect(pi2->node());
+ pi2->Disconnect(pi1->node());
+ }
+ event_loop_factory.RunFor(chrono::milliseconds(995));
+ // pi2 now reboots at 5 seconds.
+ {
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+ // Make local stuff happen before we start logging and connect the remote.
+ pi2->AlwaysStart<Pong>("pong");
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+ event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+ // Start logging again on pi2 after it is up.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi2_logger.StartLogger(kLogfile2_2);
+
+ // And allow remote messages now that we have some local ones.
+ pi1->Connect(pi2->node());
+ pi2->Connect(pi1->node());
+
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+ event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ pi1_logger.AppendAllFilenames(&filenames);
+ pi3_logger.AppendAllFilenames(&filenames);
+ }
+
+ // Confirm that we can parse the result. LogReader has enough internal CHECKs
+ // to confirm the right thing happened.
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ auto result = ConfirmReadable(filenames);
+
+ EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
+ EXPECT_THAT(result[0].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(11000350)));
+
+ EXPECT_THAT(result[1].first,
+ ::testing::ElementsAre(
+ realtime_clock::epoch(),
+ realtime_clock::epoch() + chrono::microseconds(107005000)));
+ EXPECT_THAT(result[1].second,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::microseconds(4000150),
+ realtime_clock::epoch() + chrono::microseconds(111000200)));
+
+ EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
+ EXPECT_THAT(result[2].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(11000150)));
+
+ auto start_stop_result = ConfirmReadable(
+ filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
+ realtime_clock::epoch() + chrono::milliseconds(3000));
+
+ EXPECT_THAT(
+ start_stop_result[0].first,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+ EXPECT_THAT(
+ start_stop_result[0].second,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+ EXPECT_THAT(
+ start_stop_result[1].first,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+ EXPECT_THAT(
+ start_stop_result[1].second,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+ EXPECT_THAT(
+ start_stop_result[2].first,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+ EXPECT_THAT(
+ start_stop_result[2].second,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(3)));
+}
+
+// Tests that setting the start and stop flags across a reboot works as
+// expected.
+TEST(MultinodeRebootLoggerTest, RebootStartStopTimes) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split3_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ NodeEventLoopFactory *const pi3 =
+ event_loop_factory.GetNodeEventLoopFactory("pi3");
+ const size_t pi3_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi3->node());
+
+ const std::string kLogfile1_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile1/";
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ const std::string kLogfile2_2 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.2/";
+ const std::string kLogfile3_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile3/";
+ util::UnlinkRecursive(kLogfile1_1);
+ util::UnlinkRecursive(kLogfile2_1);
+ util::UnlinkRecursive(kLogfile2_2);
+ util::UnlinkRecursive(kLogfile3_1);
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+ CHECK_EQ(pi3_index, 2u);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch(),
+ BootTimestamp::epoch()});
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
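+    // pi2 reboots at 5 seconds on the distributed clock, with its second
+    // boot's monotonic clock also starting at 5 seconds.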
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp::epoch() + reboot_time,
+ BootTimestamp{.boot = 1,
+ .time = monotonic_clock::epoch() + reboot_time},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ std::vector<std::string> filenames;
+ {
+ LoggerState pi1_logger = MakeLoggerState(
+ pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ LoggerState pi3_logger = MakeLoggerState(
+ pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ {
+ // And now start the logger.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ pi1_logger.StartLogger(kLogfile1_1);
+ pi3_logger.StartLogger(kLogfile3_1);
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(1005));
+
+ // Now that we've got a start time in the past, turn on data.
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+
+ pi2->AlwaysStart<Pong>("pong");
+
+ event_loop_factory.RunFor(chrono::milliseconds(3000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+ event_loop_factory.RunFor(chrono::milliseconds(995));
+ // pi2 now reboots at 5 seconds.
+ {
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+
+ // Make local stuff happen before we start logging and connect the remote.
+ pi2->AlwaysStart<Pong>("pong");
+ std::unique_ptr<aos::EventLoop> ping_event_loop =
+ pi1->MakeEventLoop("ping");
+ Ping ping(ping_event_loop.get());
+ event_loop_factory.RunFor(chrono::milliseconds(5));
+
+ // Start logging again on pi2 after it is up.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi2_logger.StartLogger(kLogfile2_2);
+
+ event_loop_factory.RunFor(chrono::milliseconds(5000));
+
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ pi1_logger.AppendAllFilenames(&filenames);
+ pi3_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ auto result = ConfirmReadable(filenames);
+
+ EXPECT_THAT(result[0].first, ::testing::ElementsAre(realtime_clock::epoch()));
+ EXPECT_THAT(result[0].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(11000350)));
+
+ EXPECT_THAT(result[1].first,
+ ::testing::ElementsAre(
+ realtime_clock::epoch(),
+ realtime_clock::epoch() + chrono::microseconds(6005000)));
+ EXPECT_THAT(result[1].second,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::microseconds(4900150),
+ realtime_clock::epoch() + chrono::microseconds(11000200)));
+
+ EXPECT_THAT(result[2].first, ::testing::ElementsAre(realtime_clock::epoch()));
+ EXPECT_THAT(result[2].second,
+ ::testing::ElementsAre(realtime_clock::epoch() +
+ chrono::microseconds(11000150)));
+
+ // Confirm we observed the correct start and stop times. We should see the
+ // reboot here.
+ auto start_stop_result = ConfirmReadable(
+ filenames, realtime_clock::epoch() + chrono::milliseconds(2000),
+ realtime_clock::epoch() + chrono::milliseconds(8000));
+
+ EXPECT_THAT(
+ start_stop_result[0].first,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+ EXPECT_THAT(
+ start_stop_result[0].second,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
+ EXPECT_THAT(start_stop_result[1].first,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::seconds(2),
+ realtime_clock::epoch() + chrono::microseconds(6005000)));
+ EXPECT_THAT(start_stop_result[1].second,
+ ::testing::ElementsAre(
+ realtime_clock::epoch() + chrono::microseconds(4900150),
+ realtime_clock::epoch() + chrono::seconds(8)));
+ EXPECT_THAT(
+ start_stop_result[2].first,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(2)));
+ EXPECT_THAT(
+ start_stop_result[2].second,
+ ::testing::ElementsAre(realtime_clock::epoch() + chrono::seconds(8)));
+}
+
+// Tests that we properly handle one direction being down.
+TEST(MissingDirectionTest, OneDirection) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split4_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ std::vector<std::string> filenames;
+
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ const std::string kLogfile1_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile1.1/";
+ util::UnlinkRecursive(kLogfile2_1);
+ util::UnlinkRecursive(kLogfile1_1);
+
+ pi2->Disconnect(pi1->node());
+
+ pi1->AlwaysStart<Ping>("ping");
+ pi2->AlwaysStart<Pong>("pong");
+
+ {
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ event_loop_factory.RunFor(chrono::milliseconds(95));
+
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(6000));
+
+ pi2->Connect(pi1->node());
+
+ LoggerState pi1_logger = MakeLoggerState(
+ pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi1_logger.StartLogger(kLogfile1_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(5000));
+ pi1_logger.AppendAllFilenames(&filenames);
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle only one direction ever existing after a
+// reboot.
+TEST(MissingDirectionTest, OneDirectionAfterReboot) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split4_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ std::vector<std::string> filenames;
+
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ util::UnlinkRecursive(kLogfile2_1);
+
+ pi1->AlwaysStart<Ping>("ping");
+
+ // Pi1 sends to pi2. Reboot pi1, but don't let pi2 connect to pi1. This
+ // makes it such that we will only get timestamps from pi1 -> pi2 on the
+ // second boot.
+ {
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ event_loop_factory.RunFor(chrono::milliseconds(95));
+
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(4000));
+
+ pi2->Disconnect(pi1->node());
+
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+ pi1->AlwaysStart<Ping>("ping");
+
+ event_loop_factory.RunFor(chrono::milliseconds(5000));
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle only one direction ever existing after a reboot
+// with only reliable data.
+TEST(MissingDirectionTest, OneDirectionAfterRebootReliable) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_split4_reliable_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ const size_t pi1_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi1->node());
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ const size_t pi2_index = configuration::GetNodeIndex(
+ event_loop_factory.configuration(), pi2->node());
+ std::vector<std::string> filenames;
+
+ {
+ CHECK_EQ(pi1_index, 0u);
+ CHECK_EQ(pi2_index, 1u);
+
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+ const chrono::nanoseconds reboot_time = chrono::milliseconds(5000);
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch() + reboot_time,
+ {BootTimestamp{.boot = 1, .time = monotonic_clock::epoch()},
+ BootTimestamp::epoch() + reboot_time});
+ }
+
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ util::UnlinkRecursive(kLogfile2_1);
+
+ pi1->AlwaysStart<Ping>("ping");
+
+ // Pi1 sends to pi2. Reboot pi1, but don't let pi2 connect to pi1. This
+ // makes it such that we will only get timestamps from pi1 -> pi2 on the
+ // second boot.
+ {
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+
+ event_loop_factory.RunFor(chrono::milliseconds(95));
+
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::milliseconds(4000));
+
+ pi2->Disconnect(pi1->node());
+
+ event_loop_factory.RunFor(chrono::milliseconds(1000));
+ pi1->AlwaysStart<Ping>("ping");
+
+ event_loop_factory.RunFor(chrono::milliseconds(5000));
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+}
+
+// Tests that we properly handle what used to be a time violation in one
+// direction. This can occur when one direction goes down after sending some
+// data while the other keeps working. The down direction ends up resolving to
+// a straight line in the noncausal filter, and the direction which is still up
+// can cross that line. Really, time progressed just fine, but we assumed the
+// offset was a line when it could have deviated by up to 1 ms/second.
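+// Concretely, the code below lets pi2's clock run 5 ms slow over one 10 s
+// stretch and then 5 ms fast over the next, while one direction of the link
+// between pi1 and pi2 is disconnected.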
+TEST_P(MultinodeLoggerTest, OneDirectionTimeDrift) {
+ std::vector<std::string> filenames;
+
+ CHECK_EQ(pi1_index_, 0u);
+ CHECK_EQ(pi2_index_, 1u);
+
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+ const chrono::nanoseconds before_disconnect_duration =
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(1000), chrono::milliseconds(1000)});
+
+ const chrono::nanoseconds test_duration =
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(1000), chrono::milliseconds(1000)}) +
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(10000),
+ chrono::milliseconds(10000) - chrono::milliseconds(5)}) +
+ time_converter_.AddMonotonic(
+ {chrono::milliseconds(10000),
+ chrono::milliseconds(10000) + chrono::milliseconds(5)});
+
+ const std::string kLogfile =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ util::UnlinkRecursive(kLogfile);
+
+ {
+ LoggerState pi2_logger = MakeLogger(pi2_);
+ pi2_logger.StartLogger(kLogfile);
+ event_loop_factory_.RunFor(before_disconnect_duration);
+
+ pi2_->Disconnect(pi1_->node());
+
+ event_loop_factory_.RunFor(test_duration);
+ pi2_->Connect(pi1_->node());
+
+ event_loop_factory_.RunFor(chrono::milliseconds(5000));
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+}
+
+// Tests that we can replay a logfile that has timestamps such that at least one
+// node's epoch is at a positive distributed_clock (and thus will have to be
+// booted after the other node(s)).
+TEST_P(MultinodeLoggerTest, StartOneNodeBeforeOther) {
+ std::vector<std::string> filenames;
+
+ CHECK_EQ(pi1_index_, 0u);
+ CHECK_EQ(pi2_index_, 1u);
+
+ time_converter_.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(), BootTimestamp::epoch()});
+
+ const chrono::nanoseconds before_reboot_duration = chrono::milliseconds(1000);
+ time_converter_.RebootAt(
+ 0, distributed_clock::time_point(before_reboot_duration));
+
+ const chrono::nanoseconds test_duration = time_converter_.AddMonotonic(
+ {chrono::milliseconds(10000), chrono::milliseconds(10000)});
+
+ const std::string kLogfile =
+ aos::testing::TestTmpDir() + "/multi_logfile2.1/";
+ util::UnlinkRecursive(kLogfile);
+
+ pi2_->Disconnect(pi1_->node());
+ pi1_->Disconnect(pi2_->node());
+
+ {
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ pi2_logger.StartLogger(kLogfile);
+ event_loop_factory_.RunFor(before_reboot_duration);
+
+ pi2_->Connect(pi1_->node());
+ pi1_->Connect(pi2_->node());
+
+ event_loop_factory_.RunFor(test_duration);
+
+ pi2_logger.AppendAllFilenames(&filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+
+ {
+ LogReader reader(sorted_parts);
+ SimulatedEventLoopFactory replay_factory(reader.configuration());
+ reader.RegisterWithoutStarting(&replay_factory);
+
+ NodeEventLoopFactory *const replay_node =
+ reader.event_loop_factory()->GetNodeEventLoopFactory("pi1");
+
+ std::unique_ptr<EventLoop> test_event_loop =
+ replay_node->MakeEventLoop("test_reader");
+ replay_node->OnStartup([replay_node]() {
+ // Check that we didn't boot until at least t=0.
+ CHECK_LE(monotonic_clock::epoch(), replay_node->monotonic_now());
+ });
+ test_event_loop->OnRun([&test_event_loop]() {
+ // Check that we didn't boot until at least t=0.
+ EXPECT_LE(monotonic_clock::epoch(), test_event_loop->monotonic_now());
+ });
+ reader.event_loop_factory()->Run();
+ reader.Deregister();
+ }
+}
+
+// Tests that we can properly sort a loop of nodes when we don't have all the
+// logs at all points in time.
+TEST(MultinodeLoggerLoopTest, Loop) {
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(ArtifactPath(
+ "aos/events/logging/multinode_pingpong_triangle_split_config.json"));
+ message_bridge::TestingTimeConverter time_converter(
+ configuration::NodesCount(&config.message()));
+ SimulatedEventLoopFactory event_loop_factory(&config.message());
+ event_loop_factory.SetTimeConverter(&time_converter);
+
+ NodeEventLoopFactory *const pi1 =
+ event_loop_factory.GetNodeEventLoopFactory("pi1");
+ NodeEventLoopFactory *const pi2 =
+ event_loop_factory.GetNodeEventLoopFactory("pi2");
+ NodeEventLoopFactory *const pi3 =
+ event_loop_factory.GetNodeEventLoopFactory("pi3");
+
+ const std::string kLogfile1_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile1/";
+ const std::string kLogfile2_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile2/";
+ const std::string kLogfile3_1 =
+ aos::testing::TestTmpDir() + "/multi_logfile3/";
+ util::UnlinkRecursive(kLogfile1_1);
+ util::UnlinkRecursive(kLogfile2_1);
+ util::UnlinkRecursive(kLogfile3_1);
+
+ {
+ // Make pi1 boot before everything else.
+ time_converter.AddNextTimestamp(
+ distributed_clock::epoch(),
+ {BootTimestamp::epoch(),
+ BootTimestamp::epoch() - chrono::milliseconds(100),
+ BootTimestamp::epoch() - chrono::milliseconds(300)});
+ }
+
+ // We want to set up a situation such that 2 of the 3 legs of the loop are very
+ // confident about time being X, and the third leg is pulling the average off
+ // to one side.
+ //
+ // It's easiest to visualize this in timestamp_plotter.
+
+ std::vector<std::string> filenames;
+ {
+ // Have pi1 send out a reliable message at startup. This sets up a message
+ // with a long forwarding time at the start to bias time.
+ std::unique_ptr<EventLoop> pi1_event_loop = pi1->MakeEventLoop("ping");
+ {
+ aos::Sender<examples::Ping> ping_sender =
+ pi1_event_loop->MakeSender<examples::Ping>("/reliable");
+
+ aos::Sender<examples::Ping>::Builder builder = ping_sender.MakeBuilder();
+ examples::Ping::Builder ping_builder =
+ builder.MakeBuilder<examples::Ping>();
+ CHECK_EQ(builder.Send(ping_builder.Finish()), RawSender::Error::kOk);
+ }
+
+ // Wait a while so there's enough data to let the worst case be rather off.
+ event_loop_factory.RunFor(chrono::seconds(1000));
+
+ // Now start a receiving node first. This sets up 2 tight bounds between 2
+ // of the nodes.
+ LoggerState pi2_logger = MakeLoggerState(
+ pi2, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi2_logger.StartLogger(kLogfile2_1);
+
+ event_loop_factory.RunFor(chrono::seconds(100));
+
+ // And now start the third leg.
+ LoggerState pi3_logger = MakeLoggerState(
+ pi3, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi3_logger.StartLogger(kLogfile3_1);
+
+ LoggerState pi1_logger = MakeLoggerState(
+ pi1, &event_loop_factory, SupportedCompressionAlgorithms()[0]);
+ pi1_logger.StartLogger(kLogfile1_1);
+
+ event_loop_factory.RunFor(chrono::seconds(100));
+
+ pi1_logger.AppendAllFilenames(&filenames);
+ pi2_logger.AppendAllFilenames(&filenames);
+ pi3_logger.AppendAllFilenames(&filenames);
+ }
+
+ // Make sure we can read this.
+ const std::vector<LogFile> sorted_parts = SortParts(filenames);
+ ConfirmReadable(filenames);
+}
+
+} // namespace testing
+} // namespace logger
+} // namespace aos
diff --git a/aos/events/logging/multinode_logger_test_lib.cc b/aos/events/logging/multinode_logger_test_lib.cc
new file mode 100644
index 0000000..0f588c6
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test_lib.cc
@@ -0,0 +1,647 @@
+#include "aos/events/logging/multinode_logger_test_lib.h"
+
+#include "aos/events/event_loop.h"
+#include "aos/events/logging/log_reader.h"
+#include "aos/events/logging/logfile_utils.h"
+#include "aos/events/ping_lib.h"
+#include "aos/events/pong_lib.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/testing/tmpdir.h"
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+using aos::testing::ArtifactPath;
+
+LoggerState MakeLoggerState(NodeEventLoopFactory *node,
+ SimulatedEventLoopFactory *factory,
+ CompressionParams params,
+ const Configuration *configuration) {
+ if (configuration == nullptr) {
+ configuration = factory->configuration();
+ }
+ return {node->MakeEventLoop("logger"),
+ {},
+ configuration,
+ configuration::GetNode(configuration, node->node()),
+ nullptr,
+ params};
+}
+
+void LoggerState::StartLogger(std::string logfile_base) {
+ CHECK(!logfile_base.empty());
+
+ logger = std::make_unique<Logger>(event_loop.get(), configuration);
+ logger->set_polling_period(std::chrono::milliseconds(100));
+ logger->set_name(
+ absl::StrCat("name_prefix_", event_loop->node()->name()->str()));
+ logger->set_logger_sha1(
+ absl::StrCat("logger_sha1_", event_loop->node()->name()->str()));
+ logger->set_logger_version(
+ absl::StrCat("logger_version_", event_loop->node()->name()->str()));
+ event_loop->OnRun([this, logfile_base]() {
+ std::unique_ptr<MultiNodeLogNamer> namer =
+ std::make_unique<MultiNodeLogNamer>(logfile_base, configuration,
+ event_loop.get(), node);
+ namer->set_extension(params.extension);
+ namer->set_encoder_factory(params.encoder_factory);
+ log_namer = namer.get();
+
+ logger->StartLogging(std::move(namer));
+ });
+}
+
+void LoggerState::AppendAllFilenames(std::vector<std::string> *filenames) {
+ for (const std::string &file : log_namer->all_filenames()) {
+ const std::string_view separator =
+ log_namer->base_name().back() == '/' ? "" : "_";
+ filenames->emplace_back(
+ absl::StrCat(log_namer->base_name(), separator, file));
+ }
+}
+
+LoggerState::~LoggerState() {
+ if (logger) {
+ std::vector<std::string> filenames;
+ AppendAllFilenames(&filenames);
+ std::sort(filenames.begin(), filenames.end());
+ for (const std::string &file : filenames) {
+ LOG(INFO) << "Wrote to " << file;
+ auto x = ReadHeader(file);
+ if (x) {
+ VLOG(1) << aos::FlatbufferToJson(x.value());
+ }
+ }
+ }
+}
+
+MultinodeLoggerTest::MultinodeLoggerTest()
+ : config_(aos::configuration::ReadConfig(ArtifactPath(absl::StrCat(
+ "aos/events/logging/", std::get<0>(GetParam()).config)))),
+ time_converter_(configuration::NodesCount(&config_.message())),
+ event_loop_factory_(&config_.message()),
+ pi1_(event_loop_factory_.GetNodeEventLoopFactory("pi1")),
+ pi1_index_(configuration::GetNodeIndex(
+ event_loop_factory_.configuration(), pi1_->node())),
+ pi2_(event_loop_factory_.GetNodeEventLoopFactory("pi2")),
+ pi2_index_(configuration::GetNodeIndex(
+ event_loop_factory_.configuration(), pi2_->node())),
+ tmp_dir_(aos::testing::TestTmpDir()),
+ logfile_base1_(tmp_dir_ + "/multi_logfile1"),
+ logfile_base2_(tmp_dir_ + "/multi_logfile2"),
+ pi1_reboot_logfiles_(MakePi1RebootLogfiles()),
+ logfiles_(MakeLogFiles(logfile_base1_, logfile_base2_)),
+ pi1_single_direction_logfiles_(MakePi1SingleDirectionLogfiles()),
+ structured_logfiles_(StructureLogFiles()) {
+ LOG(INFO) << "Config " << std::get<0>(GetParam()).config;
+ event_loop_factory_.SetTimeConverter(&time_converter_);
+
+ // Go through and remove the logfiles if they already exist.
+ for (const auto &file : logfiles_) {
+ unlink(file.c_str());
+ unlink((file + ".xz").c_str());
+ }
+
+ for (const auto &file : MakeLogFiles(tmp_dir_ + "/relogged1",
+ tmp_dir_ + "/relogged2", 3, 3, true)) {
+ unlink(file.c_str());
+ }
+
+ for (const auto &file : pi1_reboot_logfiles_) {
+ unlink(file.c_str());
+ }
+
+ LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
+ << " and " << logfiles_[2];
+
+ pi1_->OnStartup([this]() { pi1_->AlwaysStart<Ping>("ping"); });
+ pi2_->OnStartup([this]() { pi2_->AlwaysStart<Pong>("pong"); });
+}
+
+bool MultinodeLoggerTest::shared() const {
+ return std::get<0>(GetParam()).shared;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakeLogFiles(
+ std::string logfile_base1, std::string logfile_base2, size_t pi1_data_count,
+ size_t pi2_data_count, bool relogged_config) {
+ std::string_view sha256 = relogged_config
+ ? std::get<0>(GetParam()).relogged_sha256
+ : std::get<0>(GetParam()).sha256;
+ std::vector<std::string> result;
+ result.emplace_back(absl::StrCat(logfile_base1, "_", sha256, Extension()));
+ result.emplace_back(absl::StrCat(logfile_base2, "_", sha256, Extension()));
+ for (size_t i = 0; i < pi1_data_count; ++i) {
+ result.emplace_back(
+ absl::StrCat(logfile_base1, "_pi1_data.part", i, Extension()));
+ }
+ result.emplace_back(logfile_base1 + "_pi2_data/test/aos.examples.Pong.part0" +
+ Extension());
+ result.emplace_back(logfile_base1 + "_pi2_data/test/aos.examples.Pong.part1" +
+ Extension());
+ for (size_t i = 0; i < pi2_data_count; ++i) {
+ result.emplace_back(
+ absl::StrCat(logfile_base2, "_pi2_data.part", i, Extension()));
+ }
+ result.emplace_back(logfile_base2 +
+ "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part0" +
+ Extension());
+ result.emplace_back(logfile_base2 +
+ "_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part1" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
+ Extension());
+ if (shared()) {
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/"
+ "aos.message_bridge.RemoteMessage.part2" +
+ Extension());
+ result.emplace_back(logfile_base2 +
+ "_timestamps/pi2/aos/remote_timestamps/pi1/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base2 +
+ "_timestamps/pi2/aos/remote_timestamps/pi1/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ } else {
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ result.emplace_back(logfile_base2 +
+ "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base2 +
+ "_timestamps/pi2/aos/remote_timestamps/pi1/pi2/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base1 +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ }
+
+ return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1RebootLogfiles() {
+ std::vector<std::string> result;
+ result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+ result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
+ result.emplace_back(logfile_base1_ + "_pi1_data.part2" + Extension());
+ result.emplace_back(logfile_base1_ + "_pi1_data.part3" + Extension());
+ result.emplace_back(logfile_base1_ + "_pi1_data.part4" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/test/aos.examples.Pong.part0" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/test/aos.examples.Pong.part1" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/test/aos.examples.Pong.part2" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/test/aos.examples.Pong.part3" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part2" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part3" +
+ Extension());
+ result.emplace_back(absl::StrCat(
+ logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+ if (shared()) {
+ for (size_t i = 0; i < 6; ++i) {
+ result.emplace_back(
+ absl::StrCat(logfile_base1_,
+ "_timestamps/pi1/aos/remote_timestamps/pi2/"
+ "aos.message_bridge.RemoteMessage.part",
+ i, Extension()));
+ }
+ } else {
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part2" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
+ "aos-message_bridge-Timestamp/"
+ "aos.message_bridge.RemoteMessage.part3" +
+ Extension());
+
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part0" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part1" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part2" +
+ Extension());
+ result.emplace_back(logfile_base1_ +
+ "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
+ "aos-examples-Ping/"
+ "aos.message_bridge.RemoteMessage.part3" +
+ Extension());
+ }
+ return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1SingleDirectionLogfiles() {
+ std::vector<std::string> result;
+ result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+ result.emplace_back(logfile_base1_ + "_pi1_data.part1" + Extension());
+ result.emplace_back(logfile_base1_ +
+ "_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0" +
+ Extension());
+ result.emplace_back(absl::StrCat(
+ logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+ return result;
+}
+
+std::vector<std::string> MultinodeLoggerTest::MakePi1DeadNodeLogfiles() {
+ std::vector<std::string> result;
+ result.emplace_back(logfile_base1_ + "_pi1_data.part0" + Extension());
+ result.emplace_back(absl::StrCat(
+ logfile_base1_, "_", std::get<0>(GetParam()).sha256, Extension()));
+ return result;
+}
+
+std::vector<std::vector<std::string>> MultinodeLoggerTest::StructureLogFiles() {
+ std::vector<std::vector<std::string>> result{
+ std::vector<std::string>{logfiles_[2], logfiles_[3], logfiles_[4]},
+ std::vector<std::string>{logfiles_[5], logfiles_[6]},
+ std::vector<std::string>{logfiles_[7], logfiles_[8], logfiles_[9]},
+ std::vector<std::string>{logfiles_[10], logfiles_[11]},
+ std::vector<std::string>{logfiles_[12], logfiles_[13]}};
+
+ if (shared()) {
+ result.emplace_back(
+ std::vector<std::string>{logfiles_[14], logfiles_[15], logfiles_[16]});
+ result.emplace_back(std::vector<std::string>{logfiles_[17], logfiles_[18]});
+ } else {
+ result.emplace_back(std::vector<std::string>{logfiles_[14], logfiles_[15]});
+ result.emplace_back(std::vector<std::string>{logfiles_[16], logfiles_[17]});
+ result.emplace_back(std::vector<std::string>{logfiles_[18], logfiles_[19]});
+ }
+
+ return result;
+}
+
+std::string MultinodeLoggerTest::Extension() {
+ return absl::StrCat(".bfbs", std::get<1>(GetParam()).extension);
+}
+
+LoggerState MultinodeLoggerTest::MakeLogger(
+ NodeEventLoopFactory *node, SimulatedEventLoopFactory *factory,
+ const Configuration *configuration) {
+ if (factory == nullptr) {
+ factory = &event_loop_factory_;
+ }
+ return MakeLoggerState(node, factory, std::get<1>(GetParam()), configuration);
+}
+
+void MultinodeLoggerTest::StartLogger(LoggerState *logger,
+ std::string logfile_base) {
+ if (logfile_base.empty()) {
+ if (logger->event_loop->node()->name()->string_view() == "pi1") {
+ logfile_base = logfile_base1_;
+ } else {
+ logfile_base = logfile_base2_;
+ }
+ }
+ logger->StartLogger(logfile_base);
+}
+
+void MultinodeLoggerTest::VerifyParts(
+ const std::vector<LogFile> &sorted_parts,
+ const std::vector<std::string> &corrupted_parts) {
+ EXPECT_EQ(sorted_parts.size(), 2u);
+
+ // Count up the number of UUIDs and make sure they are what we expect as a
+ // sanity check.
+ std::set<std::string> log_event_uuids;
+ std::set<std::string> parts_uuids;
+ std::set<std::string> both_uuids;
+
+ size_t missing_rt_count = 0;
+
+ std::vector<std::string> logger_nodes;
+ for (const LogFile &log_file : sorted_parts) {
+ EXPECT_FALSE(log_file.log_event_uuid.empty());
+ log_event_uuids.insert(log_file.log_event_uuid);
+ logger_nodes.emplace_back(log_file.logger_node);
+ both_uuids.insert(log_file.log_event_uuid);
+ EXPECT_TRUE(log_file.config);
+ EXPECT_EQ(log_file.name,
+ absl::StrCat("name_prefix_", log_file.logger_node));
+ EXPECT_EQ(log_file.logger_sha1,
+ absl::StrCat("logger_sha1_", log_file.logger_node));
+ EXPECT_EQ(log_file.logger_version,
+ absl::StrCat("logger_version_", log_file.logger_node));
+
+ for (const LogParts &part : log_file.parts) {
+ EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
+ << ": " << part;
+ missing_rt_count +=
+ part.realtime_start_time == aos::realtime_clock::min_time;
+
+ EXPECT_TRUE(log_event_uuids.find(part.log_event_uuid) !=
+ log_event_uuids.end());
+ EXPECT_NE(part.node, "");
+ EXPECT_TRUE(log_file.config);
+ parts_uuids.insert(part.parts_uuid);
+ both_uuids.insert(part.parts_uuid);
+ }
+ }
+
+ // We won't have RT timestamps for 5 or 6 log files. We don't log the RT
+ // start time on remote nodes because we don't know it and would just be
+ // guessing, and the log reader can actually do a better job. The number
+ // depends on whether the remote timestamps are split across 2 files or just
+ // 1, which in turn depends on whether we are using a split or combined
+ // timestamp channel config.
+ EXPECT_EQ(missing_rt_count, shared() ? 5u : 6u);
+
+ EXPECT_EQ(log_event_uuids.size(), 2u);
+ EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
+ EXPECT_EQ(log_event_uuids.size() + parts_uuids.size(), both_uuids.size());
+
+ // Test that each list of parts is in order. Don't worry about the ordering
+ // between part file lists though.
+ // (inner vectors all need to be in order, but outer one doesn't matter).
+ ASSERT_THAT(ToLogReaderVector(sorted_parts),
+ ::testing::UnorderedElementsAreArray(structured_logfiles_));
+
+ EXPECT_THAT(logger_nodes, ::testing::UnorderedElementsAre("pi1", "pi2"));
+
+ EXPECT_NE(sorted_parts[0].realtime_start_time, aos::realtime_clock::min_time);
+ EXPECT_NE(sorted_parts[1].realtime_start_time, aos::realtime_clock::min_time);
+
+ EXPECT_NE(sorted_parts[0].monotonic_start_time,
+ aos::monotonic_clock::min_time);
+ EXPECT_NE(sorted_parts[1].monotonic_start_time,
+ aos::monotonic_clock::min_time);
+
+ EXPECT_THAT(sorted_parts[0].corrupted, ::testing::Eq(corrupted_parts));
+ EXPECT_THAT(sorted_parts[1].corrupted, ::testing::Eq(corrupted_parts));
+}
+
+void MultinodeLoggerTest::AddExtension(std::string_view extension) {
+ std::transform(logfiles_.begin(), logfiles_.end(), logfiles_.begin(),
+ [extension](const std::string &in) {
+ return absl::StrCat(in, extension);
+ });
+
+ std::transform(structured_logfiles_.begin(), structured_logfiles_.end(),
+ structured_logfiles_.begin(),
+ [extension](std::vector<std::string> in) {
+ std::transform(in.begin(), in.end(), in.begin(),
+ [extension](const std::string &in_str) {
+ return absl::StrCat(in_str, extension);
+ });
+ return in;
+ });
+}
+
+std::vector<std::vector<std::string>> ToLogReaderVector(
+ const std::vector<LogFile> &log_files) {
+ std::vector<std::vector<std::string>> result;
+ for (const LogFile &log_file : log_files) {
+ for (const LogParts &log_parts : log_file.parts) {
+ std::vector<std::string> parts;
+ for (const std::string &part : log_parts.parts) {
+ parts.emplace_back(part);
+ }
+ result.emplace_back(std::move(parts));
+ }
+ }
+ return result;
+}
+
+std::vector<CompressionParams> SupportedCompressionAlgorithms() {
+ return {{"",
+ [](size_t max_message_size) {
+ return std::make_unique<DummyEncoder>(max_message_size);
+ }},
+ {SnappyDecoder::kExtension,
+ [](size_t max_message_size) {
+ return std::make_unique<SnappyEncoder>(max_message_size, 32768);
+ }},
+#ifdef LZMA
+ {LzmaDecoder::kExtension,
+ [](size_t max_message_size) {
+ return std::make_unique<LzmaEncoder>(max_message_size, 3);
+ }}
+#endif // LZMA
+ };
+}
+
+std::ostream &operator<<(std::ostream &ostream,
+ const CompressionParams &params) {
+ ostream << "\"" << params.extension << "\"";
+ return ostream;
+}
+
+std::ostream &operator<<(std::ostream &ostream, const ConfigParams &params) {
+ ostream << "{config: \"" << params.config << "\", shared: " << params.shared
+ << ", sha256: \"" << params.sha256 << "\", relogged_sha256: \""
+ << params.relogged_sha256 << "\"}";
+ return ostream;
+}
+
+std::vector<std::pair<std::vector<realtime_clock::time_point>,
+ std::vector<realtime_clock::time_point>>>
+ConfirmReadable(const std::vector<std::string> &files,
+ realtime_clock::time_point start_time,
+ realtime_clock::time_point end_time) {
+ {
+ LogReader reader(SortParts(files));
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ reader.Register(&log_reader_factory);
+
+ log_reader_factory.Run();
+
+ reader.Deregister();
+ }
+ {
+ std::vector<std::pair<std::vector<realtime_clock::time_point>,
+ std::vector<realtime_clock::time_point>>>
+ result;
+ LogReader reader(SortParts(files));
+
+ reader.SetStartTime(start_time);
+ reader.SetEndTime(end_time);
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ reader.RegisterWithoutStarting(&log_reader_factory);
+ result.resize(
+ configuration::NodesCount(log_reader_factory.configuration()));
+ if (configuration::MultiNode(log_reader_factory.configuration())) {
+ size_t i = 0;
+ for (const aos::Node *node :
+ *log_reader_factory.configuration()->nodes()) {
+ LOG(INFO) << "Registering start";
+ reader.OnStart(node, [node, &log_reader_factory, &result,
+ node_index = i]() {
+ LOG(INFO) << "Starting " << node->name()->string_view();
+ result[node_index].first.push_back(
+ log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
+ });
+ reader.OnEnd(node, [node, &log_reader_factory, &result,
+ node_index = i]() {
+ LOG(INFO) << "Ending " << node->name()->string_view();
+ result[node_index].second.push_back(
+ log_reader_factory.GetNodeEventLoopFactory(node)->realtime_now());
+ });
+ ++i;
+ }
+ } else {
+ reader.OnStart([&log_reader_factory, &result]() {
+ LOG(INFO) << "Starting";
+ result[0].first.push_back(
+ log_reader_factory.GetNodeEventLoopFactory(nullptr)
+ ->realtime_now());
+ });
+ reader.OnEnd([&log_reader_factory, &result]() {
+ LOG(INFO) << "Ending";
+ result[0].second.push_back(
+ log_reader_factory.GetNodeEventLoopFactory(nullptr)
+ ->realtime_now());
+ });
+ }
+
+ log_reader_factory.Run();
+
+ reader.Deregister();
+
+ for (auto x : result) {
+ for (auto y : x.first) {
+ VLOG(1) << "Start " << y;
+ }
+ for (auto y : x.second) {
+ VLOG(1) << "End " << y;
+ }
+ }
+ return result;
+ }
+}
+
+// Counts the number of messages on each channel. Returns (channel name,
+// channel type, count) for every channel with messages matching matcher().
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
+ std::shared_ptr<const aos::Configuration> config, std::string_view filename,
+ std::function<bool(const UnpackedMessageHeader *)> matcher) {
+ MessageReader message_reader(filename);
+ std::vector<int> counts(config->channels()->size(), 0);
+
+ while (true) {
+ std::shared_ptr<UnpackedMessageHeader> msg = message_reader.ReadMessage();
+ if (!msg) {
+ break;
+ }
+
+ if (matcher(msg.get())) {
+ counts[msg->channel_index]++;
+ }
+ }
+
+ std::vector<std::tuple<std::string, std::string, int>> result;
+ for (size_t i = 0; i < counts.size(); ++i) {
+ if (counts[i] != 0) {
+ const Channel *channel = config->channels()->Get(i);
+ result.push_back(std::make_tuple(channel->name()->str(),
+ channel->type()->str(), counts[i]));
+ }
+ }
+
+ return result;
+}
+
+// Counts the number of messages (channel, count) for all data messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
+ std::shared_ptr<const aos::Configuration> config,
+ std::string_view filename) {
+ return CountChannelsMatching(
+ config, filename, [](const UnpackedMessageHeader *msg) {
+ if (msg->span.data() != nullptr) {
+ CHECK(!msg->monotonic_remote_time.has_value());
+ CHECK(!msg->realtime_remote_time.has_value());
+ CHECK(!msg->remote_queue_index.has_value());
+ return true;
+ }
+ return false;
+ });
+}
+
+// Counts the number of messages (channel, count) for all timestamp messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
+ std::shared_ptr<const aos::Configuration> config,
+ std::string_view filename) {
+ return CountChannelsMatching(
+ config, filename, [](const UnpackedMessageHeader *msg) {
+ if (msg->span.data() == nullptr) {
+ CHECK(msg->monotonic_remote_time.has_value());
+ CHECK(msg->realtime_remote_time.has_value());
+ CHECK(msg->remote_queue_index.has_value());
+ return true;
+ }
+ return false;
+ });
+}
+
+} // namespace testing
+} // namespace logger
+} // namespace aos
diff --git a/aos/events/logging/multinode_logger_test_lib.h b/aos/events/logging/multinode_logger_test_lib.h
new file mode 100644
index 0000000..d3754e4
--- /dev/null
+++ b/aos/events/logging/multinode_logger_test_lib.h
@@ -0,0 +1,148 @@
+#ifndef AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
+#define AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
+
+#include "absl/strings/str_format.h"
+#include "aos/events/event_loop.h"
+#include "aos/events/logging/log_writer.h"
+#include "aos/events/logging/snappy_encoder.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/network/testing_time_converter.h"
+#include "aos/testing/path.h"
+#include "aos/util/file.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+
+#ifdef LZMA
+#include "aos/events/logging/lzma_encoder.h"
+#endif
+
+namespace aos {
+namespace logger {
+namespace testing {
+
+struct CompressionParams {
+ std::string_view extension;
+ std::function<std::unique_ptr<DataEncoder>(size_t max_message_size)>
+ encoder_factory;
+};
+
+// Parameters to run all the tests with.
+struct ConfigParams {
+ // The config file to use.
+ std::string config;
+ // If true, the RemoteMessage channel should be shared between all the remote
+ // channels. If false, there will be 1 RemoteMessage channel per remote
+ // channel.
+ bool shared;
+ // sha256 of the config.
+ std::string_view sha256;
+ // sha256 of the relogged config
+ std::string_view relogged_sha256;
+};
+
+struct LoggerState {
+ void StartLogger(std::string logfile_base);
+
+ std::unique_ptr<EventLoop> event_loop;
+ std::unique_ptr<Logger> logger;
+ const Configuration *configuration;
+ const Node *node;
+ MultiNodeLogNamer *log_namer;
+ CompressionParams params;
+
+ void AppendAllFilenames(std::vector<std::string> *filenames);
+
+ ~LoggerState();
+};
+
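+// Builds a LoggerState for logging on the given node, using the factory's
+// configuration unless one is explicitly provided.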
+LoggerState MakeLoggerState(NodeEventLoopFactory *node,
+ SimulatedEventLoopFactory *factory,
+ CompressionParams params,
+ const Configuration *configuration = nullptr);
+std::vector<std::vector<std::string>> ToLogReaderVector(
+ const std::vector<LogFile> &log_files);
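+// Returns the compression configurations the tests are run against: no
+// compression (DummyEncoder), snappy, and lzma when built with LZMA support.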
+std::vector<CompressionParams> SupportedCompressionAlgorithms();
+std::ostream &operator<<(std::ostream &ostream,
+ const CompressionParams &params);
+std::ostream &operator<<(std::ostream &ostream, const ConfigParams &params);
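+// Replays the given log files twice: once end to end to make sure they are
+// readable, and once with the provided start and end times. Returns, for each
+// node, the realtime times at which replay started and ended.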
+std::vector<std::pair<std::vector<realtime_clock::time_point>,
+ std::vector<realtime_clock::time_point>>>
+ConfirmReadable(
+ const std::vector<std::string> &files,
+ realtime_clock::time_point start_time = realtime_clock::min_time,
+ realtime_clock::time_point end_time = realtime_clock::max_time);
+// Counts the number of messages on each channel. Returns (channel name,
+// channel type, count) for every channel with messages matching matcher().
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
+ std::shared_ptr<const aos::Configuration> config, std::string_view filename,
+ std::function<bool(const UnpackedMessageHeader *)> matcher);
+// Counts the number of messages (channel, count) for all data messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
+ std::shared_ptr<const aos::Configuration> config,
+ std::string_view filename);
+// Counts the number of messages (channel, count) for all timestamp messages.
+std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
+ std::shared_ptr<const aos::Configuration> config,
+ std::string_view filename);
+
+class MultinodeLoggerTest : public ::testing::TestWithParam<
+ std::tuple<ConfigParams, CompressionParams>> {
+ public:
+ MultinodeLoggerTest();
+
+ bool shared() const;
+
+ std::vector<std::string> MakeLogFiles(std::string logfile_base1,
+ std::string logfile_base2,
+ size_t pi1_data_count = 3,
+ size_t pi2_data_count = 3,
+ bool relogged_config = false);
+
+ std::vector<std::string> MakePi1RebootLogfiles();
+
+ std::vector<std::string> MakePi1SingleDirectionLogfiles();
+
+ std::vector<std::string> MakePi1DeadNodeLogfiles();
+
+ std::vector<std::vector<std::string>> StructureLogFiles();
+
+ std::string Extension();
+
+ LoggerState MakeLogger(NodeEventLoopFactory *node,
+ SimulatedEventLoopFactory *factory = nullptr,
+ const Configuration *configuration = nullptr);
+
+ void StartLogger(LoggerState *logger, std::string logfile_base = "");
+
+ void VerifyParts(const std::vector<LogFile> &sorted_parts,
+ const std::vector<std::string> &corrupted_parts = {});
+
+ void AddExtension(std::string_view extension);
+
+ // Config and factory.
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
+ message_bridge::TestingTimeConverter time_converter_;
+ SimulatedEventLoopFactory event_loop_factory_;
+
+ NodeEventLoopFactory *const pi1_;
+ const size_t pi1_index_;
+ NodeEventLoopFactory *const pi2_;
+ const size_t pi2_index_;
+
+ std::string tmp_dir_;
+ std::string logfile_base1_;
+ std::string logfile_base2_;
+ std::vector<std::string> pi1_reboot_logfiles_;
+ std::vector<std::string> logfiles_;
+ std::vector<std::string> pi1_single_direction_logfiles_;
+
+ std::vector<std::vector<std::string>> structured_logfiles_;
+};
+
+typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
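+
+// The parameterized suites above are expected to be instantiated by the test
+// .cc files that include this header, roughly along these lines (the config
+// name and sha256 values here are placeholders, not the real ones):
+//
+//   INSTANTIATE_TEST_SUITE_P(
+//       All, MultinodeLoggerTest,
+//       ::testing::Combine(
+//           ::testing::Values(ConfigParams{"<config>.json", true, "<sha256>",
+//                                          "<relogged_sha256>"}),
+//           ::testing::ValuesIn(SupportedCompressionAlgorithms())));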
+
+} // namespace testing
+} // namespace logger
+} // namespace aos
+
+#endif // AOS_EVENTS_LOGGING_MULTINODE_LOGGER_TEST_LIB_H
diff --git a/aos/libc/aos_strsignal_test.cc b/aos/libc/aos_strsignal_test.cc
index 51389af..d854aab 100644
--- a/aos/libc/aos_strsignal_test.cc
+++ b/aos/libc/aos_strsignal_test.cc
@@ -26,13 +26,15 @@
}
};
+// msan doesn't seem to like strsignal().
+#if !__has_feature(memory_sanitizer)
// Tests that all the signals give the same result as strsignal(3).
TEST(StrsignalTest, All) {
// Sigh, strsignal allocates a buffer that uses pthread local storage. This
// interacts poorly with asan. Spawning a thread causes the storage to get
// cleaned up before asan checks.
SignalNameTester t;
-#ifdef AOS_SANITIZER_thread
+#if defined(AOS_SANITIZER_thread)
// tsan doesn't like this usage of ::std::thread. It looks like
// <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57507>.
t();
@@ -41,6 +43,7 @@
thread.join();
#endif
}
+#endif
} // namespace testing
} // namespace libc
diff --git a/aos/network/sctp_lib.cc b/aos/network/sctp_lib.cc
index 53a28cb..9632d3c 100644
--- a/aos/network/sctp_lib.cc
+++ b/aos/network/sctp_lib.cc
@@ -56,6 +56,7 @@
struct sockaddr_storage ResolveSocket(std::string_view host, int port,
bool use_ipv6) {
struct sockaddr_storage result;
+ memset(&result, 0, sizeof(result));
struct addrinfo *addrinfo_result;
struct sockaddr_in *t_addr = (struct sockaddr_in *)&result;
struct sockaddr_in6 *t_addr6 = (struct sockaddr_in6 *)&result;
@@ -282,6 +283,7 @@
// Use the assoc_id for the destination instead of the msg_name.
struct msghdr outmsg;
+ memset(&outmsg, 0, sizeof(outmsg));
if (sockaddr_remote) {
outmsg.msg_name = &*sockaddr_remote;
outmsg.msg_namelen = sizeof(*sockaddr_remote);
@@ -338,8 +340,13 @@
CHECK(fd_ != -1);
while (true) {
- aos::unique_c_ptr<Message> result(
- reinterpret_cast<Message *>(malloc(sizeof(Message) + max_size_ + 1)));
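+ // aligned_alloc() requires the size to be a multiple of the alignment, so
+ // round sizeof(Message) + max_size_ + 1 up to the next multiple of
+ // alignof(Message).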
+ constexpr size_t kMessageAlign = alignof(Message);
+ const size_t max_message_size =
+ ((sizeof(Message) + max_size_ + 1 + (kMessageAlign - 1)) /
+ kMessageAlign) *
+ kMessageAlign;
+ aos::unique_c_ptr<Message> result(reinterpret_cast<Message *>(
+ aligned_alloc(kMessageAlign, max_message_size)));
struct msghdr inmessage;
memset(&inmessage, 0, sizeof(struct msghdr));
@@ -489,6 +496,7 @@
// Use the assoc_id for the destination instead of the msg_name.
struct msghdr outmsg;
+ memset(&outmsg, 0, sizeof(outmsg));
outmsg.msg_namelen = 0;
outmsg.msg_iovlen = 0;
diff --git a/aos/network/timestamp_channel.cc b/aos/network/timestamp_channel.cc
index f8f525f..52032f5 100644
--- a/aos/network/timestamp_channel.cc
+++ b/aos/network/timestamp_channel.cc
@@ -5,6 +5,10 @@
DEFINE_bool(combined_timestamp_channel_fallback, true,
"If true, fall back to using the combined timestamp channel if the "
"single timestamp channel doesn't exist for a timestamp.");
+DEFINE_bool(check_timestamp_channel_frequencies, true,
+ "If true, include a debug CHECK to ensure that remote timestamp "
+ "channels are configured to have at least as great a frequency as "
+ "the corresponding data channel.");
namespace aos {
namespace message_bridge {
@@ -109,12 +113,14 @@
const Channel *timestamp_channel = finder.ForChannel(channel, connection);
- // Sanity-check that the timestamp channel can actually support full-rate
- // messages coming through on the source channel.
- CHECK_GE(timestamp_channel->frequency(), channel->frequency())
- << ": Timestamp channel "
- << configuration::StrippedChannelToString(timestamp_channel)
- << "'s rate is lower than the source channel.";
+ if (FLAGS_check_timestamp_channel_frequencies) {
+ // Sanity-check that the timestamp channel can actually support full-rate
+ // messages coming through on the source channel.
+ CHECK_GE(timestamp_channel->frequency(), channel->frequency())
+ << ": Timestamp channel "
+ << configuration::StrippedChannelToString(timestamp_channel)
+ << "'s rate is lower than the source channel.";
+ }
{
auto it = timestamp_loggers_.find(timestamp_channel);
diff --git a/aos/starter/BUILD b/aos/starter/BUILD
index 8a30408..9068caa 100644
--- a/aos/starter/BUILD
+++ b/aos/starter/BUILD
@@ -247,3 +247,16 @@
"//aos/testing:googletest",
],
)
+
+cc_library(
+ name = "mock_starter",
+ srcs = ["mock_starter.cc"],
+ hdrs = ["mock_starter.h"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//aos/events:simulated_event_loop",
+ "//aos/starter:starter_fbs",
+ "//aos/starter:starter_rpc_fbs",
+ "//aos/starter:starter_rpc_lib",
+ ],
+)
diff --git a/aos/starter/mock_starter.cc b/aos/starter/mock_starter.cc
new file mode 100644
index 0000000..1e06758
--- /dev/null
+++ b/aos/starter/mock_starter.cc
@@ -0,0 +1,123 @@
+#include "aos/starter/mock_starter.h"
+
+namespace aos {
+namespace starter {
+
+MockStarter::MockStarter(aos::EventLoop *event_loop)
+ : event_loop_(event_loop),
+ status_sender_(event_loop_->MakeSender<aos::starter::Status>("/aos")) {
+ aos::TimerHandler *send_timer =
+ event_loop_->AddTimer([this]() { SendStatus(); });
+
+ CHECK(aos::configuration::MultiNode(event_loop_->configuration()));
+
+ for (const aos::Node *node :
+ aos::configuration::GetNodes(event_loop_->configuration())) {
+ const aos::Channel *channel = aos::starter::StarterRpcChannelForNode(
+ event_loop_->configuration(), node);
+ if (aos::configuration::ChannelIsReadableOnNode(channel,
+ event_loop_->node())) {
+ std::string_view channel_name = channel->name()->string_view();
+ event_loop_->MakeWatcher(
+ channel_name, [this](const aos::starter::StarterRpc &command) {
+ for (const flatbuffers::String *node : *command.nodes()) {
+ if (node->string_view() ==
+ event_loop_->node()->name()->string_view()) {
+ CHECK(statuses_.count(command.name()->str()) > 0)
+ << "Unable to find " << command.name()->string_view()
+ << " in our list of applications.";
+ ApplicationStatus &status = statuses_[command.name()->str()];
+ switch (command.command()) {
+ case aos::starter::Command::START:
+ if (!status.running) {
+ VLOG(1) << "Starting " << command.name()->string_view()
+ << " at " << event_loop_->monotonic_now();
+ status.running = true;
+ status.start_time = event_loop_->monotonic_now();
+ status.id = next_id_++;
+ }
+ break;
+ case aos::starter::Command::STOP:
+ if (status.running) {
+ VLOG(1) << "Stopping " << command.name()->string_view()
+ << " at " << event_loop_->monotonic_now();
+ }
+ status.running = false;
+ break;
+ case aos::starter::Command::RESTART:
+ status.running = true;
+ VLOG(1) << "Restarting " << command.name()->string_view()
+ << " at " << event_loop_->monotonic_now();
+ status.start_time = event_loop_->monotonic_now();
+ status.id = next_id_++;
+ }
+ SendStatus();
+ }
+ }
+ });
+ }
+ }
+
+ event_loop_->OnRun([this, send_timer]() {
+ send_timer->Setup(event_loop_->monotonic_now(), std::chrono::seconds(1));
+
+ for (const aos::Application *application :
+ *event_loop_->configuration()->applications()) {
+ if (aos::configuration::ApplicationShouldStart(
+ event_loop_->configuration(), event_loop_->node(), application)) {
+ statuses_[application->name()->str()] = ApplicationStatus{
+ next_id_++, application->autostart(), event_loop_->monotonic_now()};
+ }
+ }
+ });
+}
+
+void MockStarter::SendStatus() {
+ aos::Sender<aos::starter::Status>::Builder builder =
+ status_sender_.MakeBuilder();
+ std::vector<flatbuffers::Offset<aos::starter::ApplicationStatus>>
+ status_offsets;
+ for (const std::pair<const std::string, ApplicationStatus> &pair :
+ statuses_) {
+ const flatbuffers::Offset<flatbuffers::String> name_offset =
+ builder.fbb()->CreateString(pair.first);
+ aos::starter::ApplicationStatus::Builder status_builder =
+ builder.MakeBuilder<aos::starter::ApplicationStatus>();
+ status_builder.add_name(name_offset);
+ status_builder.add_state(pair.second.running
+ ? aos::starter::State::RUNNING
+ : aos::starter::State::STOPPED);
+ status_builder.add_last_exit_code(0);
+ status_builder.add_id(pair.second.id);
+ status_builder.add_last_stop_reason(
+ aos::starter::LastStopReason::STOP_REQUESTED);
+ status_builder.add_last_start_time(
+ pair.second.start_time.time_since_epoch().count());
+ if (pair.second.running) {
+ status_builder.add_pid(pair.second.id);
+ }
+ status_offsets.push_back(status_builder.Finish());
+ }
+ const flatbuffers::Offset<
+ flatbuffers::Vector<flatbuffers::Offset<aos::starter::ApplicationStatus>>>
+ statuses_offset = builder.fbb()->CreateVector(status_offsets);
+ aos::starter::Status::Builder status_builder =
+ builder.MakeBuilder<aos::starter::Status>();
+ status_builder.add_statuses(statuses_offset);
+ builder.CheckOk(builder.Send(status_builder.Finish()));
+}
+
+MockStarters::MockStarters(aos::SimulatedEventLoopFactory *event_loop_factory) {
+ CHECK(aos::configuration::MultiNode(event_loop_factory->configuration()));
+ for (const aos::Node *node :
+ aos::configuration::GetNodes(event_loop_factory->configuration())) {
+ event_loops_.emplace_back(
+ event_loop_factory->GetNodeEventLoopFactory(node)->MakeEventLoop(
+ "starterd"));
+ mock_starters_.emplace_back(
+ std::make_unique<MockStarter>(event_loops_.back().get()));
+ }
+}
+
+} // namespace starter
+} // namespace aos
diff --git a/aos/starter/mock_starter.h b/aos/starter/mock_starter.h
new file mode 100644
index 0000000..a0c1b76
--- /dev/null
+++ b/aos/starter/mock_starter.h
@@ -0,0 +1,54 @@
+#include <map>
+
+#include "aos/events/event_loop.h"
+#include "aos/events/simulated_event_loop.h"
+#include "aos/starter/starter_generated.h"
+#include "aos/starter/starter_rpc_generated.h"
+#include "aos/starter/starterd_lib.h"
+
+namespace aos {
+namespace starter {
+
+// Simple mock of starterd that updates the starter status message to act as
+// though applications are started and stopped when requested.
+// TODO(james.kuszmaul): Consider integrating with SimulatedEventLoopFactory.
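+//
+// A rough usage sketch (names here are illustrative, not from a real test):
+//
+//   aos::SimulatedEventLoopFactory factory(&config.message());
+//   aos::starter::MockStarters starters(&factory);
+//   // Send a StarterRpc with Command::START / STOP / RESTART on a node's
+//   // starter channel; the matching MockStarter flips that application
+//   // between RUNNING and STOPPED in its next Status message.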
+class MockStarter {
+ public:
+ struct ApplicationStatus {
+ int id;
+ bool running;
+ aos::monotonic_clock::time_point start_time;
+ };
+
+ MockStarter(aos::EventLoop *event_loop);
+
+ const aos::Node *node() const { return event_loop_->node(); }
+
+ const std::map<std::string, ApplicationStatus> &statuses() const {
+ return statuses_;
+ }
+
+ private:
+ void SendStatus();
+
+ aos::EventLoop *event_loop_;
+ aos::Sender<aos::starter::Status> status_sender_;
+ std::map<std::string, ApplicationStatus> statuses_;
+ int next_id_ = 0;
+};
+
+// Spins up a MockStarter for each node.
+class MockStarters {
+ public:
+ MockStarters(aos::SimulatedEventLoopFactory *event_loop_factory);
+ const std::vector<std::unique_ptr<MockStarter>> &starters() const {
+ return mock_starters_;
+ }
+
+ private:
+ std::vector<std::unique_ptr<aos::EventLoop>> event_loops_;
+ std::vector<std::unique_ptr<MockStarter>> mock_starters_;
+};
+
+} // namespace starter
+} // namespace aos
diff --git a/aos/starter/starterd_lib.cc b/aos/starter/starterd_lib.cc
index 485d1f1..b8b7343 100644
--- a/aos/starter/starterd_lib.cc
+++ b/aos/starter/starterd_lib.cc
@@ -84,7 +84,8 @@
if (aos::configuration::MultiNode(config_msg_)) {
std::string_view current_node = event_loop_.node()->name()->string_view();
for (const aos::Application *application : *applications) {
- CHECK(application->has_nodes());
+ CHECK(application->has_nodes())
+ << ": Missing nodes on " << aos::FlatbufferToJson(application);
for (const flatbuffers::String *node : *application->nodes()) {
if (node->string_view() == current_node) {
AddApplication(application);
diff --git a/aos/util/BUILD b/aos/util/BUILD
index 929b376..2f10a70 100644
--- a/aos/util/BUILD
+++ b/aos/util/BUILD
@@ -346,6 +346,7 @@
target_compatible_with = ["@platforms//os:linux"],
deps = [
"//aos/scoped:scoped_fd",
+ "@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
diff --git a/aos/util/file.cc b/aos/util/file.cc
index 6c35627..4e2d1cd 100644
--- a/aos/util/file.cc
+++ b/aos/util/file.cc
@@ -165,12 +165,46 @@
return span;
}
+FileReader::FileReader(std::string_view filename)
+ : file_(open(::std::string(filename).c_str(), O_RDONLY)) {
+ PCHECK(file_.get() != -1) << ": opening " << filename;
+}
+
+absl::Span<char> FileReader::ReadContents(absl::Span<char> buffer) {
+ PCHECK(0 == lseek(file_.get(), 0, SEEK_SET));
+ const ssize_t result = read(file_.get(), buffer.data(), buffer.size());
+ PCHECK(result >= 0);
+ return {buffer.data(), static_cast<size_t>(result)};
+}
+
FileWriter::FileWriter(std::string_view filename, mode_t permissions)
: file_(open(::std::string(filename).c_str(), O_WRONLY | O_CREAT | O_TRUNC,
permissions)) {
PCHECK(file_.get() != -1) << ": opening " << filename;
}
+// absl::SimpleAtoi doesn't interpret a leading 0x as hex, which we need here.
+// Instead, we use the flatbuffers API, which unfortunately relies on NUL
+// termination.
+int32_t FileReader::ReadInt32() {
+ // Maximum characters for a 32-bit integer, +1 for the NUL.
+ // Hex is the same size with the leading 0x.
+ std::array<char, 11> buffer;
+ int32_t result;
+ const auto string_span =
+ ReadContents(absl::Span<char>(buffer.data(), buffer.size())
+ .subspan(0, buffer.size() - 1));
+ // Verify we found the newline.
+ CHECK_EQ(buffer[string_span.size() - 1], '\n');
+ // Truncate the newline.
+ buffer[string_span.size() - 1] = '\0';
+ CHECK(flatbuffers::StringToNumber(buffer.data(), &result))
+ << ": Error parsing string to integer: "
+ << std::string_view(string_span.data(), string_span.size());
+
+ return result;
+}
+
FileWriter::WriteResult FileWriter::WriteBytes(
absl::Span<const uint8_t> bytes) {
size_t size_written = 0;
diff --git a/aos/util/file.h b/aos/util/file.h
index 56479ce..ddd5d47 100644
--- a/aos/util/file.h
+++ b/aos/util/file.h
@@ -6,12 +6,14 @@
#include <sys/types.h>
#include <memory>
+#include <optional>
#include <string>
#include <string_view>
#include "absl/strings/numbers.h"
#include "absl/types/span.h"
#include "aos/scoped/scoped_fd.h"
+#include "flatbuffers/util.h"
#include "glog/logging.h"
namespace aos {
@@ -48,43 +50,38 @@
// Wrapper to handle reading the contents of a file into a buffer. Meant for
// situations where the malloc'ing of ReadFileToStringOrDie is inappropriate,
// but where you still want to read a file.
-template <int kBufferSize = 1024>
class FileReader {
public:
- FileReader(std::string_view filename)
- : file_(open(::std::string(filename).c_str(), O_RDONLY)) {
- PCHECK(file_.get() != -1) << ": opening " << filename;
- memset(buffer_, 0, kBufferSize);
- }
+ FileReader(std::string_view filename);
// Reads the entire contents of the file into the internal buffer and returns
// a string_view of it.
// Note: The result may not be null-terminated.
- std::string_view ReadContents() {
- PCHECK(0 == lseek(file_.get(), 0, SEEK_SET));
- const ssize_t result = read(file_.get(), buffer_, sizeof(buffer_));
- PCHECK(result >= 0);
- return {buffer_, static_cast<size_t>(result)};
+ absl::Span<char> ReadContents(absl::Span<char> buffer);
+ // Returns the value of the file as a string, for a fixed-length file.
+ // Returns nullopt if the result is smaller than kSize. Ignores any
+ // bytes beyond kSize.
+ template <int kSize>
+ std::optional<std::array<char, kSize>> ReadString() {
+ std::array<char, kSize> result;
+ const absl::Span<char> used_span =
+ ReadContents(absl::Span<char>(result.data(), result.size()));
+ if (used_span.size() == kSize) {
+ return result;
+ } else {
+ return std::nullopt;
+ }
}
- // Calls ReadContents() and attempts to convert the result into an integer, or
- // dies trying.
- int ReadInt() {
- int result;
- std::string_view contents = ReadContents();
- CHECK(absl::SimpleAtoi(contents, &result))
- << "Failed to parse \"" << contents << "\" as int.";
- return result;
- }
+ // Returns the value of the file as an integer. Crashes if it doesn't fit in a
+ // 32-bit integer. The value may start with 0x for a hex value, otherwise it
+ // must be base 10.
+ int32_t ReadInt32();
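+ // For example (a sketch; the path is only illustrative), reading a
+ // sysfs-style file containing a single newline-terminated number:
+ //   FileReader("/sys/class/net/eth0/mtu").ReadInt32();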
private:
aos::ScopedFD file_;
- char buffer_[kBufferSize];
};
// Simple interface to allow opening a file for writing and then writing it
// without any malloc's.
-// TODO(james): It may make sense to add a ReadBytes() interface here that can
-// take a memory buffer to fill, to avoid the templating required by the
-// self-managed buffer of FileReader<>.
class FileWriter {
public:
// The result of an individual call to WriteBytes().
diff --git a/aos/util/file_test.cc b/aos/util/file_test.cc
index 9b0def0..d4382c4 100644
--- a/aos/util/file_test.cc
+++ b/aos/util/file_test.cc
@@ -56,8 +56,20 @@
FLAGS_die_on_malloc = true;
RegisterMallocHook();
aos::ScopedRealtime realtime;
- EXPECT_EQ("123456789\n", reader.ReadContents());
- EXPECT_EQ(123456789, reader.ReadInt());
+ {
+ std::array<char, 20> contents;
+ absl::Span<char> read_result =
+ reader.ReadContents({contents.data(), contents.size()});
+ EXPECT_EQ("123456789\n",
+ std::string_view(read_result.data(), read_result.size()));
+ }
+ {
+ std::optional<std::array<char, 10>> read_result = reader.ReadString<10>();
+ ASSERT_TRUE(read_result.has_value());
+ EXPECT_EQ("123456789\n",
+ std::string_view(read_result->data(), read_result->size()));
+ }
+ EXPECT_EQ(123456789, reader.ReadInt32());
}
// Tests that we can write to a file without malloc'ing.
diff --git a/frc971/autonomous/base_autonomous_actor.cc b/frc971/autonomous/base_autonomous_actor.cc
index ca6cc89..90c1454 100644
--- a/frc971/autonomous/base_autonomous_actor.cc
+++ b/frc971/autonomous/base_autonomous_actor.cc
@@ -177,6 +177,24 @@
return false;
}
+double BaseAutonomousActor::X() {
+ drivetrain_status_fetcher_.Fetch();
+ CHECK(drivetrain_status_fetcher_.get());
+ return drivetrain_status_fetcher_->x();
+}
+
+double BaseAutonomousActor::Y() {
+ drivetrain_status_fetcher_.Fetch();
+ CHECK(drivetrain_status_fetcher_.get());
+ return drivetrain_status_fetcher_->y();
+}
+
+double BaseAutonomousActor::Theta() {
+ drivetrain_status_fetcher_.Fetch();
+ CHECK(drivetrain_status_fetcher_.get());
+ return drivetrain_status_fetcher_->theta();
+}
+
bool BaseAutonomousActor::WaitForAboveAngle(double angle) {
::aos::time::PhasedLoop phased_loop(frc971::controls::kLoopFrequency,
event_loop()->monotonic_now(),
@@ -412,18 +430,26 @@
// when we reach the end of the spline).
// (b) The spline that we are executing is the correct one.
// (c) There is less than distance distance remaining.
+ if (base_autonomous_actor_->drivetrain_status_fetcher_->trajectory_logging()
+ ->goal_spline_handle() != spline_handle_) {
+ // Never done if we aren't the active spline.
+ return false;
+ }
+
+ if (base_autonomous_actor_->drivetrain_status_fetcher_->trajectory_logging()
+ ->is_executed()) {
+ return true;
+ }
return base_autonomous_actor_->drivetrain_status_fetcher_
->trajectory_logging()
->is_executing() &&
base_autonomous_actor_->drivetrain_status_fetcher_
->trajectory_logging()
- ->goal_spline_handle() == spline_handle_ &&
- base_autonomous_actor_->drivetrain_status_fetcher_
- ->trajectory_logging()
->distance_remaining() < distance;
}
return false;
}
+
bool BaseAutonomousActor::SplineHandle::WaitForSplineDistanceRemaining(
double distance) {
::aos::time::PhasedLoop phased_loop(
diff --git a/frc971/autonomous/base_autonomous_actor.h b/frc971/autonomous/base_autonomous_actor.h
index a70a440..5562dde 100644
--- a/frc971/autonomous/base_autonomous_actor.h
+++ b/frc971/autonomous/base_autonomous_actor.h
@@ -91,6 +91,11 @@
// Returns true if the drive has finished.
bool IsDriveDone();
+ // Returns the current x, y, theta of the robot on the field.
+ double X();
+ double Y();
+ double Theta();
+
void LineFollowAtVelocity(
double velocity,
y2019::control_loops::drivetrain::SelectionHint hint =
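A hypothetical sketch of how an autonomous routine might use the new pose accessors; the subclass name and the logging statement are illustrative only.

// Assumes a year-specific actor that inherits from BaseAutonomousActor.
void MyAutonomousActor::LogCurrentPose() {
  // Each accessor fetches the latest drivetrain status and CHECK-fails if no
  // status has been received yet, so only call these once the drivetrain loop
  // is publishing.
  LOG(INFO) << "Robot pose: x=" << X() << " y=" << Y()
            << " theta=" << Theta();
}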
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.h b/frc971/control_loops/drivetrain/splinedrivetrain.h
index cd712bc..71e5ffe 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.h
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.h
@@ -22,7 +22,7 @@
class SplineDrivetrain {
public:
- static constexpr size_t kMaxTrajectories = 5;
+ static constexpr size_t kMaxTrajectories = 6;
SplineDrivetrain(const DrivetrainConfig<double> &dt_config);
void SetGoal(const ::frc971::control_loops::drivetrain::Goal *goal);
diff --git a/scouting/db/db.go b/scouting/db/db.go
index adf1eae..3791596 100644
--- a/scouting/db/db.go
+++ b/scouting/db/db.go
@@ -7,6 +7,7 @@
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/logger"
+ "strconv"
)
type Database struct {
@@ -19,7 +20,7 @@
CompLevel string `gorm:"primaryKey"`
Alliance string `gorm:"primaryKey"` // "R" or "B"
AlliancePosition int32 `gorm:"primaryKey"` // 1, 2, or 3
- TeamNumber int32
+ TeamNumber string
}
type Shift struct {
@@ -75,7 +76,9 @@
LowConesAuto, MiddleConesAuto, HighConesAuto, ConesDroppedAuto int32
LowCubes, MiddleCubes, HighCubes, CubesDropped int32
LowCones, MiddleCones, HighCones, ConesDropped int32
- AvgCycle int32
+ AvgCycle int64
+ DockedAuto, EngagedAuto bool
+ Docked, Engaged bool
// The username of the person who collected these statistics.
// "unknown" if submitted without logging in.
// Empty if the stats have not yet been collected.
@@ -195,7 +198,7 @@
}
func (database *Database) AddToStats(s Stats) error {
- matches, err := database.queryMatches(s.TeamNumber)
+ matches, err := database.queryMatches(strconv.Itoa(int(s.TeamNumber)))
if err != nil {
return err
}
@@ -344,7 +347,7 @@
return rankins, result.Error
}
-func (database *Database) queryMatches(teamNumber_ int32) ([]TeamMatch, error) {
+func (database *Database) queryMatches(teamNumber_ string) ([]TeamMatch, error) {
var matches []TeamMatch
result := database.
Where("team_number = $1", teamNumber_).
diff --git a/scouting/db/db_test.go b/scouting/db/db_test.go
index a6bfa79..72c5f86 100644
--- a/scouting/db/db_test.go
+++ b/scouting/db/db_test.go
@@ -6,7 +6,6 @@
"os"
"os/exec"
"reflect"
- "strconv"
"strings"
"testing"
"time"
@@ -75,27 +74,27 @@
correct := []TeamMatch{
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 9999,
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "9999",
},
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1000,
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1000",
},
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 777,
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "777",
},
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 0000,
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "0000",
},
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 4321,
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "4321",
},
TeamMatch{
MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1234,
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1234",
},
}
@@ -202,17 +201,17 @@
}
matches := []TeamMatch{
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1236},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1236"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1001},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1001"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 777},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "777"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 1000},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "1000"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 4321},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "4321"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1234},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1234"},
}
for _, match := range matches {
@@ -246,7 +245,8 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 0, CollectedBy: "emma",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "emma",
},
Stats2023{
TeamNumber: "7454", MatchNumber: 3, SetNumber: 1,
@@ -256,7 +256,8 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
HighCubes: 0, CubesDropped: 1, LowCones: 0,
MiddleCones: 0, HighCones: 1, ConesDropped: 0,
- AvgCycle: 0, CollectedBy: "tyler",
+ AvgCycle: 0, DockedAuto: false, EngagedAuto: false,
+ Docked: true, Engaged: true, CollectedBy: "tyler",
},
Stats2023{
TeamNumber: "4354", MatchNumber: 3, SetNumber: 1,
@@ -266,7 +267,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
HighCubes: 2, CubesDropped: 1, LowCones: 1,
MiddleCones: 1, HighCones: 0, ConesDropped: 1,
- AvgCycle: 0, CollectedBy: "isaac",
+ AvgCycle: 0, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "isaac",
},
Stats2023{
TeamNumber: "6533", MatchNumber: 3, SetNumber: 1,
@@ -276,7 +278,8 @@
ConesDroppedAuto: 0, LowCubes: 0, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 0,
MiddleCones: 1, HighCones: 0, ConesDropped: 0,
- AvgCycle: 0, CollectedBy: "will",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "will",
},
Stats2023{
TeamNumber: "8354", MatchNumber: 3, SetNumber: 1,
@@ -286,21 +289,22 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
HighCubes: 0, CubesDropped: 2, LowCones: 1,
MiddleCones: 1, HighCones: 0, ConesDropped: 1,
- AvgCycle: 0, CollectedBy: "unkown",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+ Docked: true, Engaged: false, CollectedBy: "unkown",
},
}
matches := []TeamMatch{
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 6344},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 7454},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "7454"},
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 4354},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "4354"},
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 6533},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "6533"},
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 8354},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "8354"},
}
for _, match := range matches {
@@ -334,7 +338,8 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 0, CollectedBy: "emma",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "emma",
},
Stats2023{
TeamNumber: "7454", MatchNumber: 4, SetNumber: 1,
@@ -344,7 +349,8 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 0,
HighCubes: 0, CubesDropped: 1, LowCones: 0,
MiddleCones: 0, HighCones: 1, ConesDropped: 0,
- AvgCycle: 0, CollectedBy: "tyler",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "tyler",
},
Stats2023{
TeamNumber: "6344", MatchNumber: 5, SetNumber: 1,
@@ -354,17 +360,18 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 2,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 0, CollectedBy: "emma",
+ AvgCycle: 0, DockedAuto: true, EngagedAuto: false,
+ Docked: true, Engaged: false, CollectedBy: "emma",
},
}
matches := []TeamMatch{
TeamMatch{MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 6344},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
TeamMatch{MatchNumber: 4, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 7454},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "7454"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 6344},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "6344"},
}
for _, match := range matches {
@@ -413,17 +420,17 @@
matches := []TeamMatch{
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1236},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1236"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1001},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1001"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 777},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "777"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 1000},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "1000"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 4321},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "4321"},
TeamMatch{MatchNumber: 7, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1234},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1234"},
}
for _, match := range matches {
@@ -458,7 +465,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 1,
MiddleCones: 0, HighCones: 1, ConesDropped: 2,
- AvgCycle: 58, CollectedBy: "unknown",
+ AvgCycle: 58, DockedAuto: false, EngagedAuto: false,
+ Docked: true, Engaged: true, CollectedBy: "unknown",
},
Stats2023{
TeamNumber: "2314", MatchNumber: 5, SetNumber: 1,
@@ -468,7 +476,8 @@
ConesDroppedAuto: 0, LowCubes: 2, MiddleCubes: 0,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 0,
- AvgCycle: 34, CollectedBy: "simon",
+ AvgCycle: 34, DockedAuto: true, EngagedAuto: true,
+ Docked: true, Engaged: false, CollectedBy: "simon",
},
Stats2023{
TeamNumber: "3242", MatchNumber: 5, SetNumber: 1,
@@ -478,7 +487,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 2,
MiddleCones: 0, HighCones: 1, ConesDropped: 1,
- AvgCycle: 50, CollectedBy: "eliza",
+ AvgCycle: 50, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "eliza",
},
Stats2023{
TeamNumber: "1742", MatchNumber: 5, SetNumber: 1,
@@ -488,7 +498,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 49, CollectedBy: "isaac",
+ AvgCycle: 49, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "isaac",
},
Stats2023{
TeamNumber: "2454", MatchNumber: 5, SetNumber: 1,
@@ -498,7 +509,8 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 1,
MiddleCones: 1, HighCones: 1, ConesDropped: 0,
- AvgCycle: 70, CollectedBy: "sam",
+ AvgCycle: 70, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "sam",
},
}
@@ -511,7 +523,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 2,
MiddleCones: 0, HighCones: 1, ConesDropped: 1,
- AvgCycle: 50, CollectedBy: "eliza",
+ AvgCycle: 50, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "eliza",
},
Stats2023{
TeamNumber: "2454", MatchNumber: 5, SetNumber: 1,
@@ -521,31 +534,32 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 1,
MiddleCones: 1, HighCones: 1, ConesDropped: 0,
- AvgCycle: 70, CollectedBy: "sam",
+ AvgCycle: 70, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "sam",
},
}
originalMatches := []TeamMatch{
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1111},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1111"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 2314},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "2314"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 1742},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "1742"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 2454},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "2454"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 3242},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "3242"},
}
// Matches for which we want to delete the stats.
matches := []TeamMatch{
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- TeamNumber: 1111},
+ TeamNumber: "1111"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- TeamNumber: 2314},
+ TeamNumber: "2314"},
TeamMatch{MatchNumber: 5, SetNumber: 1, CompLevel: "quals",
- TeamNumber: 1742},
+ TeamNumber: "1742"},
}
for _, match := range originalMatches {
@@ -560,7 +574,7 @@
}
for _, match := range matches {
- err := fixture.db.DeleteFromStats(match.CompLevel, match.MatchNumber, match.SetNumber, strconv.Itoa(int(match.TeamNumber)))
+ err := fixture.db.DeleteFromStats(match.CompLevel, match.MatchNumber, match.SetNumber, match.TeamNumber)
check(t, err, "Failed to delete stat")
}
@@ -663,17 +677,17 @@
matches := []TeamMatch{
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1234},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1234"},
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 1233},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "1233"},
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 1232},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "1232"},
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 1231},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "1231"},
TeamMatch{MatchNumber: 94, SetNumber: 2, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1239},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1239"},
}
for _, match := range matches {
@@ -759,15 +773,15 @@
correct := []TeamMatch{
TeamMatch{
- MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 1, TeamNumber: 6835},
+ MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 1, TeamNumber: "6835"},
TeamMatch{
- MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 4834},
+ MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "4834"},
TeamMatch{
- MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 9824},
+ MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "9824"},
TeamMatch{
- MatchNumber: 7, SetNumber: 2, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: 3732},
+ MatchNumber: 7, SetNumber: 2, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: "3732"},
TeamMatch{
- MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: 3732},
+ MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 1, TeamNumber: "3732"},
}
for i := 0; i < len(correct); i++ {
@@ -789,11 +803,11 @@
testDatabase := []TeamMatch{
TeamMatch{
- MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 4464},
+ MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "4464"},
TeamMatch{
- MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 2352},
+ MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "2352"},
TeamMatch{
- MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 6321},
+ MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "6321"},
}
for i := 0; i < len(testDatabase); i++ {
@@ -803,9 +817,9 @@
correct := []TeamMatch{
TeamMatch{
- MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: 2352},
+ MatchNumber: 8, SetNumber: 1, CompLevel: "quals", Alliance: "R", AlliancePosition: 2, TeamNumber: "2352"},
TeamMatch{
- MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: 6321},
+ MatchNumber: 9, SetNumber: 1, CompLevel: "quals", Alliance: "B", AlliancePosition: 3, TeamNumber: "6321"},
}
got, err := fixture.db.ReturnMatches()
@@ -940,17 +954,17 @@
matches := []TeamMatch{
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1236},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1236"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 1237},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "1237"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 1238},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "1238"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 1239},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "1239"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1233},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1233"},
}
for _, match := range matches {
@@ -984,7 +998,8 @@
ConesDroppedAuto: 1, LowCubes: 1, MiddleCubes: 2,
HighCubes: 1, CubesDropped: 0, LowCones: 2,
MiddleCones: 0, HighCones: 2, ConesDropped: 1,
- AvgCycle: 51, CollectedBy: "isaac",
+ AvgCycle: 51, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "isaac",
},
Stats2023{
TeamNumber: "5443", MatchNumber: 2, SetNumber: 1,
@@ -994,7 +1009,8 @@
ConesDroppedAuto: 0, LowCubes: 2, MiddleCubes: 2,
HighCubes: 1, CubesDropped: 0, LowCones: 1,
MiddleCones: 0, HighCones: 2, ConesDropped: 1,
- AvgCycle: 39, CollectedBy: "jack",
+ AvgCycle: 39, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "jack",
},
Stats2023{
TeamNumber: "5436", MatchNumber: 2, SetNumber: 1,
@@ -1004,7 +1020,8 @@
ConesDroppedAuto: 1, LowCubes: 2, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 1,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 45, CollectedBy: "martin",
+ AvgCycle: 45, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "martin",
},
Stats2023{
TeamNumber: "5643", MatchNumber: 2, SetNumber: 1,
@@ -1014,19 +1031,20 @@
ConesDroppedAuto: 1, LowCubes: 2, MiddleCubes: 2,
HighCubes: 0, CubesDropped: 0, LowCones: 2,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 34, CollectedBy: "unknown",
+ AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+ Docked: true, Engaged: false, CollectedBy: "unknown",
},
}
matches := []TeamMatch{
TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 2343},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "2343"},
TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 5443},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "5443"},
TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 5436},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "5436"},
TeamMatch{MatchNumber: 2, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 5643},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "5643"},
}
for _, match := range matches {
@@ -1079,17 +1097,17 @@
matches := []TeamMatch{
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 1235},
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "1235"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 1236},
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "1236"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 1237},
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "1237"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 1238},
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "1238"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 1239},
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "1239"},
TeamMatch{MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 1233},
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "1233"},
}
for _, match := range matches {
diff --git a/scouting/webserver/match_list/match_list.go b/scouting/webserver/match_list/match_list.go
index 9029438..c5af661 100644
--- a/scouting/webserver/match_list/match_list.go
+++ b/scouting/webserver/match_list/match_list.go
@@ -97,32 +97,32 @@
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "R", AlliancePosition: 1, TeamNumber: red[0],
+ Alliance: "R", AlliancePosition: 1, TeamNumber: strconv.Itoa(int(red[0])),
},
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "R", AlliancePosition: 2, TeamNumber: red[1],
+ Alliance: "R", AlliancePosition: 2, TeamNumber: strconv.Itoa(int(red[1])),
},
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "R", AlliancePosition: 3, TeamNumber: red[2],
+ Alliance: "R", AlliancePosition: 3, TeamNumber: strconv.Itoa(int(red[2])),
},
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "B", AlliancePosition: 1, TeamNumber: blue[0],
+ Alliance: "B", AlliancePosition: 1, TeamNumber: strconv.Itoa(int(blue[0])),
},
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "B", AlliancePosition: 2, TeamNumber: blue[1],
+ Alliance: "B", AlliancePosition: 2, TeamNumber: strconv.Itoa(int(blue[1])),
},
{
MatchNumber: int32(match.MatchNumber),
SetNumber: int32(match.SetNumber), CompLevel: match.CompLevel,
- Alliance: "B", AlliancePosition: 3, TeamNumber: blue[2],
+ Alliance: "B", AlliancePosition: 3, TeamNumber: strconv.Itoa(int(blue[2])),
},
}
diff --git a/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs b/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
index d9d36b3..cd6afc6 100644
--- a/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
+++ b/scouting/webserver/requests/messages/request_2023_data_scouting_response.fbs
@@ -24,13 +24,18 @@
middle_cones:int (id:16);
high_cones:int (id:17);
cones_dropped:int (id:18);
- avg_cycle:int (id:19);
+ // Time in nanoseconds.
+ avg_cycle:int64 (id:19);
+ docked_auto:bool (id:20);
+ engaged_auto:bool (id:23);
+ docked:bool (id:25);
+ engaged:bool (id:26);
- collected_by:string (id:20);
+ collected_by:string (id:24);
}
table Request2023DataScoutingResponse {
stats_list:[Stats2023] (id:0);
}
-root_type Request2023DataScoutingResponse;
\ No newline at end of file
+root_type Request2023DataScoutingResponse;
diff --git a/scouting/webserver/requests/messages/request_all_matches_response.fbs b/scouting/webserver/requests/messages/request_all_matches_response.fbs
index 9404675..55da7bb 100644
--- a/scouting/webserver/requests/messages/request_all_matches_response.fbs
+++ b/scouting/webserver/requests/messages/request_all_matches_response.fbs
@@ -14,12 +14,12 @@
match_number:int (id: 0);
set_number:int (id: 1);
comp_level:string (id: 2);
- r1:int (id: 3);
- r2:int (id: 4);
- r3:int (id: 5);
- b1:int (id: 6);
- b2:int (id: 7);
- b3:int (id: 8);
+ r1:string (id: 3);
+ r2:string (id: 4);
+ r3:string (id: 5);
+ b1:string (id: 6);
+ b2:string (id: 7);
+ b3:string (id: 8);
// Tells you how completely we've data scouted this match.
data_scouted: ScoutedLevel (id: 9);
diff --git a/scouting/webserver/requests/messages/submit_actions.fbs b/scouting/webserver/requests/messages/submit_actions.fbs
index ebbaa5c..5488a79 100644
--- a/scouting/webserver/requests/messages/submit_actions.fbs
+++ b/scouting/webserver/requests/messages/submit_actions.fbs
@@ -50,7 +50,7 @@
}
table Action {
- timestamp:int (id:0);
+ timestamp:int64 (id:0);
action_taken:ActionType (id:2);
}
@@ -61,4 +61,4 @@
comp_level:string (id: 3);
actions_list:[Action] (id:4);
collected_by:string (id: 5);
-}
\ No newline at end of file
+}
diff --git a/scouting/webserver/requests/requests.go b/scouting/webserver/requests/requests.go
index 39f344e..4f12a4f 100644
--- a/scouting/webserver/requests/requests.go
+++ b/scouting/webserver/requests/requests.go
@@ -232,9 +232,9 @@
return -1, errors.New(fmt.Sprint("Failed to find comp level ", comp_level, " in list ", list))
}
-func (handler requestAllMatchesHandler) teamHasBeenDataScouted(key MatchAssemblyKey, teamNumber int32) (bool, error) {
+func (handler requestAllMatchesHandler) teamHasBeenDataScouted(key MatchAssemblyKey, teamNumber string) (bool, error) {
stats, err := handler.db.ReturnStats2023ForTeam(
- strconv.Itoa(int(teamNumber)), key.MatchNumber, key.SetNumber, key.CompLevel)
+ teamNumber, key.MatchNumber, key.SetNumber, key.CompLevel)
if err != nil {
return false, err
}
@@ -277,7 +277,7 @@
}
}
- var team *int32
+ var team *string
var dataScoutedTeam *bool
// Fill in the field for the match that we have in in the
@@ -524,6 +524,15 @@
var startMatchAction submit_actions.StartMatchAction
startMatchAction.Init(actionTable.Bytes, actionTable.Pos)
stat.StartingQuadrant = startMatchAction.Position()
+ } else if action_type == submit_actions.ActionTypeAutoBalanceAction {
+ var autoBalanceAction submit_actions.AutoBalanceAction
+ autoBalanceAction.Init(actionTable.Bytes, actionTable.Pos)
+ if autoBalanceAction.Docked() {
+ stat.DockedAuto = true
+ }
+ if autoBalanceAction.Engaged() {
+ stat.EngagedAuto = true
+ }
} else if action_type == submit_actions.ActionTypePickupObjectAction {
var pick_up_action submit_actions.PickupObjectAction
pick_up_action.Init(actionTable.Bytes, actionTable.Pos)
@@ -587,10 +596,19 @@
cycles += 1
}
lastPlacedTime = int64(action.Timestamp())
+ } else if action_type == submit_actions.ActionTypeEndMatchAction {
+ var endMatchAction submit_actions.EndMatchAction
+ endMatchAction.Init(actionTable.Bytes, actionTable.Pos)
+ if endMatchAction.Docked() {
+ stat.Docked = true
+ }
+ if endMatchAction.Engaged() {
+ stat.Engaged = true
+ }
}
}
if cycles != 0 {
- stat.AvgCycle = int32(overall_time / cycles)
+ stat.AvgCycle = overall_time / cycles
} else {
stat.AvgCycle = 0
}
@@ -645,6 +663,10 @@
HighCones: stat.HighCones,
ConesDropped: stat.ConesDropped,
AvgCycle: stat.AvgCycle,
+ DockedAuto: stat.DockedAuto,
+ EngagedAuto: stat.EngagedAuto,
+ Docked: stat.Docked,
+ Engaged: stat.Engaged,
CollectedBy: stat.CollectedBy,
})
}
diff --git a/scouting/webserver/requests/requests_test.go b/scouting/webserver/requests/requests_test.go
index dbad955..d2d5acc 100644
--- a/scouting/webserver/requests/requests_test.go
+++ b/scouting/webserver/requests/requests_test.go
@@ -128,75 +128,75 @@
matches: []db.TeamMatch{
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 5,
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "5",
},
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 42,
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "42",
},
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 600,
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "600",
},
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 971,
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "971",
},
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 400,
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "400",
},
{
MatchNumber: 1, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 200,
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "200",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 6,
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "6",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 43,
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "43",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 601,
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "601",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 972,
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "972",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 401,
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "401",
},
{
MatchNumber: 2, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 201,
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "201",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 1, TeamNumber: 7,
+ Alliance: "R", AlliancePosition: 1, TeamNumber: "7",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 2, TeamNumber: 44,
+ Alliance: "R", AlliancePosition: 2, TeamNumber: "44",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "R", AlliancePosition: 3, TeamNumber: 602,
+ Alliance: "R", AlliancePosition: 3, TeamNumber: "602",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 1, TeamNumber: 973,
+ Alliance: "B", AlliancePosition: 1, TeamNumber: "973",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 2, TeamNumber: 402,
+ Alliance: "B", AlliancePosition: 2, TeamNumber: "402",
},
{
MatchNumber: 3, SetNumber: 1, CompLevel: "qm",
- Alliance: "B", AlliancePosition: 3, TeamNumber: 202,
+ Alliance: "B", AlliancePosition: 3, TeamNumber: "202",
},
},
// Pretend that we have some data scouting data.
@@ -209,7 +209,8 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 1,
MiddleCones: 2, HighCones: 0, ConesDropped: 1,
- AvgCycle: 34, CollectedBy: "alex",
+ AvgCycle: 34, DockedAuto: true, EngagedAuto: true,
+ Docked: false, Engaged: false, CollectedBy: "alex",
},
{
TeamNumber: "973", MatchNumber: 3, SetNumber: 1,
@@ -219,7 +220,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 53, CollectedBy: "bob",
+ AvgCycle: 53, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "bob",
},
},
}
@@ -242,7 +244,7 @@
// R1, R2, R3, B1, B2, B3
{
1, 1, "qm",
- 5, 42, 600, 971, 400, 200,
+ "5", "42", "600", "971", "400", "200",
&request_all_matches_response.ScoutedLevelT{
// The R1 team has already been data
// scouted.
@@ -251,14 +253,14 @@
},
{
2, 1, "qm",
- 6, 43, 601, 972, 401, 201,
+ "6", "43", "601", "972", "401", "201",
&request_all_matches_response.ScoutedLevelT{
false, false, false, false, false, false,
},
},
{
3, 1, "qm",
- 7, 44, 602, 973, 402, 202,
+ "7", "44", "602", "973", "402", "202",
&request_all_matches_response.ScoutedLevelT{
// The B1 team has already been data
// scouted.
@@ -367,7 +369,8 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 1,
MiddleCones: 2, HighCones: 0, ConesDropped: 1,
- AvgCycle: 34, CollectedBy: "isaac",
+ AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "isaac",
},
{
TeamNumber: "2343", MatchNumber: 1, SetNumber: 2,
@@ -377,7 +380,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 53, CollectedBy: "unknown",
+ AvgCycle: 53, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "unknown",
},
},
}
@@ -404,7 +408,8 @@
ConesDroppedAuto: 0, LowCubes: 1, MiddleCubes: 1,
HighCubes: 2, CubesDropped: 1, LowCones: 1,
MiddleCones: 2, HighCones: 0, ConesDropped: 1,
- AvgCycle: 34, CollectedBy: "isaac",
+ AvgCycle: 34, DockedAuto: true, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "isaac",
},
{
TeamNumber: "2343", MatchNumber: 1, SetNumber: 2,
@@ -414,7 +419,8 @@
ConesDroppedAuto: 1, LowCubes: 0, MiddleCubes: 0,
HighCubes: 1, CubesDropped: 0, LowCones: 0,
MiddleCones: 2, HighCones: 1, ConesDropped: 1,
- AvgCycle: 53, CollectedBy: "unknown",
+ AvgCycle: 53, DockedAuto: false, EngagedAuto: false,
+ Docked: false, Engaged: false, CollectedBy: "unknown",
},
},
}
@@ -480,6 +486,16 @@
},
{
ActionTaken: &submit_actions.ActionTypeT{
+ Type: submit_actions.ActionTypeAutoBalanceAction,
+ Value: &submit_actions.AutoBalanceActionT{
+ Docked: true,
+ Engaged: true,
+ },
+ },
+ Timestamp: 2400,
+ },
+ {
+ ActionTaken: &submit_actions.ActionTypeT{
Type: submit_actions.ActionTypePickupObjectAction,
Value: &submit_actions.PickupObjectActionT{
ObjectType: submit_actions.ObjectTypekCone,
@@ -499,6 +515,16 @@
},
Timestamp: 3100,
},
+ {
+ ActionTaken: &submit_actions.ActionTypeT{
+ Type: submit_actions.ActionTypeEndMatchAction,
+ Value: &submit_actions.EndMatchActionT{
+ Docked: true,
+ Engaged: false,
+ },
+ },
+ Timestamp: 4000,
+ },
},
}).Pack(builder))
@@ -517,7 +543,8 @@
ConesDroppedAuto: 0, LowCubes: 0, MiddleCubes: 0,
HighCubes: 0, CubesDropped: 0, LowCones: 0,
MiddleCones: 0, HighCones: 1, ConesDropped: 0,
- AvgCycle: 1100, CollectedBy: "katie",
+ AvgCycle: 1100, DockedAuto: true, EngagedAuto: true,
+ Docked: true, Engaged: false, CollectedBy: "katie",
}
if expected != response {
diff --git a/scouting/www/entry/entry.component.ts b/scouting/www/entry/entry.component.ts
index 44fc958..aef97f7 100644
--- a/scouting/www/entry/entry.component.ts
+++ b/scouting/www/entry/entry.component.ts
@@ -118,7 +118,6 @@
matchStartTimestamp: number = 0;
addAction(action: ActionT): void {
- action.timestamp = Math.floor(Date.now() / 1000);
if (action.type == 'startMatchAction') {
// Unix nanosecond timestamp.
this.matchStartTimestamp = Date.now() * 1e6;
@@ -193,7 +192,7 @@
StartMatchAction.createStartMatchAction(builder, action.position);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.StartMatchAction,
startMatchActionOffset
);
@@ -208,7 +207,7 @@
);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.PickupObjectAction,
pickupObjectActionOffset
);
@@ -223,7 +222,7 @@
);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.AutoBalanceAction,
autoBalanceActionOffset
);
@@ -239,7 +238,7 @@
);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.PlaceObjectAction,
placeObjectActionOffset
);
@@ -250,7 +249,7 @@
RobotDeathAction.createRobotDeathAction(builder, action.robotOn);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.RobotDeathAction,
robotDeathActionOffset
);
@@ -264,7 +263,7 @@
);
actionOffset = Action.createAction(
builder,
- action.timestamp || 0,
+ BigInt(action.timestamp || 0),
ActionType.EndMatchAction,
endMatchActionOffset
);
diff --git a/scouting/www/entry/entry.ng.html b/scouting/www/entry/entry.ng.html
index 9f4c8eb..e06ab18 100644
--- a/scouting/www/entry/entry.ng.html
+++ b/scouting/www/entry/entry.ng.html
@@ -115,20 +115,17 @@
<!-- 'Balancing' during auto. -->
<div *ngIf="autoPhase" class="d-grid gap-2">
<label>
- <input type="checkbox" (change)="dockedValue = $event.target.value" />
+ <input #docked type="checkbox" />
Docked
</label>
<label>
- <input
- type="checkbox"
- (change)="engagedValue = $event.target.value"
- />
+ <input #engaged type="checkbox" />
Engaged
</label>
<br />
<button
class="btn btn-info"
- (click)="addAction({type: 'autoBalanceAction', docked: dockedValue, engaged: engagedValue});"
+ (click)="addAction({type: 'autoBalanceAction', docked: docked.checked, engaged: engaged.checked});"
>
Submit Balancing
</button>
@@ -183,20 +180,17 @@
<!-- 'Balancing' during auto. -->
<div *ngIf="autoPhase" class="d-grid gap-2">
<label>
- <input type="checkbox" (change)="dockedValue = $event.target.value" />
+ <input #docked type="checkbox" />
Docked
</label>
<label>
- <input
- type="checkbox"
- (change)="engagedValue = $event.target.value"
- />
+ <input #engaged type="checkbox" />
Engaged
</label>
<br />
<button
class="btn btn-info"
- (click)="addAction({type: 'autoBalanceAction', docked: dockedValue, engaged: engagedValue});"
+ (click)="addAction({type: 'autoBalanceAction', docked: docked.checked, engaged: engaged.checked});"
>
Submit Balancing
</button>
@@ -231,17 +225,17 @@
DEAD
</button>
<label>
- <input type="checkbox" (change)="dockedValue = $event.target.value" />
+ <input #docked type="checkbox" />
Docked
</label>
<label>
- <input type="checkbox" (change)="engagedValue = $event.target.value" />
+ <input #engaged type="checkbox" />
Engaged
</label>
<button
*ngIf="!autoPhase"
class="btn btn-info"
- (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: dockedValue, engaged: engagedValue});"
+ (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: docked.checked, engaged: engaged.checked});"
>
End Match
</button>
@@ -259,7 +253,7 @@
</button>
<button
class="btn btn-info"
- (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: dockedValue, engaged: engagedValue});"
+ (click)="changeSectionTo('Review and Submit'); addAction({type: 'endMatchAction', docked: docked.checked, engaged: engaged.checked});"
>
End Match
</button>
@@ -276,4 +270,6 @@
<div *ngSwitchCase="'Success'" id="Success" class="container-fluid">
<h2>Successfully submitted data.</h2>
</div>
+
+ <span class="error_message" role="alert">{{ errorMessage }}</span>
</ng-container>
diff --git a/scouting/www/match_list/match_list.component.ts b/scouting/www/match_list/match_list.component.ts
index a656a1f..0deeb11 100644
--- a/scouting/www/match_list/match_list.component.ts
+++ b/scouting/www/match_list/match_list.component.ts
@@ -10,7 +10,7 @@
import {MatchListRequestor} from '@org_frc971/scouting/www/rpc';
type TeamInMatch = {
- teamNumber: number;
+ teamNumber: string;
matchNumber: number;
setNumber: number;
compLevel: string;
@@ -30,19 +30,37 @@
constructor(private readonly matchListRequestor: MatchListRequestor) {}
- // Returns a class for the row to hide it if all teams in this match have
- // already been scouted.
- getRowClass(match: Match): string {
+ // Returns true if the match is fully scouted. Returns false otherwise.
+ matchIsFullyScouted(match: Match): boolean {
const scouted = match.dataScouted();
- if (
- this.hideCompletedMatches &&
+ return (
scouted.r1() &&
scouted.r2() &&
scouted.r3() &&
scouted.b1() &&
scouted.b2() &&
scouted.b3()
- ) {
+ );
+ }
+
+ // Returns true if at least one team in this match has been scouted. Returns
+ // false otherwise.
+ matchIsPartiallyScouted(match: Match): boolean {
+ const scouted = match.dataScouted();
+ return (
+ scouted.r1() ||
+ scouted.r2() ||
+ scouted.r3() ||
+ scouted.b1() ||
+ scouted.b2() ||
+ scouted.b3()
+ );
+ }
+
+ // Returns a class for the row to hide it if all teams in this match have
+ // already been scouted.
+ getRowClass(match: Match): string {
+ if (this.hideCompletedMatches && this.matchIsFullyScouted(match)) {
return 'hidden_row';
}
return '';
@@ -54,7 +72,7 @@
teamsInMatch(
match: Match
- ): {teamNumber: number; color: 'red' | 'blue'; disabled: boolean}[] {
+ ): {teamNumber: string; color: 'red' | 'blue'; disabled: boolean}[] {
const scouted = match.dataScouted();
return [
{
@@ -110,7 +128,22 @@
displayMatchNumber(match: Match): string {
// Only display the set number for eliminations matches.
const setNumber = match.compLevel() == 'qm' ? '' : `${match.setNumber()}`;
- return `${this.matchType(match)} ${setNumber} Match ${match.matchNumber()}`;
+ const matchType = this.matchType(match);
+ const mainText = `${matchType} ${setNumber} Match ${match.matchNumber()}`;
+
+ // When showing the full match list (i.e. not hiding completed matches)
+ // it's useful to know if a match has already been scouted or not.
+ const suffix = (() => {
+ if (this.matchIsFullyScouted(match)) {
+ return '(fully scouted)';
+ } else if (this.matchIsPartiallyScouted(match)) {
+ return '(partially scouted)';
+ } else {
+ return '';
+ }
+ })();
+
+ return `${mainText} ${suffix}`;
}
ngOnInit() {
diff --git a/y2023/BUILD b/y2023/BUILD
index e92292b..095b856 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -60,6 +60,7 @@
"//y2023/constants:constants_sender",
"//y2023/vision:foxglove_image_converter",
"//aos/network:web_proxy_main",
+ ":joystick_republish",
"//aos/events/logging:log_cat",
"//y2023/rockpi:imu_main",
"//frc971/image_streamer:image_streamer",
@@ -327,6 +328,23 @@
],
)
+cc_binary(
+ name = "joystick_republish",
+ srcs = [
+ "joystick_republish.cc",
+ ],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//aos:configuration",
+ "//aos:flatbuffer_merge",
+ "//aos:init",
+ "//aos/events:shm_event_loop",
+ "//frc971/input:joystick_state_fbs",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
py_library(
name = "python_init",
srcs = ["__init__.py"],
diff --git a/y2023/constants.cc b/y2023/constants.cc
index c2a09aa..f11c68c 100644
--- a/y2023/constants.cc
+++ b/y2023/constants.cc
@@ -83,19 +83,20 @@
break;
case kCompTeamNumber:
- arm_proximal->zeroing.measured_absolute_position = 0.138453705930275;
+ arm_proximal->zeroing.measured_absolute_position = 0.153241637089465;
arm_proximal->potentiometer_offset =
0.931355973012855 + 8.6743197253382 - 0.101200335326309 -
- 0.0820901660993467 - 0.0703733798337964 - 0.0294645384848748;
+ 0.0820901660993467 - 0.0703733798337964 - 0.0294645384848748 -
+ 0.577156175549626;
- arm_distal->zeroing.measured_absolute_position = 0.562947209110251;
+ arm_distal->zeroing.measured_absolute_position = 0.119544808434349;
arm_distal->potentiometer_offset =
0.436664933370656 + 0.49457213779426 + 6.78213223139724 -
0.0220711555235029 - 0.0162945074111813 + 0.00630344935527365 -
0.0164398318919943 - 0.145833494945215 + 0.234878799868491 +
- 0.125924230298394 + 0.147136306208754;
+ 0.125924230298394 + 0.147136306208754 - 0.69167546169753;
- roll_joint->zeroing.measured_absolute_position = 0.593975883699743;
+ roll_joint->zeroing.measured_absolute_position = 0.62315534539819;
roll_joint->potentiometer_offset =
-(3.87038557084874 - 0.0241774522172967 + 0.0711345168020632 -
0.866186131631967 - 0.0256788357596952 + 0.18101759154572017 -
@@ -105,7 +106,7 @@
0.0201047336425017 - 1.0173426655158 - 0.186085272847293 - 0.0317706563397807;
wrist->subsystem_params.zeroing_constants.measured_absolute_position =
- 0.894159203288852;
+ 1.00731305518279;
break;
@@ -119,10 +120,11 @@
7.673132586937 - 0.0799284644472573 - 0.0323574039310657 +
0.0143810684138064 + 0.00945555248207735;
- roll_joint->zeroing.measured_absolute_position = 1.7490367887908;
+ roll_joint->zeroing.measured_absolute_position = 1.85482286175059;
roll_joint->potentiometer_offset =
0.624713611895747 + 3.10458504917251 - 0.0966407797407789 +
- 0.0257708772364788 - 0.0395076737853459 - 6.87914956118006;
+ 0.0257708772364788 - 0.0395076737853459 - 6.87914956118006 -
+ 0.097581301615046;
wrist->subsystem_params.zeroing_constants.measured_absolute_position =
6.04062267812154;
diff --git a/y2023/constants.h b/y2023/constants.h
index 8996597..595a694 100644
--- a/y2023/constants.h
+++ b/y2023/constants.h
@@ -168,7 +168,7 @@
// Game object is spit from end effector for at least this time
static constexpr std::chrono::milliseconds kExtraSpittingTime() {
- return std::chrono::seconds(2);
+ return std::chrono::seconds(1);
}
// if true, tune down all the arm constants for testing.
diff --git a/y2023/control_loops/drivetrain/drivetrain_main.cc b/y2023/control_loops/drivetrain/drivetrain_main.cc
index b13e76f..eebae91 100644
--- a/y2023/control_loops/drivetrain/drivetrain_main.cc
+++ b/y2023/control_loops/drivetrain/drivetrain_main.cc
@@ -15,6 +15,8 @@
aos::FlatbufferDetachedBuffer<aos::Configuration> config =
aos::configuration::ReadConfig("aos_config.json");
+ frc971::constants::WaitForConstants<y2023::Constants>(&config.message());
+
aos::ShmEventLoop event_loop(&config.message());
std::unique_ptr<::frc971::control_loops::drivetrain::PuppetLocalizer>
localizer = std::make_unique<
diff --git a/y2023/control_loops/python/drivetrain.py b/y2023/control_loops/python/drivetrain.py
index 05739fc..9d2b006 100644
--- a/y2023/control_loops/python/drivetrain.py
+++ b/y2023/control_loops/python/drivetrain.py
@@ -14,17 +14,17 @@
kDrivetrain = drivetrain.DrivetrainParams(
J=6.5,
- mass=58.0,
+ mass=68.0,
# TODO(austin): Measure radius a bit better.
robot_radius=0.39,
wheel_radius=2.5 * 0.0254,
motor_type=control_loop.Falcon(),
num_motors=3,
- G=(14.0 / 54.0) * (22.0 / 56.0),
+ G=(14.0 / 52.0) * (26.0 / 58.0),
q_pos=0.24,
q_vel=2.5,
- efficiency=0.75,
- has_imu=True,
+ efficiency=0.92,
+ has_imu=False,
force=True,
kf_q_voltage=1.0,
controller_poles=[0.82, 0.82])
diff --git a/y2023/control_loops/python/graph_edit.py b/y2023/control_loops/python/graph_edit.py
index ea6f5b2..bbc7a1b 100644
--- a/y2023/control_loops/python/graph_edit.py
+++ b/y2023/control_loops/python/graph_edit.py
@@ -255,7 +255,6 @@
# Extra stuff for drawing lines.
self.segments = segments
- self.prev_segment_pt = None
self.now_segment_pt = None
self.spline_edit = 0
self.edit_control1 = True
@@ -557,9 +556,6 @@
self.circular_index_select -= 1
print(self.circular_index_select)
- elif keyval == Gdk.KEY_r:
- self.prev_segment_pt = self.now_segment_pt
-
elif keyval == Gdk.KEY_o:
# Only prints current segment
print(repr(self.segments[self.index]))
@@ -567,19 +563,6 @@
# Generate theta points.
if self.segments:
print(repr(self.segments[self.index].ToThetaPoints()))
- elif keyval == Gdk.KEY_e:
- best_pt = self.now_segment_pt
- best_dist = 1e10
- for segment in self.segments:
- d = angle_dist_sqr(segment.start, self.now_segment_pt)
- if (d < best_dist):
- best_pt = segment.start
- best_dist = d
- d = angle_dist_sqr(segment.end, self.now_segment_pt)
- if (d < best_dist):
- best_pt = segment.end
- best_dist = d
- self.now_segment_pt = best_pt
elif keyval == Gdk.KEY_p:
if self.index > 0:
@@ -592,6 +575,19 @@
elif keyval == Gdk.KEY_i:
self.show_indicators = not self.show_indicators
+ elif keyval == Gdk.KEY_h:
+ print("q: Quit the program")
+ print("c: Incriment which arm solution we render")
+ print("v: Decrement which arm solution we render")
+ print("o: Print the current segment")
+ print("g: Generate theta points")
+ print("p: Move to the previous segment")
+ print("n: Move to the next segment")
+ print("i: Switch on or off the control point indicators")
+ print("l: Switch on or off viewing only the selected spline")
+ print("t: Toggle between xy or theta renderings")
+ print("z: Switch between editing control point 1 and 2")
+
elif keyval == Gdk.KEY_n:
self.index += 1
self.index = self.index % len(self.segments)
diff --git a/y2023/control_loops/superstructure/end_effector.cc b/y2023/control_loops/superstructure/end_effector.cc
index 8ab2f27..444be0d 100644
--- a/y2023/control_loops/superstructure/end_effector.cc
+++ b/y2023/control_loops/superstructure/end_effector.cc
@@ -18,9 +18,9 @@
beambreak_(false) {}
void EndEffector::RunIteration(
- const ::aos::monotonic_clock::time_point timestamp, RollerGoal roller_goal,
- double falcon_current, double cone_position, bool beambreak,
- double *roller_voltage, bool preloaded_with_cone) {
+ const ::aos::monotonic_clock::time_point timestamp,
+ RollerGoal roller_goal, double falcon_current, double cone_position,
+ bool beambreak, double *roller_voltage, bool preloaded_with_cone) {
*roller_voltage = 0.0;
constexpr double kMinCurrent = 40.0;
@@ -32,6 +32,7 @@
switch (state_) {
case EndEffectorState::IDLE:
case EndEffectorState::INTAKING:
+ game_piece_ = vision::Class::CONE_UP;
state_ = EndEffectorState::LOADED;
break;
case EndEffectorState::LOADED:
@@ -124,6 +125,12 @@
// Finished spitting
state_ = EndEffectorState::IDLE;
game_piece_ = vision::Class::NONE;
+ } else if (roller_goal == RollerGoal::INTAKE_CONE_UP ||
+ roller_goal == RollerGoal::INTAKE_CONE_DOWN ||
+ roller_goal == RollerGoal::INTAKE_CUBE ||
+ roller_goal == RollerGoal::INTAKE_LAST) {
+ state_ = EndEffectorState::INTAKING;
+ timer_ = timestamp;
}
break;
diff --git a/y2023/joystick_reader.cc b/y2023/joystick_reader.cc
index 8d04056..efd7537 100644
--- a/y2023/joystick_reader.cc
+++ b/y2023/joystick_reader.cc
@@ -382,6 +382,8 @@
GamePiece current_game_piece_ = GamePiece::CONE_UP;
+ bool has_scored_ = false;
+
void HandleTeleop(
const ::frc971::input::driver_station::Data &data) override {
superstructure_status_fetcher_.Fetch();
@@ -445,6 +447,7 @@
// Ok, no active setpoint. Search for the right one.
if (current_setpoint_ == nullptr) {
+ has_scored_ = false;
const Side current_side =
data.IsPressed(kBack) ? Side::BACK : Side::FRONT;
// Search for the active setpoint.
@@ -485,13 +488,16 @@
// spit.
if (std::abs(score_wrist_goal.value() -
superstructure_status_fetcher_->wrist()->goal_position()) <
- 0.1) {
+ 0.1 ||
+ has_scored_) {
if (place_index.has_value()) {
arm_goal_position_ = place_index.value();
- if (arm_goal_position_ ==
- superstructure_status_fetcher_->arm()->current_node() &&
- superstructure_status_fetcher_->arm()->path_distance_to_go() <
- 0.01) {
+ if ((arm_goal_position_ ==
+ superstructure_status_fetcher_->arm()->current_node() &&
+ superstructure_status_fetcher_->arm()->path_distance_to_go() <
+ 0.01) ||
+ has_scored_) {
+ has_scored_ = true;
roller_goal = RollerGoal::SPIT;
}
} else {
diff --git a/y2023/joystick_republish.cc b/y2023/joystick_republish.cc
new file mode 100644
index 0000000..9542001
--- /dev/null
+++ b/y2023/joystick_republish.cc
@@ -0,0 +1,34 @@
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include "aos/configuration.h"
+#include "aos/init.h"
+#include "aos/events/shm_event_loop.h"
+#include "aos/flatbuffer_merge.h"
+#include "aos/init.h"
+#include "frc971/input/joystick_state_generated.h"
+#include "glog/logging.h"
+
+DEFINE_string(config, "aos_config.json", "Config file to use.");
+
+int main(int argc, char *argv[]) {
+ aos::InitGoogle(&argc, &argv);
+
+ aos::FlatbufferDetachedBuffer<aos::Configuration> config =
+ aos::configuration::ReadConfig(FLAGS_config);
+ aos::ShmEventLoop event_loop(&config.message());
+
+ aos::Sender<aos::JoystickState> sender(
+ event_loop.MakeSender<aos::JoystickState>("/imu/aos"));
+
+ event_loop.MakeWatcher(
+ "/roborio/aos", [&](const aos::JoystickState &joystick_state) {
+ auto builder = sender.MakeBuilder();
+ flatbuffers::Offset<aos::JoystickState> state_fbs =
+ aos::CopyFlatBuffer(&joystick_state, builder.fbb());
+ builder.CheckOk(builder.Send(state_fbs));
+ });
+
+ event_loop.Run();
+ return 0;
+}
diff --git a/y2023/localizer/localizer.cc b/y2023/localizer/localizer.cc
index d0daa7b..32f5604 100644
--- a/y2023/localizer/localizer.cc
+++ b/y2023/localizer/localizer.cc
@@ -294,6 +294,12 @@
builder.add_implied_robot_y(Z(Corrector::kY));
builder.add_implied_robot_theta(Z(Corrector::kTheta));
+ Eigen::AngleAxisd rvec_camera_target(
+ Eigen::Affine3d(H_camera_target).rotation());
+ // Use y angle (around vertical axis) to compute skew
+ double skew = rvec_camera_target.axis().y() * rvec_camera_target.angle();
+ builder.add_skew(skew);
+
double distance_to_target =
Eigen::Affine3d(H_camera_target).translation().norm();
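The skew term added above, pulled out as a standalone sketch; the function name is illustrative, and H_camera_target is assumed to be the 4x4 homogeneous camera-to-target transform already used in this file.

#include <Eigen/Geometry>

// Decompose the camera-to-target rotation into angle-axis form and keep the
// component about the camera's vertical (y) axis, which approximates how far
// the target normal is rotated away from the camera axis.
double ComputeSkew(const Eigen::Matrix4d &H_camera_target) {
  const Eigen::AngleAxisd rotation(
      Eigen::Affine3d(H_camera_target).rotation());
  return rotation.axis().y() * rotation.angle();
}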
diff --git a/y2023/localizer/visualization.fbs b/y2023/localizer/visualization.fbs
index 9c9c648..8fa03f0 100644
--- a/y2023/localizer/visualization.fbs
+++ b/y2023/localizer/visualization.fbs
@@ -25,6 +25,8 @@
correction_x: double (id: 12);
correction_y: double (id: 13);
correction_theta: double (id: 14);
+ // The angle between the camera axis and target normal.
+ skew:double (id: 15);
}
table Visualization {
diff --git a/y2023/vision/camera_reader.cc b/y2023/vision/camera_reader.cc
index 7de3eb5..b526086 100644
--- a/y2023/vision/camera_reader.cc
+++ b/y2023/vision/camera_reader.cc
@@ -12,6 +12,7 @@
DEFINE_string(config, "aos_config.json", "Path to the config file to use.");
DEFINE_bool(lowlight_camera, true, "Switch to use imx462 image sensor.");
+DEFINE_int32(gain, 200, "analogue_gain");
DEFINE_double(red, 1.252, "Red gain");
DEFINE_double(green, 1, "Green gain");
@@ -117,7 +118,7 @@
rkisp1_selfpath->device(),
camera->device());
if (FLAGS_lowlight_camera) {
- v4l2_reader_selfpath.SetGainExt(100);
+ v4l2_reader_selfpath.SetGainExt(FLAGS_gain);
v4l2_reader_selfpath.SetVerticalBlanking(1000);
v4l2_reader_selfpath.SetExposure(FLAGS_exposure);
} else {
diff --git a/y2023/vision/image_logger.cc b/y2023/vision/image_logger.cc
index b87cec0..d90f9c2 100644
--- a/y2023/vision/image_logger.cc
+++ b/y2023/vision/image_logger.cc
@@ -62,7 +62,7 @@
});
event_loop.MakeWatcher(
- "/roborio/aos", [&](const aos::JoystickState &joystick_state) {
+ "/imu/aos", [&](const aos::JoystickState &joystick_state) {
const auto timestamp = event_loop.context().monotonic_event_time;
// Store the last time we got disabled
if (enabled && !joystick_state.enabled()) {
diff --git a/y2023/y2023_imu.json b/y2023/y2023_imu.json
index e802795..416552b 100644
--- a/y2023/y2023_imu.json
+++ b/y2023/y2023_imu.json
@@ -2,6 +2,108 @@
"channels": [
{
"name": "/imu/aos",
+ "type": "aos.JoystickState",
+ "source_node": "imu",
+ "frequency": 100,
+ "logger": "LOCAL_AND_REMOTE_LOGGER",
+ "logger_nodes": [
+ "imu"
+ ],
+ "destination_nodes": [
+ {
+ "name": "logger",
+ "priority": 5,
+ "time_to_live": 50000000,
+ "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+ "timestamp_logger_nodes": [
+ "imu"
+ ]
+ },
+ {
+ "name": "pi1",
+ "priority": 5,
+ "time_to_live": 50000000,
+ "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+ "timestamp_logger_nodes": [
+ "imu"
+ ]
+ },
+ {
+ "name": "pi2",
+ "priority": 5,
+ "time_to_live": 50000000,
+ "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+ "timestamp_logger_nodes": [
+ "imu"
+ ]
+ },
+ {
+ "name": "pi3",
+ "priority": 5,
+ "time_to_live": 50000000,
+ "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+ "timestamp_logger_nodes": [
+ "imu"
+ ]
+ },
+ {
+ "name": "pi4",
+ "priority": 5,
+ "time_to_live": 50000000,
+ "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
+ "timestamp_logger_nodes": [
+ "imu"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "/imu/aos/remote_timestamps/logger/imu/aos/aos-JoystickState",
+ "type": "aos.message_bridge.RemoteMessage",
+ "source_node": "imu",
+ "logger": "NOT_LOGGED",
+ "frequency": 300,
+ "num_senders": 2,
+ "max_size": 200
+ },
+ {
+ "name": "/imu/aos/remote_timestamps/pi1/imu/aos/aos-JoystickState",
+ "type": "aos.message_bridge.RemoteMessage",
+ "source_node": "imu",
+ "logger": "NOT_LOGGED",
+ "frequency": 300,
+ "num_senders": 2,
+ "max_size": 200
+ },
+ {
+ "name": "/imu/aos/remote_timestamps/pi2/imu/aos/aos-JoystickState",
+ "type": "aos.message_bridge.RemoteMessage",
+ "source_node": "imu",
+ "logger": "NOT_LOGGED",
+ "frequency": 300,
+ "num_senders": 2,
+ "max_size": 200
+ },
+ {
+ "name": "/imu/aos/remote_timestamps/pi3/imu/aos/aos-JoystickState",
+ "type": "aos.message_bridge.RemoteMessage",
+ "source_node": "imu",
+ "logger": "NOT_LOGGED",
+ "frequency": 300,
+ "num_senders": 2,
+ "max_size": 200
+ },
+ {
+ "name": "/imu/aos/remote_timestamps/pi4/imu/aos/aos-JoystickState",
+ "type": "aos.message_bridge.RemoteMessage",
+ "source_node": "imu",
+ "logger": "NOT_LOGGED",
+ "frequency": 300,
+ "num_senders": 2,
+ "max_size": 200
+ },
+ {
+ "name": "/imu/aos",
"type": "aos.timing.Report",
"source_node": "imu",
"frequency": 50,
@@ -373,6 +475,14 @@
]
},
{
+ "name": "joystick_republish",
+ "executable_name": "joystick_republish",
+ "user": "pi",
+ "nodes": [
+ "imu"
+ ]
+ },
+ {
"name": "message_bridge_server",
"executable_name": "message_bridge_server",
"user": "pi",
@@ -383,7 +493,12 @@
{
"name": "localizer_logger",
"executable_name": "logger_main",
- "args": ["--logging_folder", "", "--snappy_compress"],
+ "args": [
+ "--logging_folder",
+ "",
+ "--snappy_compress",
+ "--rotate_every", "30.0"
+ ],
"user": "pi",
"nodes": [
"imu"
@@ -392,6 +507,10 @@
{
"name": "web_proxy",
"executable_name": "web_proxy_main",
+ "args": [
+ "--min_ice_port=5800",
+ "--max_ice_port=5810"
+ ],
"user": "pi",
"nodes": [
"imu"
@@ -450,6 +569,18 @@
},
{
"name": "roborio"
+ },
+ {
+ "name": "pi1"
+ },
+ {
+ "name": "pi2"
+ },
+ {
+ "name": "pi3"
+ },
+ {
+ "name": "pi4"
}
]
}
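The new joystick_republish application registered in the imu starter list above pairs with the forwarded /imu/aos aos.JoystickState channel: it mirrors the driver-station state arriving from the roborio onto the imu node so it can fan out to the logger and the pis. Only the CopyFlatBuffer/Send tail of that file appears at the top of this change; the rest of the sketch below (channel names inferred from these config edits, plus the usual ShmEventLoop boilerplate) is an assumption, not the literal file contents:

  // Hypothetical reconstruction of joystick_republish.cc; only the
  // CopyFlatBuffer/Send tail is visible in this change.
  #include "aos/configuration.h"
  #include "aos/events/shm_event_loop.h"
  #include "aos/flatbuffer_merge.h"
  #include "aos/init.h"
  #include "aos/robot_state/joystick_state_generated.h"
  #include "gflags/gflags.h"

  DEFINE_string(config, "aos_config.json", "Path to the config file to use.");

  int main(int argc, char **argv) {
    aos::InitGoogle(&argc, &argv);

    aos::FlatbufferDetachedBuffer<aos::Configuration> config =
        aos::configuration::ReadConfig(FLAGS_config);
    aos::ShmEventLoop event_loop(&config.message());

    aos::Sender<aos::JoystickState> sender =
        event_loop.MakeSender<aos::JoystickState>("/imu/aos");

    event_loop.MakeWatcher(
        "/roborio/aos", [&](const aos::JoystickState &joystick_state) {
          // Copy the incoming message verbatim onto the local channel.
          aos::Sender<aos::JoystickState>::Builder builder =
              sender.MakeBuilder();
          flatbuffers::Offset<aos::JoystickState> state_fbs =
              aos::CopyFlatBuffer(&joystick_state, builder.fbb());
          builder.CheckOk(builder.Send(state_fbs));
        });

    event_loop.Run();
    return 0;
  }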
diff --git a/y2023/y2023_pi_template.json b/y2023/y2023_pi_template.json
index 113e48f..8d9fdaa 100644
--- a/y2023/y2023_pi_template.json
+++ b/y2023/y2023_pi_template.json
@@ -377,6 +377,10 @@
"name": "web_proxy",
"executable_name": "web_proxy_main",
"user": "pi",
+ "args": [
+ "--min_ice_port=5800",
+ "--max_ice_port=5810"
+ ],
"nodes": [
"pi{{ NUM }}"
]
@@ -397,7 +401,7 @@
"--logging_folder",
"",
"--rotate_every",
- "60.0",
+ "30.0",
"--direct",
"--flush_size=4194304"
],
diff --git a/y2023/y2023_roborio.json b/y2023/y2023_roborio.json
index f3697ac..3114ac9 100644
--- a/y2023/y2023_roborio.json
+++ b/y2023/y2023_roborio.json
@@ -6,7 +6,7 @@
"source_node": "roborio",
"frequency": 100,
"logger": "LOCAL_AND_REMOTE_LOGGER",
- "logger_nodes" : [
+ "logger_nodes": [
"imu",
"logger"
],
@@ -19,51 +19,6 @@
"timestamp_logger_nodes": [
"roborio"
]
- },
- {
- "name": "logger",
- "priority": 5,
- "time_to_live": 50000000,
- "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
- "timestamp_logger_nodes": [
- "roborio"
- ]
- },
- {
- "name": "pi1",
- "priority": 5,
- "time_to_live": 50000000,
- "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
- "timestamp_logger_nodes": [
- "roborio"
- ]
- },
- {
- "name": "pi2",
- "priority": 5,
- "time_to_live": 50000000,
- "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
- "timestamp_logger_nodes": [
- "roborio"
- ]
- },
- {
- "name": "pi3",
- "priority": 5,
- "time_to_live": 50000000,
- "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
- "timestamp_logger_nodes": [
- "roborio"
- ]
- },
- {
- "name": "pi4",
- "priority": 5,
- "time_to_live": 50000000,
- "timestamp_logger": "LOCAL_AND_REMOTE_LOGGER",
- "timestamp_logger_nodes": [
- "roborio"
- ]
}
]
},
@@ -77,51 +32,6 @@
"max_size": 200
},
{
- "name": "/roborio/aos/remote_timestamps/logger/roborio/aos/aos-JoystickState",
- "type": "aos.message_bridge.RemoteMessage",
- "source_node": "roborio",
- "logger": "NOT_LOGGED",
- "frequency": 300,
- "num_senders": 2,
- "max_size": 200
- },
- {
- "name": "/roborio/aos/remote_timestamps/pi1/roborio/aos/aos-JoystickState",
- "type": "aos.message_bridge.RemoteMessage",
- "source_node": "roborio",
- "logger": "NOT_LOGGED",
- "frequency": 300,
- "num_senders": 2,
- "max_size": 200
- },
- {
- "name": "/roborio/aos/remote_timestamps/pi2/roborio/aos/aos-JoystickState",
- "type": "aos.message_bridge.RemoteMessage",
- "source_node": "roborio",
- "logger": "NOT_LOGGED",
- "frequency": 300,
- "num_senders": 2,
- "max_size": 200
- },
- {
- "name": "/roborio/aos/remote_timestamps/pi3/roborio/aos/aos-JoystickState",
- "type": "aos.message_bridge.RemoteMessage",
- "source_node": "roborio",
- "logger": "NOT_LOGGED",
- "frequency": 300,
- "num_senders": 2,
- "max_size": 200
- },
- {
- "name": "/roborio/aos/remote_timestamps/pi4/roborio/aos/aos-JoystickState",
- "type": "aos.message_bridge.RemoteMessage",
- "source_node": "roborio",
- "logger": "NOT_LOGGED",
- "frequency": 300,
- "num_senders": 2,
- "max_size": 200
- },
- {
"name": "/roborio/aos",
"type": "aos.RobotState",
"source_node": "roborio",
@@ -608,7 +518,10 @@
{
"name": "roborio_web_proxy",
"executable_name": "web_proxy_main",
- "args": ["--min_ice_port=5800", "--max_ice_port=5810"],
+ "args": [
+ "--min_ice_port=5800",
+ "--max_ice_port=5810"
+ ],
"nodes": [
"roborio"
]
@@ -616,7 +529,9 @@
{
"name": "roborio_message_bridge_client",
"executable_name": "message_bridge_client",
- "args": ["--rt_priority=16"],
+ "args": [
+ "--rt_priority=16"
+ ],
"nodes": [
"roborio"
]
@@ -624,7 +539,9 @@
{
"name": "roborio_message_bridge_server",
"executable_name": "message_bridge_server",
- "args": ["--rt_priority=16"],
+ "args": [
+ "--rt_priority=16"
+ ],
"nodes": [
"roborio"
]
@@ -632,7 +549,11 @@
{
"name": "logger",
"executable_name": "logger_main",
- "args": ["--snappy_compress", "--logging_folder=/home/admin/logs/"],
+ "args": [
+ "--snappy_compress",
+ "--logging_folder=/home/admin/logs/",
+ "--rotate_every", "30.0"
+ ],
"nodes": [
"roborio"
]