#include "aos/events/logging/logger.h"
#include "aos/events/event_loop.h"
#include "aos/events/message_counter.h"
#include "aos/events/ping_lib.h"
#include "aos/events/pong_lib.h"
#include "aos/events/simulated_event_loop.h"
#include "aos/util/file.h"
#include "glog/logging.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace aos {
namespace logger {
namespace testing {
namespace chrono = std::chrono;
using aos::testing::MessageCounter;
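// Test fixture which brings up a single-node ping/pong simulation for the
// logger to record.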
class LoggerTest : public ::testing::Test {
public:
LoggerTest()
: config_(
aos::configuration::ReadConfig("aos/events/pingpong_config.json")),
event_loop_factory_(&config_.message()),
ping_event_loop_(event_loop_factory_.MakeEventLoop("ping")),
ping_(ping_event_loop_.get()),
pong_event_loop_(event_loop_factory_.MakeEventLoop("pong")),
pong_(pong_event_loop_.get()) {}
// Config and factory.
aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
SimulatedEventLoopFactory event_loop_factory_;
// Event loop and app for Ping
std::unique_ptr<EventLoop> ping_event_loop_;
Ping ping_;
// Event loop and app for Pong
std::unique_ptr<EventLoop> pong_event_loop_;
Pong pong_;
};
using LoggerDeathTest = LoggerTest;
// Tests that we can start up at all. This confirms that all the channels are
// in the config.
TEST_F(LoggerTest, Starts) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name = tmpdir + "/logfile";
const ::std::string logfile = base_name + ".part0.bfbs";
// Remove it.
unlink(logfile.c_str());
LOG(INFO) << "Logging data to " << logfile;
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
event_loop_factory_.RunFor(chrono::milliseconds(95));
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger.StartLoggingLocalNamerOnRun(base_name);
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
// Even though it doesn't make any difference here, exercise the logic for
// passing in a separate config.
LogReader reader(logfile, &config_.message());
// Confirm that we can remap logged channels to point to new buses.
reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
// This sends out the fetched messages and advances time to the start of the
// log file.
reader.Register();
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(nullptr));
std::unique_ptr<EventLoop> test_event_loop =
reader.event_loop_factory()->MakeEventLoop("log_reader");
int ping_count = 10;
int pong_count = 10;
// Confirm that the ping value matches in the remapped channel location.
test_event_loop->MakeWatcher("/original/test",
[&ping_count](const examples::Ping &ping) {
EXPECT_EQ(ping.value(), ping_count + 1);
++ping_count;
});
// Confirm that the ping and pong counts both match, and the value also
// matches.
test_event_loop->MakeWatcher(
"/test", [&pong_count, &ping_count](const examples::Pong &pong) {
EXPECT_EQ(pong.value(), pong_count + 1);
++pong_count;
EXPECT_EQ(ping_count, pong_count);
});
reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
EXPECT_EQ(ping_count, 2010);
}
// Tests calling StartLogging twice.
TEST_F(LoggerDeathTest, ExtraStart) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name1 = tmpdir + "/logfile1";
const ::std::string logfile1 = base_name1 + ".part0.bfbs";
const ::std::string base_name2 = tmpdir + "/logfile2";
const ::std::string logfile2 = base_name2 + ".part0.bfbs";
unlink(logfile1.c_str());
unlink(logfile2.c_str());
LOG(INFO) << "Logging data to " << logfile1 << " then " << logfile2;
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
event_loop_factory_.RunFor(chrono::milliseconds(95));
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger_event_loop->OnRun(
[base_name1, base_name2, &logger_event_loop, &logger]() {
logger.StartLogging(std::make_unique<LocalLogNamer>(
base_name1, logger_event_loop->node()));
EXPECT_DEATH(logger.StartLogging(std::make_unique<LocalLogNamer>(
base_name2, logger_event_loop->node())),
"Already logging");
});
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
}
// Tests calling StopLogging twice.
TEST_F(LoggerDeathTest, ExtraStop) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name = tmpdir + "/logfile";
const ::std::string logfile = base_name + ".part0.bfbs";
// Remove it.
unlink(logfile.c_str());
LOG(INFO) << "Logging data to " << logfile;
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
event_loop_factory_.RunFor(chrono::milliseconds(95));
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger_event_loop->OnRun([base_name, &logger_event_loop, &logger]() {
logger.StartLogging(std::make_unique<LocalLogNamer>(
base_name, logger_event_loop->node()));
logger.StopLogging(aos::monotonic_clock::min_time);
EXPECT_DEATH(logger.StopLogging(aos::monotonic_clock::min_time),
"Not logging right now");
});
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
}
// Tests that we can start logging, stop, and then start logging again.
TEST_F(LoggerTest, StartsTwice) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name1 = tmpdir + "/logfile1";
const ::std::string logfile1 = base_name1 + ".part0.bfbs";
const ::std::string base_name2 = tmpdir + "/logfile2";
const ::std::string logfile2 = base_name2 + ".part0.bfbs";
unlink(logfile1.c_str());
unlink(logfile2.c_str());
LOG(INFO) << "Logging data to " << logfile1 << " then " << logfile2;
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
event_loop_factory_.RunFor(chrono::milliseconds(95));
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger.StartLogging(
std::make_unique<LocalLogNamer>(base_name1, logger_event_loop->node()));
event_loop_factory_.RunFor(chrono::milliseconds(10000));
logger.StopLogging(logger_event_loop->monotonic_now());
event_loop_factory_.RunFor(chrono::milliseconds(10000));
logger.StartLogging(
std::make_unique<LocalLogNamer>(base_name2, logger_event_loop->node()));
event_loop_factory_.RunFor(chrono::milliseconds(10000));
}
for (const auto &logfile :
{std::make_tuple(logfile1, 10), std::make_tuple(logfile2, 2010)}) {
SCOPED_TRACE(std::get<0>(logfile));
LogReader reader(std::get<0>(logfile));
reader.Register();
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(nullptr));
std::unique_ptr<EventLoop> test_event_loop =
reader.event_loop_factory()->MakeEventLoop("log_reader");
int ping_count = std::get<1>(logfile);
int pong_count = std::get<1>(logfile);
// Confirm that the ping and pong counts both match, and the value also
// matches.
test_event_loop->MakeWatcher("/test",
[&ping_count](const examples::Ping &ping) {
EXPECT_EQ(ping.value(), ping_count + 1);
++ping_count;
});
test_event_loop->MakeWatcher(
"/test", [&pong_count, &ping_count](const examples::Pong &pong) {
EXPECT_EQ(pong.value(), pong_count + 1);
++pong_count;
EXPECT_EQ(ping_count, pong_count);
});
reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
EXPECT_EQ(ping_count, std::get<1>(logfile) + 1000);
}
}
// Tests that we can read and write rotated log files.
TEST_F(LoggerTest, RotatedLogFile) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name = tmpdir + "/logfile";
const ::std::string logfile0 = base_name + ".part0.bfbs";
const ::std::string logfile1 = base_name + ".part1.bfbs";
// Remove them.
unlink(logfile0.c_str());
unlink(logfile1.c_str());
LOG(INFO) << "Logging data to " << logfile0 << " and " << logfile1;
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
event_loop_factory_.RunFor(chrono::milliseconds(95));
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger.StartLoggingLocalNamerOnRun(base_name);
event_loop_factory_.RunFor(chrono::milliseconds(10000));
logger.Rotate();
event_loop_factory_.RunFor(chrono::milliseconds(10000));
}
{
// Confirm that the UUIDs match for both the parts and the logger, and the
// parts_index increments.
std::vector<FlatbufferVector<LogFileHeader>> log_header;
for (std::string_view f : {logfile0, logfile1}) {
log_header.emplace_back(ReadHeader(f));
}
EXPECT_EQ(log_header[0].message().logger_uuid()->string_view(),
log_header[1].message().logger_uuid()->string_view());
EXPECT_EQ(log_header[0].message().parts_uuid()->string_view(),
log_header[1].message().parts_uuid()->string_view());
EXPECT_EQ(log_header[0].message().parts_index(), 0);
EXPECT_EQ(log_header[1].message().parts_index(), 1);
}
// Even though it doesn't make any difference here, exercise the logic for
// passing in a separate config.
LogReader reader(std::vector<std::string>{logfile0, logfile1},
&config_.message());
// Confirm that we can remap logged channels to point to new buses.
reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
// This sends out the fetched messages and advances time to the start of the
// log file.
reader.Register();
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(nullptr));
std::unique_ptr<EventLoop> test_event_loop =
reader.event_loop_factory()->MakeEventLoop("log_reader");
int ping_count = 10;
int pong_count = 10;
// Confirm that the ping value matches in the remapped channel location.
test_event_loop->MakeWatcher("/original/test",
[&ping_count](const examples::Ping &ping) {
EXPECT_EQ(ping.value(), ping_count + 1);
++ping_count;
});
// Confirm that the ping and pong counts both match, and the value also
// matches.
test_event_loop->MakeWatcher(
"/test", [&pong_count, &ping_count](const examples::Pong &pong) {
EXPECT_EQ(pong.value(), pong_count + 1);
++pong_count;
EXPECT_EQ(ping_count, pong_count);
});
reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
EXPECT_EQ(ping_count, 2010);
}
// Tests that a large number of messages per second doesn't overwhelm writev.
TEST_F(LoggerTest, ManyMessages) {
const ::std::string tmpdir(getenv("TEST_TMPDIR"));
const ::std::string base_name = tmpdir + "/logfile";
const ::std::string logfile = base_name + ".part0.bfbs";
// Remove the log file.
unlink(logfile.c_str());
LOG(INFO) << "Logging data to " << logfile;
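// Quiet the ping application; this test sends messages at a very high rate.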
ping_.set_quiet(true);
{
std::unique_ptr<EventLoop> logger_event_loop =
event_loop_factory_.MakeEventLoop("logger");
std::unique_ptr<EventLoop> ping_spammer_event_loop =
event_loop_factory_.MakeEventLoop("ping_spammer");
aos::Sender<examples::Ping> ping_sender =
ping_spammer_event_loop->MakeSender<examples::Ping>("/test");
aos::TimerHandler *timer_handler =
ping_spammer_event_loop->AddTimer([&ping_sender]() {
aos::Sender<examples::Ping>::Builder builder =
ping_sender.MakeBuilder();
examples::Ping::Builder ping_builder =
builder.MakeBuilder<examples::Ping>();
CHECK(builder.Send(ping_builder.Finish()));
});
// 100 ms / 0.05 ms -> 2000 messages per polling period. That should be
// enough to overwhelm writev if it is going to break.
ping_spammer_event_loop->OnRun([&ping_spammer_event_loop, timer_handler]() {
timer_handler->Setup(ping_spammer_event_loop->monotonic_now(),
chrono::microseconds(50));
});
Logger logger(logger_event_loop.get());
logger.set_polling_period(std::chrono::milliseconds(100));
logger.StartLoggingLocalNamerOnRun(base_name);
event_loop_factory_.RunFor(chrono::milliseconds(1000));
}
}
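// Test fixture which brings up a two-node (pi1/pi2) ping/pong simulation and
// enumerates the log files each test expects the loggers to produce.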
class MultinodeLoggerTest : public ::testing::Test {
public:
MultinodeLoggerTest()
: config_(aos::configuration::ReadConfig(
"aos/events/logging/multinode_pingpong_config.json")),
event_loop_factory_(&config_.message()),
pi1_(
configuration::GetNode(event_loop_factory_.configuration(), "pi1")),
pi2_(
configuration::GetNode(event_loop_factory_.configuration(), "pi2")),
tmp_dir_(getenv("TEST_TMPDIR")),
logfile_base_(tmp_dir_ + "/multi_logfile"),
logfiles_(
{logfile_base_ + "_pi1_data.part0.bfbs",
logfile_base_ + "_pi2_data/test/aos.examples.Pong.part0.bfbs",
logfile_base_ + "_pi2_data/test/aos.examples.Pong.part1.bfbs",
logfile_base_ + "_pi2_data.part0.bfbs",
logfile_base_ + "_timestamps/pi1/aos/remote_timestamps/pi2/"
"aos.logger.MessageHeader.part0.bfbs",
logfile_base_ + "_timestamps/pi1/aos/remote_timestamps/pi2/"
"aos.logger.MessageHeader.part1.bfbs",
logfile_base_ + "_timestamps/pi2/aos/remote_timestamps/pi1/"
"aos.logger.MessageHeader.part0.bfbs",
logfile_base_ + "_timestamps/pi2/aos/remote_timestamps/pi1/"
"aos.logger.MessageHeader.part1.bfbs",
logfile_base_ +
"_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part0.bfbs",
logfile_base_ +
"_pi1_data/pi1/aos/aos.message_bridge.Timestamp.part1.bfbs",
logfile_base_ +
"_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0.bfbs",
logfile_base_ +
"_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part1.bfbs"}),
structured_logfiles_{
std::vector<std::string>{logfiles_[0]},
std::vector<std::string>{logfiles_[1], logfiles_[2]},
std::vector<std::string>{logfiles_[3]},
std::vector<std::string>{logfiles_[4], logfiles_[5]},
std::vector<std::string>{logfiles_[6], logfiles_[7]},
std::vector<std::string>{logfiles_[8], logfiles_[9]},
std::vector<std::string>{logfiles_[10], logfiles_[11]}},
ping_event_loop_(event_loop_factory_.MakeEventLoop("ping", pi1_)),
ping_(ping_event_loop_.get()),
pong_event_loop_(event_loop_factory_.MakeEventLoop("pong", pi2_)),
pong_(pong_event_loop_.get()) {
// Go through and remove the logfiles if they already exist.
for (const auto &file : logfiles_) {
unlink(file.c_str());
}
LOG(INFO) << "Logging data to " << logfiles_[0] << ", " << logfiles_[1]
<< " and " << logfiles_[2];
}
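// Bundles an event loop with the Logger running on it.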
struct LoggerState {
std::unique_ptr<EventLoop> event_loop;
std::unique_ptr<Logger> logger;
};
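// Creates a logger event loop on the given node. The Logger itself is
// constructed later by StartLogger().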
LoggerState MakeLogger(const Node *node) {
return {event_loop_factory_.MakeEventLoop("logger", node), {}};
}
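// Constructs the Logger for the given state and arranges for it to start
// writing to logfile_base_ once its event loop starts running.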
void StartLogger(LoggerState *logger) {
logger->logger = std::make_unique<Logger>(logger->event_loop.get());
logger->logger->set_polling_period(std::chrono::milliseconds(100));
logger->event_loop->OnRun([this, logger]() {
logger->logger->StartLogging(std::make_unique<MultiNodeLogNamer>(
logfile_base_, logger->event_loop->configuration(),
logger->event_loop->node()));
});
}
// Config and factory.
aos::FlatbufferDetachedBuffer<aos::Configuration> config_;
SimulatedEventLoopFactory event_loop_factory_;
const Node *pi1_;
const Node *pi2_;
std::string tmp_dir_;
std::string logfile_base_;
std::vector<std::string> logfiles_;
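// logfiles_, grouped into lists of parts the way LogReader expects them.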
std::vector<std::vector<std::string>> structured_logfiles_;
std::unique_ptr<EventLoop> ping_event_loop_;
Ping ping_;
std::unique_ptr<EventLoop> pong_event_loop_;
Pong pong_;
};
// Counts the number of messages on each channel. Returns a (channel name,
// channel type, count) tuple for every channel with at least one message
// matching matcher().
std::vector<std::tuple<std::string, std::string, int>> CountChannelsMatching(
std::string_view filename,
std::function<bool(const MessageHeader *)> matcher) {
MessageReader message_reader(filename);
std::vector<int> counts(
message_reader.log_file_header()->configuration()->channels()->size(), 0);
while (true) {
std::optional<FlatbufferVector<MessageHeader>> msg =
message_reader.ReadMessage();
if (!msg) {
break;
}
if (matcher(&msg.value().message())) {
counts[msg.value().message().channel_index()]++;
}
}
std::vector<std::tuple<std::string, std::string, int>> result;
for (size_t i = 0; i < counts.size(); ++i) {
if (counts[i] != 0) {
const Channel *channel =
message_reader.log_file_header()->configuration()->channels()->Get(i);
result.push_back(std::make_tuple(channel->name()->str(),
channel->type()->str(), counts[i]));
}
}
return result;
}
// Counts the number of data messages, returning (channel name, channel type,
// count) tuples.
std::vector<std::tuple<std::string, std::string, int>> CountChannelsData(
std::string_view filename) {
return CountChannelsMatching(filename, [](const MessageHeader *msg) {
if (msg->has_data()) {
CHECK(!msg->has_monotonic_remote_time());
CHECK(!msg->has_realtime_remote_time());
CHECK(!msg->has_remote_queue_index());
return true;
}
return false;
});
}
// Counts the number of timestamp messages, returning (channel name, channel
// type, count) tuples.
std::vector<std::tuple<std::string, std::string, int>> CountChannelsTimestamp(
std::string_view filename) {
return CountChannelsMatching(filename, [](const MessageHeader *msg) {
if (!msg->has_data()) {
CHECK(msg->has_monotonic_remote_time());
CHECK(msg->has_realtime_remote_time());
CHECK(msg->has_remote_queue_index());
return true;
}
return false;
});
}
// Tests that we can write and read simple multi-node log files.
TEST_F(MultinodeLoggerTest, SimpleMultiNode) {
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
event_loop_factory_.RunFor(chrono::milliseconds(95));
StartLogger(&pi1_logger);
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
{
std::set<std::string> logfile_uuids;
std::set<std::string> parts_uuids;
// Confirm that we have the expected number of UUIDs for both the logfile
// UUIDs and parts UUIDs.
std::vector<FlatbufferVector<LogFileHeader>> log_header;
for (std::string_view f : logfiles_) {
log_header.emplace_back(ReadHeader(f));
logfile_uuids.insert(log_header.back().message().logger_uuid()->str());
parts_uuids.insert(log_header.back().message().parts_uuid()->str());
}
EXPECT_EQ(logfile_uuids.size(), 2u);
EXPECT_EQ(parts_uuids.size(), 7u);
// And confirm everything is on the correct node.
EXPECT_EQ(log_header[0].message().node()->name()->string_view(), "pi1");
EXPECT_EQ(log_header[1].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[2].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[3].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[4].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[5].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[6].message().node()->name()->string_view(), "pi1");
EXPECT_EQ(log_header[7].message().node()->name()->string_view(), "pi1");
EXPECT_EQ(log_header[8].message().node()->name()->string_view(), "pi1");
EXPECT_EQ(log_header[9].message().node()->name()->string_view(), "pi1");
EXPECT_EQ(log_header[10].message().node()->name()->string_view(), "pi2");
EXPECT_EQ(log_header[11].message().node()->name()->string_view(), "pi2");
// And the parts index matches.
EXPECT_EQ(log_header[0].message().parts_index(), 0);
EXPECT_EQ(log_header[1].message().parts_index(), 0);
EXPECT_EQ(log_header[2].message().parts_index(), 1);
EXPECT_EQ(log_header[3].message().parts_index(), 0);
EXPECT_EQ(log_header[4].message().parts_index(), 0);
EXPECT_EQ(log_header[5].message().parts_index(), 1);
EXPECT_EQ(log_header[6].message().parts_index(), 0);
EXPECT_EQ(log_header[7].message().parts_index(), 1);
EXPECT_EQ(log_header[8].message().parts_index(), 0);
EXPECT_EQ(log_header[9].message().parts_index(), 1);
EXPECT_EQ(log_header[10].message().parts_index(), 0);
EXPECT_EQ(log_header[11].message().parts_index(), 1);
}
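// Confirm that each log file contains the expected channels with the
// expected message counts.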
{
using ::testing::UnorderedElementsAre;
// Timing reports, pings
EXPECT_THAT(
CountChannelsData(logfiles_[0]),
UnorderedElementsAre(
std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200),
std::make_tuple("/pi1/aos", "aos.timing.Report", 40),
std::make_tuple("/test", "aos.examples.Ping", 2001)));
// Timestamps for pong
EXPECT_THAT(
CountChannelsTimestamp(logfiles_[0]),
UnorderedElementsAre(
std::make_tuple("/test", "aos.examples.Pong", 2001),
std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200)));
// Pong data.
EXPECT_THAT(CountChannelsData(logfiles_[1]),
UnorderedElementsAre(
std::make_tuple("/test", "aos.examples.Pong", 101)));
EXPECT_THAT(CountChannelsData(logfiles_[2]),
UnorderedElementsAre(
std::make_tuple("/test", "aos.examples.Pong", 1900)));
// No timestamps
EXPECT_THAT(CountChannelsTimestamp(logfiles_[1]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsTimestamp(logfiles_[2]), UnorderedElementsAre());
// Timing reports and pongs.
EXPECT_THAT(
CountChannelsData(logfiles_[3]),
UnorderedElementsAre(
std::make_tuple("/pi2/aos", "aos.message_bridge.Timestamp", 200),
std::make_tuple("/pi2/aos", "aos.timing.Report", 40),
std::make_tuple("/test", "aos.examples.Pong", 2001)));
// And ping timestamps.
EXPECT_THAT(
CountChannelsTimestamp(logfiles_[3]),
UnorderedElementsAre(
std::make_tuple("/test", "aos.examples.Ping", 2001),
std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 200)));
// Timestamps from pi2 on pi1, and the other way.
EXPECT_THAT(CountChannelsData(logfiles_[4]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsData(logfiles_[5]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsData(logfiles_[6]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsData(logfiles_[7]), UnorderedElementsAre());
EXPECT_THAT(
CountChannelsTimestamp(logfiles_[4]),
UnorderedElementsAre(
std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 10),
std::make_tuple("/test", "aos.examples.Ping", 101)));
EXPECT_THAT(
CountChannelsTimestamp(logfiles_[5]),
UnorderedElementsAre(
std::make_tuple("/pi1/aos", "aos.message_bridge.Timestamp", 190),
std::make_tuple("/test", "aos.examples.Ping", 1900)));
EXPECT_THAT(CountChannelsTimestamp(logfiles_[6]),
UnorderedElementsAre(std::make_tuple(
"/pi2/aos", "aos.message_bridge.Timestamp", 10)));
EXPECT_THAT(CountChannelsTimestamp(logfiles_[7]),
UnorderedElementsAre(std::make_tuple(
"/pi2/aos", "aos.message_bridge.Timestamp", 190)));
// And then test that the remotely logged timestamp data files only have
// timestamps in them.
EXPECT_THAT(CountChannelsTimestamp(logfiles_[8]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsTimestamp(logfiles_[9]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsTimestamp(logfiles_[10]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsTimestamp(logfiles_[11]), UnorderedElementsAre());
EXPECT_THAT(CountChannelsData(logfiles_[8]),
UnorderedElementsAre(std::make_tuple(
"/pi1/aos", "aos.message_bridge.Timestamp", 10)));
EXPECT_THAT(CountChannelsData(logfiles_[9]),
UnorderedElementsAre(std::make_tuple(
"/pi1/aos", "aos.message_bridge.Timestamp", 190)));
EXPECT_THAT(CountChannelsData(logfiles_[10]),
UnorderedElementsAre(std::make_tuple(
"/pi2/aos", "aos.message_bridge.Timestamp", 10)));
EXPECT_THAT(CountChannelsData(logfiles_[11]),
UnorderedElementsAre(std::make_tuple(
"/pi2/aos", "aos.message_bridge.Timestamp", 190)));
}
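// Replay the log files and confirm that both nodes see the expected data
// with the expected timestamps.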
LogReader reader(structured_logfiles_);
SimulatedEventLoopFactory log_reader_factory(reader.logged_configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
// This sends out the fetched messages and advances time to the start of the
// log file.
reader.Register(&log_reader_factory);
const Node *pi1 =
configuration::GetNode(log_reader_factory.configuration(), "pi1");
const Node *pi2 =
configuration::GetNode(log_reader_factory.configuration(), "pi2");
LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
LOG(INFO) << "now pi1 "
<< log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
LOG(INFO) << "now pi2 "
<< log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(pi1, pi2));
reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
std::unique_ptr<EventLoop> pi1_event_loop =
log_reader_factory.MakeEventLoop("test", pi1);
std::unique_ptr<EventLoop> pi2_event_loop =
log_reader_factory.MakeEventLoop("test", pi2);
int pi1_ping_count = 10;
int pi2_ping_count = 10;
int pi1_pong_count = 10;
int pi2_pong_count = 10;
// Confirm that the ping value matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping) << " at "
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi1_ping_count + 1);
EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
pi1_ping_count * chrono::milliseconds(10) +
monotonic_clock::epoch());
EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
pi1_ping_count * chrono::milliseconds(10) +
realtime_clock::epoch());
EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
pi1_event_loop->context().monotonic_event_time);
EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
pi1_event_loop->context().realtime_event_time);
++pi1_ping_count;
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping) << " at "
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi2_ping_count + 1);
EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
pi2_ping_count * chrono::milliseconds(10) +
monotonic_clock::epoch());
EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
pi2_ping_count * chrono::milliseconds(10) +
realtime_clock::epoch());
EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time +
chrono::microseconds(150),
pi2_event_loop->context().monotonic_event_time);
EXPECT_EQ(pi2_event_loop->context().realtime_remote_time +
chrono::microseconds(150),
pi2_event_loop->context().realtime_event_time);
++pi2_ping_count;
});
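// Offset between the pong count and the expected remote queue index.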
constexpr ssize_t kQueueIndexOffset = 0;
// Confirm that the ping and pong counts both match, and the value also
// matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_event_loop, &pi1_ping_count,
&pi1_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(pi1_event_loop->context().remote_queue_index,
pi1_pong_count + kQueueIndexOffset);
EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time,
chrono::microseconds(200) +
pi1_pong_count * chrono::milliseconds(10) +
monotonic_clock::epoch());
EXPECT_EQ(pi1_event_loop->context().realtime_remote_time,
chrono::microseconds(200) +
pi1_pong_count * chrono::milliseconds(10) +
realtime_clock::epoch());
EXPECT_EQ(pi1_event_loop->context().monotonic_remote_time +
chrono::microseconds(150),
pi1_event_loop->context().monotonic_event_time);
EXPECT_EQ(pi1_event_loop->context().realtime_remote_time +
chrono::microseconds(150),
pi1_event_loop->context().realtime_event_time);
EXPECT_EQ(pong.value(), pi1_pong_count + 1);
++pi1_pong_count;
EXPECT_EQ(pi1_ping_count, pi1_pong_count);
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_event_loop, &pi2_ping_count,
&pi2_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(pi2_event_loop->context().remote_queue_index,
pi2_pong_count + kQueueIndexOffset - 9);
EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
chrono::microseconds(200) +
pi2_pong_count * chrono::milliseconds(10) +
monotonic_clock::epoch());
EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
chrono::microseconds(200) +
pi2_pong_count * chrono::milliseconds(10) +
realtime_clock::epoch());
EXPECT_EQ(pi2_event_loop->context().monotonic_remote_time,
pi2_event_loop->context().monotonic_event_time);
EXPECT_EQ(pi2_event_loop->context().realtime_remote_time,
pi2_event_loop->context().realtime_event_time);
EXPECT_EQ(pong.value(), pi2_pong_count + 1);
++pi2_pong_count;
EXPECT_EQ(pi2_ping_count, pi2_pong_count);
});
log_reader_factory.Run();
EXPECT_EQ(pi1_ping_count, 2010);
EXPECT_EQ(pi2_ping_count, 2010);
EXPECT_EQ(pi1_pong_count, 2010);
EXPECT_EQ(pi2_pong_count, 2010);
reader.Deregister();
}
using MultinodeLoggerDeathTest = MultinodeLoggerTest;
// Tests that we die in the LogReader constructor if we feed the replay a
// mismatched node list.
TEST_F(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
event_loop_factory_.RunFor(chrono::milliseconds(95));
StartLogger(&pi1_logger);
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
// Add an extra node to the replay config and confirm that the log reader
// complains about the mismatch in the number of nodes.
FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
configuration::MergeWithConfig(&config_.message(), R"({
"nodes": [
{
"name": "extra-node"
}
]
}
)");
EXPECT_DEATH(LogReader(structured_logfiles_, &extra_nodes_config.message()),
"Log file and replay config need to have matching nodes lists.");
}
// Tests that we can read log files which don't start at the same monotonic
// time.
TEST_F(MultinodeLoggerTest, StaggeredStart) {
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
event_loop_factory_.RunFor(chrono::milliseconds(95));
StartLogger(&pi1_logger);
event_loop_factory_.RunFor(chrono::milliseconds(200));
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
LogReader reader(structured_logfiles_);
SimulatedEventLoopFactory log_reader_factory(reader.logged_configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
// This sends out the fetched messages and advances time to the start of the
// log file.
reader.Register(&log_reader_factory);
const Node *pi1 =
configuration::GetNode(log_reader_factory.configuration(), "pi1");
const Node *pi2 =
configuration::GetNode(log_reader_factory.configuration(), "pi2");
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(pi1, pi2));
reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
std::unique_ptr<EventLoop> pi1_event_loop =
log_reader_factory.MakeEventLoop("test", pi1);
std::unique_ptr<EventLoop> pi2_event_loop =
log_reader_factory.MakeEventLoop("test", pi2);
int pi1_ping_count = 30;
int pi2_ping_count = 30;
int pi1_pong_count = 30;
int pi2_pong_count = 30;
// Confirm that the ping value matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi1_ping_count + 1);
++pi1_ping_count;
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi2_ping_count + 1);
++pi2_ping_count;
});
// Confirm that the ping and pong counts both match, and the value also
// matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_event_loop, &pi1_ping_count,
&pi1_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(pong.value(), pi1_pong_count + 1);
++pi1_pong_count;
EXPECT_EQ(pi1_ping_count, pi1_pong_count);
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_event_loop, &pi2_ping_count,
&pi2_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(pong.value(), pi2_pong_count + 1);
++pi2_pong_count;
EXPECT_EQ(pi2_ping_count, pi2_pong_count);
});
log_reader_factory.Run();
EXPECT_EQ(pi1_ping_count, 2030);
EXPECT_EQ(pi2_ping_count, 2030);
EXPECT_EQ(pi1_pong_count, 2030);
EXPECT_EQ(pi2_pong_count, 2030);
reader.Deregister();
}
// Tests that we can read log files where the monotonic clocks drift and don't
// match correctly. While we are here, also test that logs with different
// ending times are readable.
TEST_F(MultinodeLoggerTest, MismatchedClocks) {
{
LoggerState pi2_logger = MakeLogger(pi2_);
NodeEventLoopFactory *pi2 =
event_loop_factory_.GetNodeEventLoopFactory(pi2_);
LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
<< pi2->realtime_now() << " distributed "
<< pi2->ToDistributedClock(pi2->monotonic_now());
const chrono::nanoseconds initial_pi2_offset = -chrono::seconds(1000);
chrono::nanoseconds pi2_offset = initial_pi2_offset;
pi2->SetDistributedOffset(-pi2_offset, 1.0);
LOG(INFO) << "pi2 times: " << pi2->monotonic_now() << " "
<< pi2->realtime_now() << " distributed "
<< pi2->ToDistributedClock(pi2->monotonic_now());
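// Slew pi2's clock by 200 ns every simulated millisecond before starting the
// logger.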
for (int i = 0; i < 95; ++i) {
pi2_offset += chrono::nanoseconds(200);
pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(200));
{
// Run pi1's logger for only part of the time.
LoggerState pi1_logger = MakeLogger(pi1_);
StartLogger(&pi1_logger);
for (int i = 0; i < 20000; ++i) {
pi2_offset += chrono::nanoseconds(200);
pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
EXPECT_GT(pi2_offset - initial_pi2_offset,
event_loop_factory_.send_delay() +
event_loop_factory_.network_delay());
for (int i = 0; i < 40000; ++i) {
pi2_offset -= chrono::nanoseconds(200);
pi2->SetDistributedOffset(-pi2_offset, 1.0);
event_loop_factory_.RunFor(chrono::milliseconds(1));
}
}
// And log a bit more on pi2.
event_loop_factory_.RunFor(chrono::milliseconds(400));
}
LogReader reader(structured_logfiles_);
SimulatedEventLoopFactory log_reader_factory(reader.logged_configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
const Node *pi1 =
configuration::GetNode(log_reader_factory.configuration(), "pi1");
const Node *pi2 =
configuration::GetNode(log_reader_factory.configuration(), "pi2");
// This sends out the fetched messages and advances time to the start of the
// log file.
reader.Register(&log_reader_factory);
LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
LOG(INFO) << "now pi1 "
<< log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
LOG(INFO) << "now pi2 "
<< log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
LOG(INFO) << "Done registering (pi1) "
<< log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now()
<< " "
<< log_reader_factory.GetNodeEventLoopFactory(pi1)->realtime_now();
LOG(INFO) << "Done registering (pi2) "
<< log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now()
<< " "
<< log_reader_factory.GetNodeEventLoopFactory(pi2)->realtime_now();
EXPECT_THAT(reader.Nodes(), ::testing::ElementsAre(pi1, pi2));
reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
std::unique_ptr<EventLoop> pi1_event_loop =
log_reader_factory.MakeEventLoop("test", pi1);
std::unique_ptr<EventLoop> pi2_event_loop =
log_reader_factory.MakeEventLoop("test", pi2);
int pi1_ping_count = 30;
int pi2_ping_count = 30;
int pi1_pong_count = 30;
int pi2_pong_count = 30;
// Confirm that the ping value matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_ping_count, &pi1_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi1 ping " << FlatbufferToJson(&ping)
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi1_ping_count + 1);
++pi1_ping_count;
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_ping_count, &pi2_event_loop](const examples::Ping &ping) {
VLOG(1) << "Pi2 ping " << FlatbufferToJson(&ping)
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(ping.value(), pi2_ping_count + 1);
++pi2_ping_count;
});
// Confirm that the ping and pong counts both match, and the value also
// matches.
pi1_event_loop->MakeWatcher(
"/test", [&pi1_event_loop, &pi1_ping_count,
&pi1_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi1 pong " << FlatbufferToJson(&pong) << " at "
<< pi1_event_loop->context().monotonic_remote_time << " -> "
<< pi1_event_loop->context().monotonic_event_time;
EXPECT_EQ(pong.value(), pi1_pong_count + 1);
++pi1_pong_count;
EXPECT_EQ(pi1_ping_count, pi1_pong_count);
});
pi2_event_loop->MakeWatcher(
"/test", [&pi2_event_loop, &pi2_ping_count,
&pi2_pong_count](const examples::Pong &pong) {
VLOG(1) << "Pi2 pong " << FlatbufferToJson(&pong) << " at "
<< pi2_event_loop->context().monotonic_remote_time << " -> "
<< pi2_event_loop->context().monotonic_event_time;
EXPECT_EQ(pong.value(), pi2_pong_count + 1);
++pi2_pong_count;
EXPECT_EQ(pi2_ping_count, pi2_pong_count);
});
log_reader_factory.Run();
EXPECT_EQ(pi1_ping_count, 6030);
EXPECT_EQ(pi2_ping_count, 6030);
EXPECT_EQ(pi1_pong_count, 6030);
EXPECT_EQ(pi2_pong_count, 6030);
reader.Deregister();
}
// Tests that we can sort a bunch of parts into the pre-determined sorted parts.
TEST_F(MultinodeLoggerTest, SortParts) {
// Make a bunch of parts.
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
event_loop_factory_.RunFor(chrono::milliseconds(95));
StartLogger(&pi1_logger);
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(2000));
}
const std::vector<LogFile> sorted_parts = SortParts(logfiles_);
EXPECT_EQ(sorted_parts.size(), 2u);
// Count up the number of UUIDs and make sure they are what we expect as a
// sanity check.
std::set<std::string> logger_uuids;
std::set<std::string> parts_uuids;
std::set<std::string> both_uuids;
size_t missing_rt_count = 0;
for (const LogFile &log_file : sorted_parts) {
EXPECT_FALSE(log_file.logger_uuid.empty());
logger_uuids.insert(log_file.logger_uuid);
both_uuids.insert(log_file.logger_uuid);
for (const LogParts &part : log_file.parts) {
EXPECT_NE(part.monotonic_start_time, aos::monotonic_clock::min_time)
<< ": " << part;
missing_rt_count +=
part.realtime_start_time == aos::realtime_clock::min_time;
EXPECT_TRUE(logger_uuids.find(part.logger_uuid) != logger_uuids.end());
EXPECT_NE(part.node, "");
parts_uuids.insert(part.parts_uuid);
both_uuids.insert(part.parts_uuid);
}
}
// We won't have RT timestamps for 5 log files. We don't log the RT start
// time on remote nodes because we don't know it and would be guessing. And
// the log reader can actually do a better job.
EXPECT_EQ(missing_rt_count, 5u);
EXPECT_EQ(logger_uuids.size(), 2u);
EXPECT_EQ(parts_uuids.size(), ToLogReaderVector(sorted_parts).size());
EXPECT_EQ(logger_uuids.size() + parts_uuids.size(), both_uuids.size());
// Test that each list of parts is in order. Don't worry about the ordering
// between part file lists though.
// (inner vectors all need to be in order, but outer one doesn't matter).
EXPECT_THAT(ToLogReaderVector(sorted_parts),
::testing::UnorderedElementsAreArray(structured_logfiles_));
}
// Tests that if we remap a logged channel, it shows up at the remapped
// location.
TEST_F(MultinodeLoggerTest, RemapLoggedChannel) {
{
LoggerState pi1_logger = MakeLogger(pi1_);
LoggerState pi2_logger = MakeLogger(pi2_);
event_loop_factory_.RunFor(chrono::milliseconds(95));
StartLogger(&pi1_logger);
StartLogger(&pi2_logger);
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
LogReader reader(structured_logfiles_);
// Remap just on pi1.
reader.RemapLoggedChannel<aos::timing::Report>(
"/aos", configuration::GetNode(reader.configuration(), "pi1"));
SimulatedEventLoopFactory log_reader_factory(reader.configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
reader.Register(&log_reader_factory);
const Node *pi1 =
configuration::GetNode(log_reader_factory.configuration(), "pi1");
const Node *pi2 =
configuration::GetNode(log_reader_factory.configuration(), "pi2");
// Confirm we can read the data on the remapped channel, just for pi1. Nothing
// else should have moved.
std::unique_ptr<EventLoop> pi1_event_loop =
log_reader_factory.MakeEventLoop("test", pi1);
pi1_event_loop->SkipTimingReport();
std::unique_ptr<EventLoop> full_pi1_event_loop =
log_reader_factory.MakeEventLoop("test", pi1);
full_pi1_event_loop->SkipTimingReport();
std::unique_ptr<EventLoop> pi2_event_loop =
log_reader_factory.MakeEventLoop("test", pi2);
pi2_event_loop->SkipTimingReport();
MessageCounter<aos::timing::Report> pi1_timing_report(pi1_event_loop.get(),
"/aos");
MessageCounter<aos::timing::Report> full_pi1_timing_report(
full_pi1_event_loop.get(), "/pi1/aos");
MessageCounter<aos::timing::Report> pi1_original_timing_report(
pi1_event_loop.get(), "/original/aos");
MessageCounter<aos::timing::Report> full_pi1_original_timing_report(
full_pi1_event_loop.get(), "/original/pi1/aos");
MessageCounter<aos::timing::Report> pi2_timing_report(pi2_event_loop.get(),
"/aos");
log_reader_factory.Run();
EXPECT_EQ(pi1_timing_report.count(), 0u);
EXPECT_EQ(full_pi1_timing_report.count(), 0u);
EXPECT_NE(pi1_original_timing_report.count(), 0u);
EXPECT_NE(full_pi1_original_timing_report.count(), 0u);
EXPECT_NE(pi2_timing_report.count(), 0u);
reader.Deregister();
}
// TODO(austin): We can write a test which recreates a logfile and confirms that
// we get it back. That is the ultimate test.
} // namespace testing
} // namespace logger
} // namespace aos