Merge "Add zoom to the Spline UI"
diff --git a/aos/configuration.cc b/aos/configuration.cc
index 7fad440..24e15d9 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -1137,15 +1137,15 @@
bool ConnectionDeliveryTimeIsLoggedOnNode(const Channel *channel,
const Node *node,
const Node *logger_node) {
- const Connection *connection = ConnectionToNode(channel, node);
- if (connection == nullptr) {
- return false;
- }
- return ConnectionDeliveryTimeIsLoggedOnNode(connection, logger_node);
+ return ConnectionDeliveryTimeIsLoggedOnNode(ConnectionToNode(channel, node),
+ logger_node);
}
bool ConnectionDeliveryTimeIsLoggedOnNode(const Connection *connection,
const Node *node) {
+ if (connection == nullptr) {
+ return false;
+ }
switch (connection->timestamp_logger()) {
case LoggerConfig::LOCAL_AND_REMOTE_LOGGER:
CHECK(connection->has_timestamp_logger_nodes());
diff --git a/aos/events/logging/log_cat.cc b/aos/events/logging/log_cat.cc
index a26eb49..9741f70 100644
--- a/aos/events/logging/log_cat.cc
+++ b/aos/events/logging/log_cat.cc
@@ -171,6 +171,8 @@
if (message.span() == absl::Span<const uint8_t>()) {
break;
}
+ CHECK(message.Verify());
+
const auto *const channels = full_header->configuration()->channels();
const size_t channel_index = message.message().channel_index();
CHECK_LT(channel_index, channels->size());
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index a4b15e8..a880f35 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -25,10 +25,8 @@
log_namer_(log_namer),
reopen_(std::move(reopen)),
close_(std::move(close)) {
- boot_uuids_.resize(configuration::NodesCount(log_namer->configuration_),
- UUID::Zero());
- CHECK_LT(node_index_, boot_uuids_.size());
- reopen_(this);
+ state_.resize(configuration::NodesCount(log_namer->configuration_));
+ CHECK_LT(node_index_, state_.size());
}
NewDataWriter::~NewDataWriter() {
@@ -54,14 +52,44 @@
header_written_ = false;
}
-void NewDataWriter::UpdateRemote(size_t remote_node_index,
- const UUID &remote_node_boot_uuid) {
- CHECK_LT(remote_node_index, boot_uuids_.size());
- if (boot_uuids_[remote_node_index] != remote_node_boot_uuid) {
+void NewDataWriter::UpdateRemote(
+ const size_t remote_node_index, const UUID &remote_node_boot_uuid,
+ const monotonic_clock::time_point monotonic_remote_time,
+ const monotonic_clock::time_point monotonic_event_time,
+ const bool reliable) {
+ bool rotate = false;
+ CHECK_LT(remote_node_index, state_.size());
+ State &state = state_[remote_node_index];
+ if (state.boot_uuid != remote_node_boot_uuid) {
VLOG(1) << filename() << " Remote " << remote_node_index << " updated to "
- << remote_node_boot_uuid << " from "
- << boot_uuids_[remote_node_index];
- boot_uuids_[remote_node_index] = remote_node_boot_uuid;
+ << remote_node_boot_uuid << " from " << state.boot_uuid;
+ state.boot_uuid = remote_node_boot_uuid;
+ state.oldest_remote_monotonic_timestamp = monotonic_clock::max_time;
+ state.oldest_local_monotonic_timestamp = monotonic_clock::max_time;
+ state.oldest_remote_unreliable_monotonic_timestamp =
+ monotonic_clock::max_time;
+ state.oldest_local_unreliable_monotonic_timestamp =
+ monotonic_clock::max_time;
+ rotate = true;
+ }
+
+ if (!reliable) {
+ if (state.oldest_remote_unreliable_monotonic_timestamp >
+ monotonic_remote_time) {
+ state.oldest_remote_unreliable_monotonic_timestamp =
+ monotonic_remote_time;
+ state.oldest_local_unreliable_monotonic_timestamp = monotonic_event_time;
+ rotate = true;
+ }
+ }
+
+ if (state.oldest_remote_monotonic_timestamp > monotonic_remote_time) {
+ state.oldest_remote_monotonic_timestamp = monotonic_remote_time;
+ state.oldest_local_monotonic_timestamp = monotonic_event_time;
+ rotate = true;
+ }
+
+ if (rotate) {
Rotate();
}
}
@@ -70,17 +98,18 @@
const UUID &source_node_boot_uuid,
aos::monotonic_clock::time_point now) {
// TODO(austin): Handle remote nodes changing too, not just the source node.
- if (boot_uuids_[node_index_] != source_node_boot_uuid) {
- boot_uuids_[node_index_] = source_node_boot_uuid;
+ if (state_[node_index_].boot_uuid != source_node_boot_uuid) {
+ state_[node_index_].boot_uuid = source_node_boot_uuid;
if (header_written_) {
Reboot();
}
QueueHeader(MakeHeader());
}
- CHECK_EQ(boot_uuids_[node_index_], source_node_boot_uuid);
+ CHECK_EQ(state_[node_index_].boot_uuid, source_node_boot_uuid);
CHECK(header_written_) << ": Attempting to write message before header to "
<< writer->filename();
+ CHECK(writer);
writer->QueueSizedFlatbuffer(fbb, now);
}
@@ -88,14 +117,14 @@
NewDataWriter::MakeHeader() {
const size_t logger_node_index = log_namer_->logger_node_index();
const UUID &logger_node_boot_uuid = log_namer_->logger_node_boot_uuid();
- if (boot_uuids_[logger_node_index] == UUID::Zero()) {
+ if (state_[logger_node_index].boot_uuid == UUID::Zero()) {
VLOG(1) << filename() << " Logger node is " << logger_node_index
<< " and uuid is " << logger_node_boot_uuid;
- boot_uuids_[logger_node_index] = logger_node_boot_uuid;
+ state_[logger_node_index].boot_uuid = logger_node_boot_uuid;
} else {
- CHECK_EQ(boot_uuids_[logger_node_index], logger_node_boot_uuid);
+ CHECK_EQ(state_[logger_node_index].boot_uuid, logger_node_boot_uuid);
}
- return log_namer_->MakeHeader(node_index_, boot_uuids_, parts_uuid(),
+ return log_namer_->MakeHeader(node_index_, state_, parts_uuid(),
parts_index_);
}
@@ -104,10 +133,15 @@
CHECK(!header_written_) << ": Attempting to write duplicate header to "
<< writer->filename();
CHECK(header.message().has_source_node_boot_uuid());
- CHECK_EQ(boot_uuids_[node_index_],
+ CHECK_EQ(state_[node_index_].boot_uuid,
UUID::FromString(header.message().source_node_boot_uuid()));
+ if (!writer) {
+ reopen_(this);
+ }
+
// TODO(austin): This triggers a dummy allocation that we don't need as part
// of releasing. Can we skip it?
+ CHECK(writer);
writer->QueueSizedFlatbuffer(header.Release());
header_written_ = true;
}
@@ -120,12 +154,12 @@
}
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> LogNamer::MakeHeader(
- size_t node_index, const std::vector<UUID> &boot_uuids,
+ size_t node_index, const std::vector<NewDataWriter::State> &state,
const UUID &parts_uuid, int parts_index) const {
- const UUID &source_node_boot_uuid = boot_uuids[node_index];
+ const UUID &source_node_boot_uuid = state[node_index].boot_uuid;
const Node *const source_node =
configuration::GetNode(configuration_, node_index);
- CHECK_EQ(LogFileHeader::MiniReflectTypeTable()->num_elems, 20u);
+ CHECK_EQ(LogFileHeader::MiniReflectTypeTable()->num_elems, 24u);
flatbuffers::FlatBufferBuilder fbb;
fbb.ForceDefaults(true);
@@ -181,10 +215,10 @@
}
std::vector<flatbuffers::Offset<flatbuffers::String>> boot_uuid_offsets;
- boot_uuid_offsets.reserve(boot_uuids.size());
- for (const UUID &uuid : boot_uuids) {
- if (uuid != UUID::Zero()) {
- boot_uuid_offsets.emplace_back(uuid.PackString(&fbb));
+ boot_uuid_offsets.reserve(state.size());
+ for (const NewDataWriter::State &state : state) {
+ if (state.boot_uuid != UUID::Zero()) {
+ boot_uuid_offsets.emplace_back(state.boot_uuid.PackString(&fbb));
} else {
boot_uuid_offsets.emplace_back(fbb.CreateString(""));
}
@@ -194,6 +228,43 @@
flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
boot_uuids_offset = fbb.CreateVector(boot_uuid_offsets);
+ int64_t *oldest_remote_monotonic_timestamps;
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>>
+ oldest_remote_monotonic_timestamps_offset = fbb.CreateUninitializedVector(
+ state.size(), &oldest_remote_monotonic_timestamps);
+
+ int64_t *oldest_local_monotonic_timestamps;
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>>
+ oldest_local_monotonic_timestamps_offset = fbb.CreateUninitializedVector(
+ state.size(), &oldest_local_monotonic_timestamps);
+
+ int64_t *oldest_remote_unreliable_monotonic_timestamps;
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>>
+ oldest_remote_unreliable_monotonic_timestamps_offset =
+ fbb.CreateUninitializedVector(
+ state.size(), &oldest_remote_unreliable_monotonic_timestamps);
+
+ int64_t *oldest_local_unreliable_monotonic_timestamps;
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>>
+ oldest_local_unreliable_monotonic_timestamps_offset =
+ fbb.CreateUninitializedVector(
+ state.size(), &oldest_local_unreliable_monotonic_timestamps);
+
+ for (size_t i = 0; i < state.size(); ++i) {
+ oldest_remote_monotonic_timestamps[i] =
+ state[i].oldest_remote_monotonic_timestamp.time_since_epoch().count();
+ oldest_local_monotonic_timestamps[i] =
+ state[i].oldest_local_monotonic_timestamp.time_since_epoch().count();
+ oldest_remote_unreliable_monotonic_timestamps[i] =
+ state[i]
+ .oldest_remote_unreliable_monotonic_timestamp.time_since_epoch()
+ .count();
+ oldest_local_unreliable_monotonic_timestamps[i] =
+ state[i]
+ .oldest_local_unreliable_monotonic_timestamp.time_since_epoch()
+ .count();
+ }
+
aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
log_file_header_builder.add_name(name_offset);
@@ -263,6 +334,14 @@
std::chrono::duration_cast<std::chrono::nanoseconds>(
event_loop_->realtime_now().time_since_epoch())
.count());
+ log_file_header_builder.add_oldest_remote_monotonic_timestamps(
+ oldest_remote_monotonic_timestamps_offset);
+ log_file_header_builder.add_oldest_local_monotonic_timestamps(
+ oldest_local_monotonic_timestamps_offset);
+ log_file_header_builder.add_oldest_remote_unreliable_monotonic_timestamps(
+ oldest_remote_unreliable_monotonic_timestamps_offset);
+ log_file_header_builder.add_oldest_local_unreliable_monotonic_timestamps(
+ oldest_local_unreliable_monotonic_timestamps_offset);
fbb.FinishSizePrefixed(log_file_header_builder.Finish());
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
fbb.Release());
@@ -309,10 +388,17 @@
LOG(FATAL) << "Can't log forwarded timestamps in a singe log file.";
return nullptr;
}
-
MultiNodeLogNamer::MultiNodeLogNamer(std::string_view base_name,
EventLoop *event_loop)
- : LogNamer(event_loop), base_name_(base_name), old_base_name_() {}
+ : MultiNodeLogNamer(base_name, event_loop->configuration(), event_loop,
+ event_loop->node()) {}
+
+MultiNodeLogNamer::MultiNodeLogNamer(std::string_view base_name,
+ const Configuration *configuration,
+ EventLoop *event_loop, const Node *node)
+ : LogNamer(configuration, event_loop, node),
+ base_name_(base_name),
+ old_base_name_() {}
MultiNodeLogNamer::~MultiNodeLogNamer() {
if (!ran_out_of_space_) {
@@ -353,7 +439,8 @@
writer->QueueSizedFlatbuffer(header->Release());
if (!writer->ran_out_of_space()) {
- all_filenames_.emplace_back(filename);
+ all_filenames_.emplace_back(
+ absl::StrCat(config_sha256, ".bfbs", extension_));
}
CloseWriter(&writer);
}
@@ -420,7 +507,7 @@
nodes_.emplace_back(node);
}
- NewDataWriter data_writer(this, node,
+ NewDataWriter data_writer(this, configuration::GetNode(configuration_, node),
[this, channel](NewDataWriter *data_writer) {
OpenForwardedTimestampWriter(channel,
data_writer);
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
index d9f42db..c2c8dd1 100644
--- a/aos/events/logging/log_namer.h
+++ b/aos/events/logging/log_namer.h
@@ -44,14 +44,10 @@
// Rotates the log file, delaying writing the new header until data arrives.
void Rotate();
- // TODO(austin): Copy header and add all UUIDs and such when available
- // whenever data is written.
- //
- // TODO(austin): Add known timestamps for each node every time we cycle a log
- // for sorting.
-
- void UpdateRemote(size_t remote_node_index,
- const UUID &remote_node_boot_uuid);
+ void UpdateRemote(size_t remote_node_index, const UUID &remote_node_boot_uuid,
+ monotonic_clock::time_point monotonic_remote_time,
+ monotonic_clock::time_point monotonic_event_time,
+ bool reliable);
// Queues up a message with the provided boot UUID.
void QueueMessage(flatbuffers::FlatBufferBuilder *fbb,
const UUID &node_boot_uuid,
@@ -69,6 +65,28 @@
size_t parts_index() const { return parts_index_; }
const Node *node() const { return node_; }
+ // Data structure used to capture all the information about a remote node.
+ struct State {
+ // Boot UUID of the node.
+ UUID boot_uuid = UUID::Zero();
+ // Timestamp on the remote monotonic clock of the oldest message sent to
+ // node_index_.
+ monotonic_clock::time_point oldest_remote_monotonic_timestamp =
+ monotonic_clock::max_time;
+ // Timestamp on the local monotonic clock of the message in
+ // oldest_remote_monotonic_timestamp.
+ monotonic_clock::time_point oldest_local_monotonic_timestamp =
+ monotonic_clock::max_time;
+ // Timestamp on the remote monotonic clock of the oldest message sent to
+ // node_index_, excluding messages forwarded with time_to_live() == 0.
+ monotonic_clock::time_point oldest_remote_unreliable_monotonic_timestamp =
+ monotonic_clock::max_time;
+ // Timestamp on the local monotonic clock of the message in
+ // oldest_remote_unreliable_monotonic_timestamp.
+ monotonic_clock::time_point oldest_local_unreliable_monotonic_timestamp =
+ monotonic_clock::max_time;
+ };
+
private:
// Signals that a node has rebooted.
void Reboot();
@@ -88,7 +106,7 @@
std::function<void(NewDataWriter *)> close_;
bool header_written_ = false;
- std::vector<UUID> boot_uuids_;
+ std::vector<State> state_;
};
// Interface describing how to name, track, and add headers to log file parts.
@@ -96,10 +114,11 @@
public:
// Constructs a LogNamer with the primary node (ie the one the logger runs on)
// being node.
- LogNamer(EventLoop *event_loop)
+ LogNamer(const aos::Configuration *configuration, EventLoop *event_loop,
+ const aos::Node *node)
: event_loop_(event_loop),
- configuration_(event_loop_->configuration()),
- node_(event_loop_->node()),
+ configuration_(configuration),
+ node_(node),
logger_node_index_(configuration::GetNodeIndex(configuration_, node_)) {
nodes_.emplace_back(node_);
node_states_.resize(configuration::NodesCount(configuration_));
@@ -185,7 +204,7 @@
// Creates a new header by copying fields out of the template and combining
// them with the arguments provided.
aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> MakeHeader(
- size_t node_index, const std::vector<UUID> &boot_uuids,
+ size_t node_index, const std::vector<NewDataWriter::State> &state,
const UUID &parts_uuid, int parts_index) const;
EventLoop *event_loop_;
@@ -223,10 +242,11 @@
// any other log type.
class LocalLogNamer : public LogNamer {
public:
- LocalLogNamer(std::string_view base_name, aos::EventLoop *event_loop)
- : LogNamer(event_loop),
+ LocalLogNamer(std::string_view base_name, aos::EventLoop *event_loop,
+ const aos::Node *node)
+ : LogNamer(event_loop->configuration(), event_loop, node),
base_name_(base_name),
- data_writer_(this, node(),
+ data_writer_(this, node,
[this](NewDataWriter *writer) {
writer->writer = std::make_unique<DetachedBufferWriter>(
absl::StrCat(base_name_, ".part",
@@ -271,6 +291,9 @@
class MultiNodeLogNamer : public LogNamer {
public:
MultiNodeLogNamer(std::string_view base_name, EventLoop *event_loop);
+ MultiNodeLogNamer(std::string_view base_name,
+ const Configuration *configuration, EventLoop *event_loop,
+ const Node *node);
~MultiNodeLogNamer() override;
std::string_view base_name() const final { return base_name_; }
diff --git a/aos/events/logging/log_writer.cc b/aos/events/logging/log_writer.cc
index 4bc4448..fffc886 100644
--- a/aos/events/logging/log_writer.cc
+++ b/aos/events/logging/log_writer.cc
@@ -23,6 +23,8 @@
std::function<bool(const Channel *)> should_log)
: event_loop_(event_loop),
configuration_(configuration),
+ node_(configuration::GetNode(configuration_, event_loop->node())),
+ node_index_(configuration::GetNodeIndex(configuration_, node_)),
name_(network::GetHostname()),
timer_handler_(event_loop_->AddTimer(
[this]() { DoLogData(event_loop_->monotonic_now(), true); })),
@@ -31,7 +33,7 @@
? event_loop_->MakeFetcher<message_bridge::ServerStatistics>(
"/aos")
: aos::Fetcher<message_bridge::ServerStatistics>()) {
- VLOG(1) << "Creating logger for " << FlatbufferToJson(event_loop_->node());
+ VLOG(1) << "Creating logger for " << FlatbufferToJson(node_);
std::map<const Channel *, const Node *> timestamp_logger_channels;
@@ -47,7 +49,7 @@
if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
connection, event_loop_->node())) {
const Node *other_node = configuration::GetNode(
- event_loop_->configuration(), connection->name()->string_view());
+ configuration_, connection->name()->string_view());
VLOG(1) << "Timestamps are logged from "
<< FlatbufferToJson(other_node);
@@ -57,9 +59,6 @@
}
}
- const size_t our_node_index =
- configuration::GetNodeIndex(configuration_, event_loop_->node());
-
for (size_t channel_index = 0;
channel_index < configuration_->channels()->size(); ++channel_index) {
const Channel *const config_channel =
@@ -73,7 +72,7 @@
CHECK(channel != nullptr)
<< ": Failed to look up channel "
<< aos::configuration::CleanedChannelToString(config_channel);
- if (!should_log(channel)) {
+ if (!should_log(config_channel)) {
continue;
}
@@ -82,18 +81,29 @@
fs.channel = channel;
const bool is_local =
- configuration::ChannelIsSendableOnNode(channel, event_loop_->node());
+ configuration::ChannelIsSendableOnNode(config_channel, node_);
const bool is_readable =
- configuration::ChannelIsReadableOnNode(channel, event_loop_->node());
+ configuration::ChannelIsReadableOnNode(config_channel, node_);
const bool is_logged = configuration::ChannelMessageIsLoggedOnNode(
- channel, event_loop_->node());
+ config_channel, node_);
const bool log_message = is_logged && is_readable;
bool log_delivery_times = false;
- if (event_loop_->node() != nullptr) {
+ if (configuration::MultiNode(configuration_)) {
+ const aos::Connection *connection =
+ configuration::ConnectionToNode(config_channel, node_);
+
log_delivery_times = configuration::ConnectionDeliveryTimeIsLoggedOnNode(
- channel, event_loop_->node(), event_loop_->node());
+ connection, event_loop_->node());
+
+ CHECK_EQ(log_delivery_times,
+ configuration::ConnectionDeliveryTimeIsLoggedOnNode(
+ config_channel, node_, node_));
+
+ if (connection) {
+ fs.reliable_forwarding = (connection->time_to_live() == 0);
+ }
}
// Now, detect a RemoteMessage timestamp logger where we should just log the
@@ -109,7 +119,7 @@
if (log_delivery_times) {
VLOG(1) << " Delivery times";
fs.wants_timestamp_writer = true;
- fs.timestamp_node_index = our_node_index;
+ fs.timestamp_node_index = static_cast<int>(node_index_);
}
// Both the timestamp and data writers want data_node_index so it knows
// what the source node is.
@@ -127,7 +137,7 @@
if (!is_local) {
fs.log_type = LogType::kLogRemoteMessage;
} else {
- fs.data_node_index = our_node_index;
+ fs.data_node_index = static_cast<int>(node_index_);
}
}
if (log_contents) {
@@ -156,8 +166,7 @@
const Channel *logged_channel = aos::configuration::GetChannel(
configuration_, event_loop_channel->name()->string_view(),
- event_loop_channel->type()->string_view(), "",
- configuration::GetNode(configuration_, event_loop_->node()));
+ event_loop_channel->type()->string_view(), "", node_);
if (logged_channel != nullptr) {
event_loop_to_logged_channel_index_[event_loop_channel_index] =
@@ -241,7 +250,7 @@
log_event_uuid_ = UUID::Random();
log_start_uuid_ = log_start_uuid;
- VLOG(1) << "Starting logger for " << FlatbufferToJson(event_loop_->node());
+ VLOG(1) << "Starting logger for " << FlatbufferToJson(node_);
// We want to do as much work as possible before the initial Fetch. Time
// between that and actually starting to log opens up the possibility of
@@ -291,7 +300,7 @@
const aos::monotonic_clock::time_point header_time =
event_loop_->monotonic_now();
- LOG(INFO) << "Logging node as " << FlatbufferToJson(event_loop_->node())
+ LOG(INFO) << "Logging node as " << FlatbufferToJson(node_)
<< " start_time " << last_synchronized_time_ << ", took "
<< chrono::duration<double>(fetch_time - beginning_time).count()
<< " to fetch, "
@@ -393,7 +402,7 @@
monotonic_clock::min_time) {
return false;
}
- if (event_loop_->node() == node ||
+ if (node_ == node ||
!configuration::MultiNode(configuration_)) {
// There are no offsets to compute for ourself, so always succeed.
log_namer_->SetStartTimes(node_index, monotonic_start_time,
@@ -473,7 +482,7 @@
flatbuffers::Offset<Node> logger_node_offset;
if (configuration::MultiNode(configuration_)) {
- logger_node_offset = RecursiveCopyFlatBuffer(event_loop_->node(), &fbb);
+ logger_node_offset = RecursiveCopyFlatBuffer(node_, &fbb);
}
aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
@@ -546,9 +555,6 @@
// reboots which may have happened.
WriteMissingTimestamps();
- int our_node_index = aos::configuration::GetNodeIndex(
- event_loop_->configuration(), event_loop_->node());
-
// Write each channel to disk, one at a time.
for (FetcherStruct &f : fetchers_) {
while (true) {
@@ -572,7 +578,7 @@
}
if (f.writer != nullptr) {
const UUID source_node_boot_uuid =
- our_node_index != f.data_node_index
+ static_cast<int>(node_index_) != f.data_node_index
? f.fetcher->context().source_boot_uuid
: event_loop_->boot_uuid();
// Write!
@@ -587,7 +593,7 @@
RecordCreateMessageTime(start, end, &f);
VLOG(2) << "Writing data as node "
- << FlatbufferToJson(event_loop_->node()) << " for channel "
+ << FlatbufferToJson(node_) << " for channel "
<< configuration::CleanedChannelToString(f.fetcher->channel())
<< " to " << f.writer->filename() << " data "
<< FlatbufferToJson(
@@ -612,7 +618,7 @@
RecordCreateMessageTime(start, end, &f);
VLOG(2) << "Writing timestamps as node "
- << FlatbufferToJson(event_loop_->node()) << " for channel "
+ << FlatbufferToJson(node_) << " for channel "
<< configuration::CleanedChannelToString(f.fetcher->channel())
<< " to " << f.timestamp_writer->filename() << " timestamp "
<< FlatbufferToJson(
@@ -620,8 +626,10 @@
fbb.GetBufferPointer()));
// Tell our writer that we know something about the remote boot.
- f.timestamp_writer->UpdateRemote(f.data_node_index,
- f.fetcher->context().source_boot_uuid);
+ f.timestamp_writer->UpdateRemote(
+ f.data_node_index, f.fetcher->context().source_boot_uuid,
+ f.fetcher->context().monotonic_remote_time,
+ f.fetcher->context().monotonic_event_time, f.reliable_forwarding);
f.timestamp_writer->QueueMessage(&fbb, event_loop_->boot_uuid(), end);
}
diff --git a/aos/events/logging/log_writer.h b/aos/events/logging/log_writer.h
index f624028..5c8b0c7 100644
--- a/aos/events/logging/log_writer.h
+++ b/aos/events/logging/log_writer.h
@@ -149,7 +149,8 @@
// starts.
void StartLoggingLocalNamerOnRun(std::string base_name) {
event_loop_->OnRun([this, base_name]() {
- StartLogging(std::make_unique<LocalLogNamer>(base_name, event_loop_));
+ StartLogging(
+ std::make_unique<LocalLogNamer>(base_name, event_loop_, node_));
});
}
@@ -157,7 +158,8 @@
// processing starts.
void StartLoggingOnRun(std::string base_name) {
event_loop_->OnRun([this, base_name]() {
- StartLogging(std::make_unique<MultiNodeLogNamer>(base_name, event_loop_));
+ StartLogging(std::make_unique<MultiNodeLogNamer>(
+ base_name, configuration_, event_loop_, node_));
});
}
@@ -194,6 +196,9 @@
int timestamp_node_index = -1;
// Node that the contents this contents_writer will log are from.
int contents_node_index = -1;
+
+ // If true, this message is being sent over a reliable channel.
+ bool reliable_forwarding = false;
};
// Vector mapping from the channel index from the event loop to the logged
@@ -233,6 +238,19 @@
// The configuration to place at the top of the log file.
const Configuration *const configuration_;
+ // The node that is writing the log.
+ // For most cases, this is the same node as the node that is reading the
+ // messages. However, in some cases, these two nodes may be different, e.g. if
+ // one node is reading and modifying the messages, and another node is listening
+ // and saving those messages to another log.
+ //
+ // node_ is a pointer to the writing node, and that node is guaranteed to be
+ // in configuration_ which is the configuration being written to the top of
+ // the log file.
+ const Node *const node_;
+ // The node_index_ is the index of the node in configuration_.
+ const size_t node_index_;
+
UUID log_event_uuid_ = UUID::Zero();
const UUID logger_instance_uuid_ = UUID::Random();
std::unique_ptr<LogNamer> log_namer_;
diff --git a/aos/events/logging/logfile_sorting.cc b/aos/events/logging/logfile_sorting.cc
index 1821039..e27cecb 100644
--- a/aos/events/logging/logfile_sorting.cc
+++ b/aos/events/logging/logfile_sorting.cc
@@ -33,7 +33,7 @@
}
bool ConfigOnly(const LogFileHeader *header) {
- CHECK_EQ(LogFileHeader::MiniReflectTypeTable()->num_elems, 20u);
+ CHECK_EQ(LogFileHeader::MiniReflectTypeTable()->num_elems, 24u);
if (header->has_monotonic_start_time()) return false;
if (header->has_realtime_start_time()) return false;
if (header->has_max_out_of_order_duration()) return false;
@@ -53,6 +53,10 @@
if (header->has_boot_uuids()) return false;
if (header->has_logger_part_monotonic_start_time()) return false;
if (header->has_logger_part_realtime_start_time()) return false;
+ if (header->has_oldest_remote_monotonic_timestamps()) return false;
+ if (header->has_oldest_local_monotonic_timestamps()) return false;
+ if (header->has_oldest_remote_unreliable_monotonic_timestamps()) return false;
+ if (header->has_oldest_local_unreliable_monotonic_timestamps()) return false;
return header->has_configuration();
}
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 4c19867..20e38d2 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -486,6 +486,8 @@
raw_log_file_header_ = std::move(*raw_log_file_header);
+ CHECK(raw_log_file_header_.Verify()) << "Log file header is corrupted";
+
max_out_of_order_duration_ =
FLAGS_max_out_of_order > 0
? chrono::duration_cast<chrono::nanoseconds>(
diff --git a/aos/events/logging/logger.fbs b/aos/events/logging/logger.fbs
index 23da360..c71baad 100644
--- a/aos/events/logging/logger.fbs
+++ b/aos/events/logging/logger.fbs
@@ -67,7 +67,8 @@
source_node_boot_uuid: string (id: 13);
// Timestamps that this logfile started at on the logger's clocks. This is
- // mostly useful when trying to deduce the order of node reboots.
+ // mostly useful when trying to deduce the order of node reboots. These
+ // timestamps don't change on reboot, so they can't be used on their own to
+ // reliably order boots.
logger_monotonic_start_time:int64 = -9223372036854775808 (id: 14);
logger_realtime_start_time:int64 = -9223372036854775808 (id: 15);
@@ -95,12 +96,53 @@
logger_node:Node (id: 9);
// The boot UUIDs for all nodes we know them for, or "" for the ones we don't.
- // TODO(austin): Actually add this in the log writer.
boot_uuids:[string] (id: 17);
// Timestamps that the header on this part file was written on the logger node.
logger_part_monotonic_start_time:int64 = -9223372036854775808 (id: 18);
logger_part_realtime_start_time:int64 = -9223372036854775808 (id: 19);
+
+ // These timestamps provide summary information about the oldest messages we
+ // know which crossed the network. The goal is to enable log file sorting
+ // to determine the order of all boots by observing corresponding times
+ // across the network and using those to determine constraints so we can sort
+ // a DAG.
+ //
+ // There are 2 main problem cases. Let's say we have 2 channels. /a which
+ // is reliable, and /b which isn't, both sent from the same remote node.
+ //
+ // case 1: /a -> boot 0 received on boot 0.
+ // /b -> boot 1 received on boot 0.
+ // We start logging after both messages arrive.
+ //
+ // case 2: /a -> boot 0 received on boot 0.
+ // /b -> boot 0 received on boot 0.
+ // We log for a bit, then reboot. More messages show up when we reconnect.
+ // /a -> boot 0 received on boot 1.
+ // /b -> boot 0 received on boot 1.
+ //
+ // In case 1: we only have a reliable timestamp from boot 0, but that
+ // reliable timestamp makes it clear that /a was before /b, so boot 0 was
+ // before boot 1.
+ //
+ // In case 2: we have the same reliable timestamp, so that tells us nothing.
+ // The unreliable timestamps though tell a different story. /b will be after
+ // /a, since any messages on /b generated before the reboot won't get
+ // delivered. So, we get an ordering constraint saying that any sent /b's
+ // on the second boot were after /b on the first boot.
+ //
+ // We believe that any other cases are covered by the same mechanism.
+ //
+ // For all channels sent from a specific node, these vectors hold the
+ // timestamp of the oldest known message from that node, and the
+ // corresponding monotonic timestamp for when that was received on our node.
+ oldest_remote_monotonic_timestamps:[int64] (id: 20);
+ oldest_local_monotonic_timestamps:[int64] (id: 21);
+
+ // For all channels *excluding* the reliable channels (ttl == 0), record the
+ // same quantity.
+ oldest_remote_unreliable_monotonic_timestamps:[int64] (id: 22);
+ oldest_local_unreliable_monotonic_timestamps:[int64] (id: 23);
}
// Table holding a message.
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index a439f84..d6ad8fe 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -170,10 +170,11 @@
logger.set_polling_period(std::chrono::milliseconds(100));
logger_event_loop->OnRun(
[base_name1, base_name2, &logger_event_loop, &logger]() {
- logger.StartLogging(
- std::make_unique<LocalLogNamer>(base_name1, logger_event_loop.get()));
+ logger.StartLogging(std::make_unique<LocalLogNamer>(
+ base_name1, logger_event_loop.get(), logger_event_loop->node()));
EXPECT_DEATH(logger.StartLogging(std::make_unique<LocalLogNamer>(
- base_name2, logger_event_loop.get())),
+ base_name2, logger_event_loop.get(),
+ logger_event_loop->node())),
"Already logging");
});
event_loop_factory_.RunFor(chrono::milliseconds(20000));
@@ -203,8 +204,8 @@
logger.set_separate_config(false);
logger.set_polling_period(std::chrono::milliseconds(100));
logger_event_loop->OnRun([base_name, &logger_event_loop, &logger]() {
- logger.StartLogging(
- std::make_unique<LocalLogNamer>(base_name, logger_event_loop.get()));
+ logger.StartLogging(std::make_unique<LocalLogNamer>(
+ base_name, logger_event_loop.get(), logger_event_loop->node()));
logger.StopLogging(aos::monotonic_clock::min_time);
EXPECT_DEATH(logger.StopLogging(aos::monotonic_clock::min_time),
"Not logging right now");
@@ -240,13 +241,13 @@
Logger logger(logger_event_loop.get());
logger.set_separate_config(false);
logger.set_polling_period(std::chrono::milliseconds(100));
- logger.StartLogging(
- std::make_unique<LocalLogNamer>(base_name1, logger_event_loop.get()));
+ logger.StartLogging(std::make_unique<LocalLogNamer>(
+ base_name1, logger_event_loop.get(), logger_event_loop->node()));
event_loop_factory_.RunFor(chrono::milliseconds(10000));
logger.StopLogging(logger_event_loop->monotonic_now());
event_loop_factory_.RunFor(chrono::milliseconds(10000));
- logger.StartLogging(
- std::make_unique<LocalLogNamer>(base_name2, logger_event_loop.get()));
+ logger.StartLogging(std::make_unique<LocalLogNamer>(
+ base_name2, logger_event_loop.get(), logger_event_loop->node()));
event_loop_factory_.RunFor(chrono::milliseconds(10000));
}
@@ -558,6 +559,7 @@
result.emplace_back(logfile_base1_ + "_pi1_data.part0.bfbs");
result.emplace_back(logfile_base1_ + "_pi1_data.part1.bfbs");
result.emplace_back(logfile_base1_ + "_pi1_data.part2.bfbs");
+ result.emplace_back(logfile_base1_ + "_pi1_data.part3.bfbs");
result.emplace_back(logfile_base1_ +
"_pi2_data/test/aos.examples.Pong.part0.bfbs");
result.emplace_back(logfile_base1_ +
@@ -618,28 +620,20 @@
std::vector<std::string> MakePi1SingleDirectionLogfiles() {
std::vector<std::string> result;
result.emplace_back(logfile_base1_ + "_pi1_data.part0.bfbs");
- result.emplace_back(logfile_base1_ +
- "_pi2_data/test/aos.examples.Pong.part0.bfbs");
+ result.emplace_back(logfile_base1_ + "_pi1_data.part1.bfbs");
result.emplace_back(
logfile_base1_ +
"_pi2_data/pi2/aos/aos.message_bridge.Timestamp.part0.bfbs");
result.emplace_back(
absl::StrCat(logfile_base1_, "_", GetParam().sha256, ".bfbs"));
+ return result;
+ }
- if (shared()) {
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/"
- "aos.message_bridge.RemoteMessage.part0.bfbs");
- } else {
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/pi1/aos/"
- "aos-message_bridge-Timestamp/"
- "aos.message_bridge.RemoteMessage.part0.bfbs");
- result.emplace_back(logfile_base1_ +
- "_timestamps/pi1/aos/remote_timestamps/pi2/test/"
- "aos-examples-Ping/"
- "aos.message_bridge.RemoteMessage.part0.bfbs");
- }
+ std::vector<std::string> MakePi1DeadNodeLogfiles() {
+ std::vector<std::string> result;
+ result.emplace_back(logfile_base1_ + "_pi1_data.part0.bfbs");
+ result.emplace_back(
+ absl::StrCat(logfile_base1_, "_", GetParam().sha256, ".bfbs"));
return result;
}
@@ -664,14 +658,27 @@
struct LoggerState {
std::unique_ptr<EventLoop> event_loop;
std::unique_ptr<Logger> logger;
+ const Configuration *configuration;
+ const Node *node;
+ MultiNodeLogNamer *log_namer;
};
LoggerState MakeLogger(const Node *node,
- SimulatedEventLoopFactory *factory = nullptr) {
+ SimulatedEventLoopFactory *factory = nullptr,
+ const Configuration *configuration = nullptr) {
if (factory == nullptr) {
factory = &event_loop_factory_;
}
- return {factory->MakeEventLoop("logger", node), {}};
+ if (configuration == nullptr) {
+ configuration = factory->configuration();
+ }
+ return {
+ factory->MakeEventLoop(
+ "logger", configuration::GetNode(factory->configuration(), node)),
+ {},
+ configuration,
+ node,
+ nullptr};
}
void StartLogger(LoggerState *logger, std::string logfile_base = "",
@@ -684,14 +691,16 @@
}
}
- logger->logger = std::make_unique<Logger>(logger->event_loop.get());
+ logger->logger = std::make_unique<Logger>(logger->event_loop.get(),
+ logger->configuration);
logger->logger->set_polling_period(std::chrono::milliseconds(100));
logger->logger->set_name(absl::StrCat(
"name_prefix_", logger->event_loop->node()->name()->str()));
logger->event_loop->OnRun([logger, logfile_base, compress]() {
std::unique_ptr<MultiNodeLogNamer> namer =
- std::make_unique<MultiNodeLogNamer>(logfile_base,
- logger->event_loop.get());
+ std::make_unique<MultiNodeLogNamer>(
+ logfile_base, logger->configuration, logger->event_loop.get(),
+ logger->node);
if (compress) {
#ifdef LZMA
namer->set_extension(".xz");
@@ -701,6 +710,7 @@
LOG(FATAL) << "Compression unsupported";
#endif
}
+ logger->log_namer = namer.get();
logger->logger->StartLogging(std::move(namer));
});
@@ -1527,7 +1537,8 @@
event_loop_factory_.RunFor(logger_run3);
}
- LogReader reader(SortParts(logfiles_));
+ LogReader reader(
+ SortParts(MakeLogFiles(logfile_base1_, logfile_base2_, 3, 2)));
SimulatedEventLoopFactory log_reader_factory(reader.configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
@@ -2268,6 +2279,119 @@
event_loop_factory_.RunFor(chrono::milliseconds(20000));
}
+ // Confirm that our new oldest timestamps properly update as we reboot and
+ // rotate.
+ for (const std::string &file : pi1_reboot_logfiles_) {
+ std::optional<SizePrefixedFlatbufferVector<LogFileHeader>> log_header =
+ ReadHeader(file);
+ CHECK(log_header);
+ if (log_header->message().has_configuration()) {
+ continue;
+ }
+
+ if (log_header->message().node()->name()->string_view() != "pi1") {
+ continue;
+ }
+ SCOPED_TRACE(file);
+ SCOPED_TRACE(aos::FlatbufferToJson(
+ *log_header, {.multi_line = true, .max_vector_size = 100}));
+ ASSERT_TRUE(log_header->message().has_oldest_remote_monotonic_timestamps());
+ ASSERT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->size(), 2u);
+ EXPECT_EQ(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message().has_oldest_local_monotonic_timestamps());
+ ASSERT_EQ(log_header->message().oldest_local_monotonic_timestamps()->size(),
+ 2u);
+ EXPECT_EQ(log_header->message().oldest_local_monotonic_timestamps()->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_remote_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ EXPECT_EQ(log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+ ASSERT_TRUE(log_header->message()
+ .has_oldest_local_unreliable_monotonic_timestamps());
+ ASSERT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->size(),
+ 2u);
+ EXPECT_EQ(log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(0),
+ monotonic_clock::max_time.time_since_epoch().count());
+
+ const monotonic_clock::time_point oldest_remote_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_remote_monotonic_timestamps()->Get(
+ 1)));
+ const monotonic_clock::time_point oldest_local_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message().oldest_local_monotonic_timestamps()->Get(1)));
+ const monotonic_clock::time_point
+ oldest_remote_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_remote_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ const monotonic_clock::time_point
+ oldest_local_unreliable_monotonic_timestamps =
+ monotonic_clock::time_point(chrono::nanoseconds(
+ log_header->message()
+ .oldest_local_unreliable_monotonic_timestamps()
+ ->Get(1)));
+ switch (log_header->message().parts_index()) {
+ case 0:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_monotonic_timestamps, monotonic_clock::max_time);
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ break;
+ case 1:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90200)));
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(90350)));
+ break;
+ case 2:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100000)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100150)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::max_time);
+ break;
+ case 3:
+ EXPECT_EQ(oldest_remote_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100000)));
+ EXPECT_EQ(oldest_local_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100150)));
+ EXPECT_EQ(oldest_remote_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100200)));
+ EXPECT_EQ(oldest_local_unreliable_monotonic_timestamps,
+ monotonic_clock::time_point(chrono::microseconds(10100350)));
+ break;
+ default:
+ FAIL();
+ break;
+ }
+ }
+
// Confirm that we refuse to replay logs with missing boot uuids.
EXPECT_DEATH(
{
@@ -2356,7 +2480,7 @@
// Confirm that we can parse the result. LogReader has enough internal CHECKs
// to confirm the right thing happened.
- ConfirmReadable(pi1_single_direction_logfiles_);
+ ConfirmReadable(MakePi1DeadNodeLogfiles());
}
constexpr std::string_view kCombinedConfigSha1(
@@ -2378,7 +2502,86 @@
Param{"multinode_pingpong_split_config.json", false,
kSplitConfigSha1}));
-// TODO(austin): Make a log file where the remote node has no start time.
+// Tests that we can relog with a different config. This makes most sense when
+// you are trying to edit a log and want to use channel renaming + the original
+// config in the new log.
+TEST_P(MultinodeLoggerTest, LogDifferentConfig) {
+ time_converter_.StartEqual();
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ LogReader reader(SortParts(logfiles_));
+ reader.RemapLoggedChannel<aos::examples::Ping>("/test", "/original");
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+ log_reader_factory.set_send_delay(chrono::microseconds(0));
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi1) << " pi1";
+ LOG(INFO) << "Start time " << reader.monotonic_start_time(pi2) << " pi2";
+ LOG(INFO) << "now pi1 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi1)->monotonic_now();
+ LOG(INFO) << "now pi2 "
+ << log_reader_factory.GetNodeEventLoopFactory(pi2)->monotonic_now();
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ reader.event_loop_factory()->set_send_delay(chrono::microseconds(0));
+
+ // And confirm we can re-create a log again, while checking the contents.
+ std::vector<std::string> log_files;
+ {
+ LoggerState pi1_logger =
+ MakeLogger(configuration::GetNode(reader.logged_configuration(), pi1_),
+ &log_reader_factory, reader.logged_configuration());
+ LoggerState pi2_logger =
+ MakeLogger(configuration::GetNode(reader.logged_configuration(), pi2_),
+ &log_reader_factory, reader.logged_configuration());
+
+ StartLogger(&pi1_logger, tmp_dir_ + "/relogged1");
+ StartLogger(&pi2_logger, tmp_dir_ + "/relogged2");
+
+ log_reader_factory.Run();
+
+ for (auto &x : pi1_logger.log_namer->all_filenames()) {
+ log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged1_", x));
+ }
+ for (auto &x : pi2_logger.log_namer->all_filenames()) {
+ log_files.emplace_back(absl::StrCat(tmp_dir_, "/relogged2_", x));
+ }
+ }
+
+ reader.Deregister();
+
+ // And verify that we can run the LogReader over the relogged files without
+ // hitting any fatal errors.
+ {
+ LogReader relogged_reader(SortParts(log_files));
+ relogged_reader.Register();
+
+ relogged_reader.event_loop_factory()->Run();
+ }
+}
} // namespace testing
} // namespace logger
diff --git a/y2020/vision/tools/python_code/BUILD b/y2020/vision/tools/python_code/BUILD
index 6223d76..a867578 100644
--- a/y2020/vision/tools/python_code/BUILD
+++ b/y2020/vision/tools/python_code/BUILD
@@ -1,14 +1,77 @@
load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_py_library")
load("//tools:platforms.bzl", "platforms")
+py_library(
+ name = "train_and_match",
+ srcs = ["train_and_match.py"],
+ data = [
+ "@python_repo//:scipy",
+ ],
+ deps = [
+ "//external:python-glog",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ ],
+)
+
+py_library(
+ name = "define_training_data",
+ srcs = [
+ "define_training_data.py",
+ ],
+ data = [
+ "@python_repo//:scipy",
+ ],
+ deps = [
+ ":train_and_match",
+ "//external:python-glog",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ ],
+)
+
+py_library(
+ name = "camera_definition",
+ srcs = [
+ "camera_definition.py",
+ ],
+ deps = [
+ ":define_training_data",
+ "//external:python-glog",
+ ],
+)
+
+py_library(
+ name = "target_definition",
+ srcs = [
+ "target_definition.py",
+ ],
+ deps = [
+ ":camera_definition",
+ ":define_training_data",
+ ":train_and_match",
+ "//external:python-glog",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ ],
+)
+
+py_binary(
+ name = "target_definition_main",
+ srcs = ["target_definition.py"],
+ data = glob(["calib_files/*.json"]) + glob([
+ "test_images/*.png",
+ ]),
+ main = "target_definition.py",
+ python_version = "PY3",
+ target_compatible_with = ["@platforms//os:linux"],
+ deps = [
+ ":target_definition",
+ "@bazel_tools//tools/python/runfiles",
+ ],
+)
+
py_binary(
name = "load_sift_training",
srcs = [
- "camera_definition.py",
- "define_training_data.py",
"load_sift_training.py",
- "target_definition.py",
- "train_and_match.py",
],
args = [
"sift_training_data.h",
@@ -17,9 +80,30 @@
"test_images/*.png",
]),
python_version = "PY3",
- srcs_version = "PY2AND3",
target_compatible_with = ["@platforms//os:linux"],
deps = [
+ ":camera_definition",
+ ":target_definition",
+ "//external:python-glog",
+ "//y2020/vision/sift:sift_fbs_python",
+ "@bazel_tools//tools/python/runfiles",
+ "@opencv_contrib_nonfree_amd64//:python_opencv",
+ ],
+)
+
+py_binary(
+ name = "image_match_test",
+ srcs = [
+ "image_match_test.py",
+ ],
+ data = glob(["calib_files/*.json"]) + glob([
+ "test_images/*.png",
+ ]),
+ python_version = "PY3",
+ target_compatible_with = ["@platforms//os:linux"],
+ deps = [
+ ":camera_definition",
+ ":target_definition",
"//external:python-glog",
"//y2020/vision/sift:sift_fbs_python",
"@bazel_tools//tools/python/runfiles",
diff --git a/y2020/vision/tools/python_code/image_match_test.py b/y2020/vision/tools/python_code/image_match_test.py
index b085166..b6d3c1d 100644
--- a/y2020/vision/tools/python_code/image_match_test.py
+++ b/y2020/vision/tools/python_code/image_match_test.py
@@ -9,8 +9,6 @@
import camera_definition
### DEFINITIONS
-target_definition.USE_BAZEL = False
-camera_definition.USE_BAZEL = False
target_list = target_definition.compute_target_definition()
camera_list = camera_definition.load_camera_definitions()