Merge "Sort parts by UUID and part_index"
diff --git a/README.md b/README.md
index e588d5c..af5ae6e 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,42 @@
```
2. Allow Bazel's sandboxing to work:
Follow the direction in `doc/frc971.conf`.
+### Setting up access to a workspace on the build server
+ 1. Use ssh-keygen to create a public and private key.
+```console
+# IMPORTANT These are the windows instructions.
+cd ~
+ssh-keygen -f .\.ssh\id_971_rsa
+```
+ 2. Send the contents of id_971_rsa.pub to Stephan along with the password that you want to use. WAIT for feedback, as he needs to set up the account.
+```console
+cat .\.ssh\id_971_rsa.pub
+# Then send the stuff that gets printed to Stephan via slack.
+```
+ 3. Once you hear back from Stephan, test SSH.
+```console
+ssh [user]@build.frc971.org -p 2222 -i ./.ssh/id_971_rsa -L 9971:127.0.0.1:3389
+```
+ 4. If that doesn't work, then send the error message to #coding. However, if it does work, then use the `exit` command and then SSH tunnel.
+```console
+ssh [user]@build.frc971.org -p 2222 -i ./.ssh/id_971_rsa -L 9971:127.0.0.1:3389
+```
+ 5. So at this point you run the Remote Desktop app in windows.
+Once you get there, all you need to do is put `127.0.0.1:9971` for the computer name, and use your SVN username.
+Once you get connected, accept the server certificate, and then enter the password that you gave Stephan. (It's either something unique or your SVN password.)
+Then select the Default panel config.
+You can exit the Remote Desktop if you are good with the raw command-line interface.
+And for future logins all you have to do is tunnel and then login using the app.
+Now, if you have a graphical application that you are developing (e.g. the spline UI), then you have to run the build command in the Remote Desktop application.
+
+# ONE VERY IMPORTANT LAST STEP
+In order for you to be able to commit, you need to run this command, replacing <YOUR EMAIL> with the email address that is registered in Gerrit.
+```console
+git config --global user.email "<YOUR EMAIL>"
+```
+One thing that also needs to be said is that you DO NOT need to do any of the installation steps, or the step with `frc971.conf`.
+If there are any questions, use #coding so that other people who may run into the same issue can refer back to that conversation.
+Some people that you can @ would be Het <--Insert your name here if you are pingable-->
### Some useful Bazel commands:
* Build and test everything (on the host system):
diff --git a/aos/BUILD b/aos/BUILD
index a7d691b..286882f 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -466,7 +466,7 @@
"configuration_test.cc",
],
data = [
- "//aos/events:pingpong_config.json",
+ "//aos/events:pingpong_config",
"//aos/events:pong.bfbs",
"//aos/testdata:test_configs",
],
diff --git a/aos/actions/BUILD b/aos/actions/BUILD
index 6276105..14592cf 100644
--- a/aos/actions/BUILD
+++ b/aos/actions/BUILD
@@ -58,7 +58,7 @@
srcs = [
"action_test.cc",
],
- data = ["action_test_config.json"],
+ data = [":action_test_config"],
deps = [
":action_lib",
":actions_fbs",
diff --git a/aos/aos_dump.cc b/aos/aos_dump.cc
index ac82b95..ef61f6b 100644
--- a/aos/aos_dump.cc
+++ b/aos/aos_dump.cc
@@ -14,6 +14,9 @@
"If true, fetch the current message on the channel first");
DEFINE_bool(pretty, false,
"If true, pretty print the messages on multiple lines");
+DEFINE_bool(print_timestamps, true, "If true, timestamps are printed.");
+DEFINE_uint64(count, 0,
+ "If >0, aos_dump will exit after printing this many messages.");
namespace {
@@ -29,14 +32,18 @@
builder, channel->schema(), static_cast<const uint8_t *>(context.data),
{FLAGS_pretty, static_cast<size_t>(FLAGS_max_vector_size)});
- if (context.monotonic_remote_time != context.monotonic_event_time) {
- std::cout << context.realtime_remote_time << " ("
- << context.monotonic_remote_time << ") delivered "
- << context.realtime_event_time << " ("
- << context.monotonic_event_time << "): " << *builder << '\n';
+ if (FLAGS_print_timestamps) {
+ if (context.monotonic_remote_time != context.monotonic_event_time) {
+ std::cout << context.realtime_remote_time << " ("
+ << context.monotonic_remote_time << ") delivered "
+ << context.realtime_event_time << " ("
+ << context.monotonic_event_time << "): " << *builder << '\n';
+ } else {
+ std::cout << context.realtime_event_time << " ("
+ << context.monotonic_event_time << "): " << *builder << '\n';
+ }
} else {
- std::cout << context.realtime_event_time << " ("
- << context.monotonic_event_time << "): " << *builder << '\n';
+ std::cout << *builder << '\n';
}
}
@@ -58,7 +65,7 @@
aos::configuration::ReadConfig(FLAGS_config);
const aos::Configuration *config_msg = &config.message();
- ::aos::ShmEventLoop event_loop(config_msg);
+ aos::ShmEventLoop event_loop(config_msg);
event_loop.SkipTimingReport();
event_loop.SkipAosLog();
@@ -98,6 +105,8 @@
LOG(FATAL) << "Multiple channels found with same type";
}
+ uint64_t message_count = 0;
+
aos::FastStringBuilder str_builder;
for (const aos::Channel *channel : found_channels) {
@@ -106,13 +115,22 @@
event_loop.MakeRawFetcher(channel);
if (fetcher->Fetch()) {
PrintMessage(channel, fetcher->context(), &str_builder);
+ ++message_count;
}
}
+ if (FLAGS_count > 0 && message_count >= FLAGS_count) {
+ return 0;
+ }
+
event_loop.MakeRawWatcher(
- channel, [channel, &str_builder](const aos::Context &context,
- const void * /*message*/) {
+ channel, [channel, &str_builder, &event_loop, &message_count](
+ const aos::Context &context, const void * /*message*/) {
PrintMessage(channel, context, &str_builder);
+ ++message_count;
+ if (FLAGS_count > 0 && message_count >= FLAGS_count) {
+ event_loop.Exit();
+ }
});
}
diff --git a/aos/config.bzl b/aos/config.bzl
index d6dcf1e..8f040d6 100644
--- a/aos/config.bzl
+++ b/aos/config.bzl
@@ -5,16 +5,20 @@
"transitive_src",
])
-def aos_config(name, src, flatbuffers = [], deps = [], visibility = None):
+def aos_config(name, src, flatbuffers = [], deps = [], visibility = None, testonly = False):
_aos_config(
name = name,
src = src,
deps = deps,
flatbuffers = [expand_label(flatbuffer) + "_reflection_out" for flatbuffer in flatbuffers],
visibility = visibility,
+ testonly = testonly,
)
def _aos_config_impl(ctx):
+ config = ctx.actions.declare_file(ctx.label.name + ".json")
+ stripped_config = ctx.actions.declare_file(ctx.label.name + ".stripped.json")
+
flatbuffers_depset = depset(
ctx.files.flatbuffers,
transitive = [dep[AosConfigInfo].transitive_flatbuffers for dep in ctx.attr.deps],
@@ -27,16 +31,23 @@
all_files = flatbuffers_depset.to_list() + src_depset.to_list()
ctx.actions.run(
- outputs = [ctx.outputs.config, ctx.outputs.stripped_config],
+ outputs = [config, stripped_config],
inputs = all_files,
- arguments = [ctx.outputs.config.path, ctx.outputs.stripped_config.path, ctx.files.src[0].short_path, ctx.bin_dir.path] + [f.path for f in flatbuffers_depset.to_list()],
+ arguments = [config.path, stripped_config.path, ctx.files.src[0].short_path, ctx.bin_dir.path] + [f.path for f in flatbuffers_depset.to_list()],
progress_message = "Flattening config",
executable = ctx.executable._config_flattener,
)
- return AosConfigInfo(
- transitive_flatbuffers = flatbuffers_depset,
- transitive_src = src_depset,
- )
+ runfiles = ctx.runfiles(files = [config, stripped_config])
+ return [
+ DefaultInfo(
+ files = depset([config, stripped_config]),
+ runfiles = runfiles,
+ ),
+ AosConfigInfo(
+ transitive_flatbuffers = flatbuffers_depset,
+ transitive_src = src_depset,
+ ),
+ ]
_aos_config = rule(
attrs = {
@@ -56,9 +67,5 @@
mandatory = False,
),
},
- outputs = {
- "config": "%{name}.json",
- "stripped_config": "%{name}.stripped.json",
- },
implementation = _aos_config_impl,
)
diff --git a/aos/events/BUILD b/aos/events/BUILD
index 2a032e5..5912ac6 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -106,7 +106,7 @@
srcs = [
"ping.cc",
],
- data = ["pingpong_config.json"],
+ data = [":pingpong_config"],
deps = [
":ping_lib",
":shm_event_loop",
@@ -171,7 +171,7 @@
srcs = [
"pong.cc",
],
- data = ["pingpong_config.json"],
+ data = [":pingpong_config"],
deps = [
":ping_fbs",
":pong_fbs",
@@ -187,7 +187,7 @@
cc_test(
name = "pingpong_test",
srcs = ["pingpong_test.cc"],
- data = [":pingpong_config.json"],
+ data = [":pingpong_config"],
deps = [
":ping_lib",
":pong_lib",
@@ -267,7 +267,7 @@
cc_test(
name = "simulated_event_loop_test",
srcs = ["simulated_event_loop_test.cc"],
- data = ["multinode_pingpong_config.json"],
+ data = [":multinode_pingpong_config"],
shard_count = 4,
deps = [
":event_loop_param_test",
diff --git a/aos/events/aos_logging.cc b/aos/events/aos_logging.cc
index f831071..d312925 100644
--- a/aos/events/aos_logging.cc
+++ b/aos/events/aos_logging.cc
@@ -19,7 +19,7 @@
builder.add_message(message_str);
builder.add_level(
static_cast<::aos::logging::Level>(message_data.level));
- builder.add_source(message_data.source);
+ builder.add_source_pid(message_data.source);
builder.add_name(name_str);
message.Send(builder.Finish());
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 81ab90f..56bef43 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -36,11 +36,13 @@
cc_library(
name = "logger",
srcs = [
+ "log_namer.cc",
"logger.cc",
"logger_math.cc",
],
hdrs = [
"eigen_mpq.h",
+ "log_namer.h",
"logger.h",
],
visibility = ["//visibility:public"],
@@ -148,8 +150,8 @@
name = "logger_test",
srcs = ["logger_test.cc"],
data = [
- ":multinode_pingpong_config.json",
- "//aos/events:pingpong_config.json",
+ ":multinode_pingpong_config",
+ "//aos/events:pingpong_config",
],
deps = [
":logger",
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
new file mode 100644
index 0000000..def0842
--- /dev/null
+++ b/aos/events/logging/log_namer.cc
@@ -0,0 +1,237 @@
+#include "aos/events/logging/log_namer.h"
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <string_view>
+#include <vector>
+
+#include "absl/strings/str_cat.h"
+#include "aos/events/logging/logfile_utils.h"
+#include "aos/events/logging/logger_generated.h"
+#include "aos/events/logging/uuid.h"
+#include "flatbuffers/flatbuffers.h"
+#include "glog/logging.h"
+
+namespace aos {
+namespace logger {
+
+void LogNamer::UpdateHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const UUID &uuid, int parts_index) const {
+ header->mutable_message()->mutate_parts_index(parts_index);
+ CHECK_EQ(uuid.string_view().size(),
+ header->mutable_message()->mutable_parts_uuid()->size());
+ std::copy(uuid.string_view().begin(), uuid.string_view().end(),
+ reinterpret_cast<char *>(
+ header->mutable_message()->mutable_parts_uuid()->Data()));
+}
+
+void LocalLogNamer::WriteHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const Node *node) {
+ CHECK_EQ(node, this->node());
+ UpdateHeader(header, uuid_, part_number_);
+ data_writer_->WriteSizedFlatbuffer(header->full_span());
+}
+
+DetachedBufferWriter *LocalLogNamer::MakeWriter(const Channel *channel) {
+ CHECK(configuration::ChannelIsSendableOnNode(channel, node()));
+ return data_writer_.get();
+}
+
+void LocalLogNamer::Rotate(
+ const Node *node,
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) {
+ CHECK(node == this->node());
+ ++part_number_;
+ *data_writer_ = std::move(*OpenDataWriter());
+ UpdateHeader(header, uuid_, part_number_);
+ data_writer_->WriteSizedFlatbuffer(header->full_span());
+}
+
+DetachedBufferWriter *LocalLogNamer::MakeTimestampWriter(
+ const Channel *channel) {
+ CHECK(configuration::ChannelIsReadableOnNode(channel, node_))
+ << ": Message is not delivered to this node.";
+ CHECK(node_ != nullptr) << ": Can't log timestamps in a single node world";
+ CHECK(configuration::ConnectionDeliveryTimeIsLoggedOnNode(channel, node_,
+ node_))
+ << ": Delivery times aren't logged for this channel on this node.";
+ return data_writer_.get();
+}
+
+DetachedBufferWriter *LocalLogNamer::MakeForwardedTimestampWriter(
+ const Channel * /*channel*/, const Node * /*node*/) {
+ LOG(FATAL) << "Can't log forwarded timestamps in a singe log file.";
+ return nullptr;
+}
+
+MultiNodeLogNamer::MultiNodeLogNamer(std::string_view base_name,
+ const Configuration *configuration,
+ const Node *node)
+ : LogNamer(node),
+ base_name_(base_name),
+ configuration_(configuration),
+ uuid_(UUID::Random()),
+ data_writer_(OpenDataWriter()) {}
+
+void MultiNodeLogNamer::WriteHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const Node *node) {
+ if (node == this->node()) {
+ UpdateHeader(header, uuid_, part_number_);
+ data_writer_->WriteSizedFlatbuffer(header->full_span());
+ } else {
+ for (std::pair<const Channel *const, DataWriter> &data_writer :
+ data_writers_) {
+ if (node == data_writer.second.node) {
+ UpdateHeader(header, data_writer.second.uuid,
+ data_writer.second.part_number);
+ data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
+ }
+ }
+ }
+}
+
+void MultiNodeLogNamer::Rotate(
+ const Node *node,
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) {
+ if (node == this->node()) {
+ ++part_number_;
+ *data_writer_ = std::move(*OpenDataWriter());
+ UpdateHeader(header, uuid_, part_number_);
+ data_writer_->WriteSizedFlatbuffer(header->full_span());
+ } else {
+ for (std::pair<const Channel *const, DataWriter> &data_writer :
+ data_writers_) {
+ if (node == data_writer.second.node) {
+ ++data_writer.second.part_number;
+ data_writer.second.rotate(data_writer.first, &data_writer.second);
+ UpdateHeader(header, data_writer.second.uuid,
+ data_writer.second.part_number);
+ data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
+ }
+ }
+ }
+}
+
+DetachedBufferWriter *MultiNodeLogNamer::MakeWriter(const Channel *channel) {
+ // See if we can read the data on this node at all.
+ const bool is_readable =
+ configuration::ChannelIsReadableOnNode(channel, this->node());
+ if (!is_readable) {
+ return nullptr;
+ }
+
+ // Then, see if we are supposed to log the data here.
+ const bool log_message =
+ configuration::ChannelMessageIsLoggedOnNode(channel, this->node());
+
+ if (!log_message) {
+ return nullptr;
+ }
+
+ // Now, sort out if this is data generated on this node, or not. It is
+ // generated if it is sendable on this node.
+ if (configuration::ChannelIsSendableOnNode(channel, this->node())) {
+ return data_writer_.get();
+ }
+
+ // Ok, we have data that is being forwarded to us that we are supposed to
+ // log. It needs to be logged with send timestamps, but be sorted enough
+ // to be able to be processed.
+ CHECK(data_writers_.find(channel) == data_writers_.end());
+
+ // Track that this node is being logged.
+ const Node *source_node = configuration::GetNode(
+ configuration_, channel->source_node()->string_view());
+
+ if (std::find(nodes_.begin(), nodes_.end(), source_node) == nodes_.end()) {
+ nodes_.emplace_back(source_node);
+ }
+
+ DataWriter data_writer;
+ data_writer.node = source_node;
+ data_writer.rotate = [this](const Channel *channel, DataWriter *data_writer) {
+ OpenWriter(channel, data_writer);
+ };
+ data_writer.rotate(channel, &data_writer);
+
+ return data_writers_.insert(std::make_pair(channel, std::move(data_writer)))
+ .first->second.writer.get();
+}
+
+DetachedBufferWriter *MultiNodeLogNamer::MakeForwardedTimestampWriter(
+ const Channel *channel, const Node *node) {
+ // See if we can read the data on this node at all.
+ const bool is_readable =
+ configuration::ChannelIsReadableOnNode(channel, this->node());
+ CHECK(is_readable) << ": " << configuration::CleanedChannelToString(channel);
+
+ CHECK(data_writers_.find(channel) == data_writers_.end());
+
+ if (std::find(nodes_.begin(), nodes_.end(), node) == nodes_.end()) {
+ nodes_.emplace_back(node);
+ }
+
+ DataWriter data_writer;
+ data_writer.node = node;
+ data_writer.rotate = [this](const Channel *channel, DataWriter *data_writer) {
+ OpenForwardedTimestampWriter(channel, data_writer);
+ };
+ data_writer.rotate(channel, &data_writer);
+
+ return data_writers_.insert(std::make_pair(channel, std::move(data_writer)))
+ .first->second.writer.get();
+}
+
+DetachedBufferWriter *MultiNodeLogNamer::MakeTimestampWriter(
+ const Channel *channel) {
+ const bool log_delivery_times =
+ (this->node() == nullptr)
+ ? false
+ : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
+ channel, this->node(), this->node());
+ if (!log_delivery_times) {
+ return nullptr;
+ }
+
+ return data_writer_.get();
+}
+
+void MultiNodeLogNamer::OpenForwardedTimestampWriter(const Channel *channel,
+ DataWriter *data_writer) {
+ std::string filename =
+ absl::StrCat(base_name_, "_timestamps", channel->name()->string_view(),
+ "/", channel->type()->string_view(), ".part",
+ data_writer->part_number, ".bfbs");
+
+ if (!data_writer->writer) {
+ data_writer->writer = std::make_unique<DetachedBufferWriter>(filename);
+ } else {
+ *data_writer->writer = DetachedBufferWriter(filename);
+ }
+}
+
+void MultiNodeLogNamer::OpenWriter(const Channel *channel,
+ DataWriter *data_writer) {
+ const std::string filename = absl::StrCat(
+ base_name_, "_", channel->source_node()->string_view(), "_data",
+ channel->name()->string_view(), "/", channel->type()->string_view(),
+ ".part", data_writer->part_number, ".bfbs");
+ if (!data_writer->writer) {
+ data_writer->writer = std::make_unique<DetachedBufferWriter>(filename);
+ } else {
+ *data_writer->writer = DetachedBufferWriter(filename);
+ }
+}
+
+std::unique_ptr<DetachedBufferWriter> MultiNodeLogNamer::OpenDataWriter() {
+ return std::make_unique<DetachedBufferWriter>(
+ absl::StrCat(base_name_, "_", node()->name()->string_view(), "_data.part",
+ part_number_, ".bfbs"));
+}
+
+} // namespace logger
+} // namespace aos
diff --git a/aos/events/logging/log_namer.h b/aos/events/logging/log_namer.h
new file mode 100644
index 0000000..631016b
--- /dev/null
+++ b/aos/events/logging/log_namer.h
@@ -0,0 +1,168 @@
+#ifndef AOS_EVENTS_LOGGING_LOG_NAMER_H_
+#define AOS_EVENTS_LOGGING_LOG_NAMER_H_
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <string_view>
+#include <vector>
+
+#include "aos/events/logging/logfile_utils.h"
+#include "aos/events/logging/logger_generated.h"
+#include "aos/events/logging/uuid.h"
+#include "flatbuffers/flatbuffers.h"
+
+namespace aos {
+namespace logger {
+
+// Interface describing how to name, track, and add headers to log file parts.
+class LogNamer {
+ public:
+ // Constructs a LogNamer with the primary node (ie the one the logger runs on)
+ // being node.
+ LogNamer(const Node *node) : node_(node) { nodes_.emplace_back(node_); }
+ virtual ~LogNamer() {}
+
+ // Writes the header to all log files for a specific node. This function
+ // needs to be called after all the writers are created.
+ //
+ // Modifies header to contain the uuid and part number for each writer as it
+ // writes it. Since this is done unconditionally, it does not restore the
+ // previous value at the end.
+ virtual void WriteHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const Node *node) = 0;
+
+ // Returns a writer for writing data logged on this channel (on the node
+ // provided in the constructor).
+ virtual DetachedBufferWriter *MakeWriter(const Channel *channel) = 0;
+
+ // Returns a writer for writing timestamps logged on this channel (on the node
+ // provided in the constructor).
+ virtual DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) = 0;
+
+ // Returns a writer for writing timestamps delivered over the special
+ // /aos/remote_timestamps/* channels. node is the node that the timestamps
+ // are forwarded back from.
+ virtual DetachedBufferWriter *MakeForwardedTimestampWriter(
+ const Channel *channel, const Node *node) = 0;
+
+ // Rotates all log files for the provided node. The provided header will be
+ // modified and written per WriteHeader above.
+ virtual void Rotate(
+ const Node *node,
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) = 0;
+
+ // Returns all the nodes that data is being written for.
+ const std::vector<const Node *> &nodes() const { return nodes_; }
+
+ // Returns the node the logger is running on.
+ const Node *node() const { return node_; }
+
+ protected:
+ // Modifies the header to have the provided UUID and part id.
+ void UpdateHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const UUID &uuid, int part_id) const;
+
+ const Node *const node_;
+ std::vector<const Node *> nodes_;
+};
+
+// Local log namer is a simple version which only names things
+// "base_name.part#.bfbs" and increments the part number. It doesn't support
+// any other log type.
+class LocalLogNamer : public LogNamer {
+ public:
+ LocalLogNamer(std::string_view base_name, const Node *node)
+ : LogNamer(node),
+ base_name_(base_name),
+ uuid_(UUID::Random()),
+ data_writer_(OpenDataWriter()) {}
+
+ void WriteHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const Node *node) override;
+
+ DetachedBufferWriter *MakeWriter(const Channel *channel) override;
+
+ void Rotate(const Node *node,
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
+ override;
+
+ DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) override;
+
+ DetachedBufferWriter *MakeForwardedTimestampWriter(
+ const Channel * /*channel*/, const Node * /*node*/) override;
+
+ private:
+ // Creates a new data writer with the new part number.
+ std::unique_ptr<DetachedBufferWriter> OpenDataWriter() {
+ return std::make_unique<DetachedBufferWriter>(
+ absl::StrCat(base_name_, ".part", part_number_, ".bfbs"));
+ }
+
+ const std::string base_name_;
+ const UUID uuid_;
+ size_t part_number_ = 0;
+ std::unique_ptr<DetachedBufferWriter> data_writer_;
+};
+
+// Log namer which uses a config and a base name to name a bunch of files.
+class MultiNodeLogNamer : public LogNamer {
+ public:
+ MultiNodeLogNamer(std::string_view base_name,
+ const Configuration *configuration, const Node *node);
+
+ void WriteHeader(
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
+ const Node *node) override;
+
+ void Rotate(const Node *node,
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
+ override;
+
+ DetachedBufferWriter *MakeWriter(const Channel *channel) override;
+
+ DetachedBufferWriter *MakeForwardedTimestampWriter(
+ const Channel *channel, const Node *node) override;
+
+ DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) override;
+
+ private:
+ // Files to write remote data to. We want one per channel. Maps the channel
+ // to the writer, Node, and part number.
+ struct DataWriter {
+ std::unique_ptr<DetachedBufferWriter> writer = nullptr;
+ const Node *node;
+ size_t part_number = 0;
+ UUID uuid = UUID::Random();
+ std::function<void(const Channel *, DataWriter *)> rotate;
+ };
+
+ // Opens up a writer for timestamps forwarded back.
+ void OpenForwardedTimestampWriter(const Channel *channel,
+ DataWriter *data_writer);
+
+ // Opens up a writer for remote data.
+ void OpenWriter(const Channel *channel, DataWriter *data_writer);
+
+ // Opens the main data writer file for this node responsible for data_writer_.
+ std::unique_ptr<DetachedBufferWriter> OpenDataWriter();
+
+ const std::string base_name_;
+ const Configuration *const configuration_;
+ const UUID uuid_;
+
+ size_t part_number_ = 0;
+
+ // File to write both delivery timestamps and local data to.
+ std::unique_ptr<DetachedBufferWriter> data_writer_;
+
+ std::map<const Channel *, DataWriter> data_writers_;
+};
+
+} // namespace logger
+} // namespace aos
+
+#endif // AOS_EVENTS_LOGGING_LOG_NAMER_H_
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 938ede7..7f53577 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -1318,23 +1318,6 @@
channel_heap_.erase(channel_iterator);
std::make_heap(channel_heap_.begin(), channel_heap_.end(),
ChannelHeapCompare);
-
- if (timestamp_mergers_[channel_index].has_timestamps()) {
- const auto timestamp_iterator = std::find_if(
- timestamp_heap_.begin(), timestamp_heap_.end(),
- [channel_index](const std::pair<monotonic_clock::time_point, int> x) {
- return x.second == channel_index;
- });
- DCHECK(timestamp_iterator != timestamp_heap_.end());
- if (std::get<0>(*timestamp_iterator) == timestamp) {
- // It's already in the heap, in the correct spot, so nothing
- // more for us to do here.
- return;
- }
- timestamp_heap_.erase(timestamp_iterator);
- std::make_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
- ChannelHeapCompare);
- }
}
if (timestamp == monotonic_clock::min_time) {
@@ -1348,37 +1331,18 @@
// put the oldest message first.
std::push_heap(channel_heap_.begin(), channel_heap_.end(),
ChannelHeapCompare);
-
- if (timestamp_mergers_[channel_index].has_timestamps()) {
- timestamp_heap_.push_back(std::make_pair(timestamp, channel_index));
- std::push_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
- ChannelHeapCompare);
- }
}
void ChannelMerger::VerifyHeaps() {
- {
- std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap =
- channel_heap_;
- std::make_heap(channel_heap.begin(), channel_heap.end(),
- &ChannelHeapCompare);
+ std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap =
+ channel_heap_;
+ std::make_heap(channel_heap.begin(), channel_heap.end(), &ChannelHeapCompare);
- for (size_t i = 0; i < channel_heap_.size(); ++i) {
- CHECK(channel_heap_[i] == channel_heap[i]) << ": Heaps diverged...";
- CHECK_EQ(std::get<0>(channel_heap[i]),
- timestamp_mergers_[std::get<1>(channel_heap[i])]
- .channel_merger_time());
- }
- }
- {
- std::vector<std::pair<monotonic_clock::time_point, int>> timestamp_heap =
- timestamp_heap_;
- std::make_heap(timestamp_heap.begin(), timestamp_heap.end(),
- &ChannelHeapCompare);
-
- for (size_t i = 0; i < timestamp_heap_.size(); ++i) {
- CHECK(timestamp_heap_[i] == timestamp_heap[i]) << ": Heaps diverged...";
- }
+ for (size_t i = 0; i < channel_heap_.size(); ++i) {
+ CHECK(channel_heap_[i] == channel_heap[i]) << ": Heaps diverged...";
+ CHECK_EQ(
+ std::get<0>(channel_heap[i]),
+ timestamp_mergers_[std::get<1>(channel_heap[i])].channel_merger_time());
}
}
@@ -1397,17 +1361,6 @@
TimestampMerger *merger = ×tamp_mergers_[channel_index];
- if (merger->has_timestamps()) {
- CHECK_GT(timestamp_heap_.size(), 0u);
- std::pair<monotonic_clock::time_point, int> oldest_timestamp_data =
- timestamp_heap_.front();
- CHECK(oldest_timestamp_data == oldest_channel_data)
- << ": Timestamp heap out of sync.";
- std::pop_heap(timestamp_heap_.begin(), timestamp_heap_.end(),
- &ChannelHeapCompare);
- timestamp_heap_.pop_back();
- }
-
// Merger handles any queueing needed from here.
std::tuple<TimestampMerger::DeliveryTimestamp,
FlatbufferVector<MessageHeader>>
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index 87ea229..9534860 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -651,10 +651,6 @@
// A heap of the channel readers and timestamps for the oldest data in each.
std::vector<std::pair<monotonic_clock::time_point, int>> channel_heap_;
- // A heap of just the timestamp channel readers and timestamps for the oldest
- // data in each.
- // TODO(austin): I think this is no longer used and can be removed (!)
- std::vector<std::pair<monotonic_clock::time_point, int>> timestamp_heap_;
// Configured node.
const Node *node_;
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index 637d1ae..caccf03 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -35,67 +35,29 @@
namespace logger {
namespace chrono = std::chrono;
-void LogNamer::UpdateHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const UUID &uuid, int parts_index) {
- header->mutable_message()->mutate_parts_index(parts_index);
- CHECK_EQ(uuid.string_view().size(),
- header->mutable_message()->mutable_parts_uuid()->size());
- std::copy(uuid.string_view().begin(), uuid.string_view().end(),
- reinterpret_cast<char *>(
- header->mutable_message()->mutable_parts_uuid()->Data()));
-}
-
-void MultiNodeLogNamer::WriteHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const Node *node) {
- if (node == this->node()) {
- UpdateHeader(header, uuid_, part_number_);
- data_writer_->WriteSizedFlatbuffer(header->full_span());
- } else {
- for (std::pair<const Channel *const, DataWriter> &data_writer :
- data_writers_) {
- if (node == data_writer.second.node) {
- UpdateHeader(header, data_writer.second.uuid,
- data_writer.second.part_number);
- data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
- }
- }
- }
-}
-
-void MultiNodeLogNamer::Rotate(
- const Node *node,
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) {
- if (node == this->node()) {
- ++part_number_;
- *data_writer_ = std::move(*OpenDataWriter());
- UpdateHeader(header, uuid_, part_number_);
- data_writer_->WriteSizedFlatbuffer(header->full_span());
- } else {
- for (std::pair<const Channel *const, DataWriter> &data_writer :
- data_writers_) {
- if (node == data_writer.second.node) {
- ++data_writer.second.part_number;
- data_writer.second.rotate(data_writer.first, &data_writer.second);
- UpdateHeader(header, data_writer.second.uuid,
- data_writer.second.part_number);
- data_writer.second.writer->WriteSizedFlatbuffer(header->full_span());
- }
- }
- }
-}
Logger::Logger(std::string_view base_name, EventLoop *event_loop,
std::chrono::milliseconds polling_period)
+ : Logger(base_name, event_loop, event_loop->configuration(),
+ polling_period) {}
+Logger::Logger(std::string_view base_name, EventLoop *event_loop,
+ const Configuration *configuration,
+ std::chrono::milliseconds polling_period)
: Logger(std::make_unique<LocalLogNamer>(base_name, event_loop->node()),
- event_loop, polling_period) {}
+ event_loop, configuration, polling_period) {}
+Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
+ std::chrono::milliseconds polling_period)
+ : Logger(std::move(log_namer), event_loop, event_loop->configuration(),
+ polling_period) {}
Logger::Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
+ const Configuration *configuration,
std::chrono::milliseconds polling_period)
: event_loop_(event_loop),
uuid_(UUID::Random()),
log_namer_(std::move(log_namer)),
+ configuration_(configuration),
+ name_(network::GetHostname()),
timer_handler_(event_loop_->AddTimer([this]() { DoLogData(); })),
polling_period_(polling_period),
server_statistics_fetcher_(
@@ -108,14 +70,14 @@
// Find all the nodes which are logging timestamps on our node.
std::set<const Node *> timestamp_logger_nodes;
- for (const Channel *channel : *event_loop_->configuration()->channels()) {
+ for (const Channel *channel : *configuration_->channels()) {
if (!configuration::ChannelIsSendableOnNode(channel, event_loop_->node()) ||
!channel->has_destination_nodes()) {
continue;
}
for (const Connection *connection : *channel->destination_nodes()) {
const Node *other_node = configuration::GetNode(
- event_loop_->configuration(), connection->name()->string_view());
+ configuration_, connection->name()->string_view());
if (configuration::ConnectionDeliveryTimeIsLoggedOnNode(
connection, event_loop_->node())) {
@@ -132,7 +94,7 @@
// for them.
for (const Node *node : timestamp_logger_nodes) {
const Channel *channel = configuration::GetChannel(
- event_loop_->configuration(),
+ configuration_,
absl::StrCat("/aos/remote_timestamps/", node->name()->string_view()),
logger::MessageHeader::GetFullyQualifiedName(), event_loop_->name(),
event_loop_->node());
@@ -146,9 +108,16 @@
}
const size_t our_node_index = configuration::GetNodeIndex(
- event_loop_->configuration(), event_loop_->node());
+ configuration_, event_loop_->node());
- for (const Channel *channel : *event_loop_->configuration()->channels()) {
+ for (const Channel *config_channel : *configuration_->channels()) {
+ // The MakeRawFetcher method needs a channel which is in the event loop
+ // configuration() object, not the configuration_ object. Go look that up
+ // from the config.
+ const Channel *channel = aos::configuration::GetChannel(
+ event_loop_->configuration(), config_channel->name()->string_view(),
+ config_channel->type()->string_view(), "", event_loop_->node());
+
FetcherStruct fs;
fs.node_index = our_node_index;
const bool is_local =
@@ -195,8 +164,8 @@
<< configuration::CleanedChannelToString(channel);
fs.contents_writer =
log_namer_->MakeForwardedTimestampWriter(channel, timestamp_node);
- fs.node_index = configuration::GetNodeIndex(
- event_loop_->configuration(), timestamp_node);
+ fs.node_index =
+ configuration::GetNodeIndex(configuration_, timestamp_node);
}
fs.channel_index = channel_index;
fs.written = false;
@@ -205,13 +174,13 @@
++channel_index;
}
- node_state_.resize(configuration::MultiNode(event_loop_->configuration())
- ? event_loop_->configuration()->nodes()->size()
+ node_state_.resize(configuration::MultiNode(configuration_)
+ ? configuration_->nodes()->size()
: 1u);
for (const Node *node : log_namer_->nodes()) {
const int node_index =
- configuration::GetNodeIndex(event_loop_->configuration(), node);
+ configuration::GetNodeIndex(configuration_, node);
node_state_[node_index].log_file_header = MakeHeader(node);
}
@@ -222,6 +191,14 @@
event_loop_->OnRun([this]() { StartLogging(); });
}
+Logger::~Logger() {
+ // If we are replaying a log file, or in simulation, we want to force the last
+ // bit of data to be logged. The easiest way to deal with this is to poll
+ // everything as we go to destroy the class, ie, shut down the logger, and
+ // write it to disk.
+ DoLogData();
+}
+
void Logger::StartLogging() {
// Grab data from each channel right before we declare the log file started
// so we can capture the latest message on each channel. This lets us have
@@ -245,7 +222,7 @@
}
void Logger::WriteHeader() {
- if (configuration::MultiNode(event_loop_->configuration())) {
+ if (configuration::MultiNode(configuration_)) {
server_statistics_fetcher_.Fetch();
}
@@ -262,7 +239,7 @@
for (const Node *node : log_namer_->nodes()) {
const int node_index =
- configuration::GetNodeIndex(event_loop_->configuration(), node);
+ configuration::GetNodeIndex(configuration_, node);
MaybeUpdateTimestamp(node, node_index, monotonic_start_time,
realtime_start_time);
log_namer_->WriteHeader(&node_state_[node_index].log_file_header, node);
@@ -270,7 +247,7 @@
}
void Logger::WriteMissingTimestamps() {
- if (configuration::MultiNode(event_loop_->configuration())) {
+ if (configuration::MultiNode(configuration_)) {
server_statistics_fetcher_.Fetch();
} else {
return;
@@ -282,7 +259,7 @@
for (const Node *node : log_namer_->nodes()) {
const int node_index =
- configuration::GetNodeIndex(event_loop_->configuration(), node);
+ configuration::GetNodeIndex(configuration_, node);
if (MaybeUpdateTimestamp(
node, node_index,
server_statistics_fetcher_.context().monotonic_event_time,
@@ -324,7 +301,7 @@
monotonic_clock::min_time) {
return false;
}
- if (configuration::MultiNode(event_loop_->configuration())) {
+ if (configuration::MultiNode(configuration_)) {
if (event_loop_->node() == node) {
// There are no offsets to compute for ourself, so always succeed.
SetStartTime(node_index, monotonic_start_time, realtime_start_time);
@@ -378,10 +355,10 @@
// TODO(austin): Compress this much more efficiently. There are a bunch of
// duplicated schemas.
flatbuffers::Offset<aos::Configuration> configuration_offset =
- CopyFlatBuffer(event_loop_->configuration(), &fbb);
+ CopyFlatBuffer(configuration_, &fbb);
flatbuffers::Offset<flatbuffers::String> name_offset =
- fbb.CreateString(network::GetHostname());
+ fbb.CreateString(name_);
flatbuffers::Offset<flatbuffers::String> logger_uuid_offset =
fbb.CreateString(uuid_.string_view());
@@ -391,7 +368,7 @@
flatbuffers::Offset<Node> node_offset;
- if (configuration::MultiNode(event_loop_->configuration())) {
+ if (configuration::MultiNode(configuration_)) {
node_offset = CopyFlatBuffer(node, &fbb);
}
@@ -437,7 +414,7 @@
void Logger::Rotate() {
for (const Node *node : log_namer_->nodes()) {
const int node_index =
- configuration::GetNodeIndex(event_loop_->configuration(), node);
+ configuration::GetNodeIndex(configuration_, node);
log_namer_->Rotate(node, &node_state_[node_index].log_file_header);
}
}
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index c4ce769..32c2022 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -12,6 +12,7 @@
#include "absl/types/span.h"
#include "aos/events/event_loop.h"
#include "aos/events/logging/eigen_mpq.h"
+#include "aos/events/logging/log_namer.h"
#include "aos/events/logging/logfile_utils.h"
#include "aos/events/logging/logger_generated.h"
#include "aos/events/logging/uuid.h"
@@ -25,272 +26,38 @@
namespace aos {
namespace logger {
-class LogNamer {
- public:
- LogNamer(const Node *node) : node_(node) { nodes_.emplace_back(node_); }
- virtual ~LogNamer() {}
-
- virtual void WriteHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const Node *node) = 0;
- virtual DetachedBufferWriter *MakeWriter(const Channel *channel) = 0;
-
- virtual DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) = 0;
- virtual DetachedBufferWriter *MakeForwardedTimestampWriter(
- const Channel *channel, const Node *node) = 0;
- virtual void Rotate(
- const Node *node,
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header) = 0;
- const std::vector<const Node *> &nodes() const { return nodes_; }
-
- const Node *node() const { return node_; }
-
- protected:
- void UpdateHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const UUID &uuid, int part_id);
-
- const Node *const node_;
- std::vector<const Node *> nodes_;
-};
-
-class LocalLogNamer : public LogNamer {
- public:
- LocalLogNamer(std::string_view base_name, const Node *node)
- : LogNamer(node),
- base_name_(base_name),
- uuid_(UUID::Random()),
- data_writer_(OpenDataWriter()) {}
-
- void WriteHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const Node *node) override {
- CHECK_EQ(node, this->node());
- UpdateHeader(header, uuid_, part_number_);
- data_writer_->WriteSizedFlatbuffer(header->full_span());
- }
-
- DetachedBufferWriter *MakeWriter(const Channel *channel) override {
- CHECK(configuration::ChannelIsSendableOnNode(channel, node()));
- return data_writer_.get();
- }
-
- void Rotate(const Node *node,
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
- override {
- CHECK(node == this->node());
- ++part_number_;
- *data_writer_ = std::move(*OpenDataWriter());
- UpdateHeader(header, uuid_, part_number_);
- data_writer_->WriteSizedFlatbuffer(header->full_span());
- }
-
- DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) override {
- CHECK(configuration::ChannelIsReadableOnNode(channel, node_))
- << ": Message is not delivered to this node.";
- CHECK(node_ != nullptr) << ": Can't log timestamps in a single node world";
- CHECK(configuration::ConnectionDeliveryTimeIsLoggedOnNode(channel, node_,
- node_))
- << ": Delivery times aren't logged for this channel on this node.";
- return data_writer_.get();
- }
-
- DetachedBufferWriter *MakeForwardedTimestampWriter(
- const Channel * /*channel*/, const Node * /*node*/) override {
- LOG(FATAL) << "Can't log forwarded timestamps in a singe log file.";
- return nullptr;
- }
-
- private:
- std::unique_ptr<DetachedBufferWriter> OpenDataWriter() {
- return std::make_unique<DetachedBufferWriter>(
- absl::StrCat(base_name_, ".part", part_number_, ".bfbs"));
- }
- const std::string base_name_;
- const UUID uuid_;
- size_t part_number_ = 0;
- std::unique_ptr<DetachedBufferWriter> data_writer_;
-};
-
-// TODO(austin): Split naming files from making files so we can re-use the
-// naming code to predict the log file names for a provided base name.
-class MultiNodeLogNamer : public LogNamer {
- public:
- MultiNodeLogNamer(std::string_view base_name,
- const Configuration *configuration, const Node *node)
- : LogNamer(node),
- base_name_(base_name),
- configuration_(configuration),
- uuid_(UUID::Random()),
- data_writer_(OpenDataWriter()) {}
-
- // Writes the header to all log files for a specific node. This function
- // needs to be called after all the writers are created.
- void WriteHeader(
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header,
- const Node *node) override;
-
- void Rotate(const Node *node,
- aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> *header)
- override;
-
- // Makes a data logger for a specific channel.
- DetachedBufferWriter *MakeWriter(const Channel *channel) override {
- // See if we can read the data on this node at all.
- const bool is_readable =
- configuration::ChannelIsReadableOnNode(channel, this->node());
- if (!is_readable) {
- return nullptr;
- }
-
- // Then, see if we are supposed to log the data here.
- const bool log_message =
- configuration::ChannelMessageIsLoggedOnNode(channel, this->node());
-
- if (!log_message) {
- return nullptr;
- }
-
- // Now, sort out if this is data generated on this node, or not. It is
- // generated if it is sendable on this node.
- if (configuration::ChannelIsSendableOnNode(channel, this->node())) {
- return data_writer_.get();
- }
-
- // Ok, we have data that is being forwarded to us that we are supposed to
- // log. It needs to be logged with send timestamps, but be sorted enough
- // to be able to be processed.
- CHECK(data_writers_.find(channel) == data_writers_.end());
-
- // Track that this node is being logged.
- const Node *source_node = configuration::GetNode(
- configuration_, channel->source_node()->string_view());
-
- if (std::find(nodes_.begin(), nodes_.end(), source_node) == nodes_.end()) {
- nodes_.emplace_back(source_node);
- }
-
- DataWriter data_writer;
- data_writer.node = source_node;
- data_writer.rotate = [this](const Channel *channel,
- DataWriter *data_writer) {
- OpenWriter(channel, data_writer);
- };
- data_writer.rotate(channel, &data_writer);
-
- return data_writers_.insert(std::make_pair(channel, std::move(data_writer)))
- .first->second.writer.get();
- }
-
- DetachedBufferWriter *MakeForwardedTimestampWriter(
- const Channel *channel, const Node *node) override {
- // See if we can read the data on this node at all.
- const bool is_readable =
- configuration::ChannelIsReadableOnNode(channel, this->node());
- CHECK(is_readable) << ": "
- << configuration::CleanedChannelToString(channel);
-
- CHECK(data_writers_.find(channel) == data_writers_.end());
-
- if (std::find(nodes_.begin(), nodes_.end(), node) == nodes_.end()) {
- nodes_.emplace_back(node);
- }
-
- DataWriter data_writer;
- data_writer.node = node;
- data_writer.rotate = [this](const Channel *channel,
- DataWriter *data_writer) {
- OpenForwardedTimestampWriter(channel, data_writer);
- };
- data_writer.rotate(channel, &data_writer);
-
- return data_writers_.insert(std::make_pair(channel, std::move(data_writer)))
- .first->second.writer.get();
- }
-
- // Makes a timestamp (or timestamp and data) logger for a channel and
- // forwarding connection.
- DetachedBufferWriter *MakeTimestampWriter(const Channel *channel) override {
- const bool log_delivery_times =
- (this->node() == nullptr)
- ? false
- : configuration::ConnectionDeliveryTimeIsLoggedOnNode(
- channel, this->node(), this->node());
- if (!log_delivery_times) {
- return nullptr;
- }
-
- return data_writer_.get();
- }
-
- const std::vector<const Node *> &nodes() const { return nodes_; }
-
- private:
- // Files to write remote data to. We want one per channel. Maps the channel
- // to the writer, Node, and part number.
- struct DataWriter {
- std::unique_ptr<DetachedBufferWriter> writer = nullptr;
- const Node *node;
- size_t part_number = 0;
- UUID uuid = UUID::Random();
- std::function<void(const Channel *, DataWriter *)> rotate;
- };
-
- void OpenForwardedTimestampWriter(const Channel *channel,
- DataWriter *data_writer) {
- std::string filename =
- absl::StrCat(base_name_, "_timestamps", channel->name()->string_view(),
- "/", channel->type()->string_view(), ".part",
- data_writer->part_number, ".bfbs");
-
- if (!data_writer->writer) {
- data_writer->writer = std::make_unique<DetachedBufferWriter>(filename);
- } else {
- *data_writer->writer = DetachedBufferWriter(filename);
- }
- }
-
- void OpenWriter(const Channel *channel, DataWriter *data_writer) {
- const std::string filename = absl::StrCat(
- base_name_, "_", channel->source_node()->string_view(), "_data",
- channel->name()->string_view(), "/", channel->type()->string_view(),
- ".part", data_writer->part_number, ".bfbs");
- if (!data_writer->writer) {
- data_writer->writer = std::make_unique<DetachedBufferWriter>(filename);
- } else {
- *data_writer->writer = DetachedBufferWriter(filename);
- }
- }
-
- std::unique_ptr<DetachedBufferWriter> OpenDataWriter() {
- return std::make_unique<DetachedBufferWriter>(
- absl::StrCat(base_name_, "_", node()->name()->string_view(),
- "_data.part", part_number_, ".bfbs"));
- }
-
- const std::string base_name_;
- const Configuration *const configuration_;
- const UUID uuid_;
-
- size_t part_number_ = 0;
-
- // File to write both delivery timestamps and local data to.
- std::unique_ptr<DetachedBufferWriter> data_writer_;
-
- std::map<const Channel *, DataWriter> data_writers_;
-};
-
// Logs all channels available in the event loop to disk every 100 ms.
// Start by logging one message per channel to capture any state and
// configuration that is sent rately on a channel and would affect execution.
class Logger {
public:
+ // Constructs a logger.
+ // base_name/log_namer: Object used to write data to disk in one or more log
+ // files. If a base_name is passed in, a LocalLogNamer is wrapped
+ // around it.
+ // event_loop: The event loop used to read the messages.
+ // polling_period: The period used to poll the data.
+ // configuration: When provided, this is the configuration to log, and the
+ // configuration to use for the channel list to log. If not provided,
+ // this becomes the configuration from the event loop.
Logger(std::string_view base_name, EventLoop *event_loop,
std::chrono::milliseconds polling_period =
std::chrono::milliseconds(100));
+ Logger(std::string_view base_name, EventLoop *event_loop,
+ const Configuration *configuration,
+ std::chrono::milliseconds polling_period =
+ std::chrono::milliseconds(100));
Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
std::chrono::milliseconds polling_period =
std::chrono::milliseconds(100));
+ Logger(std::unique_ptr<LogNamer> log_namer, EventLoop *event_loop,
+ const Configuration *configuration,
+ std::chrono::milliseconds polling_period =
+ std::chrono::milliseconds(100));
+ ~Logger();
+
+ // Overrides the name in the log file header.
+ void set_name(std::string_view name) { name_ = name; }
// Rotates the log file(s), triggering new part files to be written for each
// log file.
@@ -319,6 +86,12 @@
const UUID uuid_;
std::unique_ptr<LogNamer> log_namer_;
+ // The configuration to place at the top of the log file.
+ const Configuration *configuration_;
+
+ // Name to save in the log file. Defaults to hostname.
+ std::string name_;
+
// Structure to track both a fetcher, and if the data fetched has been
// written. We may want to delay writing data to disk so that we don't let
// data get too far out of order when written to disk so we can avoid making
@@ -454,7 +227,11 @@
// gets called.
void Deregister();
- // Returns the configuration from the log file.
+ // Returns the configuration being used for replay from the log file.
+ // Note that this may be different from the configuration actually used for
+ // handling events. You should generally only use this to create a
+ // SimulatedEventLoopFactory, and then get the configuration from there for
+ // everything else.
const Configuration *logged_configuration() const;
// Returns the configuration being used for replay.
// The pointer is invalidated whenever RemapLoggedChannel is called.
@@ -496,6 +273,10 @@
return &log_file_header_.message();
}
+ std::string_view name() const {
+ return log_file_header()->name()->string_view();
+ }
+
private:
const Channel *RemapChannel(const EventLoop *event_loop,
const Channel *channel);
diff --git a/aos/events/simulated_event_loop.cc b/aos/events/simulated_event_loop.cc
index b2e54bb..a7301bf 100644
--- a/aos/events/simulated_event_loop.cc
+++ b/aos/events/simulated_event_loop.cc
@@ -741,20 +741,27 @@
DoCallCallback([monotonic_now]() { return monotonic_now; }, context);
msgs_.pop_front();
+ if (token_ != scheduler_->InvalidToken()) {
+ scheduler_->Deschedule(token_);
+ token_ = scheduler_->InvalidToken();
+ }
if (msgs_.size() != 0) {
event_.set_event_time(msgs_.front()->context.monotonic_event_time);
simulated_event_loop_->AddEvent(&event_);
DoSchedule(event_.event_time());
- } else {
- token_ = scheduler_->InvalidToken();
}
}
void SimulatedWatcher::DoSchedule(monotonic_clock::time_point event_time) {
- token_ =
- scheduler_->Schedule(event_time + simulated_event_loop_->send_delay(),
- [this]() { simulated_event_loop_->HandleEvent(); });
+ CHECK(token_ == scheduler_->InvalidToken())
+ << ": May not schedule multiple times";
+ token_ = scheduler_->Schedule(
+ event_time + simulated_event_loop_->send_delay(), [this]() {
+ DCHECK(token_ != scheduler_->InvalidToken());
+ token_ = scheduler_->InvalidToken();
+ simulated_event_loop_->HandleEvent();
+ });
}
void SimulatedChannel::MakeRawWatcher(SimulatedWatcher *watcher) {
@@ -817,13 +824,11 @@
simulated_event_loop_->monotonic_now();
base_ = base;
repeat_offset_ = repeat_offset;
- if (base < monotonic_now) {
- token_ = scheduler_->Schedule(
- monotonic_now, [this]() { simulated_event_loop_->HandleEvent(); });
- } else {
- token_ = scheduler_->Schedule(
- base, [this]() { simulated_event_loop_->HandleEvent(); });
- }
+ token_ = scheduler_->Schedule(std::max(base, monotonic_now), [this]() {
+ DCHECK(token_ != scheduler_->InvalidToken());
+ token_ = scheduler_->InvalidToken();
+ simulated_event_loop_->HandleEvent();
+ });
event_.set_event_time(base_);
simulated_event_loop_->AddEvent(&event_);
}
@@ -835,15 +840,20 @@
if (simulated_event_loop_->log_impl_ != nullptr) {
logging::SetImplementation(simulated_event_loop_->log_impl_);
}
+ if (token_ != scheduler_->InvalidToken()) {
+ scheduler_->Deschedule(token_);
+ token_ = scheduler_->InvalidToken();
+ }
if (repeat_offset_ != ::aos::monotonic_clock::zero()) {
// Reschedule.
while (base_ <= monotonic_now) base_ += repeat_offset_;
- token_ = scheduler_->Schedule(
- base_, [this]() { simulated_event_loop_->HandleEvent(); });
+ token_ = scheduler_->Schedule(base_, [this]() {
+ DCHECK(token_ != scheduler_->InvalidToken());
+ token_ = scheduler_->InvalidToken();
+ simulated_event_loop_->HandleEvent();
+ });
event_.set_event_time(base_);
simulated_event_loop_->AddEvent(&event_);
- } else {
- token_ = scheduler_->InvalidToken();
}
Call([monotonic_now]() { return monotonic_now; }, monotonic_now);
@@ -889,8 +899,15 @@
void SimulatedPhasedLoopHandler::Schedule(
monotonic_clock::time_point sleep_time) {
- token_ = scheduler_->Schedule(
- sleep_time, [this]() { simulated_event_loop_->HandleEvent(); });
+ if (token_ != scheduler_->InvalidToken()) {
+ scheduler_->Deschedule(token_);
+ token_ = scheduler_->InvalidToken();
+ }
+ token_ = scheduler_->Schedule(sleep_time, [this]() {
+ DCHECK(token_ != scheduler_->InvalidToken());
+ token_ = scheduler_->InvalidToken();
+ simulated_event_loop_->HandleEvent();
+ });
event_.set_event_time(sleep_time);
simulated_event_loop_->AddEvent(&event_);
}
diff --git a/aos/ipc_lib/signalfd.cc b/aos/ipc_lib/signalfd.cc
index 363bf4a..051a654 100644
--- a/aos/ipc_lib/signalfd.cc
+++ b/aos/ipc_lib/signalfd.cc
@@ -2,6 +2,9 @@
#include <signal.h>
#include <sys/types.h>
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
#include <unistd.h>
#include <initializer_list>
@@ -9,6 +12,50 @@
namespace aos {
namespace ipc_lib {
+namespace {
+
+// Wrapper which propagates msan information.
+// TODO(Brian): Drop this once we have <https://reviews.llvm.org/D82411> to
+// intercept this function natively.
+int wrapped_sigandset(sigset_t *dest, const sigset_t *left,
+ const sigset_t *right) {
+#if __has_feature(memory_sanitizer)
+ if (left) {
+ __msan_check_mem_is_initialized(left, sizeof(*left));
+ }
+ if (right) {
+ __msan_check_mem_is_initialized(right, sizeof(*right));
+ }
+#endif
+ const int r = sigandset(dest, left, right);
+#if __has_feature(memory_sanitizer)
+ if (!r && dest) {
+ __msan_unpoison(dest, sizeof(*dest));
+ }
+#endif
+ return r;
+}
+
+// Wrapper which propagates msan information.
+// TODO(Brian): Drop this once we have
+// <https://reviews.llvm.org/rG89ae290b58e20fc5f56b7bfae4b34e7fef06e1b1> to
+// intercept this function natively.
+int wrapped_pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset) {
+#if __has_feature(memory_sanitizer)
+ if (set) {
+ __msan_check_mem_is_initialized(set, sizeof(*set));
+ }
+#endif
+ const int r = pthread_sigmask(how, set, oldset);
+#if __has_feature(memory_sanitizer)
+ if (!r && oldset) {
+ __msan_unpoison(oldset, sizeof(*oldset));
+ }
+#endif
+ return r;
+}
+
+} // namespace
SignalFd::SignalFd(::std::initializer_list<unsigned int> signals) {
// Build up the mask with the provided signals.
@@ -23,7 +70,7 @@
// signalfd gets them. Record which ones we actually blocked, so we can
// unblock just those later.
sigset_t old_mask;
- CHECK_EQ(0, pthread_sigmask(SIG_BLOCK, &blocked_mask_, &old_mask));
+ CHECK_EQ(0, wrapped_pthread_sigmask(SIG_BLOCK, &blocked_mask_, &old_mask));
for (int signal : signals) {
if (sigismember(&old_mask, signal)) {
CHECK_EQ(0, sigdelset(&blocked_mask_, signal));
@@ -35,9 +82,9 @@
// Unwind the constructor. Unblock the signals and close the fd. Verify nobody
// else unblocked the signals we're supposed to unblock in the meantime.
sigset_t old_mask;
- CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &blocked_mask_, &old_mask));
+ CHECK_EQ(0, wrapped_pthread_sigmask(SIG_UNBLOCK, &blocked_mask_, &old_mask));
sigset_t unblocked_mask;
- CHECK_EQ(0, sigandset(&unblocked_mask, &blocked_mask_, &old_mask));
+ CHECK_EQ(0, wrapped_sigandset(&unblocked_mask, &blocked_mask_, &old_mask));
if (memcmp(&unblocked_mask, &blocked_mask_, sizeof(unblocked_mask)) != 0) {
LOG(FATAL) << "Some other code unblocked one or more of our signals";
}
diff --git a/aos/logging/context.cc b/aos/logging/context.cc
index 72c1970..84e4e80 100644
--- a/aos/logging/context.cc
+++ b/aos/logging/context.cc
@@ -4,6 +4,9 @@
#define _GNU_SOURCE /* See feature_test_macros(7) */
#endif
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#endif
#include <string.h>
#include <sys/prctl.h>
#include <sys/types.h>
@@ -16,8 +19,8 @@
extern char *program_invocation_name;
extern char *program_invocation_short_name;
-#include "aos/die.h"
#include "aos/complex_thread_local.h"
+#include "aos/die.h"
namespace aos {
namespace logging {
@@ -39,6 +42,10 @@
if (prctl(PR_GET_NAME, thread_name_array) != 0) {
PDie("prctl(PR_GET_NAME, %p) failed", thread_name_array);
}
+#if __has_feature(memory_sanitizer)
+ // msan doesn't understand PR_GET_NAME, so help it along.
+ __msan_unpoison(thread_name_array, sizeof(thread_name_array));
+#endif
thread_name_array[sizeof(thread_name_array) - 1] = '\0';
::std::string thread_name(thread_name_array);
@@ -67,8 +74,7 @@
::std::atomic<LogImplementation *> global_top_implementation(NULL);
Context::Context()
- : implementation(global_top_implementation.load()),
- sequence(0) {
+ : implementation(global_top_implementation.load()), sequence(0) {
cork_data.Reset();
}
@@ -77,8 +83,7 @@
if (my_context.created()) {
::std::string my_name = GetMyName();
if (my_name.size() + 1 > sizeof(Context::name)) {
- Die("logging: process/thread name '%s' is too long\n",
- my_name.c_str());
+ Die("logging: process/thread name '%s' is too long\n", my_name.c_str());
}
strcpy(my_context->name, my_name.c_str());
my_context->name_size = my_name.size();
@@ -98,9 +103,7 @@
return my_context.get();
}
-void Context::Delete() {
- delete_current_context = true;
-}
+void Context::Delete() { delete_current_context = true; }
} // namespace internal
} // namespace logging
diff --git a/aos/logging/log_message.fbs b/aos/logging/log_message.fbs
index 789724f..7e246cf 100644
--- a/aos/logging/log_message.fbs
+++ b/aos/logging/log_message.fbs
@@ -18,7 +18,7 @@
level:Level (id: 1);
// Pid of the process creating the log message
- source:int (id:2);
+ source_pid:int (id:2);
// Application name
name:string (id:3);
diff --git a/aos/network/BUILD b/aos/network/BUILD
index 69482e1..98a271c 100644
--- a/aos/network/BUILD
+++ b/aos/network/BUILD
@@ -296,8 +296,8 @@
"message_bridge_test.cc",
],
data = [
- ":message_bridge_test_client_config.json",
- ":message_bridge_test_server_config.json",
+ ":message_bridge_test_client_config",
+ ":message_bridge_test_server_config",
],
shard_count = 3,
deps = [
diff --git a/aos/network/www/BUILD b/aos/network/www/BUILD
index 33291b2..60bea09 100644
--- a/aos/network/www/BUILD
+++ b/aos/network/www/BUILD
@@ -106,8 +106,8 @@
":flatbuffers",
":reflection_test.html",
":reflection_test_bundle",
- ":test_config.json",
+ ":test_config",
"//aos/network:web_proxy_main",
- "//y2020:config.json",
+ "//y2020:config",
],
)
diff --git a/aos/realtime.cc b/aos/realtime.cc
index ff05f32..a0af97a 100644
--- a/aos/realtime.cc
+++ b/aos/realtime.cc
@@ -74,10 +74,12 @@
WriteCoreDumps();
PCHECK(mlockall(MCL_CURRENT | MCL_FUTURE) == 0);
+#if !__has_feature(address_sanitizer)
// Don't give freed memory back to the OS.
CHECK_EQ(1, mallopt(M_TRIM_THRESHOLD, -1));
// Don't use mmap for large malloc chunks.
CHECK_EQ(1, mallopt(M_MMAP_MAX, 0));
+#endif
if (&FLAGS_tcmalloc_release_rate) {
// Tell tcmalloc not to return memory.
diff --git a/aos/seasocks/gen_embedded.bzl b/aos/seasocks/gen_embedded.bzl
index 93cdc39..5bdb80d 100644
--- a/aos/seasocks/gen_embedded.bzl
+++ b/aos/seasocks/gen_embedded.bzl
@@ -16,7 +16,7 @@
attrs = {
"srcs": attr.label_list(
mandatory = True,
- non_empty = True,
+ allow_empty = False,
allow_files = True,
),
"_gen_embedded": attr.label(
diff --git a/debian/m4.BUILD b/debian/m4.BUILD
index 0abafda..50f83cd 100644
--- a/debian/m4.BUILD
+++ b/debian/m4.BUILD
@@ -3,3 +3,9 @@
srcs = ["usr/bin/m4"],
visibility = ["//visibility:public"],
)
+
+filegroup(
+ name = "lib",
+ srcs = glob(["usr/lib/**"]),
+ visibility = ["//visibility:public"],
+)
diff --git a/frc971/codelab/BUILD b/frc971/codelab/BUILD
index ac0b80c..ddc5466 100644
--- a/frc971/codelab/BUILD
+++ b/frc971/codelab/BUILD
@@ -7,7 +7,7 @@
name = "basic_test",
testonly = 1,
srcs = ["basic_test.cc"],
- data = [":config.json"],
+ data = [":config"],
deps = [
":basic",
":basic_goal_fbs",
diff --git a/frc971/control_loops/drivetrain/BUILD b/frc971/control_loops/drivetrain/BUILD
index 2e357c7..ed905cf 100644
--- a/frc971/control_loops/drivetrain/BUILD
+++ b/frc971/control_loops/drivetrain/BUILD
@@ -462,7 +462,7 @@
srcs = [
"drivetrain_lib_test.cc",
],
- data = ["simulation_config.json"],
+ data = [":simulation_config"],
defines =
cpu_select({
"amd64": [
diff --git a/frc971/wpilib/BUILD b/frc971/wpilib/BUILD
index 734a231..8010edc 100644
--- a/frc971/wpilib/BUILD
+++ b/frc971/wpilib/BUILD
@@ -175,7 +175,7 @@
"loop_output_handler_test.cc",
],
data = [
- "loop_output_handler_test_config.json",
+ ":loop_output_handler_test_config",
],
deps = [
":loop_output_handler",
diff --git a/third_party/gmp/BUILD b/third_party/gmp/BUILD
index e4f7029..06cfa51 100644
--- a/third_party/gmp/BUILD
+++ b/third_party/gmp/BUILD
@@ -4,6 +4,7 @@
":mpn.bzl",
"architecture_includes",
"config_include_from_architecture",
+ "current_directory",
"file_from_architecture",
"mparam_path",
"mpn_cc_library",
@@ -66,10 +67,10 @@
"-Wno-unused-parameter",
"-Wno-missing-field-initializers",
"-DHAVE_CONFIG_H",
- "-Ithird_party/gmp",
- "-I$(GENDIR)/third_party/gmp",
- "-Ithird_party/gmp/mpn",
- "-I$(GENDIR)/third_party/gmp/mpn",
+ "-I" + current_directory(),
+ "-I$(GENDIR)/" + current_directory(),
+ "-I" + current_directory() + "/mpn",
+ "-I$(GENDIR)/" + current_directory() + "/mpn",
"-Wno-cast-align",
"-Wno-dangling-else",
"-Wno-cast-qual",
@@ -926,17 +927,20 @@
genrule(
name = "call_m4",
- srcs = native.glob([
+ srcs = glob([
"**/*.m4",
"**/*.asm",
]) + ["config.m4"],
outs = ["tests/call.s"],
- cmd = "cd third_party/gmp/tests; ../../../$(location @m4_v1.4.18//:bin) -I ../../../$(GENDIR)/third_party/gmp/tests -DHAVE_CONFIG_H -DPIC " + cpu_select({
+ cmd = "ROOT=$$(pwd); cd ./" + current_directory() + "/tests; LD_LIBRARY_PATH=$${ROOT}/external/m4_v1.4.18/usr/lib/x86_64-linux-gnu/ $${ROOT}/$(location @m4_v1.4.18//:bin) -I $${ROOT}/$(GENDIR)/" + current_directory() + "/tests -DHAVE_CONFIG_H -DPIC " + cpu_select({
"arm": "arm32call.asm",
"amd64": "amd64call.asm",
}) +
- " > ../../../$@",
- tools = ["@m4_v1.4.18//:bin"],
+ " > $${ROOT}/$@",
+ tools = [
+ "@m4_v1.4.18//:bin",
+ "@m4_v1.4.18//:lib",
+ ],
)
cc_library(
diff --git a/third_party/gmp/mpn.bzl b/third_party/gmp/mpn.bzl
index 8f50773..039725f 100644
--- a/third_party/gmp/mpn.bzl
+++ b/third_party/gmp/mpn.bzl
@@ -48,6 +48,9 @@
"sec_pi1_div_r": ["sec_pi1_div"],
}
+def current_directory():
+ return native.package_name()
+
def mpn_cc_library(
name,
srcs,
@@ -60,9 +63,9 @@
hdrs = hdrs,
copts = copts + [
"-DHAVE_CONFIG_H",
- "-Ithird_party/gmp/mpn",
- "-Ithird_party/gmp",
- "-I$(GENDIR)/third_party/gmp",
+ "-I" + current_directory() + "/mpn",
+ "-I" + current_directory(),
+ "-I$(GENDIR)/" + current_directory(),
"-D__GMP_WITHIN_GMP",
"-DOPERATION_" + name,
],
@@ -87,17 +90,19 @@
out = ctx.actions.declare_file("mpn/" + ctx.attr.operation + ".s")
+ ruledir = ctx.label.workspace_root + "/" + ctx.label.package
ctx.actions.run_shell(
inputs = [ctx.files.files[0]] + ctx.files.deps,
outputs = [out],
progress_message = "Generating " + out.short_path,
- tools = [ctx.executable._m4],
- command = "&&".join([
- "cd third_party/gmp/mpn",
- "echo '#define OPERATION_" + ctx.attr.operation + " 1' > ../../../" + out.path,
- "../../../" + ctx.executable._m4.path + " -I ../../../" + ctx.var["GENDIR"] + "/third_party/gmp/mpn/ " +
+ tools = [ctx.executable._m4] + ctx.attr._m4_lib.files.to_list(),
+ command = " && ".join([
+ "ROOT=$(pwd)",
+ "cd ./" + ruledir + "/mpn",
+ "echo '#define OPERATION_" + ctx.attr.operation + " 1' > ${ROOT}/" + out.path,
+ "LD_LIBRARY_PATH=${ROOT}/external/m4_v1.4.18/usr/lib/x86_64-linux-gnu/ ${ROOT}/" + ctx.executable._m4.path + " -I ${ROOT}/" + ctx.var["GENDIR"] + "/" + ruledir + "/mpn" +
" -DHAVE_CONFIG_H -D__GMP_WITHIN_GMP -DOPERATION_" + ctx.attr.operation +
- " -DPIC ../../../" + ctx.files.files[0].path + " >> ../../../" + out.path,
+ " -DPIC ${ROOT}/" + ctx.files.files[0].path + " >> ${ROOT}/" + out.path,
]),
)
@@ -120,6 +125,10 @@
cfg = "host",
executable = True,
),
+ "_m4_lib": attr.label(
+ default = "@m4_v1.4.18//:lib",
+ cfg = "host",
+ ),
},
implementation = _m4_mpn_function_impl,
)
@@ -140,7 +149,7 @@
def architecture_includes(architecture_paths):
result = dict()
for key in architecture_paths:
- result[key] = ["-Ithird_party/gmp/mpn/" + p for p in architecture_paths[key]]
+ result[key] = ["-I" + current_directory() + "/mpn/" + p for p in architecture_paths[key]]
return select(result)
def file_from_architecture(architecture_paths, f):
@@ -152,7 +161,7 @@
def config_include_from_architecture(architecture_paths):
result = dict()
for key in architecture_paths:
- result[key] = ["-Ithird_party/gmp/config/" + architecture_paths[key][0] + "/"]
+ result[key] = ["-I" + current_directory() + "/config/" + architecture_paths[key][0] + "/"]
return select(result)
def mpn_m4_cc_library(name, architecture_paths):
diff --git a/tools/build_rules/BUILD b/tools/build_rules/BUILD
index f96fb3a..8ad7018 100644
--- a/tools/build_rules/BUILD
+++ b/tools/build_rules/BUILD
@@ -3,3 +3,10 @@
srcs = ["quiet_success.sh"],
visibility = ["//visibility:public"],
)
+
+py_binary(
+ name = "jinja2_generator",
+ srcs = ["jinja2_generator.py"],
+ visibility = ["//visibility:public"],
+ deps = ["@python_jinja2"],
+)
diff --git a/y2020/generate_pi_config.py b/tools/build_rules/jinja2_generator.py
similarity index 96%
rename from y2020/generate_pi_config.py
rename to tools/build_rules/jinja2_generator.py
index afd1fca..34f3354 100644
--- a/y2020/generate_pi_config.py
+++ b/tools/build_rules/jinja2_generator.py
@@ -1,7 +1,6 @@
#!/usr/bin/python3
import argparse
-from pathlib import Path
import json
import sys
diff --git a/tools/build_rules/template.bzl b/tools/build_rules/template.bzl
new file mode 100644
index 0000000..d4807e8
--- /dev/null
+++ b/tools/build_rules/template.bzl
@@ -0,0 +1,33 @@
+def _jinja2_template_impl(ctx):
+ out = ctx.actions.declare_file(ctx.attr.name)
+
+ ctx.actions.run_shell(
+ inputs = ctx.files.src,
+ tools = [ctx.executable._jinja2],
+ progress_message = "Generating " + out.short_path,
+ outputs = [out],
+ command = ctx.executable._jinja2.path + " " + ctx.files.src[0].path + " '" + str(ctx.attr.parameters) + "' " + out.path,
+ )
+
+ return [DefaultInfo(files = depset([out])), OutputGroupInfo(out = depset([out]))]
+
+jinja2_template = rule(
+ attrs = {
+ "src": attr.label(
+ mandatory = True,
+ allow_single_file = True,
+ doc = """The jinja2 template file to expand.""",
+ ),
+ "parameters": attr.string_dict(
+ mandatory = True,
+ doc = """The parameters to supply to Jinja2.""",
+ ),
+ "_jinja2": attr.label(
+ default = "//tools/build_rules:jinja2_generator",
+ cfg = "host",
+ executable = True,
+ ),
+ },
+ implementation = _jinja2_template_impl,
+ doc = """Expands a jinja2 template given parameters.""",
+)
diff --git a/y2014/control_loops/claw/BUILD b/y2014/control_loops/claw/BUILD
index 670dd76..2ae59b0 100644
--- a/y2014/control_loops/claw/BUILD
+++ b/y2014/control_loops/claw/BUILD
@@ -83,7 +83,7 @@
srcs = [
"claw_lib_test.cc",
],
- data = ["//y2014:config.json"],
+ data = ["//y2014:config"],
deps = [
":claw_goal_fbs",
":claw_lib",
diff --git a/y2014/control_loops/shooter/BUILD b/y2014/control_loops/shooter/BUILD
index a2073c9..ed95317 100644
--- a/y2014/control_loops/shooter/BUILD
+++ b/y2014/control_loops/shooter/BUILD
@@ -84,7 +84,7 @@
srcs = [
"shooter_lib_test.cc",
],
- data = ["//y2014:config.json"],
+ data = ["//y2014:config"],
deps = [
":shooter_goal_fbs",
":shooter_lib",
diff --git a/y2016/BUILD b/y2016/BUILD
index a96ea5a..190e0e0 100644
--- a/y2016/BUILD
+++ b/y2016/BUILD
@@ -52,7 +52,7 @@
robot_downloader(
data = [
- ":config.json",
+ ":config",
],
dirs = [
"//y2016/dashboard:www_files",
diff --git a/y2016/control_loops/shooter/BUILD b/y2016/control_loops/shooter/BUILD
index 794c4fc..3ffc9cc 100644
--- a/y2016/control_loops/shooter/BUILD
+++ b/y2016/control_loops/shooter/BUILD
@@ -87,7 +87,7 @@
srcs = [
"shooter_lib_test.cc",
],
- data = ["//y2016:config.json"],
+ data = ["//y2016:config"],
deps = [
":shooter_goal_fbs",
":shooter_lib",
diff --git a/y2016/control_loops/superstructure/BUILD b/y2016/control_loops/superstructure/BUILD
index 4048cdc..aecfd98 100644
--- a/y2016/control_loops/superstructure/BUILD
+++ b/y2016/control_loops/superstructure/BUILD
@@ -122,7 +122,7 @@
srcs = [
"superstructure_lib_test.cc",
],
- data = ["//y2016:config.json"],
+ data = ["//y2016:config"],
deps = [
":superstructure_goal_fbs",
":superstructure_lib",
diff --git a/y2017/control_loops/superstructure/BUILD b/y2017/control_loops/superstructure/BUILD
index 535475d..7107e23 100644
--- a/y2017/control_loops/superstructure/BUILD
+++ b/y2017/control_loops/superstructure/BUILD
@@ -73,7 +73,7 @@
srcs = [
"superstructure_lib_test.cc",
],
- data = ["//y2017:config.json"],
+ data = ["//y2017:config"],
deps = [
":superstructure_goal_fbs",
":superstructure_lib",
@@ -128,7 +128,7 @@
srcs = [
"vision_time_adjuster_test.cc",
],
- data = ["//y2017:config.json"],
+ data = ["//y2017:config"],
deps = [
":vision_time_adjuster",
"//aos/events:simulated_event_loop",
diff --git a/y2018/control_loops/superstructure/BUILD b/y2018/control_loops/superstructure/BUILD
index 4e97571..db01298 100644
--- a/y2018/control_loops/superstructure/BUILD
+++ b/y2018/control_loops/superstructure/BUILD
@@ -74,7 +74,7 @@
srcs = [
"superstructure_lib_test.cc",
],
- data = ["//y2018:config.json"],
+ data = ["//y2018:config"],
shard_count = 5,
deps = [
":superstructure_goal_fbs",
diff --git a/y2019/BUILD b/y2019/BUILD
index e286fc7..1d8b996 100644
--- a/y2019/BUILD
+++ b/y2019/BUILD
@@ -5,7 +5,7 @@
robot_downloader(
data = [
- ":config.json",
+ ":config",
],
dirs = [
"//y2019/vision/server:www_files",
diff --git a/y2019/control_loops/drivetrain/BUILD b/y2019/control_loops/drivetrain/BUILD
index cd3a793..c500ce6 100644
--- a/y2019/control_loops/drivetrain/BUILD
+++ b/y2019/control_loops/drivetrain/BUILD
@@ -127,7 +127,7 @@
cc_test(
name = "target_selector_test",
srcs = ["target_selector_test.cc"],
- data = ["//y2019:config.json"],
+ data = ["//y2019:config"],
deps = [
":target_selector",
"//aos/events:simulated_event_loop",
@@ -192,7 +192,7 @@
cc_test(
name = "localized_drivetrain_test",
srcs = ["localized_drivetrain_test.cc"],
- data = [":simulation_config.json"],
+ data = [":simulation_config"],
deps = [
":camera_fbs",
":drivetrain_base",
@@ -210,7 +210,7 @@
cc_binary(
name = "drivetrain_replay",
srcs = ["drivetrain_replay.cc"],
- data = ["//y2019:config.json"],
+ data = ["//y2019:config"],
deps = [
":drivetrain_base",
":event_loop_localizer",
diff --git a/y2019/control_loops/superstructure/BUILD b/y2019/control_loops/superstructure/BUILD
index ed0c916..c352ba6 100644
--- a/y2019/control_loops/superstructure/BUILD
+++ b/y2019/control_loops/superstructure/BUILD
@@ -75,7 +75,7 @@
"superstructure_lib_test.cc",
],
data = [
- "//y2019:config.json",
+ "//y2019:config",
],
deps = [
":superstructure_goal_fbs",
diff --git a/y2020/BUILD b/y2020/BUILD
index 918ae1d..38832b8 100644
--- a/y2020/BUILD
+++ b/y2020/BUILD
@@ -2,14 +2,14 @@
load("//aos:config.bzl", "aos_config")
load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
-load("//y2020:config_gen.bzl", "generate_pi_config")
+load("//tools/build_rules:template.bzl", "jinja2_template")
robot_downloader(
binaries = [
":setpoint_setter",
],
data = [
- ":config.json",
+ ":config",
],
start_binaries = [
"//aos/events/logging:logger_main",
@@ -29,7 +29,7 @@
"//y2020/vision:viewer",
],
data = [
- ":config.json",
+ ":config",
],
dirs = [
"//y2020/www:www_files",
@@ -182,11 +182,30 @@
"pi2",
"pi3",
"pi4",
- "laptop",
]
]
aos_config(
+ name = "config_laptop",
+ src = "y2020_laptop.json",
+ flatbuffers = [
+ "//aos/network:message_bridge_client_fbs",
+ "//aos/network:message_bridge_server_fbs",
+ "//aos/network:timestamp_fbs",
+ "//y2020/vision/sift:sift_fbs",
+ "//y2020/vision/sift:sift_training_fbs",
+ "//y2020/vision:vision_fbs",
+ "//aos/events/logging:logger_fbs",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//aos/events:config",
+ "//aos/robot_state:config",
+ "//frc971/control_loops/drivetrain:config",
+ ],
+)
+
+aos_config(
name = "config_roborio",
src = "y2020_roborio.json",
flatbuffers = [
@@ -219,7 +238,7 @@
name = "web_proxy",
srcs = ["web_proxy.sh"],
data = [
- ":config.json",
+ ":config",
"//aos/network:web_proxy_main",
"//y2020/www:camera_main_bundle",
"//y2020/www:field_main_bundle",
@@ -248,10 +267,11 @@
],
)
-py_binary(
- name = "generate_pi_config",
- srcs = ["generate_pi_config.py"],
- deps = ["@python_jinja2"],
-)
-
-[generate_pi_config(num) for num in range(1, 5)]
+[
+ jinja2_template(
+ name = "y2020_pi" + str(num) + ".json",
+ src = "y2020_pi_template.json",
+ parameters = {"NUM": str(num)},
+ )
+ for num in range(1, 5)
+]
diff --git a/y2020/control_loops/drivetrain/BUILD b/y2020/control_loops/drivetrain/BUILD
index 0e06057..7e2ba18 100644
--- a/y2020/control_loops/drivetrain/BUILD
+++ b/y2020/control_loops/drivetrain/BUILD
@@ -122,7 +122,7 @@
cc_test(
name = "localizer_test",
srcs = ["localizer_test.cc"],
- data = [":simulation_config.json"],
+ data = [":simulation_config"],
deps = [
":drivetrain_base",
":localizer",
@@ -140,7 +140,7 @@
name = "drivetrain_replay_test",
srcs = ["drivetrain_replay_test.cc"],
data = [
- ":replay_config.json",
+ ":replay_config",
"@drivetrain_replay//file:spinning_wheels_while_still.bfbs",
],
deps = [
@@ -160,7 +160,7 @@
cc_binary(
name = "drivetrain_replay",
srcs = ["drivetrain_replay.cc"],
- data = ["//y2020:config.json"],
+ data = ["//y2020:config"],
deps = [
":drivetrain_base",
":localizer",
diff --git a/y2020/control_loops/superstructure/BUILD b/y2020/control_loops/superstructure/BUILD
index 4534404..e97fb12 100644
--- a/y2020/control_loops/superstructure/BUILD
+++ b/y2020/control_loops/superstructure/BUILD
@@ -88,7 +88,7 @@
"superstructure_lib_test.cc",
],
data = [
- "//y2020:config.json",
+ "//y2020:config",
],
deps = [
":superstructure_goal_fbs",
diff --git a/y2020/vision/BUILD b/y2020/vision/BUILD
index 8b73691..aaad085 100644
--- a/y2020/vision/BUILD
+++ b/y2020/vision/BUILD
@@ -32,7 +32,7 @@
"camera_reader.cc",
],
data = [
- "//y2020:config.json",
+ "//y2020:config",
],
restricted_to = [
"//tools:k8",
@@ -66,7 +66,7 @@
"viewer.cc",
],
data = [
- "//y2020:config.json",
+ "//y2020:config",
],
restricted_to = [
"//tools:k8",
@@ -88,7 +88,7 @@
"calibration.cc",
],
data = [
- "//y2020:config.json",
+ "//y2020:config",
],
restricted_to = [
"//tools:k8",
@@ -115,7 +115,7 @@
"viewer_replay.cc",
],
data = [
- "//y2020:config.json",
+ "//y2020:config",
],
restricted_to = [
"//tools:k8",