Merge "Improve log_cats raw and fetching capabilities"
diff --git a/WORKSPACE b/WORKSPACE
index 9f087bc..168e690 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -60,11 +60,11 @@
)
load(
"//debian:gstreamer_amd64.bzl",
- gstreamer_amd64_debs = "files"
+ gstreamer_amd64_debs = "files",
)
load(
"//debian:gstreamer_armhf.bzl",
- gstreamer_armhf_debs = "files"
+ gstreamer_armhf_debs = "files",
)
load("//debian:packages.bzl", "generate_repositories_for_debs")
@@ -718,3 +718,23 @@
sha256 = "c5ac4c604952c274a50636e244f0d091bd1de302032446f24f0e9e03ae9c76f7",
url = "http://www.frc971.org/Build-Dependencies/gstreamer_armhf.tar.gz",
)
+
+# Downloaded from:
+# https://files.pythonhosted.org/packages/64/a7/45e11eebf2f15bf987c3bc11d37dcc838d9dc81250e67e4c5968f6008b6c/Jinja2-2.11.2.tar.gz
+http_archive(
+ name = "python_jinja2",
+ build_file = "@//debian:python_jinja2.BUILD",
+ sha256 = "89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
+ strip_prefix = "Jinja2-2.11.2",
+ url = "http://www.frc971.org/Build-Dependencies/Jinja2-2.11.2.tar.gz",
+)
+
+# Downloaded from:
+# https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz
+http_archive(
+ name = "python_markupsafe",
+ build_file = "@//debian:python_markupsafe.BUILD",
+ sha256 = "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
+ strip_prefix = "MarkupSafe-1.1.1",
+ url = "http://www.frc971.org/Build-Dependencies/MarkupSafe-1.1.1.tar.gz",
+)
diff --git a/aos/events/event_loop.h b/aos/events/event_loop.h
index bc5a5ae..7634479 100644
--- a/aos/events/event_loop.h
+++ b/aos/events/event_loop.h
@@ -1,6 +1,8 @@
#ifndef AOS_EVENTS_EVENT_LOOP_H_
#define AOS_EVENTS_EVENT_LOOP_H_
+#include <sched.h>
+
#include <atomic>
#include <string>
#include <string_view>
@@ -404,6 +406,15 @@
internal::TimerTiming timing_;
};
+inline cpu_set_t MakeCpusetFromCpus(std::initializer_list<int> cpus) {
+ cpu_set_t result;
+ CPU_ZERO(&result);
+ for (int cpu : cpus) {
+ CPU_SET(cpu, &result);
+ }
+ return result;
+}
+
class EventLoop {
public:
EventLoop(const Configuration *configuration);
@@ -524,6 +535,10 @@
virtual void SetRuntimeRealtimePriority(int priority) = 0;
virtual int priority() const = 0;
+ // Sets the scheduler affinity to run the event loop with. This may only be
+ // called before Run().
+ virtual void SetRuntimeAffinity(const cpu_set_t &cpuset) = 0;
+
// Fetches new messages from the provided channel (path, type).
//
// Note: this channel must be a member of the exact configuration object this
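
For context, a minimal usage sketch of the new affinity API combined with the MakeCpusetFromCpus() helper; the ShmEventLoop setup, the priority value, and the CPU numbers are illustrative assumptions, not part of this change.

```cpp
#include "aos/events/shm_event_loop.h"

// Pin an event loop's thread to CPUs 1 and 2 before starting it.  Affinity
// (like the realtime priority) must be configured before Run(); it is applied
// at the start of Run() when the loop's thread is set up.
void RunPinnedLoop(const aos::Configuration *config) {
  aos::ShmEventLoop event_loop(config);
  event_loop.SetRuntimeRealtimePriority(5);
  event_loop.SetRuntimeAffinity(aos::MakeCpusetFromCpus({1, 2}));
  event_loop.Run();
}
```
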
diff --git a/aos/events/event_loop_param_test.cc b/aos/events/event_loop_param_test.cc
index f0fb3d3..71bef37 100644
--- a/aos/events/event_loop_param_test.cc
+++ b/aos/events/event_loop_param_test.cc
@@ -585,6 +585,17 @@
EXPECT_DEATH(Run(), "realtime");
}
+// Verify that SetRuntimeAffinity fails while running.
+TEST_P(AbstractEventLoopDeathTest, SetRuntimeAffinity) {
+ auto loop = MakePrimary();
+ // Confirm that runtime affinity calls work when not running.
+ loop->SetRuntimeAffinity(MakeCpusetFromCpus({0}));
+
+ loop->OnRun([&]() { loop->SetRuntimeAffinity(MakeCpusetFromCpus({1})); });
+
+ EXPECT_DEATH(Run(), "Cannot set affinity while running");
+}
+
// Verify that registering a watcher and a sender for "/test" fails.
TEST_P(AbstractEventLoopDeathTest, WatcherAndSender) {
auto loop = Make();
@@ -593,6 +604,18 @@
"/test");
}
+// Verify that creating too many senders fails.
+TEST_P(AbstractEventLoopDeathTest, TooManySenders) {
+ auto loop = Make();
+ std::vector<aos::Sender<TestMessage>> senders;
+ for (int i = 0; i < 10; ++i) {
+ senders.emplace_back(loop->MakeSender<TestMessage>("/test"));
+ }
+ EXPECT_DEATH({ loop->MakeSender<TestMessage>("/test"); },
+ "Failed to create sender on \\{ \"name\": \"/test\", \"type\": "
+ "\"aos.TestMessage\" \\}, too many senders.");
+}
+
// Verify that we can't create a sender inside OnRun.
TEST_P(AbstractEventLoopDeathTest, SenderInOnRun) {
auto loop1 = MakePrimary();
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index 2e63f45..52ea5a8 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -12,28 +12,47 @@
)
cc_library(
- name = "logger",
+ name = "logfile_utils",
srcs = [
"logfile_utils.cc",
- "logger.cc",
- "logger_math.cc",
],
hdrs = [
"logfile_utils.h",
- "logger.h",
],
visibility = ["//visibility:public"],
deps = [
":logger_fbs",
+ "//aos:configuration",
"//aos:flatbuffer_merge",
"//aos/events:event_loop",
+ "//aos/util:file",
+ "@com_github_gflags_gflags//:gflags",
+ "@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/types:span",
+ ],
+)
+
+cc_library(
+ name = "logger",
+ srcs = [
+ "logger.cc",
+ "logger_math.cc",
+ ],
+ hdrs = [
+ "logger.h",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":logfile_utils",
+ ":logger_fbs",
+ "//aos/events:event_loop",
"//aos/events:simulated_event_loop",
"//aos/network:team_number",
"//aos/network:timestamp_filter",
"//aos/time",
"@com_github_google_flatbuffers//:flatbuffers",
- "@com_google_absl//absl/container:inlined_vector",
- "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/types:span",
"@org_tuxfamily_eigen//:eigen",
],
)
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index 5388df7..2cc2f61 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -62,6 +62,7 @@
PCHECK(written == static_cast<ssize_t>(n.iov_len))
<< ": Wrote " << written << " expected " << n.iov_len;
+ written_size_ += written;
}
void DetachedBufferWriter::QueueSizedFlatbuffer(
@@ -96,6 +97,7 @@
PCHECK(written == static_cast<ssize_t>(queued_size_))
<< ": Wrote " << written << " expected " << queued_size_;
+ written_size_ += written;
queued_size_ = 0;
queue_.clear();
@@ -417,8 +419,7 @@
bool was_emplaced = false;
while (true) {
// Stop if we have enough.
- if (newest_timestamp() >
- time_to_queue_ + max_out_of_order_duration() &&
+ if (newest_timestamp() > time_to_queue_ + max_out_of_order_duration() &&
was_emplaced) {
VLOG(1) << "Done queueing on " << this << ", queued to "
<< newest_timestamp() << " with requeue time " << time_to_queue_;
@@ -1007,7 +1008,21 @@
for (const std::unique_ptr<SplitMessageReader> &reader :
split_message_readers_) {
- if (CompareFlatBuffer(reader->node(), target_node)) {
+ // In order to identify which logfile(s) map to the target node, do a
+ // logical comparison of the nodes, by confirming that we are either in a
+ // single-node setup (where the nodes will both be nullptr) or that the
+ // node names match (but the other node fields--e.g., hostname lists--may
+ // not).
+ const bool both_null =
+ reader->node() == nullptr && target_node == nullptr;
+ const bool both_have_name =
+ (reader->node() != nullptr) && (target_node != nullptr) &&
+ (reader->node()->has_name() && target_node->has_name());
+ const bool node_names_identical =
+ both_have_name &&
+ (reader->node()->name()->string_view() ==
+ target_node->name()->string_view());
+ if (both_null || node_names_identical) {
if (!found_node) {
found_node = true;
log_file_header_ = CopyFlatBuffer(reader->log_file_header());
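
The inline comparison above can also be read as a small predicate; here is a sketch of the equivalent logic as a hypothetical standalone helper (the change itself keeps the checks inline):

```cpp
#include "aos/configuration.h"

// Two nodes "match" if we are in a single-node setup (both nullptr) or if
// their names are identical; other fields such as the hostname list are
// intentionally ignored.
bool NodesLogicallyMatch(const aos::Node *a, const aos::Node *b) {
  if (a == nullptr && b == nullptr) {
    return true;
  }
  return a != nullptr && b != nullptr && a->has_name() && b->has_name() &&
         a->name()->string_view() == b->name()->string_view();
}
```
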
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index 4ab4dca..7acbbd3 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -7,6 +7,7 @@
#include <optional>
#include <string>
#include <string_view>
+#include <tuple>
#include <vector>
#include "absl/types/span.h"
@@ -38,6 +39,9 @@
DetachedBufferWriter(std::string_view filename);
~DetachedBufferWriter();
+ DetachedBufferWriter(const DetachedBufferWriter &) = delete;
+ DetachedBufferWriter &operator=(const DetachedBufferWriter &) = delete;
+
std::string_view filename() const { return filename_; }
// TODO(austin): Snappy compress the log file if it ends with .snappy!
@@ -53,6 +57,12 @@
// Triggers data to be provided to the kernel and written.
void Flush();
+ // Returns the number of bytes written.
+ size_t written_size() const { return written_size_; }
+
+ // Returns the number of bytes written or currently queued.
+ size_t total_size() const { return written_size_ + queued_size_; }
+
private:
const std::string filename_;
@@ -60,6 +70,7 @@
// Size of all the data in the queue.
size_t queued_size_ = 0;
+ size_t written_size_ = 0;
// List of buffers to flush.
std::vector<flatbuffers::DetachedBuffer> queue_;
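
A sketch of how the new accessors might be consumed, for example to monitor log growth; the threshold and the logging policy are illustrative assumptions, not part of this change.

```cpp
#include "aos/events/logging/logfile_utils.h"
#include "glog/logging.h"

// Warn once a log file grows past a (hypothetical) threshold.  total_size()
// counts bytes already written plus bytes still queued in memory, while
// written_size() only counts what has been handed to write() so far.
void WarnIfLogIsLarge(const aos::logger::DetachedBufferWriter &writer) {
  constexpr size_t kWarnBytes = static_cast<size_t>(1) << 30;  // 1 GiB
  if (writer.total_size() > kWarnBytes) {
    LOG(WARNING) << writer.filename() << ": " << writer.written_size()
                 << " bytes on disk, " << writer.total_size()
                 << " bytes including queued data.";
  }
}
```
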
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index de9d344..b3472d7 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -312,9 +312,16 @@
state->channel_merger = std::make_unique<ChannelMerger>(filenames);
} else {
if (replay_configuration) {
- CHECK_EQ(configuration()->nodes()->size(),
+ CHECK_EQ(logged_configuration()->nodes()->size(),
replay_configuration->nodes()->size())
<< ": Log file and replay config need to have matching nodes lists.";
+ for (const Node *node : *logged_configuration()->nodes()) {
+ if (configuration::GetNode(replay_configuration, node) == nullptr) {
+ LOG(FATAL)
+ << "Found node " << FlatbufferToJson(node)
+ << " in logged config that is not present in the replay config.";
+ }
+ }
}
states_.resize(configuration()->nodes()->size());
}
@@ -387,6 +394,11 @@
Register(state->event_loop_unique_ptr.get());
}
+ if (live_nodes_ == 0) {
+ LOG(FATAL)
+ << "Don't have logs from any of the nodes in the replay config--are "
+ "you sure that the replay config matches the original config?";
+ }
// We need to now seed our per-node time offsets and get everything set up to
// run.
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index 34dbd24..ce21598 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -424,6 +424,8 @@
// Returns the offset from the monotonic clock for a node to the distributed
// clock. distributed = monotonic + offset;
std::chrono::nanoseconds offset(int node_index) const {
+ CHECK_LT(node_index, offset_matrix_.rows())
+ << ": Got too high of a node index.";
return -std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::duration<double>(offset_matrix_(node_index))) -
base_offset_matrix_(node_index);
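
To make the documented relationship concrete, a tiny illustrative snippet (the numbers are made up; offset() itself is unchanged here apart from the new bounds check):

```cpp
#include <chrono>

// distributed = monotonic + offset, per the comment above: with an offset of
// +250 ns, an event stamped at 10 s on the node's monotonic clock lands at
// 10.000000250 s on the distributed clock.
constexpr std::chrono::nanoseconds kOffset{250};
constexpr std::chrono::nanoseconds kNodeMonotonic = std::chrono::seconds(10);
constexpr std::chrono::nanoseconds kDistributed = kNodeMonotonic + kOffset;
static_assert(kDistributed == std::chrono::nanoseconds(10'000'000'250),
              "distributed = monotonic + offset");
```
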
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 9e69ae4..b894bf7 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -505,6 +505,77 @@
reader.Deregister();
}
+typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
+
+// Test that if we feed the replay a mismatched node list, we die in the
+// LogReader constructor.
+TEST_F(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
+ const ::std::string tmpdir(getenv("TEST_TMPDIR"));
+ const ::std::string logfile_base = tmpdir + "/multi_logfile";
+ const ::std::string logfile1 = logfile_base + "_pi1_data.bfbs";
+ const ::std::string logfile2 =
+ logfile_base + "_pi2_data/test/aos.examples.Pong.bfbs";
+ const ::std::string logfile3 = logfile_base + "_pi2_data.bfbs";
+
+ // Remove them.
+ unlink(logfile1.c_str());
+ unlink(logfile2.c_str());
+ unlink(logfile3.c_str());
+
+ LOG(INFO) << "Logging data to " << logfile1 << ", " << logfile2 << " and "
+ << logfile3;
+
+ {
+ std::unique_ptr<EventLoop> ping_event_loop =
+ event_loop_factory_.MakeEventLoop("ping", pi1_);
+ Ping ping(ping_event_loop.get());
+ std::unique_ptr<EventLoop> pong_event_loop =
+ event_loop_factory_.MakeEventLoop("pong", pi2_);
+ Pong pong(pong_event_loop.get());
+
+ std::unique_ptr<EventLoop> pi1_logger_event_loop =
+ event_loop_factory_.MakeEventLoop("logger", pi1_);
+ std::unique_ptr<LogNamer> pi1_log_namer =
+ std::make_unique<MultiNodeLogNamer>(
+ logfile_base, pi1_logger_event_loop->configuration(),
+ pi1_logger_event_loop->node());
+
+ std::unique_ptr<EventLoop> pi2_logger_event_loop =
+ event_loop_factory_.MakeEventLoop("logger", pi2_);
+ std::unique_ptr<LogNamer> pi2_log_namer =
+ std::make_unique<MultiNodeLogNamer>(
+ logfile_base, pi2_logger_event_loop->configuration(),
+ pi2_logger_event_loop->node());
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
+ chrono::milliseconds(100));
+
+ Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
+ chrono::milliseconds(100));
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+ // Test that, if we add an additional node to the replay config, the logger
+ // complains about the mismatch in the number of nodes.
+ FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
+ configuration::MergeWithConfig(&config_.message(), R"({
+ "nodes": [
+ {
+ "name": "extra-node"
+ }
+ ]
+ }
+ )");
+
+ EXPECT_DEATH(LogReader({std::vector<std::string>{logfile1},
+ std::vector<std::string>{logfile3}},
+ &extra_nodes_config.message()),
+ "Log file and replay config need to have matching nodes lists.");
+}
+
// Tests that we can read log files where they don't start at the same monotonic
// time.
TEST_F(MultinodeLoggerTest, StaggeredStart) {
diff --git a/aos/events/pingpong.fbs b/aos/events/pingpong.fbs
deleted file mode 100644
index 67e5015..0000000
--- a/aos/events/pingpong.fbs
+++ /dev/null
@@ -1,14 +0,0 @@
-namespace aos.examples;
-
-table Ping {
- value:int;
- send_time:long;
-}
-
-table Pong {
- value:int;
- initial_send_time:long;
-}
-
-root_type Ping;
-root_type Pong;
diff --git a/aos/events/shm_event_loop.cc b/aos/events/shm_event_loop.cc
index b2ccca6..b53be2f 100644
--- a/aos/events/shm_event_loop.cc
+++ b/aos/events/shm_event_loop.cc
@@ -374,10 +374,22 @@
event_loop->configuration()->channel_storage_duration()))),
lockless_queue_(lockless_queue_memory_.memory(),
lockless_queue_memory_.config()),
- lockless_queue_sender_(lockless_queue_.MakeSender()) {}
+ lockless_queue_sender_(
+ VerifySender(lockless_queue_.MakeSender(), channel)) {}
~ShmSender() override {}
+ static ipc_lib::LocklessQueue::Sender VerifySender(
+ std::optional<ipc_lib::LocklessQueue::Sender> &&sender,
+ const Channel *channel) {
+ if (sender) {
+ return std::move(sender.value());
+ }
+ LOG(FATAL) << "Failed to create sender on "
+ << configuration::CleanedChannelToString(channel)
+ << ", too many senders.";
+ }
+
void *data() override { return lockless_queue_sender_.Data(); }
size_t size() override { return lockless_queue_sender_.size(); }
bool DoSend(size_t length,
@@ -811,6 +823,10 @@
}
aos::SetCurrentThreadName(name_.substr(0, 16));
+ const cpu_set_t default_affinity = DefaultAffinity();
+ if (!CPU_EQUAL(&affinity_, &default_affinity)) {
+ ::aos::SetCurrentThreadAffinity(affinity_);
+ }
// Now, all the callbacks are setup. Lock everything into memory and go RT.
if (priority_ != 0) {
::aos::InitRT();
@@ -880,6 +896,13 @@
priority_ = priority;
}
+void ShmEventLoop::SetRuntimeAffinity(const cpu_set_t &cpuset) {
+ if (is_running()) {
+ LOG(FATAL) << "Cannot set affinity while running.";
+ }
+ affinity_ = cpuset;
+}
+
void ShmEventLoop::set_name(const std::string_view name) {
name_ = std::string(name);
UpdateTimingReport();
diff --git a/aos/events/shm_event_loop.h b/aos/events/shm_event_loop.h
index d3f1295..15759f4 100644
--- a/aos/events/shm_event_loop.h
+++ b/aos/events/shm_event_loop.h
@@ -66,6 +66,7 @@
void OnRun(std::function<void()> on_run) override;
void SetRuntimeRealtimePriority(int priority) override;
+ void SetRuntimeAffinity(const cpu_set_t &cpuset) override;
void set_name(const std::string_view name) override;
const std::string_view name() const override { return name_; }
@@ -94,6 +95,14 @@
friend class internal::ShmSender;
friend class internal::ShmFetcher;
+ static cpu_set_t DefaultAffinity() {
+ cpu_set_t result;
+ for (int i = 0; i < CPU_SETSIZE; ++i) {
+ CPU_SET(i, &result);
+ }
+ return result;
+ }
+
void HandleEvent();
// Returns the TID of the event loop.
@@ -104,6 +113,7 @@
std::vector<std::function<void()>> on_run_;
int priority_ = 0;
+ cpu_set_t affinity_ = DefaultAffinity();
std::string name_;
const Node *const node_;
diff --git a/aos/events/simulated_event_loop.cc b/aos/events/simulated_event_loop.cc
index cf58b46..ee4e6d6 100644
--- a/aos/events/simulated_event_loop.cc
+++ b/aos/events/simulated_event_loop.cc
@@ -102,6 +102,19 @@
const Channel *channel() const { return channel_; }
+ void CountSenderCreated() {
+ if (sender_count_ >= channel()->num_senders()) {
+ LOG(FATAL) << "Failed to create sender on "
+ << configuration::CleanedChannelToString(channel())
+ << ", too many senders.";
+ }
+ ++sender_count_;
+ }
+ void CountSenderDestroyed() {
+ --sender_count_;
+ CHECK_GE(sender_count_, 0);
+ }
+
private:
const Channel *channel_;
@@ -114,6 +127,8 @@
EventScheduler *scheduler_;
ipc_lib::QueueIndex next_queue_index_;
+
+ int sender_count_ = 0;
};
namespace {
@@ -134,8 +149,10 @@
SimulatedSender(SimulatedChannel *simulated_channel, EventLoop *event_loop)
: RawSender(event_loop, simulated_channel->channel()),
simulated_channel_(simulated_channel),
- event_loop_(event_loop) {}
- ~SimulatedSender() {}
+ event_loop_(event_loop) {
+ simulated_channel_->CountSenderCreated();
+ }
+ ~SimulatedSender() { simulated_channel_->CountSenderDestroyed(); }
void *data() override {
if (!message_) {
@@ -414,6 +431,10 @@
int priority() const override { return priority_; }
+ void SetRuntimeAffinity(const cpu_set_t & /*cpuset*/) override {
+ CHECK(!is_running()) << ": Cannot set affinity while running.";
+ }
+
void Setup() {
MaybeScheduleTimingReports();
if (!skip_logger_) {
diff --git a/aos/events/simulated_event_loop.h b/aos/events/simulated_event_loop.h
index a7f7920..7718cfb 100644
--- a/aos/events/simulated_event_loop.h
+++ b/aos/events/simulated_event_loop.h
@@ -87,7 +87,9 @@
std::chrono::nanoseconds send_delay() const { return send_delay_; }
// Sets the simulated network delay for messages forwarded between nodes.
- void set_network_delay(std::chrono::nanoseconds network_delay);
+ void set_network_delay(std::chrono::nanoseconds network_delay) {
+ network_delay_ = network_delay;
+ }
std::chrono::nanoseconds network_delay() const { return network_delay_; }
// Returns the clock used to synchronize the nodes.
diff --git a/aos/flatbuffer_introspection.cc b/aos/flatbuffer_introspection.cc
index 036a830..6f32424 100644
--- a/aos/flatbuffer_introspection.cc
+++ b/aos/flatbuffer_introspection.cc
@@ -70,7 +70,8 @@
const reflection::Object *obj,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- const ObjT *object, size_t max_vector_size, std::stringstream *out);
+ const ObjT *object, size_t max_vector_size, std::stringstream *out,
+ bool multi_line = false, int tree_depth = 0);
// Get enum value name
const char *EnumToString(
@@ -114,13 +115,34 @@
}
}
+// Adds a newline and indents
+// Every increment in tree depth is two spaces
+void AddWrapping(std::stringstream *out, int tree_depth) {
+ *out << "\n";
+ for (int i = 0; i < tree_depth; i++) {
+ *out << " ";
+ }
+}
+
+// Detects if a field should trigger wrapping of the parent object.
+bool ShouldCauseWrapping(reflection::BaseType type) {
+ switch (type) {
+ case BaseType::Vector:
+ case BaseType::Obj:
+ return true;
+ default:
+ return false;
+ }
+}
+
// Print field in flatbuffer table. Field must be populated.
template <typename ObjT>
void FieldToString(
const ObjT *table, const reflection::Field *field,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- size_t max_vector_size, std::stringstream *out) {
+ size_t max_vector_size, std::stringstream *out, bool multi_line,
+ int tree_depth) {
const reflection::Type *type = field->type();
switch (type->base_type()) {
@@ -187,10 +209,25 @@
*out << "[ ... " << vector->size() << " elements ... ]";
break;
}
+
+ bool wrap = false;
+ const int child_tree_depth = tree_depth + 1;
+
+ if (multi_line) {
+ wrap = ShouldCauseWrapping(elem_type);
+ }
+
*out << '[';
for (flatbuffers::uoffset_t i = 0; i < vector->size(); ++i) {
if (i != 0) {
- *out << ", ";
+ if (wrap) {
+ *out << ",";
+ } else {
+ *out << ", ";
+ }
+ }
+ if (wrap) {
+ AddWrapping(out, child_tree_depth);
}
if (flatbuffers::IsInteger(elem_type)) {
IntOrEnumToString(
@@ -211,18 +248,20 @@
flatbuffers::GetAnyVectorElemAddressOf<
const flatbuffers::Struct>(
vector, i, objects->Get(type->index())->bytesize()),
- max_vector_size,
- out);
+ max_vector_size, out, multi_line, child_tree_depth);
} else {
ObjectToString(objects->Get(type->index()), objects, enums,
flatbuffers::GetAnyVectorElemPointer<
const flatbuffers::Table>(vector, i),
- max_vector_size,
- out);
+ max_vector_size, out, multi_line,
+ child_tree_depth);
}
}
}
}
+ if (wrap) {
+ AddWrapping(out, tree_depth);
+ }
*out << ']';
} else {
*out << "null";
@@ -232,10 +271,12 @@
if (type->index() > -1 && type->index() < (int32_t)objects->size()) {
if (objects->Get(type->index())->is_struct()) {
ObjectToString(objects->Get(type->index()), objects, enums,
- flatbuffers::GetFieldStruct(*table, *field), max_vector_size, out);
+ flatbuffers::GetFieldStruct(*table, *field),
+ max_vector_size, out, multi_line, tree_depth);
} else if constexpr (std::is_same<flatbuffers::Table, ObjT>()) {
ObjectToString(objects->Get(type->index()), objects, enums,
- flatbuffers::GetFieldT(*table, *field), max_vector_size,out);
+ flatbuffers::GetFieldT(*table, *field),
+ max_vector_size, out, multi_line, tree_depth);
}
} else {
*out << "null";
@@ -253,32 +294,63 @@
const reflection::Object *obj,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- const ObjT *object, size_t max_vector_size, std::stringstream *out) {
+ const ObjT *object, size_t max_vector_size, std::stringstream *out,
+ bool multi_line, int tree_depth) {
static_assert(std::is_same<flatbuffers::Table, ObjT>() ||
std::is_same<flatbuffers::Struct, ObjT>(),
"Type must be either flatbuffer table or struct");
bool print_sep = false;
+
+ const int child_tree_depth = tree_depth + 1;
+
+ bool wrap = false;
+ if (multi_line) {
+ // Check whether this object has any vectors or sub-objects inside of it
+ for (const reflection::Field *field : *obj->fields()) {
+ if (ShouldCauseWrapping(field->type()->base_type())) {
+ wrap = true;
+ break;
+ }
+ }
+ }
+
*out << '{';
for (const reflection::Field *field : *obj->fields()) {
// Check whether this object has the field populated (even for structs,
// which should have all fields populated)
if (object->GetAddressOf(field->offset())) {
if (print_sep) {
- *out << ", ";
+ if (wrap) {
+ *out << ",";
+ } else {
+ *out << ", ";
+ }
} else {
print_sep = true;
}
+
+ if (wrap) {
+ AddWrapping(out, child_tree_depth);
+ }
+
*out << '"' << field->name()->c_str() << "\": ";
- FieldToString(object, field, objects, enums, max_vector_size, out);
+ FieldToString(object, field, objects, enums, max_vector_size, out,
+ multi_line, child_tree_depth);
}
}
+
+ if (wrap) {
+ AddWrapping(out, tree_depth);
+ }
+
*out << '}';
}
} // namespace
std::string FlatbufferToJson(const reflection::Schema *schema,
- const uint8_t *data, size_t max_vector_size) {
+ const uint8_t *data, bool multi_line,
+ size_t max_vector_size) {
const flatbuffers::Table *table = flatbuffers::GetAnyRoot(data);
const reflection::Object *obj = schema->root_table();
@@ -286,7 +358,7 @@
std::stringstream out;
ObjectToString(obj, schema->objects(), schema->enums(), table,
- max_vector_size, &out);
+ max_vector_size, &out, multi_line);
return out.str();
}
diff --git a/aos/flatbuffer_introspection_test.cc b/aos/flatbuffer_introspection_test.cc
index 9da705f..4214202 100644
--- a/aos/flatbuffer_introspection_test.cc
+++ b/aos/flatbuffer_introspection_test.cc
@@ -62,6 +62,20 @@
"{\"foo_double\": 0.555555555555556, \"foo_float\": 0.333333}");
}
+TEST_F(FlatbufferIntrospectionTest, NanFloatTest) {
+ flatbuffers::FlatBufferBuilder builder;
+ ConfigurationBuilder config_builder(builder);
+
+ config_builder.add_foo_float(std::numeric_limits<float>::quiet_NaN());
+ config_builder.add_foo_double(std::numeric_limits<double>::quiet_NaN());
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer());
+
+ EXPECT_EQ(out, "{\"foo_double\": null, \"foo_float\": null}");
+}
+
TEST_F(FlatbufferIntrospectionTest, VectorScalarTest) {
flatbuffers::FlatBufferBuilder builder;
@@ -154,6 +168,21 @@
EXPECT_EQ(out, "{\"foo_enum\": \"UShort\"}");
}
+TEST_F(FlatbufferIntrospectionTest, EnumWithUnknownValueTest) {
+ flatbuffers::FlatBufferBuilder builder;
+
+ ConfigurationBuilder config_builder(builder);
+ // 123 is not part of the enum. We expect it to be represented by null in
+ // the json.
+ config_builder.fbb_.AddElement<uint8_t>(Configuration::VT_FOO_ENUM, 123, 0);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer());
+
+ EXPECT_EQ(out, "{\"foo_enum\": 123}");
+}
+
TEST_F(FlatbufferIntrospectionTest, VectorStringTest) {
flatbuffers::FlatBufferBuilder builder;
@@ -328,9 +357,112 @@
builder.Finish(config_builder.Finish());
- std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), 100);
+ std::string out =
+ FlatbufferToJson(schema_, builder.GetBufferPointer(), false, 100);
EXPECT_EQ(out, "{\"vector_foo_int\": [ ... 101 elements ... ]}");
}
+TEST_F(FlatbufferIntrospectionTest, MultilineTest) {
+ flatbuffers::FlatBufferBuilder builder;
+ ConfigurationBuilder config_builder(builder);
+
+ config_builder.add_foo_bool(true);
+ config_builder.add_foo_int(-20);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"foo_bool\": true,\n"
+ " \"foo_int\": -20\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineStructTest) {
+ flatbuffers::FlatBufferBuilder builder;
+ ConfigurationBuilder config_builder(builder);
+
+ FooStructNested foo_struct2(10);
+ FooStruct foo_struct(5, foo_struct2);
+
+ config_builder.add_foo_struct(&foo_struct);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"foo_struct\": {\n"
+ " \"foo_byte\": 5,\n"
+ " \"nested_struct\": {\"foo_byte\": 10}\n"
+ " }\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineVectorStructTest) {
+ flatbuffers::FlatBufferBuilder builder;
+
+ FooStructNested foo_struct2(1);
+
+ auto structs = builder.CreateVectorOfStructs(
+ std::vector<FooStruct>({{5, foo_struct2}, {10, foo_struct2}}));
+
+ ConfigurationBuilder config_builder(builder);
+ config_builder.add_vector_foo_struct(structs);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"vector_foo_struct\": [\n"
+ " {\n"
+ " \"foo_byte\": 5,\n"
+ " \"nested_struct\": {\"foo_byte\": 1}\n"
+ " },\n"
+ " {\n"
+ " \"foo_byte\": 10,\n"
+ " \"nested_struct\": {\"foo_byte\": 1}\n"
+ " }\n"
+ " ]\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineVectorScalarTest) {
+ flatbuffers::FlatBufferBuilder builder;
+
+ // Flatbuffers don't allow creating vectors while a table builder is active,
+ // so create the vectors first.
+ auto foo_ints =
+ builder.CreateVector<int32_t>({-300, -200, -100, 0, 100, 200, 300});
+
+ auto foo_floats =
+ builder.CreateVector<float>({0.0, 1.0 / 9.0, 2.0 / 9.0, 3.0 / 9.0});
+ auto foo_doubles =
+ builder.CreateVector<double>({0, 1.0 / 9.0, 2.0 / 9.0, 3.0 / 9.0});
+
+ ConfigurationBuilder config_builder(builder);
+
+ config_builder.add_vector_foo_int(foo_ints);
+ config_builder.add_vector_foo_float(foo_floats);
+ config_builder.add_vector_foo_double(foo_doubles);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"vector_foo_double\": [0, 0.111111111111111, "
+ "0.222222222222222, 0.333333333333333],\n"
+ " \"vector_foo_float\": [0, 0.111111, 0.222222, 0.333333],\n"
+ " \"vector_foo_int\": [-300, -200, -100, 0, 100, 200, 300]\n"
+ "}");
+}
+
} // namespace testing
} // namespace aos
diff --git a/aos/flatbuffers.h b/aos/flatbuffers.h
index 6e86d35..6fca458 100644
--- a/aos/flatbuffers.h
+++ b/aos/flatbuffers.h
@@ -125,7 +125,8 @@
FlatbufferString(const std::string_view data) : data_(data) {}
// Builds a Flatbuffer by copying the data from the other flatbuffer.
FlatbufferString(const Flatbuffer<T> &other) {
- data_ = std::string(other.data(), other.size());
+ data_ =
+ std::string(reinterpret_cast<const char *>(other.data()), other.size());
}
// Copies the data from the other flatbuffer.
diff --git a/aos/ipc_lib/lockless_queue.cc b/aos/ipc_lib/lockless_queue.cc
index f31d80d..02aebcb 100644
--- a/aos/ipc_lib/lockless_queue.cc
+++ b/aos/ipc_lib/lockless_queue.cc
@@ -276,6 +276,50 @@
// Everything should be zero initialized already. So we just need to fill
// everything out properly.
+ // This is the UID we will use for checking signal-sending permission
+ // compatibility.
+ //
+ // The manpage says:
+ // For a process to have permission to send a signal, it must either be
+ // privileged [...], or the real or effective user ID of the sending process
+ // must equal the real or saved set-user-ID of the target process.
+ //
+ // Processes typically initialize a queue in random order as they start up.
+ // This means we need an algorithm for verifying all processes have
+ // permissions to send each other signals which gives the same answer no
+ // matter what order they attach in. We would also like to avoid maintaining a
+ // shared list of the UIDs of all processes.
+ //
+ // To do this while still giving sufficient flexibility for all current use
+ // cases, we track a single UID for the queue. All processes with a matching
+ // euid+suid must have this UID. Any processes with distinct euid/suid must
+ // instead have a matching ruid. This guarantees signals can be sent between
+ // all processes attached to the queue.
+ //
+ // In particular, this allows a process to change only its euid (to interact
+ // with a queue) while still maintaining privileges via its ruid. However, it
+ // can only use privileges in ways that do not require changing the euid back,
+ // because while the euid is different it will not be able to receive signals.
+ // We can't actually verify that, but we can sanity check that things are
+ // valid when the queue is initialized.
+
+ uid_t uid;
+ {
+ uid_t ruid, euid, suid;
+ PCHECK(getresuid(&ruid, &euid, &suid) == 0);
+ // If these are equal, then use them, even if that's different from the real
+ // UID. This allows processes to keep a real UID of 0 (to have permissions
+ // to perform system-level changes) while still being able to communicate
+ // with processes running unprivileged as a distinct user.
+ if (euid == suid) {
+ uid = euid;
+ VLOG(1) << "Using euid==suid " << uid;
+ } else {
+ uid = ruid;
+ VLOG(1) << "Using ruid " << ruid;
+ }
+ }
+
// Grab the mutex. We don't care if the previous reader died. We are going
// to check everything anyways.
GrabQueueSetupLockOrDie grab_queue_setup_lock(memory);
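
To make the euid/suid rule above concrete, here is a sketch of how a privileged process could drop its effective and saved UIDs to the shared queue user while keeping a real UID of 0, as the comment describes; the specific UID value and the helper name are illustrative assumptions.

```cpp
#include <sys/types.h>
#include <unistd.h>

#include "glog/logging.h"

// Hypothetical setup for a process that needs root for system-level work but
// attaches to a queue shared with unprivileged uid-1000 processes.  After
// this call euid == suid == 1000, so the euid == suid branch above selects
// uid 1000 for the queue, and signals can still be delivered because the
// other processes' uid matches this process's saved set-user-ID.
void DropToQueueUser() {
  constexpr uid_t kQueueUid = 1000;
  // static_cast<uid_t>(-1) leaves the real UID (still 0) unchanged.
  PCHECK(setresuid(static_cast<uid_t>(-1), kQueueUid, kQueueUid) == 0);
}
```
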
@@ -306,7 +350,7 @@
}
memory->next_queue_index.Invalidate();
- memory->uid = getuid();
+ memory->uid = uid;
for (size_t i = 0; i < memory->num_senders(); ++i) {
::aos::ipc_lib::Sender *s = memory->GetSender(i);
@@ -321,7 +365,7 @@
// redo initialization.
memory->initialized = true;
} else {
- CHECK_EQ(getuid(), memory->uid) << ": UIDs must match for all processes";
+ CHECK_EQ(uid, memory->uid) << ": UIDs must match for all processes";
}
return memory;
@@ -518,7 +562,8 @@
}
if (sender_index_ == -1) {
- LOG(FATAL) << "Too many senders";
+ VLOG(1) << "Too many senders, starting to bail.";
+ return;
}
::aos::ipc_lib::Sender *s = memory_->GetSender(sender_index_);
@@ -529,13 +574,18 @@
}
LocklessQueue::Sender::~Sender() {
- if (memory_ != nullptr) {
+ if (valid()) {
death_notification_release(&(memory_->GetSender(sender_index_)->tid));
}
}
-LocklessQueue::Sender LocklessQueue::MakeSender() {
- return LocklessQueue::Sender(memory_);
+std::optional<LocklessQueue::Sender> LocklessQueue::MakeSender() {
+ LocklessQueue::Sender result = LocklessQueue::Sender(memory_);
+ if (result.valid()) {
+ return std::move(result);
+ } else {
+ return std::nullopt;
+ }
}
QueueIndex ZeroOrValid(QueueIndex index) {
diff --git a/aos/ipc_lib/lockless_queue.h b/aos/ipc_lib/lockless_queue.h
index 550485f..de80f3d 100644
--- a/aos/ipc_lib/lockless_queue.h
+++ b/aos/ipc_lib/lockless_queue.h
@@ -5,6 +5,7 @@
#include <sys/signalfd.h>
#include <sys/types.h>
#include <vector>
+#include <optional>
#include "aos/ipc_lib/aos_sync.h"
#include "aos/ipc_lib/data_alignment.h"
@@ -245,6 +246,11 @@
Sender(LocklessQueueMemory *memory);
+ // Returns true if this sender is valid. If it isn't valid, none of the
+ // other methods will work. This is here to allow the lockless queue to
+ // only build a sender if there was one available.
+ bool valid() const { return sender_index_ != -1 && memory_ != nullptr; }
+
// Pointer to the backing memory.
LocklessQueueMemory *memory_ = nullptr;
@@ -252,8 +258,9 @@
int sender_index_ = -1;
};
- // Creates a sender.
- Sender MakeSender();
+ // Creates a sender. If we couldn't allocate a sender, returns nullopt.
+ // TODO(austin): Change the API if we find ourselves with more errors.
+ std::optional<Sender> MakeSender();
private:
LocklessQueueMemory *memory_ = nullptr;
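
A sketch of the caller-side handling implied by the new return type; the in-tree callers in this change either call .value() directly or LOG(FATAL) with a channel-specific message (see ShmSender::VerifySender above), so the helper below is purely illustrative.

```cpp
#include <optional>
#include <utility>

#include "aos/ipc_lib/lockless_queue.h"
#include "glog/logging.h"

// Returns a sender, dying with a readable message if all of the queue's
// sender slots are already taken.
aos::ipc_lib::LocklessQueue::Sender MakeSenderOrDie(
    aos::ipc_lib::LocklessQueue *queue) {
  std::optional<aos::ipc_lib::LocklessQueue::Sender> sender =
      queue->MakeSender();
  CHECK(sender.has_value()) << ": too many senders attached to this queue.";
  return std::move(sender.value());
}
```
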
diff --git a/aos/ipc_lib/lockless_queue_death_test.cc b/aos/ipc_lib/lockless_queue_death_test.cc
index 213c9e4..b7cdfee 100644
--- a/aos/ipc_lib/lockless_queue_death_test.cc
+++ b/aos/ipc_lib/lockless_queue_death_test.cc
@@ -521,7 +521,7 @@
LocklessQueue queue(
reinterpret_cast<aos::ipc_lib::LocklessQueueMemory *>(memory),
config);
- LocklessQueue::Sender sender = queue.MakeSender();
+ LocklessQueue::Sender sender = queue.MakeSender().value();
for (int i = 0; i < 2; ++i) {
char data[100];
size_t s = snprintf(data, sizeof(data), "foobar%d", i + 1);
@@ -555,7 +555,7 @@
LocklessQueue queue(memory, config);
// Building and destroying a sender will clean up the queue.
- { LocklessQueue::Sender sender = queue.MakeSender(); }
+ { LocklessQueue::Sender sender = queue.MakeSender().value(); }
if (print) {
printf("Cleaned up version:\n");
@@ -563,12 +563,12 @@
}
{
- LocklessQueue::Sender sender = queue.MakeSender();
+ LocklessQueue::Sender sender = queue.MakeSender().value();
{
// Make a second sender to confirm that the slot was freed.
// If the sender doesn't get cleaned up, this will fail.
LocklessQueue queue2(memory, config);
- queue2.MakeSender();
+ queue2.MakeSender().value();
}
// Send a message to make sure that the queue still works.
diff --git a/aos/ipc_lib/lockless_queue_test.cc b/aos/ipc_lib/lockless_queue_test.cc
index 109c2ea..65b2a15 100644
--- a/aos/ipc_lib/lockless_queue_test.cc
+++ b/aos/ipc_lib/lockless_queue_test.cc
@@ -194,17 +194,15 @@
}
// Tests that creating too many senders fails as expected.
-TEST_F(LocklessQueueDeathTest, TooManySenders) {
- EXPECT_DEATH(
- {
- ::std::vector<::std::unique_ptr<LocklessQueue>> queues;
- ::std::vector<LocklessQueue::Sender> senders;
- for (size_t i = 0; i < config_.num_senders + 1; ++i) {
- queues.emplace_back(new LocklessQueue(get_memory(), config_));
- senders.emplace_back(queues.back()->MakeSender());
- }
- },
- "Too many senders");
+TEST_F(LocklessQueueTest, TooManySenders) {
+ ::std::vector<::std::unique_ptr<LocklessQueue>> queues;
+ ::std::vector<LocklessQueue::Sender> senders;
+ for (size_t i = 0; i < config_.num_senders; ++i) {
+ queues.emplace_back(new LocklessQueue(get_memory(), config_));
+ senders.emplace_back(queues.back()->MakeSender().value());
+ }
+ queues.emplace_back(new LocklessQueue(get_memory(), config_));
+ EXPECT_FALSE(queues.back()->MakeSender());
}
// Now, start 2 threads and have them receive the signals.
@@ -240,7 +238,7 @@
TEST_F(LocklessQueueTest, Send) {
LocklessQueue queue(get_memory(), config_);
- LocklessQueue::Sender sender = queue.MakeSender();
+ LocklessQueue::Sender sender = queue.MakeSender().value();
// Send enough messages to wrap.
for (int i = 0; i < 20000; ++i) {
diff --git a/aos/ipc_lib/queue_racer.cc b/aos/ipc_lib/queue_racer.cc
index 9bb0a70..f5b3d7e 100644
--- a/aos/ipc_lib/queue_racer.cc
+++ b/aos/ipc_lib/queue_racer.cc
@@ -143,7 +143,7 @@
::std::thread([this, &t, thread_index, &run, write_wrap_count]() {
// Build up a sender.
LocklessQueue queue(memory_, config_);
- LocklessQueue::Sender sender = queue.MakeSender();
+ LocklessQueue::Sender sender = queue.MakeSender().value();
// Signal that we are ready to start sending.
t.ready.Set();
diff --git a/aos/json_to_flatbuffer.h b/aos/json_to_flatbuffer.h
index cadc0de..e852ead 100644
--- a/aos/json_to_flatbuffer.h
+++ b/aos/json_to_flatbuffer.h
@@ -44,7 +44,8 @@
::std::string TableFlatbufferToJson(const flatbuffers::Table *t,
const ::flatbuffers::TypeTable *typetable,
- bool multi_line, size_t max_vector_size = SIZE_MAX);
+ bool multi_line,
+ size_t max_vector_size = SIZE_MAX);
// Converts a DetachedBuffer holding a flatbuffer to JSON.
inline ::std::string FlatbufferToJson(const flatbuffers::DetachedBuffer &buffer,
@@ -78,7 +79,7 @@
std::string FlatbufferToJson(const reflection::Schema *const schema,
const uint8_t *const data,
- size_t max_vector_size = SIZE_MAX);
+ bool multi_line = false, size_t max_vector_size = SIZE_MAX);
} // namespace aos
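
Because the new multi_line parameter was inserted before max_vector_size, existing callers of the schema-based overload that passed a vector-size limit positionally need updating (the flatbuffer_introspection_test.cc update earlier in this diff shows the same adjustment). A hypothetical call site:

```cpp
#include <cstdint>
#include <string>

#include "aos/json_to_flatbuffer.h"

// Elide vectors longer than 100 elements; keep single-line output.
std::string ToTruncatedJson(const reflection::Schema *schema,
                            const uint8_t *data) {
  // Before this change: aos::FlatbufferToJson(schema, data, 100);
  return aos::FlatbufferToJson(schema, data, /*multi_line=*/false,
                               /*max_vector_size=*/100);
}
```
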
diff --git a/aos/json_tokenizer.cc b/aos/json_tokenizer.cc
index 2486227..a3d804e 100644
--- a/aos/json_tokenizer.cc
+++ b/aos/json_tokenizer.cc
@@ -146,6 +146,12 @@
return true;
}
+ // People tend to use null instead of nan. Accept that too.
+ if (Consume("null")) {
+ *s = ::std::string("nan");
+ return true;
+ }
+
// Then, we either get a 0, or we get a nonzero. Only nonzero can be followed
// by a second number.
if (!Consume("0")) {
@@ -227,7 +233,11 @@
switch (state_) {
case State::kExpectObjectStart:
// We should always start out with a {
- if (!Consume("{")) return TokenType::kError;
+ if (!Consume("{")) {
+ fprintf(stderr, "Error on line %d, expected { for start.\n",
+ linenumber_);
+ return TokenType::kError;
+ }
// Document that we just started an object.
object_type_.push_back(ObjectType::kObject);
diff --git a/aos/realtime.cc b/aos/realtime.cc
index 6668855..558417f 100644
--- a/aos/realtime.cc
+++ b/aos/realtime.cc
@@ -50,8 +50,7 @@
if (set_for_root == SetLimitForRoot::kYes || !am_root) {
struct rlimit64 rlim;
PCHECK(getrlimit64(resource, &rlim) == 0)
- << ": " << program_invocation_short_name << "-init: getrlimit64("
- << resource << ") failed";
+ << ": getting limit for " << resource;
if (allow_decrease == AllowSoftLimitDecrease::kYes) {
rlim.rlim_cur = soft;
@@ -61,9 +60,8 @@
rlim.rlim_max = ::std::max(rlim.rlim_max, soft);
PCHECK(setrlimit64(resource, &rlim) == 0)
- << ": " << program_invocation_short_name << "-init: setrlimit64("
- << resource << ", {cur=" << (uintmax_t)rlim.rlim_cur
- << ",max=" << (uintmax_t)rlim.rlim_max << "}) failed";
+ << ": changing limit for " << resource << " to " << rlim.rlim_cur
+ << " with max of " << rlim.rlim_max;
}
}
@@ -74,8 +72,7 @@
SetSoftRLimit(RLIMIT_MEMLOCK, RLIM_INFINITY, SetLimitForRoot::kNo);
WriteCoreDumps();
- PCHECK(mlockall(MCL_CURRENT | MCL_FUTURE) == 0)
- << ": " << program_invocation_short_name << "-init: mlockall failed";
+ PCHECK(mlockall(MCL_CURRENT | MCL_FUTURE) == 0);
// Don't give freed memory back to the OS.
CHECK_EQ(1, mallopt(M_TRIM_THRESHOLD, -1));
@@ -114,15 +111,19 @@
void UnsetCurrentThreadRealtimePriority() {
struct sched_param param;
param.sched_priority = 0;
- PCHECK(sched_setscheduler(0, SCHED_OTHER, ¶m) == 0)
- << ": sched_setscheduler(0, SCHED_OTHER, 0) failed";
+ PCHECK(sched_setscheduler(0, SCHED_OTHER, ¶m) == 0);
+}
+
+void SetCurrentThreadAffinity(const cpu_set_t &cpuset) {
+ PCHECK(sched_setaffinity(0, sizeof(cpuset), &cpuset) == 0);
}
void SetCurrentThreadName(const std::string_view name) {
CHECK_LE(name.size(), 16u) << ": thread name '" << name << "' too long";
VLOG(1) << "This thread is changing to '" << name << "'";
std::string string_name(name);
- PCHECK(prctl(PR_SET_NAME, string_name.c_str()) == 0);
+ PCHECK(prctl(PR_SET_NAME, string_name.c_str()) == 0)
+ << ": changing name to " << string_name;
if (&logging::internal::ReloadThreadName != nullptr) {
logging::internal::ReloadThreadName();
}
@@ -135,7 +136,7 @@
struct sched_param param;
param.sched_priority = priority;
PCHECK(sched_setscheduler(0, SCHED_FIFO, ¶m) == 0)
- << ": sched_setscheduler(0, SCHED_FIFO, " << priority << ") failed";
+ << ": changing to SCHED_FIFO with " << priority;
}
void WriteCoreDumps() {
diff --git a/aos/realtime.h b/aos/realtime.h
index 20df979..6e0a472 100644
--- a/aos/realtime.h
+++ b/aos/realtime.h
@@ -1,6 +1,7 @@
#ifndef AOS_REALTIME_H_
#define AOS_REALTIME_H_
+#include <sched.h>
#include <string_view>
namespace aos {
@@ -21,6 +22,9 @@
// Sets the current thread's realtime priority.
void SetCurrentThreadRealtimePriority(int priority);
+// Sets the current thread's scheduling affinity.
+void SetCurrentThreadAffinity(const cpu_set_t &cpuset);
+
// Sets up this process to write core dump files.
// This is called by Init*, but it's here for other files that want this
// behavior without calling Init*.
diff --git a/aos/util/file.cc b/aos/util/file.cc
index b334ded..089efbc 100644
--- a/aos/util/file.cc
+++ b/aos/util/file.cc
@@ -66,5 +66,10 @@
PCHECK(result == 0) << ": Error creating " << folder;
}
+bool PathExists(std::string_view path) {
+ struct stat buffer;
+ return stat(path.data(), &buffer) == 0;
+}
+
} // namespace util
} // namespace aos
diff --git a/aos/util/file.h b/aos/util/file.h
index d6724af..9ee2fb4 100644
--- a/aos/util/file.h
+++ b/aos/util/file.h
@@ -17,6 +17,8 @@
void MkdirP(std::string_view path, mode_t mode);
+bool PathExists(std::string_view path);
+
} // namespace util
} // namespace aos
diff --git a/aos/util/file_test.cc b/aos/util/file_test.cc
index fa259e8..6851302 100644
--- a/aos/util/file_test.cc
+++ b/aos/util/file_test.cc
@@ -26,6 +26,19 @@
EXPECT_EQ(my_pid, stat.substr(0, my_pid.size()));
}
+// Tests that the PathExists function works under normal conditions.
+TEST(FileTest, PathExistsTest) {
+ const std::string tmpdir(getenv("TEST_TMPDIR"));
+ const std::string test_file = tmpdir + "/test_file";
+ // Make sure the test_file doesn't exist.
+ unlink(test_file.c_str());
+ EXPECT_FALSE(PathExists(test_file));
+
+ WriteStringToFileOrDie(test_file, "abc");
+
+ EXPECT_TRUE(PathExists(test_file));
+}
+
} // namespace testing
} // namespace util
} // namespace aos
diff --git a/build_tests/BUILD b/build_tests/BUILD
index 28dc677..e837390 100644
--- a/build_tests/BUILD
+++ b/build_tests/BUILD
@@ -124,3 +124,10 @@
srcs_version = "PY2AND3",
deps = ["@opencv_contrib_nonfree_amd64//:python_opencv"],
)
+
+py_test(
+ name = "python_jinja2",
+ srcs = ["python_jinja2.py"],
+ srcs_version = "PY2AND3",
+ deps = ["@python_jinja2"],
+)
diff --git a/build_tests/python_jinja2.py b/build_tests/python_jinja2.py
new file mode 100644
index 0000000..a926e31
--- /dev/null
+++ b/build_tests/python_jinja2.py
@@ -0,0 +1,4 @@
+#!/usr/bin/python3
+
+# Confirm that we can import jinja2.
+import jinja2
diff --git a/debian/python_jinja2.BUILD b/debian/python_jinja2.BUILD
new file mode 100644
index 0000000..1adab6a
--- /dev/null
+++ b/debian/python_jinja2.BUILD
@@ -0,0 +1,7 @@
+py_library(
+ name = "python_jinja2",
+ srcs = glob(["src/jinja2/*.py"]),
+ imports = ["src/"],
+ visibility = ["//visibility:public"],
+ deps = ["@python_markupsafe"],
+)
diff --git a/debian/python_markupsafe.BUILD b/debian/python_markupsafe.BUILD
new file mode 100644
index 0000000..87f10ea
--- /dev/null
+++ b/debian/python_markupsafe.BUILD
@@ -0,0 +1,6 @@
+py_library(
+ name = "python_markupsafe",
+ srcs = glob(["src/markupsafe/*.py"]),
+ imports = ["src/"],
+ visibility = ["//visibility:public"],
+)
diff --git a/y2020/constants.cc b/y2020/constants.cc
index eefc158..131b0de 100644
--- a/y2020/constants.cc
+++ b/y2020/constants.cc
@@ -26,6 +26,7 @@
const uint16_t kCompTeamNumber = 971;
const uint16_t kPracticeTeamNumber = 9971;
+const uint16_t kSpareRoborioTeamNumber = 6971;
const Values *DoGetValuesForTeam(uint16_t team) {
Values *const r = new Values();
@@ -99,6 +100,7 @@
switch (team) {
// A set of constants for tests.
case 1:
+ case kSpareRoborioTeamNumber:
break;
case kCompTeamNumber:
diff --git a/y2020/y2020_roborio.json b/y2020/y2020_roborio.json
index 7de7f51..b63982d 100644
--- a/y2020/y2020_roborio.json
+++ b/y2020/y2020_roborio.json
@@ -263,6 +263,7 @@
"hostname": "roborio",
"hostnames": [
"roboRIO-971-FRC",
+ "roboRIO-6971-FRC",
"roboRIO-7971-FRC",
"roboRIO-8971-FRC",
"roboRIO-9971-FRC"