Merge "Add the channel name into the sender creation failure message"
diff --git a/WORKSPACE b/WORKSPACE
index 9f087bc..168e690 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -60,11 +60,11 @@
)
load(
"//debian:gstreamer_amd64.bzl",
- gstreamer_amd64_debs = "files"
+ gstreamer_amd64_debs = "files",
)
load(
"//debian:gstreamer_armhf.bzl",
- gstreamer_armhf_debs = "files"
+ gstreamer_armhf_debs = "files",
)
load("//debian:packages.bzl", "generate_repositories_for_debs")
@@ -718,3 +718,23 @@
sha256 = "c5ac4c604952c274a50636e244f0d091bd1de302032446f24f0e9e03ae9c76f7",
url = "http://www.frc971.org/Build-Dependencies/gstreamer_armhf.tar.gz",
)
+
+# Downloaded from:
+# https://files.pythonhosted.org/packages/64/a7/45e11eebf2f15bf987c3bc11d37dcc838d9dc81250e67e4c5968f6008b6c/Jinja2-2.11.2.tar.gz
+http_archive(
+ name = "python_jinja2",
+ build_file = "@//debian:python_jinja2.BUILD",
+ sha256 = "89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
+ strip_prefix = "Jinja2-2.11.2",
+ url = "http://www.frc971.org/Build-Dependencies/Jinja2-2.11.2.tar.gz",
+)
+
+# Downloaded from:
+# https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz
+http_archive(
+ name = "python_markupsafe",
+ build_file = "@//debian:python_markupsafe.BUILD",
+ sha256 = "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
+ strip_prefix = "MarkupSafe-1.1.1",
+ url = "http://www.frc971.org/Build-Dependencies/MarkupSafe-1.1.1.tar.gz",
+)
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index d455bf5..2cc2f61 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -1008,7 +1008,21 @@
for (const std::unique_ptr<SplitMessageReader> &reader :
split_message_readers_) {
- if (CompareFlatBuffer(reader->node(), target_node)) {
+ // In order to identify which logfile(s) map to the target node, do a
+ // logical comparison of the nodes, by confirming that we are either in a
+ // single-node setup (where the nodes will both be nullptr) or that the
+ // node names match (but the other node fields--e.g., hostname lists--may
+ // not).
+ const bool both_null =
+ reader->node() == nullptr && target_node == nullptr;
+ const bool both_have_name =
+ (reader->node() != nullptr) && (target_node != nullptr) &&
+ (reader->node()->has_name() && target_node->has_name());
+ const bool node_names_identical =
+ both_have_name &&
+ (reader->node()->name()->string_view() ==
+ target_node->name()->string_view());
+ if (both_null || node_names_identical) {
if (!found_node) {
found_node = true;
log_file_header_ = CopyFlatBuffer(reader->log_file_header());
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index de9d344..b3472d7 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -312,9 +312,16 @@
state->channel_merger = std::make_unique<ChannelMerger>(filenames);
} else {
if (replay_configuration) {
- CHECK_EQ(configuration()->nodes()->size(),
+ CHECK_EQ(logged_configuration()->nodes()->size(),
replay_configuration->nodes()->size())
<< ": Log file and replay config need to have matching nodes lists.";
+ for (const Node *node : *logged_configuration()->nodes()) {
+ if (configuration::GetNode(replay_configuration, node) == nullptr) {
+ LOG(FATAL)
+ << "Found node " << FlatbufferToJson(node)
+ << " in logged config that is not present in the replay config.";
+ }
+ }
}
states_.resize(configuration()->nodes()->size());
}
@@ -387,6 +394,11 @@
Register(state->event_loop_unique_ptr.get());
}
+ if (live_nodes_ == 0) {
+ LOG(FATAL)
+ << "Don't have logs from any of the nodes in the replay config--are "
+ "you sure that the replay config matches the original config?";
+ }
// We need to now seed our per-node time offsets and get everything set up to
// run.
diff --git a/aos/events/logging/logger.h b/aos/events/logging/logger.h
index 34dbd24..ce21598 100644
--- a/aos/events/logging/logger.h
+++ b/aos/events/logging/logger.h
@@ -424,6 +424,8 @@
// Returns the offset from the monotonic clock for a node to the distributed
// clock. distributed = monotonic + offset;
std::chrono::nanoseconds offset(int node_index) const {
+ CHECK_LT(node_index, offset_matrix_.rows())
+ << ": Got too high of a node index.";
return -std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::duration<double>(offset_matrix_(node_index))) -
base_offset_matrix_(node_index);
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 9e69ae4..b894bf7 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -505,6 +505,77 @@
reader.Deregister();
}
+typedef MultinodeLoggerTest MultinodeLoggerDeathTest;
+
+// Test that if we feed the replay with a mismatched node list, we die in
+// the LogReader constructor.
+TEST_F(MultinodeLoggerDeathTest, MultiNodeBadReplayConfig) {
+ const ::std::string tmpdir(getenv("TEST_TMPDIR"));
+ const ::std::string logfile_base = tmpdir + "/multi_logfile";
+ const ::std::string logfile1 = logfile_base + "_pi1_data.bfbs";
+ const ::std::string logfile2 =
+ logfile_base + "_pi2_data/test/aos.examples.Pong.bfbs";
+ const ::std::string logfile3 = logfile_base + "_pi2_data.bfbs";
+
+ // Remove them.
+ unlink(logfile1.c_str());
+ unlink(logfile2.c_str());
+ unlink(logfile3.c_str());
+
+ LOG(INFO) << "Logging data to " << logfile1 << ", " << logfile2 << " and "
+ << logfile3;
+
+ {
+ std::unique_ptr<EventLoop> ping_event_loop =
+ event_loop_factory_.MakeEventLoop("ping", pi1_);
+ Ping ping(ping_event_loop.get());
+ std::unique_ptr<EventLoop> pong_event_loop =
+ event_loop_factory_.MakeEventLoop("pong", pi2_);
+ Pong pong(pong_event_loop.get());
+
+ std::unique_ptr<EventLoop> pi1_logger_event_loop =
+ event_loop_factory_.MakeEventLoop("logger", pi1_);
+ std::unique_ptr<LogNamer> pi1_log_namer =
+ std::make_unique<MultiNodeLogNamer>(
+ logfile_base, pi1_logger_event_loop->configuration(),
+ pi1_logger_event_loop->node());
+
+ std::unique_ptr<EventLoop> pi2_logger_event_loop =
+ event_loop_factory_.MakeEventLoop("logger", pi2_);
+ std::unique_ptr<LogNamer> pi2_log_namer =
+ std::make_unique<MultiNodeLogNamer>(
+ logfile_base, pi2_logger_event_loop->configuration(),
+ pi2_logger_event_loop->node());
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ Logger pi1_logger(std::move(pi1_log_namer), pi1_logger_event_loop.get(),
+ chrono::milliseconds(100));
+
+ Logger pi2_logger(std::move(pi2_log_namer), pi2_logger_event_loop.get(),
+ chrono::milliseconds(100));
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ }
+
+  // Test that, if we add an additional node to the replay config, the
+  // logger complains about the mismatch in the number of nodes.
+ FlatbufferDetachedBuffer<Configuration> extra_nodes_config =
+ configuration::MergeWithConfig(&config_.message(), R"({
+ "nodes": [
+ {
+ "name": "extra-node"
+ }
+ ]
+ }
+ )");
+
+ EXPECT_DEATH(LogReader({std::vector<std::string>{logfile1},
+ std::vector<std::string>{logfile3}},
+ &extra_nodes_config.message()),
+ "Log file and replay config need to have matching nodes lists.");
+ ;
+}
+
// Tests that we can read log files where they don't start at the same monotonic
// time.
TEST_F(MultinodeLoggerTest, StaggeredStart) {
diff --git a/aos/flatbuffer_introspection.cc b/aos/flatbuffer_introspection.cc
index b72d059..6f32424 100644
--- a/aos/flatbuffer_introspection.cc
+++ b/aos/flatbuffer_introspection.cc
@@ -70,7 +70,8 @@
const reflection::Object *obj,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- const ObjT *object, size_t max_vector_size, std::stringstream *out);
+ const ObjT *object, size_t max_vector_size, std::stringstream *out,
+ bool multi_line = false, int tree_depth = 0);
// Get enum value name
const char *EnumToString(
@@ -114,13 +115,34 @@
}
}
+// Adds a newline and indents to the given tree depth.
+// Every increment in tree depth adds two spaces of indentation.
+void AddWrapping(std::stringstream *out, int tree_depth) {
+ *out << "\n";
+ for (int i = 0; i < tree_depth; i++) {
+ *out << " ";
+ }
+}
+
+// Detects if a field should trigger wrapping of the parent object.
+bool ShouldCauseWrapping(reflection::BaseType type) {
+ switch (type) {
+ case BaseType::Vector:
+ case BaseType::Obj:
+ return true;
+ default:
+ return false;
+ }
+}
+
// Print field in flatbuffer table. Field must be populated.
template <typename ObjT>
void FieldToString(
const ObjT *table, const reflection::Field *field,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- size_t max_vector_size, std::stringstream *out) {
+ size_t max_vector_size, std::stringstream *out, bool multi_line,
+ int tree_depth) {
const reflection::Type *type = field->type();
switch (type->base_type()) {
@@ -187,10 +209,25 @@
*out << "[ ... " << vector->size() << " elements ... ]";
break;
}
+
+ bool wrap = false;
+ const int child_tree_depth = tree_depth + 1;
+
+ if (multi_line) {
+ wrap = ShouldCauseWrapping(elem_type);
+ }
+
*out << '[';
for (flatbuffers::uoffset_t i = 0; i < vector->size(); ++i) {
if (i != 0) {
- *out << ", ";
+ if (wrap) {
+ *out << ",";
+ } else {
+ *out << ", ";
+ }
+ }
+ if (wrap) {
+ AddWrapping(out, child_tree_depth);
}
if (flatbuffers::IsInteger(elem_type)) {
IntOrEnumToString(
@@ -211,16 +248,20 @@
flatbuffers::GetAnyVectorElemAddressOf<
const flatbuffers::Struct>(
vector, i, objects->Get(type->index())->bytesize()),
- max_vector_size, out);
+ max_vector_size, out, multi_line, child_tree_depth);
} else {
ObjectToString(objects->Get(type->index()), objects, enums,
flatbuffers::GetAnyVectorElemPointer<
const flatbuffers::Table>(vector, i),
- max_vector_size, out);
+ max_vector_size, out, multi_line,
+ child_tree_depth);
}
}
}
}
+ if (wrap) {
+ AddWrapping(out, tree_depth);
+ }
*out << ']';
} else {
*out << "null";
@@ -231,11 +272,11 @@
if (objects->Get(type->index())->is_struct()) {
ObjectToString(objects->Get(type->index()), objects, enums,
flatbuffers::GetFieldStruct(*table, *field),
- max_vector_size, out);
+ max_vector_size, out, multi_line, tree_depth);
} else if constexpr (std::is_same<flatbuffers::Table, ObjT>()) {
ObjectToString(objects->Get(type->index()), objects, enums,
flatbuffers::GetFieldT(*table, *field),
- max_vector_size, out);
+ max_vector_size, out, multi_line, tree_depth);
}
} else {
*out << "null";
@@ -253,32 +294,63 @@
const reflection::Object *obj,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects,
const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums,
- const ObjT *object, size_t max_vector_size, std::stringstream *out) {
+ const ObjT *object, size_t max_vector_size, std::stringstream *out,
+ bool multi_line, int tree_depth) {
static_assert(std::is_same<flatbuffers::Table, ObjT>() ||
std::is_same<flatbuffers::Struct, ObjT>(),
"Type must be either flatbuffer table or struct");
bool print_sep = false;
+
+ const int child_tree_depth = tree_depth + 1;
+
+ bool wrap = false;
+ if (multi_line) {
+    // Check whether this object has any objects or vectors inside of it.
+ for (const reflection::Field *field : *obj->fields()) {
+ if (ShouldCauseWrapping(field->type()->base_type())) {
+ wrap = true;
+ break;
+ }
+ }
+ }
+
*out << '{';
for (const reflection::Field *field : *obj->fields()) {
// Check whether this object has the field populated (even for structs,
// which should have all fields populated)
if (object->GetAddressOf(field->offset())) {
if (print_sep) {
- *out << ", ";
+ if (wrap) {
+ *out << ",";
+ } else {
+ *out << ", ";
+ }
} else {
print_sep = true;
}
+
+ if (wrap) {
+ AddWrapping(out, child_tree_depth);
+ }
+
*out << '"' << field->name()->c_str() << "\": ";
- FieldToString(object, field, objects, enums, max_vector_size, out);
+ FieldToString(object, field, objects, enums, max_vector_size, out,
+ multi_line, child_tree_depth);
}
}
+
+ if (wrap) {
+ AddWrapping(out, tree_depth);
+ }
+
*out << '}';
}
} // namespace
std::string FlatbufferToJson(const reflection::Schema *schema,
- const uint8_t *data, size_t max_vector_size) {
+ const uint8_t *data, bool multi_line,
+ size_t max_vector_size) {
const flatbuffers::Table *table = flatbuffers::GetAnyRoot(data);
const reflection::Object *obj = schema->root_table();
@@ -286,7 +358,7 @@
std::stringstream out;
ObjectToString(obj, schema->objects(), schema->enums(), table,
- max_vector_size, &out);
+ max_vector_size, &out, multi_line);
return out.str();
}
diff --git a/aos/flatbuffer_introspection_test.cc b/aos/flatbuffer_introspection_test.cc
index 5cc26fb..4214202 100644
--- a/aos/flatbuffer_introspection_test.cc
+++ b/aos/flatbuffer_introspection_test.cc
@@ -357,9 +357,112 @@
builder.Finish(config_builder.Finish());
- std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), 100);
+ std::string out =
+ FlatbufferToJson(schema_, builder.GetBufferPointer(), false, 100);
EXPECT_EQ(out, "{\"vector_foo_int\": [ ... 101 elements ... ]}");
}
+TEST_F(FlatbufferIntrospectionTest, MultilineTest) {
+ flatbuffers::FlatBufferBuilder builder;
+ ConfigurationBuilder config_builder(builder);
+
+ config_builder.add_foo_bool(true);
+ config_builder.add_foo_int(-20);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"foo_bool\": true,\n"
+ " \"foo_int\": -20\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineStructTest) {
+ flatbuffers::FlatBufferBuilder builder;
+ ConfigurationBuilder config_builder(builder);
+
+ FooStructNested foo_struct2(10);
+ FooStruct foo_struct(5, foo_struct2);
+
+ config_builder.add_foo_struct(&foo_struct);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"foo_struct\": {\n"
+ " \"foo_byte\": 5,\n"
+ " \"nested_struct\": {\"foo_byte\": 10}\n"
+ " }\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineVectorStructTest) {
+ flatbuffers::FlatBufferBuilder builder;
+
+ FooStructNested foo_struct2(1);
+
+ auto structs = builder.CreateVectorOfStructs(
+ std::vector<FooStruct>({{5, foo_struct2}, {10, foo_struct2}}));
+
+ ConfigurationBuilder config_builder(builder);
+ config_builder.add_vector_foo_struct(structs);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"vector_foo_struct\": [\n"
+ " {\n"
+ " \"foo_byte\": 5,\n"
+ " \"nested_struct\": {\"foo_byte\": 1}\n"
+ " },\n"
+ " {\n"
+ " \"foo_byte\": 10,\n"
+ " \"nested_struct\": {\"foo_byte\": 1}\n"
+ " }\n"
+ " ]\n"
+ "}");
+}
+
+TEST_F(FlatbufferIntrospectionTest, MultilineVectorScalarTest) {
+ flatbuffers::FlatBufferBuilder builder;
+
+  // Flatbuffers don't like creating vectors simultaneously with the table, so
+  // create the vectors first.
+ auto foo_ints =
+ builder.CreateVector<int32_t>({-300, -200, -100, 0, 100, 200, 300});
+
+ auto foo_floats =
+ builder.CreateVector<float>({0.0, 1.0 / 9.0, 2.0 / 9.0, 3.0 / 9.0});
+ auto foo_doubles =
+ builder.CreateVector<double>({0, 1.0 / 9.0, 2.0 / 9.0, 3.0 / 9.0});
+
+ ConfigurationBuilder config_builder(builder);
+
+ config_builder.add_vector_foo_int(foo_ints);
+ config_builder.add_vector_foo_float(foo_floats);
+ config_builder.add_vector_foo_double(foo_doubles);
+
+ builder.Finish(config_builder.Finish());
+
+ std::string out = FlatbufferToJson(schema_, builder.GetBufferPointer(), true);
+
+ EXPECT_EQ(out,
+ "{\n"
+ " \"vector_foo_double\": [0, 0.111111111111111, "
+ "0.222222222222222, 0.333333333333333],\n"
+ " \"vector_foo_float\": [0, 0.111111, 0.222222, 0.333333],\n"
+ " \"vector_foo_int\": [-300, -200, -100, 0, 100, 200, 300]\n"
+ "}");
+}
+
} // namespace testing
} // namespace aos
diff --git a/aos/ipc_lib/lockless_queue.cc b/aos/ipc_lib/lockless_queue.cc
index 728d2d1..02aebcb 100644
--- a/aos/ipc_lib/lockless_queue.cc
+++ b/aos/ipc_lib/lockless_queue.cc
@@ -276,6 +276,50 @@
// Everything should be zero initialized already. So we just need to fill
// everything out properly.
+ // This is the UID we will use for checking signal-sending permission
+ // compatibility.
+ //
+ // The manpage says:
+ // For a process to have permission to send a signal, it must either be
+ // privileged [...], or the real or effective user ID of the sending process
+ // must equal the real or saved set-user-ID of the target process.
+ //
+ // Processes typically initialize a queue in random order as they start up.
+ // This means we need an algorithm for verifying all processes have
+ // permissions to send each other signals which gives the same answer no
+ // matter what order they attach in. We would also like to avoid maintaining a
+ // shared list of the UIDs of all processes.
+ //
+ // To do this while still giving sufficient flexibility for all current use
+ // cases, we track a single UID for the queue. All processes with a matching
+ // euid+suid must have this UID. Any processes with distinct euid/suid must
+ // instead have a matching ruid. This guarantees signals can be sent between
+ // all processes attached to the queue.
+ //
+ // In particular, this allows a process to change only its euid (to interact
+ // with a queue) while still maintaining privileges via its ruid. However, it
+ // can only use privileges in ways that do not require changing the euid back,
+ // because while the euid is different it will not be able to receive signals.
+ // We can't actually verify that, but we can sanity check that things are
+ // valid when the queue is initialized.
+
+ uid_t uid;
+ {
+ uid_t ruid, euid, suid;
+ PCHECK(getresuid(&ruid, &euid, &suid) == 0);
+ // If these are equal, then use them, even if that's different from the real
+ // UID. This allows processes to keep a real UID of 0 (to have permissions
+ // to perform system-level changes) while still being able to communicate
+ // with processes running unprivileged as a distinct user.
+ if (euid == suid) {
+ uid = euid;
+ VLOG(1) << "Using euid==suid " << uid;
+ } else {
+ uid = ruid;
+ VLOG(1) << "Using ruid " << ruid;
+ }
+ }
+
// Grab the mutex. We don't care if the previous reader died. We are going
// to check everything anyways.
GrabQueueSetupLockOrDie grab_queue_setup_lock(memory);
@@ -306,7 +350,7 @@
}
memory->next_queue_index.Invalidate();
- memory->uid = getuid();
+ memory->uid = uid;
for (size_t i = 0; i < memory->num_senders(); ++i) {
::aos::ipc_lib::Sender *s = memory->GetSender(i);
@@ -321,7 +365,7 @@
// redo initialization.
memory->initialized = true;
} else {
- CHECK_EQ(getuid(), memory->uid) << ": UIDs must match for all processes";
+ CHECK_EQ(uid, memory->uid) << ": UIDs must match for all processes";
}
return memory;
diff --git a/aos/json_to_flatbuffer.h b/aos/json_to_flatbuffer.h
index 37560ec..e852ead 100644
--- a/aos/json_to_flatbuffer.h
+++ b/aos/json_to_flatbuffer.h
@@ -79,7 +79,7 @@
std::string FlatbufferToJson(const reflection::Schema *const schema,
const uint8_t *const data,
- size_t max_vector_size = SIZE_MAX);
+ bool multi_line = false, size_t max_vector_size = SIZE_MAX);
} // namespace aos
diff --git a/aos/util/file.cc b/aos/util/file.cc
index b334ded..089efbc 100644
--- a/aos/util/file.cc
+++ b/aos/util/file.cc
@@ -66,5 +66,10 @@
PCHECK(result == 0) << ": Error creating " << folder;
}
+bool PathExists(std::string_view path) {
+ struct stat buffer;
+ return stat(path.data(), &buffer) == 0;
+}
+
} // namespace util
} // namespace aos
diff --git a/aos/util/file.h b/aos/util/file.h
index d6724af..9ee2fb4 100644
--- a/aos/util/file.h
+++ b/aos/util/file.h
@@ -17,6 +17,8 @@
void MkdirP(std::string_view path, mode_t mode);
+bool PathExists(std::string_view path);
+
} // namespace util
} // namespace aos
diff --git a/aos/util/file_test.cc b/aos/util/file_test.cc
index fa259e8..6851302 100644
--- a/aos/util/file_test.cc
+++ b/aos/util/file_test.cc
@@ -26,6 +26,19 @@
EXPECT_EQ(my_pid, stat.substr(0, my_pid.size()));
}
+// Tests that the PathExists function works under normal conditions.
+TEST(FileTest, PathExistsTest) {
+ const std::string tmpdir(getenv("TEST_TMPDIR"));
+ const std::string test_file = tmpdir + "/test_file";
+ // Make sure the test_file doesn't exist.
+ unlink(test_file.c_str());
+ EXPECT_FALSE(PathExists(test_file));
+
+ WriteStringToFileOrDie(test_file, "abc");
+
+ EXPECT_TRUE(PathExists(test_file));
+}
+
} // namespace testing
} // namespace util
} // namespace aos
diff --git a/build_tests/BUILD b/build_tests/BUILD
index 28dc677..e837390 100644
--- a/build_tests/BUILD
+++ b/build_tests/BUILD
@@ -124,3 +124,10 @@
srcs_version = "PY2AND3",
deps = ["@opencv_contrib_nonfree_amd64//:python_opencv"],
)
+
+py_test(
+ name = "python_jinja2",
+ srcs = ["python_jinja2.py"],
+ srcs_version = "PY2AND3",
+ deps = ["@python_jinja2"],
+)
diff --git a/build_tests/python_jinja2.py b/build_tests/python_jinja2.py
new file mode 100644
index 0000000..a926e31
--- /dev/null
+++ b/build_tests/python_jinja2.py
@@ -0,0 +1,4 @@
+#!/usr/bin/python3
+
+# Confirm that we can import jinja2.
+import jinja2
diff --git a/debian/python_jinja2.BUILD b/debian/python_jinja2.BUILD
new file mode 100644
index 0000000..1adab6a
--- /dev/null
+++ b/debian/python_jinja2.BUILD
@@ -0,0 +1,7 @@
+py_library(
+ name = "python_jinja2",
+ srcs = glob(["src/jinja2/*.py"]),
+ imports = ["src/"],
+ visibility = ["//visibility:public"],
+ deps = ["@python_markupsafe"],
+)
diff --git a/debian/python_markupsafe.BUILD b/debian/python_markupsafe.BUILD
new file mode 100644
index 0000000..87f10ea
--- /dev/null
+++ b/debian/python_markupsafe.BUILD
@@ -0,0 +1,6 @@
+py_library(
+ name = "python_markupsafe",
+ srcs = glob(["src/markupsafe/*.py"]),
+ imports = ["src/"],
+ visibility = ["//visibility:public"],
+)
diff --git a/y2020/constants.cc b/y2020/constants.cc
index eefc158..131b0de 100644
--- a/y2020/constants.cc
+++ b/y2020/constants.cc
@@ -26,6 +26,7 @@
const uint16_t kCompTeamNumber = 971;
const uint16_t kPracticeTeamNumber = 9971;
+const uint16_t kSpareRoborioTeamNumber = 6971;
const Values *DoGetValuesForTeam(uint16_t team) {
Values *const r = new Values();
@@ -99,6 +100,7 @@
switch (team) {
// A set of constants for tests.
case 1:
+ case kSpareRoborioTeamNumber:
break;
case kCompTeamNumber:
diff --git a/y2020/y2020_roborio.json b/y2020/y2020_roborio.json
index 7de7f51..b63982d 100644
--- a/y2020/y2020_roborio.json
+++ b/y2020/y2020_roborio.json
@@ -263,6 +263,7 @@
"hostname": "roborio",
"hostnames": [
"roboRIO-971-FRC",
+ "roboRIO-6971-FRC",
"roboRIO-7971-FRC",
"roboRIO-8971-FRC",
"roboRIO-9971-FRC"