Merge "Write april tag image coordinates to flatbuffer"
diff --git a/BUILD b/BUILD
index a568e20..413620d 100644
--- a/BUILD
+++ b/BUILD
@@ -35,6 +35,8 @@
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/refresh_match_list_response //scouting/webserver/requests/messages:refresh_match_list_response_go_fbs
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule //scouting/webserver/requests/messages:request_shift_schedule_go_fbs
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response //scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_actions //scouting/webserver/requests/messages:submit_actions_go_fbs
+# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_actions_response //scouting/webserver/requests/messages:submit_actions_response_go_fbs
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule //scouting/webserver/requests/messages:submit_shift_schedule_go_fbs
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_shift_schedule_response //scouting/webserver/requests/messages:submit_shift_schedule_response_go_fbs
# gazelle:resolve go github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_driver_ranking //scouting/webserver/requests/messages:submit_driver_ranking_go_fbs
diff --git a/aos/events/event_loop.cc b/aos/events/event_loop.cc
index f5e9f51..c29d820 100644
--- a/aos/events/event_loop.cc
+++ b/aos/events/event_loop.cc
@@ -236,7 +236,8 @@
CHECK(taken_senders_.find(channel) == taken_senders_.end())
<< ": " << configuration::CleanedChannelToString(channel)
- << " is already being used.";
+ << " is already being used for sending. Can't make a watcher on the "
+ "same event loop.";
auto result = taken_watchers_.insert(channel);
CHECK(result.second) << ": " << configuration::CleanedChannelToString(channel)
diff --git a/aos/events/logging/buffer_encoder.cc b/aos/events/logging/buffer_encoder.cc
index ae20440..4b3ccf3 100644
--- a/aos/events/logging/buffer_encoder.cc
+++ b/aos/events/logging/buffer_encoder.cc
@@ -32,8 +32,8 @@
const size_t input_buffer_initial_size = input_buffer_.size();
input_buffer_.resize(input_buffer_initial_size + copy->size());
- const size_t written_size =
- copy->Copy(input_buffer_.data() + input_buffer_initial_size);
+ const size_t written_size = copy->Copy(
+ input_buffer_.data() + input_buffer_initial_size, 0, copy->size());
DCHECK_EQ(written_size, copy->size());
total_bytes_ += written_size;
diff --git a/aos/events/logging/buffer_encoder.h b/aos/events/logging/buffer_encoder.h
index 4cd661d..34de00a 100644
--- a/aos/events/logging/buffer_encoder.h
+++ b/aos/events/logging/buffer_encoder.h
@@ -23,7 +23,8 @@
size_t size() const { return size_; }
-  // Writes size() bytes to data, and returns the data written.
-  [[nodiscard]] virtual size_t Copy(uint8_t *data) = 0;
+  // Copies bytes [start_byte, end_byte) to data and returns the byte count.
+  [[nodiscard]] virtual size_t Copy(uint8_t *data, size_t start_byte,
+                                    size_t end_byte) = 0;
private:
size_t size_;
@@ -35,12 +36,15 @@
public:
SpanCopier(absl::Span<const uint8_t> data)
: Copier(data.size()), data_(data) {
- CHECK(data_.data());
+ CHECK(data_.data() != nullptr);
}
- size_t Copy(uint8_t *data) final {
- std::memcpy(data, data_.data(), data_.size());
- return data_.size();
+ size_t Copy(uint8_t *data, size_t start_byte, size_t end_byte) final {
+ DCHECK_LE(start_byte, end_byte);
+ DCHECK_LE(end_byte, data_.size());
+
+ std::memcpy(data, data_.data() + start_byte, end_byte - start_byte);
+ return end_byte - start_byte;
}
private:
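
The new Copier::Copy() contract above takes an explicit [start_byte, end_byte) window plus a destination pointer for that window, so an encoder can pull a message out of a Copier in several slices instead of one shot. A minimal sketch of that calling pattern (the helper below is purely illustrative and not part of this change; the Pack*Inline-backed copiers added later additionally expect 8-byte-aligned slice boundaries):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    #include "aos/events/logging/buffer_encoder.h"

    // Walks a Copier in fixed-size slices.  Each Copy() call writes the bytes
    // for [offset, end) starting at out + offset and returns how many bytes it
    // actually wrote.
    void CopyInChunks(aos::logger::DataEncoder::Copier *copier, uint8_t *out,
                      size_t chunk_size) {
      size_t offset = 0;
      const size_t total = copier->size();
      while (offset < total) {
        const size_t end = std::min(total, offset + chunk_size);
        offset += copier->Copy(out + offset, offset, end);
      }
    }
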
diff --git a/aos/events/logging/buffer_encoder_param_test.h b/aos/events/logging/buffer_encoder_param_test.h
index e6c4867..9e7882d 100644
--- a/aos/events/logging/buffer_encoder_param_test.h
+++ b/aos/events/logging/buffer_encoder_param_test.h
@@ -20,15 +20,12 @@
public:
static constexpr size_t kMaxMessageSize = 2 * 1024 * 1024;
- class DetachedBufferCopier : public DataEncoder::Copier {
+ class DetachedBufferCopier : public DataEncoder::SpanCopier {
public:
DetachedBufferCopier(flatbuffers::DetachedBuffer &&data)
- : DataEncoder::Copier(data.size()), data_(std::move(data)) {}
-
- size_t Copy(uint8_t *data) final {
- std::memcpy(data, data_.data(), data_.size());
- return data_.size();
- }
+ : DataEncoder::SpanCopier(
+ absl::Span<const uint8_t>(data.data(), data.size())),
+ data_(std::move(data)) {}
private:
const flatbuffers::DetachedBuffer data_;
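
The initializer order above is what makes this safe: the span handed to SpanCopier is captured from the DetachedBuffer before the move, and moving a flatbuffers::DetachedBuffer transfers that same allocation into data_, so the span keeps pointing at live storage. The same pattern restated as a standalone sketch (the class name is hypothetical):

    #include <cstdint>

    #include "absl/types/span.h"
    #include "aos/events/logging/buffer_encoder.h"
    #include "flatbuffers/flatbuffers.h"

    // Capture the span first, then take ownership of the buffer whose storage
    // the span points into.
    class OwningSpanCopier : public aos::logger::DataEncoder::SpanCopier {
     public:
      explicit OwningSpanCopier(flatbuffers::DetachedBuffer &&buffer)
          : SpanCopier(absl::Span<const uint8_t>(buffer.data(), buffer.size())),
            buffer_(std::move(buffer)) {}

     private:
      const flatbuffers::DetachedBuffer buffer_;
    };
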
diff --git a/aos/events/logging/buffer_encoder_test.cc b/aos/events/logging/buffer_encoder_test.cc
index 8e12e37..5f3ecab 100644
--- a/aos/events/logging/buffer_encoder_test.cc
+++ b/aos/events/logging/buffer_encoder_test.cc
@@ -115,4 +115,33 @@
}),
::testing::Range(0, 100)));
+// Tests that SpanCopier copies as expected.
+TEST(SpanCopierTest, Matches) {
+ std::vector<uint8_t> data;
+ for (int i = 0; i < 32; ++i) {
+ data.push_back(i);
+ }
+
+ CHECK_EQ(data.size(), 32u);
+
+ for (int i = 0; i < 32; i += 8) {
+ for (int j = i; j < 32; j += 8) {
+ std::vector<uint8_t> destination(data.size(), 0);
+ DataEncoder::SpanCopier copier(
+ absl::Span<const uint8_t>(data.data(), data.size()));
+
+ copier.Copy(destination.data(), i, j);
+
+ size_t index = 0;
+ for (int k = i; k < j; ++k) {
+ EXPECT_EQ(destination[index], k);
+ ++index;
+ }
+ for (; index < destination.size(); ++index) {
+ EXPECT_EQ(destination[index], 0u);
+ }
+ }
+ }
+}
+
} // namespace aos::logger::testing
diff --git a/aos/events/logging/log_edit.cc b/aos/events/logging/log_edit.cc
index e3cfc3a..dca56bb 100644
--- a/aos/events/logging/log_edit.cc
+++ b/aos/events/logging/log_edit.cc
@@ -51,7 +51,10 @@
aos::logger::DetachedBufferWriter buffer_writer(
FLAGS_logfile,
std::make_unique<aos::logger::DummyEncoder>(FLAGS_max_message_size));
- buffer_writer.QueueSpan(header.span());
+ {
+ aos::logger::DataEncoder::SpanCopier coppier(header.span());
+ buffer_writer.CopyMessage(&coppier, aos::monotonic_clock::min_time);
+ }
while (true) {
absl::Span<const uint8_t> msg_data = span_reader.ReadMessage();
@@ -59,7 +62,10 @@
break;
}
- buffer_writer.QueueSpan(msg_data);
+ {
+ aos::logger::DataEncoder::SpanCopier coppier(msg_data);
+ buffer_writer.CopyMessage(&coppier, aos::monotonic_clock::min_time);
+ }
}
} else {
aos::logger::MessageReader reader(FLAGS_logfile);
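
These two hunks are the migration recipe for the QueueSpan() removal that lands in logfile_utils.h further down: wrap the span in a scoped SpanCopier and hand it to CopyMessage(). Sketched in isolation (the writer, span, and timestamp choice below are placeholders; this change uses min_time for headers here and now() elsewhere):

    // Before this change:
    //   writer.QueueSpan(span);
    // After this change:
    {
      aos::logger::DataEncoder::SpanCopier copier(span);
      writer.CopyMessage(&copier, aos::monotonic_clock::now());
    }
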
diff --git a/aos/events/logging/log_namer.cc b/aos/events/logging/log_namer.cc
index 120bd79..c0c7c73 100644
--- a/aos/events/logging/log_namer.cc
+++ b/aos/events/logging/log_namer.cc
@@ -248,7 +248,8 @@
header, {.multi_line = false, .max_vector_size = 100});
CHECK(writer);
- writer->QueueSpan(header.span());
+ DataEncoder::SpanCopier coppier(header.span());
+ writer->CopyMessage(&coppier, aos::monotonic_clock::now());
header_written_ = true;
monotonic_start_time_ = log_namer_->monotonic_start_time(
node_index_, state_[node_index_].boot_uuid);
@@ -606,7 +607,8 @@
std::make_unique<DetachedBufferWriter>(
filename, encoder_factory_(header->span().size()));
- writer->QueueSpan(header->span());
+ DataEncoder::SpanCopier coppier(header->span());
+ writer->CopyMessage(&coppier, aos::monotonic_clock::now());
if (!writer->ran_out_of_space()) {
all_filenames_.emplace_back(
diff --git a/aos/events/logging/log_writer.cc b/aos/events/logging/log_writer.cc
index 5671a3f..84f7503 100644
--- a/aos/events/logging/log_writer.cc
+++ b/aos/events/logging/log_writer.cc
@@ -766,64 +766,6 @@
}
}
-// Class to copy a context into the provided buffer.
-class ContextDataCopier : public DataEncoder::Copier {
- public:
- ContextDataCopier(const Context &context, int channel_index, LogType log_type,
- EventLoop *event_loop)
- : DataEncoder::Copier(PackMessageSize(log_type, context.size)),
- context_(context),
- channel_index_(channel_index),
- log_type_(log_type),
- event_loop_(event_loop) {}
-
- monotonic_clock::time_point end_time() const { return end_time_; }
-
- size_t Copy(uint8_t *data) final {
- size_t result =
- PackMessageInline(data, context_, channel_index_, log_type_);
- end_time_ = event_loop_->monotonic_now();
- return result;
- }
-
- private:
- const Context &context_;
- const int channel_index_;
- const LogType log_type_;
- EventLoop *event_loop_;
- monotonic_clock::time_point end_time_;
-};
-
-// Class to copy a RemoteMessage into the provided buffer.
-class RemoteMessageCopier : public DataEncoder::Copier {
- public:
- RemoteMessageCopier(const message_bridge::RemoteMessage *message,
- int channel_index,
- aos::monotonic_clock::time_point monotonic_timestamp_time,
- EventLoop *event_loop)
- : DataEncoder::Copier(PackRemoteMessageSize()),
- message_(message),
- channel_index_(channel_index),
- monotonic_timestamp_time_(monotonic_timestamp_time),
- event_loop_(event_loop) {}
-
- monotonic_clock::time_point end_time() const { return end_time_; }
-
- size_t Copy(uint8_t *data) final {
- size_t result = PackRemoteMessageInline(data, message_, channel_index_,
- monotonic_timestamp_time_);
- end_time_ = event_loop_->monotonic_now();
- return result;
- }
-
- private:
- const message_bridge::RemoteMessage *message_;
- int channel_index_;
- aos::monotonic_clock::time_point monotonic_timestamp_time_;
- EventLoop *event_loop_;
- monotonic_clock::time_point end_time_;
-};
-
void Logger::WriteData(NewDataWriter *writer, const FetcherStruct &f) {
if (writer != nullptr) {
const UUID source_node_boot_uuid =
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index ab56356..0918545 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -129,27 +129,12 @@
return *this;
}
-void DetachedBufferWriter::QueueSpan(absl::Span<const uint8_t> span) {
+void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *coppier,
+ aos::monotonic_clock::time_point now) {
if (ran_out_of_space_) {
// We don't want any later data to be written after space becomes
// available, so refuse to write anything more once we've dropped data
// because we ran out of space.
- VLOG(1) << "Ignoring span: " << span.size();
- return;
- }
-
- if (!encoder_->HasSpace(span.size())) {
- Flush();
- CHECK(encoder_->HasSpace(span.size()));
- }
- DataEncoder::SpanCopier coppier(span);
- encoder_->Encode(&coppier);
- FlushAtThreshold(aos::monotonic_clock::now());
-}
-
-void DetachedBufferWriter::CopyMessage(DataEncoder::Copier *coppier,
- aos::monotonic_clock::time_point now) {
- if (ran_out_of_space_) {
return;
}
@@ -338,73 +323,144 @@
size_t PackRemoteMessageInline(
uint8_t *buffer, const message_bridge::RemoteMessage *msg,
int channel_index,
- const aos::monotonic_clock::time_point monotonic_timestamp_time) {
+ const aos::monotonic_clock::time_point monotonic_timestamp_time,
+ size_t start_byte, size_t end_byte) {
const flatbuffers::uoffset_t message_size = PackRemoteMessageSize();
+ DCHECK_EQ((start_byte % 8u), 0u);
+ DCHECK_EQ((end_byte % 8u), 0u);
+ DCHECK_LE(start_byte, end_byte);
+ DCHECK_LE(end_byte, message_size);
- // clang-format off
- // header:
- // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
- buffer = Push<flatbuffers::uoffset_t>(
- buffer, message_size - sizeof(flatbuffers::uoffset_t));
- // +0x04 | 20 00 00 00 | UOffset32 | 0x00000020 (32) Loc: +0x24 | offset to root table `aos.logger.MessageHeader`
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x20);
- //
- // padding:
- // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
- buffer = Pad(buffer, 6);
- //
- // vtable (aos.logger.MessageHeader):
- // +0x0E | 16 00 | uint16_t | 0x0016 (22) | size of this vtable
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x16);
- // +0x10 | 3C 00 | uint16_t | 0x003C (60) | size of referring table
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x3c);
- // +0x12 | 38 00 | VOffset16 | 0x0038 (56) | offset to field `channel_index` (id: 0)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x38);
- // +0x14 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `monotonic_sent_time` (id: 1)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
- // +0x16 | 24 00 | VOffset16 | 0x0024 (36) | offset to field `realtime_sent_time` (id: 2)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x24);
- // +0x18 | 34 00 | VOffset16 | 0x0034 (52) | offset to field `queue_index` (id: 3)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
- // +0x1A | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
- // +0x1C | 1C 00 | VOffset16 | 0x001C (28) | offset to field `monotonic_remote_time` (id: 5)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
- // +0x1E | 14 00 | VOffset16 | 0x0014 (20) | offset to field `realtime_remote_time` (id: 6)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
- // +0x20 | 10 00 | VOffset16 | 0x0010 (16) | offset to field `remote_queue_index` (id: 7)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
- // +0x22 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `monotonic_timestamp_time` (id: 8)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
- //
- // root_table (aos.logger.MessageHeader):
- // +0x24 | 16 00 00 00 | SOffset32 | 0x00000016 (22) Loc: +0x0E | offset to vtable
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x16);
- // +0x28 | F6 0B D8 11 A4 A8 B1 71 | int64_t | 0x71B1A8A411D80BF6 (8192514619791117302) | table field `monotonic_timestamp_time` (Long)
- buffer = Push<int64_t>(buffer,
- monotonic_timestamp_time.time_since_epoch().count());
- // +0x30 | 00 00 00 00 | uint8_t[4] | .... | padding
- // TODO(austin): Can we re-arrange the order to ditch the padding?
- // (Answer is yes, but what is the impact elsewhere? It will change the
- // binary format)
- buffer = Pad(buffer, 4);
- // +0x34 | 75 00 00 00 | uint32_t | 0x00000075 (117) | table field `remote_queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, msg->remote_queue_index());
- // +0x38 | AA B0 43 0A 35 BE FA D2 | int64_t | 0xD2FABE350A43B0AA (-3244071446552268630) | table field `realtime_remote_time` (Long)
- buffer = Push<int64_t>(buffer, msg->realtime_remote_time());
- // +0x40 | D5 40 30 F3 C1 A7 26 1D | int64_t | 0x1D26A7C1F33040D5 (2100550727665467605) | table field `monotonic_remote_time` (Long)
- buffer = Push<int64_t>(buffer, msg->monotonic_remote_time());
- // +0x48 | 5B 25 32 A1 4A E8 46 CA | int64_t | 0xCA46E84AA132255B (-3871151422448720549) | table field `realtime_sent_time` (Long)
- buffer = Push<int64_t>(buffer, msg->realtime_sent_time());
- // +0x50 | 49 7D 45 1F 8C 36 6B A3 | int64_t | 0xA36B368C1F457D49 (-6671178447571288759) | table field `monotonic_sent_time` (Long)
- buffer = Push<int64_t>(buffer, msg->monotonic_sent_time());
- // +0x58 | 33 00 00 00 | uint32_t | 0x00000033 (51) | table field `queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, msg->queue_index());
- // +0x5C | 76 00 00 00 | uint32_t | 0x00000076 (118) | table field `channel_index` (UInt)
- buffer = Push<uint32_t>(buffer, channel_index);
- // clang-format on
+ switch (start_byte) {
+ case 0x00u:
+ if ((end_byte) == 0x00u) {
+ break;
+ }
+ // clang-format off
+ // header:
+ // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
+ buffer = Push<flatbuffers::uoffset_t>(
+ buffer, message_size - sizeof(flatbuffers::uoffset_t));
+ // +0x04 | 20 00 00 00 | UOffset32 | 0x00000020 (32) Loc: +0x24 | offset to root table `aos.logger.MessageHeader`
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x20);
+ [[fallthrough]];
+ case 0x08u:
+ if ((end_byte) == 0x08u) {
+ break;
+ }
+ //
+ // padding:
+ // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
+ buffer = Pad(buffer, 6);
+ //
+ // vtable (aos.logger.MessageHeader):
+ // +0x0E | 16 00 | uint16_t | 0x0016 (22) | size of this vtable
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x16);
+ [[fallthrough]];
+ case 0x10u:
+ if ((end_byte) == 0x10u) {
+ break;
+ }
+ // +0x10 | 3C 00 | uint16_t | 0x003C (60) | size of referring table
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x3c);
+ // +0x12 | 38 00 | VOffset16 | 0x0038 (56) | offset to field `channel_index` (id: 0)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x38);
+ // +0x14 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `monotonic_sent_time` (id: 1)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
+ // +0x16 | 24 00 | VOffset16 | 0x0024 (36) | offset to field `realtime_sent_time` (id: 2)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x24);
+ [[fallthrough]];
+ case 0x18u:
+ if ((end_byte) == 0x18u) {
+ break;
+ }
+ // +0x18 | 34 00 | VOffset16 | 0x0034 (52) | offset to field `queue_index` (id: 3)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
+ // +0x1A | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
+ // +0x1C | 1C 00 | VOffset16 | 0x001C (28) | offset to field `monotonic_remote_time` (id: 5)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
+ // +0x1E | 14 00 | VOffset16 | 0x0014 (20) | offset to field `realtime_remote_time` (id: 6)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
+ [[fallthrough]];
+ case 0x20u:
+ if ((end_byte) == 0x20u) {
+ break;
+ }
+ // +0x20 | 10 00 | VOffset16 | 0x0010 (16) | offset to field `remote_queue_index` (id: 7)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
+ // +0x22 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `monotonic_timestamp_time` (id: 8)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
+ //
+ // root_table (aos.logger.MessageHeader):
+ // +0x24 | 16 00 00 00 | SOffset32 | 0x00000016 (22) Loc: +0x0E | offset to vtable
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x16);
+ [[fallthrough]];
+ case 0x28u:
+ if ((end_byte) == 0x28u) {
+ break;
+ }
+ // +0x28 | F6 0B D8 11 A4 A8 B1 71 | int64_t | 0x71B1A8A411D80BF6 (8192514619791117302) | table field `monotonic_timestamp_time` (Long)
+ buffer = Push<int64_t>(buffer,
+ monotonic_timestamp_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x30u:
+ if ((end_byte) == 0x30u) {
+ break;
+ }
+ // +0x30 | 00 00 00 00 | uint8_t[4] | .... | padding
+ // TODO(austin): Can we re-arrange the order to ditch the padding?
+ // (Answer is yes, but what is the impact elsewhere? It will change the
+ // binary format)
+ buffer = Pad(buffer, 4);
+ // +0x34 | 75 00 00 00 | uint32_t | 0x00000075 (117) | table field `remote_queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, msg->remote_queue_index());
+ [[fallthrough]];
+ case 0x38u:
+ if ((end_byte) == 0x38u) {
+ break;
+ }
+ // +0x38 | AA B0 43 0A 35 BE FA D2 | int64_t | 0xD2FABE350A43B0AA (-3244071446552268630) | table field `realtime_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, msg->realtime_remote_time());
+ [[fallthrough]];
+ case 0x40u:
+ if ((end_byte) == 0x40u) {
+ break;
+ }
+ // +0x40 | D5 40 30 F3 C1 A7 26 1D | int64_t | 0x1D26A7C1F33040D5 (2100550727665467605) | table field `monotonic_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, msg->monotonic_remote_time());
+ [[fallthrough]];
+ case 0x48u:
+ if ((end_byte) == 0x48u) {
+ break;
+ }
+ // +0x48 | 5B 25 32 A1 4A E8 46 CA | int64_t | 0xCA46E84AA132255B (-3871151422448720549) | table field `realtime_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, msg->realtime_sent_time());
+ [[fallthrough]];
+ case 0x50u:
+ if ((end_byte) == 0x50u) {
+ break;
+ }
+ // +0x50 | 49 7D 45 1F 8C 36 6B A3 | int64_t | 0xA36B368C1F457D49 (-6671178447571288759) | table field `monotonic_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, msg->monotonic_sent_time());
+ [[fallthrough]];
+ case 0x58u:
+ if ((end_byte) == 0x58u) {
+ break;
+ }
+ // +0x58 | 33 00 00 00 | uint32_t | 0x00000033 (51) | table field `queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, msg->queue_index());
+ // +0x5C | 76 00 00 00 | uint32_t | 0x00000076 (118) | table field `channel_index` (UInt)
+ buffer = Push<uint32_t>(buffer, channel_index);
+ // clang-format on
+ [[fallthrough]];
+ case 0x60u:
+ if ((end_byte) == 0x60u) {
+ break;
+ }
+ }
- return message_size;
+ return end_byte - start_byte;
}
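
What the start_byte/end_byte plumbing above buys is that the encoder can request a message in aligned slices, and stitching those slices together has to reproduce exactly what a single full-range call writes. A quick equivalence check, sketched as it might appear in a test body (msg and monotonic_timestamp_time are assumed to be a valid RemoteMessage pointer and timestamp, and <array> is assumed to be included):

    // Packing in 8-byte slices must match one full-range pack byte for byte.
    std::array<uint8_t, PackRemoteMessageSize()> whole{}, chunked{};
    PackRemoteMessageInline(whole.data(), msg, /*channel_index=*/7,
                            monotonic_timestamp_time, 0u,
                            PackRemoteMessageSize());
    for (size_t i = 0; i < PackRemoteMessageSize(); i += 8u) {
      PackRemoteMessageInline(chunked.data() + i, msg, /*channel_index=*/7,
                              monotonic_timestamp_time, i, i + 8u);
    }
    CHECK(whole == chunked);
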
flatbuffers::Offset<MessageHeader> PackMessage(
@@ -595,271 +651,565 @@
}
size_t PackMessageInline(uint8_t *buffer, const Context &context,
- int channel_index, LogType log_type) {
+ int channel_index, LogType log_type, size_t start_byte,
+ size_t end_byte) {
// TODO(austin): Figure out how to copy directly from shared memory instead of
// first into the fetcher's memory and then into here. That would save a lot
// of memory.
const flatbuffers::uoffset_t message_size =
PackMessageSize(log_type, context.size);
-
- buffer = Push<flatbuffers::uoffset_t>(
- buffer, message_size - sizeof(flatbuffers::uoffset_t));
+  DCHECK_EQ((message_size % 8), 0u) << ": Length is not a multiple of 8.";
+ DCHECK_EQ((start_byte % 8u), 0u);
+ DCHECK_EQ((end_byte % 8u), 0u);
+ DCHECK_LE(start_byte, end_byte);
+ DCHECK_LE(end_byte, message_size);
// Pack all the data in. This is brittle but easy to change. Use the
// InlinePackMessage.Equivilent unit test to verify everything matches.
switch (log_type) {
case LogType::kLogMessage:
- // clang-format off
- // header:
- // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
- // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
- //
- // padding:
- // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
- buffer = Pad(buffer, 6);
- //
- // vtable (aos.logger.MessageHeader):
- // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
- buffer = Push<flatbuffers::voffset_t>(buffer, 0xe);
- // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
- // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
- // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
- // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
- // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
- // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
- //
- // root_table (aos.logger.MessageHeader):
- // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0e);
- // +0x20 | B2 E4 EF 89 19 7D 7F 6F | int64_t | 0x6F7F7D1989EFE4B2 (8034277808894108850) | table field `realtime_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
- // +0x28 | 86 8D 92 65 FC 79 74 2B | int64_t | 0x2B7479FC65928D86 (3131261765872160134) | table field `monotonic_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
- // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0c);
- // +0x34 | 86 00 00 00 | uint32_t | 0x00000086 (134) | table field `queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.queue_index);
- // +0x38 | 71 00 00 00 | uint32_t | 0x00000071 (113) | table field `channel_index` (UInt)
- buffer = Push<uint32_t>(buffer, channel_index);
- //
- // vector (aos.logger.MessageHeader.data):
- // +0x3C | 0E 00 00 00 | uint32_t | 0x0000000E (14) | length of vector (# items)
- buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
- // +0x40 | FF | uint8_t | 0xFF (255) | value[0]
- // +0x41 | B8 | uint8_t | 0xB8 (184) | value[1]
- // +0x42 | EE | uint8_t | 0xEE (238) | value[2]
- // +0x43 | 00 | uint8_t | 0x00 (0) | value[3]
- // +0x44 | 20 | uint8_t | 0x20 (32) | value[4]
- // +0x45 | 4D | uint8_t | 0x4D (77) | value[5]
- // +0x46 | FF | uint8_t | 0xFF (255) | value[6]
- // +0x47 | 25 | uint8_t | 0x25 (37) | value[7]
- // +0x48 | 3C | uint8_t | 0x3C (60) | value[8]
- // +0x49 | 17 | uint8_t | 0x17 (23) | value[9]
- // +0x4A | 65 | uint8_t | 0x65 (101) | value[10]
- // +0x4B | 2F | uint8_t | 0x2F (47) | value[11]
- // +0x4C | 63 | uint8_t | 0x63 (99) | value[12]
- // +0x4D | 58 | uint8_t | 0x58 (88) | value[13]
- buffer = PushBytes(buffer, context.data, context.size);
- //
- // padding:
- // +0x4E | 00 00 | uint8_t[2] | .. | padding
- buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
- // clang-format on
+ switch (start_byte) {
+ case 0x00u:
+ if ((end_byte) == 0x00u) {
+ break;
+ }
+ // clang-format off
+ // header:
+ // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
+ buffer = Push<flatbuffers::uoffset_t>(
+ buffer, message_size - sizeof(flatbuffers::uoffset_t));
+
+ // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
+ [[fallthrough]];
+ case 0x08u:
+ if ((end_byte) == 0x08u) {
+ break;
+ }
+ //
+ // padding:
+ // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
+ buffer = Pad(buffer, 6);
+ //
+ // vtable (aos.logger.MessageHeader):
+ // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0xe);
+ [[fallthrough]];
+ case 0x10u:
+ if ((end_byte) == 0x10u) {
+ break;
+ }
+ // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
+ // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
+ // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
+ // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
+ [[fallthrough]];
+ case 0x18u:
+ if ((end_byte) == 0x18u) {
+ break;
+ }
+ // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
+ // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
+ //
+ // root_table (aos.logger.MessageHeader):
+ // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0e);
+ [[fallthrough]];
+ case 0x20u:
+ if ((end_byte) == 0x20u) {
+ break;
+ }
+ // +0x20 | B2 E4 EF 89 19 7D 7F 6F | int64_t | 0x6F7F7D1989EFE4B2 (8034277808894108850) | table field `realtime_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x28u:
+ if ((end_byte) == 0x28u) {
+ break;
+ }
+ // +0x28 | 86 8D 92 65 FC 79 74 2B | int64_t | 0x2B7479FC65928D86 (3131261765872160134) | table field `monotonic_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x30u:
+ if ((end_byte) == 0x30u) {
+ break;
+ }
+ // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0c);
+ // +0x34 | 86 00 00 00 | uint32_t | 0x00000086 (134) | table field `queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.queue_index);
+ [[fallthrough]];
+ case 0x38u:
+ if ((end_byte) == 0x38u) {
+ break;
+ }
+ // +0x38 | 71 00 00 00 | uint32_t | 0x00000071 (113) | table field `channel_index` (UInt)
+ buffer = Push<uint32_t>(buffer, channel_index);
+ //
+ // vector (aos.logger.MessageHeader.data):
+ // +0x3C | 0E 00 00 00 | uint32_t | 0x0000000E (14) | length of vector (# items)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
+ [[fallthrough]];
+ case 0x40u:
+ if ((end_byte) == 0x40u) {
+ break;
+ }
+ [[fallthrough]];
+ default:
+ // +0x40 | FF | uint8_t | 0xFF (255) | value[0]
+ // +0x41 | B8 | uint8_t | 0xB8 (184) | value[1]
+ // +0x42 | EE | uint8_t | 0xEE (238) | value[2]
+ // +0x43 | 00 | uint8_t | 0x00 (0) | value[3]
+ // +0x44 | 20 | uint8_t | 0x20 (32) | value[4]
+ // +0x45 | 4D | uint8_t | 0x4D (77) | value[5]
+ // +0x46 | FF | uint8_t | 0xFF (255) | value[6]
+ // +0x47 | 25 | uint8_t | 0x25 (37) | value[7]
+ // +0x48 | 3C | uint8_t | 0x3C (60) | value[8]
+ // +0x49 | 17 | uint8_t | 0x17 (23) | value[9]
+ // +0x4A | 65 | uint8_t | 0x65 (101) | value[10]
+ // +0x4B | 2F | uint8_t | 0x2F (47) | value[11]
+ // +0x4C | 63 | uint8_t | 0x63 (99) | value[12]
+ // +0x4D | 58 | uint8_t | 0x58 (88) | value[13]
+ //
+ // padding:
+ // +0x4E | 00 00 | uint8_t[2] | .. | padding
+ // clang-format on
+ if (start_byte <= 0x40 && end_byte == message_size) {
+ // The easy one, slap it all down.
+ buffer = PushBytes(buffer, context.data, context.size);
+ buffer =
+ Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
+ } else {
+ const size_t data_start_byte =
+ start_byte < 0x40 ? 0x0u : (start_byte - 0x40);
+ const size_t data_end_byte = end_byte - 0x40;
+ const size_t padded_size = ((context.size + 7) & 0xfffffff8u);
+ if (data_start_byte < padded_size) {
+ buffer = PushBytes(
+ buffer,
+ reinterpret_cast<const uint8_t *>(context.data) +
+ data_start_byte,
+ std::min(context.size, data_end_byte) - data_start_byte);
+ if (data_end_byte == padded_size) {
+ // We can only pad the last 7 bytes, so this only gets written
+ // if we write the last byte.
+ buffer = Pad(buffer,
+ ((context.size + 7) & 0xfffffff8u) - context.size);
+ }
+ }
+ }
+ break;
+ }
break;
case LogType::kLogDeliveryTimeOnly:
- // clang-format off
- // header:
- // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
- // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
- //
- // padding:
- // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
- buffer = Pad(buffer, 4);
- //
- // vtable (aos.logger.MessageHeader):
- // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
- // +0x0E | 30 00 | uint16_t | 0x0030 (48) | size of referring table
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
- // +0x10 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `channel_index` (id: 0)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
- // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
- // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
- // +0x16 | 28 00 | VOffset16 | 0x0028 (40) | offset to field `queue_index` (id: 3)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
- // +0x18 | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
- // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
- // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
- // +0x1E | 04 00 | VOffset16 | 0x0004 (4) | offset to field `remote_queue_index` (id: 7)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
- //
- // root_table (aos.logger.MessageHeader):
- // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
- // +0x24 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `remote_queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.remote_queue_index);
- // +0x28 | C6 85 F1 AB 83 B5 CD EB | int64_t | 0xEBCDB583ABF185C6 (-1455307527440726586) | table field `realtime_remote_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
- // +0x30 | 47 24 D3 97 1E 42 2D 99 | int64_t | 0x992D421E97D32447 (-7409193112790948793) | table field `monotonic_remote_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
- // +0x38 | C8 B9 A7 AB 79 F2 CD 60 | int64_t | 0x60CDF279ABA7B9C8 (6975498002251626952) | table field `realtime_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
- // +0x40 | EA 8F 2A 0F AF 01 7A AB | int64_t | 0xAB7A01AF0F2A8FEA (-6090553694679822358) | table field `monotonic_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
- // +0x48 | F5 00 00 00 | uint32_t | 0x000000F5 (245) | table field `queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.queue_index);
- // +0x4C | 88 00 00 00 | uint32_t | 0x00000088 (136) | table field `channel_index` (UInt)
- buffer = Push<uint32_t>(buffer, channel_index);
+ switch (start_byte) {
+ case 0x00u:
+ if ((end_byte) == 0x00u) {
+ break;
+ }
+ // clang-format off
+ // header:
+ // +0x00 | 4C 00 00 00 | UOffset32 | 0x0000004C (76) Loc: +0x4C | size prefix
+ buffer = Push<flatbuffers::uoffset_t>(
+ buffer, message_size - sizeof(flatbuffers::uoffset_t));
+ // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
- // clang-format on
+ [[fallthrough]];
+ case 0x08u:
+ if ((end_byte) == 0x08u) {
+ break;
+ }
+ //
+ // padding:
+ // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
+ buffer = Pad(buffer, 4);
+ //
+ // vtable (aos.logger.MessageHeader):
+ // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
+ // +0x0E | 30 00 | uint16_t | 0x0030 (48) | size of referring table
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
+ [[fallthrough]];
+ case 0x10u:
+ if ((end_byte) == 0x10u) {
+ break;
+ }
+ // +0x10 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `channel_index` (id: 0)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
+ // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
+ // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
+ // +0x16 | 28 00 | VOffset16 | 0x0028 (40) | offset to field `queue_index` (id: 3)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
+ [[fallthrough]];
+ case 0x18u:
+ if ((end_byte) == 0x18u) {
+ break;
+ }
+ // +0x18 | 00 00 | VOffset16 | 0x0000 (0) | offset to field `data` (id: 4) <null> (Vector)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x00);
+ // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
+ // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
+ // +0x1E | 04 00 | VOffset16 | 0x0004 (4) | offset to field `remote_queue_index` (id: 7)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
+ [[fallthrough]];
+ case 0x20u:
+ if ((end_byte) == 0x20u) {
+ break;
+ }
+ //
+ // root_table (aos.logger.MessageHeader):
+ // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
+ // +0x24 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `remote_queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.remote_queue_index);
+ [[fallthrough]];
+ case 0x28u:
+ if ((end_byte) == 0x28u) {
+ break;
+ }
+ // +0x28 | C6 85 F1 AB 83 B5 CD EB | int64_t | 0xEBCDB583ABF185C6 (-1455307527440726586) | table field `realtime_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x30u:
+ if ((end_byte) == 0x30u) {
+ break;
+ }
+ // +0x30 | 47 24 D3 97 1E 42 2D 99 | int64_t | 0x992D421E97D32447 (-7409193112790948793) | table field `monotonic_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x38u:
+ if ((end_byte) == 0x38u) {
+ break;
+ }
+ // +0x38 | C8 B9 A7 AB 79 F2 CD 60 | int64_t | 0x60CDF279ABA7B9C8 (6975498002251626952) | table field `realtime_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x40u:
+ if ((end_byte) == 0x40u) {
+ break;
+ }
+ // +0x40 | EA 8F 2A 0F AF 01 7A AB | int64_t | 0xAB7A01AF0F2A8FEA (-6090553694679822358) | table field `monotonic_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x48u:
+ if ((end_byte) == 0x48u) {
+ break;
+ }
+ // +0x48 | F5 00 00 00 | uint32_t | 0x000000F5 (245) | table field `queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.queue_index);
+ // +0x4C | 88 00 00 00 | uint32_t | 0x00000088 (136) | table field `channel_index` (UInt)
+ buffer = Push<uint32_t>(buffer, channel_index);
+
+ // clang-format on
+ }
break;
case LogType::kLogMessageAndDeliveryTime:
- // clang-format off
- // header:
- // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
- // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
- //
- // padding:
- // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
- buffer = Pad(buffer, 4);
- //
- // vtable (aos.logger.MessageHeader):
- // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
- // +0x0E | 34 00 | uint16_t | 0x0034 (52) | size of referring table
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
- // +0x10 | 30 00 | VOffset16 | 0x0030 (48) | offset to field `channel_index` (id: 0)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
- // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
- // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
- // +0x16 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `queue_index` (id: 3)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
- // +0x18 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `data` (id: 4)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
- // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
- // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
- // +0x1E | 28 00 | VOffset16 | 0x0028 (40) | offset to field `remote_queue_index` (id: 7)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
- //
- // root_table (aos.logger.MessageHeader):
- // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
- // +0x24 | 30 00 00 00 | UOffset32 | 0x00000030 (48) Loc: +0x54 | offset to field `data` (vector)
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x30);
- // +0x28 | C4 C8 87 BF 40 6C 1F 29 | int64_t | 0x291F6C40BF87C8C4 (2963206105180129476) | table field `realtime_remote_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
- // +0x30 | 0F 00 26 FD D2 6D C0 1F | int64_t | 0x1FC06DD2FD26000F (2287949363661897743) | table field `monotonic_remote_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
- // +0x38 | 29 75 09 C0 73 73 BF 88 | int64_t | 0x88BF7373C0097529 (-8593022623019338455) | table field `realtime_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
- // +0x40 | 6D 8A AE 04 50 25 9C E9 | int64_t | 0xE99C255004AE8A6D (-1613373540899321235) | table field `monotonic_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
- // +0x48 | 47 00 00 00 | uint32_t | 0x00000047 (71) | table field `remote_queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.remote_queue_index);
- // +0x4C | 4C 00 00 00 | uint32_t | 0x0000004C (76) | table field `queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.queue_index);
- // +0x50 | 72 00 00 00 | uint32_t | 0x00000072 (114) | table field `channel_index` (UInt)
- buffer = Push<uint32_t>(buffer, channel_index);
- //
- // vector (aos.logger.MessageHeader.data):
- // +0x54 | 07 00 00 00 | uint32_t | 0x00000007 (7) | length of vector (# items)
- buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
- // +0x58 | B1 | uint8_t | 0xB1 (177) | value[0]
- // +0x59 | 4A | uint8_t | 0x4A (74) | value[1]
- // +0x5A | 50 | uint8_t | 0x50 (80) | value[2]
- // +0x5B | 24 | uint8_t | 0x24 (36) | value[3]
- // +0x5C | AF | uint8_t | 0xAF (175) | value[4]
- // +0x5D | C8 | uint8_t | 0xC8 (200) | value[5]
- // +0x5E | D5 | uint8_t | 0xD5 (213) | value[6]
- buffer = PushBytes(buffer, context.data, context.size);
- //
- // padding:
- // +0x5F | 00 | uint8_t[1] | . | padding
- buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
- // clang-format on
+ switch (start_byte) {
+ case 0x00u:
+ if ((end_byte) == 0x00u) {
+ break;
+ }
+ // clang-format off
+ // header:
+ // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
+ buffer = Push<flatbuffers::uoffset_t>(
+ buffer, message_size - sizeof(flatbuffers::uoffset_t));
+ // +0x04 | 1C 00 00 00 | UOffset32 | 0x0000001C (28) Loc: +0x20 | offset to root table `aos.logger.MessageHeader`
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x1c);
+ [[fallthrough]];
+ case 0x08u:
+ if ((end_byte) == 0x08u) {
+ break;
+ }
+ //
+ // padding:
+ // +0x08 | 00 00 00 00 | uint8_t[4] | .... | padding
+ buffer = Pad(buffer, 4);
+ //
+ // vtable (aos.logger.MessageHeader):
+ // +0x0C | 14 00 | uint16_t | 0x0014 (20) | size of this vtable
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
+ // +0x0E | 34 00 | uint16_t | 0x0034 (52) | size of referring table
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x34);
+ [[fallthrough]];
+ case 0x10u:
+ if ((end_byte) == 0x10u) {
+ break;
+ }
+ // +0x10 | 30 00 | VOffset16 | 0x0030 (48) | offset to field `channel_index` (id: 0)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x30);
+ // +0x12 | 20 00 | VOffset16 | 0x0020 (32) | offset to field `monotonic_sent_time` (id: 1)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
+ // +0x14 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `realtime_sent_time` (id: 2)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
+ // +0x16 | 2C 00 | VOffset16 | 0x002C (44) | offset to field `queue_index` (id: 3)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x2c);
+ [[fallthrough]];
+ case 0x18u:
+ if ((end_byte) == 0x18u) {
+ break;
+ }
+ // +0x18 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `data` (id: 4)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
+ // +0x1A | 10 00 | VOffset16 | 0x0010 (16) | offset to field `monotonic_remote_time` (id: 5)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x10);
+ // +0x1C | 08 00 | VOffset16 | 0x0008 (8) | offset to field `realtime_remote_time` (id: 6)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x08);
+ // +0x1E | 28 00 | VOffset16 | 0x0028 (40) | offset to field `remote_queue_index` (id: 7)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x28);
+ [[fallthrough]];
+ case 0x20u:
+ if ((end_byte) == 0x20u) {
+ break;
+ }
+ //
+ // root_table (aos.logger.MessageHeader):
+ // +0x20 | 14 00 00 00 | SOffset32 | 0x00000014 (20) Loc: +0x0C | offset to vtable
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x14);
+ // +0x24 | 30 00 00 00 | UOffset32 | 0x00000030 (48) Loc: +0x54 | offset to field `data` (vector)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x30);
+ [[fallthrough]];
+ case 0x28u:
+ if ((end_byte) == 0x28u) {
+ break;
+ }
+ // +0x28 | C4 C8 87 BF 40 6C 1F 29 | int64_t | 0x291F6C40BF87C8C4 (2963206105180129476) | table field `realtime_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x30u:
+ if ((end_byte) == 0x30u) {
+ break;
+ }
+ // +0x30 | 0F 00 26 FD D2 6D C0 1F | int64_t | 0x1FC06DD2FD26000F (2287949363661897743) | table field `monotonic_remote_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x38u:
+ if ((end_byte) == 0x38u) {
+ break;
+ }
+ // +0x38 | 29 75 09 C0 73 73 BF 88 | int64_t | 0x88BF7373C0097529 (-8593022623019338455) | table field `realtime_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x40u:
+ if ((end_byte) == 0x40u) {
+ break;
+ }
+ // +0x40 | 6D 8A AE 04 50 25 9C E9 | int64_t | 0xE99C255004AE8A6D (-1613373540899321235) | table field `monotonic_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_event_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x48u:
+ if ((end_byte) == 0x48u) {
+ break;
+ }
+ // +0x48 | 47 00 00 00 | uint32_t | 0x00000047 (71) | table field `remote_queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.remote_queue_index);
+ // +0x4C | 4C 00 00 00 | uint32_t | 0x0000004C (76) | table field `queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.queue_index);
+ [[fallthrough]];
+ case 0x50u:
+ if ((end_byte) == 0x50u) {
+ break;
+ }
+ // +0x50 | 72 00 00 00 | uint32_t | 0x00000072 (114) | table field `channel_index` (UInt)
+ buffer = Push<uint32_t>(buffer, channel_index);
+ //
+ // vector (aos.logger.MessageHeader.data):
+ // +0x54 | 07 00 00 00 | uint32_t | 0x00000007 (7) | length of vector (# items)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
+ [[fallthrough]];
+ case 0x58u:
+ if ((end_byte) == 0x58u) {
+ break;
+ }
+ [[fallthrough]];
+ default:
+ // +0x58 | B1 | uint8_t | 0xB1 (177) | value[0]
+ // +0x59 | 4A | uint8_t | 0x4A (74) | value[1]
+ // +0x5A | 50 | uint8_t | 0x50 (80) | value[2]
+ // +0x5B | 24 | uint8_t | 0x24 (36) | value[3]
+ // +0x5C | AF | uint8_t | 0xAF (175) | value[4]
+ // +0x5D | C8 | uint8_t | 0xC8 (200) | value[5]
+ // +0x5E | D5 | uint8_t | 0xD5 (213) | value[6]
+ //
+ // padding:
+ // +0x5F | 00 | uint8_t[1] | . | padding
+ // clang-format on
+
+ if (start_byte <= 0x58 && end_byte == message_size) {
+ // The easy one, slap it all down.
+ buffer = PushBytes(buffer, context.data, context.size);
+ buffer =
+ Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
+ } else {
+ const size_t data_start_byte =
+ start_byte < 0x58 ? 0x0u : (start_byte - 0x58);
+ const size_t data_end_byte = end_byte - 0x58;
+ const size_t padded_size = ((context.size + 7) & 0xfffffff8u);
+ if (data_start_byte < padded_size) {
+ buffer = PushBytes(
+ buffer,
+ reinterpret_cast<const uint8_t *>(context.data) +
+ data_start_byte,
+ std::min(context.size, data_end_byte) - data_start_byte);
+ if (data_end_byte == padded_size) {
+ // We can only pad the last 7 bytes, so this only gets written
+ // if we write the last byte.
+ buffer = Pad(buffer,
+ ((context.size + 7) & 0xfffffff8u) - context.size);
+ }
+ }
+ }
+
+ break;
+ }
break;
case LogType::kLogRemoteMessage:
- // This is the message we need to recreate.
- //
- // clang-format off
- // header:
- // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
- // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
- //
- // padding:
- // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
- buffer = Pad(buffer, 6);
- //
- // vtable (aos.logger.MessageHeader):
- // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x0e);
- // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
- // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
- // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
- // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
- // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
- // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
- buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
- //
- // root_table (aos.logger.MessageHeader):
- // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0E);
- // +0x20 | D8 96 32 1A A0 D3 23 BB | int64_t | 0xBB23D3A01A3296D8 (-4961889679844403496) | table field `realtime_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
- // +0x28 | 2E 5D 23 B3 BE 84 CF C2 | int64_t | 0xC2CF84BEB3235D2E (-4409159555588334290) | table field `monotonic_sent_time` (Long)
- buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
- // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
- buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0C);
- // +0x34 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `queue_index` (UInt)
- buffer = Push<uint32_t>(buffer, context.remote_queue_index);
- // +0x38 | F3 00 00 00 | uint32_t | 0x000000F3 (243) | table field `channel_index` (UInt)
- buffer = Push<uint32_t>(buffer, channel_index);
- //
- // vector (aos.logger.MessageHeader.data):
- // +0x3C | 1A 00 00 00 | uint32_t | 0x0000001A (26) | length of vector (# items)
- buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
- // +0x40 | 38 | uint8_t | 0x38 (56) | value[0]
- // +0x41 | 1A | uint8_t | 0x1A (26) | value[1]
- // ...
- // +0x58 | 90 | uint8_t | 0x90 (144) | value[24]
- // +0x59 | 92 | uint8_t | 0x92 (146) | value[25]
- buffer = PushBytes(buffer, context.data, context.size);
- //
- // padding:
- // +0x5A | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
- buffer = Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
- // clang-format on
+ switch (start_byte) {
+ case 0x00u:
+ if ((end_byte) == 0x00u) {
+ break;
+ }
+ // This is the message we need to recreate.
+ //
+ // clang-format off
+ // header:
+ // +0x00 | 5C 00 00 00 | UOffset32 | 0x0000005C (92) Loc: +0x5C | size prefix
+ buffer = Push<flatbuffers::uoffset_t>(
+ buffer, message_size - sizeof(flatbuffers::uoffset_t));
+ // +0x04 | 18 00 00 00 | UOffset32 | 0x00000018 (24) Loc: +0x1C | offset to root table `aos.logger.MessageHeader`
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x18);
+ [[fallthrough]];
+ case 0x08u:
+ if ((end_byte) == 0x08u) {
+ break;
+ }
+ //
+ // padding:
+ // +0x08 | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
+ buffer = Pad(buffer, 6);
+ //
+ // vtable (aos.logger.MessageHeader):
+ // +0x0E | 0E 00 | uint16_t | 0x000E (14) | size of this vtable
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x0e);
+ [[fallthrough]];
+ case 0x10u:
+ if ((end_byte) == 0x10u) {
+ break;
+ }
+ // +0x10 | 20 00 | uint16_t | 0x0020 (32) | size of referring table
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x20);
+ // +0x12 | 1C 00 | VOffset16 | 0x001C (28) | offset to field `channel_index` (id: 0)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x1c);
+ // +0x14 | 0C 00 | VOffset16 | 0x000C (12) | offset to field `monotonic_sent_time` (id: 1)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x0c);
+ // +0x16 | 04 00 | VOffset16 | 0x0004 (4) | offset to field `realtime_sent_time` (id: 2)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x04);
+ [[fallthrough]];
+ case 0x18u:
+ if ((end_byte) == 0x18u) {
+ break;
+ }
+ // +0x18 | 18 00 | VOffset16 | 0x0018 (24) | offset to field `queue_index` (id: 3)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x18);
+ // +0x1A | 14 00 | VOffset16 | 0x0014 (20) | offset to field `data` (id: 4)
+ buffer = Push<flatbuffers::voffset_t>(buffer, 0x14);
+ //
+ // root_table (aos.logger.MessageHeader):
+ // +0x1C | 0E 00 00 00 | SOffset32 | 0x0000000E (14) Loc: +0x0E | offset to vtable
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0E);
+ [[fallthrough]];
+ case 0x20u:
+ if ((end_byte) == 0x20u) {
+ break;
+ }
+ // +0x20 | D8 96 32 1A A0 D3 23 BB | int64_t | 0xBB23D3A01A3296D8 (-4961889679844403496) | table field `realtime_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.realtime_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x28u:
+ if ((end_byte) == 0x28u) {
+ break;
+ }
+ // +0x28 | 2E 5D 23 B3 BE 84 CF C2 | int64_t | 0xC2CF84BEB3235D2E (-4409159555588334290) | table field `monotonic_sent_time` (Long)
+ buffer = Push<int64_t>(buffer, context.monotonic_remote_time.time_since_epoch().count());
+ [[fallthrough]];
+ case 0x30u:
+ if ((end_byte) == 0x30u) {
+ break;
+ }
+ // +0x30 | 0C 00 00 00 | UOffset32 | 0x0000000C (12) Loc: +0x3C | offset to field `data` (vector)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, 0x0C);
+ // +0x34 | 69 00 00 00 | uint32_t | 0x00000069 (105) | table field `queue_index` (UInt)
+ buffer = Push<uint32_t>(buffer, context.remote_queue_index);
+ [[fallthrough]];
+ case 0x38u:
+ if ((end_byte) == 0x38u) {
+ break;
+ }
+ // +0x38 | F3 00 00 00 | uint32_t | 0x000000F3 (243) | table field `channel_index` (UInt)
+ buffer = Push<uint32_t>(buffer, channel_index);
+ //
+ // vector (aos.logger.MessageHeader.data):
+ // +0x3C | 1A 00 00 00 | uint32_t | 0x0000001A (26) | length of vector (# items)
+ buffer = Push<flatbuffers::uoffset_t>(buffer, context.size);
+ [[fallthrough]];
+ case 0x40u:
+ if ((end_byte) == 0x40u) {
+ break;
+ }
+ [[fallthrough]];
+ default:
+ // +0x40 | 38 | uint8_t | 0x38 (56) | value[0]
+ // +0x41 | 1A | uint8_t | 0x1A (26) | value[1]
+ // ...
+ // +0x58 | 90 | uint8_t | 0x90 (144) | value[24]
+ // +0x59 | 92 | uint8_t | 0x92 (146) | value[25]
+ //
+ // padding:
+ // +0x5A | 00 00 00 00 00 00 | uint8_t[6] | ...... | padding
+ // clang-format on
+ if (start_byte <= 0x40 && end_byte == message_size) {
+ // The easy one, slap it all down.
+ buffer = PushBytes(buffer, context.data, context.size);
+ buffer =
+ Pad(buffer, ((context.size + 7) & 0xfffffff8u) - context.size);
+ } else {
+ const size_t data_start_byte =
+ start_byte < 0x40 ? 0x0u : (start_byte - 0x40);
+ const size_t data_end_byte = end_byte - 0x40;
+ const size_t padded_size = ((context.size + 7) & 0xfffffff8u);
+ if (data_start_byte < padded_size) {
+ buffer = PushBytes(
+ buffer,
+ reinterpret_cast<const uint8_t *>(context.data) +
+ data_start_byte,
+ std::min(context.size, data_end_byte) - data_start_byte);
+ if (data_end_byte == padded_size) {
+ // We can only pad the last 7 bytes, so this only gets written
+ // if we write the last byte.
+ buffer = Pad(buffer,
+ ((context.size + 7) & 0xfffffff8u) - context.size);
+ }
+ }
+ }
+ break;
+ }
}
- return message_size;
+ return end_byte - start_byte;
}
SpanReader::SpanReader(std::string_view filename, bool quiet)
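
All of the data-vector branches in PackMessageInline above share the same round-up-to-8 arithmetic for the trailing padding. As a standalone illustration (the helper name is made up, and the expression mirrors the one used above, so it assumes sizes well under 4 GiB):

    #include <cstddef>

    // Flatbuffer data vectors here are padded out to the next multiple of 8
    // bytes; the pad length is PaddedSize(size) - size.
    constexpr size_t PaddedSize(size_t size) { return (size + 7) & 0xfffffff8u; }
    static_assert(PaddedSize(14) == 16, "14-byte vector gets 2 bytes of padding");
    static_assert(PaddedSize(26) == 32, "26-byte vector gets 6 bytes of padding");
    static_assert(PaddedSize(32) == 32, "already aligned, no padding");

The 14- and 26-byte cases match the example payloads in the annotated dumps above.
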
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index 346a36e..4e38feb 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -76,9 +76,6 @@
void CopyMessage(DataEncoder::Copier *coppier,
aos::monotonic_clock::time_point now);
- // Queues up data in span. May copy or may write it to disk immediately.
- void QueueSpan(absl::Span<const uint8_t> span);
-
// Indicates we got ENOSPC when trying to write. After this returns true, no
// further data is written.
bool ran_out_of_space() const { return ran_out_of_space_; }
@@ -189,7 +186,8 @@
constexpr flatbuffers::uoffset_t PackRemoteMessageSize() { return 96u; }
size_t PackRemoteMessageInline(
uint8_t *data, const message_bridge::RemoteMessage *msg, int channel_index,
- const aos::monotonic_clock::time_point monotonic_timestamp_time);
+ const aos::monotonic_clock::time_point monotonic_timestamp_time,
+ size_t start_byte, size_t end_byte);
 // Packs a message pointed to by the context into a MessageHeader.
flatbuffers::Offset<MessageHeader> PackMessage(
@@ -204,7 +202,8 @@
// This is equivalent to PackMessage, but doesn't require allocating a
// FlatBufferBuilder underneath.
 size_t PackMessageInline(uint8_t *data, const Context &context,
- int channel_index, LogType log_type);
+ int channel_index, LogType log_type, size_t start_byte,
+ size_t end_byte);
// Class to read chunks out of a log file.
class SpanReader {
@@ -915,6 +914,65 @@
// a single node.
std::string MaybeNodeName(const Node *);
+// Class to copy a RemoteMessage into the provided buffer.
+class RemoteMessageCopier : public DataEncoder::Copier {
+ public:
+ RemoteMessageCopier(const message_bridge::RemoteMessage *message,
+ int channel_index,
+ aos::monotonic_clock::time_point monotonic_timestamp_time,
+ EventLoop *event_loop)
+ : DataEncoder::Copier(PackRemoteMessageSize()),
+ message_(message),
+ channel_index_(channel_index),
+ monotonic_timestamp_time_(monotonic_timestamp_time),
+ event_loop_(event_loop) {}
+
+ monotonic_clock::time_point end_time() const { return end_time_; }
+
+ size_t Copy(uint8_t *data, size_t start_byte, size_t end_byte) final {
+ size_t result = PackRemoteMessageInline(data, message_, channel_index_,
+ monotonic_timestamp_time_,
+ start_byte, end_byte);
+ end_time_ = event_loop_->monotonic_now();
+ return result;
+ }
+
+ private:
+ const message_bridge::RemoteMessage *message_;
+ int channel_index_;
+ aos::monotonic_clock::time_point monotonic_timestamp_time_;
+ EventLoop *event_loop_;
+ monotonic_clock::time_point end_time_;
+};
+
+// Class to copy a context into the provided buffer.
+class ContextDataCopier : public DataEncoder::Copier {
+ public:
+ ContextDataCopier(const Context &context, int channel_index, LogType log_type,
+ EventLoop *event_loop)
+ : DataEncoder::Copier(PackMessageSize(log_type, context.size)),
+ context_(context),
+ channel_index_(channel_index),
+ log_type_(log_type),
+ event_loop_(event_loop) {}
+
+ monotonic_clock::time_point end_time() const { return end_time_; }
+
+ size_t Copy(uint8_t *data, size_t start_byte, size_t end_byte) final {
+ size_t result = PackMessageInline(data, context_, channel_index_, log_type_,
+ start_byte, end_byte);
+ end_time_ = event_loop_->monotonic_now();
+ return result;
+ }
+
+ private:
+ const Context &context_;
+ const int channel_index_;
+ const LogType log_type_;
+ EventLoop *event_loop_;
+ monotonic_clock::time_point end_time_;
+};
+
} // namespace aos::logger
#endif // AOS_EVENTS_LOGGING_LOGFILE_UTILS_H_
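
A hypothetical usage sketch for the two copier classes above, paired with the CopyMessage() call they feed; the LogContext helper is made up for illustration, the writer type is assumed to be DetachedBufferWriter, and kLogMessage is assumed to be one of the LogType values:

#include "aos/events/logging/logfile_utils.h"
#include "glog/logging.h"

void LogContext(aos::logger::DetachedBufferWriter *writer,
                const aos::Context &context, int channel_index,
                aos::EventLoop *event_loop) {
  // The copier sizes itself from PackMessageSize() and records when the last
  // byte was handed to the encoder.
  aos::logger::ContextDataCopier copier(
      context, channel_index, aos::logger::LogType::kLogMessage, event_loop);
  writer->CopyMessage(&copier, event_loop->monotonic_now());
  VLOG(1) << "Encode finished at " << copier.end_time();
}
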
diff --git a/aos/events/logging/logfile_utils_out_of_space_test_runner.cc b/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
index afc4d77..719eb6e 100644
--- a/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
+++ b/aos/events/logging/logfile_utils_out_of_space_test_runner.cc
@@ -22,10 +22,14 @@
FLAGS_tmpfs + "/file",
std::make_unique<aos::logger::DummyEncoder>(data.size()));
for (int i = 0; i < 8; ++i) {
- writer.QueueSpan(data);
+    aos::logger::DataEncoder::SpanCopier copier(data);
+    writer.CopyMessage(&copier, aos::monotonic_clock::now());
CHECK(!writer.ran_out_of_space()) << ": " << i;
}
- writer.QueueSpan(data);
+ {
+    aos::logger::DataEncoder::SpanCopier copier(data);
+    writer.CopyMessage(&copier, aos::monotonic_clock::now());
+ }
CHECK(writer.ran_out_of_space());
writer.acknowledge_out_of_space();
}
diff --git a/aos/events/logging/logfile_utils_test.cc b/aos/events/logging/logfile_utils_test.cc
index f453798..ec84b58 100644
--- a/aos/events/logging/logfile_utils_test.cc
+++ b/aos/events/logging/logfile_utils_test.cc
@@ -39,6 +39,10 @@
void WriteSizedFlatbuffer(flatbuffers::DetachedBuffer &&buffer) {
QueueSpan(absl::Span<const uint8_t>(buffer.data(), buffer.size()));
}
+ void QueueSpan(absl::Span<const uint8_t> buffer) {
+    DataEncoder::SpanCopier copier(buffer);
+    CopyMessage(&copier, monotonic_clock::now());
+ }
};
// Creates a size prefixed flatbuffer from json.
@@ -3096,6 +3100,90 @@
"/foo.afb");
}
+// Event loop which just has working time functions for the Copier classes
+// tested below.
+class TimeEventLoop : public EventLoop {
+ public:
+ TimeEventLoop() : EventLoop(nullptr) {}
+
+ aos::monotonic_clock::time_point monotonic_now() const final {
+ return aos::monotonic_clock::min_time;
+ }
+ realtime_clock::time_point realtime_now() const final {
+ return aos::realtime_clock::min_time;
+ }
+
+ void OnRun(::std::function<void()> /*on_run*/) final { LOG(FATAL); }
+
+ const std::string_view name() const final { return "time"; }
+ const Node *node() const final { return nullptr; }
+
+ void SetRuntimeAffinity(const cpu_set_t & /*cpuset*/) final { LOG(FATAL); }
+ void SetRuntimeRealtimePriority(int /*priority*/) final { LOG(FATAL); }
+
+ const cpu_set_t &runtime_affinity() const final {
+ LOG(FATAL);
+ return cpuset_;
+ }
+
+ TimerHandler *AddTimer(::std::function<void()> /*callback*/) final {
+ LOG(FATAL);
+ return nullptr;
+ }
+
+ std::unique_ptr<RawSender> MakeRawSender(const Channel * /*channel*/) final {
+ LOG(FATAL);
+ return std::unique_ptr<RawSender>();
+ }
+
+ const UUID &boot_uuid() const final {
+ LOG(FATAL);
+ return boot_uuid_;
+ }
+
+ void set_name(const std::string_view name) final { LOG(FATAL) << name; }
+
+ pid_t GetTid() final {
+ LOG(FATAL);
+ return 0;
+ }
+
+ int NumberBuffers(const Channel * /*channel*/) final {
+ LOG(FATAL);
+ return 0;
+ }
+
+ int runtime_realtime_priority() const final {
+ LOG(FATAL);
+ return 0;
+ }
+
+ std::unique_ptr<RawFetcher> MakeRawFetcher(
+ const Channel * /*channel*/) final {
+ LOG(FATAL);
+ return std::unique_ptr<RawFetcher>();
+ }
+
+ PhasedLoopHandler *AddPhasedLoop(
+ ::std::function<void(int)> /*callback*/,
+ const monotonic_clock::duration /*interval*/,
+ const monotonic_clock::duration /*offset*/) final {
+ LOG(FATAL);
+ return nullptr;
+ }
+
+ void MakeRawWatcher(
+ const Channel * /*channel*/,
+ std::function<void(const Context &context, const void *message)>
+ /*watcher*/) final {
+ LOG(FATAL);
+ }
+
+ private:
+ const cpu_set_t cpuset_ = DefaultAffinity();
+  UUID boot_uuid_ = UUID::Zero();
+};
+
// Tests that all variations of PackMessage are equivalent to the inline
// PackMessage used to avoid allocations.
 TEST_F(InlinePackMessage, Equivalent) {
@@ -3132,14 +3220,41 @@
67);
// And verify packing inline works as expected.
- EXPECT_EQ(repacked_message.size(),
- PackMessageInline(repacked_message.data(), context,
- channel_index, type));
+ EXPECT_EQ(
+ repacked_message.size(),
+ PackMessageInline(repacked_message.data(), context, channel_index,
+ type, 0u, repacked_message.size()));
EXPECT_EQ(absl::Span<uint8_t>(repacked_message),
absl::Span<uint8_t>(fbb.GetBufferSpan().data(),
fbb.GetBufferSpan().size()))
<< AnnotateBinaries(schema, "aos/events/logging/logger.bfbs",
fbb.GetBufferSpan());
+
+ // Ok, now we want to confirm that we can build up arbitrary pieces of
+ // said flatbuffer. Try all of them since it is cheap.
+ TimeEventLoop event_loop;
+ for (size_t i = 0; i < repacked_message.size(); i += 8) {
+ for (size_t j = i; j < repacked_message.size(); j += 8) {
+ std::vector<uint8_t> destination(repacked_message.size(), 67u);
+ ContextDataCopier copier(context, channel_index, type, &event_loop);
+
+ copier.Copy(destination.data(), i, j);
+
+ size_t index = 0;
+ for (size_t k = i; k < j; ++k) {
+ ASSERT_EQ(destination[index], repacked_message[k])
+ << ": Failed to match type " << static_cast<int>(type)
+ << ", index " << index << " while testing range " << i << " to "
+ << j;
+ ++index;
+ }
+ // Now, confirm that none of the other bytes have been touched.
+ for (; index < destination.size(); ++index) {
+ ASSERT_EQ(destination[index], 67u);
+ }
+ }
+ }
}
}
}
@@ -3177,15 +3292,37 @@
std::vector<uint8_t> repacked_message(PackRemoteMessageSize(), 67);
// And verify packing inline works as expected.
- EXPECT_EQ(
- repacked_message.size(),
- PackRemoteMessageInline(repacked_message.data(), &random_msg.message(),
- channel_index, monotonic_timestamp_time));
+ EXPECT_EQ(repacked_message.size(),
+ PackRemoteMessageInline(
+ repacked_message.data(), &random_msg.message(), channel_index,
+ monotonic_timestamp_time, 0u, repacked_message.size()));
EXPECT_EQ(absl::Span<uint8_t>(repacked_message),
absl::Span<uint8_t>(fbb.GetBufferSpan().data(),
fbb.GetBufferSpan().size()))
<< AnnotateBinaries(schema, "aos/events/logging/logger.bfbs",
fbb.GetBufferSpan());
+
+ // Ok, now we want to confirm that we can build up arbitrary pieces of said
+ // flatbuffer. Try all of them since it is cheap.
+ TimeEventLoop event_loop;
+ for (size_t i = 0; i < repacked_message.size(); i += 8) {
+ for (size_t j = i; j < repacked_message.size(); j += 8) {
+ std::vector<uint8_t> destination(repacked_message.size(), 67u);
+ RemoteMessageCopier copier(&random_msg.message(), channel_index,
+ monotonic_timestamp_time, &event_loop);
+
+ copier.Copy(destination.data(), i, j);
+
+ size_t index = 0;
+ for (size_t k = i; k < j; ++k) {
+ ASSERT_EQ(destination[index], repacked_message[k]);
+ ++index;
+ }
+ for (; index < destination.size(); ++index) {
+ ASSERT_EQ(destination[index], 67u);
+ }
+ }
+ }
}
}
diff --git a/aos/events/logging/lzma_encoder.cc b/aos/events/logging/lzma_encoder.cc
index f583818..3959727 100644
--- a/aos/events/logging/lzma_encoder.cc
+++ b/aos/events/logging/lzma_encoder.cc
@@ -95,7 +95,7 @@
// since lzma is going to take it from here.
CHECK_LE(copy_size, input_buffer_.size());
- CHECK_EQ(copy->Copy(input_buffer_.data()), copy_size);
+ CHECK_EQ(copy->Copy(input_buffer_.data(), 0, copy_size), copy_size);
stream_.next_in = input_buffer_.data();
stream_.avail_in = copy_size;
diff --git a/aos/events/logging/snappy_encoder.cc b/aos/events/logging/snappy_encoder.cc
index 2ef8363..1b80fe0 100644
--- a/aos/events/logging/snappy_encoder.cc
+++ b/aos/events/logging/snappy_encoder.cc
@@ -142,7 +142,7 @@
CHECK_LE(copy_size + data_.size(), data_.capacity());
size_t starting_size = data_.size();
data_.resize(starting_size + copy_size);
- CHECK_EQ(copy->Copy(data_.data() + starting_size), copy_size);
+ CHECK_EQ(copy->Copy(data_.data() + starting_size, 0, copy_size), copy_size);
accumulated_checksum_ = AccumulateCrc32(
{data_.data() + starting_size, copy_size}, accumulated_checksum_);
}
diff --git a/aos/util/foxglove_websocket.cc b/aos/util/foxglove_websocket.cc
index 26092bc..6ecb600 100644
--- a/aos/util/foxglove_websocket.cc
+++ b/aos/util/foxglove_websocket.cc
@@ -5,6 +5,12 @@
DEFINE_string(config, "/app/aos_config.json", "Path to the config.");
DEFINE_uint32(port, 8765, "Port to use for foxglove websocket server.");
+DEFINE_string(mode, "flatbuffer", "json or flatbuffer serialization.");
+DEFINE_bool(fetch_pinned_channels, true,
+ "Set this to allow foxglove_websocket to make fetchers on channels "
+ "with a read_method of PIN (see aos/configuration.fbs; PIN is an "
+ "enum value). Having this enabled will cause foxglove to consume "
+ "extra shared memory resources.");
int main(int argc, char *argv[]) {
gflags::SetUsageMessage(
@@ -40,7 +46,14 @@
aos::ShmEventLoop event_loop(&config.message());
- aos::FoxgloveWebsocketServer server(&event_loop, FLAGS_port);
+ aos::FoxgloveWebsocketServer server(
+ &event_loop, FLAGS_port,
+ FLAGS_mode == "flatbuffer"
+ ? aos::FoxgloveWebsocketServer::Serialization::kFlatbuffer
+ : aos::FoxgloveWebsocketServer::Serialization::kJson,
+ FLAGS_fetch_pinned_channels
+ ? aos::FoxgloveWebsocketServer::FetchPinnedChannels::kYes
+ : aos::FoxgloveWebsocketServer::FetchPinnedChannels::kNo);
event_loop.Run();
}
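
With the flag wiring above, a typical debugging invocation looks something like "foxglove_websocket --config=/app/aos_config.json --mode=json --nofetch_pinned_channels" (the binary name is assumed from the source file); by default the server now serializes flatbuffers and fetches PIN'd channels.
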
diff --git a/aos/util/foxglove_websocket_lib.cc b/aos/util/foxglove_websocket_lib.cc
index b4d262a..1cc3a8a 100644
--- a/aos/util/foxglove_websocket_lib.cc
+++ b/aos/util/foxglove_websocket_lib.cc
@@ -1,17 +1,13 @@
#include "aos/util/foxglove_websocket_lib.h"
#include "aos/util/mcap_logger.h"
+#include "aos/flatbuffer_merge.h"
+#include "absl/strings/escaping.h"
#include "gflags/gflags.h"
DEFINE_uint32(sorting_buffer_ms, 100,
"Amount of time to buffer messages to sort them before sending "
"them to foxglove.");
-DEFINE_bool(fetch_pinned_channels, false,
- "Set this to allow foxglove_websocket to make fetchers on channels "
- "with a read_method of PIN (see aos/configuration.fbs; PIN is an "
- "enum value). By default, we don't make fetchers for "
- "these channels since using up a fetcher slot on PIN'd channels "
- "can have side-effects.");
namespace {
// Period at which to poll the fetchers for all the channels.
@@ -19,21 +15,38 @@
} // namespace
namespace aos {
-FoxgloveWebsocketServer::FoxgloveWebsocketServer(aos::EventLoop *event_loop,
- uint32_t port)
- : event_loop_(event_loop), server_(port, "aos_foxglove") {
+FoxgloveWebsocketServer::FoxgloveWebsocketServer(
+ aos::EventLoop *event_loop, uint32_t port, Serialization serialization,
+ FetchPinnedChannels fetch_pinned_channels)
+ : event_loop_(event_loop),
+ serialization_(serialization),
+ fetch_pinned_channels_(fetch_pinned_channels),
+ server_(port, "aos_foxglove") {
for (const aos::Channel *channel :
*event_loop_->configuration()->channels()) {
const bool is_pinned = (channel->read_method() == ReadMethod::PIN);
if (aos::configuration::ChannelIsReadableOnNode(channel,
event_loop_->node()) &&
- (!is_pinned || FLAGS_fetch_pinned_channels)) {
+ (!is_pinned || fetch_pinned_channels_ == FetchPinnedChannels::kYes)) {
+ const FlatbufferDetachedBuffer<reflection::Schema> schema =
+ RecursiveCopyFlatBuffer(channel->schema());
const ChannelId id =
- server_.addChannel(foxglove::websocket::ChannelWithoutId{
- .topic = channel->name()->str() + " " + channel->type()->str(),
- .encoding = "json",
- .schemaName = channel->type()->str(),
- .schema = JsonSchemaForFlatbuffer({channel->schema()}).dump()});
+ (serialization_ == Serialization::kJson)
+ ? server_.addChannel(foxglove::websocket::ChannelWithoutId{
+ .topic =
+ channel->name()->str() + " " + channel->type()->str(),
+ .encoding = "json",
+ .schemaName = channel->type()->str(),
+ .schema =
+ JsonSchemaForFlatbuffer({channel->schema()}).dump()})
+ : server_.addChannel(foxglove::websocket::ChannelWithoutId{
+ .topic =
+ channel->name()->str() + " " + channel->type()->str(),
+ .encoding = "flatbuffer",
+ .schemaName = channel->type()->str(),
+ .schema = absl::Base64Escape(
+ {reinterpret_cast<const char *>(schema.span().data()),
+ schema.span().size()})});
CHECK(fetchers_.count(id) == 0);
fetchers_[id] =
FetcherState{.fetcher = event_loop_->MakeRawFetcher(channel)};
@@ -100,11 +113,20 @@
while (!fetcher_times.empty()) {
const ChannelId channel = fetcher_times.begin()->second;
FetcherState *fetcher = &fetchers_[channel];
- server_.sendMessage(
- channel, fetcher_times.begin()->first.time_since_epoch().count(),
- aos::FlatbufferToJson(
- fetcher->fetcher->channel()->schema(),
- static_cast<const uint8_t *>(fetcher->fetcher->context().data)));
+ switch (serialization_) {
+ case Serialization::kJson:
+ server_.sendMessage(
+ channel, fetcher_times.begin()->first.time_since_epoch().count(),
+ aos::FlatbufferToJson(fetcher->fetcher->channel()->schema(),
+ static_cast<const uint8_t *>(
+ fetcher->fetcher->context().data)));
+ break;
+ case Serialization::kFlatbuffer:
+ server_.sendMessage(
+ channel, fetcher_times.begin()->first.time_since_epoch().count(),
+ {static_cast<const char *>(fetcher->fetcher->context().data),
+ fetcher->fetcher->context().size});
+ }
fetcher_times.erase(fetcher_times.begin());
fetcher->sent_current_message = true;
if (fetcher->fetcher->FetchNext()) {
diff --git a/aos/util/foxglove_websocket_lib.h b/aos/util/foxglove_websocket_lib.h
index 8160653..9be2f61 100644
--- a/aos/util/foxglove_websocket_lib.h
+++ b/aos/util/foxglove_websocket_lib.h
@@ -14,7 +14,19 @@
// See foxglove_websocket.cc for some usage notes.
class FoxgloveWebsocketServer {
public:
- FoxgloveWebsocketServer(aos::EventLoop *event_loop, uint32_t port);
+  // Whether to serialize the messages sent over the websocket as JSON or
+  // flatbuffers.
+ enum class Serialization {
+ kJson,
+ kFlatbuffer,
+ };
+ enum class FetchPinnedChannels {
+ kYes,
+ kNo,
+ };
+ FoxgloveWebsocketServer(aos::EventLoop *event_loop, uint32_t port,
+ Serialization serialization,
+ FetchPinnedChannels fetch_pinned_channels);
~FoxgloveWebsocketServer();
private:
@@ -33,6 +45,8 @@
};
aos::EventLoop *event_loop_;
+ const Serialization serialization_;
+ const FetchPinnedChannels fetch_pinned_channels_;
foxglove::websocket::Server server_;
// A map of fetchers for every single channel that could be subscribed to.
std::map<ChannelId, FetcherState> fetchers_;
diff --git a/aos/util/mcap_logger.cc b/aos/util/mcap_logger.cc
index 96a9b60..40e55f0 100644
--- a/aos/util/mcap_logger.cc
+++ b/aos/util/mcap_logger.cc
@@ -309,7 +309,7 @@
CHECK(channel->has_schema());
const FlatbufferDetachedBuffer<reflection::Schema> schema =
- CopyFlatBuffer(channel->schema());
+ RecursiveCopyFlatBuffer(channel->schema());
// Write out the schema (we don't bother deduplicating schema types):
string_builder_.Reset();
diff --git a/aos/util/threaded_consumer_test.cc b/aos/util/threaded_consumer_test.cc
index f137108..96375f0 100644
--- a/aos/util/threaded_consumer_test.cc
+++ b/aos/util/threaded_consumer_test.cc
@@ -41,7 +41,7 @@
ThreadedConsumer<int, 4> threaded_consumer(
[&counter](int task) {
CheckRealtime();
- LOG(INFO) << "task:" << task << " counter: " << counter;
+ VLOG(1) << "task:" << task << " counter: " << counter;
counter = task;
},
20);
@@ -71,7 +71,7 @@
ThreadedConsumer<int, 4> threaded_consumer(
[&counter, &should_block](int task) {
- LOG(INFO) << "task:" << task << " counter: " << counter;
+ VLOG(1) << "task:" << task << " counter: " << counter;
counter = task;
@@ -111,7 +111,7 @@
{
ThreadedConsumer<int, 4> threaded_consumer(
[&counter, &should_block](int task) {
- LOG(INFO) << "task:" << task << " counter: " << counter;
+ VLOG(1) << "task:" << task << " counter: " << counter;
counter = task;
diff --git a/frc971/constants/BUILD b/frc971/constants/BUILD
index 891067c..c17dc31 100644
--- a/frc971/constants/BUILD
+++ b/frc971/constants/BUILD
@@ -4,10 +4,12 @@
"constants_sender_lib.h",
],
target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
deps = [
"//aos:flatbuffer_merge",
"//aos:json_to_flatbuffer",
"//aos/events:event_loop",
+ "//aos/events:shm_event_loop",
"//aos/network:team_number",
"@com_github_gflags_gflags//:gflags",
"@com_github_google_glog//:glog",
diff --git a/frc971/constants/constants_sender_lib.h b/frc971/constants/constants_sender_lib.h
index 92c14fa..48cacb5 100644
--- a/frc971/constants/constants_sender_lib.h
+++ b/frc971/constants/constants_sender_lib.h
@@ -2,6 +2,7 @@
#define FRC971_CONSTANTS_CONSTANTS_SENDER_H_
#include "aos/events/event_loop.h"
+#include "aos/events/shm_event_loop.h"
#include "aos/flatbuffer_merge.h"
#include "aos/json_to_flatbuffer.h"
#include "aos/network/team_number.h"
@@ -27,11 +28,9 @@
constants_path_(constants_path),
event_loop_(event_loop),
sender_(event_loop_->MakeSender<ConstantsData>(channel_name_)) {
- event_loop->OnRun([this]() {
- typename aos::Sender<ConstantsData>::Builder builder =
- sender_.MakeBuilder();
- builder.CheckOk(builder.Send(GetConstantsForTeamNumber(builder.fbb())));
- });
+ typename aos::Sender<ConstantsData>::Builder builder =
+ sender_.MakeBuilder();
+ builder.CheckOk(builder.Send(GetConstantsForTeamNumber(builder.fbb())));
}
private:
@@ -63,6 +62,56 @@
aos::Sender<ConstantsData> sender_;
};
+// This class fetches the current constants for the device, with appropriate
+// CHECKs to ensure that (a) the constants never change and (b) that the
+// constants are always available. This can be paired with WaitForConstants to
+// create the conditions for (b). In simulation, the constants should simply be
+// sent before starting up other EventLoops.
+template <typename ConstantsData>
+class ConstantsFetcher {
+ public:
+ ConstantsFetcher(aos::EventLoop *event_loop,
+ std::string_view channel = "/constants")
+ : fetcher_(event_loop->MakeFetcher<ConstantsData>(channel)) {
+ CHECK(fetcher_.Fetch())
+ << "Constants information must be available at startup.";
+ event_loop->MakeNoArgWatcher<ConstantsData>(channel, []() {
+ LOG(FATAL)
+ << "Don't know how to handle changes to constants information.";
+ });
+ }
+
+ const ConstantsData& constants() const {
+ return *fetcher_.get();
+ }
+
+ private:
+ aos::Fetcher<ConstantsData> fetcher_;
+};
+
+// Blocks until data is available on the requested channel using a ShmEventLoop.
+// This is for use during initialization in C++ binaries so that we can delay
+// initialization until everything is available. This allows applications to
+// depend on constants data during their initialization.
+template <typename ConstantsData>
+void WaitForConstants(const aos::Configuration *config,
+ std::string_view channel = "/constants") {
+ aos::ShmEventLoop event_loop(config);
+ aos::Fetcher fetcher = event_loop.MakeFetcher<ConstantsData>(channel);
+ event_loop.MakeNoArgWatcher<ConstantsData>(
+ channel, [&event_loop]() { event_loop.Exit(); });
+ event_loop.OnRun([&event_loop, &fetcher]() {
+ // If the constants were already published, we don't need to wait for them.
+ if (fetcher.Fetch()) {
+ event_loop.Exit();
+ }
+ });
+ LOG(INFO) << "Waiting for constants data on " << channel << " "
+ << ConstantsData::GetFullyQualifiedName();
+ event_loop.Run();
+ LOG(INFO) << "Got constants data.";
+}
+
} // namespace frc971::constants
#endif // FRC971_CONSTANTS_CONSTANTS_SENDER_H_
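
A hypothetical startup sequence combining the two helpers above; RunWithConstants is made up for illustration, and ConstantsData stands in for an application's constants flatbuffer type (for example the testdata::ConstantsData used in the tests below):

#include "aos/events/shm_event_loop.h"
#include "frc971/constants/constants_sender_lib.h"

template <typename ConstantsData>
void RunWithConstants(const aos::Configuration *config) {
  // Block until a ConstantsSender has published on /constants.
  frc971::constants::WaitForConstants<ConstantsData>(config);
  // Safe to construct now; the fetcher CHECK-fails if no data is available.
  aos::ShmEventLoop event_loop(config);
  frc971::constants::ConstantsFetcher<ConstantsData> constants(&event_loop);
  // ... wire up the rest of the application using constants.constants() ...
  event_loop.Run();
}
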
diff --git a/frc971/constants/constants_sender_test.cc b/frc971/constants/constants_sender_test.cc
index 3edc320..1441767 100644
--- a/frc971/constants/constants_sender_test.cc
+++ b/frc971/constants/constants_sender_test.cc
@@ -40,11 +40,10 @@
ConstantSender<testdata::ConstantsData, testdata::ConstantsList> test971(
constants_sender_event_loop_.get(),
"frc971/constants/testdata/test_constants.json", "/constants");
- test_event_loop->MakeWatcher("/constants",
- [](const testdata::ConstantsData &data) {
- EXPECT_EQ(data.max_roller_voltage(), 12);
- EXPECT_EQ(data.min_roller_voltage(), -12);
- });
+ ConstantsFetcher<testdata::ConstantsData> fetcher(test_event_loop.get());
+ EXPECT_EQ(fetcher.constants().max_roller_voltage(), 12);
+ EXPECT_EQ(fetcher.constants().min_roller_voltage(), -12);
+ // Ensure that the watcher in ConstantsFetcher never triggers.
event_loop_factory_.RunFor(std::chrono::seconds(1));
}
@@ -57,14 +56,42 @@
ConstantSender<testdata::ConstantsData, testdata::ConstantsList> test971(
constants_sender_event_loop_.get(),
"frc971/constants/testdata/test_constants.json", 9971, "/constants");
- test_event_loop->MakeWatcher("/constants",
- [](const testdata::ConstantsData &data) {
- EXPECT_EQ(data.max_roller_voltage(), 6);
- EXPECT_EQ(data.min_roller_voltage(), -6);
- });
+ ConstantsFetcher<testdata::ConstantsData> fetcher(test_event_loop.get());
+ EXPECT_EQ(fetcher.constants().max_roller_voltage(), 6);
+ EXPECT_EQ(fetcher.constants().min_roller_voltage(), -6);
event_loop_factory_.RunFor(std::chrono::seconds(1));
}
+// Tests that the ConstantsFetcher dies when there is no data available during
+// construction.
+TEST_F(ConstantSenderTest, NoDataOnStartup) {
+ std::unique_ptr<aos::EventLoop> test_event_loop =
+ event_loop_factory_.MakeEventLoop("constants");
+ EXPECT_DEATH(ConstantsFetcher<testdata::ConstantsData>(test_event_loop.get()),
+ "information must be available at startup");
+}
+
+// Tests that the ConstantsFetcher dies when there is a change to the constants
+// data.
+TEST_F(ConstantSenderTest, DieOnDataUpdate) {
+ std::unique_ptr<aos::EventLoop> test_event_loop =
+ event_loop_factory_.MakeEventLoop("constants");
+ ConstantSender<testdata::ConstantsData, testdata::ConstantsList> test971(
+ constants_sender_event_loop_.get(),
+ "frc971/constants/testdata/test_constants.json", 9971, "/constants");
+ ConstantsFetcher<testdata::ConstantsData> fetcher(test_event_loop.get());
+ auto sender =
+ constants_sender_event_loop_->MakeSender<testdata::ConstantsData>(
+ "/constants");
+ constants_sender_event_loop_->OnRun([&sender]() {
+ auto builder = sender.MakeBuilder();
+ builder.CheckOk(builder.Send(
+ builder.MakeBuilder<testdata::ConstantsData>().Finish()));
+ });
+ EXPECT_DEATH(event_loop_factory_.RunFor(std::chrono::seconds(1)),
+ "changes to constants");
+}
+
// When given a team number that is not recognized, we kill the program.
TEST_F(ConstantSenderTest, TeamNotFound) {
diff --git a/frc971/control_loops/drivetrain/distance_spline.h b/frc971/control_loops/drivetrain/distance_spline.h
index 91b9898..f445fa4 100644
--- a/frc971/control_loops/drivetrain/distance_spline.h
+++ b/frc971/control_loops/drivetrain/distance_spline.h
@@ -123,7 +123,7 @@
// The spline we are converting to a distance.
aos::SizedArray<Spline, kMaxSplines> splines_;
// An interpolation table of distances evenly distributed in alpha.
- const absl::Span<const float> distances_;
+ absl::Span<const float> distances_;
};
} // namespace drivetrain
diff --git a/frc971/control_loops/drivetrain/drivetrain.h b/frc971/control_loops/drivetrain/drivetrain.h
index a06b965..0d100ed 100644
--- a/frc971/control_loops/drivetrain/drivetrain.h
+++ b/frc971/control_loops/drivetrain/drivetrain.h
@@ -146,9 +146,10 @@
class DrivetrainLoop
: public frc971::controls::ControlLoop<Goal, Position, Status, Output> {
public:
- // Note that we only actually store N - 1 splines, since we need to keep one
- // fetcher free to check whether there are any new splines.
- static constexpr size_t kNumSplineFetchers = 5;
+ // Note that we only actually store N - 1 splines consistently, since we need
+ // to keep one fetcher free to check whether there are any new splines.
+ static constexpr size_t kNumSplineFetchers =
+ SplineDrivetrain::kMaxTrajectories;
// Constructs a control loop which can take a Drivetrain or defaults to the
// drivetrain at frc971::control_loops::drivetrain
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.cc b/frc971/control_loops/drivetrain/splinedrivetrain.cc
index 5a4e5da..0f9418c 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.cc
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.cc
@@ -36,18 +36,16 @@
void SplineDrivetrain::SetGoal(
const ::frc971::control_loops::drivetrain::Goal *goal) {
- if (goal->has_spline_handle()) {
- commanded_spline_ = goal->spline_handle();
- } else {
- commanded_spline_.reset();
- }
- UpdateSplineHandles();
+ UpdateSplineHandles(goal->has_spline_handle()
+ ? std::make_optional<int>(goal->spline_handle())
+ : std::nullopt);
}
bool SplineDrivetrain::IsCurrentTrajectory(
const fb::Trajectory *trajectory) const {
- return (current_trajectory_ != nullptr &&
- current_trajectory().spline_handle() == trajectory->handle());
+ const FinishedTrajectory *current = current_trajectory();
+ return (current != nullptr &&
+ current->spline_handle() == trajectory->handle());
}
bool SplineDrivetrain::HasTrajectory(const fb::Trajectory *trajectory) const {
@@ -55,7 +53,7 @@
return false;
}
for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
- if (trajectories_[ii]->spline_handle() == trajectory->handle()) {
+ if (trajectories_[ii].spline_handle() == trajectory->handle()) {
return true;
}
}
@@ -66,7 +64,7 @@
CHECK(trajectory != nullptr);
for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
- if (trajectories_[ii]->spline_handle() == trajectory->handle()) {
+ if (trajectories_[ii].spline_handle() == trajectory->handle()) {
trajectories_.erase(trajectories_.begin() + ii);
return;
}
@@ -76,49 +74,69 @@
}
void SplineDrivetrain::AddTrajectory(const fb::Trajectory *trajectory) {
- trajectories_.emplace_back(std::make_unique<FinishedTrajectory>(
- dt_config_, trajectory, velocity_drivetrain_));
- UpdateSplineHandles();
+ CHECK_LT(trajectories_.size(), trajectories_.capacity());
+ trajectories_.emplace_back(dt_config_, trajectory, velocity_drivetrain_);
+ UpdateSplineHandles(commanded_spline_);
}
void SplineDrivetrain::DeleteCurrentSpline() {
- DeleteTrajectory(&CHECK_NOTNULL(current_trajectory_)->trajectory());
+ DeleteTrajectory(&CHECK_NOTNULL(current_trajectory())->trajectory());
executing_spline_ = false;
- current_trajectory_ = nullptr;
+ commanded_spline_.reset();
current_xva_.setZero();
}
-void SplineDrivetrain::UpdateSplineHandles() {
+void SplineDrivetrain::UpdateSplineHandles(
+ std::optional<int> commanded_spline) {
// If we are currently executing a spline and have received a change
if (executing_spline_) {
- if (!commanded_spline_) {
+ if (!commanded_spline) {
// We've been told to stop executing a spline; remove it from our queue,
// and clean up.
DeleteCurrentSpline();
return;
} else {
if (executing_spline_ &&
- current_trajectory().spline_handle() != *commanded_spline_) {
+ CHECK_NOTNULL(current_trajectory())->spline_handle() !=
+ *commanded_spline) {
// If we are executing a spline, and the handle has changed, garbage
// collect the old spline.
DeleteCurrentSpline();
}
}
}
+ commanded_spline_ = commanded_spline;
// We've now cleaned up the previous state; handle any new commands.
if (!commanded_spline_) {
return;
}
for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
- if (trajectories_[ii]->spline_handle() == *commanded_spline_) {
+ if (trajectories_[ii].spline_handle() == *commanded_spline_) {
executing_spline_ = true;
- current_trajectory_ = trajectories_[ii].get();
}
}
// If we didn't find the commanded spline in the list of available splines,
   // that's fine; it just means it hasn't been fully planned yet.
}
+FinishedTrajectory *SplineDrivetrain::current_trajectory() {
+ for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
+ if (trajectories_[ii].spline_handle() == *commanded_spline_) {
+ return &trajectories_[ii];
+ }
+ }
+ return nullptr;
+}
+
+const FinishedTrajectory *SplineDrivetrain::current_trajectory() const {
+ for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
+ if (trajectories_[ii].spline_handle() == *commanded_spline_) {
+ return &trajectories_[ii];
+ }
+ }
+ return nullptr;
+}
+
// TODO(alex): Hold position when done following the spline.
void SplineDrivetrain::Update(
bool enable, const ::Eigen::Matrix<double, 5, 1> &state,
@@ -127,17 +145,19 @@
enable_ = enable;
if (enable && executing_spline_) {
::Eigen::Matrix<double, 2, 1> U_ff = ::Eigen::Matrix<double, 2, 1>::Zero();
+ const FinishedTrajectory *const trajectory =
+ CHECK_NOTNULL(current_trajectory());
if (!IsAtEnd() && executing_spline_) {
// TODO(alex): It takes about a cycle for the outputs to propagate to the
// motors. Consider delaying the output by a cycle.
- U_ff = current_trajectory().FFVoltage(current_xva_(0));
+ U_ff = trajectory->FFVoltage(current_xva_(0));
}
const double current_distance = current_xva_(0);
::Eigen::Matrix<double, 2, 5> K =
- current_trajectory().GainForDistance(current_distance);
+ trajectory->GainForDistance(current_distance);
::Eigen::Matrix<double, 5, 1> goal_state = CurrentGoalState();
- const bool backwards = current_trajectory().drive_spline_backwards();
+ const bool backwards = trajectory->drive_spline_backwards();
if (backwards) {
::Eigen::Matrix<double, 2, 1> swapU(U_ff(1, 0), U_ff(0, 0));
U_ff = -swapU;
@@ -148,11 +168,11 @@
goal_state(4, 0) = -left_goal;
}
const Eigen::Matrix<double, 5, 1> relative_goal =
- current_trajectory().StateToPathRelativeState(current_distance,
- goal_state, backwards);
+ trajectory->StateToPathRelativeState(current_distance, goal_state,
+ backwards);
const Eigen::Matrix<double, 5, 1> relative_state =
- current_trajectory().StateToPathRelativeState(current_distance, state,
- backwards);
+ trajectory->StateToPathRelativeState(current_distance, state,
+ backwards);
Eigen::Matrix<double, 5, 1> state_error = relative_goal - relative_state;
state_error(2, 0) = ::aos::math::NormalizeAngle(state_error(2, 0));
::Eigen::Matrix<double, 2, 1> U_fb = K * state_error;
@@ -163,7 +183,7 @@
}
::Eigen::Matrix<double, 2, 1> xv_state = current_xva_.block<2, 1>(0, 0);
- next_xva_ = current_trajectory().GetNextXVA(dt_config_.dt, &xv_state);
+ next_xva_ = trajectory->GetNextXVA(dt_config_.dt, &xv_state);
next_U_ = U_ff + U_fb - voltage_error;
uncapped_U_ = next_U_;
ScaleCapU(&next_U_);
@@ -203,7 +223,7 @@
builder->CreateUninitializedVector(trajectories_.size(), &spline_handles);
for (size_t ii = 0; ii < trajectories_.size(); ++ii) {
- spline_handles[ii] = trajectories_[ii]->spline_handle();
+ spline_handles[ii] = trajectories_[ii].spline_handle();
}
drivetrain::TrajectoryLogging::Builder trajectory_logging_builder(*builder);
@@ -211,7 +231,7 @@
::Eigen::Matrix<double, 5, 1> goal_state = CurrentGoalState();
trajectory_logging_builder.add_x(goal_state(0));
trajectory_logging_builder.add_y(goal_state(1));
- if (current_trajectory().drive_spline_backwards()) {
+ if (CHECK_NOTNULL(current_trajectory())->drive_spline_backwards()) {
trajectory_logging_builder.add_left_velocity(-goal_state(4));
trajectory_logging_builder.add_right_velocity(-goal_state(3));
trajectory_logging_builder.add_theta(
@@ -223,8 +243,7 @@
trajectory_logging_builder.add_right_velocity(goal_state(4));
}
}
- trajectory_logging_builder.add_is_executing(!IsAtEnd() &&
- executing_spline_);
+ trajectory_logging_builder.add_is_executing(!IsAtEnd() && executing_spline_);
trajectory_logging_builder.add_is_executed(executing_spline_ && IsAtEnd());
if (commanded_spline_) {
trajectory_logging_builder.add_goal_spline_handle(*commanded_spline_);
@@ -233,8 +252,9 @@
}
}
trajectory_logging_builder.add_distance_remaining(
- executing_spline_ ? current_trajectory().length() - current_xva_.x()
- : 0.0);
+ executing_spline_
+ ? CHECK_NOTNULL(current_trajectory())->length() - current_xva_.x()
+ : 0.0);
trajectory_logging_builder.add_available_splines(handles_vector);
return trajectory_logging_builder.Finish();
diff --git a/frc971/control_loops/drivetrain/splinedrivetrain.h b/frc971/control_loops/drivetrain/splinedrivetrain.h
index 390236b..cd712bc 100644
--- a/frc971/control_loops/drivetrain/splinedrivetrain.h
+++ b/frc971/control_loops/drivetrain/splinedrivetrain.h
@@ -5,7 +5,6 @@
#include <thread>
#include "Eigen/Dense"
-
#include "aos/condition.h"
#include "aos/mutex/mutex.h"
#include "frc971/control_loops/control_loops_generated.h"
@@ -23,6 +22,7 @@
class SplineDrivetrain {
public:
+ static constexpr size_t kMaxTrajectories = 5;
SplineDrivetrain(const DrivetrainConfig<double> &dt_config);
void SetGoal(const ::frc971::control_loops::drivetrain::Goal *goal);
@@ -49,15 +49,15 @@
// Accessor for the current goal state, pretty much only present for debugging
// purposes.
::Eigen::Matrix<double, 5, 1> CurrentGoalState() const {
- return executing_spline_ ? current_trajectory().GoalState(current_xva_(0),
- current_xva_(1))
+ return executing_spline_ ? CHECK_NOTNULL(current_trajectory())
+ ->GoalState(current_xva_(0), current_xva_(1))
: ::Eigen::Matrix<double, 5, 1>::Zero();
}
bool IsAtEnd() const {
- return executing_spline_
- ? current_trajectory().is_at_end(current_xva_.block<2, 1>(0, 0))
- : true;
+ return executing_spline_ ? CHECK_NOTNULL(current_trajectory())
+ ->is_at_end(current_xva_.block<2, 1>(0, 0))
+ : true;
}
size_t trajectory_count() const { return trajectories_.size(); }
@@ -70,21 +70,20 @@
// This is called to update the internal state for managing all the splines.
// Calling it redundantly does not cause any issues. It checks the value of
- // commanded_spline_ to determine whether we are being commanded to run a
+ // commanded_spline to determine whether we are being commanded to run a
// spline, and if there is any trajectory in the list of trajectories matching
- // the command, we begin/continue executing that spline. If commanded_spline_
+ // the command, we begin/continue executing that spline. If commanded_spline
// is empty or has changed, we stop executing the previous trajectory and
// remove it from trajectories_. Then, when the drivetrain code checks
// HasTrajectory() for the old trajectory, it will return false and the
// drivetrain can free up the fetcher to get the next trajectory.
- void UpdateSplineHandles();
+ void UpdateSplineHandles(std::optional<int> commanded_spline);
// Deletes the currently executing trajectory.
void DeleteCurrentSpline();
- const FinishedTrajectory ¤t_trajectory() const {
- return *CHECK_NOTNULL(current_trajectory_);
- }
+ FinishedTrajectory *current_trajectory();
+ const FinishedTrajectory *current_trajectory() const;
const DrivetrainConfig<double> dt_config_;
@@ -97,8 +96,7 @@
// TODO(james): Sort out construction to avoid so much dynamic memory
// allocation...
- std::vector<std::unique_ptr<FinishedTrajectory>> trajectories_;
- const FinishedTrajectory *current_trajectory_ = nullptr;
+ aos::SizedArray<FinishedTrajectory, kMaxTrajectories> trajectories_;
std::optional<int> commanded_spline_;
diff --git a/frc971/control_loops/drivetrain/trajectory.h b/frc971/control_loops/drivetrain/trajectory.h
index 6bccc74..ab1c55f 100644
--- a/frc971/control_loops/drivetrain/trajectory.h
+++ b/frc971/control_loops/drivetrain/trajectory.h
@@ -3,12 +3,12 @@
#include <chrono>
-#include "aos/flatbuffers.h"
#include "Eigen/Dense"
+#include "aos/flatbuffers.h"
#include "frc971/control_loops/drivetrain/distance_spline.h"
#include "frc971/control_loops/drivetrain/drivetrain_config.h"
-#include "frc971/control_loops/drivetrain/trajectory_generated.h"
#include "frc971/control_loops/drivetrain/spline_goal_generated.h"
+#include "frc971/control_loops/drivetrain/trajectory_generated.h"
#include "frc971/control_loops/hybrid_state_feedback_loop.h"
#include "frc971/control_loops/runge_kutta.h"
#include "frc971/control_loops/state_feedback_loop.h"
@@ -193,11 +193,11 @@
HybridKalman<2, 2, 2>>>
velocity_drivetrain_;
- const DrivetrainConfig<double> config_;
+ DrivetrainConfig<double> config_;
// Robot radiuses.
- const double robot_radius_l_;
- const double robot_radius_r_;
+ double robot_radius_l_;
+ double robot_radius_r_;
float lateral_acceleration_ = 3.0;
float longitudinal_acceleration_ = 2.0;
float voltage_limit_ = 12.0;
@@ -205,7 +205,7 @@
// A wrapper around the Trajectory flatbuffer to allow for controlling to a
// spline using a pre-generated trajectory.
-class FinishedTrajectory : public BaseTrajectory {
+class FinishedTrajectory : public BaseTrajectory {
public:
// Note: The lifetime of the supplied buffer is assumed to be greater than
// that of this object.
@@ -225,6 +225,11 @@
HybridKalman<2, 2, 2>>>(
config.make_hybrid_drivetrain_velocity_loop())) {}
+ FinishedTrajectory(const FinishedTrajectory &) = delete;
+ FinishedTrajectory &operator=(const FinishedTrajectory &) = delete;
+ FinishedTrajectory(FinishedTrajectory &&) = default;
+ FinishedTrajectory &operator=(FinishedTrajectory &&) = default;
+
virtual ~FinishedTrajectory() = default;
// Takes the 5-element state that is [x, y, theta, v_left, v_right] and
@@ -252,7 +257,7 @@
private:
const DistanceSplineBase &spline() const override { return spline_; }
const fb::Trajectory *buffer_;
- const FinishedDistanceSpline spline_;
+ FinishedDistanceSpline spline_;
};
 // Class to handle planning a trajectory and producing a flatbuffer containing
@@ -268,8 +273,7 @@
virtual ~Trajectory() = default;
- std::vector<Eigen::Matrix<double, 3, 1>> PlanXVA(
- std::chrono::nanoseconds dt);
+ std::vector<Eigen::Matrix<double, 3, 1>> PlanXVA(std::chrono::nanoseconds dt);
enum class VoltageLimit {
kConservative,
@@ -375,10 +379,7 @@
const DistanceSpline &spline() const override { return spline_; }
private:
-
- float plan_velocity(size_t index) const override {
- return plan_[index];
- }
+ float plan_velocity(size_t index) const override { return plan_[index]; }
size_t distance_plan_size() const override { return plan_.size(); }
fb::SegmentConstraint plan_constraint(size_t index) const override {
@@ -410,8 +411,7 @@
inline Eigen::Matrix<double, 5, 1> ContinuousDynamics(
const StateFeedbackHybridPlant<2, 2, 2> &velocity_drivetrain,
const Eigen::Matrix<double, 2, 2> &Tlr_to_la,
- const Eigen::Matrix<double, 5, 1> X,
- const Eigen::Matrix<double, 2, 1> U) {
+ const Eigen::Matrix<double, 5, 1> X, const Eigen::Matrix<double, 2, 1> U) {
const auto &velocity = X.block<2, 1>(3, 0);
const double theta = X(2);
Eigen::Matrix<double, 2, 1> la = Tlr_to_la * velocity;
diff --git a/frc971/rockpi/contents/home/pi/.ssh/authorized_keys b/frc971/rockpi/contents/home/pi/.ssh/authorized_keys
index 9fd2597..f2966fc 100644
--- a/frc971/rockpi/contents/home/pi/.ssh/authorized_keys
+++ b/frc971/rockpi/contents/home/pi/.ssh/authorized_keys
@@ -1,4 +1,19 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI002gCm4aRVrIcg2G4/qF4D1oNY74HFFAHjNUIgvrmqSEn+Oy+pxigpJFiZZaJMJpaw4kpd1IEpZxhooZm4DC4/bVV3wAFTw/OJI7D75WgrRrBRHd95TMBwYyNUhoDOcPAoZ69+IL9P0rhmNjgCv6Y+3PG+Rl6IqRPuf3dXX/PT3E/h8B18PRkEnas/3WTW8goov6x10kVAa5I+iQansiyAbPQF7E+Q5mpsnl26V2vpHo1UAk7y+TD7jqifEn13TmLeTkDXmaIOflQeOBMAdErftuqrClPa00VbejP18v02RI/jOIAQ250g0hN3zvKi2eNHUPdAzlMB4cSvZspRrB /home/austin/.ssh/id_rsa
+
ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgzaqXNuB589EgR6/ljdYhp5Ca+B8eimCTmmC23oQvNyIAAAADAQABAAABAQDI002gCm4aRVrIcg2G4/qF4D1oNY74HFFAHjNUIgvrmqSEn+Oy+pxigpJFiZZaJMJpaw4kpd1IEpZxhooZm4DC4/bVV3wAFTw/OJI7D75WgrRrBRHd95TMBwYyNUhoDOcPAoZ69+IL9P0rhmNjgCv6Y+3PG+Rl6IqRPuf3dXX/PT3E/h8B18PRkEnas/3WTW8goov6x10kVAa5I+iQansiyAbPQF7E+Q5mpsnl26V2vpHo1UAk7y+TD7jqifEn13TmLeTkDXmaIOflQeOBMAdErftuqrClPa00VbejP18v02RI/jOIAQ250g0hN3zvKi2eNHUPdAzlMB4cSvZspRrBAAAAAAAAAAAAAAABAAAAHmF1c3Rpbi5zY2h1aEBibHVlcml2ZXJ0ZWNoLmNvbQAAAA8AAAADYnJ0AAAABHJvb3QAAAAAWGi3PAAAAABjhSpmAAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAKwAAAATZWNkc2Etc2hhMi1uaXN0cDUyMQAAAAhuaXN0cDUyMQAAAIUEANvRmN8fXmKOO6xQPsllgbHxX+hvP4sU8/ayxw1K9C2MlGT3OKPgnjWEmvEPgpPR+/YQ6asQnP+jucdgCM8q7+c4ATwFnMO7yl2LCU1UkCKShzoumXflKC9rWNVT6MY4PTbpQXui5XE0gIZrjKrkcfCGjvRouUasM/C5Zro/aGQFkL6XAAAApwAAABNlY2RzYS1zaGEyLW5pc3RwNTIxAAAAjAAAAEIAswnueuP8iT7Qbzr1yBx6tLbNY9jewA6NEkLnFJtu11VzBFFaLWxeXwKPy3ajT0DCzt6EX6YKBHfYngnzdyjP9KkAAABCAWsxaA9D59ToYmbEKT//85dczH397v6As8WeQMAMzKfJYVSJBceHiwt6EbRKd6m+xUsd/Sr4Bj/Eu2VvwplqCpOq /home/austin/.ssh/id_rsa
+
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLyVTxr8s3kjz+PEjEymgwC8o64IZgBRJbtq9HZ16ufEHqxCD6WK21v8XsbAyTo3/vIfiae+SxhZTC9PMA1AQXuXCBTcvH1avHlHNPgnfxOfzNpU5LSZx/hqrx9tJ+ELV6m34XUbAhIhXJSyiPE2Mst8et6XUvXLgQ8hr0vwXZ3jitI0WzdoZE2svQhn/Cw+NnFiIyhVm4VTnw0bo5XVvvCawvZdTWsyXIvYx9P7rJ5Kvr1eJTZB+tDynzEFxJZeC+lnE6kV8NudC/7hLwwn1Uvqon17Z4P8ukxDsaD2Y4a2v0zqqN0FkEAKjhcqRWdyHM2JOeygRJa1sABNzt4gJB austin@ASchuh-T480s
+
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDDs88Uby+A7k69WSUXp6wmDrsIIFib5bsJ/0oHdjRWSZc4gHVGVF4cFCIJsjvuiQ3LlQ1vVb4ot7kXDNhPWEENRiuMVN3bovTr0fIjSi+YzhidIUZV44LhIkf2XorjpBjfdKE8YyZgYU0wway6myLijoEy6UnLaYerFjUh0k0p+R/axNtD48Glge82pvihosNt4J4592PGbfoTg7hgPizz4Yp39MtYN0OAqHDSrthU/ceA97prMo9tugozHthDasNAb1u/KiOr86dswLiGhwfM0aWAStIu+jie8fKzFtPFFvCyeEaGTYJ/nKiTq2qX2VNLk2zoqXoP6OPHTztejMtRyuRZxx3+mwhDT1lwUQx/ZsFqMTuIOjGQjpzQg3/Q2Y7rnSeQmgc5LglzaH5SRQ7i3nXJvDm1akdHRFFjarBw9Pb2p8DsDaTmJ6gpoEFqZZa1RM5ZCab8bL9z0pHBdpqhIXcPflDQE7Qi8win+LlWBwFyjhu5PvNnAKFEv6uQC1M= austin@aschuh-3950x
+
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDONITvwkh8whp/ac9mhqIk+H4JV7gHIO6OYiOeJfd8mJGAKbox6JNPsnZ2Yewse9oc+MDKZ+dyLqBsfIVG0MUh96hJZHwIYW6wObcZt7Zj6c+JzbAQNavWltsu0IIA7wVrdz2qOXLuH3MhM5URP29IsmF9EUp2H1/iRUMlwz3+1GckYWmiJl+JKNct1DofJb4RdbY2iG34QgS6FvLEKZBxGYPboPD0DLylymdAZAq2TjCcngiq+5BWmy79BLoA2xwJjiz1WPbSILIygfX6e85FGf9ZskylfgHcltAP4xrg3ci7ygJZSw7rd37pV6PfxOz4vGmA7ROT+ocbkXtzkmGk6Qb8HPqT9uRl9Y2Wm4YQMIrREHvjXtGHePCS6R8rrK6og4U4l6Fn4ghicemoRcQXTeX8RJFN+jCpBmks+P6UbYW8k2424kVhNMHz2sHT+BU5Klz9HQvcqlCqupqiPT6HF/gzMZgMHAf2Dp17CpoDbhkhLmCE1TmFOKqNqHP31fs= milindupadhyay@mdu-brt
+
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNUJEb8hoIboZ+FWUp9OX37xRs6s63pXfkF0Hirka+ovIziorNLxR6BixqD7xkVBwXgyMo+YRRy80eHLX2NoItoRLoMfT4uqhu/izdidLcUrpYicAxMIi1pCi1SS5BcKCHbyEqS5FrHzea+0dhIeemb1EP2NXxTDlFUINTni/Aw0z8bB3UI0tY0+K8zypuDKohL9tWOOln6e0VirhPhSCpfs89959jhmeI7Al1DuyZw1vfcaVgQ/7W59rHqP8RNcziMrwwt5hCMQQRxEu4Dd11jBiLklS+wLBI8exP/aEVXGwwc6D/zhg2zma4wQ7HuivZF+sMrP4T1RlLMK3ngbMX james@hp-debian
+
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCaElJXdu6YMo7n32jAYXVPW9HKdp2vbE1BcKz+hqZDiIpMtIwmyu+KQD7XHOfkDrDLlsKRRnEvAKAhE+Q5apHdVwnR70h79khBjvRZqXBt+lip3GyGBuymE6+2bAL6zMnZ6oKBvKcPRdIUEbv1jvJV4SfCERGp0/yQxlf/h+yy2pGTEV2iGZerfYBDIyggDzrhjEeiFUL7In4kR2dQId/BPlzZx9AVIdF6CIY7MnfsPBZUkrk9XQrCJOxzN2gGPLdan3DMCp1o2K93dvojl4dQguphanwOdw8wm+wlMiucY/buHgqqbS0EFOz/taQDBFfQ2v7ckht5CFvCY56YpGFaVW7ztf7OYbU6t8LS3dsoN8+rAesBe9fJhJWR/0zFIAM7WW6j95Sppikqh0ZcaQK2/eMKFqn8R7K9zJ20BOI+QYGNnLvIVXlFzRt0/Qy/GczhejpJddwK9zbYxVKJyMNpVz3ZUlr+nRR2UtlcM+lWe7Kbu3HxTPBNk6vSCXPE+Pc= ravago@rjmacAir
+
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSB1Nji8HEfnQX6E2FIfyvolrlihiBKukyLMA5isJhtvdaOXVOrSilpd91wB+ljLOfDy+4XWK4+8p84tvggdfvJdsLRtSBhExJNqe2ViyfUqh0O61lZg4Yvw6DGFEdS+DrqIkZh1v3WQ1Py1Mpt5V18dSu42DttHCigXFnYlpZfx9n4QT6GphkWYE3hLWHQH3uSkujzgkr86WreUi4idRDmq/r21H/MBx4q3uwWuftLS6oX79y7aukpieopeoWN8WUNscwUpUpmAn6rdFe/IHl0u8Zw4wmXoYSIQErFGsOK/rA3nKuK0uvVvUQkVKBZpjMOAAuaLugu9NYsOgqIZf/ filip@Filips-MacBook-Pro.local
+
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILZrP0on5xPZHadvlMN2/+iZzEbIeGpS5MT5hXfb+kP0 uid@pop-os
+
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+t3HrUbZb9P/zWw9+qH9i8HBcCBRO6BS0rYZMz3UuXFSxhLendcqmAJQ+cu1lI0sb2HAyIhv4d5CQrjZ2en/ZyN93DThWNRWA+iH+ybkJhpUiRVSa/e1rz06o//kL+PVoZEHRUpyy7iJgEgcRxnVZd90baMkkm51bF0NSVn+iGCIao76VS+PBGwtQFghJyAvTz+w/hLYmFtXnY2DRfPvbw8H3cehvDhUBY9V+MUwucMU38Yfucr/BguPyw/sRFzRJtpIzP+JFQZzy/Bz4DI06vIWBps6NZqGo+ETXsrlEmNRKXgp3YWMZg/VcT6IIi9NijtDt6gHNDLfxX1Q41cYErBIaUpDKWPpGJ9z+/FxtxxBHwhIQPzSvdxqgsoGKyy7rFfWOPqWHzASsAcdx8V0EPIL4VmJQOd461WRfZxAhDdo2hQe37joRjcHW2i1nJcpVSepMf7EJH/AoYcifp+b+JUpMZx79HmKtE1cua2JcfZ6b20w7rQzLIP3M2WvrDak= deepachainani@Yashs-MBP
diff --git a/scouting/db/db.go b/scouting/db/db.go
index 85e3d98..8f043de 100644
--- a/scouting/db/db.go
+++ b/scouting/db/db.go
@@ -66,6 +66,34 @@
CollectedBy string
}
+type Stats2023 struct {
+ TeamNumber string `gorm:"primaryKey"`
+ MatchNumber int32 `gorm:"primaryKey"`
+ SetNumber int32 `gorm:"primaryKey"`
+ CompLevel string `gorm:"primaryKey"`
+ StartingQuadrant int32
+ LowCubesAuto, MiddleCubesAuto, HighCubesAuto, CubesDroppedAuto int32
+ LowConesAuto, MiddleConesAuto, HighConesAuto, ConesDroppedAuto int32
+ LowCubes, MiddleCubes, HighCubes, CubesDropped int32
+ LowCones, MiddleCones, HighCones, ConesDropped int32
+ AvgCycle int32
+ // The username of the person who collected these statistics.
+ // "unknown" if submitted without logging in.
+ // Empty if the stats have not yet been collected.
+ CollectedBy string
+}
+
+type Action struct {
+ TeamNumber string `gorm:"primaryKey"`
+ MatchNumber int32 `gorm:"primaryKey"`
+ SetNumber int32 `gorm:"primaryKey"`
+ CompLevel string `gorm:"primaryKey"`
+ CompletedAction []byte
+ // This contains a serialized scouting.webserver.requests.ActionType flatbuffer.
+ TimeStamp int32 `gorm:"primaryKey"`
+ CollectedBy string
+}
+
type NotesData struct {
ID uint `gorm:"primaryKey"`
TeamNumber int32
@@ -113,7 +141,7 @@
return nil, errors.New(fmt.Sprint("Failed to connect to postgres: ", err))
}
- err = database.AutoMigrate(&Match{}, &Shift{}, &Stats{}, &NotesData{}, &Ranking{}, &DriverRankingData{})
+ err = database.AutoMigrate(&Match{}, &Shift{}, &Stats{}, &Stats2023{}, &Action{}, &NotesData{}, &Ranking{}, &DriverRankingData{})
if err != nil {
database.Delete()
return nil, errors.New(fmt.Sprint("Failed to create/migrate tables: ", err))
@@ -148,6 +176,13 @@
return result.Error
}
+func (database *Database) AddAction(a Action) error {
+ result := database.Clauses(clause.OnConflict{
+ UpdateAll: true,
+ }).Create(&a)
+ return result.Error
+}
+
func (database *Database) AddToStats(s Stats) error {
matches, err := database.queryMatches(s.TeamNumber)
if err != nil {
@@ -176,6 +211,28 @@
return result.Error
}
+func (database *Database) AddToStats2023(s Stats2023) error {
+ matches, err := database.QueryMatchesString(s.TeamNumber)
+ if err != nil {
+ return err
+ }
+ foundMatch := false
+ for _, match := range matches {
+ if match.MatchNumber == s.MatchNumber {
+ foundMatch = true
+ break
+ }
+ }
+ if !foundMatch {
+ return errors.New(fmt.Sprint(
+ "Failed to find team ", s.TeamNumber,
+ " in match ", s.MatchNumber, " in the schedule."))
+ }
+
+ result := database.Create(&s)
+ return result.Error
+}
+
func (database *Database) AddOrUpdateRankings(r Ranking) error {
result := database.Clauses(clause.OnConflict{
UpdateAll: true,
@@ -207,6 +264,12 @@
return shifts, result.Error
}
+func (database *Database) ReturnActions() ([]Action, error) {
+ var actions []Action
+ result := database.Find(&actions)
+ return actions, result.Error
+}
+
// Packs the stats. This really just consists of taking the individual auto
// ball booleans and turning them into an array. The individual booleans are
// cleared so that they don't affect struct comparisons.
@@ -249,6 +312,14 @@
return matches, result.Error
}
+func (database *Database) QueryMatchesString(teamNumber_ string) ([]Match, error) {
+ var matches []Match
+ result := database.
+ Where("r1 = $1 OR r2 = $1 OR r3 = $1 OR b1 = $1 OR b2 = $1 OR b3 = $1", teamNumber_).
+ Find(&matches)
+ return matches, result.Error
+}
+
func (database *Database) QueryAllShifts(matchNumber_ int) ([]Shift, error) {
var shifts []Shift
result := database.Where("match_number = ?", matchNumber_).Find(&shifts)
@@ -265,6 +336,13 @@
return stats, result.Error
}
+func (database *Database) QueryActions(teamNumber_ int) ([]Action, error) {
+ var actions []Action
+ result := database.
+ Where("team_number = ?", teamNumber_).Find(&actions)
+ return actions, result.Error
+}
+
func (database *Database) QueryNotes(TeamNumber int32) ([]string, error) {
var rawNotes []NotesData
result := database.Where("team_number = ?", TeamNumber).Find(&rawNotes)
diff --git a/scouting/db/db_test.go b/scouting/db/db_test.go
index 47a0a90..9e85651 100644
--- a/scouting/db/db_test.go
+++ b/scouting/db/db_test.go
@@ -623,6 +623,54 @@
}
}
+func TestReturnActionsDB(t *testing.T) {
+ fixture := createDatabase(t)
+ defer fixture.TearDown()
+ correct := []Action{
+ Action{
+ TeamNumber: "1235", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0000, CollectedBy: "",
+ },
+ Action{
+ TeamNumber: "1236", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0321, CollectedBy: "",
+ },
+ Action{
+ TeamNumber: "1237", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0222, CollectedBy: "",
+ },
+ Action{
+ TeamNumber: "1238", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0110, CollectedBy: "",
+ },
+ Action{
+ TeamNumber: "1239", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0004, CollectedBy: "",
+ },
+ Action{
+ TeamNumber: "1233", MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ CompletedAction: []byte(""), TimeStamp: 0004, CollectedBy: "",
+ },
+ }
+
+ err := fixture.db.AddToMatch(Match{
+ MatchNumber: 94, SetNumber: 1, CompLevel: "quals",
+ R1: 1235, R2: 1236, R3: 1237, B1: 1238, B2: 1239, B3: 1233})
+ check(t, err, "Failed to add match")
+
+ for i := 0; i < len(correct); i++ {
+ err = fixture.db.AddAction(correct[i])
+ check(t, err, fmt.Sprint("Failed to add to actions ", i))
+ }
+
+ got, err := fixture.db.ReturnActions()
+ check(t, err, "Failed ReturnActions()")
+
+ if !reflect.DeepEqual(correct, got) {
+ t.Errorf("Got %#v,\nbut expected %#v.", got, correct)
+ }
+}
+
func TestRankingsDbUpdate(t *testing.T) {
fixture := createDatabase(t)
defer fixture.TearDown()
diff --git a/scouting/webserver/requests/BUILD b/scouting/webserver/requests/BUILD
index 8cc59a9..9eb207d 100644
--- a/scouting/webserver/requests/BUILD
+++ b/scouting/webserver/requests/BUILD
@@ -24,6 +24,8 @@
"//scouting/webserver/requests/messages:request_notes_for_team_response_go_fbs",
"//scouting/webserver/requests/messages:request_shift_schedule_go_fbs",
"//scouting/webserver/requests/messages:request_shift_schedule_response_go_fbs",
+ "//scouting/webserver/requests/messages:submit_actions_go_fbs",
+ "//scouting/webserver/requests/messages:submit_actions_response_go_fbs",
"//scouting/webserver/requests/messages:submit_data_scouting_go_fbs",
"//scouting/webserver/requests/messages:submit_data_scouting_response_go_fbs",
"//scouting/webserver/requests/messages:submit_driver_ranking_go_fbs",
diff --git a/scouting/webserver/requests/messages/BUILD b/scouting/webserver/requests/messages/BUILD
index 57b7ae3..2e118d5 100644
--- a/scouting/webserver/requests/messages/BUILD
+++ b/scouting/webserver/requests/messages/BUILD
@@ -25,6 +25,8 @@
"submit_shift_schedule_response",
"submit_driver_ranking",
"submit_driver_ranking_response",
+ "submit_actions",
+ "submit_actions_response",
)
filegroup(
diff --git a/scouting/webserver/requests/messages/request_data_scouting_response.fbs b/scouting/webserver/requests/messages/request_data_scouting_response.fbs
index 071a848..6048210 100644
--- a/scouting/webserver/requests/messages/request_data_scouting_response.fbs
+++ b/scouting/webserver/requests/messages/request_data_scouting_response.fbs
@@ -49,4 +49,4 @@
stats_list:[Stats] (id:0);
}
-root_type RequestDataScoutingResponse;
+root_type RequestDataScoutingResponse;
\ No newline at end of file
diff --git a/scouting/webserver/requests/messages/submit_actions.fbs b/scouting/webserver/requests/messages/submit_actions.fbs
new file mode 100644
index 0000000..9d9efa4
--- /dev/null
+++ b/scouting/webserver/requests/messages/submit_actions.fbs
@@ -0,0 +1,53 @@
+namespace scouting.webserver.requests;
+
+table StartMatchAction {
+ position:int (id:0);
+}
+
+enum ObjectType: short {
+ kCube,
+ kCone
+}
+
+enum ScoreLevel: short {
+ kLow,
+ kMiddle,
+ kHigh
+}
+
+table PickupObjectAction {
+ object_type:ObjectType (id:0);
+ auto:bool (id:1);
+}
+
+table PlaceObjectAction {
+ object_type:ObjectType (id:0);
+ score_level:ScoreLevel (id:1);
+ auto:bool (id:2);
+}
+
+table RobotDeathAction {
+ robot_on:bool (id:0);
+}
+
+table EndMatchAction {
+}
+
+union ActionType {
+ StartMatchAction,
+ PickupObjectAction,
+ PlaceObjectAction,
+ RobotDeathAction,
+ EndMatchAction
+}
+
+table Action {
+ timestamp:int (id:0);
+ action_taken:ActionType (id:2);
+}
+
+table SubmitActions {
+ actions_list:[Action] (id:0);
+}
+
+root_type SubmitActions;
\ No newline at end of file
diff --git a/scouting/webserver/requests/messages/submit_actions_response.fbs b/scouting/webserver/requests/messages/submit_actions_response.fbs
new file mode 100644
index 0000000..4079914
--- /dev/null
+++ b/scouting/webserver/requests/messages/submit_actions_response.fbs
@@ -0,0 +1,7 @@
+namespace scouting.webserver.requests;
+
+table SubmitActionsResponse {
+ // empty response
+}
+
+root_type SubmitActionsResponse;
\ No newline at end of file
diff --git a/scouting/webserver/requests/requests.go b/scouting/webserver/requests/requests.go
index 76a26de..99b5459 100644
--- a/scouting/webserver/requests/requests.go
+++ b/scouting/webserver/requests/requests.go
@@ -27,6 +27,8 @@
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_notes_for_team_response"
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule"
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/request_shift_schedule_response"
+ "github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_actions"
+ "github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_actions_response"
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting"
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_data_scouting_response"
"github.com/frc971/971-Robot-Code/scouting/webserver/requests/messages/submit_driver_ranking"
@@ -61,6 +63,8 @@
type SubmitShiftScheduleResponseT = submit_shift_schedule_response.SubmitShiftScheduleResponseT
type SubmitDriverRanking = submit_driver_ranking.SubmitDriverRanking
type SubmitDriverRankingResponseT = submit_driver_ranking_response.SubmitDriverRankingResponseT
+type SubmitActions = submit_actions.SubmitActions
+type SubmitActionsResponseT = submit_actions_response.SubmitActionsResponseT
// The interface we expect the database abstraction to conform to.
// We use an interface here because it makes unit testing easier.
diff --git a/tools/build_rules/jinja2_generator.py b/tools/build_rules/jinja2_generator.py
index e5f6fa3..3575e99 100644
--- a/tools/build_rules/jinja2_generator.py
+++ b/tools/build_rules/jinja2_generator.py
@@ -22,7 +22,8 @@
args = parser.parse_args(sys.argv[1:])
with open(args.template, 'r') as input_file:
- template = jinja2.Template(input_file.read())
+ template = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(".")).from_string(input_file.read())
output = template.render(args.replacements)
with open(args.output, 'w') as config_file:
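
Why the generator change matters: a bare jinja2.Template carries an environment with no loader, so a template that uses {% include %} fails at render time, while a jinja2.Environment built with FileSystemLoader(".") resolves includes relative to the working directory. The template.bzl change below then lists the included files as inputs so Bazel tracks them as dependencies of the generated output. A minimal sketch of the difference, assuming a hypothetical channels.json sitting next to the template (not part of this change):

```python
import jinja2

# A template that pulls in another file via {% include %}.
template_text = '{"num": {{ NUM }}, "channels": {% include "channels.json" %}}'

# jinja2.Template(template_text).render(NUM=1) would fail here: the implicit
# environment has no loader, so {% include %} cannot locate "channels.json".

# With a FileSystemLoader rooted at ".", the include is read from disk.
env = jinja2.Environment(loader=jinja2.FileSystemLoader("."))
template = env.from_string(template_text)
print(template.render({"NUM": 1}))  # assumes ./channels.json exists
```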
diff --git a/tools/build_rules/template.bzl b/tools/build_rules/template.bzl
index d4807e8..0b9167e 100644
--- a/tools/build_rules/template.bzl
+++ b/tools/build_rules/template.bzl
@@ -2,7 +2,7 @@
out = ctx.actions.declare_file(ctx.attr.name)
ctx.actions.run_shell(
- inputs = ctx.files.src,
+ inputs = ctx.files.src + ctx.files.includes,
tools = [ctx.executable._jinja2],
progress_message = "Generating " + out.short_path,
outputs = [out],
@@ -22,6 +22,10 @@
mandatory = True,
doc = """The parameters to supply to Jinja2.""",
),
+ "includes": attr.label_list(
+ allow_files = True,
+ doc = """Files which are included by the template.""",
+ ),
"_jinja2": attr.label(
default = "//tools/build_rules:jinja2_generator",
cfg = "host",
diff --git a/y2020/control_loops/superstructure/turret/aiming.cc b/y2020/control_loops/superstructure/turret/aiming.cc
index 4976efa..0b2c5f3 100644
--- a/y2020/control_loops/superstructure/turret/aiming.cc
+++ b/y2020/control_loops/superstructure/turret/aiming.cc
@@ -9,9 +9,9 @@
namespace turret {
using frc971::control_loops::Pose;
-using frc971::control_loops::aiming::TurretGoal;
-using frc971::control_loops::aiming::ShotConfig;
using frc971::control_loops::aiming::RobotState;
+using frc971::control_loops::aiming::ShotConfig;
+using frc971::control_loops::aiming::TurretGoal;
// Shooting-on-the-fly concept:
// The current way that we manage shooting-on-the fly endeavors to be reasonably
@@ -129,7 +129,9 @@
return target;
}
-Aimer::Aimer() : goal_(MakePrefilledGoal()) {}
+Aimer::Aimer()
+ : goal_(MakePrefilledGoal()),
+ Tlr_to_la_(drivetrain::GetDrivetrainConfig().Tlr_to_la()) {}
void Aimer::Update(const Status *status, aos::Alliance alliance,
WrapMode wrap_mode, ShotMode shot_mode) {
@@ -145,9 +147,8 @@
// robot. All of this would be helped by just doing this work in the Localizer
// itself.
const Eigen::Vector2d linear_angular =
- drivetrain::GetDrivetrainConfig().Tlr_to_la() *
- Eigen::Vector2d(status->localizer()->left_velocity(),
- status->localizer()->right_velocity());
+ Tlr_to_la_ * Eigen::Vector2d(status->localizer()->left_velocity(),
+ status->localizer()->right_velocity());
const double xdot = linear_angular(0) * std::cos(status->theta());
const double ydot = linear_angular(0) * std::sin(status->theta());
diff --git a/y2020/control_loops/superstructure/turret/aiming.h b/y2020/control_loops/superstructure/turret/aiming.h
index 217085c..f38113d 100644
--- a/y2020/control_loops/superstructure/turret/aiming.h
+++ b/y2020/control_loops/superstructure/turret/aiming.h
@@ -64,6 +64,8 @@
// Real-world distance to the target.
double target_distance_ = 0.0; // meters
double inner_port_angle_ = 0.0; // radians
+
+ Eigen::Matrix<double, 2, 2> Tlr_to_la_;
};
} // namespace turret
diff --git a/y2020/control_loops/superstructure/turret/aiming_test.cc b/y2020/control_loops/superstructure/turret/aiming_test.cc
index a01f47d..fe71e31 100644
--- a/y2020/control_loops/superstructure/turret/aiming_test.cc
+++ b/y2020/control_loops/superstructure/turret/aiming_test.cc
@@ -12,12 +12,24 @@
namespace turret {
namespace testing {
+class TeamNumberEnvironment : public ::testing::Environment {
+ public:
+ ~TeamNumberEnvironment() override {}
+
+  // Set the team number once for every test in this binary.
+ void SetUp() override { aos::network::OverrideTeamNumber(971); }
+
+  // Nothing to tear down.
+ void TearDown() override {}
+};
+
+::testing::Environment *const team_number_env =
+ ::testing::AddGlobalTestEnvironment(new TeamNumberEnvironment);
+
using frc971::control_loops::Pose;
class AimerTest : public ::testing::Test {
public:
- AimerTest() { aos::network::OverrideTeamNumber(971); }
-
typedef Aimer::Goal Goal;
typedef Aimer::Status Status;
struct StatusData {
diff --git a/y2023/BUILD b/y2023/BUILD
index 20092eb..eb265a4 100644
--- a/y2023/BUILD
+++ b/y2023/BUILD
@@ -267,3 +267,15 @@
],
target_compatible_with = ["@platforms//os:linux"],
)
+
+cc_binary(
+ name = "ssd_profiler",
+ srcs = [
+ "ssd_profiler.cc",
+ ],
+ deps = [
+ "//aos:init",
+ "//aos/time",
+ "@com_github_google_glog//:glog",
+ ],
+)
diff --git a/y2023/control_loops/python/graph_edit.py b/y2023/control_loops/python/graph_edit.py
index 7caf8bc..39cd814 100644
--- a/y2023/control_loops/python/graph_edit.py
+++ b/y2023/control_loops/python/graph_edit.py
@@ -14,7 +14,7 @@
from graph_tools import XYSegment, AngleSegment, to_theta, to_xy, alpha_blend, draw_lines
from graph_tools import back_to_xy_loop, subdivide_theta, to_theta_loop, px
from graph_tools import l1, l2, joint_center
-from graph_paths import segments
+import graph_paths
from frc971.control_loops.python.basic_window import quit_main_loop, set_color
@@ -206,9 +206,6 @@
event.x = o_x
event.y = o_y
- def do_button_press(self, event):
- pass
-
def _do_configure(self, event):
self.window_shape = (event.width, event.height)
@@ -257,7 +254,32 @@
self.extents_y_max - self.extents_y_min)
cr.fill()
- if not self.theta_version:
+ if self.theta_version:
+ # Draw a filled white rectangle.
+ set_color(cr, palette["WHITE"])
+ cr.rectangle(-numpy.pi, -numpy.pi, numpy.pi * 2.0, numpy.pi * 2.0)
+ cr.fill()
+
+ set_color(cr, palette["BLUE"])
+ for i in range(-6, 6):
+ cr.move_to(-40, -40 + i * numpy.pi)
+ cr.line_to(40, 40 + i * numpy.pi)
+ with px(cr):
+ cr.stroke()
+
+ set_color(cr, Color(0.5, 0.5, 1.0))
+ draw_lines(cr, lines_theta)
+
+ set_color(cr, Color(0.0, 1.0, 0.2))
+ cr.move_to(self.last_pos[0], self.last_pos[1])
+ draw_px_cross(cr, 5)
+
+ c_pt, dist = closest_segment(lines_theta, self.last_pos)
+ print("dist:", dist, c_pt, self.last_pos)
+ set_color(cr, palette["CYAN"])
+ cr.move_to(c_pt[0], c_pt[1])
+ draw_px_cross(cr, 5)
+ else:
# Draw a filled white rectangle.
set_color(cr, palette["WHITE"])
cr.rectangle(-2.0, -2.0, 4.0, 4.0)
@@ -272,24 +294,7 @@
2.0 * numpy.pi)
with px(cr):
cr.stroke()
- else:
- # Draw a filled white rectangle.
- set_color(cr, palette["WHITE"])
- cr.rectangle(-numpy.pi, -numpy.pi, numpy.pi * 2.0, numpy.pi * 2.0)
- cr.fill()
- if self.theta_version:
- set_color(cr, palette["BLUE"])
- for i in range(-6, 6):
- cr.move_to(-40, -40 + i * numpy.pi)
- cr.line_to(40, 40 + i * numpy.pi)
- with px(cr):
- cr.stroke()
-
- if self.theta_version:
- set_color(cr, Color(0.5, 0.5, 1.0))
- draw_lines(cr, lines_theta)
- else:
set_color(cr, Color(0.5, 1.0, 1.0))
draw_lines(cr, lines1)
draw_lines(cr, lines2)
@@ -320,7 +325,6 @@
with px(cr):
cr.stroke()
- if not self.theta_version:
theta1, theta2 = to_theta(self.last_pos,
self.circular_index_select)
x, y = joint_center[0], joint_center[1]
@@ -339,17 +343,6 @@
set_color(cr, Color(0.0, 1.0, 0.2))
draw_px_cross(cr, 20)
- if self.theta_version:
- set_color(cr, Color(0.0, 1.0, 0.2))
- cr.move_to(self.last_pos[0], self.last_pos[1])
- draw_px_cross(cr, 5)
-
- c_pt, dist = closest_segment(lines_theta, self.last_pos)
- print("dist:", dist, c_pt, self.last_pos)
- set_color(cr, palette["CYAN"])
- cr.move_to(c_pt[0], c_pt[1])
- draw_px_cross(cr, 5)
-
set_color(cr, Color(0.0, 0.5, 1.0))
for segment in self.segments:
color = [0, random.random(), 1]
@@ -478,5 +471,5 @@
silly = Silly()
-silly.segments = segments
+silly.segments = graph_paths.segments
basic_window.RunApp()
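
One likely reason for switching graph_edit.py from "from graph_paths import segments" to "import graph_paths": the from-import copies the binding once at import time, while attribute access through the module always reads the current value, so the editor keeps seeing graph_paths.segments even if that list is rebuilt or the module is reloaded while iterating on paths. A small self-contained sketch of that binding difference (the module below is simulated, not the real graph_paths):

```python
import types

# Stand-in for the real graph_paths module; segment names are illustrative only.
graph_paths = types.ModuleType("graph_paths")
graph_paths.segments = ["NeutralToCone"]

# Equivalent to `from graph_paths import segments`: a one-time copy of the name.
segments = graph_paths.segments

# The module later rebinds its segment list (e.g. after the paths are edited).
graph_paths.segments = graph_paths.segments + ["NeutralToPerchedCone"]

print(segments)              # ['NeutralToCone']: the stale, imported binding
print(graph_paths.segments)  # ['NeutralToCone', 'NeutralToPerchedCone']
```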
diff --git a/y2023/control_loops/python/graph_paths.py b/y2023/control_loops/python/graph_paths.py
index 26488bd..da0ad4f 100644
--- a/y2023/control_loops/python/graph_paths.py
+++ b/y2023/control_loops/python/graph_paths.py
@@ -2,273 +2,25 @@
from graph_tools import *
-tall_box_x = 0.411
-tall_box_y = 0.125
+neutral = to_theta_with_circular_index(-0.2, 0.33, circular_index=-1)
+zero = to_theta_with_circular_index(0.0, 0.0, circular_index=-1)
-short_box_x = 0.431
-short_box_y = 0.082
+neutral_to_cone_1 = to_theta_with_circular_index(0.0, 0.7, circular_index=-1)
+neutral_to_cone_2 = to_theta_with_circular_index(0.2, 0.5, circular_index=-1)
+cone_pos = to_theta_with_circular_index(1.0, 0.4, circular_index=-1)
-ready_above_box = to_theta_with_circular_index(tall_box_x,
- tall_box_y + 0.08,
- circular_index=-1)
-tall_box_grab = to_theta_with_circular_index(tall_box_x,
- tall_box_y,
- circular_index=-1)
-short_box_grab = to_theta_with_circular_index(short_box_x,
- short_box_y,
- circular_index=-1)
+neutral_to_cone_perch_pos_1 = to_theta_with_circular_index(0.4,
+ 1.0,
+ circular_index=-1)
+neutral_to_cone_perch_pos_2 = to_theta_with_circular_index(0.7,
+ 1.5,
+ circular_index=-1)
+cone_perch_pos = to_theta_with_circular_index(1.0, 2.0, circular_index=-1)
-# TODO(austin): Drive the front/back off the same numbers a bit better.
-front_high_box = to_theta_with_circular_index(0.378, 2.46, circular_index=-1)
-front_middle3_box = to_theta_with_circular_index(0.700,
- 2.125,
- circular_index=-1.000000)
-front_middle2_box = to_theta_with_circular_index(0.700,
- 2.268,
- circular_index=-1)
-front_middle1_box = to_theta_with_circular_index(0.800,
- 1.915,
- circular_index=-1)
-front_low_box = to_theta_with_circular_index(0.87, 1.572, circular_index=-1)
-back_high_box = to_theta_with_circular_index(-0.75, 2.48, circular_index=0)
-back_middle2_box = to_theta_with_circular_index(-0.700, 2.27, circular_index=0)
-back_middle1_box = to_theta_with_circular_index(-0.800, 1.93, circular_index=0)
-back_low_box = to_theta_with_circular_index(-0.87, 1.64, circular_index=0)
-
-back_extra_low_box = to_theta_with_circular_index(-0.87,
- 1.52,
- circular_index=0)
-
-front_switch = to_theta_with_circular_index(0.88, 0.967, circular_index=-1)
-back_switch = to_theta_with_circular_index(-0.88, 0.967, circular_index=-2)
-
-neutral = to_theta_with_circular_index(0.0, 0.33, circular_index=-1)
-
-up = to_theta_with_circular_index(0.0, 2.547, circular_index=-1)
-
-front_switch_auto = to_theta_with_circular_index(0.750,
- 2.20,
- circular_index=-1.000000)
-
-duck = numpy.array([numpy.pi / 2.0 - 0.92, numpy.pi / 2.0 - 4.26])
-
-starting = numpy.array([numpy.pi / 2.0 - 0.593329, numpy.pi / 2.0 - 3.749631])
-vertical_starting = numpy.array([numpy.pi / 2.0, -numpy.pi / 2.0])
-
-self_hang = numpy.array([numpy.pi / 2.0 - 0.191611, numpy.pi / 2.0])
-partner_hang = numpy.array([numpy.pi / 2.0 - (-0.30), numpy.pi / 2.0])
-
-above_hang = numpy.array([numpy.pi / 2.0 - 0.14, numpy.pi / 2.0 - (-0.165)])
-below_hang = numpy.array([numpy.pi / 2.0 - 0.39, numpy.pi / 2.0 - (-0.517)])
-
-up_c1 = to_theta((0.63, 1.17), circular_index=-1)
-up_c2 = to_theta((0.65, 1.62), circular_index=-1)
-
-front_high_box_c1 = to_theta((0.63, 1.04), circular_index=-1)
-front_high_box_c2 = to_theta((0.50, 1.60), circular_index=-1)
-
-front_middle2_box_c1 = to_theta((0.41, 0.83), circular_index=-1)
-front_middle2_box_c2 = to_theta((0.52, 1.30), circular_index=-1)
-
-front_middle1_box_c1 = to_theta((0.34, 0.82), circular_index=-1)
-front_middle1_box_c2 = to_theta((0.48, 1.15), circular_index=-1)
-
-#c1: (1.421433, -1.070254)
-#c2: (1.434384, -1.057803
-ready_above_box_c1 = numpy.array([1.480802, -1.081218])
-ready_above_box_c2 = numpy.array([1.391449, -1.060331])
-
-front_switch_c1 = numpy.array([1.903841, -0.622351])
-front_switch_c2 = numpy.array([1.903841, -0.622351])
-
-sparse_front_points = [
- (front_high_box, "FrontHighBox"),
- (front_middle2_box, "FrontMiddle2Box"),
- (front_middle3_box, "FrontMiddle3Box"),
- (front_middle1_box, "FrontMiddle1Box"),
- (front_low_box, "FrontLowBox"),
- (front_switch, "FrontSwitch"),
-] # yapf: disable
-
-sparse_back_points = [
- (back_high_box, "BackHighBox"),
- (back_middle2_box, "BackMiddle2Box"),
- (back_middle1_box, "BackMiddle1Box"),
- (back_low_box, "BackLowBox"),
- (back_extra_low_box, "BackExtraLowBox"),
-] # yapf: disable
-
-front_points, front_paths = expand_points(sparse_front_points, 0.06)
-back_points, back_paths = expand_points(sparse_back_points, 0.06)
-
-points = [(ready_above_box, "ReadyAboveBox"),
- (tall_box_grab, "TallBoxGrab"),
- (short_box_grab, "ShortBoxGrab"),
- (back_switch, "BackSwitch"),
- (neutral, "Neutral"),
- (up, "Up"),
- (above_hang, "AboveHang"),
- (below_hang, "BelowHang"),
- (self_hang, "SelfHang"),
- (partner_hang, "PartnerHang"),
- (front_switch_auto, "FrontSwitchAuto"),
- (starting, "Starting"),
- (duck, "Duck"),
- (vertical_starting, "VerticalStarting"),
-] + front_points + back_points # yapf: disable
-
-duck_c1 = numpy.array([1.337111, -1.721008])
-duck_c2 = numpy.array([1.283701, -1.795519])
-
-ready_to_up_c1 = numpy.array([1.792962, 0.198329])
-ready_to_up_c2 = numpy.array([1.792962, 0.198329])
-
-front_switch_auto_c1 = numpy.array([1.792857, -0.372768])
-front_switch_auto_c2 = numpy.array([1.861885, -0.273664])
-
-# We need to define critical points so we can create paths connecting them.
-# TODO(austin): Attach velocities to the slow ones.
-ready_to_back_low_c1 = numpy.array([2.524325, 0.046417])
-
-neutral_to_back_low_c1 = numpy.array([2.381942, -0.070220])
-
-tall_to_back_low_c1 = numpy.array([2.603918, 0.088298])
-tall_to_back_low_c2 = numpy.array([1.605624, 1.003434])
-
-tall_to_back_high_c2 = numpy.array([1.508610, 0.946147])
-
-# If true, only plot the first named segment
-isolate = False
-
-long_alpha_unitizer = numpy.matrix([[1.0 / 17.0, 0.0], [0.0, 1.0 / 17.0]])
-
-neutral_to_back_c1 = numpy.array([0.702527, -2.618276])
-neutral_to_back_c2 = numpy.array([0.526914, -3.109691])
-
-named_segments = [
- ThetaSplineSegment(neutral, neutral_to_back_c1, neutral_to_back_c2,
- back_switch, "BackSwitch"),
- ThetaSplineSegment(neutral,
- neutral_to_back_low_c1,
- tall_to_back_high_c2,
- back_high_box,
- "NeutralBoxToHigh",
- alpha_unitizer=long_alpha_unitizer),
- ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_high_c2,
- back_middle2_box, "NeutralBoxToMiddle2",
- long_alpha_unitizer),
- ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2,
- back_middle1_box, "NeutralBoxToMiddle1",
- long_alpha_unitizer),
- ThetaSplineSegment(neutral, neutral_to_back_low_c1, tall_to_back_low_c2,
- back_low_box, "NeutralBoxToLow", long_alpha_unitizer),
- ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
- tall_to_back_high_c2, back_high_box, "ReadyBoxToHigh",
- long_alpha_unitizer),
- ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
- tall_to_back_high_c2, back_middle2_box,
- "ReadyBoxToMiddle2", long_alpha_unitizer),
- ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
- tall_to_back_low_c2, back_middle1_box,
- "ReadyBoxToMiddle1", long_alpha_unitizer),
- ThetaSplineSegment(ready_above_box, ready_to_back_low_c1,
- tall_to_back_low_c2, back_low_box, "ReadyBoxToLow",
- long_alpha_unitizer),
- ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
- tall_to_back_high_c2, back_high_box, "ShortBoxToHigh",
- long_alpha_unitizer),
- ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
- tall_to_back_high_c2, back_middle2_box,
- "ShortBoxToMiddle2", long_alpha_unitizer),
- ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
- tall_to_back_low_c2, back_middle1_box,
- "ShortBoxToMiddle1", long_alpha_unitizer),
- ThetaSplineSegment(short_box_grab, tall_to_back_low_c1,
- tall_to_back_low_c2, back_low_box, "ShortBoxToLow",
- long_alpha_unitizer),
- ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1,
- tall_to_back_high_c2, back_high_box, "TallBoxToHigh",
- long_alpha_unitizer),
- ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1,
- tall_to_back_high_c2, back_middle2_box,
- "TallBoxToMiddle2", long_alpha_unitizer),
- ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2,
- back_middle1_box, "TallBoxToMiddle1",
- long_alpha_unitizer),
- ThetaSplineSegment(tall_box_grab, tall_to_back_low_c1, tall_to_back_low_c2,
- back_low_box, "TallBoxToLow", long_alpha_unitizer),
- SplineSegment(neutral, ready_above_box_c1, ready_above_box_c2,
- ready_above_box, "ReadyToNeutral"),
- XYSegment(ready_above_box, tall_box_grab, "ReadyToTallBox", vmax=6.0),
- XYSegment(ready_above_box, short_box_grab, "ReadyToShortBox", vmax=6.0),
- XYSegment(tall_box_grab, short_box_grab, "TallToShortBox", vmax=6.0),
- SplineSegment(neutral, ready_above_box_c1, ready_above_box_c2,
- tall_box_grab, "TallToNeutral"),
- SplineSegment(neutral, ready_above_box_c1, ready_above_box_c2,
- short_box_grab, "ShortToNeutral"),
- SplineSegment(neutral, up_c1, up_c2, up, "NeutralToUp"),
- SplineSegment(neutral, front_high_box_c1, front_high_box_c2,
- front_high_box, "NeutralToFrontHigh"),
- SplineSegment(neutral, front_middle2_box_c1, front_middle2_box_c2,
- front_middle2_box, "NeutralToFrontMiddle2"),
- SplineSegment(neutral, front_middle1_box_c1, front_middle1_box_c2,
- front_middle1_box, "NeutralToFrontMiddle1"),
+segments = [
+ ThetaSplineSegment(neutral, neutral_to_cone_1, neutral_to_cone_2, cone_pos,
+ "NeutralToCone"),
+ ThetaSplineSegment(neutral, neutral_to_cone_perch_pos_1,
+ neutral_to_cone_perch_pos_2, cone_perch_pos,
+ "NeutralToPerchedCone"),
]
-
-unnamed_segments = [
- SplineSegment(neutral, front_switch_auto_c1, front_switch_auto_c2,
- front_switch_auto),
- SplineSegment(tall_box_grab, ready_to_up_c1, ready_to_up_c2, up),
- SplineSegment(short_box_grab, ready_to_up_c1, ready_to_up_c2, up),
- SplineSegment(ready_above_box, ready_to_up_c1, ready_to_up_c2, up),
- ThetaSplineSegment(duck, duck_c1, duck_c2, neutral),
- SplineSegment(neutral, front_switch_c1, front_switch_c2, front_switch),
- XYSegment(ready_above_box, front_low_box),
- XYSegment(ready_above_box, front_switch),
- XYSegment(ready_above_box, front_middle1_box),
- XYSegment(ready_above_box, front_middle2_box),
- XYSegment(ready_above_box, front_middle3_box),
- SplineSegment(ready_above_box, ready_to_up_c1, ready_to_up_c2,
- front_high_box),
- AngleSegment(starting, vertical_starting),
- AngleSegment(vertical_starting, neutral),
- XYSegment(neutral, front_low_box),
- XYSegment(up, front_high_box),
- XYSegment(up, front_middle2_box),
- XYSegment(front_middle3_box, up),
- XYSegment(front_middle3_box, front_high_box),
- XYSegment(front_middle3_box, front_middle2_box),
- XYSegment(front_middle3_box, front_middle1_box),
- XYSegment(up, front_middle1_box),
- XYSegment(up, front_low_box),
- XYSegment(front_high_box, front_middle2_box),
- XYSegment(front_high_box, front_middle1_box),
- XYSegment(front_high_box, front_low_box),
- XYSegment(front_middle2_box, front_middle1_box),
- XYSegment(front_middle2_box, front_low_box),
- XYSegment(front_middle1_box, front_low_box),
- XYSegment(front_switch, front_low_box),
- XYSegment(front_switch, up),
- XYSegment(front_switch, front_high_box),
- AngleSegment(up, back_high_box),
- AngleSegment(up, back_middle2_box),
- AngleSegment(up, back_middle1_box),
- AngleSegment(up, back_low_box),
- XYSegment(back_high_box, back_middle2_box),
- XYSegment(back_high_box, back_middle1_box),
- XYSegment(back_high_box, back_low_box),
- XYSegment(back_middle2_box, back_middle1_box),
- XYSegment(back_middle2_box, back_low_box),
- XYSegment(back_middle1_box, back_low_box),
- AngleSegment(up, above_hang),
- AngleSegment(above_hang, below_hang),
- AngleSegment(up, below_hang),
- AngleSegment(up, self_hang),
- AngleSegment(up, partner_hang),
-] + front_paths + back_paths
-
-segments = []
-if isolate:
- segments += named_segments[:isolate]
-else:
- segments += named_segments + unnamed_segments
\ No newline at end of file
diff --git a/y2023/ssd_profiler.cc b/y2023/ssd_profiler.cc
new file mode 100644
index 0000000..f5f24f5
--- /dev/null
+++ b/y2023/ssd_profiler.cc
@@ -0,0 +1,101 @@
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <chrono>
+
+#include "aos/init.h"
+#include "aos/realtime.h"
+#include "aos/time/time.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+
+namespace chrono = std::chrono;
+
+DEFINE_string(file, "/media/sda1/foo", "File to write to.");
+
+DEFINE_uint32(write_size, 4096, "Size of chunk to write.");
+DEFINE_bool(sync, false, "If true, sync the file after each written block.");
+DEFINE_bool(direct, false, "If true, open the file with O_DIRECT.");
+
+int main(int argc, char **argv) {
+ aos::InitGoogle(&argc, &argv);
+
+ std::vector<uint8_t> data;
+
+  // We want incompressible data. The easiest way to do this is to grab a
+  // good-sized block from /dev/random, and then reuse it.
+ {
+ int random_fd = open("/dev/random", O_RDONLY | O_CLOEXEC);
+ PCHECK(random_fd != -1) << ": Failed to open /dev/random";
+ data.resize(FLAGS_write_size);
+ size_t written = 0;
+ while (written < data.size()) {
+      const ssize_t result =
+ read(random_fd, data.data() + written, data.size() - written);
+ PCHECK(result > 0);
+ written += result;
+ }
+
+ PCHECK(close(random_fd) == 0);
+ }
+
+ int fd = open(
+ FLAGS_file.c_str(),
+ O_RDWR | O_CLOEXEC | O_CREAT | O_EXCL | (FLAGS_direct ? O_DIRECT : 0),
+ 0774);
+ PCHECK(fd != -1);
+
+ const aos::monotonic_clock::time_point start_time =
+ aos::monotonic_clock::now();
+ aos::monotonic_clock::time_point last_time =
+ start_time;
+ size_t last_written_data = 0;
+ size_t written_data = 0;
+
+ while (true) {
+ PCHECK(write(fd, data.data(), data.size()) ==
+ static_cast<ssize_t>(data.size()))
+ << ": Failed after "
+ << chrono::duration<double>(aos::monotonic_clock::now() - start_time)
+ .count();
+
+ // Trigger a flush if asked.
+ if (FLAGS_sync) {
+ const aos::monotonic_clock::time_point monotonic_now =
+ aos::monotonic_clock::now();
+ sync_file_range(fd, written_data, data.size(), SYNC_FILE_RANGE_WRITE);
+
+ // Now, blocking flush the previous page so we don't get too far ahead.
+ // This is Linus' recommendation.
+ if (written_data > 0) {
+ sync_file_range(fd, written_data - data.size(), data.size(),
+ SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
+ SYNC_FILE_RANGE_WAIT_AFTER);
+ posix_fadvise(fd, written_data - data.size(), data.size(),
+ POSIX_FADV_DONTNEED);
+ }
+ VLOG(1) << "Took "
+ << chrono::duration<double>(aos::monotonic_clock::now() -
+ monotonic_now)
+ .count();
+ }
+
+ written_data += data.size();
+
+ const aos::monotonic_clock::time_point monotonic_now =
+ aos::monotonic_clock::now();
+ // Print out MB/s once it has been at least 1 second since last time.
+ if (monotonic_now > last_time + chrono::seconds(1)) {
+ LOG(INFO)
+ << ((written_data - last_written_data) /
+ chrono::duration<double>(monotonic_now - last_time).count() /
+ 1024. / 1024.)
+ << " MB/s";
+ last_time = monotonic_now;
+ last_written_data = written_data;
+ }
+ }
+
+ return 0;
+}
diff --git a/y2023/vision/aprilrobotics.cc b/y2023/vision/aprilrobotics.cc
index fbbc01d..23c5ada 100644
--- a/y2023/vision/aprilrobotics.cc
+++ b/y2023/vision/aprilrobotics.cc
@@ -9,15 +9,19 @@
namespace y2023 {
namespace vision {
+namespace chrono = std::chrono;
+
AprilRoboticsDetector::AprilRoboticsDetector(aos::EventLoop *event_loop,
std::string_view channel_name)
: calibration_data_(CalibrationData()),
ftrace_(),
- image_callback_(event_loop, channel_name,
- [&](cv::Mat image_color_mat,
- const aos::monotonic_clock::time_point eof) {
- HandleImage(image_color_mat, eof);
- }),
+ image_callback_(
+ event_loop, channel_name,
+ [&](cv::Mat image_color_mat,
+ const aos::monotonic_clock::time_point eof) {
+ HandleImage(image_color_mat, eof);
+ },
+ chrono::milliseconds(5)),
target_map_sender_(
event_loop->MakeSender<frc971::vision::TargetMap>("/camera")),
april_debug_sender_(
@@ -157,8 +161,8 @@
aos::monotonic_clock::now();
VLOG(1) << "Took "
- << std::chrono::duration<double>(after_pose_estimation -
- before_pose_estimation)
+ << chrono::duration<double>(after_pose_estimation -
+ before_pose_estimation)
.count()
<< " seconds for pose estimation";
@@ -196,8 +200,7 @@
timeprofile_display(tag_detector_->tp);
- VLOG(1) << "Took "
- << std::chrono::duration<double>(end_time - start_time).count()
+ VLOG(1) << "Took " << chrono::duration<double>(end_time - start_time).count()
<< " seconds to detect overall";
return results;
diff --git a/y2023/y2023_pi_template.json b/y2023/y2023_pi_template.json
index 881af55..05aaa96 100644
--- a/y2023/y2023_pi_template.json
+++ b/y2023/y2023_pi_template.json
@@ -369,6 +369,15 @@
"nodes": [
"pi{{ NUM }}"
]
+ },
+ {
+ "name": "aprilrobotics",
+ "executable_name": "aprilrobotics",
+ "args": ["--enable_ftrace"],
+ "user": "pi",
+ "nodes": [
+ "pi{{ NUM }}"
+ ]
}
],
"maps": [