Merge changes I47c31568,I1afbf524,I1a6ec295,I08d88554,I5af44085, ...
* changes:
Allow us to upgrade Rust version to 1.79.0
Fix ability to subclass LogReader
Refactor LogReader to allow sub-classing
Improve documentation for AlignedVectorAllocator
Reduce memory usage of the static flatbuffer API
Split SharedSpan out into a header
Pipe through exit statuses in AOS Run() methods
Force static flatbuffer memory to be aligned
Remove okay Status, rename Status->Error
Don't run status_test malloc tests on asan/msan
Prevent creation of watchers/fetchers with non-raw flatbuffers
Explicit instantiation of a function template
Remove shm_event_loop dependency from log_reader
Change LogReader API to be able to replace messages
diff --git a/aos/BUILD b/aos/BUILD
index de4ef4f..83adcd6 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -509,6 +509,7 @@
deps = [
"//aos:macros",
"//aos/containers:resizeable_buffer",
+ "//aos/ipc_lib:data_alignment",
"//aos/util:file",
"@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
@@ -849,3 +850,12 @@
"//aos/events:simulated_event_loop_rs",
],
)
+
+cc_library(
+ name = "shared_span",
+ hdrs = ["shared_span.h"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_google_absl//absl/types:span",
+ ],
+)
diff --git a/aos/containers/resizeable_buffer.h b/aos/containers/resizeable_buffer.h
index 484d93e..664fa2f 100644
--- a/aos/containers/resizeable_buffer.h
+++ b/aos/containers/resizeable_buffer.h
@@ -55,6 +55,8 @@
size_t size() const { return size_; }
size_t capacity() const { return capacity_; }
+ bool empty() const { return size_ == 0; }
+
void reserve(size_t new_size) {
if (new_size > capacity_) {
Allocate(new_size);
diff --git a/aos/events/BUILD b/aos/events/BUILD
index d799000..c61b3e5 100644
--- a/aos/events/BUILD
+++ b/aos/events/BUILD
@@ -116,13 +116,16 @@
"//aos:flatbuffers",
"//aos:ftrace",
"//aos:realtime",
+ "//aos:shared_span",
"//aos:uuid",
"//aos/flatbuffers:builder",
"//aos/ipc_lib:data_alignment",
"//aos/logging",
"//aos/time",
"//aos/util:phased_loop",
+ "//aos/util:status",
"@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_tartanllama_expected",
"@com_google_absl//absl/container:btree",
],
)
diff --git a/aos/events/event_loop.h b/aos/events/event_loop.h
index 83393c2..389ea08 100644
--- a/aos/events/event_loop.h
+++ b/aos/events/event_loop.h
@@ -1,6 +1,5 @@
#ifndef AOS_EVENTS_EVENT_LOOP_H_
#define AOS_EVENTS_EVENT_LOOP_H_
-
#include <sched.h>
#include <atomic>
@@ -11,6 +10,7 @@
#include "absl/container/btree_set.h"
#include "flatbuffers/flatbuffers.h"
#include "glog/logging.h"
+#include "tl/expected.hpp"
#include "aos/configuration.h"
#include "aos/configuration_generated.h"
@@ -24,8 +24,10 @@
#include "aos/ftrace.h"
#include "aos/ipc_lib/data_alignment.h"
#include "aos/json_to_flatbuffer.h"
+#include "aos/shared_span.h"
#include "aos/time/time.h"
#include "aos/util/phased_loop.h"
+#include "aos/util/status.h"
#include "aos/uuid.h"
DECLARE_bool(timing_reports);
@@ -89,8 +91,6 @@
Ftrace ftrace_;
};
-using SharedSpan = std::shared_ptr<const absl::Span<const uint8_t>>;
-
// Holds storage for a span object and the data referenced by that span for
// compatibility with SharedSpan users. If constructed with MakeSharedSpan, span
// points to only the aligned segment of the entire data.
@@ -112,8 +112,6 @@
// and as a building block to implement typed senders.
class RawSender {
public:
- using SharedSpan = std::shared_ptr<const absl::Span<const uint8_t>>;
-
enum class [[nodiscard]] Error {
// Represents success and no error
kOk,
@@ -166,7 +164,8 @@
// Sends a single block of data by refcounting it to avoid copies. The data
// must not change after being passed into Send. The remote arguments have the
- // same meaning as in Send above.
+ // same meaning as in Send above. Note: some implementations will have to
+ // copy anyway, but other implementations can skip the copy.
Error Send(const SharedSpan data);
Error Send(const SharedSpan data,
monotonic_clock::time_point monotonic_remote_time,
@@ -693,6 +692,13 @@
// Fetcher before using it.
template <typename T>
Fetcher<T> TryMakeFetcher(const std::string_view channel_name) {
+ // Note: This could be done with SFINAE, but then you don't get as good an
+ // error message, and the main benefit of SFINAE would be to make
+ // compilation *not* fail if we, e.g., had another MakeFetcher overload that
+ // could take static flatbuffers.
+ static_assert(std::is_base_of<flatbuffers::Table, T>::value,
+ "Fetchers must be created with raw flatbuffer types---static "
+ "flatbuffers are currently not supported with fetchers.");
const Channel *const channel = GetChannel<T>(channel_name);
if (channel == nullptr) {
return Fetcher<T>();
@@ -1032,7 +1038,12 @@
//
// This means no more events will be processed, but any currently being
// processed will finish.
- virtual void Exit() = 0;
+ virtual void Exit(Result<void> result) = 0;
+ // Overload for a successful exit---equivalent to specifying a default
+ // parameter for Exit(), except that autocxx does not understand default
+ // arguments and so needs an explicit overload to keep Rust happy
+ // (https://github.com/google/autocxx/issues/563).
+ void Exit() { Exit({}); }
};
} // namespace aos
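
For context, a minimal sketch of how the new ExitHandle::Exit(Result<void>) overload is meant to be used; `factory` and `loop` are placeholders for whatever event loop factory and primary event loop the application already has, and the error message is illustrative:

std::unique_ptr<aos::ExitHandle> exit_handle = factory.MakeExitHandle();
loop->OnRun([&exit_handle]() {
  // Report a failure; Run() will return this error instead of success.
  exit_handle->Exit(aos::Error::MakeUnexpectedError("initialization failed"));
});
const aos::Result<void> result = factory.Run();
if (!result.has_value()) {
  LOG(ERROR) << "Event loop exited with: " << result.error().message();
}
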
diff --git a/aos/events/event_loop_param_test.cc b/aos/events/event_loop_param_test.cc
index 4885f5c..4975540 100644
--- a/aos/events/event_loop_param_test.cc
+++ b/aos/events/event_loop_param_test.cc
@@ -1,6 +1,7 @@
#include "aos/events/event_loop_param_test.h"
#include <chrono>
+#include <filesystem>
#include <unordered_map>
#include <unordered_set>
@@ -3797,4 +3798,47 @@
EXPECT_EQ(SendTestMessage(sender1), RawSender::Error::kMessagesSentTooFast);
}
+// Tests that we can exit with a default constructor and that Run() will
+// indicate a successful exit.
+TEST_P(AbstractEventLoopTest, ExitHandleExitSuccessful) {
+ auto loop = MakePrimary();
+ std::unique_ptr<ExitHandle> exit_handle = MakeExitHandle();
+ bool happened = false;
+
+ loop->OnRun([&exit_handle, &happened]() {
+ happened = true;
+ exit_handle->Exit();
+ });
+
+ EXPECT_TRUE(Run().has_value());
+ EXPECT_TRUE(happened);
+}
+
+// Tests that we can exit with an error Status and have that returned via the
+// Run() method.
+TEST_P(AbstractEventLoopTest, ExitHandleExitFailure) {
+ auto loop = MakePrimary();
+ std::unique_ptr<ExitHandle> exit_handle = MakeExitHandle();
+ bool happened = false;
+
+ loop->OnRun([&exit_handle, &happened]() {
+ happened = true;
+ exit_handle->Exit(Error::MakeUnexpectedError("Hello, World!"));
+ // The second Exit() should not affect the final return value.
+ exit_handle->Exit(Error::MakeUnexpectedError("Hello, World! 2"));
+ });
+ const int line = __LINE__ - 4;
+
+ Result<void> status = Run();
+
+ EXPECT_TRUE(happened);
+ EXPECT_FALSE(status.has_value());
+ EXPECT_EQ(std::string("Hello, World!"), status.error().message());
+ ASSERT_TRUE(status.error().source_location().has_value());
+ EXPECT_EQ(std::string("event_loop_param_test.cc"),
+ std::filesystem::path(status.error().source_location()->file_name())
+ .filename());
+ EXPECT_EQ(line, status.error().source_location()->line());
+}
+
} // namespace aos::testing
diff --git a/aos/events/event_loop_param_test.h b/aos/events/event_loop_param_test.h
index f2b16c5..bbba5b7 100644
--- a/aos/events/event_loop_param_test.h
+++ b/aos/events/event_loop_param_test.h
@@ -68,7 +68,9 @@
virtual std::unique_ptr<EventLoop> MakePrimary(std::string_view name) = 0;
// Runs the loops until they quit.
- virtual void Run() = 0;
+ virtual Result<void> Run() = 0;
+
+ virtual std::unique_ptr<ExitHandle> MakeExitHandle() = 0;
// Quits the loops.
virtual void Exit() = 0;
@@ -350,7 +352,11 @@
void EnableNodes(std::string_view my_node) { factory_->EnableNodes(my_node); }
- void Run() { return factory_->Run(); }
+ Result<void> Run() { return factory_->Run(); }
+
+ std::unique_ptr<ExitHandle> MakeExitHandle() {
+ return factory_->MakeExitHandle();
+ }
void Exit() { return factory_->Exit(); }
diff --git a/aos/events/event_loop_tmpl.h b/aos/events/event_loop_tmpl.h
index 3f41ec1..25aa36c 100644
--- a/aos/events/event_loop_tmpl.h
+++ b/aos/events/event_loop_tmpl.h
@@ -39,6 +39,13 @@
void EventLoop::MakeWatcher(const std::string_view channel_name, Watch &&w) {
using MessageType = typename event_loop_internal::watch_message_type_trait<
decltype(&Watch::operator())>::message_type;
+ // Note: This could be done with SFINAE, but then you don't get as good an
+ // error message, and the main benefit of SFINAE would be to make
+ // compilation *not* fail if we, e.g., had another MakeWatcher overload that
+ // could take static flatbuffers.
+ static_assert(std::is_base_of<flatbuffers::Table, MessageType>::value,
+ "Watchers must be created with raw flatbuffer types---static "
+ "flatbuffers are currently not supported with watchers.");
const Channel *channel = configuration::GetChannel(
configuration_, channel_name, MessageType::GetFullyQualifiedName(),
name(), node());
diff --git a/aos/events/logging/BUILD b/aos/events/logging/BUILD
index f056bca..2bb87b6 100644
--- a/aos/events/logging/BUILD
+++ b/aos/events/logging/BUILD
@@ -463,7 +463,6 @@
"//aos:condition",
"//aos:uuid",
"//aos/events:event_loop",
- "//aos/events:shm_event_loop",
"//aos/events:simulated_event_loop",
"//aos/mutex",
"//aos/network:message_bridge_server_fbs",
@@ -929,6 +928,7 @@
target_compatible_with = ["@platforms//os:linux"],
deps = [
":multinode_logger_test_lib",
+ "//aos/flatbuffers:base",
],
)
diff --git a/aos/events/logging/log_reader.cc b/aos/events/logging/log_reader.cc
index df1ed8c..192dade 100644
--- a/aos/events/logging/log_reader.cc
+++ b/aos/events/logging/log_reader.cc
@@ -1301,7 +1301,7 @@
std::function<void()> notice_realtime_end, const Node *node,
LogReader::State::ThreadedBuffering threading,
std::unique_ptr<const ReplayChannelIndices> replay_channel_indices,
- const std::vector<std::function<void(void *message)>>
+ const std::vector<std::function<SharedSpan(TimestampedMessage &)>>
&before_send_callbacks)
: timestamp_mapper_(std::move(timestamp_mapper)),
timestamp_queue_strategy_(timestamp_queue_strategy),
@@ -1416,7 +1416,7 @@
timing_statistics_sender_.CheckOk(builder.Send(timing_builder.Finish()));
}
-bool LogReader::State::Send(const TimestampedMessage &×tamped_message) {
+bool LogReader::State::Send(TimestampedMessage &×tamped_message) {
aos::RawSender *sender = channels_[timestamped_message.channel_index].get();
CHECK(sender);
uint32_t remote_queue_index = 0xffffffff;
@@ -1506,21 +1506,31 @@
->boot_uuid());
}
+ SharedSpan to_send;
// Right before sending allow the user to process the message.
if (before_send_callbacks_[timestamped_message.channel_index]) {
- // Only channels that are forwarded and sent from this State's node will be
- // in the queue_index_map_
- if (queue_index_map_[timestamped_message.channel_index]) {
- before_send_callbacks_[timestamped_message.channel_index](
- timestamped_message.data->mutable_data());
+ // Only channels which are forwarded and on the destination node have
+ // channel_source_state_ set to non-null. See RegisterDuringStartup.
+ if (channel_source_state_[timestamped_message.channel_index] == nullptr) {
+ // Letting the callback mutate the data is safe here since there is only
+ // one caller to Send, and the data is not mutated after Send is called.
+ to_send = before_send_callbacks_[timestamped_message.channel_index](
+ timestamped_message);
+ *timestamped_message.data.get() = to_send;
+ } else {
+ to_send = *timestamped_message.data;
}
+ if (!to_send) {
+ return false;
+ }
+ } else {
+ to_send = *timestamped_message.data;
}
// Send! Use the replayed queue index here instead of the logged queue index
// for the remote queue index. This makes re-logging work.
const RawSender::Error err = sender->Send(
- SharedSpan(timestamped_message.data, ×tamped_message.data->span),
- timestamped_message.monotonic_remote_time.time,
+ std::move(to_send), timestamped_message.monotonic_remote_time.time,
timestamped_message.realtime_remote_time,
timestamped_message.monotonic_remote_transmit_time.time,
remote_queue_index,
diff --git a/aos/events/logging/log_reader.h b/aos/events/logging/log_reader.h
index fb19e78..f25f1f6 100644
--- a/aos/events/logging/log_reader.h
+++ b/aos/events/logging/log_reader.h
@@ -21,7 +21,6 @@
#include "aos/events/logging/logger_generated.h"
#include "aos/events/logging/replay_channels.h"
#include "aos/events/logging/replay_timing_generated.h"
-#include "aos/events/shm_event_loop.h"
#include "aos/events/simulated_event_loop.h"
#include "aos/mutex/mutex.h"
#include "aos/network/message_bridge_server_generated.h"
@@ -103,7 +102,7 @@
LogReader(LogFilesContainer log_files,
const Configuration *replay_configuration = nullptr,
const ReplayChannels *replay_channels = nullptr);
- ~LogReader();
+ virtual ~LogReader();
// Registers all the callbacks to send the log file data out on an event loop
// created in event_loop_factory. This also updates time to be at the start
@@ -116,7 +115,8 @@
// Registers all the callbacks to send the log file data out to an event loop
// factory. This does not start replaying or change the current distributed
// time of the factory. It does change the monotonic clocks to be right.
- void RegisterWithoutStarting(SimulatedEventLoopFactory *event_loop_factory);
+ virtual void RegisterWithoutStarting(
+ SimulatedEventLoopFactory *event_loop_factory);
// Runs the log until the last start time. Register above is defined as:
// Register(...) {
// RegisterWithoutStarting
@@ -323,6 +323,8 @@
std::string_view name() const { return log_files_.name(); }
+ const LogFilesContainer &log_files() const { return log_files_; }
+
// Set whether to exit the SimulatedEventLoopFactory when we finish reading
// the logfile.
void set_exit_on_finish(bool exit_on_finish) {
@@ -346,17 +348,25 @@
// implementation. And, the callback is called only once on the Sender's Node
// if the channel is forwarded.
//
+ // The callback should have a signature like:
+ // [](aos::examples::Ping *ping,
+ // const TimestampedMessage ×tamped_message) -> SharedSpan {
+ // if (drop) {
+ // return nullptr;
+ // } else {
+ // return *timestamped_message.data;
+ // }
+ // }
+ //
+ // If nullptr is returned, the message will not be sent.
+ //
// See multinode_logger_test for examples of usage.
- template <typename Callback>
+ template <typename MessageType, typename Callback>
void AddBeforeSendCallback(std::string_view channel_name,
Callback &&callback) {
CHECK(!AreStatesInitialized())
<< ": Cannot add callbacks after calling Register";
- using MessageType = typename std::remove_pointer<
- typename event_loop_internal::watch_message_type_trait<
- decltype(&Callback::operator())>::message_type>::type;
-
const Channel *channel = configuration::GetChannel(
logged_configuration(), channel_name,
MessageType::GetFullyQualifiedName(), "", nullptr);
@@ -373,9 +383,16 @@
<< ":{ \"name\": \"" << channel_name << "\", \"type\": \""
<< MessageType::GetFullyQualifiedName() << "\" }";
- before_send_callbacks_[channel_index] = [callback](void *message) {
- callback(flatbuffers::GetMutableRoot<MessageType>(
- reinterpret_cast<char *>(message)));
+ before_send_callbacks_[channel_index] =
+ [callback](TimestampedMessage ×tamped_message) -> SharedSpan {
+ // Note: the const_cast is because SharedSpan is defined to be a pointer
+ // to const data, even though it wraps mutable data.
+ // TODO(austin): Refactor to make it non-const properly to drop the const
+ // cast.
+ return callback(flatbuffers::GetMutableRoot<MessageType>(
+ reinterpret_cast<char *>(const_cast<uint8_t *>(
+ timestamped_message.data.get()->get()->data()))),
+ timestamped_message);
};
}
@@ -460,7 +477,7 @@
std::function<void()> notice_realtime_end, const Node *node,
ThreadedBuffering threading,
std::unique_ptr<const ReplayChannelIndices> replay_channel_indices,
- const std::vector<std::function<void(void *message)>>
+ const std::vector<std::function<SharedSpan(TimestampedMessage &)>>
&before_send_callbacks);
// Connects up the timestamp mappers.
@@ -705,8 +722,9 @@
std::max(monotonic_now(), next_time + clock_offset()));
}
- // Sends a buffer on the provided channel index.
- bool Send(const TimestampedMessage &×tamped_message);
+ // Sends a buffer on the provided channel index. Returns true if the
+ // message was actually sent, and false otherwise.
+ bool Send(TimestampedMessage &×tamped_message);
void MaybeSetClockOffset();
std::chrono::nanoseconds clock_offset() const { return clock_offset_; }
@@ -886,7 +904,7 @@
// indices of the channels to replay for the Node represented by
// the instance of LogReader::State.
std::unique_ptr<const ReplayChannelIndices> replay_channel_indices_;
- const std::vector<std::function<void(void *message)>>
+ const std::vector<std::function<SharedSpan(TimestampedMessage &)>>
before_send_callbacks_;
};
@@ -934,7 +952,8 @@
// The callbacks that will be called before sending a message indexed by the
// channel index from the logged_configuration
- std::vector<std::function<void(void *message)>> before_send_callbacks_;
+ std::vector<std::function<SharedSpan(TimestampedMessage &)>>
+ before_send_callbacks_;
// If true, the replay timer will ignore any missing data. This is used
// during startup when we are bootstrapping everything and trying to get to
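
As a usage sketch of the updated AddBeforeSendCallback API (mirroring the test changes later in this patch; `logfile` and `config_` stand in for whatever the caller already has):

LogReader reader(logfile, &config_.message());
reader.AddBeforeSendCallback<aos::examples::Ping>(
    "/test",
    [](aos::examples::Ping *ping,
       const TimestampedMessage &timestamped_message) -> SharedSpan {
      // Mutate in place and forward the original refcounted data, or return
      // nullptr instead to drop the message entirely.
      ping->mutate_value(ping->value() + 1);
      return *timestamped_message.data;
    });
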
diff --git a/aos/events/logging/logfile_utils.cc b/aos/events/logging/logfile_utils.cc
index c01026f..30ed3d7 100644
--- a/aos/events/logging/logfile_utils.cc
+++ b/aos/events/logging/logfile_utils.cc
@@ -1579,14 +1579,14 @@
os << "{.channel_index=" << msg.channel_index
<< ", .queue_index=" << msg.queue_index
<< ", .timestamp=" << msg.timestamp;
- if (msg.data != nullptr) {
- if (msg.data->remote_queue_index.has_value()) {
- os << ", .remote_queue_index=" << *msg.data->remote_queue_index;
+ if (msg.header != nullptr) {
+ if (msg.header->remote_queue_index.has_value()) {
+ os << ", .remote_queue_index=" << *msg.header->remote_queue_index;
}
- if (msg.data->monotonic_remote_time.has_value()) {
- os << ", .monotonic_remote_time=" << *msg.data->monotonic_remote_time;
+ if (msg.header->monotonic_remote_time.has_value()) {
+ os << ", .monotonic_remote_time=" << *msg.header->monotonic_remote_time;
}
- os << ", .data=" << msg.data;
+ os << ", .header=" << msg.header;
}
os << "}";
return os;
@@ -1614,7 +1614,7 @@
os << ", .monotonic_timestamp_time=" << msg.monotonic_timestamp_time;
}
if (msg.data != nullptr) {
- os << ", .data=" << *msg.data;
+ os << ", .data=" << msg.data.get();
} else {
os << ", .data=nullptr";
}
@@ -1666,15 +1666,20 @@
monotonic_remote_boot = *boot;
}
- messages_.insert(
- Message{.channel_index = msg->channel_index,
- .queue_index = BootQueueIndex{.boot = parts().boot_count,
- .index = msg->queue_index},
- .timestamp = BootTimestamp{.boot = parts().boot_count,
- .time = msg->monotonic_sent_time},
- .monotonic_remote_boot = monotonic_remote_boot,
- .monotonic_timestamp_boot = monotonic_timestamp_boot,
- .data = std::move(msg)});
+ std::shared_ptr<SharedSpan> data =
+ std::make_shared<SharedSpan>(msg, &msg->span);
+
+ messages_.insert(Message{
+ .channel_index = msg->channel_index,
+ .queue_index = BootQueueIndex{.boot = parts().boot_count,
+ .index = msg->queue_index},
+ .timestamp = BootTimestamp{.boot = parts().boot_count,
+ .time = msg->monotonic_sent_time},
+ .monotonic_remote_boot = monotonic_remote_boot,
+ .monotonic_timestamp_boot = monotonic_timestamp_boot,
+ .header = std::move(msg),
+ .data = std::move(data),
+ });
// Now, update sorted_until_ to match the new message.
if (parts_message_reader_.newest_timestamp() >
@@ -1827,15 +1832,15 @@
} else if (*msg == *oldest) {
// Found a duplicate. If there is a choice, we want the one which has
// the timestamp time.
- if (!msg->data->has_monotonic_timestamp_time) {
+ if (!msg->header->has_monotonic_timestamp_time) {
message_sorter.PopFront();
- } else if (!oldest->data->has_monotonic_timestamp_time) {
+ } else if (!oldest->header->has_monotonic_timestamp_time) {
current_->PopFront();
current_ = &message_sorter;
oldest = msg;
} else {
- CHECK_EQ(msg->data->monotonic_timestamp_time,
- oldest->data->monotonic_timestamp_time);
+ CHECK_EQ(msg->header->monotonic_timestamp_time,
+ oldest->header->monotonic_timestamp_time);
message_sorter.PopFront();
}
}
@@ -2037,26 +2042,30 @@
}
CHECK_LT(msg->channel_index, source_node.size());
if (source_node[msg->channel_index] != static_cast<size_t>(node())) {
- timestamp_messages_.emplace_back(TimestampedMessage{
+ TimestampedMessage timestamped_message{
.channel_index = msg->channel_index,
.queue_index = msg->queue_index,
.monotonic_event_time = msg->timestamp,
- .realtime_event_time = msg->data->realtime_sent_time,
+ .realtime_event_time = msg->header->realtime_sent_time,
.remote_queue_index =
BootQueueIndex{.boot = msg->monotonic_remote_boot,
- .index = msg->data->remote_queue_index.value()},
+ .index = msg->header->remote_queue_index.value()},
.monotonic_remote_time = {msg->monotonic_remote_boot,
- msg->data->monotonic_remote_time.value()},
- .realtime_remote_time = msg->data->realtime_remote_time.value(),
+ msg->header->monotonic_remote_time.value()},
+ .realtime_remote_time = msg->header->realtime_remote_time.value(),
.monotonic_remote_transmit_time =
{msg->monotonic_remote_boot,
- msg->data->monotonic_remote_transmit_time},
+ msg->header->monotonic_remote_transmit_time},
.monotonic_timestamp_time = {msg->monotonic_timestamp_boot,
- msg->data->monotonic_timestamp_time},
- .data = std::move(msg->data)});
+ msg->header->monotonic_timestamp_time},
+ .data = msg->data,
+ };
- VLOG(2) << this << " Queued timestamp of " << timestamp_messages_.back();
- fn(×tamp_messages_.back());
+ fn(×tamped_message);
+
+ VLOG(2) << this << " Queued timestamp of " << timestamped_message;
+
+ timestamp_messages_.emplace_back(std::move(*msg));
} else {
VLOG(2) << this << " Dropped data";
}
@@ -2100,25 +2109,12 @@
CHECK(queue_timestamps_ran_);
}
- // timestamp_messages_ is a queue of TimestampedMessage, but we are supposed
- // to return a Message. We need to convert the first message in the list
- // before returning it (and comparing, honestly). Fill next_timestamp_ in if
- // it is empty so the rest of the logic here can just look at next_timestamp_
- // and use that instead.
- if (!next_timestamp_ && !timestamp_messages_.empty()) {
- auto &front = timestamp_messages_.front();
- next_timestamp_ = Message{
- .channel_index = front.channel_index,
- .queue_index = front.queue_index,
- .timestamp = front.monotonic_event_time,
- .monotonic_remote_boot = front.remote_queue_index.boot,
- .monotonic_timestamp_boot = front.monotonic_timestamp_time.boot,
- .data = std::move(front.data),
- };
- timestamp_messages_.pop_front();
+ const Message *timestamp_messages_front = nullptr;
+ if (!timestamp_messages_.empty()) {
+ timestamp_messages_front = ×tamp_messages_.front();
}
- if (!next_timestamp_) {
+ if (!timestamp_messages_front) {
message_source_ = MessageSource::kBootMerger;
if (boot_merger_front != nullptr) {
VLOG(1) << this << " SplitTimestampBootMerger::Front " << node_name()
@@ -2134,15 +2130,15 @@
message_source_ = MessageSource::kTimestampMessage;
VLOG(1) << this << " SplitTimestampBootMerger::Front " << node_name() << " "
- << next_timestamp_.value();
- return &next_timestamp_.value();
+ << *timestamp_messages_front;
+ return timestamp_messages_front;
}
- if (*boot_merger_front <= next_timestamp_.value()) {
- if (*boot_merger_front == next_timestamp_.value()) {
+ if (*boot_merger_front <= *timestamp_messages_front) {
+ if (*boot_merger_front == *timestamp_messages_front) {
VLOG(1) << this << " SplitTimestampBootMerger::Front " << node_name()
<< " Dropping duplicate timestamp.";
- next_timestamp_.reset();
+ timestamp_messages_.pop_front();
}
message_source_ = MessageSource::kBootMerger;
if (boot_merger_front != nullptr) {
@@ -2156,16 +2152,16 @@
} else {
message_source_ = MessageSource::kTimestampMessage;
VLOG(1) << this << " SplitTimestampBootMerger::Front " << node_name() << " "
- << next_timestamp_.value();
- return &next_timestamp_.value();
+ << *timestamp_messages_front;
+ return timestamp_messages_front;
}
}
void SplitTimestampBootMerger::PopFront() {
switch (message_source_) {
case MessageSource::kTimestampMessage:
- CHECK(next_timestamp_.has_value());
- next_timestamp_.reset();
+ CHECK(!timestamp_messages_.empty());
+ timestamp_messages_.pop_front();
break;
case MessageSource::kBootMerger:
boot_merger_.PopFront();
@@ -2246,7 +2242,7 @@
.channel_index = msg->channel_index,
.queue_index = msg->queue_index,
.monotonic_event_time = msg->timestamp,
- .realtime_event_time = msg->data->realtime_sent_time,
+ .realtime_event_time = msg->header->realtime_sent_time,
.remote_queue_index = BootQueueIndex::Invalid(),
.monotonic_remote_time = BootTimestamp::min_time(),
.realtime_remote_time = realtime_clock::min_time,
@@ -2368,18 +2364,18 @@
.channel_index = msg->channel_index,
.queue_index = msg->queue_index,
.monotonic_event_time = msg->timestamp,
- .realtime_event_time = msg->data->realtime_sent_time,
+ .realtime_event_time = msg->header->realtime_sent_time,
.remote_queue_index =
BootQueueIndex{.boot = msg->monotonic_remote_boot,
- .index = msg->data->remote_queue_index.value()},
+ .index = msg->header->remote_queue_index.value()},
.monotonic_remote_time = {msg->monotonic_remote_boot,
- msg->data->monotonic_remote_time.value()},
- .realtime_remote_time = msg->data->realtime_remote_time.value(),
+ msg->header->monotonic_remote_time.value()},
+ .realtime_remote_time = msg->header->realtime_remote_time.value(),
.monotonic_remote_transmit_time =
{msg->monotonic_remote_boot,
- msg->data->monotonic_remote_transmit_time},
+ msg->header->monotonic_remote_transmit_time},
.monotonic_timestamp_time = {msg->monotonic_timestamp_boot,
- msg->data->monotonic_timestamp_time},
+ msg->header->monotonic_timestamp_time},
.data = std::move(data.data)});
VLOG(1) << node_name() << " Inserted timestamp "
<< matched_messages_.back();
@@ -2446,23 +2442,23 @@
Message TimestampMapper::MatchingMessageFor(const Message &message) {
// Figure out what queue index we are looking for.
- CHECK_NOTNULL(message.data);
- CHECK(message.data->remote_queue_index.has_value());
+ CHECK_NOTNULL(message.header);
+ CHECK(message.header->remote_queue_index.has_value());
const BootQueueIndex remote_queue_index =
BootQueueIndex{.boot = message.monotonic_remote_boot,
- .index = *message.data->remote_queue_index};
+ .index = *message.header->remote_queue_index};
- CHECK(message.data->monotonic_remote_time.has_value());
- CHECK(message.data->realtime_remote_time.has_value());
+ CHECK(message.header->monotonic_remote_time.has_value());
+ CHECK(message.header->realtime_remote_time.has_value());
const BootTimestamp monotonic_remote_time{
.boot = message.monotonic_remote_boot,
- .time = message.data->monotonic_remote_time.value()};
+ .time = message.header->monotonic_remote_time.value()};
const realtime_clock::time_point realtime_remote_time =
- *message.data->realtime_remote_time;
+ *message.header->realtime_remote_time;
TimestampMapper *peer =
- nodes_data_[source_node_[message.data->channel_index]].peer;
+ nodes_data_[source_node_[message.header->channel_index]].peer;
// We only register the peers which we have data for. So, if we are being
// asked to pull a timestamp from a peer which doesn't exist, return an
@@ -2475,6 +2471,7 @@
.timestamp = monotonic_remote_time,
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
}
@@ -2490,6 +2487,7 @@
.timestamp = monotonic_remote_time,
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
}
@@ -2500,6 +2498,7 @@
.timestamp = monotonic_remote_time,
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
}
@@ -2520,7 +2519,7 @@
CHECK_EQ(result.timestamp, monotonic_remote_time)
<< ": Queue index matches, but timestamp doesn't. Please investigate!";
- CHECK_EQ(result.data->realtime_sent_time, realtime_remote_time)
+ CHECK_EQ(result.header->realtime_sent_time, realtime_remote_time)
<< ": Queue index matches, but timestamp doesn't. Please investigate!";
// Now drop the data off the front. We have deduplicated timestamps, so we
// are done. And all the data is in order.
@@ -2544,6 +2543,7 @@
.timestamp = monotonic_remote_time,
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
}
@@ -2552,7 +2552,7 @@
CHECK_EQ(result.timestamp, monotonic_remote_time)
<< ": Queue index matches, but timestamp doesn't. Please "
"investigate!";
- CHECK_EQ(result.data->realtime_sent_time, realtime_remote_time)
+ CHECK_EQ(result.header->realtime_sent_time, realtime_remote_time)
<< ": Queue index matches, but timestamp doesn't. Please "
"investigate!";
diff --git a/aos/events/logging/logfile_utils.h b/aos/events/logging/logfile_utils.h
index f9a62b1..bb312eb 100644
--- a/aos/events/logging/logfile_utils.h
+++ b/aos/events/logging/logfile_utils.h
@@ -489,12 +489,6 @@
// pointer to track.
absl::Span<const uint8_t> span;
- // Used to be able to mutate the data in the span. This is only used for
- // mutating the message inside of LogReader for the Before Send Callback. It
- // is safe in this case since there is only one caller to Send, and the data
- // is not mutated after Send is called.
- uint8_t *mutable_data() { return const_cast<uint8_t *>(span.data()); }
-
char actual_data[];
private:
@@ -526,7 +520,13 @@
size_t monotonic_timestamp_boot = 0xffffff;
- std::shared_ptr<UnpackedMessageHeader> data;
+ // Pointer to the unpacked header.
+ std::shared_ptr<UnpackedMessageHeader> header;
+
+ // Pointer to a pointer to the span with the flatbuffer to publish in it. The
+ // second layer of indirection lets us modify all copies of a message when
+ // sending inside the log reader.
+ std::shared_ptr<SharedSpan> data;
bool operator<(const Message &m2) const;
bool operator<=(const Message &m2) const;
@@ -554,7 +554,10 @@
BootTimestamp monotonic_timestamp_time;
- std::shared_ptr<UnpackedMessageHeader> data;
+ // Pointer to a pointer to the data. If the outer pointer isn't populated, no
+ // data exists to send; we only have the timestamps. If the inner pointer is
+ // nullptr, the user has marked the message as something not to send.
+ std::shared_ptr<SharedSpan> data;
};
std::ostream &operator<<(std::ostream &os, const TimestampedMessage &m);
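
To illustrate the double indirection described above (the spans here are empty placeholders): every queued copy of a Message or TimestampedMessage shares the inner SharedSpan, so swapping it once is visible through all of them.

SharedSpan logged_span = std::make_shared<const absl::Span<const uint8_t>>();
SharedSpan replacement_span = std::make_shared<const absl::Span<const uint8_t>>();
std::shared_ptr<SharedSpan> message_data =
    std::make_shared<SharedSpan>(logged_span);           // Built while sorting.
std::shared_ptr<SharedSpan> queued_copy = message_data;  // Another copy in a queue.
*message_data = replacement_span;  // LogReader swaps in the callback's result.
CHECK(queued_copy->get() == replacement_span.get());     // All copies see it.
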
@@ -807,14 +810,8 @@
// Boot merger for just timestamps. Any data read from here is to be ignored.
std::unique_ptr<BootMerger> timestamp_boot_merger_;
- // The callback requires us to convert each message to a TimestampedMessage.
- std::deque<TimestampedMessage> timestamp_messages_;
-
- // Storage for the next timestamp message to return. This is separate so we
- // can convert them back to a Message.
- //
- // TODO(austin): It would be nice to not have to convert...
- std::optional<Message> next_timestamp_;
+ // Deque of all the timestamp messages.
+ std::deque<Message> timestamp_messages_;
// Start times for each boot.
std::vector<monotonic_clock::time_point> monotonic_start_time_;
diff --git a/aos/events/logging/logfile_utils_test.cc b/aos/events/logging/logfile_utils_test.cc
index 4ceca20..87c6f5d 100644
--- a/aos/events/logging/logfile_utils_test.cc
+++ b/aos/events/logging/logfile_utils_test.cc
@@ -274,6 +274,7 @@
BootTimestamp{.boot = 0, .time = e + chrono::milliseconds(1)},
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
Message m2{.channel_index = 0,
.queue_index = BootQueueIndex{.boot = 0, .index = 0u},
@@ -281,6 +282,7 @@
BootTimestamp{.boot = 0, .time = e + chrono::milliseconds(2)},
.monotonic_remote_boot = 0xffffff,
.monotonic_timestamp_boot = 0xffffff,
+ .header = nullptr,
.data = nullptr};
EXPECT_LT(m1, m2);
@@ -960,24 +962,24 @@
EXPECT_EQ(output[0].timestamp.boot, 0u);
EXPECT_EQ(output[0].timestamp.time, e + chrono::milliseconds(101000));
- EXPECT_FALSE(output[0].data->has_monotonic_timestamp_time);
+ EXPECT_FALSE(output[0].header->has_monotonic_timestamp_time);
EXPECT_EQ(output[1].timestamp.boot, 0u);
EXPECT_EQ(output[1].timestamp.time, e + chrono::milliseconds(101001));
- EXPECT_TRUE(output[1].data->has_monotonic_timestamp_time);
- EXPECT_EQ(output[1].data->monotonic_timestamp_time,
+ EXPECT_TRUE(output[1].header->has_monotonic_timestamp_time);
+ EXPECT_EQ(output[1].header->monotonic_timestamp_time,
monotonic_clock::time_point(std::chrono::nanoseconds(971)));
EXPECT_EQ(output[2].timestamp.boot, 0u);
EXPECT_EQ(output[2].timestamp.time, e + chrono::milliseconds(101002));
- EXPECT_TRUE(output[2].data->has_monotonic_timestamp_time);
- EXPECT_EQ(output[2].data->monotonic_timestamp_time,
+ EXPECT_TRUE(output[2].header->has_monotonic_timestamp_time);
+ EXPECT_EQ(output[2].header->monotonic_timestamp_time,
monotonic_clock::time_point(std::chrono::nanoseconds(972)));
EXPECT_EQ(output[3].timestamp.boot, 0u);
EXPECT_EQ(output[3].timestamp.time, e + chrono::milliseconds(101003));
- EXPECT_TRUE(output[3].data->has_monotonic_timestamp_time);
- EXPECT_EQ(output[3].data->monotonic_timestamp_time,
+ EXPECT_TRUE(output[3].header->has_monotonic_timestamp_time);
+ EXPECT_EQ(output[3].header->monotonic_timestamp_time,
monotonic_clock::time_point(std::chrono::nanoseconds(973)));
}
diff --git a/aos/events/logging/logger_test.cc b/aos/events/logging/logger_test.cc
index 99b2359..c810203 100644
--- a/aos/events/logging/logger_test.cc
+++ b/aos/events/logging/logger_test.cc
@@ -155,9 +155,15 @@
// passing in a separate config.
LogReader reader(logfile, &config_.message());
- reader.AddBeforeSendCallback("/test", [](aos::examples::Ping *ping) {
- ping->mutate_value(ping->value() + 1);
- });
+ const uint8_t *data_ptr = nullptr;
+ reader.AddBeforeSendCallback<aos::examples::Ping>(
+ "/test",
+ [&data_ptr](aos::examples::Ping *ping,
+ const TimestampedMessage ×tamped_message) -> SharedSpan {
+ ping->mutate_value(ping->value() + 10000);
+ data_ptr = timestamped_message.data.get()->get()->data();
+ return *timestamped_message.data;
+ });
// This sends out the fetched messages and advances time to the start of the
// log file.
@@ -170,15 +176,21 @@
// Confirm that the ping and pong counts both match, and the value also
// matches.
- int ping_count = 10;
- test_event_loop->MakeWatcher("/test",
- [&ping_count](const examples::Ping &ping) {
- ++ping_count;
- EXPECT_EQ(ping.value(), ping_count);
- });
+ int ping_count = 10010;
+ test_event_loop->MakeWatcher(
+ "/test",
+ [&test_event_loop, &data_ptr, &ping_count](const examples::Ping &ping) {
+ ++ping_count;
+ EXPECT_EQ(ping.value(), ping_count);
+ // Since simulated event loops (especially log replay) refcount the
+ // shared data, we can verify if the right data got published by
+ // verifying that the actual pointer to the flatbuffer matches. This
+ // is only guaranteed to hold during this callback.
+ EXPECT_EQ(test_event_loop->context().data, data_ptr);
+ });
reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
- EXPECT_EQ(ping_count, 2010);
+ EXPECT_EQ(ping_count, 12010);
}
// Tests calling StartLogging twice.
diff --git a/aos/events/logging/multinode_logger_test.cc b/aos/events/logging/multinode_logger_test.cc
index 2cf8ffe..1e26c3c 100644
--- a/aos/events/logging/multinode_logger_test.cc
+++ b/aos/events/logging/multinode_logger_test.cc
@@ -600,7 +600,7 @@
}
// MultinodeLoggerTest that tests the mutate callback works across multiple
-// nodes with remapping
+// nodes with remapping.
TEST_P(MultinodeLoggerTest, MultiNodeRemapMutateCallback) {
time_converter_.StartEqual();
std::vector<std::string> actual_filenames;
@@ -629,14 +629,18 @@
SimulatedEventLoopFactory log_reader_factory(reader.configuration());
- int pong_count = 0;
+ int pong_count = 10;
// Adds a callback which mutates the value of the pong message before the
// message is sent, which is the feature we are testing here.
- reader.AddBeforeSendCallback("/test",
- [&pong_count](aos::examples::Pong *pong) {
- pong->mutate_value(pong->value() + 1);
- pong_count = pong->value();
- });
+ reader.AddBeforeSendCallback<aos::examples::Pong>(
+ "/test",
+ [&pong_count](
+ aos::examples::Pong *pong,
+ const TimestampedMessage ×tamped_message) -> SharedSpan {
+ pong->mutate_value(pong_count + 1);
+ ++pong_count;
+ return *timestamped_message.data;
+ });
// This sends out the fetched messages and advances time to the start of the
// log file.
@@ -698,14 +702,18 @@
LogReader reader(sorted_parts, &config_.message());
- int pong_count = 0;
+ int pong_count = 10;
// Adds a callback which mutates the value of the pong message before the
// message is sent, which is the feature we are testing here.
- reader.AddBeforeSendCallback("/test",
- [&pong_count](aos::examples::Pong *pong) {
- pong->mutate_value(pong->value() + 1);
- pong_count = pong->value();
- });
+ reader.AddBeforeSendCallback<aos::examples::Pong>(
+ "/test",
+ [&pong_count](
+ aos::examples::Pong *pong,
+ const TimestampedMessage ×tamped_message) -> SharedSpan {
+ pong->mutate_value(pong_count + 1);
+ ++pong_count;
+ return *timestamped_message.data;
+ });
SimulatedEventLoopFactory log_reader_factory(reader.configuration());
@@ -772,11 +780,15 @@
int ping_count = 0;
// Adds a callback which mutates the value of the ping message before the
// message is sent, which is the feature we are testing here.
- reader.AddBeforeSendCallback("/test",
- [&ping_count](aos::examples::Ping *ping) {
- ++ping_count;
- ping->mutate_value(ping_count);
- });
+ reader.AddBeforeSendCallback<aos::examples::Ping>(
+ "/test",
+ [&ping_count](
+ aos::examples::Ping *ping,
+ const TimestampedMessage ×tamped_message) -> SharedSpan {
+ ++ping_count;
+ ping->mutate_value(ping_count);
+ return *timestamped_message.data;
+ });
SimulatedEventLoopFactory log_reader_factory(reader.configuration());
log_reader_factory.set_send_delay(chrono::microseconds(0));
@@ -821,6 +833,291 @@
reader.Deregister();
}
+// MultinodeLoggerTest that tests the mutate callback can fully replace the
+// message.
+TEST_P(MultinodeLoggerTest, MultiNodeMutateCallbackReplacement) {
+ time_converter_.StartEqual();
+ std::vector<std::string> actual_filenames;
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(actual_filenames);
+ EXPECT_TRUE(AllPartsMatchOutOfOrderDuration(sorted_parts));
+
+ LogReader reader(sorted_parts, &config_.message());
+
+ int pong_count = 10;
+ const uint8_t *data_ptr = nullptr;
+ // Adds a callback which replaces the pong message before the message is sent.
+ reader.AddBeforeSendCallback<aos::examples::Pong>(
+ "/test",
+ [&pong_count, &data_ptr](aos::examples::Pong *pong,
+ const TimestampedMessage &) -> SharedSpan {
+ fbs::AlignedVectorAllocator allocator;
+ aos::fbs::Builder<aos::examples::PongStatic> pong_static(&allocator);
+ CHECK(pong_static->FromFlatbuffer(*pong));
+
+ pong_static->set_value(pong_count + 101);
+ ++pong_count;
+
+ SharedSpan result = allocator.Release();
+
+ data_ptr = result->data();
+
+ return result;
+ });
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_pong_count = 10;
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_event_loop, &pong_count, &pi1_pong_count,
+ &data_ptr](const examples::Pong &pong) {
+ ++pi1_pong_count;
+ // Since simulated event loops (especially log replay) refcount the
+ // shared data, we can verify if the right data got published by
+ // verifying that the actual pointer to the flatbuffer matches. This
+ // is only guaranteed to hold during this callback.
+ EXPECT_EQ(pi1_event_loop->context().data, data_ptr);
+ EXPECT_EQ(pong_count + 100, pong.value());
+ EXPECT_EQ(pi1_pong_count + 101, pong.value());
+ });
+
+ pi2_event_loop->MakeWatcher("/test", [&pi2_event_loop, &pong_count,
+ &data_ptr](const examples::Pong &pong) {
+ // Same goes for the forwarded side, that should be the same contents too.
+ EXPECT_EQ(pi2_event_loop->context().data, data_ptr);
+ EXPECT_EQ(pong_count + 100, pong.value());
+ });
+
+ reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
+ reader.Deregister();
+
+ EXPECT_EQ(pong_count, 2011);
+}
+
+// MultinodeLoggerTest that tests the mutate callback can delete messages by
+// returning nullptr.
+TEST_P(MultinodeLoggerTest, MultiNodeMutateCallbackDelete) {
+ time_converter_.StartEqual();
+ std::vector<std::string> actual_filenames;
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(actual_filenames);
+ EXPECT_TRUE(AllPartsMatchOutOfOrderDuration(sorted_parts));
+
+ LogReader reader(sorted_parts, &config_.message());
+
+ int pong_count = 10;
+ const uint8_t *data_ptr = nullptr;
+ // Adds a callback which replaces the pong message before it is sent, and
+ // drops every other message entirely, which is the feature we are testing.
+ reader.AddBeforeSendCallback<aos::examples::Pong>(
+ "/test",
+ [&pong_count, &data_ptr](aos::examples::Pong *pong,
+ const TimestampedMessage &) -> SharedSpan {
+ fbs::AlignedVectorAllocator allocator;
+ aos::fbs::Builder<aos::examples::PongStatic> pong_static(&allocator);
+ CHECK(pong_static->FromFlatbuffer(*pong));
+
+ pong_static->set_value(pong_count + 101);
+ ++pong_count;
+
+ if ((pong_count % 2) == 0) {
+ data_ptr = nullptr;
+ return nullptr;
+ }
+
+ SharedSpan result = allocator.Release();
+
+ data_ptr = result->data();
+
+ return result;
+ });
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_pong_count = 10;
+ pi1_event_loop->MakeWatcher(
+ "/test", [&pi1_event_loop, &pong_count, &pi1_pong_count,
+ &data_ptr](const examples::Pong &pong) {
+ pi1_pong_count += 2;
+ // Since simulated event loops (especially log replay) refcount the
+ // shared data, we can verify if the right data got published by
+ // verifying that the actual pointer to the flatbuffer matches. This
+ // is only guaranteed to hold during this callback.
+ EXPECT_EQ(pi1_event_loop->context().data, data_ptr);
+ EXPECT_EQ(pong_count + 100, pong.value());
+ EXPECT_EQ(pi1_pong_count + 101, pong.value());
+ });
+
+ pi2_event_loop->MakeWatcher("/test", [&pi2_event_loop, &pong_count,
+ &data_ptr](const examples::Pong &pong) {
+ // Same goes for the forwarded side, that should be the same contents too.
+ EXPECT_EQ(pi2_event_loop->context().data, data_ptr);
+ EXPECT_EQ(pong_count + 100, pong.value());
+ });
+
+ reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
+ reader.Deregister();
+
+ EXPECT_EQ(pong_count, 2011);
+ // Since we count up by 2 each time we get a message, and the last pong gets
+ // dropped because it is an odd number, we expect the count on pi1 to be 1 less.
+ EXPECT_EQ(pi1_pong_count, 2010);
+}
+
+// MultinodeLoggerTest that tests that non-forwarded channels are able to be
+// mutated.
+TEST_P(MultinodeLoggerTest, MultiNodeMutateCallbackNotForwarded) {
+ time_converter_.StartEqual();
+ std::vector<std::string> actual_filenames;
+
+ {
+ LoggerState pi1_logger = MakeLogger(pi1_);
+ LoggerState pi2_logger = MakeLogger(pi2_);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(95));
+
+ StartLogger(&pi1_logger);
+ StartLogger(&pi2_logger);
+
+ event_loop_factory_.RunFor(chrono::milliseconds(20000));
+ pi1_logger.AppendAllFilenames(&actual_filenames);
+ pi2_logger.AppendAllFilenames(&actual_filenames);
+ }
+
+ const std::vector<LogFile> sorted_parts = SortParts(actual_filenames);
+ EXPECT_TRUE(AllPartsMatchOutOfOrderDuration(sorted_parts));
+
+ LogReader reader(sorted_parts, &config_.message());
+
+ int ping_count = 10;
+ const uint8_t *data_ptr = nullptr;
+ // Adds a callback which replaces the ping message on a non-forwarded channel
+ // before it is sent, which is the feature we are testing here.
+ reader.AddBeforeSendCallback<aos::examples::Ping>(
+ "/pi1/aos",
+ [&ping_count, &data_ptr](aos::examples::Ping *ping,
+ const TimestampedMessage &) -> SharedSpan {
+ fbs::AlignedVectorAllocator allocator;
+ aos::fbs::Builder<aos::examples::PingStatic> ping_static(&allocator);
+ CHECK(ping_static->FromFlatbuffer(*ping));
+
+ ping_static->set_value(ping_count + 101);
+ ++ping_count;
+
+ SharedSpan result = allocator.Release();
+
+ data_ptr = result->data();
+
+ return result;
+ });
+
+ SimulatedEventLoopFactory log_reader_factory(reader.configuration());
+
+ // This sends out the fetched messages and advances time to the start of the
+ // log file.
+ reader.Register(&log_reader_factory);
+
+ const Node *pi1 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi1");
+ const Node *pi2 =
+ configuration::GetNode(log_reader_factory.configuration(), "pi2");
+
+ EXPECT_THAT(reader.LoggedNodes(),
+ ::testing::ElementsAre(
+ configuration::GetNode(reader.logged_configuration(), pi1),
+ configuration::GetNode(reader.logged_configuration(), pi2)));
+
+ std::unique_ptr<EventLoop> pi1_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi1);
+ std::unique_ptr<EventLoop> pi2_event_loop =
+ log_reader_factory.MakeEventLoop("test", pi2);
+
+ int pi1_ping_count = 10;
+ pi1_event_loop->MakeWatcher(
+ "/aos", [&pi1_event_loop, &ping_count, &pi1_ping_count,
+ &data_ptr](const examples::Ping &ping) {
+ ++pi1_ping_count;
+ // Since simulated event loops (especially log replay) refcount the
+ // shared data, we can verify if the right data got published by
+ // verifying that the actual pointer to the flatbuffer matches. This
+ // is only guaranteed to hold during this callback.
+ EXPECT_EQ(pi1_event_loop->context().data, data_ptr);
+ EXPECT_EQ(ping_count + 100, ping.value());
+ EXPECT_EQ(pi1_ping_count + 101, ping.value());
+ });
+
+ reader.event_loop_factory()->RunFor(std::chrono::seconds(100));
+ reader.Deregister();
+
+ EXPECT_EQ(ping_count, 2011);
+}
+
// Tests that we do not allow adding callbacks after Register is called
TEST_P(MultinodeLoggerDeathTest, AddCallbackAfterRegister) {
time_converter_.StartEqual();
@@ -848,9 +1145,13 @@
reader.Register(&log_reader_factory);
EXPECT_DEATH(
{
- reader.AddBeforeSendCallback("/test", [](aos::examples::Pong *) {
- LOG(FATAL) << "This should not be called";
- });
+ reader.AddBeforeSendCallback<aos::examples::Pong>(
+ "/test",
+ [](aos::examples::Pong *,
+ const TimestampedMessage ×tamped_message) -> SharedSpan {
+ LOG(FATAL) << "This should not be called";
+ return *timestamped_message.data;
+ });
},
"Cannot add callbacks after calling Register");
reader.Deregister();
diff --git a/aos/events/shm_event_loop.cc b/aos/events/shm_event_loop.cc
index e15c014..8035235 100644
--- a/aos/events/shm_event_loop.cc
+++ b/aos/events/shm_event_loop.cc
@@ -411,8 +411,15 @@
CHECK_GT(event_loop_->exit_handle_count_, 0);
--event_loop_->exit_handle_count_;
}
+ // Because of how we handle reference counting, we either need to implement
+ // reference counting in the copy/move constructors or just not support them.
+ // If we ever develop a need for this object to be movable/copyable,
+ // supporting it should be straightforward.
+ DISALLOW_COPY_AND_ASSIGN(ShmExitHandle);
- void Exit() override { event_loop_->Exit(); }
+ void Exit(Result<void> status) override {
+ event_loop_->ExitWithStatus(status);
+ }
private:
ShmEventLoop *const event_loop_;
@@ -1029,7 +1036,7 @@
struct sigaction old_action_term_;
};
-void ShmEventLoop::Run() {
+Result<void> ShmEventLoop::Run() {
CheckCurrentThread();
SignalHandler::global()->Register(this);
@@ -1119,9 +1126,30 @@
// created the timing reporter.
timing_report_sender_.reset();
ClearContext();
+ std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
+ std::optional<Result<void>> exit_status;
+ // Clear the stored exit_status_ and extract it to be returned.
+ exit_status_.swap(exit_status);
+ return exit_status.value_or(Result<void>{});
}
-void ShmEventLoop::Exit() { epoll_.Quit(); }
+void ShmEventLoop::Exit() {
+ observed_exit_.test_and_set();
+ // Implicitly defaults exit_status_ to success by not setting it.
+
+ epoll_.Quit();
+}
+
+void ShmEventLoop::ExitWithStatus(Result<void> status) {
+ // Only set the exit status if no other Exit*() call got here first.
+ if (!observed_exit_.test_and_set()) {
+ std::unique_lock<aos::stl_mutex> locker(exit_status_mutex_);
+ exit_status_ = std::move(status);
+ } else {
+ VLOG(1) << "Exit status is already set; not setting it again.";
+ }
+ Exit();
+}
std::unique_ptr<ExitHandle> ShmEventLoop::MakeExitHandle() {
return std::make_unique<ShmExitHandle>(this);
diff --git a/aos/events/shm_event_loop.h b/aos/events/shm_event_loop.h
index 0e71f96..5da1632 100644
--- a/aos/events/shm_event_loop.h
+++ b/aos/events/shm_event_loop.h
@@ -46,10 +46,18 @@
void operator=(ShmEventLoop const &) = delete;
// Runs the event loop until Exit is called, or ^C is caught.
- void Run();
- // Exits the event loop. Async safe.
+ Result<void> Run();
+ // Exits the event loop. Async-signal-safe (see
+ // https://man7.org/linux/man-pages/man7/signal-safety.7.html).
+ // When called, Run() will return a successful result.
void Exit();
+ // Exits the event loop with the provided status. Thread-safe, but not
+ // async-safe.
+ void ExitWithStatus(Result<void> status = {});
+
+ // Constructs an exit handle for the EventLoop. The provided ExitHandle uses
+ // ExitWithStatus().
std::unique_ptr<ExitHandle> MakeExitHandle();
aos::monotonic_clock::time_point monotonic_now() const override {
@@ -183,6 +191,21 @@
// Only set during Run().
std::unique_ptr<ipc_lib::SignalFd> signalfd_;
+
+ // Calls to Exit() are guaranteed to be thread-safe, so the exit_status_mutex_
+ // guards access to the exit_status_.
+ aos::stl_mutex exit_status_mutex_;
+ // Once exit_status_ is set, we will not set it again until we have
+ // actually exited. This is to try to provide consistent behavior in cases
+ // where Exit() is called multiple times before Run() actually terminates
+ // execution.
+ std::optional<Result<void>> exit_status_{};
+ // Used by the Exit() call to provide an async-safe way of indicating that
+ // Exit() was called.
+ // Will be set once Exit() or ExitWithStatus() has been called.
+ // Note: std::atomic<> is not necessarily guaranteed to be lock-free, although
+ // std::atomic_flag is, and so is safe to use in Exit().
+ std::atomic_flag observed_exit_ = ATOMIC_FLAG_INIT;
};
} // namespace aos
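
A short sketch of the intended ShmEventLoop usage (the `/status` channel, StatusMessage type, and fatal() accessor are placeholders): Exit() keeps the old successful-exit behavior, while ExitWithStatus() lets a handler report a failure that Run() then returns.

aos::ShmEventLoop event_loop(&config.message());
event_loop.MakeWatcher(
    "/status", [&event_loop](const examples::StatusMessage &status) {
      if (status.fatal()) {
        event_loop.ExitWithStatus(
            aos::Error::MakeUnexpectedError("fatal status received"));
      }
    });
const aos::Result<void> result = event_loop.Run();
if (!result.has_value()) {
  LOG(ERROR) << result.error().message();
}
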
diff --git a/aos/events/shm_event_loop_test.cc b/aos/events/shm_event_loop_test.cc
index bae834a..2fbe735 100644
--- a/aos/events/shm_event_loop_test.cc
+++ b/aos/events/shm_event_loop_test.cc
@@ -57,7 +57,13 @@
return loop;
}
- void Run() override { CHECK_NOTNULL(primary_event_loop_)->Run(); }
+ Result<void> Run() override {
+ return CHECK_NOTNULL(primary_event_loop_)->Run();
+ }
+
+ std::unique_ptr<ExitHandle> MakeExitHandle() override {
+ return CHECK_NOTNULL(primary_event_loop_)->MakeExitHandle();
+ }
void Exit() override { CHECK_NOTNULL(primary_event_loop_)->Exit(); }
@@ -66,7 +72,7 @@
}
private:
- ::aos::ShmEventLoop *primary_event_loop_;
+ ::aos::ShmEventLoop *primary_event_loop_ = nullptr;
};
auto CommonParameters() {
@@ -296,6 +302,21 @@
EXPECT_EQ(times.size(), 2u);
}
+// Tests that the ShmEventLoop::Exit() method causes the ShmEventLoop to return
+// with a successful status.
+TEST_P(ShmEventLoopTest, SuccessfulExitTest) {
+ auto loop1 = factory()->MakePrimary("primary");
+ auto exit_handle = factory()->MakeExitHandle();
+
+ loop1->OnRun([this, &exit_handle]() {
+ factory()->Exit();
+ // The second Exit() call should get ignored.
+ exit_handle->Exit(aos::Error::MakeUnexpectedError("Hello, World!"));
+ });
+
+ EXPECT_TRUE(factory()->Run().has_value());
+}
+
// Test GetWatcherSharedMemory in a few basic scenarios.
TEST_P(ShmEventLoopDeathTest, GetWatcherSharedMemory) {
auto generic_loop1 = factory()->MakePrimary("primary");
diff --git a/aos/events/simulated_event_loop.cc b/aos/events/simulated_event_loop.cc
index 1b193db..3c84cfb 100644
--- a/aos/events/simulated_event_loop.cc
+++ b/aos/events/simulated_event_loop.cc
@@ -135,7 +135,7 @@
--factory_->exit_handle_count_;
}
- void Exit() override { factory_->Exit(); }
+ void Exit(Result<void> status) override { factory_->Exit(status); }
private:
SimulatedEventLoopFactory *const factory_;
@@ -1559,7 +1559,15 @@
channels_.clear();
}
-void SimulatedEventLoopFactory::RunFor(monotonic_clock::duration duration) {
+Result<void> SimulatedEventLoopFactory::GetAndClearExitStatus() {
+ std::optional<Result<void>> exit_status;
+ // Clear the stored exit_status_ and extract it to be returned.
+ exit_status_.swap(exit_status);
+ return exit_status.value_or(Result<void>{});
+}
+
+Result<void> SimulatedEventLoopFactory::RunFor(
+ monotonic_clock::duration duration) {
// This sets running to true too.
scheduler_scheduler_.RunFor(duration);
for (std::unique_ptr<NodeEventLoopFactory> &node : node_factories_) {
@@ -1569,14 +1577,20 @@
}
}
}
+ return GetAndClearExitStatus();
}
-bool SimulatedEventLoopFactory::RunUntil(realtime_clock::time_point now,
- const aos::Node *node) {
- bool ran_until_time = scheduler_scheduler_.RunUntil(
- now, &GetNodeEventLoopFactory(node)->scheduler_, [this, &node](void) {
- return GetNodeEventLoopFactory(node)->realtime_offset();
- });
+Result<SimulatedEventLoopFactory::RunEndState>
+SimulatedEventLoopFactory::RunUntil(realtime_clock::time_point now,
+ const aos::Node *node) {
+ RunEndState ran_until_time =
+ scheduler_scheduler_.RunUntil(
+ now, &GetNodeEventLoopFactory(node)->scheduler_,
+ [this, &node](void) {
+ return GetNodeEventLoopFactory(node)->realtime_offset();
+ })
+ ? RunEndState::kEventsRemaining
+ : RunEndState::kFinishedEventProcessing;
for (std::unique_ptr<NodeEventLoopFactory> &node : node_factories_) {
if (node) {
for (SimulatedEventLoop *loop : node->event_loops_) {
@@ -1584,9 +1598,11 @@
}
}
}
- return ran_until_time;
+ return GetAndClearExitStatus().transform(
+ [ran_until_time]() { return ran_until_time; });
}
-void SimulatedEventLoopFactory::Run() {
+
+Result<void> SimulatedEventLoopFactory::Run() {
// This sets running to true too.
scheduler_scheduler_.Run();
for (std::unique_ptr<NodeEventLoopFactory> &node : node_factories_) {
@@ -1596,9 +1612,17 @@
}
}
}
+ return GetAndClearExitStatus();
}
-void SimulatedEventLoopFactory::Exit() { scheduler_scheduler_.Exit(); }
+void SimulatedEventLoopFactory::Exit(Result<void> status) {
+ if (!exit_status_.has_value()) {
+ exit_status_ = std::move(status);
+ } else {
+ VLOG(1) << "Exit status is already set; not setting it again.";
+ }
+ scheduler_scheduler_.Exit();
+}
std::unique_ptr<ExitHandle> SimulatedEventLoopFactory::MakeExitHandle() {
return std::make_unique<SimulatedFactoryExitHandle>(this);
diff --git a/aos/events/simulated_event_loop.h b/aos/events/simulated_event_loop.h
index 5fad005..3bcd639 100644
--- a/aos/events/simulated_event_loop.h
+++ b/aos/events/simulated_event_loop.h
@@ -88,17 +88,25 @@
// Starts executing the event loops unconditionally until Exit is called or
// all the nodes have shut down.
- void Run();
+ // All Run*() methods return an unexpected value either if there is an
+ // internal fault that can still be recovered from gracefully or if a user
+ // application called Exit() with a failure status.
+ Result<void> Run();
// Executes the event loops for a duration.
- void RunFor(distributed_clock::duration duration);
+ Result<void> RunFor(distributed_clock::duration duration);
// Executes the event loops until a time.
- // Returns true if there are still events remaining.
- bool RunUntil(aos::realtime_clock::time_point time,
- const aos::Node *node = nullptr);
+ // Returns kEventsRemaining if there are still events remaining.
+ // Returns an unexpected value if there was an error.
+ enum class RunEndState {
+ kEventsRemaining,
+ kFinishedEventProcessing,
+ };
+ Result<RunEndState> RunUntil(aos::realtime_clock::time_point time,
+ const aos::Node *node = nullptr);
// Stops executing all event loops. Meant to be called from within an event
// loop handler.
- void Exit();
+ void Exit(Result<void> status = {});
std::unique_ptr<ExitHandle> MakeExitHandle();
@@ -157,6 +165,10 @@
friend class NodeEventLoopFactory;
friend class SimulatedFactoryExitHandle;
+ // Returns the contents of exit_status_ (or a successful Result<> if
+ // exit_status_ is nullopt), and clears the exit status.
+ Result<void> GetAndClearExitStatus();
+
const Configuration *const configuration_;
EventSchedulerScheduler scheduler_scheduler_;
@@ -170,6 +182,12 @@
std::vector<const Node *> nodes_;
int exit_handle_count_ = 0;
+
+ // Once exit_status_ is set, we will not set it again until we have actually
+ // exited. This is to provide consistent behavior in cases where Exit() is
+ // called multiple times before Run() actually terminates execution.
+ std::optional<Result<void>> exit_status_{};
};
// This class holds all the state required to be a single node.
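A minimal usage sketch of the Result-returning Run()/Exit() API declared above (hypothetical code, not part of this change; the configuration setup and event loop name are placeholders):

#include "aos/events/simulated_event_loop.h"
#include "glog/logging.h"

void ExampleExitWithStatus(const aos::Configuration *config) {
  aos::SimulatedEventLoopFactory factory(config);
  std::unique_ptr<aos::EventLoop> loop = factory.MakeEventLoop("test");
  loop->OnRun([&factory]() {
    // Only the first Exit() status is recorded; later calls are ignored.
    factory.Exit(aos::Error::MakeUnexpectedError("injected failure"));
  });
  // Run() now surfaces the status that was handed to Exit().
  const aos::Result<void> result = factory.Run();
  CHECK(!result.has_value());
}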
diff --git a/aos/events/simulated_event_loop_test.cc b/aos/events/simulated_event_loop_test.cc
index a317db8..1588ede 100644
--- a/aos/events/simulated_event_loop_test.cc
+++ b/aos/events/simulated_event_loop_test.cc
@@ -42,7 +42,13 @@
return event_loop_factory_->MakeEventLoop(name, my_node());
}
- void Run() override { event_loop_factory_->Run(); }
+ Result<void> Run() override { return event_loop_factory_->Run(); }
+
+ std::unique_ptr<ExitHandle> MakeExitHandle() override {
+ MaybeMake();
+ return event_loop_factory_->MakeExitHandle();
+ }
+
void Exit() override { event_loop_factory_->Exit(); }
// TODO(austin): Implement this. It's used currently for a phased loop test.
diff --git a/aos/flatbuffers.h b/aos/flatbuffers.h
index 607303c..e5de120 100644
--- a/aos/flatbuffers.h
+++ b/aos/flatbuffers.h
@@ -9,6 +9,7 @@
#include "glog/logging.h"
#include "aos/containers/resizeable_buffer.h"
+#include "aos/ipc_lib/data_alignment.h"
#include "aos/macros.h"
#include "aos/util/file.h"
diff --git a/aos/flatbuffers/BUILD b/aos/flatbuffers/BUILD
index 32f1d39..e4f7d1d 100644
--- a/aos/flatbuffers/BUILD
+++ b/aos/flatbuffers/BUILD
@@ -19,13 +19,22 @@
cc_library(
name = "base",
- srcs = ["base.cc"],
- hdrs = ["base.h"],
+ srcs = [
+ "base.cc",
+ ],
+ hdrs = [
+ "base.h",
+ ],
target_compatible_with = ["@platforms//os:linux"],
visibility = ["//visibility:public"],
deps = [
+ "//aos:shared_span",
+ "//aos/containers:resizeable_buffer",
+ "//aos/ipc_lib:data_alignment",
"@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
+ "@com_google_absl//absl/base",
+ "@com_google_absl//absl/types:span",
],
)
diff --git a/aos/flatbuffers/base.cc b/aos/flatbuffers/base.cc
index 97b3b36..afd48a2 100644
--- a/aos/flatbuffers/base.cc
+++ b/aos/flatbuffers/base.cc
@@ -37,20 +37,17 @@
}
}
-bool ResizeableObject::InsertBytes(void *insertion_point, size_t bytes,
- SetZero set_zero) {
+std::optional<std::span<uint8_t>> ResizeableObject::InsertBytes(
+ void *insertion_point, size_t bytes, SetZero set_zero) {
// See comments on InsertBytes() declaration and in FixObjects()
// implementation below.
- CHECK_LT(buffer_.data(), reinterpret_cast<const uint8_t *>(insertion_point))
+ CHECK_LT(reinterpret_cast<const void *>(buffer_.data()),
+ reinterpret_cast<const void *>(insertion_point))
<< ": Insertion may not be prior to the start of the buffer.";
- // Check that we started off with a properly aligned size.
- // Doing this CHECK earlier is tricky because if done in the constructor then
- // it executes prior to the Alignment() implementation being available.
- CHECK_EQ(0u, buffer_.size() % Alignment());
// Note that we will round up the size to the current alignment, so that we
// ultimately end up only adjusting the buffer size by a multiple of its
// alignment, to avoid having to do any more complicated bookkeeping.
- const size_t aligned_bytes = PaddedSize(bytes, Alignment());
+ const size_t aligned_bytes = AlignOffset(bytes, Alignment());
if (parent_ != nullptr) {
return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
} else {
@@ -59,14 +56,16 @@
->InsertBytes(insertion_point, aligned_bytes, Alignment(),
set_zero);
if (!new_buffer.has_value()) {
- return false;
+ return std::nullopt;
}
- UpdateBuffer(new_buffer.value(),
- new_buffer.value().data() +
- (reinterpret_cast<const uint8_t *>(insertion_point) -
- buffer_.data()),
- aligned_bytes);
- return true;
+ std::span<uint8_t> inserted_data(
+ new_buffer.value().data() +
+ (reinterpret_cast<const uint8_t *>(insertion_point) -
+ buffer_.data()),
+ aligned_bytes);
+ UpdateBuffer(new_buffer.value(), inserted_data.data(),
+ inserted_data.size());
+ return inserted_data;
}
}
@@ -78,16 +77,9 @@
ObserveBufferModification();
}
-std::span<uint8_t> ResizeableObject::BufferForObject(
- size_t absolute_offset, size_t size, size_t terminal_alignment) {
- const size_t padded_size = PaddedSize(size, terminal_alignment);
- std::span<uint8_t> padded_buffer =
- internal::GetSubSpan(buffer_, absolute_offset, padded_size);
- std::span<uint8_t> object_buffer =
- internal::GetSubSpan(padded_buffer, 0, size);
- std::span<uint8_t> padding = internal::GetSubSpan(padded_buffer, size);
- internal::ClearSpan(padding);
- return object_buffer;
+std::span<uint8_t> ResizeableObject::BufferForObject(size_t absolute_offset,
+ size_t size) {
+ return internal::GetSubSpan(buffer_, absolute_offset, size);
}
void ResizeableObject::FixObjects(void *modification_point,
@@ -103,8 +95,7 @@
object.inline_entry < modification_point) {
if (*object.inline_entry != 0) {
CHECK_EQ(static_cast<const void *>(
- static_cast<const uint8_t *>(absolute_offset) +
- CHECK_NOTNULL(object.object)->AbsoluteOffsetOffset()),
+ static_cast<const uint8_t *>(absolute_offset)),
DereferenceOffset(object.inline_entry));
*object.inline_entry += bytes_inserted;
CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
@@ -119,8 +110,7 @@
// We only need to update the object's buffer if it currently exists.
if (object.object != nullptr) {
std::span<uint8_t> subbuffer = BufferForObject(
- *object.absolute_offset, object.object->buffer_.size(),
- object.object->Alignment());
+ *object.absolute_offset, object.object->buffer_.size());
// By convention (enforced in InsertBytes()), the modification_point shall
// not be at the start of the subobject's data buffer; it may be the byte
// just past the end of the buffer. This makes it so that it is unambiguous
@@ -136,39 +126,8 @@
}
}
-std::optional<std::span<uint8_t>> VectorAllocator::Allocate(
- size_t size, size_t /*alignment*/, SetZero set_zero) {
- CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
- buffer_.resize(size);
- if (set_zero == SetZero::kYes) {
- memset(buffer_.data(), 0, buffer_.size());
- }
- return std::span<uint8_t>{buffer_.data(), buffer_.size()};
-}
-
-std::optional<std::span<uint8_t>> VectorAllocator::InsertBytes(
- void *insertion_point, size_t bytes, size_t /*alignment*/, SetZero) {
- const ssize_t insertion_index =
- reinterpret_cast<uint8_t *>(insertion_point) - buffer_.data();
- CHECK_LE(0, insertion_index);
- CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
- buffer_.insert(buffer_.begin() + insertion_index, bytes, 0);
- return std::span<uint8_t>{buffer_.data(), buffer_.size()};
-}
-
-std::span<uint8_t> VectorAllocator::RemoveBytes(
- std::span<uint8_t> remove_bytes) {
- const ssize_t removal_index = remove_bytes.data() - buffer_.data();
- CHECK_LE(0, removal_index);
- CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
- CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
- buffer_.erase(buffer_.begin() + removal_index,
- buffer_.begin() + removal_index + remove_bytes.size());
- return {buffer_.data(), buffer_.size()};
-}
-
std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
- size_t /*alignment*/,
+ size_t alignment,
SetZero set_zero) {
CHECK(!allocated_);
if (size > buffer_.size()) {
@@ -179,6 +138,10 @@
}
allocated_size_ = size;
allocated_ = true;
+ CHECK_GT(alignment, 0u);
+ CHECK_EQ(buffer_.size() % alignment, 0u)
+ << ": Buffer isn't a multiple of alignment " << alignment << " long, is "
+ << buffer_.size() << " long";
return internal::GetSubSpan(buffer_, buffer_.size() - size);
}
@@ -223,6 +186,97 @@
allocated_ = false;
}
+AlignedVectorAllocator::~AlignedVectorAllocator() {
+ CHECK(buffer_.empty())
+ << ": Must deallocate before destroying the AlignedVectorAllocator.";
+}
+
+std::optional<std::span<uint8_t>> AlignedVectorAllocator::Allocate(
+ size_t size, size_t /*alignment*/, fbs::SetZero set_zero) {
+ CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
+ buffer_.resize(((size + kAlignment - 1) / kAlignment) * kAlignment);
+ allocated_size_ = size;
+ if (set_zero == fbs::SetZero::kYes) {
+ memset(buffer_.data(), 0, buffer_.size());
+ }
+
+ return std::span<uint8_t>{data(), allocated_size_};
+}
+
+std::optional<std::span<uint8_t>> AlignedVectorAllocator::InsertBytes(
+ void *insertion_point, size_t bytes, size_t /*alignment*/,
+ fbs::SetZero set_zero) {
+ DCHECK_GE(reinterpret_cast<const uint8_t *>(insertion_point), data());
+ DCHECK_LE(reinterpret_cast<const uint8_t *>(insertion_point),
+ data() + allocated_size_);
+ const size_t buffer_offset =
+ reinterpret_cast<const uint8_t *>(insertion_point) - data();
+ // TODO(austin): This has an extra memcpy in it that isn't strictly needed
+ // when we resize. Remove it if performance is a concern.
+ const size_t absolute_buffer_offset =
+ reinterpret_cast<const uint8_t *>(insertion_point) - buffer_.data();
+ const size_t previous_size = buffer_.size();
+
+ buffer_.resize(((allocated_size_ + bytes + kAlignment - 1) / kAlignment) *
+ kAlignment);
+
+ // Now, we've got space both before and after the block of data. Move the
+ // data after to the end, and the data before to the start.
+
+ const size_t new_space_after = buffer_.size() - previous_size;
+
+ // Move the rest of the data to be end aligned. If the buffer wasn't resized,
+ // this will be a nop.
+ memmove(buffer_.data() + absolute_buffer_offset + new_space_after,
+ buffer_.data() + absolute_buffer_offset,
+ previous_size - absolute_buffer_offset);
+
+ // Now, move the data at the front to be aligned too.
+ memmove(buffer_.data() + buffer_.size() - (allocated_size_ + bytes),
+ buffer_.data() + previous_size - allocated_size_,
+ allocated_size_ - (previous_size - absolute_buffer_offset));
+
+ if (set_zero == fbs::SetZero::kYes) {
+ memset(data() - bytes + buffer_offset, 0, bytes);
+ }
+ allocated_size_ += bytes;
+
+ return std::span<uint8_t>{data(), allocated_size_};
+}
+
+std::span<uint8_t> AlignedVectorAllocator::RemoveBytes(
+ std::span<uint8_t> remove_bytes) {
+ const ssize_t removal_index = remove_bytes.data() - buffer_.data();
+ const size_t old_start_index = buffer_.size() - allocated_size_;
+ CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
+ CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
+ CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
+ uint8_t *old_buffer_start = buffer_.data() + old_start_index;
+ memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
+ removal_index - old_start_index);
+ allocated_size_ -= remove_bytes.size();
+
+ return std::span<uint8_t>{data(), allocated_size_};
+}
+
+void AlignedVectorAllocator::Deallocate(std::span<uint8_t>) {
+ if (!released_) {
+ CHECK(!buffer_.empty())
+ << ": Called Deallocate() without a prior allocation.";
+ }
+ released_ = false;
+ buffer_.resize(0);
+}
+
+aos::SharedSpan AlignedVectorAllocator::Release() {
+ absl::Span<uint8_t> span{data(), allocated_size_};
+ std::shared_ptr<SharedSpanHolder> result = std::make_shared<SharedSpanHolder>(
+ std::move(buffer_), absl::Span<const uint8_t>());
+ result->span = span;
+ released_ = true;
+ return aos::SharedSpan(result, &(result->span));
+}
+
namespace internal {
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
constexpr size_t kRowSize = 8u;
diff --git a/aos/flatbuffers/base.h b/aos/flatbuffers/base.h
index ff81c9a..6e93f93 100644
--- a/aos/flatbuffers/base.h
+++ b/aos/flatbuffers/base.h
@@ -1,5 +1,6 @@
#ifndef AOS_FLATBUFFERS_BASE_H_
#define AOS_FLATBUFFERS_BASE_H_
+
#include <stdint.h>
#include <sys/types.h>
@@ -11,21 +12,31 @@
#include <utility>
#include <vector>
+#include "absl/types/span.h"
#include "flatbuffers/base.h"
#include "glog/logging.h"
+#include "aos/containers/resizeable_buffer.h"
+#include "aos/ipc_lib/data_alignment.h"
+#include "aos/shared_span.h"
+
namespace aos::fbs {
+
using ::flatbuffers::soffset_t;
using ::flatbuffers::uoffset_t;
using ::flatbuffers::voffset_t;
-// Returns the smallest multiple of alignment that is greater than or equal to
-// size.
-constexpr size_t PaddedSize(size_t size, size_t alignment) {
+// Returns the smallest offset >= 'starting_offset' such that the position
+// 'aligned_offset' bytes after the returned offset has 'alignment' alignment.
+// This assumes that the first 'starting_offset' bytes are spoken for.
+constexpr size_t AlignOffset(size_t starting_offset, size_t alignment,
+ size_t aligned_offset = 0) {
// We can be clever with bitwise operations by assuming that alignment is a
// power of two. Or we can just be clearer about what we mean and eat a few
// integer divides.
- return (((size - 1) / alignment) + 1) * alignment;
+ return (((starting_offset + aligned_offset - 1) / alignment) + 1) *
+ alignment -
+ aligned_offset;
}
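A quick illustration of the semantics above; the expected values mirror the AlignOffset() expectations added to base_test.cc later in this change:

#include "aos/flatbuffers/base.h"

static_assert(aos::fbs::AlignOffset(5, 4) == 8,
              "5 rounds up to the next multiple of 4.");
static_assert(aos::fbs::AlignOffset(4, 8, 4) == 4,
              "4 + 4 is already a multiple of 8, so nothing is added.");
static_assert(aos::fbs::AlignOffset(5, 8, 4) == 12,
              "12 is the smallest offset >= 5 for which offset + 4 is 8-aligned.");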
// Used as a parameter to methods where we are messing with memory and may or
@@ -109,11 +120,6 @@
ResizeableObject(ResizeableObject &&other);
// Required alignment of this object.
virtual size_t Alignment() const = 0;
- // Offset from the start of buffer() to the actual start of the object in
- // question (this is important for vectors, where the vector itself cannot
- // have internal padding, and so the start of the vector may be offset from
- // the start of the buffer to handle alignment).
- virtual size_t AbsoluteOffsetOffset() const = 0;
// Causes bytes bytes to be inserted between insertion_point - 1 and
// insertion_point.
// If requested, the new bytes will be cleared to zero; otherwise they will be
@@ -123,9 +129,10 @@
// implementation, and is merely a requirement that any buffer growth occur
// only on the inside or past the end of the vector, and not prior to the
// start of the vector.
- // Returns true on success, false on failure (e.g., if the allocator has no
- // memory available).
- bool InsertBytes(void *insertion_point, size_t bytes, SetZero set_zero);
+ // Returns a span of the inserted bytes on success, nullopt on failure (e.g.,
+ // if the allocator has no memory available).
+ std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
+ size_t bytes, SetZero set_zero);
// Called *after* the internal buffer_ has been swapped out and *after* the
// object tree has been traversed and fixed.
virtual void ObserveBufferModification() {}
@@ -144,12 +151,9 @@
const void *PointerForAbsoluteOffset(const size_t absolute_offset) {
return buffer_.data() + absolute_offset;
}
- // Returns a span at the requested offset into the buffer. terminal_alignment
- // does not align the start of the buffer; instead, it ensures that the memory
- // from absolute_offset + size until the next multiple of terminal_alignment
- // is set to all zeroes.
- std::span<uint8_t> BufferForObject(size_t absolute_offset, size_t size,
- size_t terminal_alignment);
+ // Returns a span at the requested offset into the buffer for the requested
+ // size.
+ std::span<uint8_t> BufferForObject(size_t absolute_offset, size_t size);
// When memory has been inserted/removed, this iterates over the sub-objects
// and notifies/adjusts them appropriately.
// This will be called after buffer_ has been updated, and:
@@ -174,8 +178,9 @@
class Allocator {
public:
virtual ~Allocator() {}
- // Allocates memory of the requested size and alignment. alignment is not
+ // Allocates memory of the requested size and alignment. alignment is
// guaranteed.
+ //
// On failure to allocate the requested size, returns nullopt;
// Never returns a partial span.
// The span will be initialized to zero upon request.
@@ -183,16 +188,19 @@
// Deallocate() has been called. In order to adjust the size of the buffer,
// call InsertBytes() and RemoveBytes().
[[nodiscard]] virtual std::optional<std::span<uint8_t>> Allocate(
- size_t size, size_t alignment_hint, SetZero set_zero) = 0;
+ size_t size, size_t alignment, SetZero set_zero) = 0;
// Identical to Allocate(), but dies on failure.
- [[nodiscard]] std::span<uint8_t> AllocateOrDie(size_t size,
- size_t alignment_hint,
+ [[nodiscard]] std::span<uint8_t> AllocateOrDie(size_t size, size_t alignment,
SetZero set_zero) {
std::optional<std::span<uint8_t>> span =
- Allocate(size, alignment_hint, set_zero);
+ Allocate(size, alignment, set_zero);
CHECK(span.has_value()) << ": Failed to allocate " << size << " bytes.";
CHECK_EQ(size, span.value().size())
<< ": Failed to allocate " << size << " bytes.";
+ CHECK_EQ(reinterpret_cast<size_t>(span.value().data()) % alignment, 0u)
+ << "Failed to allocate data of length " << size << " with alignment "
+ << alignment;
+
return span.value();
}
// Increases the size of the buffer by inserting bytes bytes immediately
@@ -219,33 +227,6 @@
virtual void Deallocate(std::span<uint8_t> buffer) = 0;
};
-// Allocator that uses an std::vector to allow arbitrary-sized allocations.
-// Does not provide any alignment guarantees.
-class VectorAllocator : public Allocator {
- public:
- VectorAllocator() {}
- ~VectorAllocator() {
- CHECK(buffer_.empty())
- << ": Must deallocate before destroying the VectorAllocator.";
- }
- std::optional<std::span<uint8_t>> Allocate(size_t size, size_t /*alignment*/,
- SetZero set_zero) override;
- std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
- size_t bytes,
- size_t /*alignment*/,
- SetZero /*set_zero*/) override;
- std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;
-
- void Deallocate(std::span<uint8_t>) override {
- CHECK(!buffer_.empty())
- << ": Called Deallocate() without a prior allocation.";
- buffer_.resize(0);
- }
-
- private:
- std::vector<uint8_t> buffer_;
-};
-
// Allocator that allocates all of its memory within a provided span. To match
// the behavior of the FlatBufferBuilder, it will start its allocations at the
// end of the provided span.
@@ -278,17 +259,59 @@
size_t allocated_size_ = 0;
};
+// Allocator that uses an AllocatorResizeableBuffer to allow arbitrary-sized
+// allocations. Aligns the end of the buffer to an alignment of
+// kChannelDataAlignment.
+class AlignedVectorAllocator : public fbs::Allocator {
+ public:
+ static constexpr size_t kAlignment = aos::kChannelDataAlignment;
+ AlignedVectorAllocator() {}
+ ~AlignedVectorAllocator();
+
+ std::optional<std::span<uint8_t>> Allocate(size_t size, size_t alignment,
+ fbs::SetZero set_zero) override;
+
+ std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
+ size_t bytes, size_t alignment,
+ fbs::SetZero set_zero) override;
+
+ std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;
+
+ void Deallocate(std::span<uint8_t>) override;
+
+ // Releases the data which has been allocated from this allocator to the
+ // caller. This is needed because Deallocate actually frees the memory.
+ aos::SharedSpan Release();
+
+ private:
+ struct SharedSpanHolder {
+ aos::AllocatorResizeableBuffer<aos::AlignedReallocator<kAlignment>> buffer;
+ absl::Span<const uint8_t> span;
+ };
+ uint8_t *data() { return buffer_.data() + buffer_.size() - allocated_size_; }
+
+ aos::AllocatorResizeableBuffer<aos::AlignedReallocator<kAlignment>> buffer_;
+
+ // The size of the data that has been returned from Allocate. This counts
+ // from the end of buffer_.
+ size_t allocated_size_ = 0u;
+ // If true, the data has been released from buffer_, and we don't own it
+ // anymore. This enables Deallocate to properly handle the case when the user
+ // releases the memory, but the Builder still needs to clean up.
+ bool released_ = false;
+};
+
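A brief usage sketch of the AlignedVectorAllocator declared above (hypothetical; MyTableStatic and set_scalar() stand in for a generated static flatbuffer table and one of its setters):

#include "aos/flatbuffers/base.h"
#include "aos/flatbuffers/builder.h"
#include "glog/logging.h"

void ExampleRelease() {
  aos::fbs::AlignedVectorAllocator allocator;
  aos::fbs::Builder<MyTableStatic> builder(&allocator);
  builder.get()->set_scalar(971);  // Populate fields as usual.
  // Hand the kChannelDataAlignment-aligned serialized bytes to the caller; the
  // SharedSpan keeps the underlying storage alive even though the allocator no
  // longer owns it, and the Builder can still clean up safely afterwards.
  aos::SharedSpan span = allocator.Release();
  LOG(INFO) << "Serialized " << span->size() << " bytes.";
}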
// Allocates and owns a fixed-size memory buffer on the stack.
//
// This provides a convenient Allocator for use with the aos::fbs::Builder
// in realtime code instead of trying to use the VectorAllocator.
-template <std::size_t N>
+template <std::size_t N, std::size_t alignment = 64>
class FixedStackAllocator : public SpanAllocator {
public:
FixedStackAllocator() : SpanAllocator({buffer_, sizeof(buffer_)}) {}
private:
- uint8_t buffer_[N];
+ alignas(alignment) uint8_t buffer_[N];
};
namespace internal {
@@ -323,4 +346,5 @@
};
} // namespace internal
} // namespace aos::fbs
+
#endif // AOS_FLATBUFFERS_BASE_H_
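With the new alignment template parameter on FixedStackAllocator, a stack-backed Builder can be declared as in the FixedStackAllocator test later in this change (sketch; TestTableStatic is the generated type used by those tests, defined in its generated header):

#include "aos/flatbuffers/builder.h"

void ExampleStackBuilder() {
  aos::fbs::FixedStackAllocator<aos::fbs::Builder<TestTableStatic>::kBufferSize,
                                aos::fbs::Builder<TestTableStatic>::kAlign>
      allocator;
  aos::fbs::Builder<TestTableStatic> builder(&allocator);
  // Populate builder.get() as usual; no heap allocation occurs.
}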
diff --git a/aos/flatbuffers/base_test.cc b/aos/flatbuffers/base_test.cc
index f0eaf04..b77f352 100644
--- a/aos/flatbuffers/base_test.cc
+++ b/aos/flatbuffers/base_test.cc
@@ -7,32 +7,46 @@
#include "gtest/gtest.h"
namespace aos::fbs::testing {
-// Tests that PaddedSize() behaves as expected.
-TEST(BaseTest, PaddedSize) {
- EXPECT_EQ(0, PaddedSize(0, 4));
- EXPECT_EQ(4, PaddedSize(4, 4));
- EXPECT_EQ(8, PaddedSize(5, 4));
- EXPECT_EQ(8, PaddedSize(6, 4));
- EXPECT_EQ(8, PaddedSize(7, 4));
+// Tests that AlignOffset() behaves as expected.
+TEST(BaseTest, AlignOffset) {
+ EXPECT_EQ(0, AlignOffset(0, 4));
+ EXPECT_EQ(4, AlignOffset(4, 4));
+ EXPECT_EQ(8, AlignOffset(5, 4));
+ EXPECT_EQ(8, AlignOffset(6, 4));
+ EXPECT_EQ(8, AlignOffset(7, 4));
}
-inline constexpr size_t kDefaultSize = 16;
+// Tests that AlignOffset handles the alignment point being nonzero. This shows
+// up when you want 8 byte alignment 4 bytes into the start of the buffer, and
+// don't want to pad out the front of the buffer.
+TEST(BaseTest, AlignOffsetWithOffset) {
+ EXPECT_EQ(4, AlignOffset(4, 4, 4));
+
+ EXPECT_EQ(4, AlignOffset(0, 8, 4));
+ EXPECT_EQ(4, AlignOffset(1, 8, 4));
+ EXPECT_EQ(4, AlignOffset(2, 8, 4));
+ EXPECT_EQ(4, AlignOffset(3, 8, 4));
+ EXPECT_EQ(4, AlignOffset(4, 8, 4));
+ EXPECT_EQ(12, AlignOffset(5, 8, 4));
+}
+
+inline constexpr size_t kDefaultSize = AlignedVectorAllocator::kAlignment * 2;
template <typename T>
class AllocatorTest : public ::testing::Test {
protected:
AllocatorTest() : allocator_(std::make_unique<T>()) {}
- std::vector<uint8_t> buffer_;
+ alignas(64) std::array<uint8_t, kDefaultSize> buffer_;
// unique_ptr so that we can destroy the allocator at will.
std::unique_ptr<T> allocator_;
};
template <>
AllocatorTest<SpanAllocator>::AllocatorTest()
- : buffer_(kDefaultSize),
- allocator_(std::make_unique<SpanAllocator>(
+ : allocator_(std::make_unique<SpanAllocator>(
std::span<uint8_t>{buffer_.data(), buffer_.size()})) {}
-using AllocatorTypes = ::testing::Types<SpanAllocator, VectorAllocator>;
+using AllocatorTypes = ::testing::Types<SpanAllocator, AlignedVectorAllocator,
+ FixedStackAllocator<kDefaultSize>>;
TYPED_TEST_SUITE(AllocatorTest, AllocatorTypes);
// Tests that we can create and not use a VectorAllocator.
@@ -77,8 +91,24 @@
this->allocator_->Deallocate(span);
}
+// Tests that all allocators return data aligned to the requested alignment.
+TYPED_TEST(AllocatorTest, Alignment) {
+ for (size_t alignment : {4, 8, 16, 32, 64}) {
+ std::span<uint8_t> span =
+ this->allocator_->Allocate(kDefaultSize, alignment, SetZero::kYes)
+ .value();
+ EXPECT_EQ(reinterpret_cast<size_t>(span.data()) % alignment, 0);
+ this->allocator_->Deallocate(span);
+ }
+}
+
// Tests that we can remove bytes from an arbitrary spot in the buffer.
TYPED_TEST(AllocatorTest, RemoveBytes) {
+ // Deletion doesn't require resizing, so the buffer doesn't need to be larger
+ // than the alignment to test everything. The test requires the size to be
+ // < 255 to store the sentinel values.
+ const size_t kDefaultSize = 128;
+
const size_t half_size = kDefaultSize / 2;
std::span<uint8_t> span =
this->allocator_->Allocate(kDefaultSize, 4, SetZero::kYes).value();
@@ -134,7 +164,7 @@
std::vector<uint8_t> buffer(kDefaultSize);
SpanAllocator allocator({buffer.data(), buffer.size()});
std::span<uint8_t> span =
- allocator.Allocate(kDefaultSize, 0, SetZero::kYes).value();
+ allocator.Allocate(kDefaultSize, 1, SetZero::kYes).value();
EXPECT_EQ(kDefaultSize, span.size());
EXPECT_FALSE(
allocator.InsertBytes(span.data(), 1u, 0, SetZero::kYes).has_value());
@@ -154,7 +184,8 @@
virtual ~TestResizeableObject() {}
using ResizeableObject::SubObject;
bool InsertBytes(void *insertion_point, size_t bytes) {
- return ResizeableObject::InsertBytes(insertion_point, bytes, SetZero::kYes);
+ return ResizeableObject::InsertBytes(insertion_point, bytes, SetZero::kYes)
+ .has_value();
}
TestResizeableObject(TestResizeableObject &&) = default;
@@ -188,7 +219,6 @@
TestObject &GetObject(size_t index) { return objects_.at(index); }
size_t Alignment() const override { return 64; }
- size_t AbsoluteOffsetOffset() const override { return 0; }
private:
std::vector<TestObject> objects_;
@@ -201,7 +231,7 @@
: object_(allocator_.Allocate(kInitialSize, 4, SetZero::kYes).value(),
&allocator_) {}
~ResizeableObjectTest() { allocator_.Deallocate(object_.buffer()); }
- VectorAllocator allocator_;
+ AlignedVectorAllocator allocator_;
TestResizeableObject object_;
};
diff --git a/aos/flatbuffers/builder.h b/aos/flatbuffers/builder.h
index 36225c0..be41b63 100644
--- a/aos/flatbuffers/builder.h
+++ b/aos/flatbuffers/builder.h
@@ -18,7 +18,8 @@
template <typename T>
class Builder final : public ResizeableObject {
public:
- static constexpr size_t kBufferSize = T::kUnalignedBufferSize;
+ static constexpr size_t kBufferSize = T::kRootSize;
+ static constexpr size_t kAlign = T::kAlign;
// Note on memory initialization: We zero-initialize all the memory that we
// create at the start. While this can be overkill, it is simpler to manage
// the alternatives, and we don't currently have a clear performance need for
@@ -48,7 +49,7 @@
SetPrefix();
}
Builder(std::unique_ptr<Allocator> allocator =
- std::make_unique<VectorAllocator>())
+ std::make_unique<AlignedVectorAllocator>())
: ResizeableObject(
allocator->AllocateOrDie(kBufferSize, T::kAlign, SetZero::kYes),
std::move(allocator)),
@@ -93,7 +94,6 @@
private:
size_t Alignment() const override { return flatbuffer_.t.Alignment(); }
- size_t AbsoluteOffsetOffset() const override { return 0; }
size_t NumberOfSubObjects() const override { return 1; }
void SetPrefix() {
// We can't do much if the provided buffer isn't at least 4-byte aligned,
@@ -102,13 +102,16 @@
CHECK_EQ(reinterpret_cast<size_t>(buffer_.data()) % alignof(uoffset_t), 0u);
*reinterpret_cast<uoffset_t *>(buffer_.data()) = flatbuffer_start_;
}
- // Because the allocator API doesn't provide a way for us to request a
- // strictly aligned buffer, manually align the start of the actual flatbuffer
- // data if needed.
+ // Manually aligns the start of the actual flatbuffer to handle the alignment
+ // offset.
static size_t BufferStart(std::span<uint8_t> buffer) {
- return aos::fbs::PaddedSize(
+ CHECK_EQ(reinterpret_cast<size_t>(buffer.data()) % T::kAlign, 0u)
+ << "Failed to allocate data of length " << buffer.size()
+ << " with alignment " << T::kAlign;
+
+ return aos::fbs::AlignOffset(
reinterpret_cast<size_t>(buffer.data()) + sizeof(uoffset_t),
- T::kAlign) -
+ T::kAlign, T::kAlignOffset) -
reinterpret_cast<size_t>(buffer.data());
}
diff --git a/aos/flatbuffers/static_flatbuffers.cc b/aos/flatbuffers/static_flatbuffers.cc
index c2e0454..4c013a2 100644
--- a/aos/flatbuffers/static_flatbuffers.cc
+++ b/aos/flatbuffers/static_flatbuffers.cc
@@ -15,6 +15,7 @@
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
+#include "absl/strings/substitute.h"
#include "flatbuffers/base.h"
#include "flatbuffers/string.h"
#include "flatbuffers/vector.h"
@@ -32,6 +33,8 @@
std::string name;
// Whether it is an inline data type (scalar/struct vs vector/table/string).
bool is_inline = true;
+ // Whether the elements are inline (vector of ints vs vector of strings).
+ bool elements_are_inline = true;
// Whether this is a struct or not.
bool is_struct = false;
// Whether this is a repeated type (vector or string).
@@ -48,6 +51,8 @@
size_t inline_alignment = 0u;
// vtable offset of the field.
size_t vtable_offset = 0u;
+ // Size of the elements in the vector, if this is a vector.
+ size_t element_size = 0u;
};
const reflection::Object *GetObject(const reflection::Schema *schema,
@@ -168,6 +173,7 @@
const reflection::Type *type = field_fbs->type();
field->inline_size = type->base_size();
field->inline_alignment = type->base_size();
+ field->element_size = type->element_size();
switch (type->base_type()) {
case reflection::BaseType::Bool:
case reflection::BaseType::Byte:
@@ -183,6 +189,7 @@
// We have a scalar field, so things are relatively
// straightforwards.
field->is_inline = true;
+ field->elements_are_inline = true;
field->is_struct = false;
field->is_repeated = false;
field->full_type =
@@ -190,6 +197,7 @@
return;
case reflection::BaseType::String: {
field->is_inline = false;
+ field->elements_are_inline = true;
field->is_struct = false;
field->is_repeated = true;
field->full_type =
@@ -231,6 +239,7 @@
};
}
field->is_inline = false;
+ field->elements_are_inline = elements_are_inline;
field->is_struct = false;
field->full_type =
absl::StrFormat("::aos::fbs::Vector<%s, %d, %s, %s>", element_type,
@@ -242,6 +251,7 @@
case reflection::BaseType::Obj: {
const reflection::Object *object = GetObject(schema, type->index());
field->is_inline = object->is_struct();
+ field->elements_are_inline = field->is_inline;
field->is_struct = object->is_struct();
field->is_repeated = false;
const std::string flatbuffer_name =
@@ -281,7 +291,7 @@
const std::string constructor_body =
R"code(
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
)code";
return absl::StrFormat(R"code(
@@ -351,10 +361,14 @@
// table (scalars, structs, and enums) .
std::string MakeInlineAccessors(const FieldData &field,
const size_t inline_absolute_offset) {
- CHECK_EQ(inline_absolute_offset % field.inline_alignment, 0u)
+ constexpr size_t kVtablePointerSize = sizeof(soffset_t);
+ CHECK_EQ(
+ (inline_absolute_offset - kVtablePointerSize) % field.inline_alignment,
+ 0u)
<< ": Unaligned field " << field.name << " on " << field.full_type
<< " with inline offset of " << inline_absolute_offset
- << " and alignment of " << field.inline_alignment;
+ << " and alignment of " << field.inline_alignment
+ << " and an alignment offset of " << kVtablePointerSize;
const std::string setter =
absl::StrFormat(R"code(
// Sets the %s field, causing it to be populated if it is not already.
@@ -369,7 +383,7 @@
R"code(
// Returns the value of %s if set; nullopt otherwise.
std::optional<%s> %s() const {
- return has_%s() ? std::make_optional(Get<%s>(%s)) : std::nullopt;;
+ return has_%s() ? std::make_optional(Get<%s>(%s)) : std::nullopt;
}
// Returns a pointer to modify the %s field.
// The pointer may be invalidated by mutations/movements of the underlying buffer.
@@ -388,39 +402,82 @@
// Generates the accessors for fields which are not inline fields and have an
// offset to the actual field content stored inline in the flatbuffer table.
std::string MakeOffsetDataAccessors(const FieldData &field) {
- const std::string setter = absl::StrFormat(
+ const std::string setter = absl::Substitute(
R"code(
- // Creates an empty object for the %s field, which you can
+ // Creates an empty object for the $0 field, which you can
// then populate/modify as desired.
// The field must not be populated yet.
- %s* add_%s() {
- CHECK(!%s.has_value());
- constexpr size_t kVtableIndex = %d;
- // Construct the *Static object that we will use for managing this subtable.
- %s.emplace(BufferForObject(%s, %s::kSize, kAlign), this);
+ $1* add_$0() {
+ CHECK(!$2.has_value());
+ constexpr size_t kVtableIndex = $3;
+ // If this object does not normally have its initial memory statically allocated,
+ // allocate it now (this is used for zero-length vectors).
+ if constexpr ($1::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset = $4;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset, $1::kSize, ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for any other objects
+ // that may have been "sharing" this location. The effect of this logic
+ // is that the first object that gets populated at any given location will
+ // bump all other objects to later. This is fine, although it does mean
+ // that the order in which objects appear in memory may vary depending
+ // on the order in which they are constructed (if they start out sharing a start pointer).
+ $4 = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this subtable.
+ $5.emplace(BufferForObject($4, $1::$7), this);
+ } else {
+ // Construct the *Static object that we will use for managing this subtable.
+ $5.emplace(BufferForObject($4, $1::kSize), this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(%s, kVtableIndex, %s + %s::kOffset - %s);
- return &%s.value().t;
+ SetField<::flatbuffers::uoffset_t>($6, kVtableIndex, $4 - $6);
+ return &$5.value().t;
}
)code",
- field.name, field.full_type, field.name, MemberName(field),
- field.vtable_offset, MemberName(field), ObjectAbsoluteOffsetName(field),
- field.full_type, InlineAbsoluteOffsetName(field),
- ObjectAbsoluteOffsetName(field), field.full_type,
- InlineAbsoluteOffsetName(field), MemberName(field));
- const std::string getters = absl::StrFormat(
+ field.name, // $0
+ field.full_type, // $1
+ MemberName(field), // $2
+ field.vtable_offset, // $3
+ ObjectAbsoluteOffsetName(field), // $4
+ MemberName(field), // $5
+ InlineAbsoluteOffsetName(field), // $6
+ (field.elements_are_inline
+ // When building vectors of inline elements, we want this object to
+ // consume as much of the memory that was allocated as possible. This
+ // lets the vector use the padding for storage, saving space. Round
+ // this down to a whole number of elements, so that any trailing
+ // partial-element padding is not claimed by the vector.
+ ? "RoundedLength(inserted_bytes.value().size())"
+ // For vectors of non-inline elements, we need to pad out the inline portion
+ // of the vector (which stores offsets) to the alignment of the actual elements
+ // so we can insert elements at the end without having to allocate
+ // padding. This saves space in the long run, and lets us consume
+ // the padding for offsets if needed.
+ : "kSize") // $7
+ );
+ const std::string getters = absl::Substitute(
R"code(
- // Returns a pointer to the %s field, if set. nullptr otherwise.
- const %s* %s() const {
- return %s.has_value() ? &%s.value().t : nullptr;
+ // Returns a pointer to the $0 field, if set. nullptr otherwise.
+ const $1* $0() const {
+ return $2.has_value() ? &$2.value().t : nullptr;
}
- %s* mutable_%s() {
- return %s.has_value() ? &%s.value().t : nullptr;
+ $1* mutable_$0() {
+ return $2.has_value() ? &$2.value().t : nullptr;
}
)code",
- field.name, field.full_type, field.name, MemberName(field),
- MemberName(field), field.full_type, field.name, MemberName(field),
- MemberName(field));
+ field.name, // $0
+ field.full_type, // $1
+ MemberName(field) // $2
+ );
return setter + getters + MakeClearer(field) + MakeHaser(field);
}
@@ -452,14 +509,15 @@
// Offset from the start of the buffer to the start of the actual
// data for this field. Will be updated even when the table is not
// populated, so that we know where to construct it when requested.
- size_t %s = %s;
+ static constexpr size_t kDefaultObjectAbsoluteOffset%s = %s;
+ size_t %s = kDefaultObjectAbsoluteOffset%s;
// Offset from the start of the buffer to the offset in the inline data for
// this field.
static constexpr size_t %s = %d;
)code",
- field.name, field.full_type, MemberName(field),
- ObjectAbsoluteOffsetName(field), offset_data_absolute_offset,
- InlineAbsoluteOffsetName(field), inline_absolute_offset);
+ field.name, field.full_type, MemberName(field), field.name,
+ offset_data_absolute_offset, ObjectAbsoluteOffsetName(field),
+ field.name, InlineAbsoluteOffsetName(field), inline_absolute_offset);
}
}
@@ -651,9 +709,10 @@
}
std::string AlignCppString(const std::string_view expression,
- const std::string_view alignment) {
- return absl::StrFormat("::aos::fbs::PaddedSize(%s, %s)", expression,
- alignment);
+ const std::string_view alignment,
+ const std::string_view offset) {
+ return absl::StrCat("::aos::fbs::AlignOffset(", expression, ", ", alignment,
+ ", ", offset, ")");
}
std::string MakeInclude(std::string_view path, bool system = false) {
@@ -679,11 +738,19 @@
PopulateTypeData(schema, field_fbs, &field);
fields.push_back(field);
}
+ std::sort(fields.begin(), fields.end(),
+ [](const FieldData &f1, const FieldData &f2) {
+ return std::make_tuple(f1.inline_alignment, f1.element_size,
+ f1.vtable_offset) >
+ std::make_tuple(f2.inline_alignment, f2.element_size,
+ f2.vtable_offset);
+ });
const size_t nominal_min_align = object->minalign();
std::string out_of_line_member_size = "";
// inline_absolute_offset tracks the current position of the inline table
// contents so that we can assign static offsets to each field.
- size_t inline_absolute_offset = sizeof(soffset_t);
+ constexpr size_t kVtablePointerSize = sizeof(soffset_t);
+ size_t inline_absolute_offset = kVtablePointerSize;
// offset_data_relative_offset tracks the current size of the various
// sub-tables/vectors/strings that get stored at the end of the buffer.
// For simplicity, the offset data will start at a fully aligned offset
@@ -691,11 +758,11 @@
// Note that this is a string because it's irritating to actually pipe the
// numbers for size/alignment up here, so we just accumulate them here and
// then write the expression directly into the C++.
- std::string offset_data_relative_offset = "0";
const std::string offset_data_start_expression =
- "::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign)";
- std::string accessors;
- std::string members;
+ "(kVtableStart + kVtableSize)";
+ std::string offset_data_relative_offset = offset_data_start_expression;
+ std::vector<std::string> accessors;
+ std::vector<std::string> members;
std::set<std::string> includes = {
MakeInclude("optional", true),
MakeInclude("aos/flatbuffers/static_table.h"),
@@ -712,28 +779,32 @@
std::vector<std::string> alignments;
std::set<std::string> subobject_names;
for (const FieldData &field : fields) {
- inline_absolute_offset =
- PaddedSize(inline_absolute_offset, field.inline_alignment);
+ inline_absolute_offset = AlignOffset(
+ inline_absolute_offset, field.inline_alignment, kVtablePointerSize);
if (!field.is_inline) {
- // All sub-fields will get aligned to the parent alignment. This makes
- // some book-keeping a bit easier, at the expense of some gratuitous
- // padding.
- offset_data_relative_offset =
- AlignCppString(offset_data_relative_offset, "kAlign");
alignments.push_back(field.full_type + "::kAlign");
+ // We care about aligning each field relative to the alignment point in
+ // this flatbuffer (which is kAlignOffset into the block of memory). We
+ // then need to report out the offset relative to the start, not the
+ // alignment point.
+ offset_data_relative_offset =
+ AlignCppString(offset_data_relative_offset + " - kAlignOffset",
+ alignments.back(),
+ field.full_type + "::kAlignOffset") +
+ " + kAlignOffset";
} else {
alignments.push_back(std::to_string(field.inline_alignment));
}
- const std::string offset_data_absolute_offset =
- offset_data_start_expression + " + " + offset_data_relative_offset;
- accessors += MakeAccessors(field, inline_absolute_offset);
- members +=
- MakeMembers(field, offset_data_absolute_offset, inline_absolute_offset);
+ const std::string offset_data_absolute_offset = offset_data_relative_offset;
+ accessors.emplace_back(MakeAccessors(field, inline_absolute_offset));
+ members.emplace_back(MakeMembers(field, offset_data_absolute_offset,
+ inline_absolute_offset));
inline_absolute_offset += field.inline_size;
if (!field.is_inline) {
- offset_data_relative_offset +=
- absl::StrFormat(" + %s::kSize", field.full_type);
+ offset_data_relative_offset = absl::StrFormat(
+ "kDefaultObjectAbsoluteOffset%s + %s::kPreallocatedSize", field.name,
+ field.full_type);
}
if (field.fbs_type.has_value()) {
// Is this not getting populated for the root schema?
@@ -744,39 +815,37 @@
const std::string alignment = absl::StrCat(
"static constexpr size_t kAlign = std::max<size_t>({kMinAlign, ",
absl::StrJoin(alignments, ", "), "});\n");
- const std::string size =
- absl::StrCat("static constexpr size_t kSize = ",
- AlignCppString(offset_data_start_expression + " + " +
- offset_data_relative_offset,
- "kAlign"),
- ";");
+ // Same here, we want to align the end relative to the alignment point, but
+ // then we want to report out the size including the offset.
+ const std::string size = absl::StrCat(
+ "static constexpr size_t kSize = ",
+ AlignCppString(offset_data_relative_offset + " - kAlignOffset", "kAlign",
+ "kAlignOffset"),
+ " + kAlignOffset;");
const size_t inline_data_size = inline_absolute_offset;
const std::string constants = absl::StrFormat(
R"code(
- // Space taken up by the inline portion of the flatbuffer table data, in bytes.
+ // Space taken up by the inline portion of the flatbuffer table data, in
+ // bytes.
static constexpr size_t kInlineDataSize = %d;
// Space taken up by the vtable for this object, in bytes.
- static constexpr size_t kVtableSize = sizeof(::flatbuffers::voffset_t) * (2 + %d);
- // Offset from the start of the internal memory buffer to the start of the vtable.
- static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(kInlineDataSize, alignof(::flatbuffers::voffset_t));
- // Required alignment of this object. The buffer that this object gets constructed
- // into must be aligned to this value.
+ static constexpr size_t kVtableSize =
+ sizeof(::flatbuffers::voffset_t) * (2 + %d);
+ // Offset from the start of the internal memory buffer to the start of the
+ // vtable.
+ static constexpr size_t kVtableStart = ::aos::fbs::AlignOffset(
+ kInlineDataSize, alignof(::flatbuffers::voffset_t));
+ // Required alignment of this object. The buffer that this object gets
+ // constructed into must be aligned to this value.
%s
- // Nominal size of this object, in bytes. The object may grow beyond this size,
- // but will always start at this size and so the initial buffer must match
- // this size.
- %s
- static_assert(%d <= kAlign, "Flatbuffer schema minalign should not exceed our required alignment.");
- // Offset from the start of the memory buffer to the start of any out-of-line data (subtables,
- // vectors, strings).
+ // Offset into this object to measure the alignment at.
+ static constexpr size_t kAlignOffset = sizeof(::flatbuffers::soffset_t);
+ static_assert(
+ %d <= kAlign,
+ "Flatbuffer schema minalign should not exceed our required alignment.");
+ // Offset from the start of the memory buffer to the start of any out-of-line
+ // data (subtables, vectors, strings).
static constexpr size_t kOffsetDataStart = %s;
- // Size required for a buffer that includes a root table offset at the start.
- static constexpr size_t kRootSize = ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
- // Minimum size required to build this flatbuffer in an entirely unaligned buffer
- // (including the root table offset). Made to be a multiple of kAlign for convenience.
- static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
- // Offset at which the table vtable offset occurs. This is only needed for vectors.
- static constexpr size_t kOffset = 0;
// Various overrides to support the Table parent class.
size_t FixedVtableOffset() const final { return kVtableStart; }
size_t VtableSize() const final { return kVtableSize; }
@@ -785,10 +854,12 @@
size_t Alignment() const final { return kAlign; }
// Exposes the name of the flatbuffer type to allow interchangeable use
// of the Flatbuffer and FlatbufferStatic types in various AOS methods.
- static const char *GetFullyQualifiedName() { return Flatbuffer::GetFullyQualifiedName(); }
+ static const char *GetFullyQualifiedName() {
+ return Flatbuffer::GetFullyQualifiedName();
+ }
)code",
- inline_data_size, object->fields()->size(), alignment, size,
- nominal_min_align, offset_data_start_expression);
+ inline_data_size, object->fields()->size(), alignment, nominal_min_align,
+ offset_data_start_expression);
const std::string_view fbs_type_name = object->name()->string_view();
const std::string type_namespace = FlatbufferNameToCppName(
fbs_type_name.substr(0, fbs_type_name.find_last_of(".")));
@@ -817,13 +888,25 @@
%s
%s
%s
+ public:
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ %s
+ // Always statically allocate memory for tables (set for consistency with
+ // static_vector.h).
+ static constexpr size_t kPreallocatedSize = kSize;
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::AlignOffset(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
};
}
)code",
type_namespace, type_name, FlatbufferNameToCppName(fbs_type_name),
- constants, MakeConstructor(type_name), type_name, accessors,
- MakeFullClearer(fields), MakeCopier(fields), MakeObjectCopier(fields),
- MakeMoveConstructor(type_name), members, MakeSubObjectList(fields));
+ constants, MakeConstructor(type_name), type_name,
+ absl::StrJoin(accessors, ""), MakeFullClearer(fields), MakeCopier(fields),
+ MakeObjectCopier(fields), MakeMoveConstructor(type_name),
+ absl::StrJoin(members, ""), MakeSubObjectList(fields), size);
GeneratedObject result;
result.name = fbs_type_name;
diff --git a/aos/flatbuffers/static_flatbuffers_test.cc b/aos/flatbuffers/static_flatbuffers_test.cc
index 66938f1..4a8b9e9 100644
--- a/aos/flatbuffers/static_flatbuffers_test.cc
+++ b/aos/flatbuffers/static_flatbuffers_test.cc
@@ -108,7 +108,7 @@
// Test that compiles the same code that is used by an example in
// //aos/documentation/aos/docs/flatbuffers.md.
TEST_F(StaticFlatbuffersTest, DocumentationExample) {
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
object->set_scalar(123);
@@ -176,7 +176,7 @@
aos::FlatbufferDetachedBuffer<TestTable> fbb_finished = fbb.Release();
// Using the static flatbuffer API.
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> static_builder(&allocator);
PopulateStatic(CHECK_NOTNULL(static_builder.get()->add_subtable()));
@@ -218,7 +218,7 @@
// it stays valid at all points.
TEST_F(StaticFlatbuffersTest, ManuallyConstructFlatbuffer) {
{
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<SubTableStatic> builder(&allocator);
SubTableStatic *object = builder.get();
if (!builder.AsFlatbufferSpan().Verify()) {
@@ -241,9 +241,7 @@
TestMemory(builder.buffer());
}
{
- // aos::FixedAllocator
- // allocator(TestTableStatic::kUnalignedBufferSize);
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
const aos::fbs::testing::TestTable &fbs = object->AsFlatbuffer();
@@ -593,7 +591,12 @@
{
auto aligned_vector = object->mutable_vector_aligned();
ASSERT_TRUE(aligned_vector->reserve(100));
- EXPECT_EQ(100, aligned_vector->capacity());
+
+ VLOG(1) << AnnotateBinaries(test_schema_, builder.buffer());
+ // Since the allocator is going to allocate in blocks of 64, we end up
+ // with more capacity than we asked for. Better to have it than to leave
+ // it as unusable padding.
+ EXPECT_EQ(115, aligned_vector->capacity());
ASSERT_TRUE(builder.AsFlatbufferSpan().Verify())
<< aligned_vector->SerializationDebugString();
EXPECT_EQ(expected_contents,
@@ -659,11 +662,16 @@
{
auto unspecified_vector = object->add_unspecified_length_vector();
ASSERT_NE(nullptr, unspecified_vector);
- ASSERT_EQ(0, unspecified_vector->capacity());
+ ASSERT_EQ(60, unspecified_vector->capacity());
+ for (size_t i = 0; i < 60; ++i) {
+ ASSERT_TRUE(unspecified_vector->emplace_back(0));
+ }
ASSERT_FALSE(unspecified_vector->emplace_back(0));
- ASSERT_TRUE(unspecified_vector->reserve(2));
- ASSERT_TRUE(unspecified_vector->emplace_back(1));
- ASSERT_TRUE(unspecified_vector->emplace_back(2));
+ ASSERT_TRUE(unspecified_vector->reserve(64));
+ ASSERT_EQ(124, unspecified_vector->capacity());
+ for (size_t i = 0; i < 64; ++i) {
+ ASSERT_TRUE(unspecified_vector->emplace_back(1));
+ }
ASSERT_FALSE(unspecified_vector->emplace_back(3));
ASSERT_TRUE(builder.AsFlatbufferSpan().Verify());
}
@@ -673,7 +681,7 @@
// Tests that field clearing (and subsequent resetting) works properly.
TEST_F(StaticFlatbuffersTest, ClearFields) {
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
// For each field, we will confirm the following:
@@ -859,7 +867,7 @@
// Confirm that we can use the SpanAllocator with a span that provides exactly
// the required buffer size.
TEST_F(StaticFlatbuffersTest, ExactSizeSpanAllocator) {
- uint8_t buffer[Builder<TestTableStatic>::kBufferSize];
+ alignas(64) uint8_t buffer[Builder<TestTableStatic>::kBufferSize];
aos::fbs::SpanAllocator allocator({buffer, sizeof(buffer)});
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
@@ -922,17 +930,21 @@
// Verify that if we create a span with extra headroom that that lets us
// dynamically alter the size of vectors in the flatbuffers.
TEST_F(StaticFlatbuffersTest, ExtraLargeSpanAllocator) {
- uint8_t buffer[Builder<TestTableStatic>::kBufferSize + 10000];
+ alignas(64) uint8_t buffer[Builder<TestTableStatic>::kBufferSize + 200 * 64];
aos::fbs::SpanAllocator allocator({buffer, sizeof(buffer)});
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
{
auto vector = object->add_unspecified_length_vector();
// Confirm that the vector does indeed start out at zero length.
+ ASSERT_EQ(vector->capacity(), 60);
+ for (size_t i = 0; i < 60; ++i) {
+ ASSERT_TRUE(vector->emplace_back(i));
+ }
ASSERT_FALSE(vector->emplace_back(4));
ASSERT_TRUE(vector->reserve(9000));
vector->resize(256);
- for (size_t index = 0; index < 256; ++index) {
+ for (size_t index = 60; index < 256; ++index) {
vector->at(index) = static_cast<uint8_t>(index);
}
}
@@ -951,7 +963,7 @@
// Tests that the iterators on the Vector type work.
TEST_F(StaticFlatbuffersTest, IteratorTest) {
- Builder<TestTableStatic> builder(std::make_unique<VectorAllocator>());
+ Builder<TestTableStatic> builder(std::make_unique<AlignedVectorAllocator>());
{
auto vector = builder->add_unspecified_length_vector();
ASSERT_TRUE(vector->reserve(9000));
@@ -1019,7 +1031,8 @@
// Confirm that we can use the FixedStackAllocator
TEST_F(StaticFlatbuffersTest, FixedStackAllocator) {
- aos::fbs::FixedStackAllocator<Builder<TestTableStatic>::kBufferSize>
+ aos::fbs::FixedStackAllocator<Builder<TestTableStatic>::kBufferSize,
+ Builder<TestTableStatic>::kAlign>
allocator;
Builder<TestTableStatic> builder(&allocator);
TestTableStatic *object = builder.get();
@@ -1078,7 +1091,7 @@
object_t.vector_of_strings.push_back("971");
object_t.vector_of_structs.push_back({1, 2});
object_t.subtable = std::make_unique<SubTableT>();
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> builder(&allocator);
ASSERT_TRUE(builder->FromFlatbuffer(object_t));
ASSERT_TRUE(builder.AsFlatbufferSpan().Verify());
@@ -1123,7 +1136,7 @@
// Tests that we can use the move constructor on a Builder.
TEST_F(StaticFlatbuffersTest, BuilderMoveConstructor) {
- uint8_t buffer[Builder<TestTableStatic>::kBufferSize];
+ alignas(64) uint8_t buffer[Builder<TestTableStatic>::kBufferSize];
aos::fbs::SpanAllocator allocator({buffer, sizeof(buffer)});
Builder<TestTableStatic> builder_from(&allocator);
Builder<TestTableStatic> builder(std::move(builder_from));
diff --git a/aos/flatbuffers/static_table.h b/aos/flatbuffers/static_table.h
index e9619a7..a7f96d1 100644
--- a/aos/flatbuffers/static_table.h
+++ b/aos/flatbuffers/static_table.h
@@ -17,7 +17,13 @@
// Every table will be aligned to the greatest alignment of all of its members
// and its size will be equal to a multiple of the alignment. Each table shall
// have the following layout: [vtable offset; inline data with padding; vtable;
-// padding; table/vector data with padding]
+// table/vector data with padding]
+//
+// Within the table/vector data with padding section, there will be a chunk of
+// memory for the data associated with each sub-table/vector of the table.
+// That memory will start with some padding and then the memory actually
+// allocated to the table/vector in question will start at said
+// sub-table/vector's individual alignment.
class Table : public ResizeableObject {
public:
// Prints out a debug string of the raw flatbuffer memory. Does not currently
@@ -57,7 +63,6 @@
virtual size_t VtableSize() const = 0;
virtual size_t InlineTableSize() const = 0;
virtual size_t OffsetDataStart() const = 0;
- size_t AbsoluteOffsetOffset() const override { return 0; }
void PopulateVtable() {
// Zero out everything up to the start of the sub-messages/tables, which are
// responsible for doing their own memory initialization.
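For reference, the layout described in the comment above can be sketched with the constants emitted by static_flatbuffers.cc (illustrative only, offsets not to scale):

// [0]                 soffset_t vtable offset (kAlignOffset bytes).
// [kAlignOffset]      inline fields plus padding; kAlign is measured from here.
// [kVtableStart]      the vtable itself (voffset_t entries).
// [kOffsetDataStart]  one chunk per sub-table/vector: padding first, then the
//                     subobject's data starting at its required alignment.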
diff --git a/aos/flatbuffers/static_vector.h b/aos/flatbuffers/static_vector.h
index bf706e0..da250e2 100644
--- a/aos/flatbuffers/static_vector.h
+++ b/aos/flatbuffers/static_vector.h
@@ -26,10 +26,11 @@
// FlatbufferType: The type used by flatbuffers::Vector to store this type.
// ConstFlatbufferType: The type used by a const flatbuffers::Vector to store
// this type.
-// kDataAlign: Alignment required by the stored type.
-// kDataSize: Nominal size required by each non-inline data member. This is
-// what will be initially allocated; once created, individual members may
-// grow to accommodate dynamically lengthed vectors.
+// kDataElementAlign: Alignment required by the stored type.
+// kDataElementSize: Nominal size required by each non-inline data member.
+// This is what will be initially allocated; once created, individual
+// members may grow to accommodate dynamically sized vectors.
+// kDataElementAlignOffset: Alignment offset required by the stored type.
template <typename T, bool kInline, class Enable = void>
struct InlineWrapper;
} // namespace internal
@@ -92,15 +93,14 @@
// To maintain general simplicity, we will use the second condition and eat
// the cost of the potential extra few bytes of padding.
// * The layout of the buffer will thus be:
-// [padding; element_count; inline_data; padding; offset_data]
-// The first padding will be of size max(0, kAlign - 4).
+// [element_count; inline_data; padding; offset_data]
// The element_count is of size 4.
// The inline_data is of size sizeof(InlineType) * kStaticLength.
-// The second padding is of size
-// (kAlign - ((sizeof(InlineType) * kStaticLength) % kAlign)).
+// The padding is sized such that, when offset_data is present, the inline_data
+// plus the padding ends exactly where the offset_data must start to satisfy
+// its alignment.
// The remaining data is only present if kInline is false.
-// The offset data is of size T::kSize * kStaticLength. T::kSize % T::kAlign
-// must be zero.
+//   The offset data is of size kDataElementSize * kStaticLength, where
+//   kDataElementSize is T::kSize rounded up to a multiple of T::kAlign.
// Note that no padding is required on the end because T::kAlign will always
// end up being equal to the alignment (this can only be violated if
// kForceAlign is used, but we do not allow that).
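A small sketch of the arithmetic the comment above describes, under assumed template arguments (an inline vector of three int32_t elements, no null terminator, no forced alignment); the helper restates the layout pieces and is not part of this header:

  #include <cstddef>
  #include <cstdint>

  // Mirrors the layout pieces named above for the kInline case.
  constexpr std::size_t ExampleInlineVectorSize(std::size_t static_length,
                                                std::size_t inline_type_size) {
    constexpr std::size_t kLengthSize = sizeof(std::uint32_t);  // element_count
    const std::size_t inline_size = inline_type_size * static_length;
    const std::size_t padding = 0;      // inline vectors get no trailing padding
    const std::size_t offset_data = 0;  // only present when !kInline
    return kLengthSize + inline_size + padding + offset_data;
  }
  // 4-byte length word + 3 * 4 bytes of inline data == 16 bytes total.
  static_assert(ExampleInlineVectorSize(3, sizeof(std::int32_t)) == 16, "");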
@@ -196,7 +196,7 @@
// Type stored inline in the serialized vector (offsets for tables/strings; T
// otherwise).
using InlineType = typename internal::InlineWrapper<T, kInline>::Type;
- // OUt-of-line type for out-of-line T.
+ // Out-of-line type for out-of-line T.
using ObjectType = typename internal::InlineWrapper<T, kInline>::ObjectType;
// Type used as the template parameter to flatbuffers::Vector<>.
using FlatbufferType =
@@ -216,52 +216,96 @@
std::max(kForceAlign, alignof(InlineType));
// Type used for serializing the length of the vector.
typedef uint32_t LengthType;
+ static constexpr size_t kDataElementAlign =
+ internal::InlineWrapper<T, kInline>::kDataElementAlign;
+ static constexpr size_t kDataElementAlignOffset =
+ internal::InlineWrapper<T, kInline>::kDataElementAlignOffset;
+ // Per-element size of any out-of-line data.
+ static constexpr size_t kDataElementSize =
+ internal::InlineWrapper<T, kInline>::kDataElementSize;
// Overall alignment of this type, and required alignment of the buffer that
// must be provided to the Vector.
static constexpr size_t kAlign =
- std::max({alignof(LengthType), kInlineAlign,
- internal::InlineWrapper<T, kInline>::kDataAlign});
- // Padding inserted prior to the length element of the vector (to manage
- // alignment of the data properly; see class comment)
- static constexpr size_t kPadding1 =
- std::max<size_t>(0, kAlign - sizeof(LengthType));
+ std::max({alignof(LengthType), kInlineAlign, kDataElementAlign});
+  // Offset into the buffer at which the required alignment (kAlign) is
+  // measured; the byte at this offset must be kAlign-aligned.
+ static constexpr size_t kAlignOffset = sizeof(LengthType);
+
// Size of the vector length field.
static constexpr size_t kLengthSize = sizeof(LengthType);
// Size of all the inline vector data, including null termination (prior to
// any dynamic increases in size).
static constexpr size_t kInlineSize =
sizeof(InlineType) * (kStaticLength + (kNullTerminate ? 1 : 0));
- // Per-element size of any out-of-line data.
- static constexpr size_t kDataElementSize =
- internal::InlineWrapper<T, kInline>::kDataSize;
+
// Padding between the inline data and any out-of-line data, to manage
// mismatches in alignment between the two.
- static constexpr size_t kPadding2 = kAlign - (kInlineSize % kAlign);
+ //
+ // For inline vectors, we don't want to add any extra padding. The allocator
+ // will add extra padding if needed and communicate it to our constructor.
+ //
+ // For non-inline vectors, we need to pad out the offsets so that their end
+ // ends up kDataElementAlignOffset before the aligned start of the elements.
+ //
+  // This pads kInlineSize out so that the out-of-line data starts at the
+  // alignment offset required by the stored elements.
+ static constexpr size_t kPadding1 =
+ kInline
+ ? 0
+          : ((kAlign - ((kInlineSize + kAlign /* Add kAlign to guarantee we
+ don't mod a negative number */
+ - kDataElementAlignOffset) %
+ kAlign)) %
+ kAlign);
// Total statically allocated space for any out-of-line data ("offset data")
// (prior to any dynamic increases in size).
static constexpr size_t kOffsetOffsetDataSize =
kInline ? 0 : (kStaticLength * kDataElementSize);
// Total nominal size of the Vector.
static constexpr size_t kSize =
- kPadding1 + kLengthSize + kInlineSize + kPadding2 + kOffsetOffsetDataSize;
- // Offset from the start of the provided buffer to where the actual start of
- // the vector is.
- static constexpr size_t kOffset = kPadding1;
- // Constructors; the provided buffer must be aligned to kAlign and be kSize in
- // length. parent must be non-null.
+ kLengthSize + kInlineSize + kPadding1 + kOffsetOffsetDataSize;
+ // If this is 0, then the parent object will not plan to statically
+ // reserve any memory for this object and will only reserve memory when the
+ // user requests creation of this object. This makes it so that zero-length
+  // vectors (which would require dynamic allocation *anyway* to actually be
+ // helpful) do not use up memory when unpopulated.
+ static constexpr size_t kPreallocatedSize = (kStaticLength > 0) ? kSize : 0;
+
+ // Returns the buffer size (in bytes) needed to hold the largest number of
+ // elements that can fit fully in the provided length (in bytes). This lets
+ // us compute how much of the padding we can fill with elements.
+ static constexpr size_t RoundedLength(size_t length) {
+ constexpr size_t overall_element_size =
+ sizeof(InlineType) + (kInline ? 0 : kDataElementSize);
+ return ((length - kLengthSize) / overall_element_size) *
+ overall_element_size +
+ kLengthSize;
+ }
+
+  // Constructors; the provided buffer must be at least kSize bytes long and
+  // aligned such that the byte at kAlignOffset is kAlign-aligned. parent must
+  // be non-null.
Vector(std::span<uint8_t> buffer, ResizeableObject *parent)
: ResizeableObject(buffer, parent) {
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
- CHECK_EQ(kSize, buffer.size());
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
+ CHECK_LE(kSize, buffer.size());
+ if constexpr (kInline) {
+ // If everything is inline, it costs us nothing to consume the padding and
+ // use it for holding elements. For something like a short string in 8
+ // byte aligned space, this saves a second 8 byte allocation for the data.
+ allocated_length_ = (buffer.size() - kLengthSize) / sizeof(InlineType) -
+ (kNullTerminate ? 1 : 0);
+ }
SetLength(0u);
if (!kInline) {
// Initialize the offsets for any sub-tables. These are used to track
// where each table will get serialized in memory as memory gets
// resized/moved around.
+ //
+ // We don't want to expand allocated_length_ here because that would then
+ // imply we have more memory for elements too, which we don't.
for (size_t index = 0; index < kStaticLength; ++index) {
- object_absolute_offsets_.emplace_back(kPadding1 + kLengthSize +
- kInlineSize + kPadding2 +
- index * kDataElementSize);
+ object_absolute_offsets_.emplace_back(
+ kLengthSize + kInlineSize + kPadding1 + index * kDataElementSize);
}
}
}
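A sketch of the RoundedLength() arithmetic above with assumed inputs; the helper below simply restates the formula and is not the class method:

  #include <cstddef>
  #include <cstdint>

  constexpr std::size_t ExampleRoundedLength(std::size_t length,
                                             std::size_t overall_element_size) {
    constexpr std::size_t kLengthSize = sizeof(std::uint32_t);
    return ((length - kLengthSize) / overall_element_size) *
               overall_element_size +
           kLengthSize;
  }
  // A 13-byte buffer for a uint8_t vector keeps all 9 spare bytes as elements...
  static_assert(ExampleRoundedLength(13, sizeof(std::uint8_t)) == 13, "");
  // ...while an int32_t vector rounds the same buffer down to 2 whole elements.
  static_assert(ExampleRoundedLength(13, sizeof(std::int32_t)) == 12, "");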
@@ -298,9 +342,28 @@
if (new_length > allocated_length_) {
const size_t new_elements = new_length - allocated_length_;
// First, we must add space for our new inline elements.
- if (!InsertBytes(
- inline_data() + allocated_length_ + (kNullTerminate ? 1 : 0),
- new_elements * sizeof(InlineType), SetZero::kYes)) {
+ std::optional<std::span<uint8_t>> inserted_bytes;
+
+ if (allocated_length_ == 0) {
+        // If we have padding and the padding is big enough to hold the new
+        // elements, use it. This only consumes the padding in the case where
+        // we have a non-inline object but are allocating little enough data
+        // that the padding suffices.
+ //
+ // TODO(austin): Use the padding when we are adding large numbers of
+ // elements too.
+ if (new_elements * sizeof(InlineType) <= kPadding1) {
+ inserted_bytes = internal::GetSubSpan(vector_buffer(), kLengthSize,
+ kPadding1 / sizeof(InlineType));
+ }
+ }
+
+ if (!inserted_bytes.has_value()) {
+ inserted_bytes = InsertBytes(
+ inline_data() + allocated_length_ + (kNullTerminate ? 1 : 0),
+ new_elements * sizeof(InlineType), SetZero::kYes);
+ }
+ if (!inserted_bytes.has_value()) {
return false;
}
if (!kInline) {
@@ -319,6 +382,14 @@
index * kDataElementSize);
}
objects_.reserve(new_length);
+ } else {
+ // If we allocated memory, and the elements are inline (so we don't have
+ // to deal with allocating elements too), consume any extra space
+ // allocated as extra elements.
+ if (new_elements * sizeof(InlineType) < inserted_bytes->size()) {
+ new_length +=
+ inserted_bytes->size() / sizeof(InlineType) - new_elements;
+ }
}
allocated_length_ = new_length;
}
@@ -545,7 +616,7 @@
std::stringstream str;
str << "Raw Size: " << kSize << " alignment: " << kAlign
<< " allocated length: " << allocated_length_ << " inline alignment "
- << kInlineAlign << " kPadding1 " << kPadding1 << "\n";
+ << kInlineAlign << " \n";
str << "Observed length " << GetLength() << " (expected " << length_
<< ")\n";
str << "Inline Size " << kInlineSize << " Inline bytes/value:\n";
@@ -555,7 +626,7 @@
internal::GetSubSpan(vector_buffer(), kLengthSize,
sizeof(InlineType) * allocated_length_),
str);
- str << "kPadding2 " << kPadding2 << " offset data size "
+ str << "kPadding1 " << kPadding1 << " offset data size "
<< kOffsetOffsetDataSize << "\n";
return str.str();
}
@@ -567,17 +638,12 @@
Vector(Vector &&) = default;
private:
- // See kAlign and kOffset.
+ // See kAlign.
size_t Alignment() const final { return kAlign; }
- size_t AbsoluteOffsetOffset() const override { return kOffset; }
// Returns a buffer that starts at the start of the vector itself (past any
// padding).
- std::span<uint8_t> vector_buffer() {
- return internal::GetSubSpan(buffer(), kPadding1);
- }
- std::span<const uint8_t> vector_buffer() const {
- return internal::GetSubSpan(buffer(), kPadding1);
- }
+ std::span<uint8_t> vector_buffer() { return buffer(); }
+ std::span<const uint8_t> vector_buffer() const { return buffer(); }
bool AddInlineElement(InlineType e) {
if (length_ == allocated_length_) {
@@ -767,9 +833,11 @@
typedef flatbuffers::Offset<typename T::Flatbuffer> FlatbufferType;
typedef flatbuffers::Offset<typename T::Flatbuffer> ConstFlatbufferType;
typedef T::FlatbufferObjectType FlatbufferObjectType;
- static_assert((T::kSize % T::kAlign) == 0);
- static constexpr size_t kDataAlign = T::kAlign;
- static constexpr size_t kDataSize = T::kSize;
+ static constexpr size_t kDataElementAlign = T::kAlign;
+ static constexpr size_t kDataElementAlignOffset = T::kAlignOffset;
+ static constexpr size_t kDataElementSize =
+ ((T::kSize + T::kAlign - 1) / T::kAlign) * T::kAlign;
+ static_assert((kDataElementSize % kDataElementAlign) == 0);
template <typename StaticVector>
static bool FromFlatbuffer(
StaticVector *to, const typename StaticVector::ConstFlatbuffer &from) {
@@ -790,8 +858,9 @@
typedef T FlatbufferType;
typedef T ConstFlatbufferType;
typedef T *FlatbufferObjectType;
- static constexpr size_t kDataAlign = alignof(T);
- static constexpr size_t kDataSize = sizeof(T);
+ static constexpr size_t kDataElementAlign = alignof(T);
+ static constexpr size_t kDataElementAlignOffset = 0;
+ static constexpr size_t kDataElementSize = sizeof(T);
template <typename StaticVector>
static bool FromFlatbuffer(
StaticVector *to, const typename StaticVector::ConstFlatbuffer &from) {
@@ -810,8 +879,9 @@
typedef uint8_t FlatbufferType;
typedef uint8_t ConstFlatbufferType;
typedef uint8_t *FlatbufferObjectType;
- static constexpr size_t kDataAlign = 1u;
- static constexpr size_t kDataSize = 1u;
+ static constexpr size_t kDataElementAlign = 1u;
+ static constexpr size_t kDataElementAlignOffset = 0;
+ static constexpr size_t kDataElementSize = 1u;
template <typename StaticVector>
static bool FromFlatbuffer(
StaticVector *to, const typename StaticVector::ConstFlatbuffer &from) {
@@ -833,8 +903,9 @@
typedef T *FlatbufferType;
typedef const T *ConstFlatbufferType;
typedef T *FlatbufferObjectType;
- static constexpr size_t kDataAlign = alignof(T);
- static constexpr size_t kDataSize = sizeof(T);
+ static constexpr size_t kDataElementAlign = alignof(T);
+ static constexpr size_t kDataElementAlignOffset = 0;
+ static constexpr size_t kDataElementSize = sizeof(T);
template <typename StaticVector>
static bool FromFlatbuffer(
StaticVector *to, const typename StaticVector::ConstFlatbuffer &from) {
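The kDataElementSize definitions above replace the old requirement that T::kSize already be a multiple of T::kAlign with an explicit round-up. A sketch of that round-up with assumed numbers (the helper is illustrative, not the header's code):

  #include <cstddef>

  constexpr std::size_t ExampleRoundUpToAlign(std::size_t size,
                                              std::size_t align) {
    return ((size + align - 1) / align) * align;
  }
  // A hypothetical subtable of 20 bytes with 8-byte alignment is budgeted 24
  // bytes per element; an already-aligned 16-byte element stays at 16.
  static_assert(ExampleRoundUpToAlign(20, 8) == 24, "");
  static_assert(ExampleRoundUpToAlign(16, 8) == 16, "");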
diff --git a/aos/flatbuffers/test_dir/sample_test_static.h b/aos/flatbuffers/test_dir/sample_test_static.h
index 57ac57a..de88c92 100644
--- a/aos/flatbuffers/test_dir/sample_test_static.h
+++ b/aos/flatbuffers/test_dir/sample_test_static.h
@@ -31,34 +31,20 @@
sizeof(::flatbuffers::voffset_t) * (2 + 1);
// Offset from the start of the internal memory buffer to the start of the
// vtable.
- static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(
+ static constexpr size_t kVtableStart = ::aos::fbs::AlignOffset(
kInlineDataSize, alignof(::flatbuffers::voffset_t));
// Required alignment of this object. The buffer that this object gets
// constructed into must be aligned to this value.
static constexpr size_t kAlign = std::max<size_t>({kMinAlign, 1});
- // Nominal size of this object, in bytes. The object may grow beyond this
- // size, but will always start at this size and so the initial buffer must
- // match this size.
- static constexpr size_t kSize = ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) + 0, kAlign);
+ // Offset into this object to measure the alignment at.
+ static constexpr size_t kAlignOffset = sizeof(::flatbuffers::soffset_t);
static_assert(
1 <= kAlign,
"Flatbuffer schema minalign should not exceed our required alignment.");
// Offset from the start of the memory buffer to the start of any out-of-line
// data (subtables, vectors, strings).
- static constexpr size_t kOffsetDataStart =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign);
- // Size required for a buffer that includes a root table offset at the start.
- static constexpr size_t kRootSize =
- ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
- // Minimum size required to build this flatbuffer in an entirely unaligned
- // buffer (including the root table offset). Made to be a multiple of kAlign
- // for convenience.
- static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
- // Offset at which the table vtable offset occurs. This is only needed for
- // vectors.
- static constexpr size_t kOffset = 0;
+ static constexpr size_t kOffsetDataStart = (kVtableStart + kVtableSize);
// Various overrides to support the Table parent class.
size_t FixedVtableOffset() const final { return kVtableStart; }
size_t VtableSize() const final { return kVtableSize; }
@@ -82,14 +68,16 @@
::aos::fbs::ResizeableObject *parent)
: Table(buffer, parent) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
MinimallyAlignedTableStatic(std::span<uint8_t> buffer,
::aos::fbs::Allocator *allocator)
: Table(buffer, allocator) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
MinimallyAlignedTableStatic(
@@ -97,7 +85,8 @@
::std::unique_ptr<::aos::fbs::Allocator> allocator)
: Table(buffer, ::std::move(allocator)) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
@@ -114,7 +103,6 @@
return has_field()
? std::make_optional(Get<uint8_t>(kInlineAbsoluteOffset_field))
: std::nullopt;
- ;
}
// Returns a pointer to modify the field field.
// The pointer may be invalidated by mutations/movements of the underlying
@@ -186,6 +174,21 @@
size_t NumberOfSubObjects() const final { return 0; }
using ::aos::fbs::ResizeableObject::SubObject;
SubObject GetSubObject(size_t) final { LOG(FATAL) << "No subobjects."; }
+
+ public:
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ static constexpr size_t kSize =
+ ::aos::fbs::AlignOffset((kVtableStart + kVtableSize) - kAlignOffset,
+ kAlign, kAlignOffset) +
+ kAlignOffset;
+ // Always statically allocate memory for tables (set for consistency with
+ // static_vector.h).
+ static constexpr size_t kPreallocatedSize = kSize;
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::AlignOffset(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
};
} // namespace aos::fbs::testing
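The constructors and the kSize/kRootSize definitions above all hinge on the same contract: it is the byte kAlignOffset past the start of the buffer, not the buffer start itself, that must be kAlign-aligned. A tiny illustrative predicate restating the CHECK_EQ condition (not part of the generated code):

  #include <cstddef>
  #include <cstdint>

  inline bool SatisfiesAlignmentContract(const std::uint8_t *data,
                                         std::size_t align,
                                         std::size_t align_offset) {
    // Mirrors the generated check:
    //   CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign)
    return reinterpret_cast<std::uintptr_t>(data + align_offset) % align == 0;
  }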
@@ -211,34 +214,20 @@
sizeof(::flatbuffers::voffset_t) * (2 + 3);
// Offset from the start of the internal memory buffer to the start of the
// vtable.
- static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(
+ static constexpr size_t kVtableStart = ::aos::fbs::AlignOffset(
kInlineDataSize, alignof(::flatbuffers::voffset_t));
// Required alignment of this object. The buffer that this object gets
// constructed into must be aligned to this value.
static constexpr size_t kAlign = std::max<size_t>({kMinAlign, 4, 2});
- // Nominal size of this object, in bytes. The object may grow beyond this
- // size, but will always start at this size and so the initial buffer must
- // match this size.
- static constexpr size_t kSize = ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) + 0, kAlign);
+ // Offset into this object to measure the alignment at.
+ static constexpr size_t kAlignOffset = sizeof(::flatbuffers::soffset_t);
static_assert(
1 <= kAlign,
"Flatbuffer schema minalign should not exceed our required alignment.");
// Offset from the start of the memory buffer to the start of any out-of-line
// data (subtables, vectors, strings).
- static constexpr size_t kOffsetDataStart =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign);
- // Size required for a buffer that includes a root table offset at the start.
- static constexpr size_t kRootSize =
- ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
- // Minimum size required to build this flatbuffer in an entirely unaligned
- // buffer (including the root table offset). Made to be a multiple of kAlign
- // for convenience.
- static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
- // Offset at which the table vtable offset occurs. This is only needed for
- // vectors.
- static constexpr size_t kOffset = 0;
+ static constexpr size_t kOffsetDataStart = (kVtableStart + kVtableSize);
// Various overrides to support the Table parent class.
size_t FixedVtableOffset() const final { return kVtableStart; }
size_t VtableSize() const final { return kVtableSize; }
@@ -262,20 +251,23 @@
::aos::fbs::ResizeableObject *parent)
: Table(buffer, parent) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
SubTableStatic(std::span<uint8_t> buffer, ::aos::fbs::Allocator *allocator)
: Table(buffer, allocator) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
SubTableStatic(std::span<uint8_t> buffer,
::std::unique_ptr<::aos::fbs::Allocator> allocator)
: Table(buffer, ::std::move(allocator)) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
@@ -291,7 +283,6 @@
std::optional<float> baz() const {
return has_baz() ? std::make_optional(Get<float>(kInlineAbsoluteOffset_baz))
: std::nullopt;
- ;
}
// Returns a pointer to modify the baz field.
// The pointer may be invalidated by mutations/movements of the underlying
@@ -317,7 +308,6 @@
return has_foo()
? std::make_optional(Get<int16_t>(kInlineAbsoluteOffset_foo))
: std::nullopt;
- ;
}
// Returns a pointer to modify the foo field.
// The pointer may be invalidated by mutations/movements of the underlying
@@ -400,6 +390,21 @@
size_t NumberOfSubObjects() const final { return 0; }
using ::aos::fbs::ResizeableObject::SubObject;
SubObject GetSubObject(size_t) final { LOG(FATAL) << "No subobjects."; }
+
+ public:
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ static constexpr size_t kSize =
+ ::aos::fbs::AlignOffset((kVtableStart + kVtableSize) - kAlignOffset,
+ kAlign, kAlignOffset) +
+ kAlignOffset;
+ // Always statically allocate memory for tables (set for consistency with
+ // static_vector.h).
+ static constexpr size_t kPreallocatedSize = kSize;
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::AlignOffset(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
};
} // namespace aos::fbs::testing
@@ -425,87 +430,33 @@
sizeof(::flatbuffers::voffset_t) * (2 + 13);
// Offset from the start of the internal memory buffer to the start of the
// vtable.
- static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(
+ static constexpr size_t kVtableStart = ::aos::fbs::AlignOffset(
kInlineDataSize, alignof(::flatbuffers::voffset_t));
// Required alignment of this object. The buffer that this object gets
// constructed into must be aligned to this value.
static constexpr size_t kAlign = std::max<size_t>(
- {kMinAlign, aos::fbs::testing::included::IncludedTableStatic::kAlign, 4,
- ::aos::fbs::String<20>::kAlign, 8,
- aos::fbs::testing::SubTableStatic::kAlign, ::aos::fbs::String<0>::kAlign,
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kAlign,
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kAlign,
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kAlign,
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kAlign,
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kAlign,
+ {kMinAlign, 8,
::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kAlign,
::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
- 0>::kAlign});
+ 0>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kAlign,
+ ::aos::fbs::String<0>::kAlign,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kAlign,
+ aos::fbs::testing::included::IncludedTableStatic::kAlign,
+ aos::fbs::testing::SubTableStatic::kAlign,
+ ::aos::fbs::String<20>::kAlign, 4});
- // Nominal size of this object, in bytes. The object may grow beyond this
- // size, but will always start at this size and so the initial buffer must
- // match this size.
- static constexpr size_t kSize = ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- 0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::
- kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::
- SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0,
- false, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
- 0>::kSize,
- kAlign);
+ // Offset into this object to measure the alignment at.
+ static constexpr size_t kAlignOffset = sizeof(::flatbuffers::soffset_t);
static_assert(
1 <= kAlign,
"Flatbuffer schema minalign should not exceed our required alignment.");
// Offset from the start of the memory buffer to the start of any out-of-line
// data (subtables, vectors, strings).
- static constexpr size_t kOffsetDataStart =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign);
- // Size required for a buffer that includes a root table offset at the start.
- static constexpr size_t kRootSize =
- ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
- // Minimum size required to build this flatbuffer in an entirely unaligned
- // buffer (including the root table offset). Made to be a multiple of kAlign
- // for convenience.
- static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
- // Offset at which the table vtable offset occurs. This is only needed for
- // vectors.
- static constexpr size_t kOffset = 0;
+ static constexpr size_t kOffsetDataStart = (kVtableStart + kVtableSize);
// Various overrides to support the Table parent class.
size_t FixedVtableOffset() const final { return kVtableStart; }
size_t VtableSize() const final { return kVtableSize; }
@@ -529,129 +480,28 @@
::aos::fbs::ResizeableObject *parent)
: Table(buffer, parent) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
TestTableStatic(std::span<uint8_t> buffer, ::aos::fbs::Allocator *allocator)
: Table(buffer, allocator) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
TestTableStatic(std::span<uint8_t> buffer,
::std::unique_ptr<::aos::fbs::Allocator> allocator)
: Table(buffer, ::std::move(allocator)) {
CHECK_EQ(buffer.size(), kSize);
- CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(0u,
+ reinterpret_cast<size_t>(buffer.data() + kAlignOffset) % kAlign);
PopulateVtable();
}
virtual ~TestTableStatic() {}
- // Creates an empty object for the included_table field, which you can
- // then populate/modify as desired.
- // The field must not be populated yet.
- aos::fbs::testing::included::IncludedTableStatic *add_included_table() {
- CHECK(!included_table_.has_value());
- constexpr size_t kVtableIndex = 22;
- // Construct the *Static object that we will use for managing this subtable.
- included_table_.emplace(
- BufferForObject(object_absolute_offset_included_table,
- aos::fbs::testing::included::IncludedTableStatic::kSize,
- kAlign),
- this);
- // Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_included_table, kVtableIndex,
- object_absolute_offset_included_table +
- aos::fbs::testing::included::IncludedTableStatic::kOffset -
- kInlineAbsoluteOffset_included_table);
- return &included_table_.value().t;
- }
-
- // Returns a pointer to the included_table field, if set. nullptr otherwise.
- const aos::fbs::testing::included::IncludedTableStatic *included_table()
- const {
- return included_table_.has_value() ? &included_table_.value().t : nullptr;
- }
- aos::fbs::testing::included::IncludedTableStatic *mutable_included_table() {
- return included_table_.has_value() ? &included_table_.value().t : nullptr;
- }
-
- // Clears the included_table field. This will cause has_included_table() to
- // return false.
- void clear_included_table() {
- included_table_.reset();
- ClearField(kInlineAbsoluteOffset_included_table, 4, 22);
- }
-
- // Returns true if the included_table field is set and can be accessed.
- bool has_included_table() const {
- return AsFlatbuffer().has_included_table();
- }
-
- // Sets the scalar field, causing it to be populated if it is not already.
- // This will populate the field even if the specified value is the default.
- void set_scalar(const int32_t &value) {
- SetField<int32_t>(kInlineAbsoluteOffset_scalar, 4, value);
- }
-
- // Returns the value of scalar if set; nullopt otherwise.
- std::optional<int32_t> scalar() const {
- return has_scalar()
- ? std::make_optional(Get<int32_t>(kInlineAbsoluteOffset_scalar))
- : std::nullopt;
- ;
- }
- // Returns a pointer to modify the scalar field.
- // The pointer may be invalidated by mutations/movements of the underlying
- // buffer. Returns nullptr if the field is not set.
- int32_t *mutable_scalar() {
- return has_scalar() ? MutableGet<int32_t>(kInlineAbsoluteOffset_scalar)
- : nullptr;
- }
-
- // Clears the scalar field. This will cause has_scalar() to return false.
- void clear_scalar() { ClearField(kInlineAbsoluteOffset_scalar, 4, 4); }
-
- // Returns true if the scalar field is set and can be accessed.
- bool has_scalar() const { return AsFlatbuffer().has_scalar(); }
-
- // Creates an empty object for the string field, which you can
- // then populate/modify as desired.
- // The field must not be populated yet.
- ::aos::fbs::String<20> *add_string() {
- CHECK(!string_.has_value());
- constexpr size_t kVtableIndex = 8;
- // Construct the *Static object that we will use for managing this subtable.
- string_.emplace(BufferForObject(object_absolute_offset_string,
- ::aos::fbs::String<20>::kSize, kAlign),
- this);
- // Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_string, kVtableIndex,
- object_absolute_offset_string + ::aos::fbs::String<20>::kOffset -
- kInlineAbsoluteOffset_string);
- return &string_.value().t;
- }
-
- // Returns a pointer to the string field, if set. nullptr otherwise.
- const ::aos::fbs::String<20> *string() const {
- return string_.has_value() ? &string_.value().t : nullptr;
- }
- ::aos::fbs::String<20> *mutable_string() {
- return string_.has_value() ? &string_.value().t : nullptr;
- }
-
- // Clears the string field. This will cause has_string() to return false.
- void clear_string() {
- string_.reset();
- ClearField(kInlineAbsoluteOffset_string, 4, 8);
- }
-
- // Returns true if the string field is set and can be accessed.
- bool has_string() const { return AsFlatbuffer().has_string(); }
-
// Sets the substruct field, causing it to be populated if it is not already.
// This will populate the field even if the specified value is the default.
void set_substruct(const aos::fbs::testing::SubStruct &value) {
@@ -665,7 +515,6 @@
? std::make_optional(Get<aos::fbs::testing::SubStruct>(
kInlineAbsoluteOffset_substruct))
: std::nullopt;
- ;
}
// Returns a pointer to modify the substruct field.
// The pointer may be invalidated by mutations/movements of the underlying
@@ -685,58 +534,551 @@
// Returns true if the substruct field is set and can be accessed.
bool has_substruct() const { return AsFlatbuffer().has_substruct(); }
- // Creates an empty object for the subtable field, which you can
+ // Creates an empty object for the vector_of_structs field, which you can
// then populate/modify as desired.
// The field must not be populated yet.
- aos::fbs::testing::SubTableStatic *add_subtable() {
- CHECK(!subtable_.has_value());
- constexpr size_t kVtableIndex = 14;
- // Construct the *Static object that we will use for managing this subtable.
- subtable_.emplace(
- BufferForObject(object_absolute_offset_subtable,
- aos::fbs::testing::SubTableStatic::kSize, kAlign),
- this);
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ add_vector_of_structs() {
+ CHECK(!vector_of_structs_.has_value());
+ constexpr size_t kVtableIndex = 18;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_vector_of_structs;
+ std::optional<std::span<uint8_t>> inserted_bytes = InsertBytes(
+ buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_vector_of_structs = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_structs_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_structs,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::
+ RoundedLength(inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_structs_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_structs,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3,
+ true, 0>::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_subtable, kVtableIndex,
- object_absolute_offset_subtable +
- aos::fbs::testing::SubTableStatic::kOffset -
- kInlineAbsoluteOffset_subtable);
- return &subtable_.value().t;
+ kInlineAbsoluteOffset_vector_of_structs, kVtableIndex,
+ object_absolute_offset_vector_of_structs -
+ kInlineAbsoluteOffset_vector_of_structs);
+ return &vector_of_structs_.value().t;
}
- // Returns a pointer to the subtable field, if set. nullptr otherwise.
- const aos::fbs::testing::SubTableStatic *subtable() const {
- return subtable_.has_value() ? &subtable_.value().t : nullptr;
+ // Returns a pointer to the vector_of_structs field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ vector_of_structs() const {
+ return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
+ : nullptr;
}
- aos::fbs::testing::SubTableStatic *mutable_subtable() {
- return subtable_.has_value() ? &subtable_.value().t : nullptr;
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ mutable_vector_of_structs() {
+ return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
+ : nullptr;
}
- // Clears the subtable field. This will cause has_subtable() to return false.
- void clear_subtable() {
- subtable_.reset();
- ClearField(kInlineAbsoluteOffset_subtable, 4, 14);
+ // Clears the vector_of_structs field. This will cause has_vector_of_structs()
+ // to return false.
+ void clear_vector_of_structs() {
+ vector_of_structs_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_structs, 4, 18);
}
- // Returns true if the subtable field is set and can be accessed.
- bool has_subtable() const { return AsFlatbuffer().has_subtable(); }
+ // Returns true if the vector_of_structs field is set and can be accessed.
+ bool has_vector_of_structs() const {
+ return AsFlatbuffer().has_vector_of_structs();
+ }
+
+ // Creates an empty object for the unspecified_length_vector_of_strings field,
+ // which you can then populate/modify as desired. The field must not be
+ // populated yet.
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ add_unspecified_length_vector_of_strings() {
+ CHECK(!unspecified_length_vector_of_strings_.has_value());
+ constexpr size_t kVtableIndex = 28;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
+ 0>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_unspecified_length_vector_of_strings;
+ std::optional<std::span<uint8_t>> inserted_bytes = InsertBytes(
+ buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_unspecified_length_vector_of_strings =
+ object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_unspecified_length_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_unspecified_length_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize),
+ this);
+ }
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
+ kVtableIndex,
+ object_absolute_offset_unspecified_length_vector_of_strings -
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings);
+ return &unspecified_length_vector_of_strings_.value().t;
+ }
+
+ // Returns a pointer to the unspecified_length_vector_of_strings field, if
+ // set. nullptr otherwise.
+ const ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ unspecified_length_vector_of_strings() const {
+ return unspecified_length_vector_of_strings_.has_value()
+ ? &unspecified_length_vector_of_strings_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ mutable_unspecified_length_vector_of_strings() {
+ return unspecified_length_vector_of_strings_.has_value()
+ ? &unspecified_length_vector_of_strings_.value().t
+ : nullptr;
+ }
+
+ // Clears the unspecified_length_vector_of_strings field. This will cause
+ // has_unspecified_length_vector_of_strings() to return false.
+ void clear_unspecified_length_vector_of_strings() {
+ unspecified_length_vector_of_strings_.reset();
+ ClearField(kInlineAbsoluteOffset_unspecified_length_vector_of_strings, 4,
+ 28);
+ }
+
+ // Returns true if the unspecified_length_vector_of_strings field is set and
+ // can be accessed.
+ bool has_unspecified_length_vector_of_strings() const {
+ return AsFlatbuffer().has_unspecified_length_vector_of_strings();
+ }
+
+ // Creates an empty object for the vector_of_tables field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ add_vector_of_tables() {
+ CHECK(!vector_of_tables_.has_value());
+ constexpr size_t kVtableIndex = 20;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3,
+ false, 0>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_vector_of_tables;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3,
+ false, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_vector_of_tables = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_tables_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_tables,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic,
+ 3, false, 0>::kSize),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_tables_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_tables,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic,
+ 3, false, 0>::kSize),
+ this);
+ }
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_tables, kVtableIndex,
+ object_absolute_offset_vector_of_tables -
+ kInlineAbsoluteOffset_vector_of_tables);
+ return &vector_of_tables_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_tables field, if set. nullptr otherwise.
+ const ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ vector_of_tables() const {
+ return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ mutable_vector_of_tables() {
+ return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_tables field. This will cause has_vector_of_tables()
+ // to return false.
+ void clear_vector_of_tables() {
+ vector_of_tables_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_tables, 4, 20);
+ }
+
+ // Returns true if the vector_of_tables field is set and can be accessed.
+ bool has_vector_of_tables() const {
+ return AsFlatbuffer().has_vector_of_tables();
+ }
+
+ // Creates an empty object for the vector_aligned field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<int32_t, 3, true, 64> *add_vector_aligned() {
+ CHECK(!vector_aligned_.has_value());
+ constexpr size_t kVtableIndex = 16;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<int32_t, 3, true, 64>::kPreallocatedSize ==
+ 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_vector_aligned;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_vector_aligned = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_aligned_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_aligned,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::RoundedLength(
+ inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_aligned_.emplace(
+ BufferForObject(object_absolute_offset_vector_aligned,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize),
+ this);
+ }
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_aligned, kVtableIndex,
+ object_absolute_offset_vector_aligned -
+ kInlineAbsoluteOffset_vector_aligned);
+ return &vector_aligned_.value().t;
+ }
+
+ // Returns a pointer to the vector_aligned field, if set. nullptr otherwise.
+ const ::aos::fbs::Vector<int32_t, 3, true, 64> *vector_aligned() const {
+ return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ }
+ ::aos::fbs::Vector<int32_t, 3, true, 64> *mutable_vector_aligned() {
+ return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ }
+
+ // Clears the vector_aligned field. This will cause has_vector_aligned() to
+ // return false.
+ void clear_vector_aligned() {
+ vector_aligned_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_aligned, 4, 16);
+ }
+
+ // Returns true if the vector_aligned field is set and can be accessed.
+ bool has_vector_aligned() const {
+ return AsFlatbuffer().has_vector_aligned();
+ }
+
+ // Creates an empty object for the vector_of_strings field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ add_vector_of_strings() {
+ CHECK(!vector_of_strings_.has_value());
+ constexpr size_t kVtableIndex = 10;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
+ 0>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_vector_of_strings;
+ std::optional<std::span<uint8_t>> inserted_bytes = InsertBytes(
+ buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_vector_of_strings = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize),
+ this);
+ }
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_strings, kVtableIndex,
+ object_absolute_offset_vector_of_strings -
+ kInlineAbsoluteOffset_vector_of_strings);
+ return &vector_of_strings_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_strings field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ vector_of_strings() const {
+ return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ mutable_vector_of_strings() {
+ return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_strings field. This will cause has_vector_of_strings()
+ // to return false.
+ void clear_vector_of_strings() {
+ vector_of_strings_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_strings, 4, 10);
+ }
+
+ // Returns true if the vector_of_strings field is set and can be accessed.
+ bool has_vector_of_strings() const {
+ return AsFlatbuffer().has_vector_of_strings();
+ }
+
+ // Creates an empty object for the vector_of_scalars field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<int32_t, 3, true, 0> *add_vector_of_scalars() {
+ CHECK(!vector_of_scalars_.has_value());
+ constexpr size_t kVtableIndex = 6;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<int32_t, 3, true, 0>::kPreallocatedSize ==
+ 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_vector_of_scalars;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_vector_of_scalars = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_scalars_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_scalars,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::RoundedLength(
+ inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ vector_of_scalars_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_scalars,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize),
+ this);
+ }
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_scalars, kVtableIndex,
+ object_absolute_offset_vector_of_scalars -
+ kInlineAbsoluteOffset_vector_of_scalars);
+ return &vector_of_scalars_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_scalars field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<int32_t, 3, true, 0> *vector_of_scalars() const {
+ return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<int32_t, 3, true, 0> *mutable_vector_of_scalars() {
+ return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_scalars field. This will cause has_vector_of_scalars()
+ // to return false.
+ void clear_vector_of_scalars() {
+ vector_of_scalars_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_scalars, 4, 6);
+ }
+
+ // Returns true if the vector_of_scalars field is set and can be accessed.
+ bool has_vector_of_scalars() const {
+ return AsFlatbuffer().has_vector_of_scalars();
+ }
// Creates an empty object for the unspecified_length_string field, which you
// can then populate/modify as desired. The field must not be populated yet.
::aos::fbs::String<0> *add_unspecified_length_string() {
CHECK(!unspecified_length_string_.has_value());
constexpr size_t kVtableIndex = 26;
- // Construct the *Static object that we will use for managing this subtable.
- unspecified_length_string_.emplace(
- BufferForObject(object_absolute_offset_unspecified_length_string,
- ::aos::fbs::String<0>::kSize, kAlign),
- this);
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::String<0>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_unspecified_length_string;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::String<0>::kSize, ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_unspecified_length_string = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_string_.emplace(
+ BufferForObject(object_absolute_offset_unspecified_length_string,
+ ::aos::fbs::String<0>::RoundedLength(
+ inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_string_.emplace(
+ BufferForObject(object_absolute_offset_unspecified_length_string,
+ ::aos::fbs::String<0>::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
kInlineAbsoluteOffset_unspecified_length_string, kVtableIndex,
- object_absolute_offset_unspecified_length_string +
- ::aos::fbs::String<0>::kOffset -
+ object_absolute_offset_unspecified_length_string -
kInlineAbsoluteOffset_unspecified_length_string);
return &unspecified_length_string_.value().t;
}
@@ -772,16 +1114,53 @@
::aos::fbs::Vector<uint8_t, 0, true, 0> *add_unspecified_length_vector() {
CHECK(!unspecified_length_vector_.has_value());
constexpr size_t kVtableIndex = 24;
- // Construct the *Static object that we will use for managing this subtable.
- unspecified_length_vector_.emplace(
- BufferForObject(object_absolute_offset_unspecified_length_vector,
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize, kAlign),
- this);
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::Vector<uint8_t, 0, true, 0>::kPreallocatedSize ==
+ 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_unspecified_length_vector;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_unspecified_length_vector = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_vector_.emplace(
+ BufferForObject(
+ object_absolute_offset_unspecified_length_vector,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::RoundedLength(
+ inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ unspecified_length_vector_.emplace(
+ BufferForObject(object_absolute_offset_unspecified_length_vector,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
kInlineAbsoluteOffset_unspecified_length_vector, kVtableIndex,
- object_absolute_offset_unspecified_length_vector +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kOffset -
+ object_absolute_offset_unspecified_length_vector -
kInlineAbsoluteOffset_unspecified_length_vector);
return &unspecified_length_vector_.value().t;
}
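The SetField() calls above store the offset field as the distance from the field's own inline location to the start of the object data. A small, self-contained illustration of that arithmetic follows; the concrete values are hypothetical.

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical values; in the generated code they correspond to
  // kInlineAbsoluteOffset_* and object_absolute_offset_* respectively.
  const uint32_t inline_field_offset = 48;
  const uint32_t object_offset = 112;
  // This difference is what SetField<::flatbuffers::uoffset_t>() writes.
  const uint32_t stored = object_offset - inline_field_offset;
  // A reader adds the stored value back to the field's own position to
  // recover the object location.
  assert(inline_field_offset + stored == object_offset);
  return 0;
}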
@@ -813,303 +1192,260 @@
return AsFlatbuffer().has_unspecified_length_vector();
}
- // Creates an empty object for the unspecified_length_vector_of_strings field,
- // which you can then populate/modify as desired. The field must not be
- // populated yet.
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
- add_unspecified_length_vector_of_strings() {
- CHECK(!unspecified_length_vector_of_strings_.has_value());
- constexpr size_t kVtableIndex = 28;
- // Construct the *Static object that we will use for managing this subtable.
- unspecified_length_vector_of_strings_.emplace(
- BufferForObject(
- object_absolute_offset_unspecified_length_vector_of_strings,
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
- kAlign),
- this);
- // Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
- kVtableIndex,
- object_absolute_offset_unspecified_length_vector_of_strings +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kOffset -
- kInlineAbsoluteOffset_unspecified_length_vector_of_strings);
- return &unspecified_length_vector_of_strings_.value().t;
- }
-
- // Returns a pointer to the unspecified_length_vector_of_strings field, if
- // set. nullptr otherwise.
- const ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
- unspecified_length_vector_of_strings() const {
- return unspecified_length_vector_of_strings_.has_value()
- ? &unspecified_length_vector_of_strings_.value().t
- : nullptr;
- }
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
- mutable_unspecified_length_vector_of_strings() {
- return unspecified_length_vector_of_strings_.has_value()
- ? &unspecified_length_vector_of_strings_.value().t
- : nullptr;
- }
-
- // Clears the unspecified_length_vector_of_strings field. This will cause
- // has_unspecified_length_vector_of_strings() to return false.
- void clear_unspecified_length_vector_of_strings() {
- unspecified_length_vector_of_strings_.reset();
- ClearField(kInlineAbsoluteOffset_unspecified_length_vector_of_strings, 4,
- 28);
- }
-
- // Returns true if the unspecified_length_vector_of_strings field is set and
- // can be accessed.
- bool has_unspecified_length_vector_of_strings() const {
- return AsFlatbuffer().has_unspecified_length_vector_of_strings();
- }
-
- // Creates an empty object for the vector_aligned field, which you can
+ // Creates an empty object for the included_table field, which you can
// then populate/modify as desired.
// The field must not be populated yet.
- ::aos::fbs::Vector<int32_t, 3, true, 64> *add_vector_aligned() {
- CHECK(!vector_aligned_.has_value());
- constexpr size_t kVtableIndex = 16;
- // Construct the *Static object that we will use for managing this subtable.
- vector_aligned_.emplace(
- BufferForObject(object_absolute_offset_vector_aligned,
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign),
- this);
+ aos::fbs::testing::included::IncludedTableStatic *add_included_table() {
+ CHECK(!included_table_.has_value());
+ constexpr size_t kVtableIndex = 22;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (aos::fbs::testing::included::IncludedTableStatic::
+ kPreallocatedSize == 0) {
+ const size_t object_absolute_offset =
+ object_absolute_offset_included_table;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ aos::fbs::testing::included::IncludedTableStatic::kSize,
+ ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_included_table = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ included_table_.emplace(
+ BufferForObject(
+ object_absolute_offset_included_table,
+ aos::fbs::testing::included::IncludedTableStatic::kSize),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ included_table_.emplace(
+ BufferForObject(
+ object_absolute_offset_included_table,
+ aos::fbs::testing::included::IncludedTableStatic::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_vector_aligned, kVtableIndex,
- object_absolute_offset_vector_aligned +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kOffset -
- kInlineAbsoluteOffset_vector_aligned);
- return &vector_aligned_.value().t;
+ kInlineAbsoluteOffset_included_table, kVtableIndex,
+ object_absolute_offset_included_table -
+ kInlineAbsoluteOffset_included_table);
+ return &included_table_.value().t;
}
- // Returns a pointer to the vector_aligned field, if set. nullptr otherwise.
- const ::aos::fbs::Vector<int32_t, 3, true, 64> *vector_aligned() const {
- return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ // Returns a pointer to the included_table field, if set. nullptr otherwise.
+ const aos::fbs::testing::included::IncludedTableStatic *included_table()
+ const {
+ return included_table_.has_value() ? &included_table_.value().t : nullptr;
}
- ::aos::fbs::Vector<int32_t, 3, true, 64> *mutable_vector_aligned() {
- return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ aos::fbs::testing::included::IncludedTableStatic *mutable_included_table() {
+ return included_table_.has_value() ? &included_table_.value().t : nullptr;
}
- // Clears the vector_aligned field. This will cause has_vector_aligned() to
+ // Clears the included_table field. This will cause has_included_table() to
// return false.
- void clear_vector_aligned() {
- vector_aligned_.reset();
- ClearField(kInlineAbsoluteOffset_vector_aligned, 4, 16);
+ void clear_included_table() {
+ included_table_.reset();
+ ClearField(kInlineAbsoluteOffset_included_table, 4, 22);
}
- // Returns true if the vector_aligned field is set and can be accessed.
- bool has_vector_aligned() const {
- return AsFlatbuffer().has_vector_aligned();
+ // Returns true if the included_table field is set and can be accessed.
+ bool has_included_table() const {
+ return AsFlatbuffer().has_included_table();
}
- // Creates an empty object for the vector_of_scalars field, which you can
+ // Creates an empty object for the subtable field, which you can
// then populate/modify as desired.
// The field must not be populated yet.
- ::aos::fbs::Vector<int32_t, 3, true, 0> *add_vector_of_scalars() {
- CHECK(!vector_of_scalars_.has_value());
- constexpr size_t kVtableIndex = 6;
- // Construct the *Static object that we will use for managing this subtable.
- vector_of_scalars_.emplace(
- BufferForObject(object_absolute_offset_vector_of_scalars,
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize, kAlign),
- this);
+ aos::fbs::testing::SubTableStatic *add_subtable() {
+ CHECK(!subtable_.has_value());
+ constexpr size_t kVtableIndex = 14;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (aos::fbs::testing::SubTableStatic::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset = object_absolute_offset_subtable;
+ std::optional<std::span<uint8_t>> inserted_bytes = InsertBytes(
+ buffer().data() + object_absolute_offset,
+ aos::fbs::testing::SubTableStatic::kSize, ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_subtable = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ subtable_.emplace(
+ BufferForObject(object_absolute_offset_subtable,
+ aos::fbs::testing::SubTableStatic::kSize),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ subtable_.emplace(
+ BufferForObject(object_absolute_offset_subtable,
+ aos::fbs::testing::SubTableStatic::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_vector_of_scalars, kVtableIndex,
- object_absolute_offset_vector_of_scalars +
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kOffset -
- kInlineAbsoluteOffset_vector_of_scalars);
- return &vector_of_scalars_.value().t;
+ kInlineAbsoluteOffset_subtable, kVtableIndex,
+ object_absolute_offset_subtable - kInlineAbsoluteOffset_subtable);
+ return &subtable_.value().t;
}
- // Returns a pointer to the vector_of_scalars field, if set. nullptr
- // otherwise.
- const ::aos::fbs::Vector<int32_t, 3, true, 0> *vector_of_scalars() const {
- return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
- : nullptr;
+ // Returns a pointer to the subtable field, if set. nullptr otherwise.
+ const aos::fbs::testing::SubTableStatic *subtable() const {
+ return subtable_.has_value() ? &subtable_.value().t : nullptr;
}
- ::aos::fbs::Vector<int32_t, 3, true, 0> *mutable_vector_of_scalars() {
- return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
- : nullptr;
+ aos::fbs::testing::SubTableStatic *mutable_subtable() {
+ return subtable_.has_value() ? &subtable_.value().t : nullptr;
}
- // Clears the vector_of_scalars field. This will cause has_vector_of_scalars()
- // to return false.
- void clear_vector_of_scalars() {
- vector_of_scalars_.reset();
- ClearField(kInlineAbsoluteOffset_vector_of_scalars, 4, 6);
+ // Clears the subtable field. This will cause has_subtable() to return false.
+ void clear_subtable() {
+ subtable_.reset();
+ ClearField(kInlineAbsoluteOffset_subtable, 4, 14);
}
- // Returns true if the vector_of_scalars field is set and can be accessed.
- bool has_vector_of_scalars() const {
- return AsFlatbuffer().has_vector_of_scalars();
- }
+ // Returns true if the subtable field is set and can be accessed.
+ bool has_subtable() const { return AsFlatbuffer().has_subtable(); }
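The subtable accessors follow the same add_*/has_*/mutable_*/clear_* pattern as every other non-inline field: add creates the member and must only be called once, mutable returns nullptr until then, and clear returns the field to the unset state. The toy class below mimics that contract with a std::optional so it can be read in isolation; it is not the generated TestTableStatic API.

#include <cassert>
#include <optional>
#include <string>

struct ToyTable {
  std::optional<std::string> subtable_;
  // Mirrors CHECK(!subtable_.has_value()): add may only be called once.
  std::string *add_subtable() {
    assert(!subtable_.has_value());
    return &subtable_.emplace();
  }
  std::string *mutable_subtable() {
    return subtable_.has_value() ? &subtable_.value() : nullptr;
  }
  bool has_subtable() const { return subtable_.has_value(); }
  void clear_subtable() { subtable_.reset(); }
};

int main() {
  ToyTable table;
  assert(!table.has_subtable());
  table.add_subtable()->assign("populated");
  assert(table.has_subtable() && table.mutable_subtable() != nullptr);
  table.clear_subtable();
  assert(table.mutable_subtable() == nullptr);
  return 0;
}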
- // Creates an empty object for the vector_of_strings field, which you can
+ // Creates an empty object for the string field, which you can
// then populate/modify as desired.
// The field must not be populated yet.
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
- add_vector_of_strings() {
- CHECK(!vector_of_strings_.has_value());
- constexpr size_t kVtableIndex = 10;
- // Construct the *Static object that we will use for managing this subtable.
- vector_of_strings_.emplace(
- BufferForObject(
- object_absolute_offset_vector_of_strings,
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize,
- kAlign),
- this);
+ ::aos::fbs::String<20> *add_string() {
+ CHECK(!string_.has_value());
+ constexpr size_t kVtableIndex = 8;
+ // If this object does not normally have its initial memory statically
+ // allocated, allocate it now (this is used for zero-length vectors).
+ if constexpr (::aos::fbs::String<20>::kPreallocatedSize == 0) {
+ const size_t object_absolute_offset = object_absolute_offset_string;
+ std::optional<std::span<uint8_t>> inserted_bytes =
+ InsertBytes(buffer().data() + object_absolute_offset,
+ ::aos::fbs::String<20>::kSize, ::aos::fbs::SetZero::kYes);
+ if (!inserted_bytes.has_value()) {
+ return nullptr;
+ }
+ // Undo changes to the object absolute offset that will have been made by
+ // the InsertBytes call.
+ // The InsertBytes() call normally goes through and "fixes" any offsets
+ // that will have been affected by the memory insertion. Unfortunately,
+ // if this object currently takes up zero bytes then the InsertBytes()
+ // cannot distinguish between this offset and the (identical) offsets for
+ // any other objects that may have been "sharing" this location. The
+ // effect of this logic is that the first object that gets populated at
+ // any given location will bump all other objects to later. This is fine,
+ // although it does mean that the order in which objects appear in memory
+ // may vary depending on the order in which they are constructed (if they
+ // start out sharing a start pointer).
+ object_absolute_offset_string = object_absolute_offset;
+
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ string_.emplace(BufferForObject(object_absolute_offset_string,
+ ::aos::fbs::String<20>::RoundedLength(
+ inserted_bytes.value().size())),
+ this);
+ } else {
+ // Construct the *Static object that we will use for managing this
+ // subtable.
+ string_.emplace(BufferForObject(object_absolute_offset_string,
+ ::aos::fbs::String<20>::kSize),
+ this);
+ }
// Actually set the appropriate fields in the flatbuffer memory itself.
SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_vector_of_strings, kVtableIndex,
- object_absolute_offset_vector_of_strings +
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kOffset -
- kInlineAbsoluteOffset_vector_of_strings);
- return &vector_of_strings_.value().t;
+ kInlineAbsoluteOffset_string, kVtableIndex,
+ object_absolute_offset_string - kInlineAbsoluteOffset_string);
+ return &string_.value().t;
}
- // Returns a pointer to the vector_of_strings field, if set. nullptr
- // otherwise.
- const ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
- vector_of_strings() const {
- return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
- : nullptr;
+ // Returns a pointer to the string field, if set. nullptr otherwise.
+ const ::aos::fbs::String<20> *string() const {
+ return string_.has_value() ? &string_.value().t : nullptr;
}
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
- mutable_vector_of_strings() {
- return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
- : nullptr;
+ ::aos::fbs::String<20> *mutable_string() {
+ return string_.has_value() ? &string_.value().t : nullptr;
}
- // Clears the vector_of_strings field. This will cause has_vector_of_strings()
- // to return false.
- void clear_vector_of_strings() {
- vector_of_strings_.reset();
- ClearField(kInlineAbsoluteOffset_vector_of_strings, 4, 10);
+ // Clears the string field. This will cause has_string() to return false.
+ void clear_string() {
+ string_.reset();
+ ClearField(kInlineAbsoluteOffset_string, 4, 8);
}
- // Returns true if the vector_of_strings field is set and can be accessed.
- bool has_vector_of_strings() const {
- return AsFlatbuffer().has_vector_of_strings();
+ // Returns true if the string field is set and can be accessed.
+ bool has_string() const { return AsFlatbuffer().has_string(); }
+
+ // Sets the scalar field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_scalar(const int32_t &value) {
+ SetField<int32_t>(kInlineAbsoluteOffset_scalar, 4, value);
}
- // Creates an empty object for the vector_of_structs field, which you can
- // then populate/modify as desired.
- // The field must not be populated yet.
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
- add_vector_of_structs() {
- CHECK(!vector_of_structs_.has_value());
- constexpr size_t kVtableIndex = 18;
- // Construct the *Static object that we will use for managing this subtable.
- vector_of_structs_.emplace(
- BufferForObject(
- object_absolute_offset_vector_of_structs,
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kSize,
- kAlign),
- this);
- // Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_vector_of_structs, kVtableIndex,
- object_absolute_offset_vector_of_structs +
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
- 0>::kOffset -
- kInlineAbsoluteOffset_vector_of_structs);
- return &vector_of_structs_.value().t;
+ // Returns the value of scalar if set; nullopt otherwise.
+ std::optional<int32_t> scalar() const {
+ return has_scalar()
+ ? std::make_optional(Get<int32_t>(kInlineAbsoluteOffset_scalar))
+ : std::nullopt;
+ }
+ // Returns a pointer to modify the scalar field.
+ // The pointer may be invalidated by mutations/movements of the underlying
+ // buffer. Returns nullptr if the field is not set.
+ int32_t *mutable_scalar() {
+ return has_scalar() ? MutableGet<int32_t>(kInlineAbsoluteOffset_scalar)
+ : nullptr;
}
- // Returns a pointer to the vector_of_structs field, if set. nullptr
- // otherwise.
- const ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
- vector_of_structs() const {
- return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
- : nullptr;
- }
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
- mutable_vector_of_structs() {
- return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
- : nullptr;
- }
+ // Clears the scalar field. This will cause has_scalar() to return false.
+ void clear_scalar() { ClearField(kInlineAbsoluteOffset_scalar, 4, 4); }
- // Clears the vector_of_structs field. This will cause has_vector_of_structs()
- // to return false.
- void clear_vector_of_structs() {
- vector_of_structs_.reset();
- ClearField(kInlineAbsoluteOffset_vector_of_structs, 4, 18);
- }
-
- // Returns true if the vector_of_structs field is set and can be accessed.
- bool has_vector_of_structs() const {
- return AsFlatbuffer().has_vector_of_structs();
- }
-
- // Creates an empty object for the vector_of_tables field, which you can
- // then populate/modify as desired.
- // The field must not be populated yet.
- ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
- add_vector_of_tables() {
- CHECK(!vector_of_tables_.has_value());
- constexpr size_t kVtableIndex = 20;
- // Construct the *Static object that we will use for managing this subtable.
- vector_of_tables_.emplace(
- BufferForObject(object_absolute_offset_vector_of_tables,
- ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3,
- false, 0>::kSize,
- kAlign),
- this);
- // Actually set the appropriate fields in the flatbuffer memory itself.
- SetField<::flatbuffers::uoffset_t>(
- kInlineAbsoluteOffset_vector_of_tables, kVtableIndex,
- object_absolute_offset_vector_of_tables +
- ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
- 0>::kOffset -
- kInlineAbsoluteOffset_vector_of_tables);
- return &vector_of_tables_.value().t;
- }
-
- // Returns a pointer to the vector_of_tables field, if set. nullptr otherwise.
- const ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
- vector_of_tables() const {
- return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
- : nullptr;
- }
- ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
- mutable_vector_of_tables() {
- return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
- : nullptr;
- }
-
- // Clears the vector_of_tables field. This will cause has_vector_of_tables()
- // to return false.
- void clear_vector_of_tables() {
- vector_of_tables_.reset();
- ClearField(kInlineAbsoluteOffset_vector_of_tables, 4, 20);
- }
-
- // Returns true if the vector_of_tables field is set and can be accessed.
- bool has_vector_of_tables() const {
- return AsFlatbuffer().has_vector_of_tables();
- }
+ // Returns true if the scalar field is set and can be accessed.
+ bool has_scalar() const { return AsFlatbuffer().has_scalar(); }
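Unlike a plain flatbuffer builder, which omits fields set to their default value, set_scalar() above always marks the field as present, and scalar() returns nullopt only when the field was never populated. A minimal sketch of that presence semantics, using std::optional as a stand-in for the has_scalar()/Get<int32_t>() pair:

#include <cassert>
#include <cstdint>
#include <optional>

int main() {
  std::optional<int32_t> scalar;  // stands in for has_scalar()/Get<int32_t>()
  assert(!scalar.has_value());    // has_scalar() == false before set_scalar()
  scalar = 0;                     // set_scalar(0): the default value...
  assert(scalar.has_value());     // ...but the field is still marked present
  assert(scalar.value_or(0) == 0);
  return 0;
}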
// Clears every field of the table, removing any existing state.
void Clear() {
- clear_included_table();
- clear_scalar();
- clear_string();
clear_substruct();
- clear_subtable();
+ clear_vector_of_structs();
+ clear_unspecified_length_vector_of_strings();
+ clear_vector_of_tables();
+ clear_vector_aligned();
+ clear_vector_of_strings();
+ clear_vector_of_scalars();
clear_unspecified_length_string();
clear_unspecified_length_vector();
- clear_unspecified_length_vector_of_strings();
- clear_vector_aligned();
- clear_vector_of_scalars();
- clear_vector_of_strings();
- clear_vector_of_structs();
- clear_vector_of_tables();
+ clear_included_table();
+ clear_subtable();
+ clear_string();
+ clear_scalar();
}
// Copies the contents of the provided flatbuffer into this flatbuffer,
@@ -1119,33 +1455,58 @@
[[nodiscard]] bool FromFlatbuffer(const Flatbuffer &other) {
Clear();
- if (other.has_included_table()) {
- if (!CHECK_NOTNULL(add_included_table())
- ->FromFlatbuffer(other.included_table())) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
- }
-
- if (other.has_scalar()) {
- set_scalar(other.scalar());
- }
-
- if (other.has_string()) {
- if (!CHECK_NOTNULL(add_string())->FromFlatbuffer(other.string())) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
- }
-
if (other.has_substruct()) {
set_substruct(*other.substruct());
}
- if (other.has_subtable()) {
- if (!CHECK_NOTNULL(add_subtable())->FromFlatbuffer(other.subtable())) {
+ if (other.has_vector_of_structs()) {
+ if (!CHECK_NOTNULL(add_vector_of_structs())
+ ->FromFlatbuffer(other.vector_of_structs())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.has_unspecified_length_vector_of_strings()) {
+ if (!CHECK_NOTNULL(add_unspecified_length_vector_of_strings())
+ ->FromFlatbuffer(other.unspecified_length_vector_of_strings())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.has_vector_of_tables()) {
+ if (!CHECK_NOTNULL(add_vector_of_tables())
+ ->FromFlatbuffer(other.vector_of_tables())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.has_vector_aligned()) {
+ if (!CHECK_NOTNULL(add_vector_aligned())
+ ->FromFlatbuffer(other.vector_aligned())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.has_vector_of_strings()) {
+ if (!CHECK_NOTNULL(add_vector_of_strings())
+ ->FromFlatbuffer(other.vector_of_strings())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.has_vector_of_scalars()) {
+ if (!CHECK_NOTNULL(add_vector_of_scalars())
+ ->FromFlatbuffer(other.vector_of_scalars())) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
@@ -1170,58 +1531,33 @@
}
}
- if (other.has_unspecified_length_vector_of_strings()) {
- if (!CHECK_NOTNULL(add_unspecified_length_vector_of_strings())
- ->FromFlatbuffer(other.unspecified_length_vector_of_strings())) {
+ if (other.has_included_table()) {
+ if (!CHECK_NOTNULL(add_included_table())
+ ->FromFlatbuffer(other.included_table())) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
}
}
- if (other.has_vector_aligned()) {
- if (!CHECK_NOTNULL(add_vector_aligned())
- ->FromFlatbuffer(other.vector_aligned())) {
+ if (other.has_subtable()) {
+ if (!CHECK_NOTNULL(add_subtable())->FromFlatbuffer(other.subtable())) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
}
}
- if (other.has_vector_of_scalars()) {
- if (!CHECK_NOTNULL(add_vector_of_scalars())
- ->FromFlatbuffer(other.vector_of_scalars())) {
+ if (other.has_string()) {
+ if (!CHECK_NOTNULL(add_string())->FromFlatbuffer(other.string())) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
}
}
- if (other.has_vector_of_strings()) {
- if (!CHECK_NOTNULL(add_vector_of_strings())
- ->FromFlatbuffer(other.vector_of_strings())) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
- }
-
- if (other.has_vector_of_structs()) {
- if (!CHECK_NOTNULL(add_vector_of_structs())
- ->FromFlatbuffer(other.vector_of_structs())) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
- }
-
- if (other.has_vector_of_tables()) {
- if (!CHECK_NOTNULL(add_vector_of_tables())
- ->FromFlatbuffer(other.vector_of_tables())) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
+ if (other.has_scalar()) {
+ set_scalar(other.scalar());
}
return true;
@@ -1242,36 +1578,68 @@
[[nodiscard]] bool FromFlatbuffer(const Flatbuffer::NativeTableType &other) {
Clear();
- if (other.included_table) {
- if (!CHECK_NOTNULL(add_included_table())
- ->FromFlatbuffer(*other.included_table)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
+ if (other.substruct) {
+ set_substruct(*other.substruct);
}
- set_scalar(other.scalar);
-
// Unconditionally copy strings/vectors, even if it will just end up
// being 0-length (this maintains consistency with the flatbuffer Pack()
// behavior).
- if (!CHECK_NOTNULL(add_string())->FromFlatbuffer(other.string)) {
+ if (!CHECK_NOTNULL(add_vector_of_structs())
+ ->FromFlatbuffer(other.vector_of_structs)) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
}
- if (other.substruct) {
- set_substruct(*other.substruct);
+ // Unconditionally copy strings/vectors, even if it will just end up
+ // being 0-length (this maintains consistency with the flatbuffer Pack()
+ // behavior).
+ if (!CHECK_NOTNULL(add_unspecified_length_vector_of_strings())
+ ->FromFlatbuffer(other.unspecified_length_vector_of_strings)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
}
- if (other.subtable) {
- if (!CHECK_NOTNULL(add_subtable())->FromFlatbuffer(*other.subtable)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
+ // Unconditionally copy strings/vectors, even if it will just end up
+ // being 0-length (this maintains consistency with the flatbuffer Pack()
+ // behavior).
+ if (!CHECK_NOTNULL(add_vector_of_tables())
+ ->FromFlatbuffer(other.vector_of_tables)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+
+ // Unconditionally copy strings/vectors, even if it will just end up
+ // being 0-length (this maintains consistency with the flatbuffer Pack()
+ // behavior).
+ if (!CHECK_NOTNULL(add_vector_aligned())
+ ->FromFlatbuffer(other.vector_aligned)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+
+ // Unconditionally copy strings/vectors, even if it will just end up
+ // being 0-length (this maintains consistency with the flatbuffer Pack()
+ // behavior).
+ if (!CHECK_NOTNULL(add_vector_of_strings())
+ ->FromFlatbuffer(other.vector_of_strings)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+
+ // Unconditionally copy strings/vectors, even if it will just end up
+ // being 0-length (this maintains consistency with the flatbuffer Pack()
+ // behavior).
+ if (!CHECK_NOTNULL(add_vector_of_scalars())
+ ->FromFlatbuffer(other.vector_of_scalars)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
}
// Unconditionally copy strings/vectors, even if it will just end up
@@ -1294,65 +1662,33 @@
return false;
}
- // Unconditionally copy strings/vectors, even if it will just end up
- // being 0-length (this maintains consistency with the flatbuffer Pack()
- // behavior).
- if (!CHECK_NOTNULL(add_unspecified_length_vector_of_strings())
- ->FromFlatbuffer(other.unspecified_length_vector_of_strings)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
+ if (other.included_table) {
+ if (!CHECK_NOTNULL(add_included_table())
+ ->FromFlatbuffer(*other.included_table)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other.subtable) {
+ if (!CHECK_NOTNULL(add_subtable())->FromFlatbuffer(*other.subtable)) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
}
// Unconditionally copy strings/vectors, even if it will just end up
// being 0-length (this maintains consistency with the flatbuffer Pack()
// behavior).
- if (!CHECK_NOTNULL(add_vector_aligned())
- ->FromFlatbuffer(other.vector_aligned)) {
+ if (!CHECK_NOTNULL(add_string())->FromFlatbuffer(other.string)) {
// Fail if we were unable to copy (e.g., if we tried to copy in a long
// vector and do not have the space for it).
return false;
}
- // Unconditionally copy strings/vectors, even if it will just end up
- // being 0-length (this maintains consistency with the flatbuffer Pack()
- // behavior).
- if (!CHECK_NOTNULL(add_vector_of_scalars())
- ->FromFlatbuffer(other.vector_of_scalars)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
-
- // Unconditionally copy strings/vectors, even if it will just end up
- // being 0-length (this maintains consistency with the flatbuffer Pack()
- // behavior).
- if (!CHECK_NOTNULL(add_vector_of_strings())
- ->FromFlatbuffer(other.vector_of_strings)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
-
- // Unconditionally copy strings/vectors, even if it will just end up
- // being 0-length (this maintains consistency with the flatbuffer Pack()
- // behavior).
- if (!CHECK_NOTNULL(add_vector_of_structs())
- ->FromFlatbuffer(other.vector_of_structs)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
-
- // Unconditionally copy strings/vectors, even if it will just end up
- // being 0-length (this maintains consistency with the flatbuffer Pack()
- // behavior).
- if (!CHECK_NOTNULL(add_vector_of_tables())
- ->FromFlatbuffer(other.vector_of_tables)) {
- // Fail if we were unable to copy (e.g., if we tried to copy in a long
- // vector and do not have the space for it).
- return false;
- }
+ set_scalar(other.scalar);
return true;
}
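Both FromFlatbuffer() overloads are [[nodiscard]] because the copy can fail when a variable-length member needs more space than the statically sized buffer provides. The stand-in below (not the generated API) shows the caller-side contract of branching on the result.

#include <cstddef>
#include <iostream>

// Stand-in for the generated FromFlatbuffer(): the copy fails when a
// variable-length member does not fit in the static buffer.
[[nodiscard]] bool CopyIntoBoundedBuffer(size_t bytes_needed,
                                         size_t bytes_available) {
  return bytes_needed <= bytes_available;
}

int main() {
  if (!CopyIntoBoundedBuffer(/*bytes_needed=*/128, /*bytes_available=*/64)) {
    std::cerr << "source message does not fit in the static buffer\n";
    return 1;
  }
  return 0;
}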
@@ -1368,286 +1704,9 @@
TestTableStatic(TestTableStatic &&) = default;
friend struct ::aos::fbs::internal::TableMover<TestTableStatic>;
- // Members relating to the included_table field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<
- aos::fbs::testing::included::IncludedTableStatic>>
- included_table_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_included_table =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(0, kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_included_table = 4;
-
- // Offset from the start of the buffer to the inline data for the scalar
- // field.
- static constexpr size_t kInlineAbsoluteOffset_scalar = 8;
-
- // Members relating to the string field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<20>>>
- string_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_string =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::IncludedTableStatic::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_string = 12;
-
// Offset from the start of the buffer to the inline data for the substruct
// field.
- static constexpr size_t kInlineAbsoluteOffset_substruct = 16;
-
- // Members relating to the subtable field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<
- ::aos::fbs::internal::TableMover<aos::fbs::testing::SubTableStatic>>
- subtable_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_subtable =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_subtable = 32;
-
- // Members relating to the unspecified_length_string field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<0>>>
- unspecified_length_string_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_unspecified_length_string =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_unspecified_length_string = 36;
-
- // Members relating to the unspecified_length_vector field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<
- ::aos::fbs::internal::TableMover<::aos::fbs::Vector<uint8_t, 0, true, 0>>>
- unspecified_length_vector_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_unspecified_length_vector =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_unspecified_length_vector = 40;
-
- // Members relating to the unspecified_length_vector_of_strings field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>>>
- unspecified_length_vector_of_strings_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_unspecified_length_vector_of_strings =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t
- kInlineAbsoluteOffset_unspecified_length_vector_of_strings = 44;
-
- // Members relating to the vector_aligned field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<
- ::aos::fbs::Vector<int32_t, 3, true, 64>>>
- vector_aligned_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_vector_aligned =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_vector_aligned = 48;
-
- // Members relating to the vector_of_scalars field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<
- ::aos::fbs::internal::TableMover<::aos::fbs::Vector<int32_t, 3, true, 0>>>
- vector_of_scalars_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_vector_of_scalars =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_vector_of_scalars = 52;
-
- // Members relating to the vector_of_strings field.
- //
- // *Static object used for managing this subtable. Will be nullopt
- // when the field is not populated.
- // We use the TableMover to be able to make this object moveable.
- std::optional<::aos::fbs::internal::TableMover<
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>>>
- vector_of_strings_;
- // Offset from the start of the buffer to the start of the actual
- // data for this field. Will be updated even when the table is not
- // populated, so that we know where to construct it when requested.
- size_t object_absolute_offset_vector_of_strings =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
- kAlign);
- // Offset from the start of the buffer to the offset in the inline data for
- // this field.
- static constexpr size_t kInlineAbsoluteOffset_vector_of_strings = 56;
+ static constexpr size_t kInlineAbsoluteOffset_substruct = 4;
// Members relating to the vector_of_structs field.
//
@@ -1660,41 +1719,47 @@
// Offset from the start of the buffer to the start of the actual
// data for this field. Will be updated even when the table is not
// populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetvector_of_structs =
+ ::aos::fbs::AlignOffset(
+ (kVtableStart + kVtableSize) - kAlignOffset,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kAlign,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kAlignOffset) +
+ kAlignOffset;
size_t object_absolute_offset_vector_of_structs =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0, kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize,
- kAlign);
+ kDefaultObjectAbsoluteOffsetvector_of_structs;
// Offset from the start of the buffer to the offset in the inline data for
// this field.
- static constexpr size_t kInlineAbsoluteOffset_vector_of_structs = 60;
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_structs = 20;
+
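Each kDefaultObjectAbsoluteOffset* constant above is chained off the previous member's default offset plus that member's kPreallocatedSize, rounded up to satisfy the next member's alignment. The sketch below illustrates that chaining with a plain round-up helper; AlignUp() and the concrete sizes are assumptions and do not claim to reproduce the exact semantics of ::aos::fbs::AlignOffset().

#include <cstddef>
#include <iostream>

// Illustrative round-up helper; not a drop-in for ::aos::fbs::AlignOffset().
constexpr size_t AlignUp(size_t offset, size_t align) {
  return (offset + align - 1) / align * align;
}

int main() {
  // Hypothetical layout: a 20-byte vtable/inline region followed by two
  // members with preallocated sizes 52 and 0 and alignments 4 and 8.
  constexpr size_t kFirstMember = AlignUp(20, 4);
  constexpr size_t kSecondMember = AlignUp(kFirstMember + 52, 8);
  static_assert(kFirstMember == 20 && kSecondMember == 72,
                "default offsets chain member by member");
  std::cout << kFirstMember << " " << kSecondMember << "\n";
  return 0;
}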
+ // Members relating to the unspecified_length_vector_of_strings field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>>>
+ unspecified_length_vector_of_strings_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector_of_strings =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetvector_of_structs +
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
+ 0>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_unspecified_length_vector_of_strings =
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector_of_strings;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings = 24;
// Members relating to the vector_of_tables field.
//
@@ -1707,50 +1772,226 @@
// Offset from the start of the buffer to the start of the actual
// data for this field. Will be updated even when the table is not
// populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetvector_of_tables =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector_of_strings +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
+ 0>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kAlign,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kAlignOffset) +
+ kAlignOffset;
size_t object_absolute_offset_vector_of_tables =
- ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(
- ::aos::fbs::PaddedSize(0,
- kAlign) +
- aos::fbs::testing::included::
- IncludedTableStatic::
- kSize,
- kAlign) +
- ::aos::fbs::String<20>::kSize,
- kAlign) +
- aos::fbs::testing::SubTableStatic::
- kSize,
- kAlign) +
- ::aos::fbs::String<0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<uint8_t, 0, true,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<0>, 0,
- false, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
- kAlign) +
- ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
- 0>::kSize,
- kAlign) +
- ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
- 0>::kSize,
- kAlign);
+ kDefaultObjectAbsoluteOffsetvector_of_tables;
// Offset from the start of the buffer to the offset in the inline data for
// this field.
- static constexpr size_t kInlineAbsoluteOffset_vector_of_tables = 64;
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_tables = 28;
+
+ // Members relating to the vector_aligned field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<int32_t, 3, true, 64>>>
+ vector_aligned_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetvector_aligned =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetvector_of_tables +
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_vector_aligned =
+ kDefaultObjectAbsoluteOffsetvector_aligned;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_aligned = 32;
+
+ // Members relating to the vector_of_strings field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>>>
+ vector_of_strings_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetvector_of_strings =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetvector_aligned +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
+ 0>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_vector_of_strings =
+ kDefaultObjectAbsoluteOffsetvector_of_strings;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_strings = 36;
+
+ // Members relating to the vector_of_scalars field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<::aos::fbs::Vector<int32_t, 3, true, 0>>>
+ vector_of_scalars_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetvector_of_scalars =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetvector_of_strings +
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
+ 0>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_vector_of_scalars =
+ kDefaultObjectAbsoluteOffsetvector_of_scalars;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_scalars = 40;
+
+ // Members relating to the unspecified_length_string field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<0>>>
+ unspecified_length_string_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t
+ kDefaultObjectAbsoluteOffsetunspecified_length_string =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetvector_of_scalars +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::String<0>::kAlign,
+ ::aos::fbs::String<0>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_unspecified_length_string =
+ kDefaultObjectAbsoluteOffsetunspecified_length_string;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_unspecified_length_string = 44;
+
+ // Members relating to the unspecified_length_vector field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<::aos::fbs::Vector<uint8_t, 0, true, 0>>>
+ unspecified_length_vector_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetunspecified_length_string +
+ ::aos::fbs::String<0>::kPreallocatedSize - kAlignOffset,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kAlign,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_unspecified_length_vector =
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_unspecified_length_vector = 48;
+
+ // Members relating to the included_table field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ aos::fbs::testing::included::IncludedTableStatic>>
+ included_table_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetincluded_table =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetunspecified_length_vector +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kPreallocatedSize -
+ kAlignOffset,
+ aos::fbs::testing::included::IncludedTableStatic::kAlign,
+ aos::fbs::testing::included::IncludedTableStatic::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_included_table =
+ kDefaultObjectAbsoluteOffsetincluded_table;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_included_table = 52;
+
+ // Members relating to the subtable field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<aos::fbs::testing::SubTableStatic>>
+ subtable_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetsubtable =
+ ::aos::fbs::AlignOffset(kDefaultObjectAbsoluteOffsetincluded_table +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kPreallocatedSize -
+ kAlignOffset,
+ aos::fbs::testing::SubTableStatic::kAlign,
+ aos::fbs::testing::SubTableStatic::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_subtable = kDefaultObjectAbsoluteOffsetsubtable;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_subtable = 56;
+
+ // Members relating to the string field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<20>>>
+ string_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ static constexpr size_t kDefaultObjectAbsoluteOffsetstring =
+ ::aos::fbs::AlignOffset(
+ kDefaultObjectAbsoluteOffsetsubtable +
+ aos::fbs::testing::SubTableStatic::kPreallocatedSize -
+ kAlignOffset,
+ ::aos::fbs::String<20>::kAlign,
+ ::aos::fbs::String<20>::kAlignOffset) +
+ kAlignOffset;
+ size_t object_absolute_offset_string = kDefaultObjectAbsoluteOffsetstring;
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_string = 60;
+
+ // Offset from the start of the buffer to the inline data for the scalar
+ // field.
+ static constexpr size_t kInlineAbsoluteOffset_scalar = 64;
size_t NumberOfSubObjects() const final { return 11; }
using ::aos::fbs::ResizeableObject::SubObject;
@@ -1764,48 +2005,48 @@
// Pointers because these may need to be modified when memory is
// inserted into the buffer.
const std::array<size_t *, 11> subobject_object_offsets{
- &object_absolute_offset_included_table,
- &object_absolute_offset_string,
- &object_absolute_offset_subtable,
+ &object_absolute_offset_vector_of_structs,
+ &object_absolute_offset_unspecified_length_vector_of_strings,
+ &object_absolute_offset_vector_of_tables,
+ &object_absolute_offset_vector_aligned,
+ &object_absolute_offset_vector_of_strings,
+ &object_absolute_offset_vector_of_scalars,
&object_absolute_offset_unspecified_length_string,
&object_absolute_offset_unspecified_length_vector,
- &object_absolute_offset_unspecified_length_vector_of_strings,
- &object_absolute_offset_vector_aligned,
- &object_absolute_offset_vector_of_scalars,
- &object_absolute_offset_vector_of_strings,
- &object_absolute_offset_vector_of_structs,
- &object_absolute_offset_vector_of_tables};
+ &object_absolute_offset_included_table,
+ &object_absolute_offset_subtable,
+ &object_absolute_offset_string};
// Actual subobjects; note that the pointers will be invalid when the
// field is not populated.
const std::array<::aos::fbs::ResizeableObject *, 11> subobject_objects{
- &included_table_->t,
- &string_->t,
- &subtable_->t,
+ &vector_of_structs_->t,
+ &unspecified_length_vector_of_strings_->t,
+ &vector_of_tables_->t,
+ &vector_aligned_->t,
+ &vector_of_strings_->t,
+ &vector_of_scalars_->t,
&unspecified_length_string_->t,
&unspecified_length_vector_->t,
- &unspecified_length_vector_of_strings_->t,
- &vector_aligned_->t,
- &vector_of_scalars_->t,
- &vector_of_strings_->t,
- &vector_of_structs_->t,
- &vector_of_tables_->t};
+ &included_table_->t,
+ &subtable_->t,
+ &string_->t};
// Absolute offsets from the start of the buffer to where the inline
// entry is for each table. These offsets do not need to change at
// runtime (because memory is never inserted into the start of
// a given table), but the offsets pointed to by these offsets
// may need to be updated.
const std::array<size_t, 11> subobject_inline_offsets{
- kInlineAbsoluteOffset_included_table,
- kInlineAbsoluteOffset_string,
- kInlineAbsoluteOffset_subtable,
+ kInlineAbsoluteOffset_vector_of_structs,
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
+ kInlineAbsoluteOffset_vector_of_tables,
+ kInlineAbsoluteOffset_vector_aligned,
+ kInlineAbsoluteOffset_vector_of_strings,
+ kInlineAbsoluteOffset_vector_of_scalars,
kInlineAbsoluteOffset_unspecified_length_string,
kInlineAbsoluteOffset_unspecified_length_vector,
- kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
- kInlineAbsoluteOffset_vector_aligned,
- kInlineAbsoluteOffset_vector_of_scalars,
- kInlineAbsoluteOffset_vector_of_strings,
- kInlineAbsoluteOffset_vector_of_structs,
- kInlineAbsoluteOffset_vector_of_tables};
+ kInlineAbsoluteOffset_included_table,
+ kInlineAbsoluteOffset_subtable,
+ kInlineAbsoluteOffset_string};
object.inline_entry =
MutableGet<::flatbuffers::uoffset_t>(subobject_inline_offsets[index]);
object.object =
@@ -1813,5 +2054,22 @@
object.absolute_offset = subobject_object_offsets[index];
return object;
}
+
+ public:
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ static constexpr size_t kSize =
+ ::aos::fbs::AlignOffset(kDefaultObjectAbsoluteOffsetstring +
+ ::aos::fbs::String<20>::kPreallocatedSize -
+ kAlignOffset,
+ kAlign, kAlignOffset) +
+ kAlignOffset;
+ // Always statically allocate memory for tables (set for consistency with
+ // static_vector.h).
+ static constexpr size_t kPreallocatedSize = kSize;
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::AlignOffset(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
};
} // namespace aos::fbs::testing
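
As a rough sketch of the rounding arithmetic behind the kDefaultObjectAbsoluteOffset* constants, kSize, and kRootSize above (RoundUpToAlignment below is a hypothetical stand-in, not the real ::aos::fbs::AlignOffset, which also threads an alignment-offset parameter through the calculation):

    #include <cstddef>

    // Hypothetical stand-in for the rounding performed by the AlignOffset calls
    // above: round `offset` up to the next multiple of `alignment`. The real
    // ::aos::fbs::AlignOffset additionally accepts an alignment offset so that,
    // for example, a vector's length word can sit directly in front of its
    // aligned element data.
    constexpr size_t RoundUpToAlignment(size_t offset, size_t alignment) {
      return ((offset + alignment - 1) / alignment) * alignment;
    }

    static_assert(RoundUpToAlignment(13, 8) == 16);
    static_assert(RoundUpToAlignment(16, 8) == 16);
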
diff --git a/aos/init.rs b/aos/init.rs
index fe0408d..8fdb3d1 100644
--- a/aos/init.rs
+++ b/aos/init.rs
@@ -194,7 +194,7 @@
/// Sets the command gFlag to the specified value.
fn set_option(name: &str, value: &OsStr) -> Result<(), SetFlagError> {
unsafe {
- let name = CString::new(name.clone()).expect("Flag name may not have NUL");
+ let name = CString::new(name).expect("Flag name may not have NUL");
let value = CString::new(value.as_bytes()).expect("Arg may not have NUL");
if ffi::aos::SetCommandLineOption(name.as_ptr(), value.as_ptr()) {
Ok(())
diff --git a/aos/json_to_flatbuffer.h b/aos/json_to_flatbuffer.h
index deafaa7..f83bab9 100644
--- a/aos/json_to_flatbuffer.h
+++ b/aos/json_to_flatbuffer.h
@@ -43,7 +43,7 @@
inline fbs::Builder<T> JsonToStaticFlatbuffer(const std::string_view data) {
const aos::FlatbufferDetachedBuffer<typename T::Flatbuffer> fbs =
JsonToFlatbuffer<typename T::Flatbuffer>(data);
- fbs::Builder<T> builder(std::make_unique<aos::fbs::VectorAllocator>());
+ fbs::Builder<T> builder(std::make_unique<aos::fbs::AlignedVectorAllocator>());
CHECK(builder.get()->FromFlatbuffer(&fbs.message()));
return builder;
}
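
A hedged sketch of calling this helper with the test table from the generated code above; the include path for TestTableStatic and the AsFlatbuffer() read-back are assumptions, not part of this change:

    #include "glog/logging.h"

    #include "aos/flatbuffers/test_static.h"  // Assumed path for TestTableStatic.
    #include "aos/json_to_flatbuffer.h"

    // Builds a statically-allocated flatbuffer from JSON. The Builder now uses
    // the AlignedVectorAllocator internally, so the caller does not need to do
    // any alignment work of its own.
    void BuildFromJsonExample() {
      aos::fbs::Builder<aos::fbs::testing::TestTableStatic> builder =
          aos::JsonToStaticFlatbuffer<aos::fbs::testing::TestTableStatic>(
              R"({"scalar": 971})");
      // AsFlatbuffer() is assumed to expose the normal generated reader.
      CHECK_EQ(971, builder.get()->AsFlatbuffer().scalar());
    }
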
diff --git a/aos/shared_span.h b/aos/shared_span.h
new file mode 100644
index 0000000..515c0bf
--- /dev/null
+++ b/aos/shared_span.h
@@ -0,0 +1,16 @@
+#ifndef AOS_SHARED_SPAN_H_
+#define AOS_SHARED_SPAN_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "absl/types/span.h"
+
+namespace aos {
+
+// Shared pointer to a region of memory. The pointer needs to own the region.
+using SharedSpan = std::shared_ptr<const absl::Span<const uint8_t>>;
+
+} // namespace aos
+
+#endif // AOS_SHARED_SPAN_H_
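
A minimal sketch of one way to build a SharedSpan that actually owns its backing storage; the Holder type and MakeOwningSpan are invented for illustration and are not AOS APIs:

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    #include "absl/types/span.h"
    #include "aos/shared_span.h"

    // Bundles the buffer and the span describing it into one heap object, then
    // uses the aliasing shared_ptr constructor so the returned pointer keeps the
    // buffer alive for as long as anyone holds the SharedSpan.
    aos::SharedSpan MakeOwningSpan(std::vector<uint8_t> data) {
      struct Holder {
        std::vector<uint8_t> buffer;
        absl::Span<const uint8_t> span;
      };
      auto holder = std::make_shared<Holder>();
      holder->buffer = std::move(data);
      holder->span = absl::Span<const uint8_t>(holder->buffer.data(),
                                               holder->buffer.size());
      // Aliasing constructor: shares ownership of holder, but points at its span.
      return aos::SharedSpan(holder, &holder->span);
    }
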
diff --git a/aos/time/time.cc b/aos/time/time.cc
index fe8422b..0e02481 100644
--- a/aos/time/time.cc
+++ b/aos/time/time.cc
@@ -51,8 +51,8 @@
#ifdef __linux__
-namespace std::this_thread {
-template <>
+namespace aos::this_thread {
+
void sleep_until(const ::aos::monotonic_clock::time_point &end_time) {
struct timespec end_time_timespec;
::std::chrono::seconds sec =
@@ -73,7 +73,7 @@
} while (returnval != 0);
}
-} // namespace std::this_thread
+} // namespace aos::this_thread
#endif // __linux__
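
For reference, a sketch of the time_point to timespec split that the clock_nanosleep-based sleep above performs; ToTimespec is illustrative only, not an AOS function:

    #include <ctime>

    #include <chrono>

    #include "aos/time/time.h"

    // Splits a monotonic_clock::time_point into the whole-second and
    // nanosecond-remainder fields that clock_nanosleep(TIMER_ABSTIME) expects.
    struct timespec ToTimespec(aos::monotonic_clock::time_point t) {
      const std::chrono::seconds sec =
          std::chrono::duration_cast<std::chrono::seconds>(t.time_since_epoch());
      const std::chrono::nanoseconds nsec =
          std::chrono::duration_cast<std::chrono::nanoseconds>(
              t.time_since_epoch() - sec);
      struct timespec result;
      result.tv_sec = static_cast<time_t>(sec.count());
      result.tv_nsec = static_cast<long>(nsec.count());
      return result;
    }
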
diff --git a/aos/time/time.h b/aos/time/time.h
index f0b3a06..5481bd6 100644
--- a/aos/time/time.h
+++ b/aos/time/time.h
@@ -121,13 +121,17 @@
#ifdef __linux__
-namespace std::this_thread {
+namespace aos::this_thread {
+void sleep_until(const ::aos::monotonic_clock::time_point &end_time);
+} // namespace aos::this_thread
+
// Template specialization for monotonic_clock, since we can use clock_nanosleep
// with TIMER_ABSTIME and get very precise absolute time sleeps.
template <>
-void sleep_until(const ::aos::monotonic_clock::time_point &end_time);
-
-} // namespace std::this_thread
+inline void std::this_thread::sleep_until(
+ const ::aos::monotonic_clock::time_point &end_time) {
+ ::aos::this_thread::sleep_until(end_time);
+}
#endif // __linux__
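
With this change both spellings reach the same clock_nanosleep-based implementation; a short usage sketch under that assumption:

    #include <chrono>
    #include <thread>

    #include "aos/time/time.h"

    void SleepBriefly() {
      const aos::monotonic_clock::time_point deadline =
          aos::monotonic_clock::now() + std::chrono::milliseconds(5);
      // New, explicit spelling.
      aos::this_thread::sleep_until(deadline);
      // Existing callers keep working: this overload forwards to the one above.
      std::this_thread::sleep_until(deadline);
    }
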
diff --git a/aos/util/status.cc b/aos/util/status.cc
index de372db..dbcd4af 100644
--- a/aos/util/status.cc
+++ b/aos/util/status.cc
@@ -1,19 +1,32 @@
#include "aos/util/status.h"
namespace aos {
-Status::Status(StatusCode code, std::string_view message,
- std::optional<std::source_location> source_location)
+namespace {
+// Constructs a string view from the provided buffer if it has data and
+// otherwise uses the provided string view. Used in copy/move constructors to
+// figure out whether we should use the buffer or keep the pointer to the
+// existing std::string_view (as is the case for when we store a pointer to a
+// string literal).
+static std::string_view MakeStringViewFromBufferOrView(
+ const aos::InlinedVector<char, Error::kStaticMessageLength> &buffer,
+ const std::string_view &view) {
+ return (buffer.size() > 0) ? std::string_view(buffer.begin(), buffer.end())
+ : view;
+}
+} // namespace
+Error::Error(StatusCode code, std::string_view message,
+ std::optional<std::source_location> source_location)
: code_(code),
owned_message_(message.begin(), message.end()),
message_(owned_message_.data(), owned_message_.size()),
source_location_(std::move(source_location)) {}
-Status::Status(StatusCode code, const char *message,
- std::optional<std::source_location> source_location)
+Error::Error(StatusCode code, const char *message,
+ std::optional<std::source_location> source_location)
: code_(code),
message_(message),
source_location_(std::move(source_location)) {}
-Status::Status(Status &&other)
+Error::Error(Error &&other)
: code_(other.code_),
owned_message_(std::move(other.owned_message_)),
message_(MakeStringViewFromBufferOrView(owned_message_, other.message_)),
@@ -22,17 +35,17 @@
// buffer, we need to have a manually written move constructor to manage it.
other.message_ = {};
}
-Status &Status::operator=(Status &&other) {
+Error &Error::operator=(Error &&other) {
std::swap(*this, other);
return *this;
}
-Status::Status(const Status &other)
+Error::Error(const Error &other)
: code_(other.code_),
owned_message_(other.owned_message_),
message_(MakeStringViewFromBufferOrView(owned_message_, other.message_)),
source_location_(other.source_location_) {}
-std::string Status::ToString() const {
+std::string Error::ToString() const {
std::string source_info = "";
if (source_location_.has_value()) {
source_info = absl::StrFormat(
@@ -40,16 +53,20 @@
source_location_->line(), source_location_->function_name());
}
- return absl::StrFormat("%sStatus is %s with code of %d and message: %s",
- source_info, ok() ? "okay" : "errored", code(),
- message());
+ return absl::StrFormat("%sErrored with code of %d and message: %s",
+ source_info, code(), message());
}
template <>
-void CheckExpected<void>(const tl::expected<void, Status> &expected) {
+void CheckExpected<void>(const Result<void> &expected) {
if (expected.has_value()) {
return;
}
LOG(FATAL) << expected.error().ToString();
}
+
+int ResultExitCode(const Result<void> &expected) {
+ return expected.has_value() ? static_cast<int>(Error::StatusCode::kOk)
+ : expected.error().code();
+}
} // namespace aos
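
ResultExitCode() is meant to turn a Result<void> into a process exit status, as the AOS Run() methods now do; a hedged sketch of that pattern (DoWork here is hypothetical):

    #include "aos/util/status.h"

    // Hypothetical operation that reports failure via Result<void>.
    aos::Result<void> DoWork() {
      return aos::Error::MakeUnexpectedStringLiteralError("work not implemented");
    }

    int main() {
      // Returns 0 (Error::StatusCode::kOk) on success, the Error's code otherwise.
      return aos::ResultExitCode(DoWork());
    }
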
diff --git a/aos/util/status.h b/aos/util/status.h
index 314936b..fa2f4f0 100644
--- a/aos/util/status.h
+++ b/aos/util/status.h
@@ -11,32 +11,43 @@
#include "aos/containers/inlined_vector.h"
namespace aos {
-// The Status class provides a means by which errors can be readily returned
+// The Error class provides a means by which errors can be readily returned
// from methods. It will typically be wrapped by an std::expected<> to
-// accommodate a return value or the Status, although an "ok" status can also be
-// used to indicate no-error.
+// accommodate a return value or the Error.
//
-// The Status class is similar to the absl::Status or std::error_code classes,
-// in that it consists of an integer error code of some sort (where 0 indicates
-// "ok") and a string error message of some sort. The main additions of this
+// The Error class is similar to the absl::Status or std::error_code classes,
+// in that it consists of an integer error code of some sort (where 0 would
+// implicitly indicate "ok", although we assume that when there is no error you
+// will use an expected<> to return void or your actual return type)
+// and a string error message of some sort. The main additions of this
// class are:
// 1. Adding a first-class exposure of an std::source_location to make exposure
// of the sources of errors easier.
-// 2. Providing an interface that allows for Status implementations that expose
+// 2. Providing an interface that allows for Error implementations that expose
// messages without malloc'ing (not possible with absl::Status, although it
// is possible with std::error_code).
// 3. Making it relatively easy to quickly return a simple error & message
// (specifying a custom error with std::error_code is possible but requires
// jumping through hoops and managing some global state).
+// 4. Not supporting an "okay" state, to make it clear that the user is
+// supposed to use a wrapper that will itself indicate okay.
//
-// The goal of this class is that it should be easy to convert from exiting
+// The goal of this class is that it should be easy to convert from existing
// error types (absl::Status, std::error_code) to this type.
-class Status {
+//
+// Users should typically use the Result<T> convenience alias when returning
+// Errors from methods. In the case where the method would normally return void,
+// use Result<void>. Result<> is just a wrapper for tl::expected; when our
+// compilers upgrade to support std::expected this should ease the transition,
+// in addition to just providing a convenience wrapper to encourage a standard
+// pattern of use.
+class Error {
public:
// In order to allow simple error messages without memory allocation, we
// reserve a small amount of stack space for error messages. This constant
// specifies the length of these strings.
static constexpr size_t kStaticMessageLength = 128;
+
// Attaches human-readable status enums to integer codes---the specific
// numeric codes are used as exit codes when terminating execution of the
// program.
@@ -49,56 +60,54 @@
kOk = 0,
kError = 1,
};
- // Constructs a status that indicates success, with no associated error
- // message our source location.
- static Status Ok() { return Status(StatusCode::kOk, "", std::nullopt); }
+
// Constructs an Error, copying the provided message. If the message is
// shorter than kStaticMessageLength, then the message will be stored entirely
// on the stack; longer messages will require dynamic memory allocation.
// The default source_location will correspond to the call-site of the
- // Status::Error() method. This should only be overridden by wrappers that
+ // Error::MakeError() method. This should only be overridden by wrappers that
// want to present a fancier interface to users.
- static Status Error(
+ static Error MakeError(
std::string_view message,
std::source_location source_location = std::source_location::current()) {
- return Status(StatusCode::kError, message, std::move(source_location));
+ return Error(StatusCode::kError, message, std::move(source_location));
}
- static tl::unexpected<Status> UnexpectedError(
+ static tl::unexpected<Error> MakeUnexpectedError(
std::string_view message,
std::source_location source_location = std::source_location::current()) {
- return tl::unexpected<Status>(Error(message, std::move(source_location)));
+ return tl::unexpected<Error>(
+ MakeError(message, std::move(source_location)));
}
+
// Constructs an error, retaining the provided pointer to a null-terminated
// error message. It is assumed that the message pointer will stay valid
// ~indefinitely. This is generally only appropriate to use with string
- // literals (e.g., Status::StringLiteralError("Hello, World!")).
+ // literals (e.g., Error::MakeStringLiteralError("Hello, World!")).
// The default source_location will correspond to the call-site of the
- // Status::Error() method. This should only be overridden by wrappers that
+ // MakeStringLiteralError() method. This should only be overridden by wrappers that
// want to present a fancier interface to users.
- static Status StringLiteralError(
+ static Error MakeStringLiteralError(
const char *message,
std::source_location source_location = std::source_location::current()) {
- return Status(StatusCode::kError, message, std::move(source_location));
+ return Error(StatusCode::kError, message, std::move(source_location));
}
- static tl::unexpected<Status> UnexpectedStringLiteralError(
+ static tl::unexpected<Error> MakeUnexpectedStringLiteralError(
const char *message,
std::source_location source_location = std::source_location::current()) {
- return tl::unexpected<Status>(
- StringLiteralError(message, std::move(source_location)));
+ return tl::unexpected<Error>(
+ MakeStringLiteralError(message, std::move(source_location)));
}
- Status(Status &&other);
- Status &operator=(Status &&other);
- Status(const Status &other);
+ Error(Error &&other);
+ Error &operator=(Error &&other);
+ Error(const Error &other);
- // Returns true if the Status indicates success.
- [[nodiscard]] bool ok() const { return code_ == StatusCode::kOk; }
// Returns a numeric value for the status code. Zero will always indicate
// success; non-zero values will always indicate an error.
[[nodiscard]] int code() const { return static_cast<int>(code_); }
// Returns a view of the error message.
[[nodiscard]] std::string_view message() const { return message_; }
- // Returns the source_location attached to the current Status. If the
+ // Returns the source_location attached to the current Error. If the
// source_location was never set, will return nullopt. The source_location
// will typically be left unset for successful ("ok") statuses.
[[nodiscard]] const std::optional<std::source_location> &source_location()
@@ -109,22 +118,10 @@
std::string ToString() const;
private:
- Status(StatusCode code, std::string_view message,
- std::optional<std::source_location> source_location);
- Status(StatusCode code, const char *message,
- std::optional<std::source_location> source_location);
-
- // Constructs a string view from the provided buffer if it has data and
- // otherwise uses the provided string view. Used in copy/move constructors to
- // figure out whether we should use the buffer or keep the pointer to the
- // existing std::string_view (as is the case for when we store a pointer to a
- // string literal).
- static std::string_view MakeStringViewFromBufferOrView(
- const aos::InlinedVector<char, kStaticMessageLength> &buffer,
- const std::string_view &view) {
- return (buffer.size() > 0) ? std::string_view(buffer.begin(), buffer.end())
- : view;
- }
+ Error(StatusCode code, std::string_view message,
+ std::optional<std::source_location> source_location);
+ Error(StatusCode code, const char *message,
+ std::optional<std::source_location> source_location);
StatusCode code_;
aos::InlinedVector<char, kStaticMessageLength> owned_message_;
@@ -132,11 +129,14 @@
std::optional<std::source_location> source_location_;
};
+template <typename T>
+using Result = tl::expected<T, Error>;
+
// Dies fatally if the provided expected does not include the value T, printing
-// out an error message that includes the Status on the way out.
+// out an error message that includes the Error on the way out.
// Returns the stored value on success.
template <typename T>
-T CheckExpected(const tl::expected<T, Status> &expected) {
+T CheckExpected(const Result<T> &expected) {
if (expected.has_value()) {
return expected.value();
}
@@ -144,6 +144,8 @@
}
template <>
-void CheckExpected<void>(const tl::expected<void, Status> &expected);
+void CheckExpected<void>(const Result<void> &expected);
+
+int ResultExitCode(const Result<void> &expected);
} // namespace aos
#endif // AOS_UTIL_STATUS_H_
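
A short sketch of the intended Result<T> pattern described above; ParseAge and its message are invented for illustration:

    #include "aos/util/status.h"

    // Returns the validated value, or an Error explaining why it was rejected.
    aos::Result<int> ParseAge(int raw) {
      if (raw < 0) {
        return aos::Error::MakeUnexpectedError("age may not be negative");
      }
      return raw;
    }

    void Example() {
      // CheckExpected() returns the contained value, or dies fatally while
      // printing the Error (including its source location) if one is present.
      const int age = aos::CheckExpected(ParseAge(42));
      (void)age;
    }
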
diff --git a/aos/util/status_test.cc b/aos/util/status_test.cc
index 0caf73d..b838636 100644
--- a/aos/util/status_test.cc
+++ b/aos/util/status_test.cc
@@ -11,38 +11,20 @@
DECLARE_bool(die_on_malloc);
namespace aos::testing {
-class StatusTest : public ::testing::Test {
+class ErrorTest : public ::testing::Test {
protected:
- StatusTest() {}
+ ErrorTest() {}
};
-// Tests that we can construct an "Ok" status and that it presents the correct
-// interface.
-TEST_F(StatusTest, Okay) {
- std::optional<Status> ok;
- {
- aos::ScopedRealtime realtime;
- ok = Status::Ok();
- }
- ASSERT_TRUE(ok.has_value());
- EXPECT_TRUE(ok->ok());
- EXPECT_EQ(0, ok->code());
- EXPECT_EQ("", ok->message());
- EXPECT_FALSE(ok->source_location().has_value());
- EXPECT_EQ(std::string("Status is okay with code of 0 and message: "),
- ok->ToString());
-}
-
// Tests that we can construct an errored status in realtime code.
-TEST_F(StatusTest, RealtimeError) {
- std::optional<Status> error;
+TEST_F(ErrorTest, RealtimeError) {
+ std::optional<Error> error;
{
aos::ScopedRealtime realtime;
- error = Status::Error("Hello, World!");
+ error = Error::MakeError("Hello, World!");
}
const int line = __LINE__ - 2;
ASSERT_TRUE(error.has_value());
- EXPECT_FALSE(error->ok());
EXPECT_NE(0, error->code());
EXPECT_EQ(std::string("Hello, World!"), error->message());
ASSERT_TRUE(error->source_location().has_value());
@@ -51,7 +33,7 @@
std::filesystem::path(error->source_location()->file_name()).filename());
EXPECT_EQ(
std::string("virtual void "
- "aos::testing::StatusTest_RealtimeError_Test::TestBody()"),
+ "aos::testing::ErrorTest_RealtimeError_Test::TestBody()"),
error->source_location()->function_name());
EXPECT_EQ(line, error->source_location()->line());
EXPECT_LT(1, error->source_location()->column());
@@ -59,39 +41,52 @@
error->ToString(),
::testing::HasSubstr(absl::StrFormat(
"status_test.cc:%d in virtual void "
- "aos::testing::StatusTest_RealtimeError_Test::TestBody(): Status is "
- "errored with code of 1 and message: Hello, World!",
+ "aos::testing::ErrorTest_RealtimeError_Test::TestBody(): Errored "
+ "with code of 1 and message: Hello, World!",
line)));
}
+// Tests that the ResultExitCode() function will correctly transform a Result<>
+// object into an exit code suitable for exiting a program.
+TEST_F(ErrorTest, ExitCode) {
+ static_assert(0 == static_cast<int>(Error::StatusCode::kOk));
+ EXPECT_EQ(static_cast<int>(Error::StatusCode::kOk),
+ ResultExitCode(Result<void>{}));
+ EXPECT_EQ(static_cast<int>(Error::StatusCode::kError),
+ ResultExitCode(Error::MakeUnexpectedError("")));
+}
+
+// Malloc hooks don't work with asan/msan.
+#if !__has_feature(address_sanitizer) && !__has_feature(memory_sanitizer)
// Tests that we do indeed malloc (and catch it) on an extra-long error message
// (this is mostly intended to ensure that the test setup is working correctly).
-TEST(StatusDeatTest, BlowsUpOnRealtimeAllocation) {
- std::string message(" ", Status::kStaticMessageLength + 1);
+TEST(ErrorDeathTest, BlowsUpOnRealtimeAllocation) {
+ std::string message(Error::kStaticMessageLength + 1, ' ');
EXPECT_DEATH(
{
aos::ScopedRealtime realtime;
aos::CheckRealtime();
- Status foo = Status::Error(message);
+ Error foo = Error::MakeError(message);
},
"Malloced");
}
+#endif
+
// Tests that we can use arbitrarily-sized string literals for error messages.
-TEST(StatusDeatTest, StringLiteralError) {
- std::optional<Status> error;
+TEST_F(ErrorTest, StringLiteralError) {
+ std::optional<Error> error;
const char *message =
"Hellllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll"
"llllllllllllllloooooooooooooooooooooooooooooooooooooooooooo, "
"World!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
"!!!!!!!!!!!!!!";
- ASSERT_LT(Status::kStaticMessageLength, strlen(message));
+ ASSERT_LT(Error::kStaticMessageLength, strlen(message));
{
aos::ScopedRealtime realtime;
- error = Status::StringLiteralError(message);
+ error = Error::MakeStringLiteralError(message);
}
ASSERT_TRUE(error.has_value());
- EXPECT_FALSE(error->ok());
EXPECT_EQ(message, error->message());
ASSERT_TRUE(error->source_location().has_value());
EXPECT_EQ(
@@ -100,16 +95,16 @@
}
// Tests that the CheckExpected() call works as intended.
-TEST(StatusDeathTest, CheckExpected) {
- tl::expected<int, Status> expected;
+TEST(ErrorDeathTest, CheckExpected) {
+ tl::expected<int, Error> expected;
expected.emplace(971);
EXPECT_EQ(971, CheckExpected(expected))
<< "Should have gotten out the emplaced value on no error.";
- expected = Status::UnexpectedError("Hello, World!");
+ expected = Error::MakeUnexpectedError("Hello, World!");
EXPECT_DEATH(CheckExpected(expected), "Hello, World!")
<< "An error message including the error string should have been printed "
"on death.";
- EXPECT_DEATH(CheckExpected<void>(Status::UnexpectedError("void expected")),
+ EXPECT_DEATH(CheckExpected<void>(Error::MakeUnexpectedError("void expected")),
"void expected")
<< "A void expected should work with CheckExpected().";
}
diff --git a/documentation/aos/docs/flatbuffers.md b/documentation/aos/docs/flatbuffers.md
index 9c82ee3..ea8c865 100644
--- a/documentation/aos/docs/flatbuffers.md
+++ b/documentation/aos/docs/flatbuffers.md
@@ -55,7 +55,7 @@
templated `aos::fbs::Builder` object is provided which can
take an allocator and then provide the relevant table class to the user.
* We provide an `Allocator` class and various implementations (e.g., a
- `VectorAllocator` backed by an `std::vector`) for managing the memory into
+ `AlignedVectorAllocator`) for managing the memory into
which the `Builder` will serialize the flatbuffer.
* A new `MakeStaticBuilder` method is provided on the `aos::Sender` class which
constructs an `aos::fbs::Builder` to allow you to construct a message to be
@@ -92,7 +92,7 @@
In order to handle alignment correctly in our `Builder` and `Allocator` classes,
we end up forcing the `Builder` to be able to accept semi-arbitrarily aligned
buffers in order to ease the `Allocator` implementation (e.g., the
-`VectorAllocator` uses a `std::vector` internally which does not necessarily
+underlying `Allocator` implementation does not necessarily
align its memory). The `Builder` then adds padding as needed and passes an
appropriately aligned buffer down to the `Table` class.
@@ -299,7 +299,7 @@
aos::FlatbufferDetachedBuffer<TestTable> fbb_finished = fbb.Release();
// Using the static flatbuffer API.
- aos::fbs::VectorAllocator allocator;
+ aos::fbs::AlignedVectorAllocator allocator;
Builder<TestTableStatic> static_builder(&allocator);
PopulateStatic(CHECK_NOTNULL(static_builder.get()->add_subtable()));