Create a "static" flatbuffer API
This provides a generated API for working with flatbuffer objects. The
code generator determines a static layout for the flatbuffer and uses
that layout to construct the flatbuffer without needing to dynamically
allocate any memory. For situations where dynamic sizing is
appropriate, the API does allow for increasing the size of any vectors
in the flatbuffer objects.
This change includes a checked-in version of the generated code so that
reviewers for this and future changes can readily examine what the
generated code looks like.
Future tasks:
* Support for unions?
* Consider precomputing some constants for sizes/alignments rather than
massive constant expressions.
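
A sketch of typical usage (adapted from the Builder documentation in
aos/flatbuffers/builder.h; the type and field names come from the test
schema included in this change):

  aos::fbs::VectorAllocator allocator;
  Builder<TestTableStatic> builder(&allocator);
  TestTableStatic *object = builder.get();
  object->set_scalar(123);

The buffer contains a valid, complete flatbuffer at every point, so no
Finish() step is required.
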
Change-Id: I6bf72d6c722d5390ab2239289a8a2a4e118c8d47
Signed-off-by: James Kuszmaul <james.kuszmaul@bluerivertech.com>
diff --git a/BUILD b/BUILD
index b38e376..a629815 100644
--- a/BUILD
+++ b/BUILD
@@ -11,6 +11,8 @@
"tsconfig.json",
"tsconfig.node.json",
"rollup.config.js",
+ # Expose .clang-format so that the static flatbuffer codegen can format its files nicely.
+ ".clang-format",
])
# The root repo tsconfig
diff --git a/aos/BUILD b/aos/BUILD
index acef762..899d4eb 100644
--- a/aos/BUILD
+++ b/aos/BUILD
@@ -418,6 +418,7 @@
":flatbuffer_utils",
":flatbuffers",
":json_tokenizer",
+ "//aos/flatbuffers:builder",
"//aos/util:file",
"@com_github_google_flatbuffers//:flatbuffers",
"@com_github_google_glog//:glog",
diff --git a/aos/flatbuffers/BUILD b/aos/flatbuffers/BUILD
new file mode 100644
index 0000000..51ba041
--- /dev/null
+++ b/aos/flatbuffers/BUILD
@@ -0,0 +1,166 @@
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("//aos:flatbuffers.bzl", "cc_static_flatbuffer")
+load("//aos/flatbuffers:generate.bzl", "static_flatbuffer")
+
+cc_library(
+ name = "static_flatbuffers",
+ srcs = ["static_flatbuffers.cc"],
+ hdrs = ["static_flatbuffers.h"],
+ deps = [
+ ":static_table",
+ ":static_vector",
+ "//aos:json_to_flatbuffer",
+ "@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_google_glog//:glog",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ ],
+)
+
+cc_library(
+ name = "base",
+ srcs = ["base.cc"],
+ hdrs = ["base.h"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
+cc_test(
+ name = "base_test",
+ srcs = ["base_test.cc"],
+ deps = [
+ ":base",
+ "//aos/testing:googletest",
+ ],
+)
+
+cc_library(
+ name = "static_table",
+ hdrs = ["static_table.h"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":base",
+ "@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
+cc_library(
+ name = "static_vector",
+ hdrs = ["static_vector.h"],
+ target_compatible_with = ["@platforms//os:linux"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//aos/containers:inlined_vector",
+ "//aos/containers:sized_array",
+ "@com_github_google_flatbuffers//:flatbuffers",
+ "@com_github_google_glog//:glog",
+ ],
+)
+
+cc_library(
+ name = "builder",
+ hdrs = ["builder.h"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":static_table",
+ "//aos:flatbuffers",
+ ],
+)
+
+cc_static_flatbuffer(
+ name = "test_schema",
+ function = "aos::fbs::testing::TestTableSchema",
+ target = ":test_fbs_fbs_reflection_out",
+)
+
+cc_static_flatbuffer(
+ name = "interesting_schemas",
+ function = "aos::fbs::testing::UnsupportedSchema",
+ target = ":interesting_schemas_fbs_reflection_out",
+)
+
+flatbuffer_cc_library(
+ name = "interesting_schemas_fbs",
+ srcs = ["interesting_schemas.fbs"],
+ gen_reflections = True,
+)
+
+cc_test(
+ name = "static_flatbuffers_test",
+ srcs = ["static_flatbuffers_test.cc"],
+ data = ["//aos/flatbuffers/test_dir:test_data"],
+ deps = [
+ ":builder",
+ ":interesting_schemas",
+ ":static_flatbuffers",
+ ":test_fbs",
+ ":test_schema",
+ "//aos:flatbuffers",
+ "//aos:json_to_flatbuffer",
+ "//aos/flatbuffers/test_dir:type_coverage_fbs",
+ "//aos/testing:googletest",
+ "//aos/testing:path",
+ "@com_github_google_flatbuffers//src:flatc_library",
+ ],
+)
+
+cc_test(
+ name = "static_flatbuffers_fuzz_test",
+ # This does a combinatoric sweep over a variety of things, and can end up taking a while.
+ timeout = "eternal",
+ srcs = ["static_flatbuffers_fuzz_test.cc"],
+ data = ["//aos/flatbuffers/test_dir:test_data"],
+ deps = [
+ ":builder",
+ ":static_flatbuffers",
+ ":test_fbs",
+ "//aos:flatbuffers",
+ "//aos:json_to_flatbuffer",
+ "//aos/flatbuffers/test_dir:type_coverage_fbs",
+ "//aos/testing:googletest",
+ "//aos/testing:path",
+ ],
+)
+
+cc_binary(
+ name = "generate",
+ srcs = ["generate.cc"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":static_flatbuffers",
+ "//aos:flatbuffers",
+ "//aos:init",
+ "//aos:json_to_flatbuffer",
+ "//aos/util:file",
+ "@com_github_google_flatbuffers//:flatbuffers",
+ ],
+)
+
+sh_binary(
+ name = "generate_wrapper",
+ srcs = ["generate.sh"],
+ data = [
+ ":generate",
+ "//:.clang-format",
+ "@llvm_k8//:bin",
+ ],
+ visibility = ["//visibility:public"],
+ deps = ["@bazel_tools//tools/bash/runfiles"],
+)
+
+static_flatbuffer(
+ name = "test_fbs",
+ src = "test.fbs",
+ deps = ["//aos/flatbuffers/test_dir:include_fbs"],
+)
+
+filegroup(
+ name = "test_static_file",
+ srcs = ["test_static.h"],
+ visibility = [":__subpackages__"],
+)
diff --git a/aos/flatbuffers/base.cc b/aos/flatbuffers/base.cc
new file mode 100644
index 0000000..697f837
--- /dev/null
+++ b/aos/flatbuffers/base.cc
@@ -0,0 +1,212 @@
+#include "aos/flatbuffers/base.h"
+namespace aos::fbs {
+
+namespace {
+void *DereferenceOffset(uoffset_t *offset) {
+ return reinterpret_cast<uint8_t *>(offset) + *offset;
+}
+} // namespace
+
+bool ResizeableObject::InsertBytes(void *insertion_point, size_t bytes,
+ SetZero set_zero) {
+ // See comments on InsertBytes() declaration and in FixObjects()
+ // implementation below.
+ CHECK_LT(buffer_.data(), reinterpret_cast<const uint8_t *>(insertion_point))
+ << ": Insertion may not be prior to the start of the buffer.";
+ // Check that we started off with a properly aligned size.
+ // Doing this CHECK earlier is tricky because if done in the constructor then
+ // it executes prior to the Alignment() implementation being available.
+ CHECK_EQ(0u, buffer_.size() % Alignment());
+ // Note that we will round up the size to the current alignment, so that we
+ // ultimately end up only adjusting the buffer size by a multiple of its
+ // alignment, to avoid having to do any more complicated bookkeeping.
+ const size_t aligned_bytes = PaddedSize(bytes, Alignment());
+ if (parent_ != nullptr) {
+ return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
+ } else {
+ std::optional<std::span<uint8_t>> new_buffer =
+ CHECK_NOTNULL(allocator_)
+ ->InsertBytes(insertion_point, aligned_bytes, Alignment(),
+ set_zero);
+ if (!new_buffer.has_value()) {
+ return false;
+ }
+ UpdateBuffer(new_buffer.value(),
+ new_buffer.value().data() +
+ (reinterpret_cast<const uint8_t *>(insertion_point) -
+ buffer_.data()),
+ aligned_bytes);
+ return true;
+ }
+}
+
+void ResizeableObject::UpdateBuffer(std::span<uint8_t> new_buffer,
+ void *modification_point,
+ ssize_t bytes_inserted) {
+ buffer_ = new_buffer;
+ FixObjects(modification_point, bytes_inserted);
+ ObserveBufferModification();
+}
+
+std::span<uint8_t> ResizeableObject::BufferForObject(
+ size_t absolute_offset, size_t size, size_t terminal_alignment) {
+ const size_t padded_size = PaddedSize(size, terminal_alignment);
+ std::span<uint8_t> padded_buffer =
+ internal::GetSubSpan(buffer_, absolute_offset, padded_size);
+ std::span<uint8_t> object_buffer =
+ internal::GetSubSpan(padded_buffer, 0, size);
+ std::span<uint8_t> padding = internal::GetSubSpan(padded_buffer, size);
+ internal::ClearSpan(padding);
+ return object_buffer;
+}
+
+void ResizeableObject::FixObjects(void *modification_point,
+ ssize_t bytes_inserted) {
+ CHECK_EQ(bytes_inserted % Alignment(), 0u)
+ << ": We only support inserting N * Alignment() bytes at a time. This "
+ "may change in the future.";
+ for (size_t index = 0; index < NumberOfSubObjects(); ++index) {
+ SubObject object = GetSubObject(index);
+ const void *const absolute_offset =
+ PointerForAbsoluteOffset(*object.absolute_offset);
+ if (absolute_offset >= modification_point &&
+ object.inline_entry < modification_point) {
+ if (*object.inline_entry != 0) {
+ CHECK_EQ(static_cast<const void *>(
+ static_cast<const uint8_t *>(absolute_offset) +
+ CHECK_NOTNULL(object.object)->AbsoluteOffsetOffset()),
+ DereferenceOffset(object.inline_entry));
+ *object.inline_entry += bytes_inserted;
+ CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
+ << ": Encountered offset which points to a now-deleted section "
+ "of memory. The offset should have been null'd out prior to "
+ "deleting the memory.";
+ } else {
+ CHECK_EQ(nullptr, object.object);
+ }
+ *object.absolute_offset += bytes_inserted;
+ }
+ // We only need to update the object's buffer if it currently exists.
+ if (object.object != nullptr) {
+ std::span<uint8_t> subbuffer = BufferForObject(
+ *object.absolute_offset, object.object->buffer_.size(),
+ object.object->Alignment());
+ // By convention (enforced in InsertBytes()), the modification_point shall
+ // not be at the start of the subobject's data buffer; it may be the byte
+ // just past the end of the buffer. This makes it unambiguous which
+ // subobject(s) should get the extra space when a buffer size increase is
+ // requested on the edge of a buffer.
+ if (subbuffer.data() < modification_point &&
+ (subbuffer.data() + subbuffer.size()) >= modification_point) {
+ subbuffer = {subbuffer.data(), subbuffer.size() + bytes_inserted};
+ }
+ object.object->UpdateBuffer(subbuffer, modification_point,
+ bytes_inserted);
+ }
+ }
+}
+
+std::optional<std::span<uint8_t>> VectorAllocator::Allocate(
+ size_t size, size_t /*alignment*/, SetZero set_zero) {
+ CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
+ buffer_.resize(size);
+ if (set_zero == SetZero::kYes) {
+ memset(buffer_.data(), 0, buffer_.size());
+ }
+ return std::span<uint8_t>{buffer_.data(), buffer_.size()};
+}
+
+std::optional<std::span<uint8_t>> VectorAllocator::InsertBytes(
+ void *insertion_point, size_t bytes, size_t /*alignment*/, SetZero) {
+ const ssize_t insertion_index =
+ reinterpret_cast<uint8_t *>(insertion_point) - buffer_.data();
+ CHECK_LE(0, insertion_index);
+ CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
+ buffer_.insert(buffer_.begin() + insertion_index, bytes, 0);
+ return std::span<uint8_t>{buffer_.data(), buffer_.size()};
+}
+
+std::span<uint8_t> VectorAllocator::RemoveBytes(
+ std::span<uint8_t> remove_bytes) {
+ const ssize_t removal_index = remove_bytes.data() - buffer_.data();
+ CHECK_LE(0, removal_index);
+ CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
+ CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
+ buffer_.erase(buffer_.begin() + removal_index,
+ buffer_.begin() + removal_index + remove_bytes.size());
+ return {buffer_.data(), buffer_.size()};
+}
+
+std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
+ size_t /*alignment*/,
+ SetZero set_zero) {
+ CHECK(!allocated_);
+ if (size > buffer_.size()) {
+ return std::nullopt;
+ }
+ if (set_zero == SetZero::kYes) {
+ memset(buffer_.data(), 0, buffer_.size());
+ }
+ allocated_size_ = size;
+ allocated_ = true;
+ return internal::GetSubSpan(buffer_, buffer_.size() - size);
+}
+
+std::optional<std::span<uint8_t>> SpanAllocator::InsertBytes(
+ void *insertion_point, size_t bytes, size_t /*alignment*/,
+ SetZero set_zero) {
+ uint8_t *insertion_point_typed = reinterpret_cast<uint8_t *>(insertion_point);
+ const ssize_t insertion_index = insertion_point_typed - buffer_.data();
+ CHECK_LE(0, insertion_index);
+ CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
+ const size_t new_size = allocated_size_ + bytes;
+ if (new_size > buffer_.size()) {
+ VLOG(1) << ": Insufficient space to grow by " << bytes << " bytes.";
+ return std::nullopt;
+ }
+ const size_t old_start_index = buffer_.size() - allocated_size_;
+ const size_t new_start_index = buffer_.size() - new_size;
+ memmove(buffer_.data() + new_start_index, buffer_.data() + old_start_index,
+ insertion_index - old_start_index);
+ if (set_zero == SetZero::kYes) {
+ memset(insertion_point_typed - bytes, 0, bytes);
+ }
+ allocated_size_ = new_size;
+ return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
+}
+
+std::span<uint8_t> SpanAllocator::RemoveBytes(std::span<uint8_t> remove_bytes) {
+ const ssize_t removal_index = remove_bytes.data() - buffer_.data();
+ const size_t old_start_index = buffer_.size() - allocated_size_;
+ CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
+ CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
+ CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
+ uint8_t *old_buffer_start = buffer_.data() + old_start_index;
+ memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
+ removal_index - old_start_index);
+ allocated_size_ -= remove_bytes.size();
+ return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
+}
+
+void SpanAllocator::Deallocate(std::span<uint8_t>) {
+ CHECK(allocated_) << ": Called Deallocate() without a prior allocation.";
+ allocated_ = false;
+}
+
+namespace internal {
+std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
+ constexpr size_t kRowSize = 8u;
+ for (size_t index = 0; index < span.size(); index += kRowSize) {
+ os << std::hex << std::setw(4) << std::setfill('0') << std::uppercase
+ << index << ": ";
+ for (size_t subindex = 0;
+ subindex < kRowSize && (index + subindex) < span.size(); ++subindex) {
+ os << std::setw(2) << static_cast<int>(span[index + subindex]) << " ";
+ }
+ os << "\n";
+ }
+ os << std::resetiosflags(std::ios_base::basefield | std::ios_base::uppercase);
+ return os;
+}
+} // namespace internal
+} // namespace aos::fbs
diff --git a/aos/flatbuffers/base.h b/aos/flatbuffers/base.h
new file mode 100644
index 0000000..87e07c5
--- /dev/null
+++ b/aos/flatbuffers/base.h
@@ -0,0 +1,312 @@
+#ifndef AOS_FLATBUFFERS_BASE_H_
+#define AOS_FLATBUFFERS_BASE_H_
+#include <iomanip>
+#include <memory>
+#include <optional>
+#include <span>
+
+#include "flatbuffers/base.h"
+#include "glog/logging.h"
+namespace aos::fbs {
+using ::flatbuffers::soffset_t;
+using ::flatbuffers::uoffset_t;
+using ::flatbuffers::voffset_t;
+
+// Returns the smallest multiple of alignment that is greater than or equal to
+// size.
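+// For example, PaddedSize(5, 4) == 8 and PaddedSize(4, 4) == 4 (these values
+// are exercised in base_test.cc).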
+constexpr size_t PaddedSize(size_t size, size_t alignment) {
+ // We can be clever with bitwise operations by assuming that alignment is a
+ // power of two. Or we can just be clearer about what we mean and eat a few
+ // integer divides.
+ return (((size - 1) / alignment) + 1) * alignment;
+}
+
+// Used as a parameter to methods where we are messing with memory and may or
+// may not want to clear it to zeroes.
+enum class SetZero { kYes, kNo };
+
+class Allocator;
+
+// Parent type of any object that may need to dynamically change size at
+// runtime. Used by the static table and vector types to request additional
+// blocks of memory when needed.
+//
+// The way that this works is that every ResizeableObject has some number of
+// children that are themselves ResizeableObject's and whose memory is entirely
+// contained within their parent's memory. A ResizeableObject without a parent
+// instead has an Allocator that it can use to allocate additional blocks
+// of memory. Whenever a child needs to grow in size, it will make a call to
+// InsertBytes() on its parent, which will percolate up until InsertBytes() gets
+// called on the root allocator. If the insert succeeds, then every single child
+// through the entire tree will get notified (this is because the allocator may
+// have shifted the entire memory buffer, so any pointers may need to be
+// updated). Child types will provide implementations of the GetObjects() method
+// to both allow tree traversal as well as to allow the various internal offsets
+// to be updated appropriately.
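+//
+// As a concrete example using the types introduced alongside this class: a
+// Builder owns the Allocator and acts as the root ResizeableObject; the
+// generated table object is its child; and a Vector member of that table is
+// a grandchild. When the Vector needs to grow it calls InsertBytes() on the
+// table, which forwards the request up to the Builder, which in turn asks
+// the Allocator for more memory; once the insertion succeeds, FixObjects()
+// walks back through the tree updating every object's offsets and buffers.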
+class ResizeableObject {
+ public:
+ // Returns the underlying memory buffer into which the flatbuffer will be
+ // serialized.
+ std::span<uint8_t> buffer() { return buffer_; }
+ std::span<const uint8_t> buffer() const { return buffer_; }
+
+ // Updates the underlying memory buffer to new_buffer, with an indication of
+ // where bytes were inserted/removed from the buffer. It is assumed that
+ // new_buffer already has the state of the serialized flatbuffer
+ // copied into it.
+ // * When bytes have been inserted, modification_point will point to the first
+ // of the inserted bytes in new_buffer and bytes_inserted will be the number
+ // of new bytes.
+ // * Buffer shrinkage is not currently supported.
+ // * When bytes_inserted is zero, modification_point is ignored.
+ void UpdateBuffer(std::span<uint8_t> new_buffer, void *modification_point,
+ ssize_t bytes_inserted);
+
+ protected:
+ // Data associated with a sub-object of this object.
+ struct SubObject {
+ // A direct pointer to the inline entry in the flatbuffer table data. The
+ // pointer must be valid, but the entry itself may be zero if the object is
+ // not actually populated.
+ // If *inline_entry is non-zero, this will get updated if any new memory got
+ // added/removed in-between inline_entry and the actual data pointed to by
+ // inline_entry.
+ uoffset_t *inline_entry;
+ // The actual child object. Should be nullptr if *inline_entry is zero; must
+ // be valid if *inline_entry is non-zero.
+ ResizeableObject *object;
+ // The nominal offset from buffer_.data() to object->buffer_.data().
+ // Must be provided, and must always be valid, even if *inline_entry is
+ // zero.
+ // I.e., the following holds when object is not nullptr:
+ // SubObject object = parent.GetSubObject(index);
+ // CHECK_EQ(parent.buffer().data() + *object.absolute_offset,
+ // object.object->buffer().data());
+ size_t *absolute_offset;
+ };
+
+ ResizeableObject(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : buffer_(buffer), parent_(parent) {}
+ ResizeableObject(std::span<uint8_t> buffer, Allocator *allocator)
+ : buffer_(buffer), allocator_(allocator) {}
+ ResizeableObject(std::span<uint8_t> buffer,
+ std::unique_ptr<Allocator> allocator)
+ : buffer_(buffer),
+ owned_allocator_(std::move(allocator)),
+ allocator_(owned_allocator_.get()) {}
+ ResizeableObject(const ResizeableObject &) = delete;
+ ResizeableObject &operator=(const ResizeableObject &) = delete;
+ // Users do not end up using the move constructor; however, it is needed to
+ // handle the fact that a ResizeableObject may be a member of an std::vector
+ // in the various generated types.
+ ResizeableObject(ResizeableObject &&other)
+ : buffer_(other.buffer_),
+ owned_allocator_(std::move(other.owned_allocator_)),
+ allocator_(owned_allocator_.get()) {
+ other.buffer_ = {};
+ other.allocator_ = nullptr;
+ }
+ // Required alignment of this object.
+ virtual size_t Alignment() const = 0;
+ // Offset from the start of buffer() to the actual start of the object in
+ // question (this is important for vectors, where the vector itself cannot
+ // have internal padding, and so the start of the vector may be offset from
+ // the start of the buffer to handle alignment).
+ virtual size_t AbsoluteOffsetOffset() const = 0;
+ // Causes bytes bytes to be inserted between insertion_point - 1 and
+ // insertion_point.
+ // If requested, the new bytes will be cleared to zero; otherwise they will be
+ // left uninitialized.
+ // The insertion_point may not be equal to this->buffer_.data(); it may be a
+ // pointer just past the end of the buffer. This is to ease the
+ // implementation, and is merely a requirement that any buffer growth occur
+ // only on the inside or past the end of the vector, and not prior to the
+ // start of the vector.
+ // Returns true on success, false on failure (e.g., if the allocator has no
+ // memory available).
+ bool InsertBytes(void *insertion_point, size_t bytes, SetZero set_zero);
+ // Called *after* the internal buffer_ has been swapped out and *after* the
+ // object tree has been traversed and fixed.
+ virtual void ObserveBufferModification() {}
+
+ // Returns the index'th sub object of this object.
+ // index must be less than NumberOfSubObjects().
+ // This will include objects which are not currently populated but which may
+ // be populated in the future (so that we can track what the necessary offsets
+ // are when we do populate it).
+ virtual SubObject GetSubObject(size_t index) = 0;
+ // Number of sub-objects of this object. May be zero.
+ virtual size_t NumberOfSubObjects() const = 0;
+
+ // Treating the supplied absolute_offset as an offset into the internal memory
+ // buffer, return the pointer to the underlying memory.
+ const void *PointerForAbsoluteOffset(const size_t absolute_offset) {
+ return buffer_.data() + absolute_offset;
+ }
+ // Returns a span at the requested offset into the buffer. terminal_alignment
+ // does not align the start of the buffer; instead, it ensures that the memory
+ // from absolute_offset + size until the next multiple of terminal_alignment
+ // is set to all zeroes.
+ std::span<uint8_t> BufferForObject(size_t absolute_offset, size_t size,
+ size_t terminal_alignment);
+ // When memory has been inserted/removed, this iterates over the sub-objects
+ // and notifies/adjusts them appropriately.
+ // This will be called after buffer_ has been updated, and:
+ // * For insertion, modification_point will point into the new buffer_ to the
+ // first of the newly inserted bytes.
+ // * Removal is not entirely implemented yet, but for removal,
+ // modification_point should point to the first byte after the removed
+ // chunk.
+ void FixObjects(void *modification_point, ssize_t bytes_inserted);
+
+ Allocator *allocator() { return allocator_; }
+
+ std::span<uint8_t> buffer_;
+
+ private:
+ ResizeableObject *parent_ = nullptr;
+ std::unique_ptr<Allocator> owned_allocator_;
+ Allocator *allocator_ = nullptr;
+};
+
+// Interface to represent a memory allocator for use with ResizeableObject.
+class Allocator {
+ public:
+ virtual ~Allocator() {}
+ // Allocates memory of the requested size and alignment. The alignment is
+ // only a hint and is not guaranteed.
+ // On failure to allocate the requested size, returns nullopt; never
+ // returns a partial span.
+ // The span will be zero-initialized if set_zero is SetZero::kYes.
+ // Once Allocate() has been called once, it may not be called again until
+ // Deallocate() has been called. In order to adjust the size of the buffer,
+ // call InsertBytes() and RemoveBytes().
+ [[nodiscard]] virtual std::optional<std::span<uint8_t>> Allocate(
+ size_t size, size_t alignment_hint, SetZero set_zero) = 0;
+ // Identical to Allocate(), but dies on failure.
+ [[nodiscard]] std::span<uint8_t> AllocateOrDie(size_t size,
+ size_t alignment_hint,
+ SetZero set_zero) {
+ std::optional<std::span<uint8_t>> span =
+ Allocate(size, alignment_hint, set_zero);
+ CHECK(span.has_value()) << ": Failed to allocate " << size << " bytes.";
+ CHECK_EQ(size, span.value().size())
+ << ": Failed to allocate " << size << " bytes.";
+ return span.value();
+ }
+ // Increases the size of the buffer by inserting bytes bytes immediately
+ // before insertion_point.
+ // alignment_hint specifies the alignment of the entire buffer, not of the
+ // inserted bytes.
+ // The returned span may or may not overlap with the original buffer in
+ // memory.
+ // The inserted bytes will be set to zero or uninitialized depending on the
+ // value of SetZero.
+ // insertion_point must be in (or 1 past the end of) the buffer.
+ // Returns nullopt on a failure to allocate the requested bytes.
+ [[nodiscard]] virtual std::optional<std::span<uint8_t>> InsertBytes(
+ void *insertion_point, size_t bytes, size_t alignment_hint,
+ SetZero set_zero) = 0;
+ // Removes the requested span of bytes from the buffer, returning the new
+ // buffer.
+ [[nodiscard]] virtual std::span<uint8_t> RemoveBytes(
+ std::span<uint8_t> remove_bytes) = 0;
+ // Deallocates the currently allocated buffer. The provided buffer must match
+ // the latest version of the buffer.
+ // If Allocate() has been called, Deallocate() must be called prior to
+ // destroying the Allocator.
+ virtual void Deallocate(std::span<uint8_t> buffer) = 0;
+};
+
+// Allocator that uses an std::vector to allow arbitrary-sized allocations.
+// Does not provide any alignment guarantees.
+class VectorAllocator : public Allocator {
+ public:
+ VectorAllocator() {}
+ ~VectorAllocator() {
+ CHECK(buffer_.empty())
+ << ": Must deallocate before destroying the VectorAllocator.";
+ }
+ std::optional<std::span<uint8_t>> Allocate(size_t size, size_t /*alignment*/,
+ SetZero set_zero) override;
+ std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
+ size_t bytes,
+ size_t /*alignment*/,
+ SetZero /*set_zero*/) override;
+ std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;
+
+ void Deallocate(std::span<uint8_t>) override {
+ CHECK(!buffer_.empty())
+ << ": Called Deallocate() without a prior allocation.";
+ buffer_.resize(0);
+ }
+
+ private:
+ std::vector<uint8_t> buffer_;
+};
+
+// Allocator that allocates all of its memory within a provided span. To match
+// the behavior of the FlatBufferBuilder, it will start its allocations at the
+// end of the provided span.
+//
+// Attempts to allocate more memory than is present in the provided buffer will
+// fail.
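+//
+// A minimal usage sketch (the backing storage here is arbitrary):
+//   alignas(64) uint8_t storage[1024];
+//   SpanAllocator allocator({storage, sizeof(storage)});
+//   // The allocator can now be handed to a Builder, or used directly via
+//   // Allocate()/Deallocate().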
+class SpanAllocator : public Allocator {
+ public:
+ SpanAllocator(std::span<uint8_t> buffer) : buffer_(buffer) {}
+ ~SpanAllocator() {
+ CHECK(!allocated_)
+ << ": Must deallocate before destroying the SpanAllocator.";
+ }
+
+ std::optional<std::span<uint8_t>> Allocate(size_t size, size_t /*alignment*/,
+ SetZero set_zero) override;
+
+ std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
+ size_t bytes,
+ size_t /*alignment*/,
+ SetZero set_zero) override;
+
+ std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;
+
+ void Deallocate(std::span<uint8_t>) override;
+
+ private:
+ std::span<uint8_t> buffer_;
+ bool allocated_ = false;
+ size_t allocated_size_ = 0;
+};
+
+namespace internal {
+std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os);
+inline void ClearSpan(std::span<uint8_t> span) {
+ memset(span.data(), 0, span.size());
+}
+// std::span::subspan does not do bounds checking.
+template <typename T>
+inline std::span<T> GetSubSpan(std::span<T> span, size_t offset,
+ size_t count = std::dynamic_extent) {
+ if (count != std::dynamic_extent) {
+ CHECK_LE(offset + count, span.size());
+ }
+ return span.subspan(offset, count);
+}
+// Normal users should never move any of the special flatbuffer types that we
+// provide. However, they do need to be moveable in order to support the use of
+// resizeable vectors. As such, we make all the move constructors private and
+// friend the TableMover struct. The TableMover struct is then used in places
+// that need to have moveable objects. It should never be used by a user.
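+// For example, Builder<T> stores its table as an internal::TableMover<T>
+// member and accesses the table through the mover's t field, while the
+// generated tables declare their move constructors private and friend this
+// struct.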
+template <typename T>
+struct TableMover {
+ TableMover(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : t(buffer, parent) {}
+ TableMover(std::span<uint8_t> buffer, Allocator *allocator)
+ : t(buffer, allocator) {}
+ TableMover(std::span<uint8_t> buffer, ::std::unique_ptr<Allocator> allocator)
+ : t(buffer, ::std::move(allocator)) {}
+ TableMover(TableMover &&) = default;
+ T t;
+};
+} // namespace internal
+} // namespace aos::fbs
+#endif // AOS_FLATBUFFERS_BASE_H_
diff --git a/aos/flatbuffers/base_test.cc b/aos/flatbuffers/base_test.cc
new file mode 100644
index 0000000..522882d
--- /dev/null
+++ b/aos/flatbuffers/base_test.cc
@@ -0,0 +1,254 @@
+#include "aos/flatbuffers/base.h"
+
+#include "gtest/gtest.h"
+
+namespace aos::fbs::testing {
+// Tests that PaddedSize() behaves as expected.
+TEST(BaseTest, PaddedSize) {
+ EXPECT_EQ(0, PaddedSize(0, 4));
+ EXPECT_EQ(4, PaddedSize(4, 4));
+ EXPECT_EQ(8, PaddedSize(5, 4));
+ EXPECT_EQ(8, PaddedSize(6, 4));
+ EXPECT_EQ(8, PaddedSize(7, 4));
+}
+
+inline constexpr size_t kDefaultSize = 16;
+template <typename T>
+class AllocatorTest : public ::testing::Test {
+ protected:
+ AllocatorTest() : allocator_(std::make_unique<T>()) {}
+ std::vector<uint8_t> buffer_;
+ // unique_ptr so that we can destroy the allocator at will.
+ std::unique_ptr<T> allocator_;
+};
+
+template <>
+AllocatorTest<SpanAllocator>::AllocatorTest()
+ : buffer_(kDefaultSize),
+ allocator_(std::make_unique<SpanAllocator>(
+ std::span<uint8_t>{buffer_.data(), buffer_.size()})) {}
+
+using AllocatorTypes = ::testing::Types<SpanAllocator, VectorAllocator>;
+TYPED_TEST_SUITE(AllocatorTest, AllocatorTypes);
+
+// Tests that we can create an allocator and not use it.
+TYPED_TEST(AllocatorTest, UnusedAllocator) {}
+
+// Tests that a simple allocate works.
+TYPED_TEST(AllocatorTest, BasicAllocate) {
+ std::span<uint8_t> span =
+ this->allocator_->Allocate(kDefaultSize, 4, SetZero::kYes).value();
+ ASSERT_EQ(kDefaultSize, span.size());
+ // We set SetZero::kYes; it should be zero-initialized.
+ EXPECT_EQ(kDefaultSize, std::count(span.begin(), span.end(), 0));
+ this->allocator_->Deallocate(span);
+}
+
+// Tests that we can insert bytes into an arbitrary spot in the buffer.
+TYPED_TEST(AllocatorTest, InsertBytes) {
+ const size_t half_size = kDefaultSize / 2;
+ std::span<uint8_t> span =
+ this->allocator_->Allocate(half_size, 4, SetZero::kYes).value();
+ ASSERT_EQ(half_size, span.size());
+ // Set the span with some sentinel values so that we can detect that the
+ // insertion occurred correctly.
+ for (size_t ii = 0; ii < span.size(); ++ii) {
+ span[ii] = ii + 1;
+ }
+
+ // Insert new bytes such that one old byte will still be at the start.
+ span = this->allocator_
+ ->InsertBytes(span.data() + 1u, half_size, 0, SetZero::kYes)
+ .value();
+ ASSERT_EQ(kDefaultSize, span.size());
+ size_t index = 0;
+ EXPECT_EQ(1u, span[index]);
+ index++;
+ for (; index < half_size + 1u; ++index) {
+ EXPECT_EQ(0u, span[index]);
+ }
+ for (; index < span.size(); ++index) {
+ EXPECT_EQ(index - half_size + 1, span[index]);
+ }
+ this->allocator_->Deallocate(span);
+}
+
+// Tests that we can remove bytes from an arbitrary spot in the buffer.
+TYPED_TEST(AllocatorTest, RemoveBytes) {
+ const size_t half_size = kDefaultSize / 2;
+ std::span<uint8_t> span =
+ this->allocator_->Allocate(kDefaultSize, 4, SetZero::kYes).value();
+ ASSERT_EQ(kDefaultSize, span.size());
+ // Set the span with some sentinel values so that we can detect that the
+ // removal occurred correctly.
+ for (size_t ii = 0; ii < span.size(); ++ii) {
+ span[ii] = ii + 1;
+ }
+
+ // Remove bytes such that one old byte will remain at the start, and a chunk
+ // of 8 bytes will be cut out after that.
+ span = this->allocator_->RemoveBytes(span.subspan(1, half_size));
+ ASSERT_EQ(half_size, span.size());
+ size_t index = 0;
+ EXPECT_EQ(1u, span[index]);
+ index++;
+ for (; index < span.size(); ++index) {
+ EXPECT_EQ(index + half_size + 1, span[index]);
+ }
+ this->allocator_->Deallocate(span);
+}
+
+// Tests that if we fail to deallocate, we fail during destruction.
+TYPED_TEST(AllocatorTest, NoDeallocate) {
+ EXPECT_DEATH(
+ {
+ EXPECT_EQ(
+ 4, this->allocator_->Allocate(4, 4, SetZero::kYes).value().size());
+ this->allocator_.reset();
+ },
+ "Must deallocate");
+}
+
+// Tests that if we never allocate, we cannot deallocate.
+TYPED_TEST(AllocatorTest, NoAllocateThenDeallocate) {
+ EXPECT_DEATH(this->allocator_->Deallocate(std::span<uint8_t>()),
+ "prior allocation");
+}
+
+// Tests that if we attempt to allocate more than the backing span allows,
+// we correctly return nullopt.
+TEST(SpanAllocatorTest, OverAllocate) {
+ std::vector<uint8_t> buffer(kDefaultSize);
+ SpanAllocator allocator({buffer.data(), buffer.size()});
+ EXPECT_FALSE(
+ allocator.Allocate(kDefaultSize + 1u, 0, SetZero::kYes).has_value());
+}
+
+// Tests that if we attempt to insert more than the backing span allows,
+// we correctly return nullopt.
+TEST(SpanAllocatorTest, OverInsert) {
+ std::vector<uint8_t> buffer(kDefaultSize);
+ SpanAllocator allocator({buffer.data(), buffer.size()});
+ std::span<uint8_t> span =
+ allocator.Allocate(kDefaultSize, 0, SetZero::kYes).value();
+ EXPECT_EQ(kDefaultSize, span.size());
+ EXPECT_FALSE(
+ allocator.InsertBytes(span.data(), 1u, 0, SetZero::kYes).has_value());
+ allocator.Deallocate(span);
+}
+
+// Because we really aren't meant to instantiate ResizeableObjects directly (if
+// nothing else, it has pure virtual member functions), define a testing
+// implementation.
+
+class TestResizeableObject : public ResizeableObject {
+ public:
+ TestResizeableObject(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : ResizeableObject(buffer, parent) {}
+ TestResizeableObject(std::span<uint8_t> buffer, Allocator *allocator)
+ : ResizeableObject(buffer, allocator) {}
+ virtual ~TestResizeableObject() {}
+ using ResizeableObject::SubObject;
+ bool InsertBytes(void *insertion_point, size_t bytes) {
+ return ResizeableObject::InsertBytes(insertion_point, bytes, SetZero::kYes);
+ }
+ TestResizeableObject(TestResizeableObject &&) = default;
+
+ struct TestObject {
+ uoffset_t inline_entry_offset;
+ std::unique_ptr<TestResizeableObject> object;
+ size_t absolute_offset;
+ };
+
+ // Adds a new object of the requested size.
+ void AddEntry(uoffset_t inline_entry_offset, size_t absolute_offset,
+ size_t buffer_size, bool set_object) {
+ *reinterpret_cast<uoffset_t *>(buffer_.data() + inline_entry_offset) =
+ set_object ? (absolute_offset - inline_entry_offset) : 0;
+ objects_.emplace_back(
+ TestObject{inline_entry_offset, nullptr, absolute_offset});
+ if (set_object) {
+ objects_.back().object = std::make_unique<TestResizeableObject>(
+ buffer().subspan(absolute_offset, buffer_size), this);
+ }
+ }
+
+ size_t NumberOfSubObjects() const override { return objects_.size(); }
+ SubObject GetSubObject(size_t index) override {
+ TestObject &subobject = objects_.at(index);
+ return {reinterpret_cast<uoffset_t *>(buffer_.data() +
+ subobject.inline_entry_offset),
+ subobject.object.get(), &subobject.absolute_offset};
+ }
+
+ TestObject &GetObject(size_t index) { return objects_.at(index); }
+
+ size_t Alignment() const override { return 64; }
+ size_t AbsoluteOffsetOffset() const override { return 0; }
+
+ private:
+ std::vector<TestObject> objects_;
+};
+
+class ResizeableObjectTest : public ::testing::Test {
+ protected:
+ static constexpr size_t kInitialSize = 128;
+ ResizeableObjectTest()
+ : object_(allocator_.Allocate(kInitialSize, 4, SetZero::kYes).value(),
+ &allocator_) {}
+ ~ResizeableObjectTest() { allocator_.Deallocate(object_.buffer()); }
+ VectorAllocator allocator_;
+ TestResizeableObject object_;
+};
+
+// Tests that if we create an object and then do nothing with it, nothing
+// untoward happens.
+TEST_F(ResizeableObjectTest, DoNothing) {}
+
+// Test that when we move the ResizeableObject we clear the reference to the old
+// buffer.
+TEST_F(ResizeableObjectTest, Move) {
+ TestResizeableObject target_object = std::move(object_);
+ ASSERT_EQ(0u, object_.buffer().size());
+ ASSERT_EQ(kInitialSize, target_object.buffer().size());
+}
+
+// Tests that the pathways for resizing a nested ResizeableObject work.
+TEST_F(ResizeableObjectTest, ResizeNested) {
+ constexpr size_t kAbsoluteOffset = 64;
+ object_.AddEntry(4, kAbsoluteOffset, 64, true);
+ TestResizeableObject *subobject = object_.GetObject(0).object.get();
+ object_.AddEntry(0, kAbsoluteOffset, 64, false);
+ EXPECT_EQ(60, *object_.GetSubObject(0).inline_entry);
+ EXPECT_EQ(0, *object_.GetSubObject(1).inline_entry);
+ EXPECT_EQ(64, object_.GetObject(0).object->buffer().data() -
+ object_.buffer().data());
+
+ constexpr size_t kInsertBytes = 5;
+ // The insert should succeed.
+ ASSERT_TRUE(
+ subobject->InsertBytes(subobject->buffer().data() + 1u, kInsertBytes));
+ // We should now observe the size of the buffers increasing, but the start
+ // _not_ moving.
+ // We should've rounded the insert up to the alignment we are using (64 bytes).
+ EXPECT_EQ(kInitialSize + 64, object_.buffer().size());
+ EXPECT_EQ(128, subobject->buffer().size());
+ EXPECT_EQ(60, *object_.GetSubObject(0).inline_entry);
+ EXPECT_EQ(0, *object_.GetSubObject(1).inline_entry);
+ EXPECT_EQ(kAbsoluteOffset, object_.GetObject(0).absolute_offset);
+ EXPECT_EQ(kAbsoluteOffset, object_.GetObject(1).absolute_offset);
+
+ // And next we insert before the subobjects, so that we can see their offsets
+ // shift. The insert should succeed.
+ ASSERT_TRUE(object_.InsertBytes(subobject->buffer().data(), kInsertBytes));
+ EXPECT_EQ(kInitialSize + 2 * 64, object_.buffer().size());
+ EXPECT_EQ(128, subobject->buffer().size());
+ EXPECT_EQ(60 + 64, *object_.GetSubObject(0).inline_entry);
+ // The unpopulated object's inline entry should not have changed since
+ // it was zero.
+ EXPECT_EQ(0, *object_.GetSubObject(1).inline_entry);
+ EXPECT_EQ(kAbsoluteOffset + 64, object_.GetObject(0).absolute_offset);
+ EXPECT_EQ(kAbsoluteOffset + 64, object_.GetObject(1).absolute_offset);
+}
+
+} // namespace aos::fbs::testing
diff --git a/aos/flatbuffers/builder.h b/aos/flatbuffers/builder.h
new file mode 100644
index 0000000..b556ed3
--- /dev/null
+++ b/aos/flatbuffers/builder.h
@@ -0,0 +1,120 @@
+#ifndef AOS_FLATBUFFERS_BUILDER_H_
+#define AOS_FLATBUFFERS_BUILDER_H_
+#include "aos/flatbuffers.h"
+#include "aos/flatbuffers/static_table.h"
+namespace aos::fbs {
+
+// Builder class to handle the memory for a static flatbuffer object. This
+// fulfills a similar role to the FlatBufferBuilder type in the traditional API.
+// Typical usage:
+// aos::fbs::VectorAllocator allocator;
+// Builder<TestTableStatic> builder(&allocator);
+// TestTableStatic *object = builder.get();
+// object->set_scalar(123);
+//
+// At all points you will have a valid and complete flatbuffer, so you never
+// need to call Finish() or anything. You can just directly use the flatbuffer
+// as if it is a real flatbuffer.
+template <typename T>
+class Builder final : public ResizeableObject {
+ public:
+ static constexpr size_t kBufferSize = T::kUnalignedBufferSize;
+ Builder(Allocator *allocator)
+ : ResizeableObject(
+ allocator->AllocateOrDie(kBufferSize, T::kAlign, SetZero::kNo),
+ allocator),
+ flatbuffer_start_(BufferStart(buffer_)),
+ flatbuffer_(internal::GetSubSpan(buffer_, flatbuffer_start_, T::kSize),
+ this) {
+ SetPrefix();
+ }
+ Builder(std::unique_ptr<Allocator> allocator)
+ : ResizeableObject(
+ allocator->AllocateOrDie(kBufferSize, T::kAlign, SetZero::kNo),
+ std::move(allocator)),
+ flatbuffer_start_(BufferStart(buffer_)),
+ flatbuffer_(internal::GetSubSpan(buffer_, flatbuffer_start_, T::kSize),
+ this) {
+ SetPrefix();
+ }
+ Builder(Builder &&other)
+ : ResizeableObject(std::move(other)),
+ flatbuffer_(std::move(other.flatbuffer_)) {
+ flatbuffer_start_ = other.flatbuffer_start_;
+ other.flatbuffer_start_ = 0;
+ }
+
+ ~Builder() {
+ if (allocator() != nullptr) {
+ allocator()->Deallocate(buffer_);
+ }
+ }
+
+ // Returns an object containing the current raw flatbuffer type. Note that if
+ // the allocator type allows changes to the structure/amount of allocated
+ // memory, the underlying buffer will not be stable and so the returned
+ // FlatbufferSpan may be invalidated by mutations to the flatbuffer.
+ FlatbufferSpan<typename T::Flatbuffer> AsFlatbufferSpan() {
+ return {buffer()};
+ }
+
+ // Returns true if the flatbuffer is validly constructed. Should always return
+ // true (barring some sort of memory corruption). Exposed for convenience.
+ bool Verify() { return AsFlatbufferSpan().Verify(); }
+
+ // Returns the actual object for you to operate on and construct the
+ // flatbuffer. Unlike AsFlatbufferSpan(), this will be stable.
+ T *get() { return &flatbuffer_.t; }
+
+ private:
+ size_t Alignment() const override { return flatbuffer_.t.Alignment(); }
+ size_t AbsoluteOffsetOffset() const override { return 0; }
+ size_t NumberOfSubObjects() const override { return 1; }
+ void SetPrefix() {
+ // We can't do much if the provided buffer isn't at least 4-byte aligned,
+ // because we are required to put the root table offset at the start of the
+ // buffer.
+ CHECK_EQ(reinterpret_cast<size_t>(buffer_.data()) % alignof(uoffset_t), 0u);
+ *reinterpret_cast<uoffset_t *>(buffer_.data()) = flatbuffer_start_;
+ }
+ // Because the allocator API doesn't provide a way for us to request a
+ // strictly aligned buffer, manually align the start of the actual flatbuffer
+ // data if needed.
+ static size_t BufferStart(std::span<uint8_t> buffer) {
+ return aos::fbs::PaddedSize(
+ reinterpret_cast<size_t>(buffer.data()) + sizeof(uoffset_t),
+ T::kAlign) -
+ reinterpret_cast<size_t>(buffer.data());
+ }
+
+ // Some allocators don't do a great job of supporting arbitrary alignments; if
+ // the alignment of the buffer changes, we need to reshuffle everything to
+ // continue guaranteeing alignment.
+ void ObserveBufferModification() override {
+ const size_t new_start = BufferStart(buffer_);
+ if (new_start != flatbuffer_start_) {
+ const size_t used_size = flatbuffer_.t.buffer().size();
+ CHECK_LT(flatbuffer_start_ + used_size, buffer_.size());
+ CHECK_LT(new_start + used_size, buffer_.size());
+ memmove(buffer_.data() + new_start, buffer_.data() + flatbuffer_start_,
+ used_size);
+ flatbuffer_.t.UpdateBuffer(
+ internal::GetSubSpan(buffer_, new_start, used_size),
+ buffer_.data() + new_start, 0);
+ flatbuffer_start_ = new_start;
+ SetPrefix();
+ }
+ }
+ using ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t index) override {
+ CHECK_EQ(0u, index);
+ return {reinterpret_cast<uoffset_t *>(buffer_.data()), &flatbuffer_.t,
+ &flatbuffer_start_};
+ }
+ // Offset from the start of the buffer to the actual start of the flatbuffer
+ // (identical to the root offset of the flatbuffer).
+ size_t flatbuffer_start_;
+ internal::TableMover<T> flatbuffer_;
+};
+} // namespace aos::fbs
+#endif // AOS_FLATBUFFERS_BUILDER_H_
diff --git a/aos/flatbuffers/generate.bzl b/aos/flatbuffers/generate.bzl
new file mode 100644
index 0000000..ad428c3
--- /dev/null
+++ b/aos/flatbuffers/generate.bzl
@@ -0,0 +1,52 @@
+load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("@aspect_bazel_lib//lib:run_binary.bzl", "run_binary")
+
+def static_flatbuffer(name, src, visibility = None, deps = [], **kwargs):
+ """Generates the code for the static C++ flatbuffer API for the specified fbs file.
+
+ Generates a cc_library of name name that can be depended on by C++ code and other
+ static_flatbuffer rules.
+
+ The cc_library will consist of a single header named after the flatbuffer file
+ itself with a _static.h suffix (i.e., if you have a src of foo.fbs, then the
+ resulting header will be foo_static.h).
+
+ Args:
+ name: Target name.
+ src: .fbs file to generate code for.
+ visibility: Desired rule visibility.
+ deps: List of static_flatbuffer dependencies of this rule.
+ """
+ fbs_suffix = "_fbs"
+ flatbuffer_cc_library(
+ name = name + fbs_suffix,
+ srcs = [src],
+ deps = [dep + fbs_suffix for dep in deps],
+ gen_reflections = True,
+ visibility = visibility,
+ **kwargs
+ )
+
+ # Until we make this a proper rule with providers or the like, we just manage headers
+ # by having a strong convention where the header will be a function of the fbs name
+ # rather than a function of the rule name.
+ header_name = src.removesuffix(".fbs") + "_static.h"
+ reflection_out = name + fbs_suffix + "_reflection_out"
+
+ run_binary(
+ name = name + "_gen",
+ tool = "@org_frc971//aos/flatbuffers:generate_wrapper",
+ srcs = [reflection_out],
+ outs = [header_name],
+ args = ["$(execpath %s)" % (reflection_out,), "$(execpath %s)" % (header_name,)],
+ )
+ native.cc_library(
+ name = name,
+ hdrs = [header_name],
+ deps = ["@org_frc971//aos/flatbuffers:static_table", "@org_frc971//aos/flatbuffers:static_vector", name + fbs_suffix] + deps,
+ visibility = visibility,
+ )
+ native.alias(
+ name = name + "_reflection_out",
+ actual = name + fbs_suffix + "_reflection_out",
+ )
diff --git a/aos/flatbuffers/generate.cc b/aos/flatbuffers/generate.cc
new file mode 100644
index 0000000..2e081f3
--- /dev/null
+++ b/aos/flatbuffers/generate.cc
@@ -0,0 +1,24 @@
+#include "flatbuffers/reflection_generated.h"
+
+#include "aos/flatbuffers.h"
+#include "aos/flatbuffers/static_flatbuffers.h"
+#include "aos/init.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/util/file.h"
+
+DEFINE_string(reflection_bfbs, "", "Path to the .bfbs reflection file.");
+DEFINE_string(output_file, "", "Path to the output header to write.");
+
+namespace aos::fbs {
+int Main() {
+ aos::FlatbufferVector<reflection::Schema> schema =
+ aos::FileToFlatbuffer<reflection::Schema>(FLAGS_reflection_bfbs);
+ aos::util::WriteStringToFileOrDie(
+ FLAGS_output_file, GenerateCodeForRootTableFile(&schema.message()));
+ return EXIT_SUCCESS;
+}
+} // namespace aos::fbs
+int main(int argc, char *argv[]) {
+ aos::InitGoogle(&argc, &argv);
+ return aos::fbs::Main();
+}
diff --git a/aos/flatbuffers/generate.sh b/aos/flatbuffers/generate.sh
new file mode 100755
index 0000000..aecab26
--- /dev/null
+++ b/aos/flatbuffers/generate.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Wrapper script to handle codegen for the static flatbuffer API. The actual
+# work is done by the generate C++ binary, but we also run clang-format on
+# the output headers so that they are not completely unintelligible.
+
+# --- begin runfiles.bash initialization v2 ---
+# Copy-pasted from the Bazel Bash runfiles library v2.
+set -uo pipefail; f=bazel_tools/tools/bash/runfiles/runfiles.bash
+source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
+ source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
+ source "$0.runfiles/$f" 2>/dev/null || \
+ source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
+ source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
+ { echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
+# --- end runfiles.bash initialization v2 ---
+
+BFBS_FILE=$1
+OUT_FILE=$2
+
+$(rlocation org_frc971/aos/flatbuffers/generate) --reflection_bfbs "${BFBS_FILE}" --output_file "${OUT_FILE}"
+$(rlocation llvm_k8/bin/clang-format) --style=file:"$(rlocation org_frc971/.clang-format)" -i "${OUT_FILE}"
diff --git a/aos/flatbuffers/interesting_schemas.fbs b/aos/flatbuffers/interesting_schemas.fbs
new file mode 100644
index 0000000..99c64b6
--- /dev/null
+++ b/aos/flatbuffers/interesting_schemas.fbs
@@ -0,0 +1,44 @@
+namespace aos.fbs.testing;
+
+attribute "static_length";
+
+table FirstUnion {
+ foo:int (id: 0);
+}
+
+table SecondUnion {
+ bar:double (id: 0);
+}
+
+union UnionType { FirstUnion, SecondUnion }
+
+table TableWithUnion {
+ union_field:UnionType (id: 1);
+}
+
+table MissingVectorLength {
+ vector_no_length:[int] (id: 0);
+}
+
+table NonIntegerVectorLength {
+ vector_badlength:[int] (id: 0, static_length: "abc");
+}
+
+table NegativeVectorLength {
+ vector_badlength:[int] (id: 0, static_length: -1);
+}
+
+table ZeroVectorLength {
+ vector_badlength:[int] (id: 0, static_length: 0);
+}
+
+table MissingStringLength {
+ string_no_length:string (id: 0);
+}
+
+table MissingSubStringLength {
+ substring_no_length:[string] (id: 0, static_length: 3);
+}
+
+// Arbitrary.
+root_type TableWithUnion;
diff --git a/aos/flatbuffers/static_flatbuffers.cc b/aos/flatbuffers/static_flatbuffers.cc
new file mode 100644
index 0000000..502e33e
--- /dev/null
+++ b/aos/flatbuffers/static_flatbuffers.cc
@@ -0,0 +1,826 @@
+#include "aos/flatbuffers/static_flatbuffers.h"
+
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/str_join.h"
+#include "absl/strings/str_replace.h"
+#include "glog/logging.h"
+
+#include "aos/flatbuffers/static_table.h"
+#include "aos/json_to_flatbuffer.h"
+namespace aos::fbs {
+namespace {
+// Represents a given field within a type with all of the data that we actually
+// care about.
+struct FieldData {
+ // Field name.
+ std::string name;
+ // Whether it is an inline data type (scalar/struct vs vector/table/string).
+ bool is_inline = true;
+ // Whether this is a struct or not.
+ bool is_struct = false;
+ // Full C++ type of this field.
+ std::string full_type = "";
+ // Full flatbuffer type for this field.
+ // Only specified for Tables.
+ std::optional<std::string> fbs_type = std::nullopt;
+ // Size of this field in the inline field data (i.e., size of the field for
+ // is_inline fields; 4 bytes for the offset for vectors/tables/strings).
+ size_t inline_size = 0u;
+ // Alignment of the inline data.
+ size_t inline_alignment = 0u;
+ // vtable offset of the field.
+ size_t vtable_offset = 0u;
+};
+
+const reflection::Object *GetObject(const reflection::Schema *schema,
+ const int index) {
+ return (index == -1) ? schema->root_table() : schema->objects()->Get(index);
+}
+
+// Returns the flatbuffer field attribute with the specified name, if available.
+std::optional<std::string_view> GetAttribute(const reflection::Field *field,
+ std::string_view attribute) {
+ if (!field->has_attributes()) {
+ return std::nullopt;
+ }
+ const reflection::KeyValue *kv =
+ field->attributes()->LookupByKey(attribute.data());
+ if (kv == nullptr) {
+ return std::nullopt;
+ }
+ return kv->value()->string_view();
+}
+
+// Returns the implied value of an attribute that specifies a length (i.e., 0 if
+// the attribute is not specified; the integer value otherwise).
+int64_t GetLengthAttributeOrZero(const reflection::Field *field,
+ std::string_view attribute) {
+ std::optional<std::string_view> str = GetAttribute(field, attribute);
+ if (!str.has_value()) {
+ return 0;
+ }
+ int64_t value;
+ CHECK(absl::SimpleAtoi(str.value(), &value))
+ << ": Field " << field->name()->string_view()
+ << " must specify a positive integer for the " << attribute
+ << " attribute. Got \"" << str.value() << "\".";
+ CHECK_LE(0, value) << ": Field " << field->name()->string_view()
+ << " must have a non-negative " << attribute << ".";
+ return value;
+}
+
+const std::string ScalarCppType(const reflection::BaseType type) {
+ switch (type) {
+ case reflection::BaseType::Bool:
+ return "bool";
+ case reflection::BaseType::Byte:
+ return "int8_t";
+ case reflection::BaseType::UByte:
+ return "uint8_t";
+ case reflection::BaseType::Short:
+ return "int16_t";
+ case reflection::BaseType::UShort:
+ return "uint16_t";
+ case reflection::BaseType::Int:
+ return "int32_t";
+ case reflection::BaseType::UInt:
+ return "uint32_t";
+ case reflection::BaseType::Long:
+ return "int64_t";
+ case reflection::BaseType::ULong:
+ return "uint64_t";
+ case reflection::BaseType::Float:
+ return "float";
+ case reflection::BaseType::Double:
+ return "double";
+ case reflection::BaseType::UType:
+ case reflection::BaseType::String:
+ case reflection::BaseType::Vector:
+ case reflection::BaseType::Obj:
+ case reflection::BaseType::None:
+ case reflection::BaseType::Union:
+ case reflection::BaseType::Array:
+ case reflection::BaseType::MaxBaseType:
+ LOG(FATAL) << ": Type " << reflection::EnumNameBaseType(type)
+ << " not a scalar.";
+ }
+ LOG(FATAL) << "Unreachable";
+}
+
+const std::string FlatbufferNameToCppName(const std::string_view input) {
+ return absl::StrReplaceAll(input, {{".", "::"}});
+}
+
+const std::string AosNameForRawFlatbuffer(const std::string_view base_name) {
+ return absl::StrCat(base_name, "Static");
+}
+
+const std::string IncludePathForFbs(
+ std::string_view fbs_file, std::string_view include_suffix = "static") {
+ fbs_file.remove_suffix(4);
+ return absl::StrCat(fbs_file, "_", include_suffix, ".h");
+}
+
+std::string ScalarOrEnumType(const reflection::Schema *schema,
+ const reflection::BaseType type, int index) {
+ return (index < 0) ? ScalarCppType(type)
+ : FlatbufferNameToCppName(
+ schema->enums()->Get(index)->name()->string_view());
+}
+
+void PopulateTypeData(const reflection::Schema *schema,
+ const reflection::Field *field_fbs, FieldData *field) {
+ VLOG(1) << aos::FlatbufferToJson(field_fbs);
+ const reflection::Type *type = field_fbs->type();
+ field->inline_size = type->base_size();
+ field->inline_alignment = type->base_size();
+ switch (type->base_type()) {
+ case reflection::BaseType::Bool:
+ case reflection::BaseType::Byte:
+ case reflection::BaseType::UByte:
+ case reflection::BaseType::Short:
+ case reflection::BaseType::UShort:
+ case reflection::BaseType::Int:
+ case reflection::BaseType::UInt:
+ case reflection::BaseType::Long:
+ case reflection::BaseType::ULong:
+ case reflection::BaseType::Float:
+ case reflection::BaseType::Double:
+ // We have a scalar field, so things are relatively
+ // straightforward.
+ field->is_inline = true;
+ field->is_struct = false;
+ field->full_type =
+ ScalarOrEnumType(schema, type->base_type(), type->index());
+ return;
+ case reflection::BaseType::String: {
+ field->is_inline = false;
+ field->is_struct = false;
+ field->full_type =
+ absl::StrFormat("::aos::fbs::String<%d>",
+ GetLengthAttributeOrZero(field_fbs, "static_length"));
+ return;
+ }
+ case reflection::BaseType::Vector: {
+ // We need to extract the name of the elements of the vector.
+ std::string element_type;
+ bool elements_are_inline = true;
+ if (type->base_type() == reflection::BaseType::Vector) {
+ switch (type->element()) {
+ case reflection::BaseType::Obj: {
+ const reflection::Object *element_object =
+ GetObject(schema, type->index());
+ element_type =
+ FlatbufferNameToCppName(element_object->name()->string_view());
+ elements_are_inline = element_object->is_struct();
+ if (!element_object->is_struct()) {
+ element_type = AosNameForRawFlatbuffer(element_type);
+ field->fbs_type = element_object->name()->string_view();
+ }
+ break;
+ }
+ case reflection::BaseType::String:
+ element_type =
+ absl::StrFormat("::aos::fbs::String<%d>",
+ GetLengthAttributeOrZero(
+ field_fbs, "static_vector_string_length"));
+ elements_are_inline = false;
+ break;
+ case reflection::BaseType::Vector:
+ LOG(FATAL) << "Vectors of vectors do not exist in flatbuffers.";
+ default:
+ element_type =
+ ScalarOrEnumType(schema, type->element(), type->index());
+ };
+ }
+ field->is_inline = false;
+ field->is_struct = false;
+ field->full_type =
+ absl::StrFormat("::aos::fbs::Vector<%s, %d, %s, %s>", element_type,
+ GetLengthAttributeOrZero(field_fbs, "static_length"),
+ elements_are_inline ? "true" : "false",
+ GetAttribute(field_fbs, "force_align").value_or("0"));
+ return;
+ }
+ case reflection::BaseType::Obj: {
+ const reflection::Object *object = GetObject(schema, type->index());
+ field->is_inline = object->is_struct();
+ field->is_struct = object->is_struct();
+ const std::string flatbuffer_name =
+ FlatbufferNameToCppName(object->name()->string_view());
+ if (field->is_inline) {
+ field->full_type = flatbuffer_name;
+ field->inline_size = object->bytesize();
+ field->inline_alignment = object->minalign();
+ } else {
+ field->fbs_type = object->name()->string_view();
+ field->full_type = AosNameForRawFlatbuffer(flatbuffer_name);
+ }
+ return;
+ }
+ case reflection::BaseType::None:
+ case reflection::BaseType::UType:
+ case reflection::BaseType::Union:
+ case reflection::BaseType::Array:
+ case reflection::BaseType::MaxBaseType:
+ LOG(FATAL) << ": Type " << reflection::EnumNameBaseType(type->base_type())
+ << " not supported currently.";
+ };
+}
+
+std::string MakeMoveConstructor(std::string_view type_name) {
+ return absl::StrFormat(R"code(
+ // We need to provide a MoveConstructor to allow this table to be
+ // used inside of vectors, but we do not want it readily available to
+ // users. See TableMover for more details.
+ %s(%s &&) = default;
+ friend struct ::aos::fbs::internal::TableMover<%s>;
+ )code",
+ type_name, type_name, type_name);
+}
+
+std::string MakeConstructor(std::string_view type_name) {
+ const std::string constructor_body =
+ R"code(
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+)code";
+ return absl::StrFormat(R"code(
+ // Constructors for creating a flatbuffer object.
+ // Users should typically use the Builder class to create these objects,
+ // in order to allow it to populate the root table offset.
+
+ // The buffer provided to these constructors should be aligned to kAlign
+ // and be kSize in length.
+ // The parent/allocator may not be nullptr.
+ %s(std::span<uint8_t> buffer, ::aos::fbs::ResizeableObject *parent) : Table(buffer, parent) {
+ %s
+ }
+ %s(std::span<uint8_t> buffer, ::aos::fbs::Allocator *allocator) : Table(buffer, allocator) {
+ %s
+ }
+ %s(std::span<uint8_t> buffer, ::std::unique_ptr<::aos::fbs::Allocator> allocator) : Table(buffer, ::std::move(allocator)) {
+ %s
+ }
+)code",
+ type_name, constructor_body, type_name,
+ constructor_body, type_name, constructor_body);
+}
+
+std::string MemberName(const FieldData &data) {
+ return absl::StrCat(data.name, "_");
+}
+
+std::string ObjectAbsoluteOffsetName(const FieldData &data) {
+ return absl::StrCat("object_absolute_offset_", data.name);
+}
+
+std::string InlineAbsoluteOffsetName(const FieldData &data) {
+ return absl::StrCat("kInlineAbsoluteOffset_", data.name);
+}
+
+// Generate the clear_* method for the requested field.
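+// As an illustrative sketch (the field name and numbers here are
+// hypothetical), the code generated for an inline field "foo" with a 4-byte
+// inline size and a vtable offset of 8 would look roughly like:
+//   void clear_foo() {
+//     ClearField(kInlineAbsoluteOffset_foo, 4, 8);
+//   }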
+std::string MakeClearer(const FieldData &field) {
+ std::string logical_clearer;
+ if (!field.is_inline) {
+ logical_clearer = MemberName(field) + ".reset();";
+ }
+ return absl::StrFormat(R"code(
+ // Clears the %s field. This will cause has_%s() to return false.
+ void clear_%s() {
+ %s
+ ClearField(%s, %d, %d);
+ }
+ )code",
+ field.name, field.name, field.name, logical_clearer,
+ InlineAbsoluteOffsetName(field), field.inline_size,
+ field.vtable_offset);
+}
+
+// Generate the has_* method for the requested field.
+std::string MakeHaser(const FieldData &field) {
+ return absl::StrFormat(R"code(
+ // Returns true if the %s field is set and can be accessed.
+ bool has_%s() const {
+ return AsFlatbuffer().has_%s();
+ }
+ )code",
+ field.name, field.name, field.name);
+}
+
+// Generates the accessors for fields which are stored inline in the flatbuffer
+// table (scalars, structs, and enums).
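+// As an illustrative sketch, for a hypothetical int32 field "foo" with a
+// vtable offset of 8, this produces code along the lines of:
+//   void set_foo(const int32_t &value) {
+//     SetField<int32_t>(kInlineAbsoluteOffset_foo, 8, value);
+//   }
+//   std::optional<int32_t> foo() const;
+//   int32_t *mutable_foo();
+// plus the corresponding clear_foo()/has_foo() methods.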
+std::string MakeInlineAccessors(const FieldData &field,
+ const size_t inline_absolute_offset) {
+ CHECK_EQ(inline_absolute_offset % field.inline_alignment, 0u)
+ << ": Unaligned field " << field.name << " on " << field.full_type
+ << " with inline offset of " << inline_absolute_offset
+ << " and alignment of " << field.inline_alignment;
+ const std::string setter =
+ absl::StrFormat(R"code(
+ // Sets the %s field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_%s(const %s &value) {
+ SetField<%s>(%s, %d, value);
+ }
+ )code",
+ field.name, field.name, field.full_type, field.full_type,
+ InlineAbsoluteOffsetName(field), field.vtable_offset);
+ const std::string getters = absl::StrFormat(
+ R"code(
+ // Returns the value of %s if set; nullopt otherwise.
+ std::optional<%s> %s() const {
+ return has_%s() ? std::make_optional(Get<%s>(%s)) : std::nullopt;
+ }
+ // Returns a pointer to modify the %s field.
+ // The pointer may be invalidated by mutations/movements of the underlying buffer.
+ // Returns nullptr if the field is not set.
+ %s* mutable_%s() {
+ return has_%s() ? MutableGet<%s>(%s) : nullptr;
+ }
+ )code",
+ field.name, field.full_type, field.name, field.name, field.full_type,
+ InlineAbsoluteOffsetName(field), field.name, field.full_type, field.name,
+ field.name, field.full_type, InlineAbsoluteOffsetName(field));
+ const std::string clearer = MakeClearer(field);
+ return setter + getters + clearer + MakeHaser(field);
+}
+
+// Generates the accessors for fields which are not stored inline and instead
+// have an offset to the actual field content stored in the inline portion of
+// the flatbuffer table.
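+// As an illustrative sketch, for a hypothetical subtable field "bar" of type
+// SubTableStatic this produces an add_bar() that constructs the subtable in
+// place and records its offset in the inline entry, e.g.:
+//   SubTableStatic *add_bar() {
+//     CHECK(!bar_.has_value());
+//     bar_.emplace(BufferForObject(object_absolute_offset_bar,
+//                                  SubTableStatic::kSize, kAlign), this);
+//     ...
+//     return &bar_.value().t;
+//   }
+// along with bar()/mutable_bar() accessors that return nullptr when unset.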
+std::string MakeOffsetDataAccessors(const FieldData &field) {
+ const std::string setter = absl::StrFormat(
+ R"code(
+ // Creates an empty object for the %s field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ %s* add_%s() {
+ CHECK(!%s.has_value());
+ constexpr size_t kVtableIndex = %d;
+ // Construct the *Static object that we will use for managing this subtable.
+ %s.emplace(BufferForObject(%s, %s::kSize, kAlign), this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(%s, kVtableIndex, %s + %s::kOffset - %s);
+ return &%s.value().t;
+ }
+ )code",
+ field.name, field.full_type, field.name, MemberName(field),
+ field.vtable_offset, MemberName(field), ObjectAbsoluteOffsetName(field),
+ field.full_type, InlineAbsoluteOffsetName(field),
+ ObjectAbsoluteOffsetName(field), field.full_type,
+ InlineAbsoluteOffsetName(field), MemberName(field));
+ const std::string getters = absl::StrFormat(
+ R"code(
+ // Returns a pointer to the %s field, if set. nullptr otherwise.
+ const %s* %s() const {
+ return %s.has_value() ? &%s.value().t : nullptr;
+ }
+ %s* mutable_%s() {
+ return %s.has_value() ? &%s.value().t : nullptr;
+ }
+ )code",
+ field.name, field.full_type, field.name, MemberName(field),
+ MemberName(field), field.full_type, field.name, MemberName(field),
+ MemberName(field));
+ return setter + getters + MakeClearer(field) + MakeHaser(field);
+}
+
+std::string MakeAccessors(const FieldData &field,
+ size_t inline_absolute_offset) {
+ return field.is_inline ? MakeInlineAccessors(field, inline_absolute_offset)
+ : MakeOffsetDataAccessors(field);
+}
+
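+// Generates the member declarations for the requested field: for inline fields
+// this is just a static constant with the field's inline offset; for
+// non-inline fields it also includes the std::optional wrapper that owns the
+// subobject and a member tracking where the subobject's buffer lives.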
+std::string MakeMembers(const FieldData &field,
+ std::string_view offset_data_absolute_offset,
+ size_t inline_absolute_offset) {
+ if (field.is_inline) {
+ return absl::StrFormat(
+ R"code(
+ // Offset from the start of the buffer to the inline data for the %s field.
+ static constexpr size_t %s = %d;
+ )code",
+ field.name, InlineAbsoluteOffsetName(field), inline_absolute_offset);
+ } else {
+ return absl::StrFormat(
+ R"code(
+ // Members relating to the %s field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<%s>> %s;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t %s = %s;
+ // Offset from the start of the buffer to this field's inline entry (the
+ // offset stored in the inline table data that points at the actual data).
+ static constexpr size_t %s = %d;
+ )code",
+ field.name, field.full_type, MemberName(field),
+ ObjectAbsoluteOffsetName(field), offset_data_absolute_offset,
+ InlineAbsoluteOffsetName(field), inline_absolute_offset);
+ }
+}
+
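+// Generates the Clear() method, which simply calls clear_*() on every field of
+// the table.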
+std::string MakeFullClearer(const std::vector<FieldData> &fields) {
+ std::vector<std::string> clearers;
+ for (const FieldData &field : fields) {
+ clearers.emplace_back(absl::StrFormat("clear_%s();", field.name));
+ }
+ return absl::StrFormat(R"code(
+ // Clears every field of the table, removing any existing state.
+ void Clear() { %s })code",
+ absl::StrJoin(clearers, "\n"));
+}
+
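+// Generates the FromFlatbuffer() method, which copies the contents of a
+// regular ("raw") flatbuffer of the corresponding type into this object.
+// As an illustrative sketch, for a hypothetical table with a scalar field
+// "foo" and a subtable field "bar", the generated body would look roughly
+// like:
+//   Clear();
+//   if (other->has_foo()) { set_foo(other->foo()); }
+//   if (other->has_bar()) {
+//     if (!CHECK_NOTNULL(add_bar())->FromFlatbuffer(other->bar())) { return false; }
+//   }
+//   return true;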
+std::string MakeCopier(const std::vector<FieldData> &fields) {
+ std::vector<std::string> copiers;
+ for (const FieldData &field : fields) {
+ if (field.is_struct) {
+ copiers.emplace_back(absl::StrFormat(R"code(
+ if (other->has_%s()) {
+ set_%s(*other->%s());
+ }
+ )code",
+ field.name, field.name, field.name));
+ } else if (field.is_inline) {
+ copiers.emplace_back(absl::StrFormat(R"code(
+ if (other->has_%s()) {
+ set_%s(other->%s());
+ }
+ )code",
+ field.name, field.name, field.name));
+ } else {
+ copiers.emplace_back(absl::StrFormat(R"code(
+ if (other->has_%s()) {
+ if (!CHECK_NOTNULL(add_%s())->FromFlatbuffer(other->%s())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+ )code",
+ field.name, field.name, field.name));
+ }
+ }
+ return absl::StrFormat(
+ R"code(
+ // Copies the contents of the provided flatbuffer into this flatbuffer,
+ // returning true on success.
+ [[nodiscard]] bool FromFlatbuffer(const Flatbuffer *other) {
+ Clear();
+ %s
+ return true;
+ }
+)code",
+ absl::StrJoin(copiers, "\n"));
+}
+
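+// Generates the NumberOfSubObjects()/GetSubObject() overrides that the
+// ResizeableObject parent class uses to locate and update the out-of-line
+// subobjects (subtables, vectors, strings) of this table.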
+std::string MakeSubObjectList(const std::vector<FieldData> &fields) {
+ size_t num_object_fields = 0;
+ std::vector<std::string> object_offsets;
+ std::vector<std::string> objects;
+ std::vector<std::string> inline_offsets;
+ for (const FieldData &field : fields) {
+ if (!field.is_inline) {
+ ++num_object_fields;
+ object_offsets.push_back(
+ absl::StrFormat("&%s", ObjectAbsoluteOffsetName(field)));
+ objects.push_back(absl::StrFormat("&%s->t", MemberName(field)));
+ inline_offsets.push_back(InlineAbsoluteOffsetName(field));
+ }
+ }
+ if (num_object_fields == 0) {
+ return R"code(
+ // This object has no non-inline subobjects, so we don't have to do anything special.
+ size_t NumberOfSubObjects() const final { return 0; }
+ using ::aos::fbs::ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t) final { LOG(FATAL) << "No subobjects."; }
+ )code";
+ }
+ return absl::StrFormat(R"code(
+ size_t NumberOfSubObjects() const final { return %d; }
+ using ::aos::fbs::ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t index) final {
+ SubObject object;
+ // Note: The below arrays are local variables rather than class members to
+ // avoid having to deal with what happens to them if the object is moved.
+
+ // Array of the members that we use for tracking where the buffers for
+ // each subobject belong.
+ // Pointers because these may need to be modified when memory is
+ // inserted into the buffer.
+ const std::array<size_t*, %d> subobject_object_offsets{%s};
+ // Actual subobjects; note that the pointers will be invalid when the
+ // field is not populated.
+ const std::array<::aos::fbs::ResizeableObject*, %d> subobject_objects{%s};
+ // Absolute offsets from the start of the buffer to where the inline
+ // entry is for each table. These offsets do not need to change at
+ // runtime (because memory is never inserted into the start of
+ // a given table), but the offsets stored at those locations
+ // may need to be updated.
+ const std::array<size_t, %d> subobject_inline_offsets{%s};
+ object.inline_entry = MutableGet<::flatbuffers::uoffset_t>(subobject_inline_offsets[index]);
+ object.object = (*object.inline_entry == 0) ? nullptr : subobject_objects[index];
+ object.absolute_offset = subobject_object_offsets[index];
+ return object;
+ }
+ )code",
+ num_object_fields, num_object_fields,
+ absl::StrJoin(object_offsets, ", "), num_object_fields,
+ absl::StrJoin(objects, ", "), num_object_fields,
+ absl::StrJoin(inline_offsets, ", "));
+}
+
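+// Wraps the provided C++ expression in a PaddedSize() call so that the
+// generated code rounds it up to the given alignment at compile time. For
+// example, AlignCppString("kVtableStart + kVtableSize", "kAlign") produces the
+// string "::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign)".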
+std::string AlignCppString(const std::string_view expression,
+ const std::string_view alignment) {
+ return absl::StrFormat("::aos::fbs::PaddedSize(%s, %s)", expression,
+ alignment);
+}
+
+std::string MakeInclude(std::string_view path, bool system = false) {
+ return absl::StrFormat("#include %s%s%s\n", system ? "<" : "\"", path,
+ system ? ">" : "\"");
+}
+
+} // namespace
+GeneratedObject GenerateCodeForObject(const reflection::Schema *schema,
+ int object_index) {
+ return GenerateCodeForObject(schema, GetObject(schema, object_index));
+}
+GeneratedObject GenerateCodeForObject(const reflection::Schema *schema,
+ const reflection::Object *object) {
+ std::vector<FieldData> fields;
+ for (const reflection::Field *field_fbs : *object->fields()) {
+ if (field_fbs->deprecated()) {
+ // Don't codegen anything for deprecated fields.
+ continue;
+ }
+ FieldData field{.name = field_fbs->name()->str(),
+ .vtable_offset = field_fbs->offset()};
+ PopulateTypeData(schema, field_fbs, &field);
+ fields.push_back(field);
+ }
+ const size_t nominal_min_align = object->minalign();
+ std::string out_of_line_member_size = "";
+ // inline_absolute_offset tracks the current position of the inline table
+ // contents so that we can assign static offsets to each field.
+ size_t inline_absolute_offset = sizeof(soffset_t);
+ // offset_data_relative_offset tracks the current size of the various
+ // sub-tables/vectors/strings that get stored at the end of the buffer.
+ // For simplicity, the offset data will start at a fully aligned offset
+ // (which may be larger than the soffset_t at the start of the table).
+ // Note that this is a string because it is awkward to compute the actual
+ // numeric sizes/alignments here, so we instead accumulate a constant
+ // expression and write it directly into the generated C++.
+ std::string offset_data_relative_offset = "0";
+ const std::string offset_data_start_expression =
+ "::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign)";
+ std::string accessors;
+ std::string members;
+ std::set<std::string> includes = {
+ MakeInclude("optional", true),
+ MakeInclude("aos/flatbuffers/static_table.h"),
+ MakeInclude("aos/flatbuffers/static_vector.h")};
+ for (const reflection::SchemaFile *file : *schema->fbs_files()) {
+ includes.insert(
+ MakeInclude(IncludePathForFbs(file->filename()->string_view())));
+ includes.insert(MakeInclude(
+ IncludePathForFbs(file->filename()->string_view(), "generated")));
+ for (const flatbuffers::String *included : *file->included_filenames()) {
+ includes.insert(MakeInclude(IncludePathForFbs(included->string_view())));
+ }
+ }
+ std::vector<std::string> alignments;
+ std::set<std::string> subobject_names;
+ for (const FieldData &field : fields) {
+ inline_absolute_offset =
+ PaddedSize(inline_absolute_offset, field.inline_alignment);
+ if (!field.is_inline) {
+ // All sub-fields will get aligned to the parent alignment. This makes
+ // some book-keeping a bit easier, at the expense of some gratuitous
+ // padding.
+ offset_data_relative_offset =
+ AlignCppString(offset_data_relative_offset, "kAlign");
+ alignments.push_back(field.full_type + "::kAlign");
+ } else {
+ alignments.push_back(std::to_string(field.inline_alignment));
+ }
+ const std::string offset_data_absolute_offset =
+ offset_data_start_expression + " + " + offset_data_relative_offset;
+ accessors += MakeAccessors(field, inline_absolute_offset);
+ members +=
+ MakeMembers(field, offset_data_absolute_offset, inline_absolute_offset);
+
+ inline_absolute_offset += field.inline_size;
+ if (!field.is_inline) {
+ offset_data_relative_offset +=
+ absl::StrFormat(" + %s::kSize", field.full_type);
+ }
+ if (field.fbs_type.has_value()) {
+ // Is this not getting populated for the root schema?
+ subobject_names.insert(field.fbs_type.value());
+ }
+ }
+
+ const std::string alignment =
+ absl::StrCat("static constexpr size_t kAlign = std::max<size_t>({",
+ absl::StrJoin(alignments, ", "), "});\n");
+ const std::string size =
+ absl::StrCat("static constexpr size_t kSize = ",
+ AlignCppString(offset_data_start_expression + " + " +
+ offset_data_relative_offset,
+ "kAlign"),
+ ";");
+ const size_t inline_data_size = inline_absolute_offset;
+ const std::string constants = absl::StrFormat(
+ R"code(
+ // Space taken up by the inline portion of the flatbuffer table data, in bytes.
+ static constexpr size_t kInlineDataSize = %d;
+ // Space taken up by the vtable for this object, in bytes.
+ static constexpr size_t kVtableSize = sizeof(::flatbuffers::voffset_t) * (2 + %d);
+ // Offset from the start of the internal memory buffer to the start of the vtable.
+ static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(kInlineDataSize, alignof(::flatbuffers::voffset_t));
+ // Required alignment of this object. The buffer that this object gets constructed
+ // into must be aligned to this value.
+ %s
+ // Nominal size of this object, in bytes. The object may grow beyond this size,
+ // but will always start at this size and so the initial buffer must match
+ // this size.
+ %s
+ static_assert(%d <= kAlign, "Flatbuffer schema minalign should not exceed our required alignment.");
+ // Offset from the start of the memory buffer to the start of any out-of-line data (subtables,
+ // vectors, strings).
+ static constexpr size_t kOffsetDataStart = %s;
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize = ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
+ // Minimum size required to build this flatbuffer in an entirely unaligned buffer
+ // (including the root table offset). Made to be a multiple of kAlign for convenience.
+ static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
+ // Offset at which the table vtable offset occurs. This is only needed for vectors.
+ static constexpr size_t kOffset = 0;
+ // Various overrides to support the Table parent class.
+ size_t FixedVtableOffset() const final { return kVtableStart; }
+ size_t VtableSize() const final { return kVtableSize; }
+ size_t InlineTableSize() const final { return kInlineDataSize; }
+ size_t OffsetDataStart() const final { return kOffsetDataStart; }
+ size_t Alignment() const final { return kAlign; }
+ // Exposes the name of the flatbuffer type to allow interchangeable use
+ // of the Flatbuffer and FlatbufferStatic types in various AOS methods.
+ static const char *GetFullyQualifiedName() { return Flatbuffer::GetFullyQualifiedName(); }
+)code",
+ inline_data_size, object->fields()->size(), alignment, size,
+ nominal_min_align, offset_data_start_expression);
+ const std::string_view fbs_type_name = object->name()->string_view();
+ const std::string type_namespace = FlatbufferNameToCppName(
+ fbs_type_name.substr(0, fbs_type_name.find_last_of(".")));
+ const std::string type_name = AosNameForRawFlatbuffer(
+ fbs_type_name.substr(fbs_type_name.find_last_of(".") + 1));
+ const std::string object_code = absl::StrFormat(
+ R"code(
+namespace %s {
+class %s : public ::aos::fbs::Table {
+ public:
+ // The underlying "raw" flatbuffer type for this type.
+ typedef %s Flatbuffer;
+ // Returns this object as a flatbuffer type. This reference may not be valid
+ // following mutations to the underlying flatbuffer, due to how memory may
+ // get moved around.
+ const Flatbuffer &AsFlatbuffer() const { return *GetFlatbuffer<Flatbuffer>(); }
+%s
+%s
+ virtual ~%s() {}
+%s
+%s
+%s
+ private:
+%s
+%s
+%s
+};
+}
+ )code",
+ type_namespace, type_name, FlatbufferNameToCppName(fbs_type_name),
+ constants, MakeConstructor(type_name), type_name, accessors,
+ MakeFullClearer(fields), MakeCopier(fields),
+ MakeMoveConstructor(type_name), members, MakeSubObjectList(fields));
+
+ GeneratedObject result;
+ result.name = fbs_type_name;
+ result.include_declarations = includes;
+ result.code = object_code;
+ result.subobjects = subobject_names;
+ return result;
+}
+
+namespace {
+
+// Generated C++ code for an entire fbs file.
+// This includes all of the actual C++ code that will be written to a file (call
+// GenerateCode() to actually get the desired contents of the file).
+struct GeneratedCode {
+ // Prefix (for include guards).
+ std::string contents_prefix;
+ // Full set of required #include declarations.
+ std::set<std::string> include_declarations;
+ // Ordered list of objects (order is necessary to ensure that any dependencies
+ // between objects are managed correctly).
+ std::vector<GeneratedObject> objects;
+ // Suffix (for include guards).
+ std::string contents_suffix;
+
+ // Combine the above things into the string that actually needs to be written
+ // to a file.
+ std::string GenerateCode() const;
+ // Combines the code for multiple objects into one.
+ static GeneratedCode MergeCode(const std::vector<GeneratedObject> &objects);
+};
+
+std::string GeneratedCode::GenerateCode() const {
+ std::string result =
+ contents_prefix + absl::StrJoin(include_declarations, "");
+ for (const auto &object : objects) {
+ result += object.code;
+ }
+ result += contents_suffix;
+ return result;
+}
+
+GeneratedCode GeneratedCode::MergeCode(
+ const std::vector<GeneratedObject> &objects) {
+ GeneratedCode result;
+ // TODO(james): Should we use #ifdef include guards instead?
+ result.contents_prefix =
+ "#pragma once\n// This is a generated file. Do not modify.\n";
+ // We need to get the ordering of objects correct in order to ensure that
+ // depended-on objects appear before their dependees.
+ // In order to do this, we:
+ // 1) Assume that any objects not in the provided vector must exist in
+ // #includes and so can be ignored.
+ // 2) Create a list of all the objects we have been provided but which we have
+ // not yet added to the results vector.
+ // 3) Until said list is empty, we iterate over it and find any object(s)
+ // which have no dependencies in the list itself, and add them to the
+ // result.
+ // We aren't going to worry about efficient graph traversal here or anything.
+ // We also don't currently attempt to support circular dependencies.
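+ // As an example, if we are provided tables A and B where A has a subtable of
+ // type B, then B will be added to the result before A.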
+ std::map<std::string_view, const GeneratedObject *> remaining_objects;
+ for (const auto &object : objects) {
+ remaining_objects[object.name] = &object;
+ }
+ while (!remaining_objects.empty()) {
+ std::string_view to_remove;
+ for (const auto &pair : remaining_objects) {
+ bool has_dependencies = false;
+ for (const std::string_view subobject : pair.second->subobjects) {
+ if (remaining_objects.contains(subobject)) {
+ has_dependencies = true;
+ }
+ }
+ if (has_dependencies) {
+ continue;
+ }
+ to_remove = pair.first;
+ result.objects.push_back(*pair.second);
+ result.include_declarations.insert(
+ pair.second->include_declarations.begin(),
+ pair.second->include_declarations.end());
+ break;
+ }
+ // In order to support circular dependencies, two main things would have to
+ // change:
+ // 1. We would have to support dynamically depopulating table members (rather
+ //    than just supporting dynamically sized vectors).
+ // 2. Some of the codegen would need to be tweaked so that the generated C++
+ //    classes can depend on one another.
+ CHECK(!to_remove.empty())
+ << ": Circular dependencies in flatbuffers schemas are not supported.";
+ CHECK_EQ(1u, remaining_objects.erase(to_remove))
+ << ": Failed to remove " << to_remove;
+ }
+ return result;
+}
+} // namespace
+
+std::string GenerateCodeForRootTableFile(const reflection::Schema *schema) {
+ const reflection::Object *root_object = CHECK_NOTNULL(GetObject(schema, -1));
+ const std::string_view root_file =
+ root_object->declaration_file()->string_view();
+ std::vector<GeneratedObject> objects = {
+ GenerateCodeForObject(schema, root_object)};
+ for (const reflection::Object *object : *schema->objects()) {
+ if (object->is_struct()) {
+ continue;
+ }
+ if (object->declaration_file()->string_view() == root_file) {
+ objects.push_back(GenerateCodeForObject(schema, object));
+ }
+ }
+ return GeneratedCode::MergeCode(objects).GenerateCode();
+}
+} // namespace aos::fbs
diff --git a/aos/flatbuffers/static_flatbuffers.h b/aos/flatbuffers/static_flatbuffers.h
new file mode 100644
index 0000000..47d79e6
--- /dev/null
+++ b/aos/flatbuffers/static_flatbuffers.h
@@ -0,0 +1,43 @@
+#ifndef AOS_FLATBUFFERS_STATIC_FLATBUFFERS_H_
+#define AOS_FLATBUFFERS_STATIC_FLATBUFFERS_H_
+#include <map>
+#include <set>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include "flatbuffers/reflection_generated.h"
+namespace aos::fbs {
+
+// Raw C++ code needed to represent a single flatbuffer table.
+// The various strings in this struct represent the actual C++ code that will be
+// used for this object; it is split up into pieces in order to allow us to
+// combine multiple flatbuffer tables into a single generated file (namely,
+// pulling the include declarations out to the top and including a set of
+// dependencies so that we can order the code correctly).
+// Primarily exposed here to allow for testing of intermediate functions.
+struct GeneratedObject {
+ // Fully qualified name of the object, in flatbuffer schema rules (e.g.
+ // aos.examples.Ping).
+ std::string name;
+ // All #include statements required for this object.
+ std::set<std::string> include_declarations;
+ // Fully qualified names of all sub-objects, in flatbuffer schema rules (e.g.
+ // aos.examples.Ping). Used to manage ordering of codegen.
+ std::set<std::string> subobjects;
+ // Actual code specific to this object.
+ std::string code;
+};
+
+// Produces generated code for all flatbuffer tables in the file corresponding
+// to the provided Schema object.
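+// As an illustrative sketch of how this is meant to be used by the codegen
+// tooling (obtaining the schema and writing the file are up to the caller):
+//   const reflection::Schema *schema = ...;  // e.g., parsed from a .bfbs file
+//   const std::string header_contents = GenerateCodeForRootTableFile(schema);
+//   // Write header_contents out to the *_static.h file and format it.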
+std::string GenerateCodeForRootTableFile(const reflection::Schema *schema);
+
+// Helper functions to generate the code for individual objects; primarily
+// exposed for testing.
+GeneratedObject GenerateCodeForObject(const reflection::Schema *schema,
+ int object_index);
+GeneratedObject GenerateCodeForObject(const reflection::Schema *schema,
+ const reflection::Object *object);
+} // namespace aos::fbs
+#endif // AOS_FLATBUFFERS_STATIC_FLATBUFFERS_H_
diff --git a/aos/flatbuffers/static_flatbuffers_fuzz_test.cc b/aos/flatbuffers/static_flatbuffers_fuzz_test.cc
new file mode 100644
index 0000000..fb1b3e1
--- /dev/null
+++ b/aos/flatbuffers/static_flatbuffers_fuzz_test.cc
@@ -0,0 +1,212 @@
+#include "absl/strings/str_format.h"
+#include "absl/strings/str_join.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "aos/flatbuffers.h"
+#include "aos/flatbuffers/builder.h"
+#include "aos/flatbuffers/static_flatbuffers.h"
+#include "aos/flatbuffers/test_dir/type_coverage_static.h"
+#include "aos/flatbuffers/test_static.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/testing/path.h"
+#include "aos/testing/tmpdir.h"
+#include "aos/util/file.h"
+
+namespace aos::fbs::testing {
+
+class StaticFlatbuffersFuzzTest : public ::testing::Test {
+ protected:
+ template <typename T>
+ void VerifyJson(const std::string_view data) {
+ Builder<T> json_builder = aos::JsonToStaticFlatbuffer<T>(data);
+
+ EXPECT_EQ(data, aos::FlatbufferToJson(json_builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ }
+};
+
+namespace {
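+// Recursively generates every combination that takes exactly one entry from
+// each of the provided lists of strings and passes it to handler(). For
+// example, Combine({{"a", "b"}, {"x"}}, handler) calls handler({"a", "x"}) and
+// handler({"b", "x"}).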
+void Combine(const std::span<const std::vector<std::string_view>> &strings,
+ std::function<void(const std::vector<std::string_view> &)> handler,
+ const std::vector<std::string_view> &current_combination) {
+ if (strings.empty()) {
+ handler(current_combination);
+ return;
+ }
+ for (const std::string_view &str : strings.front()) {
+ std::vector<std::string_view> combination = current_combination;
+ combination.push_back(str);
+ Combine(strings.subspan(1), handler, combination);
+ }
+}
+void Combine(
+ const std::vector<std::vector<std::string_view>> &strings,
+ std::function<void(const std::vector<std::string_view> &)> handler) {
+ Combine(std::span<const std::vector<std::string_view>>{strings.data(),
+ strings.size()},
+ handler, {});
+}
+} // namespace
+
+// Iterate over lots of variations of different flatbuffers to try to see if we
+// can exercise weird corner-cases.
+TEST_F(StaticFlatbuffersFuzzTest, JsonFuzzing) {
+ std::vector<std::vector<std::string_view>> stanzas{
+ {"", "\"scalar\": 1323"},
+ {"", "\"vector_of_scalars\": [\n \n ]",
+ "\"vector_of_scalars\": [\n 123\n ]",
+ "\"vector_of_scalars\": [\n 123,\n 456\n ]"},
+ {"", "\"string\": \"\"", "\"string\": \"abcdef\"",
+ "\"string\": \"abcdefghijklmnopqrstuvwxyz\""},
+ {
+ "",
+ "\"vector_of_strings\": [\n \n ]",
+ "\"vector_of_strings\": [\n \"\",\n \"abcdef\"\n ]",
+ "\"vector_of_strings\": [\n \"\",\n \"abcdef\",\n "
+ "\"abcdefghijklmnopqrstuvwxyz\"\n ]",
+ "\"vector_of_strings\": [\n \"\",\n \"abcdef\",\n \"971\",\n "
+ "\"abcdefghijklmnopqrstuvwxyz\"\n ]",
+ "\"vector_of_strings\": [\n \"\",\n \"abcdef\",\n "
+ "\"abcdefghijklmnopqrstuvwxyz\",\n \"971\",\n \"123\"\n ]",
+ "\"vector_of_strings\": [\n \"\",\n \"abcdef\",\n \"xyz\",\n "
+ "\"971\",\n \"123\"\n ]",
+ },
+ {
+ "",
+ "\"substruct\": {\n \"x\": 971.0,\n \"y\": 123.0\n }",
+ },
+ {
+ "",
+ "\"subtable\": {\n\n }",
+ "\"subtable\": {\n \"baz\": 1.23\n }",
+ "\"subtable\": {\n \"foo\": 123,\n \"baz\": 1.23\n }",
+ },
+ {
+ "",
+ "\"vector_aligned\": [\n \n ]",
+ "\"vector_aligned\": [\n 678\n ]",
+ "\"vector_aligned\": [\n 678,\n 456\n ]",
+ "\"vector_aligned\": [\n 7,\n 6,\n 5,\n 4,\n 3,\n 2,\n 1,\n "
+ "0\n ]",
+ },
+ {
+ "",
+ "\"vector_of_structs\": [\n \n ]",
+ R"json("vector_of_structs": [
+ {
+ "x": 1.0,
+ "y": 2.0
+ }
+ ])json",
+ R"json("vector_of_structs": [
+ {
+ "x": 1.0,
+ "y": 2.0
+ },
+ {
+ "x": 3.0,
+ "y": 4.0
+ },
+ {
+ "x": 5.0,
+ "y": 6.0
+ }
+ ])json",
+ R"json("vector_of_structs": [
+ {
+ "x": 1.0,
+ "y": 2.0
+ },
+ {
+ "x": 3.0,
+ "y": 4.0
+ },
+ {
+ "x": 5.0,
+ "y": 6.0
+ },
+ {
+ "x": 7.0,
+ "y": 8.0
+ },
+ {
+ "x": 9.0,
+ "y": 10.0
+ }
+ ])json",
+ },
+ {
+ "",
+ "\"vector_of_tables\": [\n \n ]",
+ R"json("vector_of_tables": [
+ {
+
+ }
+ ])json",
+ R"json("vector_of_tables": [
+ {
+ "foo": 1
+ }
+ ])json",
+ R"json("vector_of_tables": [
+ {
+ "foo": 1
+ },
+ {
+ "foo": 2
+ },
+ {
+ "foo": 3
+ },
+ {
+ "foo": 4
+ },
+ {
+ "foo": 5
+ },
+ {
+ "foo": 6
+ }
+ ])json",
+ },
+ {
+ "",
+ "\"included_table\": {\n\n }",
+ "\"included_table\": {\n \"foo\": \"A\"\n }",
+ },
+ {
+ "",
+ "\"unspecified_length_vector\": [\n \n ]",
+ "\"unspecified_length_vector\": [\n 123\n ]",
+ "\"unspecified_length_vector\": [\n 123,\n 100\n ]",
+ },
+ {
+ "",
+ "\"unspecified_length_string\": \"\"",
+ "\"unspecified_length_string\": \"Hello, World!\"",
+ },
+ {
+ "",
+ "\"unspecified_length_vector_of_strings\": [\n \n ]",
+ "\"unspecified_length_vector_of_strings\": [\n \"\"\n ]",
+ "\"unspecified_length_vector_of_strings\": [\n \"Goodbye, \",\n "
+ "\"World!\"\n ]",
+ },
+ };
+ Combine(stanzas, [this](const std::vector<std::string_view> &strings) {
+ std::vector<std::string_view> no_empty_strings;
+ for (const std::string_view &str : strings) {
+ if (!str.empty()) {
+ no_empty_strings.push_back(str);
+ }
+ }
+ if (no_empty_strings.empty()) {
+ VerifyJson<TestTableStatic>("{\n\n}");
+ } else {
+ VerifyJson<TestTableStatic>(
+ "{\n " + absl::StrJoin(no_empty_strings, ",\n ") + "\n}");
+ }
+ });
+}
+} // namespace aos::fbs::testing
diff --git a/aos/flatbuffers/static_flatbuffers_test.cc b/aos/flatbuffers/static_flatbuffers_test.cc
new file mode 100644
index 0000000..ed737cb
--- /dev/null
+++ b/aos/flatbuffers/static_flatbuffers_test.cc
@@ -0,0 +1,897 @@
+#include "aos/flatbuffers/static_flatbuffers.h"
+
+#include "absl/strings/str_format.h"
+#include "absl/strings/str_join.h"
+#include "external/com_github_google_flatbuffers/src/annotated_binary_text_gen.h"
+#include "external/com_github_google_flatbuffers/src/binary_annotator.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "aos/flatbuffers.h"
+#include "aos/flatbuffers/builder.h"
+#include "aos/flatbuffers/interesting_schemas.h"
+#include "aos/flatbuffers/test_dir/type_coverage_static.h"
+#include "aos/flatbuffers/test_schema.h"
+#include "aos/flatbuffers/test_static.h"
+#include "aos/json_to_flatbuffer.h"
+#include "aos/testing/path.h"
+#include "aos/testing/tmpdir.h"
+#include "aos/util/file.h"
+
+namespace aos::fbs::testing {
+
+namespace {
+// Uses the binary schema to annotate a provided flatbuffer. Returns the
+// annotated flatbuffer.
+std::string AnnotateBinaries(
+ const aos::NonSizePrefixedFlatbuffer<reflection::Schema> &schema,
+ flatbuffers::span<uint8_t> binary_data) {
+ flatbuffers::BinaryAnnotator binary_annotator(
+ schema.span().data(), schema.span().size(), binary_data.data(),
+ binary_data.size());
+
+ auto annotations = binary_annotator.Annotate();
+ const std::string schema_filename =
+ aos::testing::TestTmpDir() + "/schema.bfbs";
+
+ aos::WriteFlatbufferToFile(schema_filename, schema);
+
+ flatbuffers::AnnotatedBinaryTextGenerator text_generator(
+ flatbuffers::AnnotatedBinaryTextGenerator::Options{}, annotations,
+ binary_data.data(), binary_data.size());
+
+ text_generator.Generate(aos::testing::TestTmpDir() + "/foo.bfbs",
+ schema_filename);
+
+ return aos::util::ReadFileToStringOrDie(aos::testing::TestTmpDir() +
+ "/foo.afb");
+}
+const reflection::Object *GetObjectByName(const reflection::Schema *schema,
+ std::string_view name) {
+ for (const reflection::Object *object : *schema->objects()) {
+ if (object->name()->string_view() == name) {
+ return object;
+ }
+ }
+ return nullptr;
+}
+} // namespace
+
+class StaticFlatbuffersTest : public ::testing::Test {
+ protected:
+ template <typename T>
+ void VerifyJson(const std::string_view data) {
+ Builder<T> json_builder = aos::JsonToStaticFlatbuffer<T>(data);
+
+ EXPECT_EQ(data, aos::FlatbufferToJson(json_builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ }
+ aos::FlatbufferSpan<reflection::Schema> test_schema_{TestTableSchema()};
+ aos::FlatbufferSpan<reflection::Schema> interesting_schemas_{
+ UnsupportedSchema()};
+};
+
+// Test that compiles the same code that is used by an example in
+// //aos/documentation/aos/docs/flatbuffers.md.
+TEST_F(StaticFlatbuffersTest, DocumentationExample) {
+ aos::fbs::VectorAllocator allocator;
+ Builder<TestTableStatic> builder(&allocator);
+ TestTableStatic *object = builder.get();
+ object->set_scalar(123);
+ {
+ auto vector = object->add_vector_of_scalars();
+ CHECK(vector->emplace_back(4));
+ CHECK(vector->emplace_back(5));
+ }
+ {
+ auto string = object->add_string();
+ string->SetString("Hello, World!");
+ }
+ {
+ auto vector_of_strings = object->add_vector_of_strings();
+ auto sub_string = CHECK_NOTNULL(vector_of_strings->emplace_back());
+ CHECK(sub_string->emplace_back('D'));
+ }
+ { object->set_substruct({971, 254}); }
+ {
+ auto subtable = object->add_subtable();
+ subtable->set_foo(1234);
+ }
+ {
+ auto vector = object->add_vector_of_structs();
+ CHECK(vector->emplace_back({48, 67}));
+ CHECK(vector->emplace_back({118, 148}));
+ CHECK(vector->emplace_back({971, 973}));
+ // Max vector size is three; this should fail.
+ CHECK(!vector->emplace_back({1114, 2056}));
+ }
+ {
+ auto vector = object->add_vector_of_tables();
+ auto subobject = vector->emplace_back();
+ subobject->set_foo(222);
+ }
+ {
+ auto subtable = object->add_included_table();
+ subtable->set_foo(included::TestEnum::B);
+ }
+ ASSERT_TRUE(builder.AsFlatbufferSpan().Verify());
+ LOG(INFO) << aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true});
+ LOG(INFO) << AnnotateBinaries(test_schema_, builder.buffer());
+}
+
+// Test that compiles the same code that is used by an example in
+// //aos/documentation/aos/docs/flatbuffers.md showing how to convert a
+// Populate*() method that adds a subtable to a flatbuffer.
+namespace {
+flatbuffers::Offset<SubTable> PopulateOld(flatbuffers::FlatBufferBuilder *fbb) {
+ SubTable::Builder builder(*fbb);
+ builder.add_foo(1234);
+ return builder.Finish();
+}
+void PopulateStatic(SubTableStatic *subtable) { subtable->set_foo(1234); }
+} // namespace
+TEST_F(StaticFlatbuffersTest, PopulateMethodConversionExample) {
+ // Using a FlatBufferBuilder:
+ flatbuffers::FlatBufferBuilder fbb;
+ // Note: PopulateOld() *must* be called prior to creating the builder.
+ const flatbuffers::Offset<SubTable> subtable_offset = PopulateOld(&fbb);
+ TestTable::Builder testtable_builder(fbb);
+ testtable_builder.add_subtable(subtable_offset);
+ fbb.Finish(testtable_builder.Finish());
+ aos::FlatbufferDetachedBuffer<TestTable> fbb_finished = fbb.Release();
+
+ // Using the static flatbuffer API.
+ aos::fbs::VectorAllocator allocator;
+ Builder<TestTableStatic> static_builder(&allocator);
+ PopulateStatic(CHECK_NOTNULL(static_builder.get()->add_subtable()));
+
+ // And confirm that they both contain the expected flatbuffer:
+ const std::string expected = R"json({ "subtable": { "foo": 1234 } })json";
+ EXPECT_EQ(expected, aos::FlatbufferToJson(fbb_finished));
+ EXPECT_EQ(expected, aos::FlatbufferToJson(static_builder.AsFlatbufferSpan()));
+}
+
+TEST_F(StaticFlatbuffersTest, UnsupportedSchema) {
+ const reflection::Schema *schema = &interesting_schemas_.message();
+ EXPECT_DEATH(
+ GenerateCodeForObject(
+ schema, GetObjectByName(schema, "aos.fbs.testing.TableWithUnion")),
+ "Union not supported");
+ GenerateCodeForObject(
+ schema, GetObjectByName(schema, "aos.fbs.testing.MissingVectorLength"));
+ EXPECT_DEATH(
+ GenerateCodeForObject(
+ schema,
+ GetObjectByName(schema, "aos.fbs.testing.NonIntegerVectorLength")),
+ "vector_badlength must specify a positive integer for the "
+ "static_length attribute.");
+ EXPECT_DEATH(GenerateCodeForObject(
+ schema, GetObjectByName(
+ schema, "aos.fbs.testing.NegativeVectorLength")),
+ "Field vector_badlength must have a non-negative "
+ "static_length.");
+ GenerateCodeForObject(
+ schema, GetObjectByName(schema, "aos.fbs.testing.ZeroVectorLength"));
+ GenerateCodeForObject(
+ schema, GetObjectByName(schema, "aos.fbs.testing.MissingStringLength"));
+ GenerateCodeForObject(
+ schema,
+ GetObjectByName(schema, "aos.fbs.testing.MissingSubStringLength"));
+}
+
+// Tests that we can go through and manually build up a big flatbuffer and that
+// it stays valid at all points.
+TEST_F(StaticFlatbuffersTest, ManuallyConstructFlatbuffer) {
+ {
+ aos::fbs::VectorAllocator allocator;
+ Builder<SubTableStatic> builder(&allocator);
+ SubTableStatic *object = builder.get();
+ if (!builder.AsFlatbufferSpan().Verify()) {
+ LOG(ERROR) << object->SerializationDebugString() << "\nRoot table offset "
+ << *reinterpret_cast<const uoffset_t *>(
+ builder.buffer().data())
+ << "\nraw bytes\n";
+ aos::fbs::internal::DebugBytes(builder.buffer(), std::cerr);
+ FAIL();
+ return;
+ }
+ EXPECT_EQ("{ }", aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ object->set_foo(123);
+ object->set_baz(971);
+ CHECK(builder.AsFlatbufferSpan().Verify());
+ EXPECT_EQ(123, object->AsFlatbuffer().foo());
+ EXPECT_EQ(971, object->AsFlatbuffer().baz());
+ EXPECT_EQ(R"json({ "foo": 123, "baz": 971.0 })json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ }
+ {
+ // aos::FixedAllocator allocator(TestTableStatic::kUnalignedBufferSize);
+ aos::fbs::VectorAllocator allocator;
+ Builder<TestTableStatic> builder(&allocator);
+ TestTableStatic *object = builder.get();
+ const aos::fbs::testing::TestTable &fbs = object->AsFlatbuffer();
+ VLOG(1) << object->SerializationDebugString();
+ CHECK(builder.AsFlatbufferSpan().Verify());
+ EXPECT_EQ("{ }", aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ {
+ ASSERT_FALSE(object->has_scalar());
+ object->set_scalar(123);
+ EXPECT_TRUE(fbs.has_scalar());
+ EXPECT_EQ(123, fbs.scalar());
+ }
+ EXPECT_EQ(R"json({ "scalar": 123 })json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ {
+ ASSERT_FALSE(object->has_vector_of_scalars());
+ auto vector = object->add_vector_of_scalars();
+ ASSERT_TRUE(vector->emplace_back(4));
+ ASSERT_TRUE(vector->emplace_back(5));
+ ASSERT_TRUE(object->has_vector_of_scalars());
+ ASSERT_TRUE(fbs.has_vector_of_scalars());
+ VLOG(1) << vector->SerializationDebugString();
+ EXPECT_TRUE(fbs.has_vector_of_scalars());
+ EXPECT_EQ(2u, fbs.vector_of_scalars()->size());
+ EXPECT_EQ(4, fbs.vector_of_scalars()->Get(0));
+ EXPECT_EQ(5, fbs.vector_of_scalars()->Get(1));
+ }
+ EXPECT_EQ(R"json({ "scalar": 123, "vector_of_scalars": [ 4, 5 ] })json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ {
+ EXPECT_FALSE(object->has_string());
+ auto string = object->add_string();
+ EXPECT_TRUE(object->has_string());
+ string->SetString("Hello, World!");
+ EXPECT_EQ(13u, object->string()->size());
+ ASSERT_TRUE(fbs.has_string());
+ ASSERT_EQ(13u, fbs.string()->size());
+ EXPECT_EQ("Hello, World!", fbs.string()->string_view());
+ // Check that we null-terminated correctly.
+ EXPECT_EQ(13u, strnlen(fbs.string()->c_str(), 20));
+ }
+ EXPECT_EQ(
+ R"json({ "scalar": 123, "vector_of_scalars": [ 4, 5 ], "string": "Hello, World!" })json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan()));
+ {
+ EXPECT_FALSE(object->has_vector_of_strings());
+ auto vector_of_strings = object->add_vector_of_strings();
+ EXPECT_TRUE(object->has_vector_of_strings());
+ auto sub_string = CHECK_NOTNULL(vector_of_strings->emplace_back());
+ ASSERT_TRUE(sub_string->emplace_back('D'));
+ EXPECT_TRUE(fbs.has_vector_of_strings());
+ ASSERT_EQ(1u, fbs.vector_of_strings()->size());
+ ASSERT_EQ(1u, fbs.vector_of_strings()->Get(0)->size());
+ EXPECT_EQ('D', fbs.vector_of_strings()->Get(0)->Get(0));
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ]
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ EXPECT_FALSE(object->has_substruct());
+ object->set_substruct({971, 254});
+ EXPECT_TRUE(object->has_substruct());
+ EXPECT_TRUE(fbs.has_substruct());
+ EXPECT_EQ(971, fbs.substruct()->x());
+ EXPECT_EQ(254, fbs.substruct()->y());
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ auto subtable = object->add_subtable();
+ subtable->set_foo(1234);
+ EXPECT_TRUE(fbs.has_subtable());
+ EXPECT_EQ(1234, fbs.subtable()->foo());
+ EXPECT_FALSE(fbs.subtable()->has_baz());
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ auto vector = object->add_vector_of_structs();
+ ASSERT_TRUE(vector->emplace_back({48, 67}));
+ ASSERT_TRUE(vector->emplace_back({118, 148}));
+ ASSERT_TRUE(vector->emplace_back({971, 973}));
+ ASSERT_FALSE(vector->emplace_back({1114, 2056}));
+ EXPECT_TRUE(fbs.has_vector_of_structs());
+ EXPECT_EQ(3u, fbs.vector_of_structs()->size());
+ EXPECT_EQ(48, fbs.vector_of_structs()->Get(0)->x());
+ EXPECT_EQ(67, fbs.vector_of_structs()->Get(0)->y());
+ EXPECT_EQ(118, fbs.vector_of_structs()->Get(1)->x());
+ EXPECT_EQ(object->vector_of_structs()->at(1).x(),
+ fbs.vector_of_structs()->Get(1)->x());
+ EXPECT_EQ((*object->vector_of_structs())[1].x(),
+ fbs.vector_of_structs()->Get(1)->x());
+ EXPECT_EQ(148, fbs.vector_of_structs()->Get(1)->y());
+ EXPECT_EQ(971, fbs.vector_of_structs()->Get(2)->x());
+ EXPECT_EQ(973, fbs.vector_of_structs()->Get(2)->y());
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ]
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ EXPECT_FALSE(object->has_vector_of_tables());
+ auto vector = object->add_vector_of_tables();
+ EXPECT_TRUE(object->has_vector_of_tables());
+ auto subobject = vector->emplace_back();
+ subobject->set_foo(222);
+ EXPECT_TRUE(fbs.has_vector_of_tables());
+ EXPECT_EQ(1u, fbs.vector_of_tables()->size());
+ EXPECT_EQ(222, fbs.vector_of_tables()->Get(0)->foo());
+ EXPECT_EQ(object->vector_of_tables()->at(0).foo(),
+ fbs.vector_of_tables()->Get(0)->foo());
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ],
+ "vector_of_tables": [
+ {
+ "foo": 222
+ }
+ ]
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ EXPECT_FALSE(object->has_included_table());
+ auto subtable = object->add_included_table();
+ EXPECT_TRUE(object->has_included_table());
+ subtable->set_foo(included::TestEnum::B);
+ ASSERT_TRUE(fbs.has_included_table());
+ ASSERT_TRUE(fbs.included_table()->has_foo());
+ EXPECT_EQ(included::TestEnum::B, fbs.included_table()->foo());
+ }
+ EXPECT_EQ(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ],
+ "vector_of_tables": [
+ {
+ "foo": 222
+ }
+ ],
+ "included_table": {
+ "foo": "B"
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ {
+ auto aligned_vector = object->add_vector_aligned();
+ ASSERT_EQ(64,
+ std::remove_reference<decltype(*aligned_vector)>::type::kAlign);
+ ASSERT_EQ(64, TestTableStatic::kAlign);
+ ASSERT_TRUE(aligned_vector->emplace_back(444));
+ EXPECT_TRUE(fbs.has_vector_aligned());
+ EXPECT_EQ(1u, fbs.vector_aligned()->size());
+ EXPECT_EQ(0u,
+ reinterpret_cast<size_t>(fbs.vector_aligned()->data()) % 64);
+ EXPECT_EQ(444, fbs.vector_aligned()->Get(0));
+ }
+ VLOG(1) << object->SerializationDebugString();
+ CHECK(builder.AsFlatbufferSpan().Verify());
+ const std::string expected_contents =
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_aligned": [
+ 444
+ ],
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ],
+ "vector_of_tables": [
+ {
+ "foo": 222
+ }
+ ],
+ "included_table": {
+ "foo": "B"
+ }
+})json";
+ EXPECT_EQ(expected_contents,
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ VLOG(1) << AnnotateBinaries(test_schema_, builder.buffer());
+ VerifyJson<TestTableStatic>(expected_contents);
+ {
+ auto aligned_vector = object->mutable_vector_aligned();
+ ASSERT_TRUE(aligned_vector->reserve(100));
+ EXPECT_EQ(100, aligned_vector->capacity());
+ ASSERT_TRUE(builder.AsFlatbufferSpan().Verify())
+ << aligned_vector->SerializationDebugString();
+ EXPECT_EQ(expected_contents,
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ std::vector<int> scalars;
+ scalars.push_back(aligned_vector->at(0));
+ while (aligned_vector->size() < 100u) {
+ scalars.push_back(aligned_vector->size());
+ CHECK(aligned_vector->emplace_back(aligned_vector->size()));
+ }
+ VLOG(1) << aligned_vector->SerializationDebugString();
+ VLOG(1) << AnnotateBinaries(test_schema_, builder.buffer());
+ EXPECT_EQ(absl::StrFormat(
+ R"json({
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_aligned": [
+ %s
+ ],
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ],
+ "vector_of_tables": [
+ {
+ "foo": 222
+ }
+ ],
+ "included_table": {
+ "foo": "B"
+ }
+})json",
+ absl::StrJoin(scalars, ",\n ")),
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ }
+
+ {
+ auto unspecified_vector = object->add_unspecified_length_vector();
+ ASSERT_NE(nullptr, unspecified_vector);
+ ASSERT_EQ(0, unspecified_vector->capacity());
+ ASSERT_FALSE(unspecified_vector->emplace_back(0));
+ ASSERT_TRUE(unspecified_vector->reserve(2));
+ ASSERT_TRUE(unspecified_vector->emplace_back(1));
+ ASSERT_TRUE(unspecified_vector->emplace_back(2));
+ ASSERT_FALSE(unspecified_vector->emplace_back(3));
+ ASSERT_TRUE(builder.AsFlatbufferSpan().Verify());
+ }
+ }
+}
+
+// Tests that field clearing (and subsequent resetting) works properly.
+TEST_F(StaticFlatbuffersTest, ClearFields) {
+ aos::fbs::VectorAllocator allocator;
+ Builder<TestTableStatic> builder(&allocator);
+ TestTableStatic *object = builder.get();
+ // For each field, we will confirm the following:
+ // * Clearing a non-existent field causes no issues.
+ // * We can set a field, clear it, and have it not be present.
+ // * We can set the field again afterwards.
+ {
+ object->clear_scalar();
+ ASSERT_TRUE(builder.Verify());
+ object->set_scalar(123);
+ EXPECT_EQ(123, object->AsFlatbuffer().scalar());
+ object->clear_scalar();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_scalar());
+ object->set_scalar(456);
+ EXPECT_EQ(456, object->AsFlatbuffer().scalar());
+ }
+ {
+ object->clear_vector_of_scalars();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_vector_of_scalars());
+ auto vector = object->add_vector_of_scalars();
+ ASSERT_TRUE(vector->emplace_back(4));
+ ASSERT_TRUE(vector->emplace_back(5));
+ ASSERT_TRUE(vector->emplace_back(6));
+ // Deliberately force a resize of the vector to ensure that we can exercise
+ // what happens if we clear a non-standard size field.
+ ASSERT_FALSE(vector->emplace_back(7));
+ ASSERT_TRUE(vector->reserve(4));
+ ASSERT_TRUE(vector->emplace_back(7));
+ EXPECT_EQ(
+ R"json({
+ "scalar": 456,
+ "vector_of_scalars": [
+ 4,
+ 5,
+ 6,
+ 7
+ ]
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ ASSERT_TRUE(builder.Verify());
+ object->clear_vector_of_scalars();
+ ASSERT_TRUE(builder.Verify());
+ ASSERT_FALSE(object->has_vector_of_scalars())
+ << aos::FlatbufferToJson(builder.AsFlatbufferSpan());
+ vector = CHECK_NOTNULL(object->add_vector_of_scalars());
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_EQ(0u, object->AsFlatbuffer().vector_of_scalars()->size());
+ ASSERT_TRUE(vector->emplace_back(9));
+ ASSERT_TRUE(vector->emplace_back(7));
+ ASSERT_TRUE(vector->emplace_back(1));
+ // This vector has no knowledge of the past resizing; it should fail to add
+ // an extra number.
+ ASSERT_FALSE(vector->emplace_back(7));
+ }
+ {
+ object->clear_substruct();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_substruct());
+ object->set_substruct(SubStruct{2, 3});
+ EXPECT_EQ(
+ R"json({
+ "scalar": 456,
+ "vector_of_scalars": [
+ 9,
+ 7,
+ 1
+ ],
+ "substruct": {
+ "x": 2.0,
+ "y": 3.0
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ object->clear_substruct();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_substruct());
+ object->set_substruct(SubStruct{4, 5});
+ EXPECT_EQ(
+ R"json({
+ "scalar": 456,
+ "vector_of_scalars": [
+ 9,
+ 7,
+ 1
+ ],
+ "substruct": {
+ "x": 4.0,
+ "y": 5.0
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ }
+ {
+ object->clear_subtable();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_subtable());
+ auto subtable = CHECK_NOTNULL(object->add_subtable());
+ subtable->set_baz(9.71);
+ EXPECT_EQ(
+ R"json({
+ "scalar": 456,
+ "vector_of_scalars": [
+ 9,
+ 7,
+ 1
+ ],
+ "substruct": {
+ "x": 4.0,
+ "y": 5.0
+ },
+ "subtable": {
+ "baz": 9.71
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ object->clear_subtable();
+ ASSERT_TRUE(builder.Verify());
+ EXPECT_FALSE(object->has_subtable());
+ subtable = CHECK_NOTNULL(object->add_subtable());
+ subtable->set_baz(16.78);
+ EXPECT_EQ(
+ R"json({
+ "scalar": 456,
+ "vector_of_scalars": [
+ 9,
+ 7,
+ 1
+ ],
+ "substruct": {
+ "x": 4.0,
+ "y": 5.0
+ },
+ "subtable": {
+ "baz": 16.780001
+ }
+})json",
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true}));
+ }
+}
+
+// Try to cover ~all supported scalar/flatbuffer types using JSON convenience
+// functions.
+TEST_F(StaticFlatbuffersTest, FlatbufferTypeCoverage) {
+ VerifyJson<frc971::testing::ConfigurationStatic>("{\n\n}");
+ std::string populated_config =
+ aos::util::ReadFileToStringOrDie(aos::testing::ArtifactPath(
+ "aos/flatbuffers/test_dir/type_coverage.json"));
+ // Get rid of a pesky new line.
+ populated_config = populated_config.substr(0, populated_config.size() - 1);
+ VerifyJson<frc971::testing::ConfigurationStatic>(populated_config);
+
+ // And now play around with mutating the buffer.
+ Builder<frc971::testing::ConfigurationStatic> builder =
+ aos::JsonToStaticFlatbuffer<frc971::testing::ConfigurationStatic>(
+ populated_config);
+ ASSERT_TRUE(builder.Verify());
+ builder.get()->clear_foo_float();
+ ASSERT_TRUE(builder.Verify());
+ ASSERT_FALSE(builder.get()->AsFlatbuffer().has_foo_float());
+ builder.get()->set_foo_float(1.111);
+ ASSERT_TRUE(builder.Verify());
+ ASSERT_FLOAT_EQ(1.111, builder.get()->AsFlatbuffer().foo_float());
+}
+
+// Confirm that we can use the SpanAllocator with a span that provides exactly
+// the required buffer size.
+TEST_F(StaticFlatbuffersTest, ExactSizeSpanAllocator) {
+ std::vector<uint8_t> buffer;
+ buffer.resize(Builder<TestTableStatic>::kBufferSize, 0);
+ aos::fbs::SpanAllocator allocator({buffer.data(), buffer.size()});
+ Builder<TestTableStatic> builder(&allocator);
+ TestTableStatic *object = builder.get();
+ object->set_scalar(123);
+ {
+ auto vector = object->add_vector_of_scalars();
+ ASSERT_TRUE(vector->emplace_back(4));
+ ASSERT_TRUE(vector->emplace_back(5));
+ }
+ {
+ auto string = object->add_string();
+ string->SetString("Hello, World!");
+ }
+ {
+ auto vector_of_strings = object->add_vector_of_strings();
+ auto sub_string = CHECK_NOTNULL(vector_of_strings->emplace_back());
+ ASSERT_TRUE(sub_string->emplace_back('D'));
+ }
+ { object->set_substruct({971, 254}); }
+ {
+ auto subtable = object->add_subtable();
+ subtable->set_foo(1234);
+ }
+ {
+ auto vector = object->add_vector_of_structs();
+ ASSERT_TRUE(vector->emplace_back({48, 67}));
+ ASSERT_TRUE(vector->emplace_back({118, 148}));
+ ASSERT_TRUE(vector->emplace_back({971, 973}));
+ // Max vector size is three; this should fail.
+ ASSERT_FALSE(vector->emplace_back({1114, 2056}));
+ // We don't have any extra space available.
+ ASSERT_FALSE(vector->reserve(4));
+ ASSERT_FALSE(vector->emplace_back({1114, 2056}));
+ }
+ {
+ auto vector = object->add_vector_of_tables();
+ auto subobject = vector->emplace_back();
+ subobject->set_foo(222);
+ }
+ {
+ auto subtable = object->add_included_table();
+ subtable->set_foo(included::TestEnum::B);
+ }
+ ASSERT_TRUE(builder.AsFlatbufferSpan().Verify());
+ VLOG(1) << aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true});
+ VLOG(1) << AnnotateBinaries(test_schema_, builder.buffer());
+}
+
+// Test that the Builder correctly fails when we provide it with too small of
+// a span.
+TEST_F(StaticFlatbuffersTest, TooSmallSpanAllocator) {
+ std::vector<uint8_t> buffer;
+ buffer.resize(10, 0);
+ aos::fbs::SpanAllocator allocator({buffer.data(), buffer.size()});
+ EXPECT_DEATH(Builder<TestTableStatic>{&allocator}, "Failed to allocate");
+}
+
+// Verify that creating a span with extra headroom lets us dynamically alter
+// the size of vectors in the flatbuffer.
+TEST_F(StaticFlatbuffersTest, ExtraLargeSpanAllocator) {
+ std::vector<uint8_t> buffer;
+ buffer.resize(Builder<TestTableStatic>::kBufferSize + 10000, 0);
+ aos::fbs::SpanAllocator allocator({buffer.data(), buffer.size()});
+ Builder<TestTableStatic> builder(&allocator);
+ TestTableStatic *object = builder.get();
+ {
+ auto vector = object->add_unspecified_length_vector();
+ // Confirm that the vector does indeed start out at zero length.
+ ASSERT_FALSE(vector->emplace_back(4));
+ ASSERT_TRUE(vector->reserve(9000));
+ vector->resize(256);
+ for (size_t index = 0; index < 256; ++index) {
+ vector->at(index) = static_cast<uint8_t>(index);
+ }
+ }
+ ASSERT_EQ(256, object->AsFlatbuffer().unspecified_length_vector()->size());
+ size_t expected = 0;
+ for (const uint8_t value :
+ *object->AsFlatbuffer().unspecified_length_vector()) {
+ EXPECT_EQ(expected++, value);
+ }
+}
+} // namespace aos::fbs::testing
diff --git a/aos/flatbuffers/static_table.h b/aos/flatbuffers/static_table.h
new file mode 100644
index 0000000..f90ccde
--- /dev/null
+++ b/aos/flatbuffers/static_table.h
@@ -0,0 +1,117 @@
+#ifndef AOS_FLATBUFFERS_STATIC_TABLE_H_
+#define AOS_FLATBUFFERS_STATIC_TABLE_H_
+#include <algorithm>
+#include <span>
+
+#include "flatbuffers/base.h"
+#include "glog/logging.h"
+
+#include "aos/flatbuffers/base.h"
+namespace aos::fbs {
+
+// This Table class is used as the parent class for the code generated for
+// every flatbuffer table.
+// This object primarily serves to provide some useful common methods for
+// mutating the flatbuffer memory.
+//
+// Every table will be aligned to the greatest alignment of all of its members
+// and its size will be equal to a multiple of the alignment. Each table shall
+// have the following layout: [vtable offset; inline data with padding; vtable;
+// padding; table/vector data with padding]
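+//
+// As a rough illustration of that layout (exact sizes and padding depend on
+// the schema):
+//   [soffset_t to vtable | inline data + padding | vtable | padding | out-of-line data]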
+class Table : public ResizeableObject {
+ public:
+ // Prints out a debug string of the raw flatbuffer memory. Does not currently
+ // do anything intelligent to traverse down into the subobjects of the
+ // flatbuffer (if you want that, then use the flatbuffer binary
+ // annotator---this code mostly exists for debugging the static flatbuffers
+ // implementation itself).
+ std::string SerializationDebugString() const {
+ std::stringstream str;
+ str << "Size: " << buffer_.size() << " alignment: " << Alignment() << "\n";
+ str << "Observed Vtable offset " << Get<soffset_t>(0) << "\n";
+ str << "Inline Size " << InlineTableSize() << " Inline Bytes:\n";
+ internal::DebugBytes(internal::GetSubSpan(buffer_, 4, InlineTableSize()),
+ str);
+ str << "Vtable offset " << FixedVtableOffset() << " Vtable size "
+ << VtableSize() << " Vtable contents:\n";
+ internal::DebugBytes(
+ internal::GetSubSpan(buffer_, FixedVtableOffset(), VtableSize()), str);
+ str << "Offset data offset " << OffsetDataStart() << "\n";
+ // Actual contents can be big; don't print them out until we run into a
+ // situation where we need to debug that.
+ return str.str();
+ }
+
+ protected:
+ Table(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : ResizeableObject(buffer, parent) {}
+ Table(std::span<uint8_t> buffer, Allocator *allocator)
+ : ResizeableObject(buffer, allocator) {}
+ Table(std::span<uint8_t> buffer, ::std::unique_ptr<Allocator> allocator)
+ : ResizeableObject(buffer, ::std::move(allocator)) {}
+ Table(Table &&) = default;
+ virtual ~Table() {}
+ virtual size_t FixedVtableOffset() const = 0;
+ virtual size_t VtableSize() const = 0;
+ virtual size_t InlineTableSize() const = 0;
+ virtual size_t OffsetDataStart() const = 0;
+ size_t AbsoluteOffsetOffset() const override { return 0; }
+ void PopulateVtable() {
+ // Zero out everything up to the start of the sub-messages/tables, which are
+ // responsible for doing their own memory initialization.
+ internal::ClearSpan(internal::GetSubSpan(buffer_, 0, OffsetDataStart()));
+ // Set the offset to the start of the vtable (points backwards, hence the
+ // sign inversion).
+ Set<soffset_t>(0, -FixedVtableOffset());
+ // First element of the vtable is the size of the table.
+ Set<voffset_t>(FixedVtableOffset(), VtableSize());
+ // Second element of the vtable is the size of the inlined data (not really
+ // used by anything...).
+ Set<voffset_t>(FixedVtableOffset() + sizeof(voffset_t), InlineTableSize());
+ }
+
+ template <typename T>
+ void SetField(size_t absolute_offset, size_t vtable_offset, const T &value) {
+ Set<T>(absolute_offset, value);
+ CHECK_EQ(0u, (absolute_offset + reinterpret_cast<size_t>(buffer_.data())) %
+ alignof(T));
+ Set<voffset_t>(FixedVtableOffset() + vtable_offset, absolute_offset);
+ }
+
+ void ClearField(size_t absolute_offset, size_t inline_size,
+ size_t vtable_offset) {
+ // TODO: Remove/account for any excess allocated memory.
+ internal::ClearSpan(
+ internal::GetSubSpan(buffer_, absolute_offset, inline_size));
+ Set<voffset_t>(FixedVtableOffset() + vtable_offset, 0);
+ }
+
+ template <typename T>
+ const T &Get(size_t absolute_offset) const {
+ return *reinterpret_cast<const T *>(buffer_.data() + absolute_offset);
+ }
+
+ template <typename T>
+ T *MutableGet(size_t absolute_offset) {
+ return reinterpret_cast<T *>(buffer_.data() + absolute_offset);
+ }
+
+ template <typename T>
+ T *GetMutableFlatbuffer() {
+ return reinterpret_cast<T *>(buffer_.data());
+ }
+
+ template <typename T>
+ const T *GetFlatbuffer() const {
+ return reinterpret_cast<const T *>(buffer_.data());
+ }
+
+ private:
+ template <typename T>
+ void Set(size_t absolute_offset, const T &value) {
+ *reinterpret_cast<T *>(buffer_.data() + absolute_offset) = value;
+ }
+};
+
+} // namespace aos::fbs
+#endif // AOS_FLATBUFFERS_STATIC_TABLE_H_
diff --git a/aos/flatbuffers/static_vector.h b/aos/flatbuffers/static_vector.h
new file mode 100644
index 0000000..0b5d217
--- /dev/null
+++ b/aos/flatbuffers/static_vector.h
@@ -0,0 +1,700 @@
+#ifndef AOS_FLATBUFFERS_STATIC_VECTOR_H_
+#define AOS_FLATBUFFERS_STATIC_VECTOR_H_
+#include <span>
+
+#include "flatbuffers/base.h"
+#include "glog/logging.h"
+
+#include "aos/containers/inlined_vector.h"
+#include "aos/containers/sized_array.h"
+#include "aos/flatbuffers/base.h"
+
+namespace aos::fbs {
+
+namespace internal {
+// Helper class for managing how we specialize the Vector object for different
+// contained types.
+// Users of the Vector class should never need to care about this.
+// Template arguments:
+// T: The type that the vector stores.
+// kInline: Whether the type in question is stored inline or not.
+// Enable: Used for SFINAE around struct values; can be ignored.
+// The struct provides the following types:
+// Type: The type of the data that will be stored inline in the vector.
+// ObjectType: The type of the actual data (only used for non-inline objects).
+// FlatbufferType: The type used by flatbuffers::Vector to store this type.
+// ConstFlatbufferType: The type used by a const flatbuffers::Vector to store
+// this type.
+// kDataAlign: Alignment required by the stored type.
+// kDataSize: Nominal size required by each non-inline data member. This is
+// what will be initially allocated; once created, individual members may
+//     grow to accommodate dynamically sized vectors.
+template <typename T, bool kInline, class Enable = void>
+struct InlineWrapper;
+} // namespace internal
+
+// This Vector class provides a mutable, resizeable, flatbuffer vector.
+//
+// Upon creation, the Vector will start with enough space allocated for
+// kStaticLength elements, and must be provided with a memory buffer that
+// is large enough to serialize all the kStaticLength members (kStaticLength may
+// be zero).
+//
+// Once created, the Vector may be grown using calls to reserve().
+// This will result in the Vector attempting to allocate memory via its
+// parent object; such calls may fail if there is no space available in the
+// allocator.
+//
+// Note that if you are using the Vector class in a realtime context (and thus
+// must avoid dynamic memory allocations) you must only be using a Vector of
+// inline data (i.e., scalars, enums, or structs). Flatbuffer tables and strings
+// require overhead to manage and so require some form of dynamic memory
+// allocation. If we discover a strong use-case for such things, then we may
+// provide some interface that allows managing said metadata on the stack or
+// in another realtime-safe manner.
+//
+// Template arguments:
+// T: Type contained by the vector; either a scalar/struct/enum type or a
+// static flatbuffer type of some sort (a String or an implementation of
+// aos::fbs::Table).
+// kStaticLength: Number of elements to statically allocate memory for.
+// May be zero.
+// kInline: Whether the type T will be stored inline in the vector.
+// kForceAlign: Alignment to force for the start of the vector (e.g., for
+// byte arrays it may be desirable to have the entire array aligned).
+// kNullTerminate: Whether to reserve an extra byte past the end of
+// the inline data for null termination. Not included in kStaticLength,
+// so if e.g. you want to store the string "abc" then kStaticLength can
+// be 3 and kNullTerminate can be true and the vector data will take
+// up 4 bytes of memory.
+//
+// Vector buffer memory layout:
+// * Requirements:
+// * Minimum alignment of 4 bytes (for element count).
+// * The start of the vector data must be aligned to either
+// alignof(InlineType) or a user-specified number.
+// * The element count for the vector must immediately precede the vector
+// data (and so may itself not be aligned to alignof(InlineType)).
+// * For non-inlined types, the individual types must be aligned to
+// their own alignment.
+// * In order to accommodate this, the vector buffer as a whole must
+// generally be aligned to the greatest of the above alignments. There
+// are two reasonable ways one could do this:
+//     * Require that the 4th byte of the buffer provided be aligned to
+//       the maximum alignment of its contents.
+//     * Require that the buffer itself be aligned, and provide padding
+// ourselves. The Vector would then have to expose its own offset
+// because it would not start at the start of the buffer.
+// The former requires that the wrapping code understand the internals
+// of how vectors work; the latter generates extra padding and adds
+// extra logic around handling non-zero offsets.
+//   To maintain general simplicity, we will use the second approach and eat
+//   the cost of the potential few extra bytes of padding.
+// * The layout of the buffer will thus be:
+// [padding; element_count; inline_data; padding; offset_data]
+// The first padding will be of size max(0, kAlign - 4).
+// The element_count is of size 4.
+// The inline_data is of size sizeof(InlineType) * kStaticLength.
+// The second padding is of size
+// (kAlign - ((sizeof(InlineType) * kStaticLength) % kAlign)).
+// The remaining data is only present if kInline is false.
+// The offset data is of size T::kSize * kStaticLength. T::kSize % T::kAlign
+// must be zero.
+// Note that no padding is required on the end because T::kAlign will always
+// end up being equal to the alignment (this can only be violated if
+// kForceAlign is used, but we do not allow that).
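+//
+// Illustrative usage sketch, mirroring the tests in this change (the
+// add_unspecified_length_vector()/add_vector_of_tables() accessors below come
+// from the generated code for the TestTable schema in test.fbs and are just
+// examples):
+//
+//   // Inline vector: emplace_back() returns false once the currently
+//   // allocated capacity is exhausted; reserve() grows it via the allocator.
+//   auto *bytes = object->add_unspecified_length_vector();
+//   if (!bytes->emplace_back(4)) {
+//     CHECK(bytes->reserve(10));
+//     CHECK(bytes->emplace_back(4));
+//   }
+//   // Vector of tables: emplace_back() returns a pointer to the new element,
+//   // or nullptr if there is no capacity left.
+//   auto *tables = object->add_vector_of_tables();
+//   auto *subobject = tables->emplace_back();
+//   subobject->set_foo(222);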
+template <typename T, size_t kStaticLength, bool kInline,
+ size_t kForceAlign = 0, bool kNullTerminate = false>
+class Vector : public ResizeableObject {
+ public:
+ static_assert(kInline || !kNullTerminate,
+ "It does not make sense to null-terminate vectors of objects.");
+ // Type stored inline in the serialized vector (offsets for tables/strings; T
+ // otherwise).
+ using InlineType = typename internal::InlineWrapper<T, kInline>::Type;
+  // Out-of-line type for out-of-line T.
+ using ObjectType = typename internal::InlineWrapper<T, kInline>::ObjectType;
+ // Type used as the template parameter to flatbuffers::Vector<>.
+ using FlatbufferType =
+ typename internal::InlineWrapper<T, kInline>::FlatbufferType;
+ using ConstFlatbufferType =
+ typename internal::InlineWrapper<T, kInline>::ConstFlatbufferType;
+ // flatbuffers::Vector type that corresponds to this Vector.
+ typedef flatbuffers::Vector<FlatbufferType> Flatbuffer;
+ typedef const flatbuffers::Vector<ConstFlatbufferType> ConstFlatbuffer;
+ // Alignment of the inline data.
+ static constexpr size_t kInlineAlign =
+ std::max(kForceAlign, alignof(InlineType));
+ // Type used for serializing the length of the vector.
+ typedef uint32_t LengthType;
+ // Overall alignment of this type, and required alignment of the buffer that
+ // must be provided to the Vector.
+ static constexpr size_t kAlign =
+ std::max({alignof(LengthType), kInlineAlign,
+ internal::InlineWrapper<T, kInline>::kDataAlign});
+ // Padding inserted prior to the length element of the vector (to manage
+ // alignment of the data properly; see class comment)
+ static constexpr size_t kPadding1 =
+ std::max<size_t>(0, kAlign - sizeof(LengthType));
+ // Size of the vector length field.
+ static constexpr size_t kLengthSize = sizeof(LengthType);
+ // Size of all the inline vector data, including null termination (prior to
+ // any dynamic increases in size).
+ static constexpr size_t kInlineSize =
+ sizeof(InlineType) * (kStaticLength + (kNullTerminate ? 1 : 0));
+ // Per-element size of any out-of-line data.
+ static constexpr size_t kDataElementSize =
+ internal::InlineWrapper<T, kInline>::kDataSize;
+ // Padding between the inline data and any out-of-line data, to manage
+ // mismatches in alignment between the two.
+ static constexpr size_t kPadding2 = kAlign - (kInlineSize % kAlign);
+ // Total statically allocated space for any out-of-line data ("offset data")
+ // (prior to any dynamic increases in size).
+ static constexpr size_t kOffsetOffsetDataSize =
+ kInline ? 0 : (kStaticLength * kDataElementSize);
+ // Total nominal size of the Vector.
+ static constexpr size_t kSize =
+ kPadding1 + kLengthSize + kInlineSize + kPadding2 + kOffsetOffsetDataSize;
+ // Offset from the start of the provided buffer to where the actual start of
+ // the vector is.
+ static constexpr size_t kOffset = kPadding1;
+ // Constructors; the provided buffer must be aligned to kAlign and be kSize in
+ // length. parent must be non-null.
+ Vector(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : ResizeableObject(buffer, parent) {
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ CHECK_EQ(kSize, buffer.size());
+ // Set padding and length to zero.
+ internal::ClearSpan(internal::GetSubSpan(buffer, 0, kPadding1));
+ SetLength(0u);
+ internal::ClearSpan(internal::GetSubSpan(
+ buffer, kPadding1 + kLengthSize + kInlineSize, kPadding2));
+ if (!kInline) {
+ // Initialize the offsets for any sub-tables. These are used to track
+ // where each table will get serialized in memory as memory gets
+ // resized/moved around.
+ for (size_t index = 0; index < kStaticLength; ++index) {
+ object_absolute_offsets_.emplace_back(kPadding1 + kLengthSize +
+ kInlineSize + kPadding2 +
+ index * kDataElementSize);
+ }
+ }
+ }
+ Vector(const Vector &) = delete;
+ Vector &operator=(const Vector &) = delete;
+ virtual ~Vector() {}
+ // Current allocated length of this vector.
+ // Does not include null termination.
+ size_t capacity() const { return allocated_length_; }
+ // Current length of the vector.
+ // Does not include null termination.
+ size_t size() const { return length_; }
+
+ // Appends an element to the Vector. Used when kInline is false. Returns
+ // nullptr if the append failed due to insufficient capacity. If you need to
+ // increase the capacity() of the vector, call reserve().
+ [[nodiscard]] T *emplace_back();
+ // Appends an element to the Vector. Used when kInline is true. Returns false
+ // if there is insufficient capacity for a new element.
+ [[nodiscard]] bool emplace_back(T element) {
+ static_assert(kInline);
+ return AddInlineElement(element);
+ }
+
+ // Adjusts the allocated size of the vector (does not affect the actual
+ // current length as returned by size()). Returns true on success, and false
+ // if the allocation failed for some reason.
+ // Note that reductions in size will not currently result in the allocated
+ // size actually changing.
+ [[nodiscard]] bool reserve(size_t new_length) {
+ if (new_length > allocated_length_) {
+ const size_t new_elements = new_length - allocated_length_;
+ // First, we must add space for our new inline elements.
+ if (!InsertBytes(
+ inline_data() + allocated_length_ + (kNullTerminate ? 1 : 0),
+ new_elements * sizeof(InlineType), SetZero::kYes)) {
+ return false;
+ }
+ if (!kInline) {
+ // For non-inline objects, create the space required for all the new
+ // object data.
+ const size_t insertion_point = buffer_.size();
+ if (!InsertBytes(buffer_.data() + insertion_point,
+ new_elements * kDataElementSize, SetZero::kYes)) {
+ return false;
+ }
+ for (size_t index = 0; index < new_elements; ++index) {
+ // Note that the already-allocated data may be arbitrarily-sized, so
+ // we cannot use the same static calculation that we do in the
+ // constructor.
+ object_absolute_offsets_.emplace_back(insertion_point +
+ index * kDataElementSize);
+ }
+ objects_.reserve(new_length);
+ }
+ allocated_length_ = new_length;
+ }
+ return true;
+ }
+
+ // Accessors for using the Vector as a flatbuffers::Vector.
+ // Note that these pointers will be unstable if any memory allocations occur
+ // that cause memory to get shifted around.
+ Flatbuffer *AsMutableFlatbufferVector() {
+ return reinterpret_cast<Flatbuffer *>(vector_buffer().data());
+ }
+ ConstFlatbuffer *AsFlatbufferVector() const {
+ return reinterpret_cast<const Flatbuffer *>(vector_buffer().data());
+ }
+
+ // Copies the contents of the provided vector into this; returns false on
+ // failure (e.g., if the provided vector is too long for the amount of space
+ // we can allocate through reserve()).
+ [[nodiscard]] bool FromFlatbuffer(ConstFlatbuffer *vector);
+
+ // Returns the element at the provided index. index must be less than size().
+ const T &at(size_t index) const {
+ CHECK_LT(index, length_);
+ return unsafe_at(index);
+ }
+
+ // Same as at(), except that bounds checks are only performed in non-optimized
+ // builds.
+ // TODO(james): The GetInlineElement() call itself does some bounds-checking;
+ // consider down-grading that.
+ const T &unsafe_at(size_t index) const {
+ DCHECK_LT(index, length_);
+ if (kInline) {
+ // This reinterpret_cast is extremely wrong if T != InlineType (this is
+ // fine because we only do this if kInline is true).
+ // TODO(james): Get the templating improved so that we can get away with
+ // specializing at() instead of using if statements. Resolving this will
+ // also allow deduplicating the Resize() calls.
+ // This specialization is difficult because you cannot partially
+ // specialize a templated class method (online things seem to suggest e.g.
+ // using a struct as the template parameter rather than having separate
+ // parameters).
+ return reinterpret_cast<const T &>(GetInlineElement(index));
+ } else {
+ return objects_[index].t;
+ }
+ }
+
+ // Returns a mutable pointer to the element at the provided index. index must
+ // be less than size().
+ T &at(size_t index) {
+ CHECK_LT(index, length_);
+ return unsafe_at(index);
+ }
+
+ // Same as at(), except that bounds checks are only performed in non-optimized
+ // builds.
+ // TODO(james): The GetInlineElement() call itself does some bounds-checking;
+ // consider down-grading that.
+ T &unsafe_at(size_t index) {
+ DCHECK_LT(index, length_);
+ if (kInline) {
+ // This reinterpret_cast is extremely wrong if T != InlineType (this is
+ // fine because we only do this if kInline is true).
+ // TODO(james): Get the templating improved so that we can get away with
+ // specializing at() instead of using if statements. Resolving this will
+ // also allow deduplicating the Resize() calls.
+ // This specialization is difficult because you cannot partially
+ // specialize a templated class method (online things seem to suggest e.g.
+ // using a struct as the template parameter rather than having separate
+ // parameters).
+ return reinterpret_cast<T &>(GetInlineElement(index));
+ } else {
+ return objects_[index].t;
+ }
+ }
+
+ const T &operator[](size_t index) const { return at(index); }
+ T &operator[](size_t index) { return at(index); }
+
+ // Resizes the vector to the requested size.
+ // size must be less than or equal to the current capacity() of the vector.
+ // Does not allocate additional memory (call reserve() to allocate additional
+ // memory).
+  // Zero-initializes all inline elements; initializes all subtable/string
+  // elements to extant but empty objects.
+ void resize(size_t size);
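+  //
+  // For example (mirroring the ExtraLargeSpanAllocator test in this change),
+  // an inline vector can be grown with reserve() and then filled via resize()
+  // and at():
+  //
+  //   CHECK(vector->reserve(9000));
+  //   vector->resize(256);
+  //   for (size_t index = 0; index < 256; ++index) {
+  //     vector->at(index) = static_cast<uint8_t>(index);
+  //   }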
+
+ // Resizes an inline vector to the requested size.
+ // When changing the size of the vector, the removed/inserted elements will be
+ // set to zero if requested. Otherwise, they will be left uninitialized.
+ void resize_inline(size_t size, SetZero set_zero) {
+ CHECK_LE(size, allocated_length_);
+ static_assert(
+ kInline,
+ "Vector::resize_inline() only works for inline vector types (scalars, "
+ "enums, structs).");
+ if (size == length_) {
+ return;
+ }
+ if (set_zero == SetZero::kYes) {
+ memset(
+ reinterpret_cast<void *>(inline_data() + std::min(size, length_)), 0,
+ std::abs(static_cast<ssize_t>(length_) - static_cast<ssize_t>(size)) *
+ sizeof(InlineType));
+ }
+ length_ = size;
+ SetLength(length_);
+ }
+ // Resizes a vector of offsets to the requested size.
+ // If the size is increased, the new elements will be initialized
+ // to empty but extant objects for non-inlined types (so, zero-length
+ // vectors/strings; objects that exist but have no fields populated).
+ // Note that this is always equivalent to resize().
+ void resize_not_inline(size_t size) {
+ CHECK_LE(size, allocated_length_);
+ static_assert(!kInline,
+ "Vector::resize_not_inline() only works for offset vector "
+ "types (objects, strings).");
+ if (size == length_) {
+ return;
+ } else if (length_ > size) {
+ // TODO: Remove any excess allocated memory.
+ length_ = size;
+ SetLength(length_);
+ return;
+ } else {
+ while (length_ < size) {
+ CHECK_NOTNULL(emplace_back());
+ }
+ }
+ }
+
+ // Accessors directly to the inline data of a vector.
+ const T *data() const {
+ static_assert(kInline,
+ "If you have a use-case for directly accessing the "
+ "flatbuffer data pointer for vectors of "
+ "objects/strings, please start a discussion.");
+ return inline_data();
+ }
+
+ T *data() {
+ static_assert(kInline,
+ "If you have a use-case for directly accessing the "
+ "flatbuffer data pointer for vectors of "
+ "objects/strings, please start a discussion.");
+ return inline_data();
+ }
+
+ std::string SerializationDebugString() const {
+ std::stringstream str;
+ str << "Raw Size: " << kSize << " alignment: " << kAlign
+ << " allocated length: " << allocated_length_ << " inline alignment "
+ << kInlineAlign << " kPadding1 " << kPadding1 << "\n";
+ str << "Observed length " << GetLength() << " (expected " << length_
+ << ")\n";
+ str << "Inline Size " << kInlineSize << " Inline bytes/value:\n";
+    // TODO(james): Get pretty-printing for structs so we can provide better
+    // debug output.
+ internal::DebugBytes(
+ internal::GetSubSpan(vector_buffer(), kLengthSize,
+ sizeof(InlineType) * allocated_length_),
+ str);
+ str << "kPadding2 " << kPadding2 << " offset data size "
+ << kOffsetOffsetDataSize << "\n";
+ return str.str();
+ }
+
+ protected:
+ friend struct internal::TableMover<
+ Vector<T, kStaticLength, kInline, kForceAlign, kNullTerminate>>;
+ // protected so that the String class can access the move constructor.
+ Vector(Vector &&) = default;
+
+ private:
+ // See kAlign and kOffset.
+ size_t Alignment() const final { return kAlign; }
+ size_t AbsoluteOffsetOffset() const override { return kOffset; }
+ // Returns a buffer that starts at the start of the vector itself (past any
+ // padding).
+ std::span<uint8_t> vector_buffer() {
+ return internal::GetSubSpan(buffer(), kPadding1);
+ }
+ std::span<const uint8_t> vector_buffer() const {
+ return internal::GetSubSpan(buffer(), kPadding1);
+ }
+
+ bool AddInlineElement(InlineType e) {
+ if (length_ == allocated_length_) {
+ return false;
+ }
+ SetInlineElement(length_, e);
+ ++length_;
+ SetLength(length_);
+ return true;
+ }
+
+ void SetInlineElement(size_t index, InlineType value) {
+ CHECK_LT(index, allocated_length_);
+ inline_data()[index] = value;
+ }
+
+ InlineType &GetInlineElement(size_t index) {
+ CHECK_LT(index, allocated_length_);
+ return inline_data()[index];
+ }
+
+ const InlineType &GetInlineElement(size_t index) const {
+ CHECK_LT(index, allocated_length_);
+ return inline_data()[index];
+ }
+
+ // Returns a pointer to the start of the inline data itself.
+ InlineType *inline_data() {
+ return reinterpret_cast<InlineType *>(vector_buffer().data() + kLengthSize);
+ }
+ const InlineType *inline_data() const {
+ return reinterpret_cast<const InlineType *>(vector_buffer().data() +
+ kLengthSize);
+ }
+
+ // Updates the length of the vector to match the provided length. Does not set
+ // the length_ member.
+ void SetLength(LengthType length) {
+ *reinterpret_cast<LengthType *>(vector_buffer().data()) = length;
+ if (kNullTerminate) {
+ memset(reinterpret_cast<void *>(inline_data() + length), 0,
+ sizeof(InlineType));
+ }
+ }
+ LengthType GetLength() const {
+ return *reinterpret_cast<const LengthType *>(vector_buffer().data());
+ }
+
+ // Overrides to allow ResizeableObject to manage memory adjustments.
+ size_t NumberOfSubObjects() const final {
+ return kInline ? 0 : allocated_length_;
+ }
+ using ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t index) final {
+ return SubObject{
+ reinterpret_cast<uoffset_t *>(&GetInlineElement(index)),
+ // In order to let this compile regardless of whether type T is an
+ // object type or not, we just use a reinterpret_cast.
+ (index < length_)
+ ? reinterpret_cast<ResizeableObject *>(&objects_[index].t)
+ : nullptr,
+ &object_absolute_offsets_[index]};
+ }
+ // Implementation that handles copying from a flatbuffers::Vector of an inline
+ // data type.
+ [[nodiscard]] bool FromInlineFlatbuffer(ConstFlatbuffer *vector) {
+ if (!reserve(CHECK_NOTNULL(vector)->size())) {
+ return false;
+ }
+
+ // We will be overwriting the whole vector very shortly; there is no need to
+ // clear the buffer to zero.
+ resize_inline(vector->size(), SetZero::kNo);
+
+ memcpy(inline_data(), vector->Data(), size() * sizeof(InlineType));
+ return true;
+ }
+
+ // Implementation that handles copying from a flatbuffers::Vector of a
+ // not-inline data type.
+ [[nodiscard]] bool FromNotInlineFlatbuffer(const Flatbuffer *vector) {
+ if (!reserve(vector->size())) {
+ return false;
+ }
+ // "Clear" the vector.
+ resize_not_inline(0);
+
+ for (const typename T::Flatbuffer *entry : *vector) {
+ if (!CHECK_NOTNULL(emplace_back())->FromFlatbuffer(entry)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // In order to allow for easy partial template specialization, we use a
+ // non-member class to call FromInline/FromNotInlineFlatbuffer and
+ // resize_inline/resize_not_inline. There are not actually any great ways to
+ // do this with just our own class member functions, so instead we make these
+ // methods members of a friend of the Vector class; we then partially
+ // specialize the entire InlineWrapper class and use it to isolate anything
+ // that needs to have a common user interface while still having separate
+ // actual logic.
+ template <typename T_, bool kInline_, class Enable_>
+ friend struct internal::InlineWrapper;
+
+ // Note: The objects here really want to be owned by this object (as opposed
+ // to e.g. returning a stack-allocated object from the emplace_back() methods
+  // that the user then owns). There are two main challenges with having the
+  // user own the object in question:
+ // 1. We can't have >1 reference floating around, or else one object's state
+ // can become out of date. This forces us to do ref-counting and could
+ // make certain types of code obnoxious to write.
+ // 2. Once the user-created object goes out of scope, we lose all of its
+ // internal state. In _theory_ it should be possible to reconstruct most
+ // of the relevant state by examining the contents of the buffer, but
+ // doing so would be cumbersome.
+ aos::InlinedVector<internal::TableMover<ObjectType>,
+ kInline ? 0 : kStaticLength>
+ objects_;
+ aos::InlinedVector<size_t, kInline ? 0 : kStaticLength>
+ object_absolute_offsets_;
+ // Current actual length of the vector.
+ size_t length_ = 0;
+ // Current length that we have allocated space available for.
+ size_t allocated_length_ = kStaticLength;
+};
+
+template <typename T, size_t kStaticLength, bool kInline, size_t kForceAlign,
+ bool kNullTerminate>
+T *Vector<T, kStaticLength, kInline, kForceAlign,
+ kNullTerminate>::emplace_back() {
+ static_assert(!kInline);
+ if (length_ >= allocated_length_) {
+ return nullptr;
+ }
+ const size_t object_start = object_absolute_offsets_[length_];
+ std::span<uint8_t> object_buffer =
+ internal::GetSubSpan(buffer(), object_start, T::kSize);
+ objects_.emplace_back(object_buffer, this);
+ const uoffset_t offset =
+ object_start - (reinterpret_cast<size_t>(&GetInlineElement(length_)) -
+ reinterpret_cast<size_t>(buffer().data()));
+ CHECK(AddInlineElement(offset));
+ return &objects_[objects_.size() - 1].t;
+}
+
+// The String class is a special version of the Vector that is always
+// null-terminated, always contains 1-byte character elements, and has a few
+// extra methods for convenient string access.
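+//
+// A brief usage sketch (illustrative; a String<20> is what the code generator
+// produces for a `string` field annotated with `static_length: 20`, as in
+// test.fbs, and is typically obtained from a generated add_string() method):
+//
+//   ::aos::fbs::String<20> *name = object->add_string();
+//   name->SetString("hello");
+//   CHECK(name->string_view() == "hello");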
+template <size_t kStaticLength>
+class String : public Vector<char, kStaticLength, true, 0, true> {
+ public:
+ typedef Vector<char, kStaticLength, true, 0, true> VectorType;
+ typedef flatbuffers::String Flatbuffer;
+ String(std::span<uint8_t> buffer, ResizeableObject *parent)
+ : VectorType(buffer, parent) {}
+ virtual ~String() {}
+ void SetString(std::string_view string) {
+ CHECK_LT(string.size(), VectorType::capacity());
+ VectorType::resize_inline(string.size(), SetZero::kNo);
+ memcpy(VectorType::data(), string.data(), string.size());
+ }
+ std::string_view string_view() const {
+ return std::string_view(VectorType::data(), VectorType::size());
+ }
+ std::string str() const {
+ return std::string(VectorType::data(), VectorType::size());
+ }
+ const char *c_str() const { return VectorType::data(); }
+
+ private:
+ friend struct internal::TableMover<String<kStaticLength>>;
+ String(String &&) = default;
+};
+
+namespace internal {
+// Specialization for all non-inline vector types. All of these types will just
+// use offsets for their inline data and have appropriate member types/constants
+// for the remaining fields.
+template <typename T>
+struct InlineWrapper<T, false, void> {
+ typedef uoffset_t Type;
+ typedef T ObjectType;
+ typedef flatbuffers::Offset<typename T::Flatbuffer> FlatbufferType;
+ typedef flatbuffers::Offset<typename T::Flatbuffer> ConstFlatbufferType;
+ static_assert((T::kSize % T::kAlign) == 0);
+ static constexpr size_t kDataAlign = T::kAlign;
+ static constexpr size_t kDataSize = T::kSize;
+ template <typename StaticVector>
+ static bool FromFlatbuffer(
+ StaticVector *to, const typename StaticVector::ConstFlatbuffer *from) {
+ return to->FromNotInlineFlatbuffer(from);
+ }
+ template <typename StaticVector>
+ static void ResizeVector(StaticVector *target, size_t size) {
+ target->resize_not_inline(size);
+ }
+};
+// Specialization for "normal" scalar inline data (ints, floats, doubles,
+// enums).
+template <typename T>
+struct InlineWrapper<T, true,
+ typename std::enable_if_t<!std::is_class<T>::value>> {
+ typedef T Type;
+ typedef T ObjectType;
+ typedef T FlatbufferType;
+ typedef T ConstFlatbufferType;
+ static constexpr size_t kDataAlign = alignof(T);
+ static constexpr size_t kDataSize = sizeof(T);
+ template <typename StaticVector>
+ static bool FromFlatbuffer(
+ StaticVector *to, const typename StaticVector::ConstFlatbuffer *from) {
+ return to->FromInlineFlatbuffer(from);
+ }
+ template <typename StaticVector>
+ static void ResizeVector(StaticVector *target, size_t size) {
+ target->resize_inline(size, SetZero::kYes);
+ }
+};
+// Specialization for booleans, given that flatbuffers uses uint8_t's for bools.
+template <>
+struct InlineWrapper<bool, true, void> {
+ typedef uint8_t Type;
+ typedef uint8_t ObjectType;
+ typedef uint8_t FlatbufferType;
+ typedef uint8_t ConstFlatbufferType;
+ static constexpr size_t kDataAlign = 1u;
+ static constexpr size_t kDataSize = 1u;
+ template <typename StaticVector>
+ static bool FromFlatbuffer(
+ StaticVector *to, const typename StaticVector::ConstFlatbuffer *from) {
+ return to->FromInlineFlatbuffer(from);
+ }
+ template <typename StaticVector>
+ static void ResizeVector(StaticVector *target, size_t size) {
+ target->resize_inline(size, SetZero::kYes);
+ }
+};
+// Specialization for flatbuffer structs.
+// The flatbuffers codegen uses struct pointers rather than references or the
+// like, so structs need to be treated specially.
+template <typename T>
+struct InlineWrapper<T, true,
+ typename std::enable_if_t<std::is_class<T>::value>> {
+ typedef T Type;
+ typedef T ObjectType;
+ typedef T *FlatbufferType;
+ typedef const T *ConstFlatbufferType;
+ static constexpr size_t kDataAlign = alignof(T);
+ static constexpr size_t kDataSize = sizeof(T);
+ template <typename StaticVector>
+ static bool FromFlatbuffer(
+ StaticVector *to, const typename StaticVector::ConstFlatbuffer *from) {
+ return to->FromInlineFlatbuffer(from);
+ }
+ template <typename StaticVector>
+ static void ResizeVector(StaticVector *target, size_t size) {
+ target->resize_inline(size, SetZero::kYes);
+ }
+};
+} // namespace internal
+template <typename T, size_t kStaticLength, bool kInline, size_t kForceAlign,
+ bool kNullTerminate>
+bool Vector<T, kStaticLength, kInline, kForceAlign,
+ kNullTerminate>::FromFlatbuffer(ConstFlatbuffer *vector) {
+ return internal::InlineWrapper<T, kInline>::FromFlatbuffer(this, vector);
+}
+
+template <typename T, size_t kStaticLength, bool kInline, size_t kForceAlign,
+ bool kNullTerminate>
+void Vector<T, kStaticLength, kInline, kForceAlign, kNullTerminate>::resize(
+ size_t size) {
+ internal::InlineWrapper<T, kInline>::ResizeVector(this, size);
+}
+
+} // namespace aos::fbs
+#endif // AOS_FLATBUFFERS_STATIC_VECTOR_H_
diff --git a/aos/flatbuffers/test.fbs b/aos/flatbuffers/test.fbs
new file mode 100644
index 0000000..9369bd1
--- /dev/null
+++ b/aos/flatbuffers/test.fbs
@@ -0,0 +1,40 @@
+include "aos/flatbuffers/test_dir/include.fbs";
+
+// TODO: test example with multiple namespaces per file.
+namespace aos.fbs.testing;
+
+struct SubStruct {
+ x:double;
+ y:double;
+}
+
+table SubTable {
+ foo:short (id: 0);
+ bar:short (id: 1, deprecated);
+ baz:float (id: 2);
+}
+
+attribute "static_length";
+attribute "static_vector_string_length";
+
+table TestTable {
+ scalar:int (id: 0);
+ vector_of_scalars:[int] (id: 1, static_length: 3);
+ string:string (id: 2, static_length: 20);
+ vector_of_strings:[string] (id: 3, static_length: 3, static_vector_string_length: 10);
+ substruct:SubStruct (id: 4);
+ subtable:SubTable (id: 5);
+ // The force-aligned vector is deliberately put in the middle of the table
+ // both by ID and alphabetically (both of these can affect the order in which
+ // certain things are evaluated, and during development there were some issues
+ // with this).
+ vector_aligned:[int] (id: 6, force_align: 64, static_length: 3);
+ vector_of_structs:[SubStruct] (id: 7, static_length: 3);
+ vector_of_tables:[SubTable] (id: 8, static_length: 3);
+ included_table:aos.fbs.testing.included.IncludedTable (id: 9);
+ unspecified_length_vector:[ubyte] (id: 10);
+ unspecified_length_string:string (id: 11);
+ unspecified_length_vector_of_strings:[string] (id: 12);
+}
+
+root_type TestTable;
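+
+// For reference when reading the generated sample_test_static.h (illustrative;
+// the exact generated type names are an implementation detail): static_length
+// sets the statically allocated capacity of the generated vector/string types,
+// e.g.:
+//   vector_of_scalars:[int] (static_length: 3)   -> aos::fbs::Vector<int32_t, 3, true, 0>
+//   vector_aligned:[int] (force_align: 64, ...)  -> aos::fbs::Vector<int32_t, 3, true, 64>
+//   string:string (static_length: 20)            -> aos::fbs::String<20>
+//   unspecified_length_vector:[ubyte]            -> aos::fbs::Vector<uint8_t, 0, true, 0>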
diff --git a/aos/flatbuffers/test_dir/BUILD b/aos/flatbuffers/test_dir/BUILD
new file mode 100644
index 0000000..7015cc2
--- /dev/null
+++ b/aos/flatbuffers/test_dir/BUILD
@@ -0,0 +1,39 @@
+load("//aos/flatbuffers:generate.bzl", "static_flatbuffer")
+
+static_flatbuffer(
+ name = "include_fbs",
+ src = "include.fbs",
+ visibility = ["//visibility:public"],
+)
+
+static_flatbuffer(
+ name = "type_coverage_fbs",
+ src = "type_coverage.fbs",
+ visibility = ["//visibility:public"],
+)
+
+filegroup(
+ name = "test_data",
+ srcs = ["type_coverage.json"],
+ visibility = ["//visibility:public"],
+)
+
+sh_binary(
+ name = "copy_test_static_header",
+ srcs = ["copy_test_static_header.sh"],
+ args = ["$(location //aos/flatbuffers:test_static_file)"],
+ data = ["//aos/flatbuffers:test_static_file"],
+)
+
+sh_test(
+ name = "compare_generated_files",
+ srcs = ["compare_generated_files.sh"],
+ args = [
+ "$(location //aos/flatbuffers:test_static_file)",
+ "$(location sample_test_static.h)",
+ ],
+ data = [
+ "//aos/flatbuffers:test_static_file",
+ "@org_frc971//aos/flatbuffers/test_dir:sample_test_static.h",
+ ],
+)
diff --git a/aos/flatbuffers/test_dir/compare_generated_files.sh b/aos/flatbuffers/test_dir/compare_generated_files.sh
new file mode 100755
index 0000000..6b7d584
--- /dev/null
+++ b/aos/flatbuffers/test_dir/compare_generated_files.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# This test confirms that sample_test_static.h is kept up to date.
+# This primarily serves to allow people to easily see what changes to
+# generated code will look like when doing code reviews. The checked-in
+# file can be regenerated by running
+# $ bazel run @org_frc971//aos/flatbuffers/test_dir:copy_test_static_header
+diff $1 $2
diff --git a/aos/flatbuffers/test_dir/copy_test_static_header.sh b/aos/flatbuffers/test_dir/copy_test_static_header.sh
new file mode 100755
index 0000000..11e5b8e
--- /dev/null
+++ b/aos/flatbuffers/test_dir/copy_test_static_header.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+OUTPUT=${BUILD_WORKSPACE_DIRECTORY}/aos/flatbuffers/test_dir/sample_test_static.h
+cp $1 ${OUTPUT}
+chmod 644 ${OUTPUT}
diff --git a/aos/flatbuffers/test_dir/include.fbs b/aos/flatbuffers/test_dir/include.fbs
new file mode 100644
index 0000000..533db2b
--- /dev/null
+++ b/aos/flatbuffers/test_dir/include.fbs
@@ -0,0 +1,12 @@
+namespace aos.fbs.testing.included;
+enum TestEnum : ubyte {
+ A = 0,
+ B = 1,
+}
+
+table IncludedTable {
+ foo:TestEnum (id: 0);
+}
+
+root_type IncludedTable;
+
diff --git a/aos/flatbuffers/test_dir/sample_test_static.h b/aos/flatbuffers/test_dir/sample_test_static.h
new file mode 100644
index 0000000..6279f8b
--- /dev/null
+++ b/aos/flatbuffers/test_dir/sample_test_static.h
@@ -0,0 +1,1470 @@
+#pragma once
+// This is a generated file. Do not modify.
+#include <optional>
+
+#include "aos/flatbuffers/static_table.h"
+#include "aos/flatbuffers/static_vector.h"
+#include "aos/flatbuffers/test_dir/include_generated.h"
+#include "aos/flatbuffers/test_dir/include_static.h"
+#include "aos/flatbuffers/test_generated.h"
+#include "aos/flatbuffers/test_static.h"
+
+namespace aos::fbs::testing {
+class SubTableStatic : public ::aos::fbs::Table {
+ public:
+ // The underlying "raw" flatbuffer type for this type.
+ typedef aos::fbs::testing::SubTable Flatbuffer;
+ // Returns this object as a flatbuffer type. This reference may not be valid
+  // following mutations to the underlying flatbuffer, due to how memory may
+  // get moved around.
+ const Flatbuffer &AsFlatbuffer() const {
+ return *GetFlatbuffer<Flatbuffer>();
+ }
+
+ // Space taken up by the inline portion of the flatbuffer table data, in
+ // bytes.
+ static constexpr size_t kInlineDataSize = 10;
+ // Space taken up by the vtable for this object, in bytes.
+ static constexpr size_t kVtableSize =
+ sizeof(::flatbuffers::voffset_t) * (2 + 3);
+ // Offset from the start of the internal memory buffer to the start of the
+ // vtable.
+ static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(
+ kInlineDataSize, alignof(::flatbuffers::voffset_t));
+ // Required alignment of this object. The buffer that this object gets
+ // constructed into must be aligned to this value.
+ static constexpr size_t kAlign = std::max<size_t>({4, 2});
+
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ static constexpr size_t kSize = ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) + 0, kAlign);
+ static_assert(
+ 1 <= kAlign,
+ "Flatbuffer schema minalign should not exceed our required alignment.");
+ // Offset from the start of the memory buffer to the start of any out-of-line
+ // data (subtables, vectors, strings).
+ static constexpr size_t kOffsetDataStart =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign);
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
+ // Minimum size required to build this flatbuffer in an entirely unaligned
+ // buffer (including the root table offset). Made to be a multiple of kAlign
+ // for convenience.
+ static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
+ // Offset at which the table vtable offset occurs. This is only needed for
+ // vectors.
+ static constexpr size_t kOffset = 0;
+ // Various overrides to support the Table parent class.
+ size_t FixedVtableOffset() const final { return kVtableStart; }
+ size_t VtableSize() const final { return kVtableSize; }
+ size_t InlineTableSize() const final { return kInlineDataSize; }
+ size_t OffsetDataStart() const final { return kOffsetDataStart; }
+ size_t Alignment() const final { return kAlign; }
+ // Exposes the name of the flatbuffer type to allow interchangeable use
+ // of the Flatbuffer and FlatbufferStatic types in various AOS methods.
+ static const char *GetFullyQualifiedName() {
+ return Flatbuffer::GetFullyQualifiedName();
+ }
+
+ // Constructors for creating a flatbuffer object.
+ // Users should typically use the Builder class to create these objects,
+ // in order to allow it to populate the root table offset.
+
+  // The buffer provided to these constructors should be aligned to kAlign
+  // and be kSize in length.
+ // The parent/allocator may not be nullptr.
+ SubTableStatic(std::span<uint8_t> buffer,
+ ::aos::fbs::ResizeableObject *parent)
+ : Table(buffer, parent) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+ SubTableStatic(std::span<uint8_t> buffer, ::aos::fbs::Allocator *allocator)
+ : Table(buffer, allocator) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+ SubTableStatic(std::span<uint8_t> buffer,
+ ::std::unique_ptr<::aos::fbs::Allocator> allocator)
+ : Table(buffer, ::std::move(allocator)) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+
+ virtual ~SubTableStatic() {}
+
+ // Sets the baz field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_baz(const float &value) {
+ SetField<float>(kInlineAbsoluteOffset_baz, 8, value);
+ }
+
+ // Returns the value of baz if set; nullopt otherwise.
+ std::optional<float> baz() const {
+ return has_baz() ? std::make_optional(Get<float>(kInlineAbsoluteOffset_baz))
+ : std::nullopt;
+ }
+ // Returns a pointer to modify the baz field.
+ // The pointer may be invalidated by mutations/movements of the underlying
+ // buffer. Returns nullptr if the field is not set.
+ float *mutable_baz() {
+ return has_baz() ? MutableGet<float>(kInlineAbsoluteOffset_baz) : nullptr;
+ }
+
+ // Clears the baz field. This will cause has_baz() to return false.
+ void clear_baz() { ClearField(kInlineAbsoluteOffset_baz, 4, 8); }
+
+ // Returns true if the baz field is set and can be accessed.
+ bool has_baz() const { return AsFlatbuffer().has_baz(); }
+
+ // Sets the foo field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_foo(const int16_t &value) {
+ SetField<int16_t>(kInlineAbsoluteOffset_foo, 4, value);
+ }
+
+ // Returns the value of foo if set; nullopt otherwise.
+ std::optional<int16_t> foo() const {
+ return has_foo()
+ ? std::make_optional(Get<int16_t>(kInlineAbsoluteOffset_foo))
+ : std::nullopt;
+ }
+ // Returns a pointer to modify the foo field.
+ // The pointer may be invalidated by mutations/movements of the underlying
+ // buffer. Returns nullptr if the field is not set.
+ int16_t *mutable_foo() {
+ return has_foo() ? MutableGet<int16_t>(kInlineAbsoluteOffset_foo) : nullptr;
+ }
+
+ // Clears the foo field. This will cause has_foo() to return false.
+ void clear_foo() { ClearField(kInlineAbsoluteOffset_foo, 2, 4); }
+
+ // Returns true if the foo field is set and can be accessed.
+ bool has_foo() const { return AsFlatbuffer().has_foo(); }
+
+ // Clears every field of the table, removing any existing state.
+ void Clear() {
+ clear_baz();
+ clear_foo();
+ }
+
+ // Copies the contents of the provided flatbuffer into this flatbuffer,
+ // returning true on success.
+ [[nodiscard]] bool FromFlatbuffer(const Flatbuffer *other) {
+ Clear();
+
+ if (other->has_baz()) {
+ set_baz(other->baz());
+ }
+
+ if (other->has_foo()) {
+ set_foo(other->foo());
+ }
+
+ return true;
+ }
+
+ private:
+ // We need to provide a MoveConstructor to allow this table to be
+ // used inside of vectors, but we do not want it readily available to
+ // users. See TableMover for more details.
+ SubTableStatic(SubTableStatic &&) = default;
+ friend struct ::aos::fbs::internal::TableMover<SubTableStatic>;
+
+ // Offset from the start of the buffer to the inline data for the baz field.
+ static constexpr size_t kInlineAbsoluteOffset_baz = 4;
+
+ // Offset from the start of the buffer to the inline data for the foo field.
+ static constexpr size_t kInlineAbsoluteOffset_foo = 8;
+
+ // This object has no non-inline subobjects, so we don't have to do anything
+ // special.
+ size_t NumberOfSubObjects() const final { return 0; }
+ using ::aos::fbs::ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t) final { LOG(FATAL) << "No subobjects."; }
+};
+} // namespace aos::fbs::testing
+
+namespace aos::fbs::testing {
+class TestTableStatic : public ::aos::fbs::Table {
+ public:
+ // The underlying "raw" flatbuffer type for this type.
+ typedef aos::fbs::testing::TestTable Flatbuffer;
+ // Returns this object as a flatbuffer type. This reference may not be valid
+  // following mutations to the underlying flatbuffer, due to how memory may
+  // get moved around.
+ const Flatbuffer &AsFlatbuffer() const {
+ return *GetFlatbuffer<Flatbuffer>();
+ }
+
+ // Space taken up by the inline portion of the flatbuffer table data, in
+ // bytes.
+ static constexpr size_t kInlineDataSize = 68;
+ // Space taken up by the vtable for this object, in bytes.
+ static constexpr size_t kVtableSize =
+ sizeof(::flatbuffers::voffset_t) * (2 + 13);
+ // Offset from the start of the internal memory buffer to the start of the
+ // vtable.
+ static constexpr size_t kVtableStart = ::aos::fbs::PaddedSize(
+ kInlineDataSize, alignof(::flatbuffers::voffset_t));
+ // Required alignment of this object. The buffer that this object gets
+ // constructed into must be aligned to this value.
+ static constexpr size_t kAlign = std::max<size_t>(
+ {aos::fbs::testing::included::IncludedTableStatic::kAlign, 4,
+ ::aos::fbs::String<20>::kAlign, 8,
+ aos::fbs::testing::SubTableStatic::kAlign, ::aos::fbs::String<0>::kAlign,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kAlign,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kAlign,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kAlign,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kAlign,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kAlign});
+
+ // Nominal size of this object, in bytes. The object may grow beyond this
+ // size, but will always start at this size and so the initial buffer must
+ // match this size.
+ static constexpr size_t kSize = ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ 0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::
+ kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::
+ SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0,
+ false, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kSize,
+ kAlign);
+ static_assert(
+ 1 <= kAlign,
+ "Flatbuffer schema minalign should not exceed our required alignment.");
+ // Offset from the start of the memory buffer to the start of any out-of-line
+ // data (subtables, vectors, strings).
+ static constexpr size_t kOffsetDataStart =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign);
+ // Size required for a buffer that includes a root table offset at the start.
+ static constexpr size_t kRootSize =
+ ::aos::fbs::PaddedSize(kSize + sizeof(::flatbuffers::uoffset_t), kAlign);
+ // Minimum size required to build this flatbuffer in an entirely unaligned
+ // buffer (including the root table offset). Made to be a multiple of kAlign
+ // for convenience.
+ static constexpr size_t kUnalignedBufferSize = kRootSize + kAlign;
+ // Offset at which the table vtable offset occurs. This is only needed for
+ // vectors.
+ static constexpr size_t kOffset = 0;
+ // Various overrides to support the Table parent class.
+ size_t FixedVtableOffset() const final { return kVtableStart; }
+ size_t VtableSize() const final { return kVtableSize; }
+ size_t InlineTableSize() const final { return kInlineDataSize; }
+ size_t OffsetDataStart() const final { return kOffsetDataStart; }
+ size_t Alignment() const final { return kAlign; }
+ // Exposes the name of the flatbuffer type to allow interchangeable use
+ // of the Flatbuffer and FlatbufferStatic types in various AOS methods.
+ static const char *GetFullyQualifiedName() {
+ return Flatbuffer::GetFullyQualifiedName();
+ }
+
+ // Constructors for creating a flatbuffer object.
+ // Users should typically use the Builder class to create these objects,
+ // in order to allow it to populate the root table offset.
+
+  // The buffer provided to these constructors should be aligned to kAlign
+  // and be kSize in length.
+ // The parent/allocator may not be nullptr.
+ TestTableStatic(std::span<uint8_t> buffer,
+ ::aos::fbs::ResizeableObject *parent)
+ : Table(buffer, parent) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+ TestTableStatic(std::span<uint8_t> buffer, ::aos::fbs::Allocator *allocator)
+ : Table(buffer, allocator) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+ TestTableStatic(std::span<uint8_t> buffer,
+ ::std::unique_ptr<::aos::fbs::Allocator> allocator)
+ : Table(buffer, ::std::move(allocator)) {
+ CHECK_EQ(buffer.size(), kSize);
+ CHECK_EQ(0u, reinterpret_cast<size_t>(buffer.data()) % kAlign);
+ PopulateVtable();
+ }
+
+ virtual ~TestTableStatic() {}
+
+ // Creates an empty object for the included_table field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ aos::fbs::testing::included::IncludedTableStatic *add_included_table() {
+ CHECK(!included_table_.has_value());
+ constexpr size_t kVtableIndex = 22;
+ // Construct the *Static object that we will use for managing this subtable.
+ included_table_.emplace(
+ BufferForObject(object_absolute_offset_included_table,
+ aos::fbs::testing::included::IncludedTableStatic::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_included_table, kVtableIndex,
+ object_absolute_offset_included_table +
+ aos::fbs::testing::included::IncludedTableStatic::kOffset -
+ kInlineAbsoluteOffset_included_table);
+ return &included_table_.value().t;
+ }
+
+ // Returns a pointer to the included_table field, if set. nullptr otherwise.
+ const aos::fbs::testing::included::IncludedTableStatic *included_table()
+ const {
+ return included_table_.has_value() ? &included_table_.value().t : nullptr;
+ }
+ aos::fbs::testing::included::IncludedTableStatic *mutable_included_table() {
+ return included_table_.has_value() ? &included_table_.value().t : nullptr;
+ }
+
+ // Clears the included_table field. This will cause has_included_table() to
+ // return false.
+ void clear_included_table() {
+ included_table_.reset();
+ ClearField(kInlineAbsoluteOffset_included_table, 4, 22);
+ }
+
+ // Returns true if the included_table field is set and can be accessed.
+ bool has_included_table() const {
+ return AsFlatbuffer().has_included_table();
+ }
+
+ // Sets the scalar field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_scalar(const int32_t &value) {
+ SetField<int32_t>(kInlineAbsoluteOffset_scalar, 4, value);
+ }
+
+ // Returns the value of scalar if set; nullopt otherwise.
+ std::optional<int32_t> scalar() const {
+ return has_scalar()
+ ? std::make_optional(Get<int32_t>(kInlineAbsoluteOffset_scalar))
+ : std::nullopt;
+ }
+ // Returns a pointer to modify the scalar field.
+ // The pointer may be invalidated by mutations/movements of the underlying
+ // buffer. Returns nullptr if the field is not set.
+ int32_t *mutable_scalar() {
+ return has_scalar() ? MutableGet<int32_t>(kInlineAbsoluteOffset_scalar)
+ : nullptr;
+ }
+
+ // Clears the scalar field. This will cause has_scalar() to return false.
+ void clear_scalar() { ClearField(kInlineAbsoluteOffset_scalar, 4, 4); }
+
+ // Returns true if the scalar field is set and can be accessed.
+ bool has_scalar() const { return AsFlatbuffer().has_scalar(); }
+
+ // Creates an empty object for the string field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::String<20> *add_string() {
+ CHECK(!string_.has_value());
+ constexpr size_t kVtableIndex = 8;
+ // Construct the *Static object that we will use for managing this subtable.
+ string_.emplace(BufferForObject(object_absolute_offset_string,
+ ::aos::fbs::String<20>::kSize, kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_string, kVtableIndex,
+ object_absolute_offset_string + ::aos::fbs::String<20>::kOffset -
+ kInlineAbsoluteOffset_string);
+ return &string_.value().t;
+ }
+
+ // Returns a pointer to the string field, if set. nullptr otherwise.
+ const ::aos::fbs::String<20> *string() const {
+ return string_.has_value() ? &string_.value().t : nullptr;
+ }
+ ::aos::fbs::String<20> *mutable_string() {
+ return string_.has_value() ? &string_.value().t : nullptr;
+ }
+
+ // Clears the string field. This will cause has_string() to return false.
+ void clear_string() {
+ string_.reset();
+ ClearField(kInlineAbsoluteOffset_string, 4, 8);
+ }
+
+ // Returns true if the string field is set and can be accessed.
+ bool has_string() const { return AsFlatbuffer().has_string(); }
+
+ // Sets the substruct field, causing it to be populated if it is not already.
+ // This will populate the field even if the specified value is the default.
+ void set_substruct(const aos::fbs::testing::SubStruct &value) {
+ SetField<aos::fbs::testing::SubStruct>(kInlineAbsoluteOffset_substruct, 12,
+ value);
+ }
+
+ // Returns the value of substruct if set; nullopt otherwise.
+ std::optional<aos::fbs::testing::SubStruct> substruct() const {
+ return has_substruct()
+ ? std::make_optional(Get<aos::fbs::testing::SubStruct>(
+ kInlineAbsoluteOffset_substruct))
+ : std::nullopt;
+ }
+ // Returns a pointer to modify the substruct field.
+ // The pointer may be invalidated by mutations/movements of the underlying
+ // buffer. Returns nullptr if the field is not set.
+ aos::fbs::testing::SubStruct *mutable_substruct() {
+ return has_substruct() ? MutableGet<aos::fbs::testing::SubStruct>(
+ kInlineAbsoluteOffset_substruct)
+ : nullptr;
+ }
+
+ // Clears the substruct field. This will cause has_substruct() to return
+ // false.
+ void clear_substruct() {
+ ClearField(kInlineAbsoluteOffset_substruct, 16, 12);
+ }
+
+ // Returns true if the substruct field is set and can be accessed.
+ bool has_substruct() const { return AsFlatbuffer().has_substruct(); }
+
+ // Creates an empty object for the subtable field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ aos::fbs::testing::SubTableStatic *add_subtable() {
+ CHECK(!subtable_.has_value());
+ constexpr size_t kVtableIndex = 14;
+ // Construct the *Static object that we will use for managing this subtable.
+ subtable_.emplace(
+ BufferForObject(object_absolute_offset_subtable,
+ aos::fbs::testing::SubTableStatic::kSize, kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_subtable, kVtableIndex,
+ object_absolute_offset_subtable +
+ aos::fbs::testing::SubTableStatic::kOffset -
+ kInlineAbsoluteOffset_subtable);
+ return &subtable_.value().t;
+ }
+
+ // Returns a pointer to the subtable field, if set. nullptr otherwise.
+ const aos::fbs::testing::SubTableStatic *subtable() const {
+ return subtable_.has_value() ? &subtable_.value().t : nullptr;
+ }
+ aos::fbs::testing::SubTableStatic *mutable_subtable() {
+ return subtable_.has_value() ? &subtable_.value().t : nullptr;
+ }
+
+ // Clears the subtable field. This will cause has_subtable() to return false.
+ void clear_subtable() {
+ subtable_.reset();
+ ClearField(kInlineAbsoluteOffset_subtable, 4, 14);
+ }
+
+ // Returns true if the subtable field is set and can be accessed.
+ bool has_subtable() const { return AsFlatbuffer().has_subtable(); }
+
+ // Creates an empty object for the unspecified_length_string field, which you
+ // can then populate/modify as desired. The field must not be populated yet.
+ ::aos::fbs::String<0> *add_unspecified_length_string() {
+ CHECK(!unspecified_length_string_.has_value());
+ constexpr size_t kVtableIndex = 26;
+ // Construct the *Static object that we will use for managing this subtable.
+ unspecified_length_string_.emplace(
+ BufferForObject(object_absolute_offset_unspecified_length_string,
+ ::aos::fbs::String<0>::kSize, kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_unspecified_length_string, kVtableIndex,
+ object_absolute_offset_unspecified_length_string +
+ ::aos::fbs::String<0>::kOffset -
+ kInlineAbsoluteOffset_unspecified_length_string);
+ return &unspecified_length_string_.value().t;
+ }
+
+ // Returns a pointer to the unspecified_length_string field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::String<0> *unspecified_length_string() const {
+ return unspecified_length_string_.has_value()
+ ? &unspecified_length_string_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::String<0> *mutable_unspecified_length_string() {
+ return unspecified_length_string_.has_value()
+ ? &unspecified_length_string_.value().t
+ : nullptr;
+ }
+
+ // Clears the unspecified_length_string field. This will cause
+ // has_unspecified_length_string() to return false.
+ void clear_unspecified_length_string() {
+ unspecified_length_string_.reset();
+ ClearField(kInlineAbsoluteOffset_unspecified_length_string, 4, 26);
+ }
+
+ // Returns true if the unspecified_length_string field is set and can be
+ // accessed.
+ bool has_unspecified_length_string() const {
+ return AsFlatbuffer().has_unspecified_length_string();
+ }
+
+ // Creates an empty object for the unspecified_length_vector field, which you
+ // can then populate/modify as desired. The field must not be populated yet.
+ ::aos::fbs::Vector<uint8_t, 0, true, 0> *add_unspecified_length_vector() {
+ CHECK(!unspecified_length_vector_.has_value());
+ constexpr size_t kVtableIndex = 24;
+ // Construct the *Static object that we will use for managing this subtable.
+ unspecified_length_vector_.emplace(
+ BufferForObject(object_absolute_offset_unspecified_length_vector,
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize, kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_unspecified_length_vector, kVtableIndex,
+ object_absolute_offset_unspecified_length_vector +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kOffset -
+ kInlineAbsoluteOffset_unspecified_length_vector);
+ return &unspecified_length_vector_.value().t;
+ }
+
+ // Returns a pointer to the unspecified_length_vector field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<uint8_t, 0, true, 0> *unspecified_length_vector()
+ const {
+ return unspecified_length_vector_.has_value()
+ ? &unspecified_length_vector_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<uint8_t, 0, true, 0> *mutable_unspecified_length_vector() {
+ return unspecified_length_vector_.has_value()
+ ? &unspecified_length_vector_.value().t
+ : nullptr;
+ }
+
+ // Clears the unspecified_length_vector field. This will cause
+ // has_unspecified_length_vector() to return false.
+ void clear_unspecified_length_vector() {
+ unspecified_length_vector_.reset();
+ ClearField(kInlineAbsoluteOffset_unspecified_length_vector, 4, 24);
+ }
+
+ // Returns true if the unspecified_length_vector field is set and can be
+ // accessed.
+ bool has_unspecified_length_vector() const {
+ return AsFlatbuffer().has_unspecified_length_vector();
+ }
+
+ // Creates an empty object for the unspecified_length_vector_of_strings field,
+ // which you can then populate/modify as desired. The field must not be
+ // populated yet.
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ add_unspecified_length_vector_of_strings() {
+ CHECK(!unspecified_length_vector_of_strings_.has_value());
+ constexpr size_t kVtableIndex = 28;
+ // Construct the *Static object that we will use for managing this subtable.
+ unspecified_length_vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_unspecified_length_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
+ kVtableIndex,
+ object_absolute_offset_unspecified_length_vector_of_strings +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kOffset -
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings);
+ return &unspecified_length_vector_of_strings_.value().t;
+ }
+
+ // Returns a pointer to the unspecified_length_vector_of_strings field, if
+ // set. nullptr otherwise.
+ const ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ unspecified_length_vector_of_strings() const {
+ return unspecified_length_vector_of_strings_.has_value()
+ ? &unspecified_length_vector_of_strings_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0> *
+ mutable_unspecified_length_vector_of_strings() {
+ return unspecified_length_vector_of_strings_.has_value()
+ ? &unspecified_length_vector_of_strings_.value().t
+ : nullptr;
+ }
+
+ // Clears the unspecified_length_vector_of_strings field. This will cause
+ // has_unspecified_length_vector_of_strings() to return false.
+ void clear_unspecified_length_vector_of_strings() {
+ unspecified_length_vector_of_strings_.reset();
+ ClearField(kInlineAbsoluteOffset_unspecified_length_vector_of_strings, 4,
+ 28);
+ }
+
+ // Returns true if the unspecified_length_vector_of_strings field is set and
+ // can be accessed.
+ bool has_unspecified_length_vector_of_strings() const {
+ return AsFlatbuffer().has_unspecified_length_vector_of_strings();
+ }
+
+ // Creates an empty object for the vector_aligned field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<int32_t, 3, true, 64> *add_vector_aligned() {
+ CHECK(!vector_aligned_.has_value());
+ constexpr size_t kVtableIndex = 16;
+ // Construct the *Static object that we will use for managing this subtable.
+ vector_aligned_.emplace(
+ BufferForObject(object_absolute_offset_vector_aligned,
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_aligned, kVtableIndex,
+ object_absolute_offset_vector_aligned +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kOffset -
+ kInlineAbsoluteOffset_vector_aligned);
+ return &vector_aligned_.value().t;
+ }
+
+ // Returns a pointer to the vector_aligned field, if set. nullptr otherwise.
+ const ::aos::fbs::Vector<int32_t, 3, true, 64> *vector_aligned() const {
+ return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ }
+ ::aos::fbs::Vector<int32_t, 3, true, 64> *mutable_vector_aligned() {
+ return vector_aligned_.has_value() ? &vector_aligned_.value().t : nullptr;
+ }
+
+ // Clears the vector_aligned field. This will cause has_vector_aligned() to
+ // return false.
+ void clear_vector_aligned() {
+ vector_aligned_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_aligned, 4, 16);
+ }
+
+ // Returns true if the vector_aligned field is set and can be accessed.
+ bool has_vector_aligned() const {
+ return AsFlatbuffer().has_vector_aligned();
+ }
+
+ // Creates an empty object for the vector_of_scalars field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<int32_t, 3, true, 0> *add_vector_of_scalars() {
+ CHECK(!vector_of_scalars_.has_value());
+ constexpr size_t kVtableIndex = 6;
+ // Construct the *Static object that we will use for managing this subtable.
+ vector_of_scalars_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_scalars,
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize, kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_scalars, kVtableIndex,
+ object_absolute_offset_vector_of_scalars +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kOffset -
+ kInlineAbsoluteOffset_vector_of_scalars);
+ return &vector_of_scalars_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_scalars field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<int32_t, 3, true, 0> *vector_of_scalars() const {
+ return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<int32_t, 3, true, 0> *mutable_vector_of_scalars() {
+ return vector_of_scalars_.has_value() ? &vector_of_scalars_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_scalars field. This will cause has_vector_of_scalars()
+ // to return false.
+ void clear_vector_of_scalars() {
+ vector_of_scalars_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_scalars, 4, 6);
+ }
+
+ // Returns true if the vector_of_scalars field is set and can be accessed.
+ bool has_vector_of_scalars() const {
+ return AsFlatbuffer().has_vector_of_scalars();
+ }
+
+ // Creates an empty object for the vector_of_strings field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ add_vector_of_strings() {
+ CHECK(!vector_of_strings_.has_value());
+ constexpr size_t kVtableIndex = 10;
+ // Construct the *Static object that we will use for managing this subtable.
+ vector_of_strings_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_strings,
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_strings, kVtableIndex,
+ object_absolute_offset_vector_of_strings +
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kOffset -
+ kInlineAbsoluteOffset_vector_of_strings);
+ return &vector_of_strings_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_strings field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ vector_of_strings() const {
+ return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0> *
+ mutable_vector_of_strings() {
+ return vector_of_strings_.has_value() ? &vector_of_strings_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_strings field. This will cause has_vector_of_strings()
+ // to return false.
+ void clear_vector_of_strings() {
+ vector_of_strings_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_strings, 4, 10);
+ }
+
+ // Returns true if the vector_of_strings field is set and can be accessed.
+ bool has_vector_of_strings() const {
+ return AsFlatbuffer().has_vector_of_strings();
+ }
+
+ // Creates an empty object for the vector_of_structs field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ add_vector_of_structs() {
+ CHECK(!vector_of_structs_.has_value());
+ constexpr size_t kVtableIndex = 18;
+ // Construct the *Static object that we will use for managing this subtable.
+ vector_of_structs_.emplace(
+ BufferForObject(
+ object_absolute_offset_vector_of_structs,
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_structs, kVtableIndex,
+ object_absolute_offset_vector_of_structs +
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kOffset -
+ kInlineAbsoluteOffset_vector_of_structs);
+ return &vector_of_structs_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_structs field, if set. nullptr
+ // otherwise.
+ const ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ vector_of_structs() const {
+ return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0> *
+ mutable_vector_of_structs() {
+ return vector_of_structs_.has_value() ? &vector_of_structs_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_structs field. This will cause has_vector_of_structs()
+ // to return false.
+ void clear_vector_of_structs() {
+ vector_of_structs_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_structs, 4, 18);
+ }
+
+ // Returns true if the vector_of_structs field is set and can be accessed.
+ bool has_vector_of_structs() const {
+ return AsFlatbuffer().has_vector_of_structs();
+ }
+
+ // Creates an empty object for the vector_of_tables field, which you can
+ // then populate/modify as desired.
+ // The field must not be populated yet.
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ add_vector_of_tables() {
+ CHECK(!vector_of_tables_.has_value());
+ constexpr size_t kVtableIndex = 20;
+ // Construct the *Static object that we will use for managing this subtable.
+ vector_of_tables_.emplace(
+ BufferForObject(object_absolute_offset_vector_of_tables,
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3,
+ false, 0>::kSize,
+ kAlign),
+ this);
+ // Actually set the appropriate fields in the flatbuffer memory itself.
+ SetField<::flatbuffers::uoffset_t>(
+ kInlineAbsoluteOffset_vector_of_tables, kVtableIndex,
+ object_absolute_offset_vector_of_tables +
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false,
+ 0>::kOffset -
+ kInlineAbsoluteOffset_vector_of_tables);
+ return &vector_of_tables_.value().t;
+ }
+
+ // Returns a pointer to the vector_of_tables field, if set. nullptr otherwise.
+ const ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ vector_of_tables() const {
+ return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
+ : nullptr;
+ }
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0> *
+ mutable_vector_of_tables() {
+ return vector_of_tables_.has_value() ? &vector_of_tables_.value().t
+ : nullptr;
+ }
+
+ // Clears the vector_of_tables field. This will cause has_vector_of_tables()
+ // to return false.
+ void clear_vector_of_tables() {
+ vector_of_tables_.reset();
+ ClearField(kInlineAbsoluteOffset_vector_of_tables, 4, 20);
+ }
+
+ // Returns true if the vector_of_tables field is set and can be accessed.
+ bool has_vector_of_tables() const {
+ return AsFlatbuffer().has_vector_of_tables();
+ }
+
+ // Clears every field of the table, removing any existing state.
+ void Clear() {
+ clear_included_table();
+ clear_scalar();
+ clear_string();
+ clear_substruct();
+ clear_subtable();
+ clear_unspecified_length_string();
+ clear_unspecified_length_vector();
+ clear_unspecified_length_vector_of_strings();
+ clear_vector_aligned();
+ clear_vector_of_scalars();
+ clear_vector_of_strings();
+ clear_vector_of_structs();
+ clear_vector_of_tables();
+ }
+
+ // Copies the contents of the provided flatbuffer into this flatbuffer,
+ // returning true on success.
+ [[nodiscard]] bool FromFlatbuffer(const Flatbuffer *other) {
+ Clear();
+
+ if (other->has_included_table()) {
+ if (!CHECK_NOTNULL(add_included_table())
+ ->FromFlatbuffer(other->included_table())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_scalar()) {
+ set_scalar(other->scalar());
+ }
+
+ if (other->has_string()) {
+ if (!CHECK_NOTNULL(add_string())->FromFlatbuffer(other->string())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_substruct()) {
+ set_substruct(*other->substruct());
+ }
+
+ if (other->has_subtable()) {
+ if (!CHECK_NOTNULL(add_subtable())->FromFlatbuffer(other->subtable())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_unspecified_length_string()) {
+ if (!CHECK_NOTNULL(add_unspecified_length_string())
+ ->FromFlatbuffer(other->unspecified_length_string())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_unspecified_length_vector()) {
+ if (!CHECK_NOTNULL(add_unspecified_length_vector())
+ ->FromFlatbuffer(other->unspecified_length_vector())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_unspecified_length_vector_of_strings()) {
+ if (!CHECK_NOTNULL(add_unspecified_length_vector_of_strings())
+ ->FromFlatbuffer(
+ other->unspecified_length_vector_of_strings())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_vector_aligned()) {
+ if (!CHECK_NOTNULL(add_vector_aligned())
+ ->FromFlatbuffer(other->vector_aligned())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_vector_of_scalars()) {
+ if (!CHECK_NOTNULL(add_vector_of_scalars())
+ ->FromFlatbuffer(other->vector_of_scalars())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_vector_of_strings()) {
+ if (!CHECK_NOTNULL(add_vector_of_strings())
+ ->FromFlatbuffer(other->vector_of_strings())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_vector_of_structs()) {
+ if (!CHECK_NOTNULL(add_vector_of_structs())
+ ->FromFlatbuffer(other->vector_of_structs())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ if (other->has_vector_of_tables()) {
+ if (!CHECK_NOTNULL(add_vector_of_tables())
+ ->FromFlatbuffer(other->vector_of_tables())) {
+ // Fail if we were unable to copy (e.g., if we tried to copy in a long
+ // vector and do not have the space for it).
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ // We need to provide a MoveConstructor to allow this table to be
+ // used inside of vectors, but we do not want it readily available to
+ // users. See TableMover for more details.
+ TestTableStatic(TestTableStatic &&) = default;
+ friend struct ::aos::fbs::internal::TableMover<TestTableStatic>;
+
+ // Members relating to the included_table field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ aos::fbs::testing::included::IncludedTableStatic>>
+ included_table_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_included_table =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(0, kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_included_table = 4;
+
+ // Offset from the start of the buffer to the inline data for the scalar
+ // field.
+ static constexpr size_t kInlineAbsoluteOffset_scalar = 8;
+
+ // Members relating to the string field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<20>>>
+ string_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_string =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::IncludedTableStatic::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_string = 12;
+
+ // Offset from the start of the buffer to the inline data for the substruct
+ // field.
+ static constexpr size_t kInlineAbsoluteOffset_substruct = 16;
+
+ // Members relating to the subtable field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<aos::fbs::testing::SubTableStatic>>
+ subtable_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_subtable =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_subtable = 32;
+
+ // Members relating to the unspecified_length_string field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<::aos::fbs::String<0>>>
+ unspecified_length_string_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_unspecified_length_string =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_unspecified_length_string = 36;
+
+ // Members relating to the unspecified_length_vector field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<::aos::fbs::Vector<uint8_t, 0, true, 0>>>
+ unspecified_length_vector_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_unspecified_length_vector =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_unspecified_length_vector = 40;
+
+ // Members relating to the unspecified_length_vector_of_strings field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>>>
+ unspecified_length_vector_of_strings_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_unspecified_length_vector_of_strings =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings = 44;
+
+ // Members relating to the vector_aligned field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<int32_t, 3, true, 64>>>
+ vector_aligned_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_vector_aligned =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_aligned = 48;
+
+ // Members relating to the vector_of_scalars field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<
+ ::aos::fbs::internal::TableMover<::aos::fbs::Vector<int32_t, 3, true, 0>>>
+ vector_of_scalars_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_vector_of_scalars =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_scalars = 52;
+
+ // Members relating to the vector_of_strings field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>>>
+ vector_of_strings_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_vector_of_strings =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_strings = 56;
+
+ // Members relating to the vector_of_structs field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true, 0>>>
+ vector_of_structs_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_vector_of_structs =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0, kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0, false,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false, 0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_structs = 60;
+
+ // Members relating to the vector_of_tables field.
+ //
+ // *Static object used for managing this subtable. Will be nullopt
+ // when the field is not populated.
+ // We use the TableMover to be able to make this object moveable.
+ std::optional<::aos::fbs::internal::TableMover<
+ ::aos::fbs::Vector<aos::fbs::testing::SubTableStatic, 3, false, 0>>>
+ vector_of_tables_;
+ // Offset from the start of the buffer to the start of the actual
+ // data for this field. Will be updated even when the table is not
+ // populated, so that we know where to construct it when requested.
+ size_t object_absolute_offset_vector_of_tables =
+ ::aos::fbs::PaddedSize(kVtableStart + kVtableSize, kAlign) +
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(
+ ::aos::fbs::PaddedSize(0,
+ kAlign) +
+ aos::fbs::testing::included::
+ IncludedTableStatic::
+ kSize,
+ kAlign) +
+ ::aos::fbs::String<20>::kSize,
+ kAlign) +
+ aos::fbs::testing::SubTableStatic::
+ kSize,
+ kAlign) +
+ ::aos::fbs::String<0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<uint8_t, 0, true,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<0>, 0,
+ false, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 64>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<int32_t, 3, true, 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<::aos::fbs::String<10>, 3, false,
+ 0>::kSize,
+ kAlign) +
+ ::aos::fbs::Vector<aos::fbs::testing::SubStruct, 3, true,
+ 0>::kSize,
+ kAlign);
+ // Offset from the start of the buffer to the offset in the inline data for
+ // this field.
+ static constexpr size_t kInlineAbsoluteOffset_vector_of_tables = 64;
+
+ size_t NumberOfSubObjects() const final { return 11; }
+ using ::aos::fbs::ResizeableObject::SubObject;
+ SubObject GetSubObject(size_t index) final {
+ SubObject object;
+ // Note: The below arrays are local variables rather than class members to
+ // avoid having to deal with what happens to them if the object is moved.
+
+ // Array of the members that we use for tracking where the buffers for
+ // each subobject belong.
+ // Pointers because these may need to be modified when memory is
+ // inserted into the buffer.
+ const std::array<size_t *, 11> subobject_object_offsets{
+ &object_absolute_offset_included_table,
+ &object_absolute_offset_string,
+ &object_absolute_offset_subtable,
+ &object_absolute_offset_unspecified_length_string,
+ &object_absolute_offset_unspecified_length_vector,
+ &object_absolute_offset_unspecified_length_vector_of_strings,
+ &object_absolute_offset_vector_aligned,
+ &object_absolute_offset_vector_of_scalars,
+ &object_absolute_offset_vector_of_strings,
+ &object_absolute_offset_vector_of_structs,
+ &object_absolute_offset_vector_of_tables};
+ // Actual subobjects; note that the pointers will be invalid when the
+ // field is not populated.
+ const std::array<::aos::fbs::ResizeableObject *, 11> subobject_objects{
+ &included_table_->t,
+ &string_->t,
+ &subtable_->t,
+ &unspecified_length_string_->t,
+ &unspecified_length_vector_->t,
+ &unspecified_length_vector_of_strings_->t,
+ &vector_aligned_->t,
+ &vector_of_scalars_->t,
+ &vector_of_strings_->t,
+ &vector_of_structs_->t,
+ &vector_of_tables_->t};
+ // Absolute offsets from the start of the buffer to where the inline
+ // entry is for each table. These offsets do not need to change at
+ // runtime (because memory is never inserted into the start of
+ // a given table), but the offsets pointed to by these offsets
+ // may need to be updated.
+ const std::array<size_t, 11> subobject_inline_offsets{
+ kInlineAbsoluteOffset_included_table,
+ kInlineAbsoluteOffset_string,
+ kInlineAbsoluteOffset_subtable,
+ kInlineAbsoluteOffset_unspecified_length_string,
+ kInlineAbsoluteOffset_unspecified_length_vector,
+ kInlineAbsoluteOffset_unspecified_length_vector_of_strings,
+ kInlineAbsoluteOffset_vector_aligned,
+ kInlineAbsoluteOffset_vector_of_scalars,
+ kInlineAbsoluteOffset_vector_of_strings,
+ kInlineAbsoluteOffset_vector_of_structs,
+ kInlineAbsoluteOffset_vector_of_tables};
+ object.inline_entry =
+ MutableGet<::flatbuffers::uoffset_t>(subobject_inline_offsets[index]);
+ object.object =
+ (*object.inline_entry == 0) ? nullptr : subobject_objects[index];
+ object.absolute_offset = subobject_object_offsets[index];
+ return object;
+ }
+};
+} // namespace aos::fbs::testing
diff --git a/aos/flatbuffers/test_dir/type_coverage.fbs b/aos/flatbuffers/test_dir/type_coverage.fbs
new file mode 100644
index 0000000..d364fd5
--- /dev/null
+++ b/aos/flatbuffers/test_dir/type_coverage.fbs
@@ -0,0 +1,152 @@
+// This fbs file attempts to cover a wide variety of flatbuffer scenarios; it
+// is mostly just copied from json_to_flatbuffer.fbs, which itself steals from
+// other sources.
+
+// Use a namespace that has no overlap with the aos::fbs namespace of the underlying code.
+namespace frc971.testing;
+
+enum BaseType : byte {
+ None,
+ UType,
+ Bool,
+ Byte,
+ UByte,
+ Short,
+ UShort,
+ Int,
+ UInt,
+ Long,
+ ULong,
+ Float,
+ Double,
+ String,
+ Vector,
+ Obj, // Used for tables & structs.
+ Union,
+ Array
+}
+
+enum NonConsecutive : int {
+ Zero = 0,
+ Big = 10000000,
+}
+
+table Location {
+ name:string (id: 0);
+ type:string (id: 1);
+ frequency:int (id: 2);
+ max_size:int (id: 3);
+}
+
+table Map {
+ match:Location (id: 0);
+ rename:Location (id: 1);
+}
+
+table Application {
+ name:string (id: 0);
+ priority:int (id: 1);
+ maps:[Map] (id: 2);
+ long_thingy:uint64 (id: 3);
+}
+
+table VectorOfStrings {
+ str:[string] (id: 0);
+}
+
+table VectorOfVectorOfString {
+ v:[VectorOfStrings] (id: 0);
+}
+
+struct FooStructNested {
+ foo_byte:byte;
+}
+
+struct FooStruct {
+ foo_byte:byte;
+ nested_struct:FooStructNested;
+}
+
+struct ScalarSweepStruct {
+ foo_float:float;
+ foo_double:double;
+ foo_int32:int32;
+ foo_uint32:uint32;
+ foo_int64:int64;
+ foo_uint64:uint64;
+}
+
+struct StructEnum {
+ foo_enum:BaseType;
+}
+
+table Configuration {
+ locations:[Location] (id: 0);
+ maps:[Map] (id: 1);
+ apps:[Application] (id: 2);
+ imports:[string] (id: 3);
+
+ // 8 bit: byte ubyte bool
+ // 16 bit: short ushort
+ // 32 bit: int uint float
+ // 64 bit: long ulong double
+
+ // Simple values.
+ foo_byte:byte (id: 4);
+ foo_ubyte:ubyte (id: 5);
+ foo_bool:bool (id: 6);
+
+ foo_short:short (id: 7);
+ foo_ushort:ushort (id: 8);
+
+ foo_int:int (id: 9);
+ foo_uint:uint (id: 10);
+
+ foo_long:long (id: 11);
+ foo_ulong:ulong (id: 12);
+
+ foo_float:float (id: 13);
+ foo_double:double (id: 14);
+
+ foo_string:string (id: 15);
+
+ foo_enum:BaseType (id: 16);
+ foo_enum_default:BaseType = None (id: 17);
+
+ // Test vectors now.
+ vector_foo_byte:[byte] (id: 18);
+ vector_foo_ubyte:[ubyte] (id: 19);
+ vector_foo_bool:[bool] (id: 20);
+
+ vector_foo_short:[short] (id: 21);
+ vector_foo_ushort:[ushort] (id: 22);
+
+ vector_foo_int:[int] (id: 23);
+ vector_foo_uint:[uint] (id: 24);
+
+ vector_foo_long:[long] (id: 25);
+ vector_foo_ulong:[ulong] (id: 26);
+
+ vector_foo_float:[float] (id: 27);
+ vector_foo_double:[double] (id: 28);
+
+ vector_foo_string:[string] (id: 29);
+
+ vector_foo_enum:[BaseType] (id: 30);
+
+ // And a simple nested application.
+ single_application:Application (id: 31);
+
+ vov:VectorOfVectorOfString (id: 32);
+
+ foo_struct:FooStruct (id: 33);
+ vector_foo_struct:[FooStruct] (id: 34);
+ foo_struct_enum:StructEnum (id: 35);
+ foo_struct_scalars:ScalarSweepStruct (id: 36);
+ vector_foo_struct_scalars:[ScalarSweepStruct] (id: 37);
+
+ foo_enum_nonconsecutive:NonConsecutive (id: 38);
+ foo_enum_nonconsecutive_default:NonConsecutive = Big (id: 39);
+}
+
+root_type Configuration;
diff --git a/aos/flatbuffers/test_dir/type_coverage.json b/aos/flatbuffers/test_dir/type_coverage.json
new file mode 100644
index 0000000..6d2bb16
--- /dev/null
+++ b/aos/flatbuffers/test_dir/type_coverage.json
@@ -0,0 +1,216 @@
+{
+ "locations": [
+ {
+ "name": "foobar",
+ "type": "FooBar",
+ "max_size": 123
+ },
+ {
+ "name": "ten",
+ "type": "eleven",
+ "frequency": 12,
+ "max_size": 13
+ }
+ ],
+ "maps": [
+ {
+ "match": {
+ "type": ""
+ },
+ "rename": {
+ "type": "ABC"
+ }
+ }
+ ],
+ "apps": [
+ {
+ "name": "APPLICATION NAME",
+ "priority": 1,
+ "maps": [
+ {
+ "match": {
+ "type": "123"
+ },
+ "rename": {
+ "type": "456"
+ }
+ },
+ {
+
+ },
+ {
+ "match": {
+ "name": "foo",
+ "type": "Abc"
+ },
+ "rename": {
+ "name": "Def",
+ "type": "bar"
+ }
+ }
+ ]
+ }
+ ],
+ "imports": [
+ "file_one",
+ "file_twoooooooooooooooooooooooooooooooooooooooooooooooooooo",
+ "",
+ "",
+ "",
+ "file_three"
+ ],
+ "foo_byte": -123,
+ "foo_ubyte": 200,
+ "foo_bool": true,
+ "foo_short": -200,
+ "foo_ushort": 40000,
+ "foo_int": -40000,
+ "foo_uint": 3000000000,
+ "foo_long": -3000000000,
+ "foo_ulong": 9223372036854775809,
+ "foo_float": 1.23,
+ "foo_double": 1.45,
+ "foo_string": "Hello, Wooooooooooooooooooooooooooooooooooooooooooooorld!",
+ "foo_enum": "Byte",
+ "foo_enum_default": "UByte",
+ "vector_foo_byte": [
+ -1,
+ -2,
+ -3,
+ -4
+ ],
+ "vector_foo_ubyte": [
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "vector_foo_bool": [
+ true,
+ false,
+ false,
+ true
+ ],
+ "vector_foo_short": [
+ 123,
+ 456,
+ 789,
+ -123
+ ],
+ "vector_foo_ushort": [
+ 123,
+ 456,
+ 789,
+ 1023
+ ],
+ "vector_foo_int": [
+ -10,
+ -20,
+ -30,
+ -40
+ ],
+ "vector_foo_uint": [
+ 10,
+ 20,
+ 30,
+ 40
+ ],
+ "vector_foo_long": [
+ 1,
+ 1,
+ 2,
+ 3,
+ 5,
+ 8,
+ 13
+ ],
+ "vector_foo_ulong": [
+ 21,
+ 34,
+ 55
+ ],
+ "vector_foo_float": [
+ 21.0,
+ 3.4,
+ 5.5
+ ],
+ "vector_foo_double": [
+ 21.1,
+ 3.5,
+ 5.6
+ ],
+ "vector_foo_string": [
+ "Hello",
+ ", ",
+ "World",
+ "!",
+ ""
+ ],
+ "vector_foo_enum": [
+ "None",
+ "Byte"
+ ],
+ "single_application": {
+
+ },
+ "vov": {
+ "v": [
+ {
+ "str": [
+ "one",
+ "two",
+ "three",
+ "four"
+ ]
+ },
+ {
+
+ }
+ ]
+ },
+ "foo_struct": {
+ "foo_byte": 123,
+ "nested_struct": {
+ "foo_byte": -20
+ }
+ },
+ "vector_foo_struct": [
+ {
+ "foo_byte": 123,
+ "nested_struct": {
+ "foo_byte": -20
+ }
+ }
+ ],
+ "foo_struct_enum": {
+ "foo_enum": "Float"
+ },
+ "foo_struct_scalars": {
+ "foo_float": 1.23,
+ "foo_double": 4.56,
+ "foo_int32": -789,
+ "foo_uint32": 789,
+ "foo_int64": -5000000000,
+ "foo_uint64": 5000000000
+ },
+ "vector_foo_struct_scalars": [
+ {
+ "foo_float": 1.0,
+ "foo_double": 2.0,
+ "foo_int32": 3,
+ "foo_uint32": 4,
+ "foo_int64": 5,
+ "foo_uint64": 6
+ },
+ {
+ "foo_float": 7.0,
+ "foo_double": 8.0,
+ "foo_int32": 9,
+ "foo_uint32": 10,
+ "foo_int64": 11,
+ "foo_uint64": 12
+ }
+ ],
+ "foo_enum_nonconsecutive": "Big",
+ "foo_enum_nonconsecutive_default": "Zero"
+}
diff --git a/aos/json_to_flatbuffer.h b/aos/json_to_flatbuffer.h
index b664036..6ef3544 100644
--- a/aos/json_to_flatbuffer.h
+++ b/aos/json_to_flatbuffer.h
@@ -12,6 +12,7 @@
#include "aos/fast_string_builder.h"
#include "aos/flatbuffer_utils.h"
#include "aos/flatbuffers.h"
+#include "aos/flatbuffers/builder.h"
#include "aos/util/file.h"
namespace aos {
@@ -38,6 +39,15 @@
JsonToFlatbuffer(data, FlatbufferType(T::MiniReflectTypeTable()), fbb).o);
}
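+
+// Parses the provided JSON into a statically-sized flatbuffer of type T
+// (where T is a generated *Static type), backed by a VectorAllocator.
+// CHECK-fails if copying the parsed data into the static flatbuffer fails.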
+template <typename T>
+inline fbs::Builder<T> JsonToStaticFlatbuffer(const std::string_view data) {
+ const aos::FlatbufferDetachedBuffer<typename T::Flatbuffer> fbs =
+ JsonToFlatbuffer<typename T::Flatbuffer>(data);
+ fbs::Builder<T> builder(std::make_unique<aos::fbs::VectorAllocator>());
+ CHECK(builder.get()->FromFlatbuffer(&fbs.message()));
+ return builder;
+}
+
struct JsonOptions {
// controls if the Json is written out on multiple lines or one.
bool multi_line = false;
@@ -53,7 +63,7 @@
// Converts a flatbuffer into a Json string.
// The methods below are generally more useful than TableFlatbufferToJson.
::std::string TableFlatbufferToJson(const flatbuffers::Table *t,
- const ::flatbuffers::TypeTable *typetable,
+ const flatbuffers::TypeTable *typetable,
JsonOptions json_options = {});
// Converts a Flatbuffer<T> holding a flatbuffer to JSON.
diff --git a/documentation/aos/docs/flatbuffers.md b/documentation/aos/docs/flatbuffers.md
index 438b882..83a7f92 100644
--- a/documentation/aos/docs/flatbuffers.md
+++ b/documentation/aos/docs/flatbuffers.md
@@ -1 +1,549 @@
# FlatBuffers
+
+This document covers the "static flatbuffers API".
+
+This is a custom C++ API, developed for AOS, for serializing flatbuffers. The
+serialized flatbuffers are fully compatible with the existing flatbuffers
+specification.
+
+## Design
+
+The overall goal of the static flatbuffers API is to make it so that a user can
+construct flatbuffers against a fixed-size memory buffer while being able to
+readily mutate any part of the flatbuffer object at any point during
+construction (rather than being forced to construct things from the bottom up).
+
+In particular:
+
+* The API should be able to both construct flatbuffers against fixed-size memory
+ buffers (for use in realtime code) as well as against variable-size buffers
+ (for easy offline flatbuffer manipulation).
+* We want to be able to select vector sizes at runtime (including in realtime
+ code) so that we can support using e.g. the same camera message schema (which
+ would generally contain a byte array of data) for multiple different
+ resolutions of an image.
+* The API should require minimal modifications to existing .fbs files (in fact,
+ it is usable without any modifications).
+* We want to be able to provide an option for deriving strict upper bounds on
+  the `max_size` of AOS channels (this is not currently fully implemented). This
+  does require specifying maximum vector sizes in the message schemas (and
+  adhering to them).
+* Using the API normally should have a low performance impact compared to
+  using the raw `FlatBufferBuilder` API.
+* The API should be difficult to use incorrectly by accident. For example, the
+  existing flatbuffers API requires that you not build multiple tables at once
+  and only enforces this with debug-mode runtime assertions; this is both hard
+  to develop against and prone to memory corruption when someone never runs
+  debug builds of their code.
+
+In order to accomplish this, we provide a codegen'd interface in place of the
+regular flatbuffer API. It provides the following objects/interfaces to work with:
+
+* For each table, a codegen'd class which inherits from the `aos::fbs::Table`
+ object. This provides accessors to let you set, get, clear, and mutate table
+ members at any time. The table objects take in an aligned `std::span` into
+ which they construct the flatbuffer. This class will be named `FooStatic`
+ for a given flatbuffer type `Foo`.
+* For flatbuffer vector/strings, `Vector` and `String` objects are provided
+ (the `Vector` object will generally be created by calling `add_*` on the
+ appropriate member of a table). These generally operate similarly in concept
+ to the table objects, allowing you to add/remove/modify elements at will.
+ `Vector`s and `String`s can have a nominal maximum length specified in order
+ to have the memory for those elements statically allocated, while also having
+ the ability to dynamically increase the size of the vectors.
+* In order to allow the construction of a flatbuffer table, a
+ templated `aos::fbs::Builder` object is provided which can
+ take an allocator and then provide the relevant table class to the user.
+* We provide an `Allocator` class and various implementations (e.g., a
+ `VectorAllocator` backed by an `std::vector`) for managing the memory into
+ which the `Builder` will serialize the flatbuffer.
+* A new `MakeStaticBuilder` method is provided on the `aos::Sender` class which
+  constructs an `aos::fbs::Builder` for building a message to be sent on an AOS
+  channel.
+* Existing `flatbuffer_cc_library` bazel targets get turned into `static_flatbuffer`
+ targets of the same name. Libraries do not need to change how they depend on
+ the bazel target, although using the new API in your code will require
+ importing a different header and using a different class than before.
+
+### Alignment
+
+Significant effort goes into ensuring that all objects are correctly aligned.
+This includes supporting the `force_align` attribute, which can be added to
+vectors to, e.g., over-align byte vectors that may need to store aligned data.
+The current alignment approach is relatively conservative,
+which may result in excessive padding (all padding in the serialized flatbuffers
+should get cleared to zero, hopefully allowing compression algorithms to handle
+the extra bytes reasonably efficiently).
+
+As a user, you should never need to do anything else to get correct alignment.
+Further discussion in this section is mostly relevant for those
+modifying/reviewing the internals.
+
+Internally, every `Vector` and `Table` type tracks its required alignment using
+a `kAlign` constant. This constant is set as the maximum alignment of any
+members of the object (this will always be a minimum of `4` because every
+flatbuffer table includes a 4-byte vtable offset and every flatbuffer vector
+includes a 4-byte length). The buffers provided to the constructors of these
+objects must be aligned, and the `kSize` constant that the objects provide will
+always be a multiple of the alignment. Additional discussion of the detailed
+layout of memory inside of the `Vector` and `Table` types can be found in the
+comments on the respective class declarations.
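+
+As a rough illustration (a sketch, not part of the generated code; it uses the
+`TestTableStatic` type from the examples below), these invariants can be
+written as compile-time checks:
+
+```cpp
+// Every table/vector is at least 4-aligned, and its size is always a multiple
+// of its alignment.
+static_assert(aos::fbs::testing::TestTableStatic::kAlign >= 4);
+static_assert(aos::fbs::testing::TestTableStatic::kSize %
+                  aos::fbs::testing::TestTableStatic::kAlign ==
+              0);
+```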
+
+In order to handle alignment correctly in our `Builder` and `Allocator` classes,
+we end up forcing the `Builder` to be able to accept semi-arbitrarily aligned
+buffers in order to ease the `Allocator` implementation (e.g., the
+`VectorAllocator` uses a `std::vector` internally which does not necessarily
+align its memory). The `Builder` then adds padding as needed and passes an
+appropriately aligned buffer down to the `Table` class.
+
+## Basic API Examples
+
+This example will walk through what the API for the following set of tables
+looks like (see `//aos/flatbuffers:test.fbs`):
+
+```cpp
+// Note: in the actual sample code, these are two separate files where one is
+// included in the other.
+namespace aos.fbs.testing.included;
+enum TestEnum : ubyte {
+ A = 0,
+ B = 1,
+}
+
+table IncludedTable {
+ foo:TestEnum (id: 0);
+}
+
+namespace aos.fbs.testing;
+
+struct SubStruct {
+ x:double;
+ y:double;
+}
+
+table SubTable {
+ foo:short (id: 0);
+ bar:short (id: 1, deprecated);
+ baz:float (id: 2);
+}
+
+attribute "static_length";
+attribute "static_vector_string_length";
+
+table TestTable {
+ scalar:int (id: 0);
+ vector_of_scalars:[int] (id: 1, static_length: 3);
+ string:string (id: 2, static_length: 20);
+ vector_of_strings:[string] (id: 3, static_length: 3, static_vector_string_length: 10);
+ substruct:SubStruct (id: 4);
+ subtable:SubTable (id: 5);
+ // The force-aligned vector is deliberately put in the middle of the table
+ // both by ID and alphabetically (both of these can affect the order in which
+ // certain things are evaluated, and during development there were some issues
+ // with this).
+ vector_aligned:[int] (id: 6, force_align: 64, static_length: 3);
+ vector_of_structs:[SubStruct] (id: 7, static_length: 3);
+ vector_of_tables:[SubTable] (id: 8, static_length: 3);
+ included_table:aos.fbs.testing.included.IncludedTable (id: 9);
+ unspecified_length_vector:[ubyte] (id: 10);
+ unspecified_length_string:string (id: 11);
+ unspecified_length_vector_of_strings:[string] (id: 12);
+}
+
+root_type TestTable;
+```
+
+All created types have an `AsFlatbuffer()` method which allows you to access the
+type using the regular generated flatbuffer API and a `FromFlatbuffer()` method
+which attempts to copy the specified flatbuffer into the current object.
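+
+For example (a minimal sketch; `object` is assumed to be a
+`TestTableStatic *` obtained from a `Builder`, and `source` an existing
+`aos::FlatbufferDetachedBuffer<TestTable>`):
+
+```cpp
+// Copy an existing flatbuffer into the static object; this can fail if, e.g.,
+// a vector in the source does not fit in the space we have allocated.
+CHECK(object->FromFlatbuffer(&source.message()));
+// Read the data back out through the regular generated flatbuffer API.
+const aos::fbs::testing::TestTable &readback = object->AsFlatbuffer();
+LOG(INFO) << readback.scalar();
+```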
+
+### Sample Usage
+
+The below example constructs a table of the above example `TestTable`:
+
+```cpp
+aos::FixedAllocator allocator(TestTableStatic::kUnalignedBufferSize);
+Builder<TestTableStatic> builder(&allocator);
+TestTableStatic *object = builder.get();
+object->set_scalar(123);
+{
+ auto vector = object->add_vector_of_scalars();
+ CHECK(vector->emplace_back(4));
+ CHECK(vector->emplace_back(5));
+}
+{
+ auto string = object->add_string();
+ string->SetString("Hello, World!");
+}
+{
+ auto vector_of_strings = object->add_vector_of_strings();
+ auto sub_string = CHECK_NOTNULL(vector_of_strings->emplace_back());
+ CHECK(sub_string->emplace_back('D'));
+}
+{
+ object->set_substruct({971, 254});
+}
+{
+ auto subtable = object->add_subtable();
+ subtable->set_foo(1234);
+}
+{
+ auto vector = object->add_vector_of_structs();
+ CHECK(vector->emplace_back({48, 67}));
+ CHECK(vector->emplace_back({118, 148}));
+ CHECK(vector->emplace_back({971, 973}));
+ // Max vector size is three; this should fail.
+ CHECK(!vector->emplace_back({1114, 2056}));
+}
+{
+ auto vector = object->add_vector_of_tables();
+ auto subobject = vector->emplace_back();
+ subobject->set_foo(222);
+}
+{
+ auto subtable = object->add_included_table();
+ subtable->set_foo(included::TestEnum::B);
+}
+LOG(INFO) <<
+ aos::FlatbufferToJson(builder.AsFlatbufferSpan(),
+ {.multi_line = true});
+```
+
+This will then output:
+
+```json
+{
+ "scalar": 123,
+ "vector_of_scalars": [
+ 4,
+ 5
+ ],
+ "string": "Hello, World!",
+ "vector_of_strings": [
+ "D"
+ ],
+ "substruct": {
+ "x": 971.0,
+ "y": 254.0
+ },
+ "subtable": {
+ "foo": 1234
+ },
+ "vector_of_structs": [
+ {
+ "x": 48.0,
+ "y": 67.0
+ },
+ {
+ "x": 118.0,
+ "y": 148.0
+ },
+ {
+ "x": 971.0,
+ "y": 973.0
+ }
+ ],
+ "vector_of_tables": [
+ {
+ "foo": 222
+ }
+ ],
+ "included_table": {
+ "foo": "B"
+ }
+}
+```
+
+### Converting `Populate*()` methods
+
+With existing flatbuffer code it is common to have
+`flatbuffers::Offset<> Populate*(FlatBufferBuilder*)` methods for populating
+subtables of a message. When converting these to the static API, you can
+keep the same patterns (although you have more flexibility available if you
+choose), but modify the `Populate` call slightly:
+
+```cpp
+namespace {
+flatbuffers::Offset<SubTable> PopulateOld(flatbuffers::FlatBufferBuilder *fbb) {
+ SubTable::Builder builder(*fbb);
+ builder.add_foo(1234);
+ return builder.Finish();
+}
+void PopulateStatic(SubTableStatic *subtable) { subtable->set_foo(1234); }
+} // namespace
+TEST_F(StaticFlatbuffersTest, PopulateMethodConversionExample) {
+ // Using a FlatBufferBuilder:
+ flatbuffers::FlatBufferBuilder fbb;
+  // Note: PopulateOld() *must* be called prior to creating the TestTable::Builder.
+ const flatbuffers::Offset<SubTable> subtable_offset = PopulateOld(&fbb);
+ TestTable::Builder testtable_builder(fbb);
+ testtable_builder.add_subtable(subtable_offset);
+ fbb.Finish(testtable_builder.Finish());
+ aos::FlatbufferDetachedBuffer<TestTable> fbb_finished = fbb.Release();
+
+ // Using the static flatbuffer API.
+ aos::fbs::VectorAllocator allocator;
+ Builder<TestTableStatic> static_builder(&allocator);
+ PopulateStatic(CHECK_NOTNULL(static_builder.get()->add_subtable()));
+
+ // And confirm that they both contain the expected flatbuffer:
+ const std::string expected = R"json({ "subtable": { "foo": 1234 }})json";
+ EXPECT_EQ(expected, aos::FlatbufferToJson(fbb_finished));
+ EXPECT_EQ(expected, aos::FlatbufferToJson(static_builder.AsFlatbufferSpan()));
+}
+```
+
+### Scalar Fields
+
+Scalar fields have an API which is reasonably close to that of the base
+flatbuffer builder API. Because space for the scalar fields (as with everything)
+is pre-allocated, these accessors may be called at any time.
+
+For an `int` field named `scalar`, we will have the following methods. Note that
+prior to any `set_*` method being called, the value will not be populated and so
+`has_*` methods will return false and accessors will return `nullopt/nullptr`:
+
+```cpp
+// Populates the value and sets it to the requested value. Calling set_scalar()
+// will cause has_scalar() to return true.
+void set_scalar(const int32_t &value);
+
+// Returns the value of scalar, if populated. Otherwise, returns nullopt.
+std::optional<int32_t> scalar() const;
+
+// Returns a pointer to the scalar, if populated. Otherwise, returns nullptr.
+// Note that because of the nature of this API we _could_ support always
+// returning a valid pointer, but then it would be relatively easy for a user
+// to modify the value of a field without ever causing it to become "populated."
+int32_t *mutable_scalar();
+
+// Clears the field. Does not invalidate pointers returned by
+// `mutable_scalar()`, although it will set the value of the field to zero.
+void clear_scalar();
+
+// Returns true if the scalar field is populated.
+bool has_scalar() const;
+```
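+
+A minimal usage sketch for these accessors (assuming the same namespace and
+allocator setup as the examples above):
+
+```cpp
+aos::fbs::VectorAllocator allocator;
+Builder<TestTableStatic> builder(&allocator);
+TestTableStatic *object = builder.get();
+CHECK(!object->has_scalar());
+object->set_scalar(971);
+CHECK_EQ(971, object->scalar().value());
+object->clear_scalar();
+CHECK(!object->has_scalar());
+```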
+
+### Enum fields
+
+Enum fields operate identically to scalar fields, except that the type in
+question is the flatbuffer enum type rather than a C++ scalar of some sort.
+
+### Struct fields
+
+Struct fields operate identically to scalar fields, except that the type in
+question is the flatbuffer C-struct type rather than a scalar.
+
+*Note*: This is different from how the raw flatbuffer API handles structs.
+Regular flatbuffers pass around pointers to the structs rather than
+references.
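+
+For example, for the `substruct` field of type `SubStruct` (a sketch following
+the same pattern as the scalar accessors):
+
+```cpp
+object->set_substruct({971.0, 254.0});
+// substruct() returns a copy of the struct wrapped in std::optional.
+const std::optional<aos::fbs::testing::SubStruct> substruct =
+    object->substruct();
+CHECK(substruct.has_value());
+CHECK_EQ(971.0, substruct->x());
+```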
+
+### Table fields
+
+For fields of a table which are themselves tables, the accessors will return a
+pointer to an object that can be used to access/populate the subtable in
+question. The accessors are generally similar to those used by the scalar
+fields.
+
+The accessors that will be generated for a field named `subtable` of type
+`SubTable` are below:
+
+```cpp
+// Creates a SubTable at the subtable member.
+// Will die if the field is already populated (this aspect of the API is
+// subject to change if we discover that people like to be able to call
+// add_* multiple times).
+aos::fbs::testing::SubTableStatic *add_subtable();
+
+// The following will return pointers to the subtable member, or nullptr
+// if it is not populated.
+const aos::fbs::testing::SubTableStatic *subtable() const;
+aos::fbs::testing::SubTableStatic *mutable_subtable();
+
+// Depopulates the subtable member.
+void clear_subtable();
+
+// Returns true if the subtable has been populated. This does not
+// mean that there is necessarily anything interesting *in* the table,
+// just that it exists and can be modified.
+bool has_subtable() const;
+```
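+
+Putting these together, a typical population pattern looks roughly like the
+sketch below, reusing the `static_builder` and the
+`TestTableStatic`/`SubTableStatic` types from the earlier conversion example:
+
+```cpp
+// Sketch: populate a subtable field, guarding against double-adding.
+TestTableStatic *table = static_builder.get();
+if (!table->has_subtable()) {
+  aos::fbs::testing::SubTableStatic *subtable =
+      CHECK_NOTNULL(table->add_subtable());
+  subtable->set_foo(1234);
+}
+// Read it back; mutable_subtable() would return a mutable pointer instead.
+const aos::fbs::testing::SubTableStatic *readback = table->subtable();
+CHECK(readback != nullptr);
+```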
+
+### Vectors
+
+A vector may contain any other type except for other vectors; the one
+exception is strings, so vectors of strings are permitted. The APIs for
+inline types (scalars, enums, and structs) versus not-inline types (strings
+and tables; name improvements are welcome... maybe "object"?) are slightly
+different because of differences in how the underlying serialization works.
+
+As already mentioned, each vector has a "static" size, which is specified
+in the flatbuffer schema by the `static_length` attribute (in order to use
+this attribute you must have an `attribute "static_length";` line
+somewhere in your `.fbs` file). This is the number of elements for which space
+will be pre-allocated in the vector, and it is what allows the code generator
+to decide how much space to allocate when constructing the flatbuffer.
+Changing this number does not affect backwards compatibility because the
+underlying flatbuffer representation permits vectors of arbitrary length
+(up to 2^32 elements).
+The maximum size of a vector may be accessed at runtime using the `capacity()`
+accessor on the `aos::fbs::Vector` type.
+
+*Note*: You may not use dynamically sized vectors of strings or tables in
+realtime code, as each additional string/table member requires bookkeeping
+memory which cannot be conveniently allocated anywhere except the heap.
+The primary use-case for dynamically sized vectors in realtime code
+is vectors of scalars; if this changes, we can try to add options to
+support this. Dynamically sized vectors of tables/strings are supported in
+non-realtime code.
+
+If you wish to increase the alignment of a vector beyond the base alignment, you
+can use the `force_align` attribute, as seen below:
+
+```
+vector_aligned:[int] (id: 6, force_align: 64, static_length: 3);
+```
+
+If you do this, the first element of the vector will be aligned to the requested
+alignment.
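+
+As a quick sanity check, the resulting alignment can be observed at runtime.
+This sketch assumes a generated `add_vector_aligned()` accessor for the field
+above:
+
+```cpp
+// Sketch: the first element of a force_align'd vector lands on the
+// requested 64-byte boundary.
+auto *vector_aligned = CHECK_NOTNULL(table->add_vector_aligned());
+CHECK(vector_aligned->emplace_back(971));
+CHECK_EQ(reinterpret_cast<uintptr_t>(vector_aligned->data()) % 64, 0u);
+```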
+
+The `aos::fbs::Vector` API is designed to mirror the `std::vector` API, with
+some changes to allow for better error handling. Common accessors (a short
+usage sketch follows the list):
+
+* `capacity()`: Maximum number of elements that this vector can accommodate.
+* `size()`: Current number of elements populated in this vector.
+* `T *emplace_back()`: Adds a not-inline (string or table) type to the vector and returns
+ the added object. If there is no more space, returns `nullptr` (call
+ `reserve()` to attempt to allocate more space).
+* `bool emplace_back(T)`: Adds an inline (scalar, enum, or struct) type to the vector and
+ returns true on success. Returns false if there is no more space in the
+ vector (call `reserve()` to attempt to allocate more space).
+* `AsFlatbufferVector()`, `AsMutableFlatbufferVector()`: Returns a
+ `flatbuffers::Vector` of the appropriate type pointing to the vector
+ that we are constructing.
+* `T &at(size_t index)`/`T &operator[](size_t index)`: Returns the
+ object at the requested index. Dies if `index >= size()` (unlike
+ `std::vector`, `operator[]` does perform bounds checking; use `unsafe_at()`
+ if you want to avoid the performance overhead of the bounds check).
+* `resize_inline(size_t size, SetZero set_zero)`/`resize(size_t size)`:
+ Resizes the vector to the requested size (dies if the vector cannot
+ accommodate the requested size). For inline types,
+ you may optionally leave any newly inserted elements uninitialized.
+ For not-inline types, will default construct new elements.
+* `T* data()`: Returns a pointer to the first element of the vector. Only valid
+ for inline data types.
+* `bool reserve(size_t new_length)`: Used to dynamically change the amount of
+ space allocated for the vector; returns false on failure (e.g., if you are in
+ a fixed-size allocator that does not support increasing the size past a
+ certain point).
+* `bool FromFlatbuffer(const flatbuffers::Vector<>*)`: Attempts to copy an
+ existing vector into this `Vector`. This may attempt to call `reserve()`
+ if the new vector is longer than `capacity()`. If the copy fails for
+ any reason, returns `false`.
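+
+The sketch below exercises the most common of these accessors on an inline
+vector; the `add_vector_of_scalars()` accessor name for an `[int]` field is
+assumed for illustration:
+
+```cpp
+// Sketch: basic use of an inline vector-of-scalars field.
+auto *vector = CHECK_NOTNULL(table->add_vector_of_scalars());
+CHECK(vector->emplace_back(118));
+CHECK(vector->emplace_back(971));
+CHECK_EQ(vector->size(), 2u);
+CHECK_EQ(vector->at(1), 971);
+// data() is valid here because the element type is inline.
+LOG(INFO) << "First element: " << vector->data()[0];
+```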
+
+#### Managing Resizing of Vectors
+
+When dealing with sizes of vectors, there are two separate "lengths" that are
+relevant at any given time:
+
+1. The `capacity`/allocated length of the vector. This is the length for which
+ there is currently space allocated in the flatbuffer array and in the
+ `Vector` object itself. Upon initialization, this will be equal to the
+ `static_length` for the vector. This can only be changed by calling
+ `reserve()` or indirectly in `FromFlatbuffer()` (which calls `reserve()`).
+2. The `size`/current actual length of the vector. This is the number of
+ elements that are currently actually populated in the vector. The current
+ `size` of the vector cannot exceed the `capacity`. This will be modified by
+ calls to `emplace_back()`/`resize*()` (and indirectly by `FromFlatbuffer()`).
+
+Because `emplace_back()` and `resize*()` do not call `reserve()` themselves, they
+will fail if the capacity of the vector does not currently allow for the
+element to be added. When `emplace_back()` returns `false`, you may call
+`reserve()` to attempt to allocate the requisite space; `reserve()` itself
+returns `false` if the allocation fails (e.g., if you are allocating against a
+fixed-size buffer and do not have sufficient space for the requested
+allocation). This means that the user is forced to explicitly request changes
+to the memory layout and allocation of the flatbuffer rather than having them
+hidden inside calls to `emplace_back()` and the like.
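+
+Concretely, growing a vector at runtime looks something like the sketch below
+(again assuming the hypothetical `vector_of_scalars` field; the growth policy
+is entirely up to the caller):
+
+```cpp
+// Sketch: explicitly reserve more space when emplace_back() reports that the
+// pre-allocated capacity is exhausted.
+auto *vector = CHECK_NOTNULL(table->add_vector_of_scalars());
+for (int value : {1, 2, 3, 4, 5}) {
+  if (!vector->emplace_back(value)) {
+    // Out of pre-allocated space; ask the allocator to grow the vector.
+    // reserve() returns false if the allocator cannot provide the space.
+    CHECK(vector->reserve(vector->capacity() * 2 + 1));
+    CHECK(vector->emplace_back(value));
+  }
+}
+```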
+
+### Strings
+
+Strings are a special case of vectors. The key differences are listed below,
+followed by a brief usage sketch:
+
+* They are always null-terminated (this is enforced by the API
+ itself).
+* Because they are null-terminated, the actual vector length will
+ be `static_length + 1`. Users should not attempt to
+ access this extra character.
+* The vector elements are of type `char`.
+* Strings are supposed to be UTF-8. This C++ API does not enforce
+ this constraint, but if you want a vector of bytes, then use
+ a vector of bytes (`[ubyte]`) instead.
+* For the special-case where you have a vector of strings, you may specify
+ the static length of the strings inside of the vector using the
+ `static_vector_string_length` attribute.
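+
+As a rough sketch, populating a string field might look like the following.
+This assumes a `string` field named `name` with a generated `add_name()`
+accessor, and that the generated string type exposes a `SetString()` helper;
+check the generated header for the exact spellings:
+
+```cpp
+// Sketch: populating a string field. The contents must fit within the field's
+// static_length; the API maintains the null terminator itself.
+auto *name = CHECK_NOTNULL(table->add_name());
+name->SetString("static flatbuffer");
+```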
+
+
+## Use With EventLoop Senders
+
+In order to use the static API with `aos::Sender`s you need to:
+
+1. Change the `aos::Sender` to be templated on the `MessageStatic` instead of
+ the `Message` type.
+2. Use the `MakeStaticBuilder` method instead of the `MakeBuilder` method.
+3. Alter the actual flatbuffer API calls to use the new object.
+4. In the `Send()` call, remove the `builder.Finish()` as it is no longer necessary.
+
+### Simple Conversion Example
+
+This is an extremely simple example of converting a sender to the new API, taken
+from `event_loop_param_test.cc`:
+
+First, the bazel targets must be updated to generate the new code:
+
+```python
+# This load must be added so that the static_flatbuffer rule is available.
+load("@org_frc971//aos/flatbuffers:generate.bzl", "static_flatbuffer")
+
+# Remove the prior `flatbuffer_cc_library` and replace it with something like
+# this. This target had no dependencies, but any deps will also need to be
+# upgraded to static_flatbuffer rules.
+static_flatbuffer(
+ name = "test_message_fbs",
+ src = "test_message.fbs",
+)
+```
+
+Before:
+```cpp
+ aos::Sender<TestMessage> sender = loop1->MakeSender<TestMessage>("/test");
+
+ loop->OnRun([&]() {
+ aos::Sender<TestMessage>::Builder msg = sender.MakeBuilder();
+ TestMessage::Builder builder = msg.MakeBuilder<TestMessage>();
+ builder.add_value(200);
+ msg.CheckOk(msg.Send(builder.Finish()));
+ });
+```
+
+After:
+```cpp
+ aos::Sender<TestMessageStatic> sender =
+ loop1->MakeSender<TestMessageStatic>("/test");
+
+ loop->OnRun([&]() {
+ aos::Sender<TestMessageStatic>::StaticBuilder msg =
+ sender.MakeStaticBuilder();
+ msg.get()->set_value(200);
+ msg.CheckOk(msg.Send());
+ });
+```
+
+## Future Improvements
+
+### Suggested API Additions/Improvements
+
+* An `add_or_get_subtable` generated method that avoids the need for the user to
+ check `has_subtable()` before calling `add_subtable()`.
+* `operator->()` in places to reduce syntactic overhead.
+* Make naming of `StaticVector` methods more consistent with `std::vector`.
diff --git a/third_party/flatbuffers/BUILD.bazel b/third_party/flatbuffers/BUILD.bazel
index dc5e735..2c91f5c 100644
--- a/third_party/flatbuffers/BUILD.bazel
+++ b/third_party/flatbuffers/BUILD.bazel
@@ -41,6 +41,7 @@
name = "flatbuffers",
hdrs = ["//:public_headers"],
copts = ["-Wno-cast-align"],
+ defines = ["FLATBUFFERS_USE_STD_SPAN"],
linkstatic = 1,
strip_include_prefix = "/include",
deps = ["//src:flatbuffers"],