Copy flatbuffers efficiently while respecting DAGs.
Deduce the memory region that needs to be copied by traversing the DAG,
and then copy that region wholesale. This does no mallocs (yay!) and
preserves DAGs.
There are still some cases where we actually want to copy things
recursively, flattening any DAG into a tree. Do that when asked.
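
Roughly, the resulting API looks like this (a sketch only; "Config"
stands in for any generated table type with MiniReflect support, "msg"
for a pointer to one, and "fb" for a Flatbuffer<Config> such as a
FlatbufferSpan):

  flatbuffers::FlatBufferBuilder fbb;
  fbb.ForceDefaults(true);

  // Wholesale copy: walk the type table to find the extent of the
  // memory backing msg, then push that one region.  No mallocs, and
  // shared subtables stay shared (DAGs are preserved).
  flatbuffers::Offset<Config> o1 = aos::CopyFlatBuffer<Config>(msg, &fbb);

  // Recursive copy: rebuild the message field by field, duplicating
  // shared subtables (DAGs become trees).
  flatbuffers::Offset<Config> o2 =
      aos::RecursiveCopyFlatBuffer<Config>(msg, &fbb);

  // Blind copy: push the entire backing buffer without looking inside.
  flatbuffers::Offset<Config> o3 = aos::BlindCopyFlatBuffer<Config>(fb, &fbb);
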
Change-Id: Ia0ffde26d569fa92ee2bbb49706c17d9d657d125
diff --git a/aos/config_flattener.cc b/aos/config_flattener.cc
index e4bd192..a9500e3 100644
--- a/aos/config_flattener.cc
+++ b/aos/config_flattener.cc
@@ -43,6 +43,8 @@
aos::FlatbufferDetachedBuffer<Configuration> merged_config =
configuration::MergeConfiguration(config, schemas);
+ // In case we've done something overly weird to the flatbuffer...
+ CHECK(merged_config.Verify());
const std::string merged_config_json =
FlatbufferToJson(&merged_config.message(), {.multi_line = true});
diff --git a/aos/configuration.cc b/aos/configuration.cc
index 99c36c2..1d39f27 100644
--- a/aos/configuration.cc
+++ b/aos/configuration.cc
@@ -427,7 +427,7 @@
// which we do need to alter (hence why we can't use the input config
// directly), and then merge auto_merge_config back in at the end.
aos::FlatbufferDetachedBuffer<aos::Configuration> auto_merge_config =
- aos::CopyFlatBuffer(&config.message());
+ aos::RecursiveCopyFlatBuffer(&config.message());
// Store all the channels in a sorted set. This lets us track channels we
// have seen before and merge the updates in.
@@ -450,10 +450,11 @@
<< CleanedChannelToString(c);
// Attempt to insert the channel.
- auto result = channels.insert(CopyFlatBuffer(c));
+ auto result = channels.insert(RecursiveCopyFlatBuffer(c));
if (!result.second) {
// Already there, so merge the new table into the original.
- *result.first = MergeFlatBuffers(*result.first, CopyFlatBuffer(c));
+ *result.first =
+ MergeFlatBuffers(*result.first, RecursiveCopyFlatBuffer(c));
}
}
}
@@ -467,9 +468,10 @@
continue;
}
- auto result = applications.insert(CopyFlatBuffer(a));
+ auto result = applications.insert(RecursiveCopyFlatBuffer(a));
if (!result.second) {
- *result.first = MergeFlatBuffers(*result.first, CopyFlatBuffer(a));
+ *result.first =
+ MergeFlatBuffers(*result.first, RecursiveCopyFlatBuffer(a));
}
}
}
@@ -483,9 +485,10 @@
continue;
}
- auto result = nodes.insert(CopyFlatBuffer(n));
+ auto result = nodes.insert(RecursiveCopyFlatBuffer(n));
if (!result.second) {
- *result.first = MergeFlatBuffers(*result.first, CopyFlatBuffer(n));
+ *result.first =
+ MergeFlatBuffers(*result.first, RecursiveCopyFlatBuffer(n));
}
}
}
@@ -500,7 +503,8 @@
{
::std::vector<flatbuffers::Offset<Channel>> channel_offsets;
for (const FlatbufferDetachedBuffer<Channel> &c : channels) {
- channel_offsets.emplace_back(CopyFlatBuffer<Channel>(&c.message(), &fbb));
+ channel_offsets.emplace_back(
+ RecursiveCopyFlatBuffer<Channel>(&c.message(), &fbb));
}
channels_offset = fbb.CreateVector(channel_offsets);
}
@@ -512,7 +516,7 @@
::std::vector<flatbuffers::Offset<Application>> applications_offsets;
for (const FlatbufferDetachedBuffer<Application> &a : applications) {
applications_offsets.emplace_back(
- CopyFlatBuffer<Application>(&a.message(), &fbb));
+ RecursiveCopyFlatBuffer<Application>(&a.message(), &fbb));
}
applications_offset = fbb.CreateVector(applications_offsets);
}
@@ -523,7 +527,8 @@
{
::std::vector<flatbuffers::Offset<Node>> node_offsets;
for (const FlatbufferDetachedBuffer<Node> &n : nodes) {
- node_offsets.emplace_back(CopyFlatBuffer<Node>(&n.message(), &fbb));
+ node_offsets.emplace_back(
+ RecursiveCopyFlatBuffer<Node>(&n.message(), &fbb));
}
nodes_offset = fbb.CreateVector(node_offsets);
}
@@ -715,8 +720,8 @@
if (cached_schema != schema_cache.end()) {
schema_offset = cached_schema->second;
} else {
- schema_offset =
- CopyFlatBuffer<reflection::Schema>(&found_schema->message(), &fbb);
+ schema_offset = RecursiveCopyFlatBuffer<reflection::Schema>(
+ &found_schema->message(), &fbb);
schema_cache.emplace(c->type()->string_view(), schema_offset);
}
@@ -725,9 +730,8 @@
flatbuffers::Offset<flatbuffers::String> type_offset =
fbb.CreateSharedString(c->type()->str());
flatbuffers::Offset<flatbuffers::String> source_node_offset =
- c->has_source_node()
- ? fbb.CreateSharedString(c->source_node()->str())
- : 0;
+ c->has_source_node() ? fbb.CreateSharedString(c->source_node()->str())
+ : 0;
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Connection>>>
destination_nodes_offset =
@@ -773,7 +777,6 @@
channel_builder.add_num_readers(c->num_readers());
}
channel_offsets.emplace_back(channel_builder.Finish());
-
}
channels_offset = fbb.CreateVector(channel_offsets);
}
diff --git a/aos/events/logging/logger.cc b/aos/events/logging/logger.cc
index 3ecbf42..c38cf03 100644
--- a/aos/events/logging/logger.cc
+++ b/aos/events/logging/logger.cc
@@ -444,8 +444,9 @@
flatbuffers::Offset<Node> logger_node_offset;
if (configuration::MultiNode(configuration_)) {
- node_offset = CopyFlatBuffer(node, &fbb);
- logger_node_offset = CopyFlatBuffer(event_loop_->node(), &fbb);
+ // TODO(austin): Reuse the node we just copied in above.
+ node_offset = RecursiveCopyFlatBuffer(node, &fbb);
+ logger_node_offset = RecursiveCopyFlatBuffer(event_loop_->node(), &fbb);
}
aos::logger::LogFileHeader::Builder log_file_header_builder(fbb);
@@ -489,7 +490,12 @@
log_file_header_builder.add_parts_index(0);
fbb.FinishSizePrefixed(log_file_header_builder.Finish());
- return fbb.Release();
+ aos::SizePrefixedFlatbufferDetachedBuffer<LogFileHeader> result(
+ fbb.Release());
+
+ CHECK(result.Verify()) << ": Built a corrupted header.";
+
+ return result;
}
void Logger::ResetStatisics() {
diff --git a/aos/flatbuffer_merge.cc b/aos/flatbuffer_merge.cc
index e548f70..dd4b4fa 100644
--- a/aos/flatbuffer_merge.cc
+++ b/aos/flatbuffer_merge.cc
@@ -100,8 +100,8 @@
const flatbuffers::Table *sub_t2 =
reinterpret_cast<const flatbuffers::Table *>(val2);
- elements->emplace_back(field_offset,
- MergeFlatBuffers(sub_typetable, sub_t1, sub_t2, fbb));
+ elements->emplace_back(
+ field_offset, MergeFlatBuffers(sub_typetable, sub_t1, sub_t2, fbb));
}
}
@@ -549,4 +549,220 @@
fbb1.GetSize()) == 0;
}
+// Struct to track a range of memory.
+struct Bounds {
+ const uint8_t *min;
+ const uint8_t *max;
+
+ absl::Span<const uint8_t> span() {
+ return {min, static_cast<size_t>(max - min)};
+ }
+};
+
+// Grows the range of memory to contain the pointer.
+void Extend(Bounds *b, const uint8_t *ptr) {
+ b->min = std::min(ptr, b->min);
+ b->max = std::max(ptr, b->max);
+}
+
+// Grows the range of memory to contain the span.
+void Extend(Bounds *b, absl::Span<const uint8_t> data) {
+ b->min = std::min(data.data(), b->min);
+ b->max = std::max(data.data() + data.size(), b->max);
+}
+
+// Finds the extents of the provided string. Returns the containing span and
+// required alignment.
+std::pair<absl::Span<const uint8_t>, size_t> ExtentsString(
+ const flatbuffers::String *s) {
+ const uint8_t *s_uint8 = reinterpret_cast<const uint8_t *>(s);
+ // Strings are null terminated.
+ Bounds b{.min = s_uint8,
+ .max = s_uint8 + sizeof(flatbuffers::uoffset_t) + s->size() + 1};
+ return std::make_pair(b.span(), sizeof(flatbuffers::uoffset_t));
+}
+
+// Finds the extents of the provided table. Returns the containing span and the
+// required alignment.
+std::pair<absl::Span<const uint8_t>, size_t> ExtentsTable(
+ const flatbuffers::TypeTable *type_table, const flatbuffers::Table *t1) {
+ const uint8_t *t1_uint8 = reinterpret_cast<const uint8_t *>(t1);
+ // Count the offset to the vtable.
+ Bounds b{.min = t1_uint8, .max = t1_uint8 + sizeof(flatbuffers::soffset_t)};
+ // Find the limits of the vtable and start of table.
+ const uint8_t *vt = t1->GetVTable();
+ Extend(&b, vt);
+ Extend(&b, vt + flatbuffers::ReadScalar<flatbuffers::voffset_t>(vt));
+ // We need to be at least as aligned as the vtable pointer. Start there.
+ size_t alignment = sizeof(flatbuffers::uoffset_t);
+
+ // Now do all our fields.
+ for (size_t field_index = 0; field_index < type_table->num_elems;
+ ++field_index) {
+ const flatbuffers::TypeCode type_code = type_table->type_codes[field_index];
+ const flatbuffers::ElementaryType elementary_type =
+ static_cast<flatbuffers::ElementaryType>(type_code.base_type);
+ const flatbuffers::TypeTable *field_type_table =
+ type_code.sequence_ref >= 0
+ ? type_table->type_refs[type_code.sequence_ref]()
+ : nullptr;
+
+ // Note: we don't yet support enums, structs, or unions. That is mostly
+ // because we haven't had a use case yet.
+
+ // Compute the pointer to our field.
+ const uint8_t *val = nullptr;
+ if (type_table->st == flatbuffers::ST_TABLE) {
+ val = t1->GetAddressOf(flatbuffers::FieldIndexToOffset(
+ static_cast<flatbuffers::voffset_t>(field_index)));
+ // Bail on non-populated fields.
+ if (val == nullptr) continue;
+ } else {
+ val = t1_uint8 + type_table->values[field_index];
+ }
+
+ // Now make sure the field is aligned properly.
+ const size_t field_size =
+ flatbuffers::InlineSize(elementary_type, field_type_table);
+ alignment = std::max(
+ alignment, std::min(sizeof(flatbuffers::largest_scalar_t), field_size));
+
+ absl::Span<const uint8_t> field_span(val, field_size);
+
+ Extend(&b, field_span);
+
+ if (type_code.is_vector) {
+ // Go look inside the vector and track the base size.
+ val += flatbuffers::ReadScalar<flatbuffers::uoffset_t>(val);
+ const flatbuffers::Vector<uint8_t> *vec =
+ reinterpret_cast<const flatbuffers::Vector<uint8_t> *>(val);
+ absl::Span<const uint8_t> vec_span(
+ val, sizeof(flatbuffers::uoffset_t) +
+ vec->size() * flatbuffers::InlineSize(elementary_type,
+ field_type_table));
+ Extend(&b, vec_span);
+ // Non-scalar vectors need their pointers followed.
+ if (elementary_type == flatbuffers::ElementaryType::ET_STRING) {
+ for (size_t i = 0; i < vec->size(); ++i) {
+ const uint8_t *field_ptr =
+ vec->Data() + i * InlineSize(elementary_type, field_type_table);
+ std::pair<absl::Span<const uint8_t>, size_t> str_data =
+ ExtentsString(reinterpret_cast<const flatbuffers::String *>(
+ field_ptr +
+ flatbuffers::ReadScalar<flatbuffers::uoffset_t>(field_ptr)));
+ Extend(&b, str_data.first);
+ alignment = std::max(alignment, str_data.second);
+ }
+ } else if (elementary_type == flatbuffers::ElementaryType::ET_SEQUENCE) {
+ for (size_t i = 0; i < vec->size(); ++i) {
+ const uint8_t *field_ptr =
+ vec->Data() + i * InlineSize(elementary_type, field_type_table);
+ CHECK(field_type_table->st == flatbuffers::ST_TABLE)
+ << ": Only tables are supported right now. Patches welcome.";
+
+ std::pair<absl::Span<const uint8_t>, size_t> sub_data = ExtentsTable(
+ field_type_table,
+ reinterpret_cast<const flatbuffers::Table *>(
+ field_ptr +
+ flatbuffers::ReadScalar<flatbuffers::uoffset_t>(field_ptr)));
+ alignment = std::max(alignment, sub_data.second);
+ Extend(&b, sub_data.first);
+ }
+ }
+
+ continue;
+ }
+
+ switch (elementary_type) {
+ case flatbuffers::ElementaryType::ET_UTYPE:
+ case flatbuffers::ElementaryType::ET_BOOL:
+ case flatbuffers::ElementaryType::ET_CHAR:
+ case flatbuffers::ElementaryType::ET_UCHAR:
+ case flatbuffers::ElementaryType::ET_SHORT:
+ case flatbuffers::ElementaryType::ET_USHORT:
+ case flatbuffers::ElementaryType::ET_INT:
+ case flatbuffers::ElementaryType::ET_UINT:
+ case flatbuffers::ElementaryType::ET_LONG:
+ case flatbuffers::ElementaryType::ET_ULONG:
+ case flatbuffers::ElementaryType::ET_FLOAT:
+ case flatbuffers::ElementaryType::ET_DOUBLE:
+ // This is covered by the field and size above.
+ break;
+ case flatbuffers::ElementaryType::ET_STRING: {
+ std::pair<absl::Span<const uint8_t>, size_t> str_data =
+ ExtentsString(reinterpret_cast<const flatbuffers::String *>(
+ val + flatbuffers::ReadScalar<flatbuffers::uoffset_t>(val)));
+ alignment = std::max(alignment, str_data.second);
+ Extend(&b, str_data.first);
+ } break;
+ case flatbuffers::ElementaryType::ET_SEQUENCE: {
+ switch (field_type_table->st) {
+ case flatbuffers::ST_TABLE: {
+ const flatbuffers::Table *sub_table =
+ reinterpret_cast<const flatbuffers::Table *>(
+ val + flatbuffers::ReadScalar<flatbuffers::uoffset_t>(val));
+ std::pair<absl::Span<const uint8_t>, size_t> sub_data =
+ ExtentsTable(field_type_table, sub_table);
+ alignment = std::max(alignment, sub_data.second);
+ Extend(&b, sub_data.first);
+ } break;
+ case flatbuffers::ST_ENUM:
+ LOG(FATAL) << "Copying enums not implemented yet";
+ case flatbuffers::ST_STRUCT:
+ LOG(FATAL) << "Copying structs not implemented yet";
+ case flatbuffers::ST_UNION:
+ LOG(FATAL) << "Copying unions not implemented yet";
+ }
+ }
+ }
+ }
+
+ // To be a parsable flatbuffer, the buffer needs to be aligned to the
+ // maximum internal alignment, in both starting point and length.  Make
+ // that true by rounding the start pointer down and the end pointer up to
+ // the required alignment.
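+ // For example (addresses purely illustrative): with an alignment of 8, a
+ // min pointer of 0x1005 is walked down to 0x1000 and a max pointer of
+ // 0x1013 up to 0x1018, so both ends of the span stay 8 byte aligned.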
+ CHECK((alignment & (alignment - 1)) == 0)
+ << ": Invalid alignment: " << alignment << ", needs to be a power of 2.";
+ while (reinterpret_cast<uintptr_t>(b.min) & (alignment - 1)) {
+ --b.min;
+ }
+ while (reinterpret_cast<uintptr_t>(b.max) & (alignment - 1)) {
+ ++b.max;
+ }
+
+ return std::make_pair(b.span(), alignment);
+}
+
+// Computes the offset, containing span, and alignment of the provided
+// flatbuffer.
+std::tuple<flatbuffers::Offset<flatbuffers::Table>, absl::Span<const uint8_t>,
+ size_t>
+Extents(const flatbuffers::TypeTable *type_table,
+ const flatbuffers::Table *t1) {
+ std::pair<absl::Span<const uint8_t>, size_t> data =
+ ExtentsTable(type_table, t1);
+
+ return std::make_tuple(flatbuffers::Offset<flatbuffers::Table>(
+ static_cast<flatbuffers::uoffset_t>(
+ data.first.data() + data.first.size() -
+ reinterpret_cast<const uint8_t *>(t1))),
+ data.first, data.second);
+}
+
+flatbuffers::Offset<flatbuffers::Table> CopyFlatBuffer(
+ const flatbuffers::Table *t1, const flatbuffers::TypeTable *typetable,
+ flatbuffers::FlatBufferBuilder *fbb) {
+ std::tuple<flatbuffers::Offset<flatbuffers::Table>, absl::Span<const uint8_t>,
+ size_t>
+ r = Extents(typetable, t1);
+
+ // Pad out enough so that the flatbuffer alignment is preserved.
+ fbb->Align(std::get<2>(r));
+
+ // Now push everything we found.  Offsets are tracked from the end of the
+ // buffer while building, so recompute the returned offset from the back.
+ fbb->PushBytes(std::get<1>(r).data(), std::get<1>(r).size());
+ return fbb->GetSize() + std::get<0>(r).o - std::get<1>(r).size();
+}
+
} // namespace aos
diff --git a/aos/flatbuffer_merge.h b/aos/flatbuffer_merge.h
index 7844227..7518bed 100644
--- a/aos/flatbuffer_merge.h
+++ b/aos/flatbuffer_merge.h
@@ -70,13 +70,57 @@
fbb);
}
+// Copies a flatbuffer by walking the tree and copying all the pieces. This
+// converts DAGs to trees.
template <class T>
-inline flatbuffers::Offset<T> CopyFlatBuffer(
+inline flatbuffers::Offset<T> RecursiveCopyFlatBuffer(
const T *t1, flatbuffers::FlatBufferBuilder *fbb) {
return MergeFlatBuffers<T>(reinterpret_cast<const flatbuffers::Table *>(t1),
nullptr, fbb);
}
+// Copies a flatbuffer by finding the extents of the memory using the typetable
+// and copying the containing memory. This doesn't allocate memory, and
+// preserves DAGs.
+flatbuffers::Offset<flatbuffers::Table> CopyFlatBuffer(
+ const flatbuffers::Table *t1, const flatbuffers::TypeTable *typetable,
+ flatbuffers::FlatBufferBuilder *fbb);
+
+template <class T>
+inline flatbuffers::Offset<T> CopyFlatBuffer(
+ const T *t1, flatbuffers::FlatBufferBuilder *fbb) {
+ return flatbuffers::Offset<T>(
+ CopyFlatBuffer(reinterpret_cast<const flatbuffers::Table *>(t1),
+ T::MiniReflectTypeTable(), fbb)
+ .o);
+}
+
+template <class T>
+inline flatbuffers::Offset<T> CopyFlatBuffer(
+ const Flatbuffer<T> &t1, flatbuffers::FlatBufferBuilder *fbb) {
+ return flatbuffers::Offset<T>(
+ CopyFlatBuffer(
+ reinterpret_cast<const flatbuffers::Table *>(&t1.message()),
+ T::MiniReflectTypeTable(), fbb)
+ .o);
+}
+
+// Copies a flatbuffer wholesale by copying the entire backing buffer,
+// without looking inside it, and returns an offset pointing into the copy.
+template <class T>
+inline flatbuffers::Offset<T> BlindCopyFlatBuffer(
+ const Flatbuffer<T> &t, flatbuffers::FlatBufferBuilder *fbb) {
+ // Enforce 8 byte alignment so anything inside the flatbuffer can be read.
+ fbb->Align(sizeof(flatbuffers::largest_scalar_t));
+
+ // We don't know how much of the start of the flatbuffer is padding. The
+ // safest thing to do from an alignment point of view (without looking inside)
+ // is to copy the initial offset and leave it as dead space.
+ fbb->PushBytes(t.data(), t.size());
+ return fbb->GetSize() -
+ flatbuffers::ReadScalar<flatbuffers::uoffset_t>(t.data());
+}
+
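+// Of the three copy helpers above: CopyFlatBuffer is the cheap, DAG-preserving
+// default; RecursiveCopyFlatBuffer flattens shared subtables into a tree;
+// BlindCopyFlatBuffer trades some dead space at the front of the buffer for
+// never having to look inside the data.
+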
template <class T>
inline flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<T>>>
CopyVectorTable(const flatbuffers::Vector<flatbuffers::Offset<T>> *t1,
@@ -114,6 +158,14 @@
return FlatbufferDetachedBuffer<T>(fbb.Release());
}
+template <class T>
+inline FlatbufferDetachedBuffer<T> RecursiveCopyFlatBuffer(const T *t) {
+ flatbuffers::FlatBufferBuilder fbb;
+ fbb.ForceDefaults(true);
+ fbb.Finish(RecursiveCopyFlatBuffer<T>(t, &fbb));
+ return FlatbufferDetachedBuffer<T>(fbb.Release());
+}
+
// Compares 2 flatbuffers. Returns true if they match, false otherwise.
bool CompareFlatBuffer(const flatbuffers::TypeTable *typetable,
const flatbuffers::Table *t1,
diff --git a/aos/flatbuffer_merge_test.cc b/aos/flatbuffer_merge_test.cc
index 890a831..85773a3 100644
--- a/aos/flatbuffer_merge_test.cc
+++ b/aos/flatbuffer_merge_test.cc
@@ -1,5 +1,8 @@
#include "aos/flatbuffer_merge.h"
+#include <string_view>
+
+#include "absl/strings/escaping.h"
#include "gtest/gtest.h"
#include "aos/json_to_flatbuffer.h"
@@ -9,6 +12,20 @@
namespace aos {
namespace testing {
+std::string_view FromFbb(const flatbuffers::FlatBufferBuilder &fbb) {
+ return std::string_view(
+ reinterpret_cast<const char *>(fbb.GetCurrentBufferPointer()),
+ fbb.GetSize());
+}
+
+std::string_view FromFbb(const flatbuffers::DetachedBuffer &b) {
+ return std::string_view(reinterpret_cast<const char *>(b.data()), b.size());
+}
+
+absl::Span<const uint8_t> ToSpan(const flatbuffers::DetachedBuffer &b) {
+ return absl::Span<const uint8_t>(b.data(), b.size());
+}
+
class FlatbufferMerge : public ::testing::Test {
public:
FlatbufferMerge() {}
@@ -232,6 +249,103 @@
EXPECT_EQ(out_nested,
FlatbufferToJson(fb_merged, ConfigurationTypeTable()));
}
+
+ // TODO(austin): Try more flatbuffers...
+ // Now try copying various flatbuffers and confirming they match.
+ {
+ flatbuffers::FlatBufferBuilder aos_flatbuffer_copy_fbb;
+ aos_flatbuffer_copy_fbb.ForceDefaults(true);
+
+ LOG(INFO) << "Copying " << in1 << " "
+ << absl::BytesToHexString(FromFbb(fb1)) << " at "
+ << reinterpret_cast<const void *>(fb1.data()) << " size "
+ << fb1.size();
+ aos_flatbuffer_copy_fbb.Finish(CopyFlatBuffer<Configuration>(
+ flatbuffers::GetRoot<Configuration>(fb1.data()),
+ &aos_flatbuffer_copy_fbb));
+
+ aos::FlatbufferDetachedBuffer<Configuration> fb_copy(
+ aos_flatbuffer_copy_fbb.Release());
+ ASSERT_NE(fb_copy.size(), 0u);
+
+ flatbuffers::Verifier v(fb1.data(), fb1.size());
+ EXPECT_TRUE(
+ v.VerifyTable(flatbuffers::GetRoot<Configuration>(fb1.data())));
+
+ ASSERT_TRUE(fb_copy.Verify()) << in1;
+
+ EXPECT_EQ(in1, FlatbufferToJson(fb_copy));
+ }
+
+ {
+ flatbuffers::FlatBufferBuilder aos_flatbuffer_copy_message_ptr_fbb;
+ aos_flatbuffer_copy_message_ptr_fbb.ForceDefaults(true);
+
+ aos_flatbuffer_copy_message_ptr_fbb.Finish(CopyFlatBuffer<Configuration>(
+ flatbuffers::GetRoot<Configuration>(fb2.data()),
+ &aos_flatbuffer_copy_message_ptr_fbb));
+
+ aos::FlatbufferDetachedBuffer<Configuration> fb_copy_message_ptr(
+ aos_flatbuffer_copy_message_ptr_fbb.Release());
+ ASSERT_NE(fb_copy_message_ptr.size(), 0u);
+
+ flatbuffers::FlatBufferBuilder aos_flatbuffer_copy_full_fbb;
+ aos_flatbuffer_copy_full_fbb.ForceDefaults(true);
+
+ aos_flatbuffer_copy_full_fbb.Finish(BlindCopyFlatBuffer<Configuration>(
+ aos::FlatbufferSpan<Configuration>(ToSpan(fb2)),
+ &aos_flatbuffer_copy_full_fbb));
+
+ aos::FlatbufferDetachedBuffer<Configuration> fb_copy_full(
+ aos_flatbuffer_copy_full_fbb.Release());
+ ASSERT_NE(fb_copy_full.size(), 0u);
+
+ flatbuffers::Verifier v(fb2.data(), fb2.size());
+ EXPECT_TRUE(
+ v.VerifyTable(flatbuffers::GetRoot<Configuration>(fb2.data())));
+
+ LOG(INFO) << "Verifying copy of " << in2;
+ ASSERT_TRUE(fb_copy_message_ptr.Verify()) << in2;
+ LOG(INFO) << "Verifying full of " << in2;
+ ASSERT_TRUE(fb_copy_full.Verify()) << in2;
+
+ EXPECT_EQ(in2, FlatbufferToJson(fb_copy_message_ptr));
+ EXPECT_EQ(in2, FlatbufferToJson(fb_copy_full));
+ }
+
+ {
+ flatbuffers::FlatBufferBuilder aos_flatbuffer_copy_fbb;
+ aos_flatbuffer_copy_fbb.ForceDefaults(true);
+
+ aos_flatbuffer_copy_fbb.Finish(CopyFlatBuffer<Configuration>(
+ flatbuffers::GetRoot<Configuration>(fb1_nested.data()),
+ &aos_flatbuffer_copy_fbb));
+
+ aos::FlatbufferDetachedBuffer<Configuration> fb_copy(
+ aos_flatbuffer_copy_fbb.Release());
+ ASSERT_NE(fb_copy.size(), 0u);
+
+ ASSERT_TRUE(fb_copy.Verify());
+
+ EXPECT_EQ(in1_nested, FlatbufferToJson(fb_copy));
+ }
+
+ {
+ flatbuffers::FlatBufferBuilder aos_flatbuffer_copy_fbb;
+ aos_flatbuffer_copy_fbb.ForceDefaults(true);
+
+ aos_flatbuffer_copy_fbb.Finish(CopyFlatBuffer<Configuration>(
+ flatbuffers::GetRoot<Configuration>(fb2_nested.data()),
+ &aos_flatbuffer_copy_fbb));
+
+ aos::FlatbufferDetachedBuffer<Configuration> fb_copy(
+ aos_flatbuffer_copy_fbb.Release());
+ ASSERT_NE(fb_copy.size(), 0u);
+
+ ASSERT_TRUE(fb_copy.Verify());
+
+ EXPECT_EQ(in2_nested, FlatbufferToJson(fb_copy));
+ }
}
};
@@ -342,6 +456,126 @@
"\"name\": \"woo2\" }, { \"name\": \"wo3\" } ] }");
}
+// Tests wholesale copying of nested messages, and of arrays of nested
+// messages.
+TEST(FlatbufferCopy, WholesaleCopy) {
+ flatbuffers::FlatBufferBuilder fbb_expected;
+ fbb_expected.ForceDefaults(true);
+ fbb_expected.DedupVtables(false);
+
+ {
+ flatbuffers::Offset<flatbuffers::String> name1_offset =
+ fbb_expected.CreateString("wow");
+
+ std::vector<flatbuffers::Offset<Application>> application_offsets;
+ Application::Builder a1_builder(fbb_expected);
+ a1_builder.add_name(name1_offset);
+ a1_builder.add_priority(7);
+ a1_builder.add_long_thingy(0x2549713132LL);
+ application_offsets.emplace_back(a1_builder.Finish());
+
+ flatbuffers::Offset<flatbuffers::String> name2_offset =
+ fbb_expected.CreateString("foo");
+ Application::Builder a2_builder(fbb_expected);
+ a2_builder.add_name(name2_offset);
+ a2_builder.add_priority(3);
+ a2_builder.add_long_thingy(9);
+ application_offsets.emplace_back(a2_builder.Finish());
+
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Application>>>
+ applications_offset = fbb_expected.CreateVector(application_offsets);
+
+ Configuration::Builder configuration_builder(fbb_expected);
+ configuration_builder.add_apps(applications_offset);
+ fbb_expected.Finish(configuration_builder.Finish());
+ }
+
+ LOG(INFO) << "Initial alignment " << fbb_expected.GetBufferMinAlignment();
+
+ aos::FlatbufferDetachedBuffer<Configuration> expected(fbb_expected.Release());
+
+ LOG(INFO) << "Expected "
+ << absl::BytesToHexString(std::string_view(
+ reinterpret_cast<char *>(expected.span().data()),
+ expected.span().size()));
+
+ aos::FlatbufferDetachedBuffer<Application> a1 = []() {
+ flatbuffers::FlatBufferBuilder fbb;
+ fbb.ForceDefaults(true);
+
+ flatbuffers::Offset<flatbuffers::String> name1_offset =
+ fbb.CreateString("wow");
+
+ Application::Builder a1_builder(fbb);
+ a1_builder.add_name(name1_offset);
+ a1_builder.add_priority(7);
+ a1_builder.add_long_thingy(0x2549713132LL);
+ flatbuffers::Offset<Application> a1 = a1_builder.Finish();
+
+ fbb.Finish(a1);
+ return fbb.Release();
+ }();
+
+ aos::FlatbufferDetachedBuffer<Application> a2 =
+ JsonToFlatbuffer<Application>(static_cast<const char *>(
+ "{ \"name\": \"foo\", \"priority\": 3, \"long_thingy\": 9 }"));
+
+ aos::FlatbufferDetachedBuffer<Configuration> c1 = [&a1, &a2]() {
+ flatbuffers::FlatBufferBuilder fbb;
+ fbb.ForceDefaults(true);
+
+ std::vector<flatbuffers::Offset<Application>> application_offsets;
+ application_offsets.emplace_back(BlindCopyFlatBuffer(a1, &fbb));
+ application_offsets.emplace_back(BlindCopyFlatBuffer(a2, &fbb));
+
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Application>>>
+ applications_offset = fbb.CreateVector(application_offsets);
+
+ Configuration::Builder configuration_builder(fbb);
+ configuration_builder.add_apps(applications_offset);
+ fbb.Finish(configuration_builder.Finish());
+
+ return fbb.Release();
+ }();
+
+ LOG(INFO) << "Got "
+ << absl::BytesToHexString(
+ std::string_view(reinterpret_cast<char *>(c1.span().data()),
+ c1.span().size()));
+
+ aos::FlatbufferDetachedBuffer<Configuration> c2 = [&a1, &a2]() {
+ flatbuffers::FlatBufferBuilder fbb;
+ fbb.ForceDefaults(true);
+
+ std::vector<flatbuffers::Offset<Application>> application_offsets;
+ application_offsets.emplace_back(CopyFlatBuffer(&a1.message(), &fbb));
+ application_offsets.emplace_back(CopyFlatBuffer(&a2.message(), &fbb));
+
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Application>>>
+ applications_offset = fbb.CreateVector(application_offsets);
+
+ Configuration::Builder configuration_builder(fbb);
+ configuration_builder.add_apps(applications_offset);
+ fbb.Finish(configuration_builder.Finish());
+
+ return fbb.Release();
+ }();
+
+ LOG(INFO) << "Got "
+ << absl::BytesToHexString(
+ std::string_view(reinterpret_cast<char *>(c2.span().data()),
+ c2.span().size()));
+
+ ASSERT_TRUE(expected.Verify());
+ ASSERT_TRUE(c1.Verify());
+ ASSERT_TRUE(c2.Verify());
+
+ LOG(INFO) << FlatbufferToJson(expected);
+ LOG(INFO) << FlatbufferToJson(c1);
+ LOG(INFO) << FlatbufferToJson(c2);
+ EXPECT_EQ(FlatbufferToJson(expected), FlatbufferToJson(c1));
+ EXPECT_EQ(FlatbufferToJson(expected), FlatbufferToJson(c2));
+}
+
// Tests a compare of 2 basic (different) messages.
TEST_F(FlatbufferMerge, CompareDifferent) {
aos::FlatbufferDetachedBuffer<Configuration> message1(JsonToFlatbuffer(
diff --git a/aos/flatbuffers.h b/aos/flatbuffers.h
index b619452..21dd30c 100644
--- a/aos/flatbuffers.h
+++ b/aos/flatbuffers.h
@@ -128,6 +128,38 @@
// Wipes out the data buffer. This is handy to mark an instance as freed, and
// make attempts to use it fail more obviously.
void Wipe() { memset(data(), 0, size()); }
+
+ virtual bool Verify() const {
+ flatbuffers::Verifier v(data(), size());
+ return v.VerifyTable(&message());
+ }
+};
+
+// Non-owning Span backed flatbuffer.
+template <typename T>
+class FlatbufferSpan : public Flatbuffer<T> {
+ public:
+ // Builds a flatbuffer pointing to the contents of a span.
+ FlatbufferSpan(const absl::Span<const uint8_t> data) : data_(data) {}
+ // Builds a FlatbufferSpan pointing to the contents of another flatbuffer.
+ FlatbufferSpan(const Flatbuffer<T> &other) { data_ = other.span(); }
+
+ // Points this FlatbufferSpan at the contents of the other flatbuffer; no
+ // data is copied.
+ FlatbufferSpan &operator=(const Flatbuffer<T> &other) {
+ data_ = other.span();
+ return *this;
+ }
+
+ virtual ~FlatbufferSpan() override {}
+
+ const uint8_t *data() const override { return data_.data(); }
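+ // A FlatbufferSpan doesn't own its memory, so mutable access isn't
+ // supported; check-fail if anybody asks for it.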
+ uint8_t *data() override { return CHECK_NOTNULL(nullptr); }
+ size_t size() const override { return data_.size(); }
+
+ private:
+ absl::Span<const uint8_t> data_;
};
// String backed flatbuffer.
@@ -360,6 +392,13 @@
return absl::Span<const uint8_t>(buffer_.data(), buffer_.size());
}
+ bool Verify() const override {
+ // TODO(austin): Should we push full_span up to Flatbuffer<> class?
+ // The base pointer has the wrong alignment if we strip off the size.
+ flatbuffers::Verifier v(full_span().data(), full_span().size());
+ return v.VerifyTable(&this->message());
+ }
+
private:
flatbuffers::DetachedBuffer buffer_;
};
diff --git a/aos/json_to_flatbuffer.fbs b/aos/json_to_flatbuffer.fbs
index 8c0402c..ad2c901 100644
--- a/aos/json_to_flatbuffer.fbs
+++ b/aos/json_to_flatbuffer.fbs
@@ -37,6 +37,7 @@
name:string;
priority:int;
maps:[Map];
+ long_thingy:uint64;
}
table VectorOfStrings {
diff --git a/aos/network/message_bridge_protocol.cc b/aos/network/message_bridge_protocol.cc
index 0a4a50e..8936da8 100644
--- a/aos/network/message_bridge_protocol.cc
+++ b/aos/network/message_bridge_protocol.cc
@@ -19,7 +19,8 @@
flatbuffers::FlatBufferBuilder fbb;
fbb.ForceDefaults(true);
- flatbuffers::Offset<Node> node_offset = CopyFlatBuffer<Node>(my_node, &fbb);
+ flatbuffers::Offset<Node> node_offset =
+ RecursiveCopyFlatBuffer<Node>(my_node, &fbb);
const std::string_view node_name = my_node->name()->string_view();
std::vector<flatbuffers::Offset<Channel>> channel_offsets;
@@ -30,7 +31,7 @@
channel->source_node()->string_view() == remote_name) {
// Remove the schema to save some space on the wire.
aos::FlatbufferDetachedBuffer<Channel> cleaned_channel =
- CopyFlatBuffer<Channel>(channel);
+ RecursiveCopyFlatBuffer<Channel>(channel);
cleaned_channel.mutable_message()->clear_schema();
channel_offsets.emplace_back(
CopyFlatBuffer<Channel>(&cleaned_channel.message(), &fbb));
diff --git a/aos/network/message_bridge_server_status.cc b/aos/network/message_bridge_server_status.cc
index d356bca..0f5546c 100644
--- a/aos/network/message_bridge_server_status.cc
+++ b/aos/network/message_bridge_server_status.cc
@@ -28,8 +28,8 @@
std::vector<flatbuffers::Offset<ServerConnection>> connection_offsets;
for (const std::string_view node_name : source_node_names) {
- flatbuffers::Offset<Node> node_offset =
- CopyFlatBuffer(configuration::GetNode(configuration, node_name), &fbb);
+ flatbuffers::Offset<Node> node_offset = RecursiveCopyFlatBuffer(
+ configuration::GetNode(configuration, node_name), &fbb);
ServerConnection::Builder connection_builder(fbb);
connection_builder.add_node(node_offset);
connection_builder.add_state(State::DISCONNECTED);
diff --git a/y2020/vision/camera_reader.cc b/y2020/vision/camera_reader.cc
index 63cabcf..fd79d09 100644
--- a/y2020/vision/camera_reader.cc
+++ b/y2020/vision/camera_reader.cc
@@ -244,7 +244,7 @@
const flatbuffers::Offset<sift::TransformationMatrix>
field_to_target_offset =
- aos::CopyFlatBuffer(FieldToTarget(i), builder.fbb());
+ aos::RecursiveCopyFlatBuffer(FieldToTarget(i), builder.fbb());
sift::CameraPose::Builder pose_builder(*builder.fbb());
pose_builder.add_camera_to_target(transform_offset);