Copy flatbuffers efficiently and respecting DAGs.
Deduce the memory region that needs to be copied by traversing the DAG, and
then copy it. This has no mallocs (yay!), and works with DAGs.
There are some cases where we actually want to copy things recursively.
Do that when asked.
Change-Id: Ia0ffde26d569fa92ee2bbb49706c17d9d657d125
diff --git a/aos/flatbuffer_merge.h b/aos/flatbuffer_merge.h
index 7844227..7518bed 100644
--- a/aos/flatbuffer_merge.h
+++ b/aos/flatbuffer_merge.h
@@ -70,13 +70,57 @@
fbb);
}
+// Copies a flatbuffer by walking the tree and copying all the pieces. This
+// converts DAGs to trees.
template <class T>
-inline flatbuffers::Offset<T> CopyFlatBuffer(
+inline flatbuffers::Offset<T> RecursiveCopyFlatBuffer(
const T *t1, flatbuffers::FlatBufferBuilder *fbb) {
return MergeFlatBuffers<T>(reinterpret_cast<const flatbuffers::Table *>(t1),
nullptr, fbb);
}
+// Copies a flatbuffer by finding the extents of the memory using the typetable
+// and copying the containing memory. This doesn't allocate memory, and
+// preserves DAGs.
+flatbuffers::Offset<flatbuffers::Table> CopyFlatBuffer(
+ const flatbuffers::Table *t1, const flatbuffers::TypeTable *typetable,
+ flatbuffers::FlatBufferBuilder *fbb);
+
+template <class T>
+inline flatbuffers::Offset<T> CopyFlatBuffer(
+ const T *t1, flatbuffers::FlatBufferBuilder *fbb) {
+ return flatbuffers::Offset<T>(
+ CopyFlatBuffer(reinterpret_cast<const flatbuffers::Table *>(t1),
+ T::MiniReflectTypeTable(), fbb)
+ .o);
+}
+
+template <class T>
+inline flatbuffers::Offset<T> CopyFlatBuffer(
+ const Flatbuffer<T> &t1, flatbuffers::FlatBufferBuilder *fbb) {
+ return flatbuffers::Offset<T>(
+ CopyFlatBuffer(
+ reinterpret_cast<const flatbuffers::Table *>(&t1.message()),
+ T::MiniReflectTypeTable(), fbb)
+ .o);
+}
+
+// Copies a flatbuffer by copying all the raw data without inspecting it, and
+// returns an offset pointing inside the copied data.
+template <class T>
+inline flatbuffers::Offset<T> BlindCopyFlatBuffer(
+ const Flatbuffer<T> &t, flatbuffers::FlatBufferBuilder *fbb) {
+ // Enforce 8 byte alignment so anything inside the flatbuffer can be read.
+ fbb->Align(sizeof(flatbuffers::largest_scalar_t));
+
+ // We don't know how much of the start of the flatbuffer is padding. The
+ // safest thing to do from an alignment point of view (without looking inside)
+ // is to copy the initial offset and leave it as dead space.
+ fbb->PushBytes(t.data(), t.size());
+ return fbb->GetSize() -
+ flatbuffers::ReadScalar<flatbuffers::uoffset_t>(t.data());
+}
+
template <class T>
inline flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<T>>>
CopyVectorTable(const flatbuffers::Vector<flatbuffers::Offset<T>> *t1,
@@ -114,6 +158,14 @@
return FlatbufferDetachedBuffer<T>(fbb.Release());
}
+template <class T>
+inline FlatbufferDetachedBuffer<T> RecursiveCopyFlatBuffer(const T *t) {
+ flatbuffers::FlatBufferBuilder fbb;
+ fbb.ForceDefaults(true);
+ fbb.Finish(RecursiveCopyFlatBuffer<T>(t, &fbb));
+ return FlatbufferDetachedBuffer<T>(fbb.Release());
+}
+
// Compares 2 flatbuffers. Returns true if they match, false otherwise.
bool CompareFlatBuffer(const flatbuffers::TypeTable *typetable,
const flatbuffers::Table *t1,