#ifndef AOS_FLATBUFFERS_BASE_H_
#define AOS_FLATBUFFERS_BASE_H_

#include <stdint.h>
#include <sys/types.h>

#include <cstring>
#include <memory>
#include <optional>
#include <ostream>
#include <span>
#include <utility>
#include <vector>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "flatbuffers/base.h"

#include "aos/containers/resizeable_buffer.h"
#include "aos/ipc_lib/data_alignment.h"
#include "aos/shared_span.h"

namespace aos::fbs {

using ::flatbuffers::soffset_t;
using ::flatbuffers::uoffset_t;
using ::flatbuffers::voffset_t;

// Returns the offset into the buffer needed to provide 'alignment' alignment
// 'aligned_offset' bytes after the returned offset. This assumes that the
// first 'starting_offset' bytes are spoken for.
constexpr size_t AlignOffset(size_t starting_offset, size_t alignment,
                             size_t aligned_offset = 0) {
  // We can be clever with bitwise operations by assuming that alignment is a
  // power of two. Or we can just be clearer about what we mean and eat a few
  // integer divides.
  return (((starting_offset + aligned_offset - 1) / alignment) + 1) *
             alignment -
         aligned_offset;
}
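
// A few illustrative values (a sketch of the behavior, not an exhaustive
// specification):
//   AlignOffset(5, 8) == 8      // First 8-aligned offset at or after 5.
//   AlignOffset(8, 8) == 8      // Already-aligned offsets are unchanged.
//   AlignOffset(5, 8, 4) == 12  // 12 + 4 == 16, which is 8-aligned.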

// Used as a parameter to methods where we are messing with memory and may or
// may not want to clear it to zeroes.
enum class SetZero { kYes, kNo };

class Allocator;

// Parent type of any object that may need to dynamically change size at
// runtime. Used by the static table and vector types to request additional
// blocks of memory when needed.
//
// The way that this works is that every ResizeableObject has some number of
// children that are themselves ResizeableObjects and whose memory is entirely
// contained within their parent's memory. A ResizeableObject without a parent
// instead has an Allocator that it can use to allocate additional blocks
// of memory. Whenever a child needs to grow in size, it will make a call to
// InsertBytes() on its parent, which will percolate up until InsertBytes()
// gets called on the root allocator. If the insert succeeds, then every single
// child through the entire tree will get notified (this is because the
// allocator may have shifted the entire memory buffer, so any pointers may
// need to be updated). Child types will provide implementations of the
// GetSubObject() and NumberOfSubObjects() methods to both allow tree traversal
// as well as to allow the various internal offsets to be updated appropriately.
class ResizeableObject {
 public:
  // Returns the underlying memory buffer into which the flatbuffer will be
  // serialized.
  std::span<uint8_t> buffer() { return buffer_; }
  std::span<const uint8_t> buffer() const { return buffer_; }

  // Updates the underlying memory buffer to new_buffer, with an indication of
  // where bytes were inserted/removed from the buffer. It is assumed that
  // new_buffer already has the state of the serialized flatbuffer
  // copied into it.
  // * When bytes have been inserted, modification_point will point to the
  //   first of the inserted bytes in new_buffer and bytes_inserted will be the
  //   number of new bytes.
  // * Buffer shrinkage is not currently supported.
  // * When bytes_inserted is zero, modification_point is ignored.
  void UpdateBuffer(std::span<uint8_t> new_buffer, void *modification_point,
                    ssize_t bytes_inserted);

 protected:
  // Data associated with a sub-object of this object.
  struct SubObject {
    // A direct pointer to the inline entry in the flatbuffer table data. The
    // pointer must be valid, but the entry itself may be zero if the object is
    // not actually populated.
    // If *inline_entry is non-zero, this will get updated if any new memory
    // got added/removed in-between inline_entry and the actual data pointed to
    // by inline_entry.
    uoffset_t *inline_entry;
    // The actual child object. Should be nullptr if *inline_entry is zero;
    // must be valid if *inline_entry is non-zero.
    ResizeableObject *object;
    // The nominal offset from buffer_.data() to object->buffer_.data().
    // Must be provided, and must always be valid, even if *inline_entry is
    // zero.
    // I.e., the following holds when object is not nullptr:
    //   SubObject sub = parent.GetSubObject(index);
    //   CHECK_EQ(parent.buffer().data() + *sub.absolute_offset,
    //            sub.object->buffer().data());
    size_t *absolute_offset;
  };

  ResizeableObject(std::span<uint8_t> buffer, ResizeableObject *parent)
      : buffer_(buffer), parent_(parent) {}
  ResizeableObject(std::span<uint8_t> buffer, Allocator *allocator)
      : buffer_(buffer), allocator_(allocator) {}
  ResizeableObject(std::span<uint8_t> buffer,
                   std::unique_ptr<Allocator> allocator)
      : buffer_(buffer),
        owned_allocator_(std::move(allocator)),
        allocator_(owned_allocator_.get()) {}
  ResizeableObject(const ResizeableObject &) = delete;
  ResizeableObject &operator=(const ResizeableObject &) = delete;
  // Users do not end up using the move constructor; however, it is needed to
  // handle the fact that a ResizeableObject may be a member of an std::vector
  // in the various generated types.
  ResizeableObject(ResizeableObject &&other);
  // Required alignment of this object.
  virtual size_t Alignment() const = 0;
  // Causes bytes bytes to be inserted between insertion_point - 1 and
  // insertion_point.
  // If requested, the new bytes will be cleared to zero; otherwise they will
  // be left uninitialized.
  // The insertion_point may not be equal to this->buffer_.data(); it may be a
  // pointer just past the end of the buffer. This is to ease the
  // implementation, and is merely a requirement that any buffer growth occur
  // only on the inside or past the end of the vector, and not prior to the
  // start of the vector.
  // Returns a span of the inserted bytes on success, nullopt on failure (e.g.,
  // if the allocator has no memory available).
  std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
                                                size_t bytes, SetZero set_zero);
  // Called *after* the internal buffer_ has been swapped out and *after* the
  // object tree has been traversed and fixed.
  virtual void ObserveBufferModification() {}

  // Returns the index'th sub object of this object.
  // index must be less than NumberOfSubObjects().
  // This will include objects which are not currently populated but which may
  // be populated in the future (so that we can track what the necessary
  // offsets are when we do populate it).
  virtual SubObject GetSubObject(size_t index) = 0;
  // Number of sub-objects of this object. May be zero.
  virtual size_t NumberOfSubObjects() const = 0;

  // Treating the supplied absolute_offset as an offset into the internal
  // memory buffer, return the pointer to the underlying memory.
  const void *PointerForAbsoluteOffset(const size_t absolute_offset) {
    return buffer_.data() + absolute_offset;
  }
  // Returns a span at the requested offset into the buffer for the requested
  // size.
  std::span<uint8_t> BufferForObject(size_t absolute_offset, size_t size);
  // When memory has been inserted/removed, this iterates over the sub-objects
  // and notifies/adjusts them appropriately.
  // This will be called after buffer_ has been updated, and:
  // * For insertion, modification_point will point into the new buffer_ to the
  //   first of the newly inserted bytes.
  // * Removal is not entirely implemented yet, but for removal,
  //   modification_point should point to the first byte after the removed
  //   chunk.
  void FixObjects(void *modification_point, ssize_t bytes_inserted);

  Allocator *allocator() { return allocator_; }

  std::span<uint8_t> buffer_;

 private:
  ResizeableObject *parent_ = nullptr;
  std::unique_ptr<Allocator> owned_allocator_;
  Allocator *allocator_ = nullptr;
};
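
// A rough sketch of the growth flow described above (names here are
// illustrative; the concrete subclasses are generated from .fbs definitions
// elsewhere):
//   // Inside some child object that needs 16 more zeroed bytes at its end:
//   std::optional<std::span<uint8_t>> inserted =
//       InsertBytes(buffer().data() + buffer().size(), 16, SetZero::kYes);
//   // On success, buffer_ has been updated, sub-objects have been fixed up
//   // via FixObjects(), and ObserveBufferModification() has been invoked.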

// Interface to represent a memory allocator for use with ResizeableObject.
class Allocator {
 public:
  virtual ~Allocator() {}
  // Allocates memory of the requested size and alignment. alignment is
  // guaranteed.
  //
  // On failure to allocate the requested size, returns nullopt.
  // Never returns a partial span.
  // The span will be zero-initialized if requested via set_zero.
  // Once Allocate() has been called once, it may not be called again until
  // Deallocate() has been called. In order to adjust the size of the buffer,
  // call InsertBytes() and RemoveBytes().
  [[nodiscard]] virtual std::optional<std::span<uint8_t>> Allocate(
      size_t size, size_t alignment, SetZero set_zero) = 0;
  // Identical to Allocate(), but dies on failure.
  [[nodiscard]] std::span<uint8_t> AllocateOrDie(size_t size, size_t alignment,
                                                 SetZero set_zero) {
    std::optional<std::span<uint8_t>> span =
        Allocate(size, alignment, set_zero);
    CHECK(span.has_value()) << ": Failed to allocate " << size << " bytes.";
    CHECK_EQ(size, span.value().size())
        << ": Failed to allocate " << size << " bytes.";
    CHECK_EQ(reinterpret_cast<size_t>(span.value().data()) % alignment, 0u)
        << "Failed to allocate data of length " << size << " with alignment "
        << alignment;

    return span.value();
  }
  // Increases the size of the buffer by inserting bytes bytes immediately
  // before insertion_point.
  // alignment_hint specifies the alignment of the entire buffer, not of the
  // inserted bytes.
  // The returned span may or may not overlap with the original buffer in
  // memory.
  // The inserted bytes will be set to zero or uninitialized depending on the
  // value of SetZero.
  // insertion_point must be in (or 1 past the end of) the buffer.
  // Returns nullopt on a failure to allocate the requested bytes.
  [[nodiscard]] virtual std::optional<std::span<uint8_t>> InsertBytes(
      void *insertion_point, size_t bytes, size_t alignment_hint,
      SetZero set_zero) = 0;
  // Removes the requested span of bytes from the buffer, returning the new
  // buffer.
  [[nodiscard]] virtual std::span<uint8_t> RemoveBytes(
      std::span<uint8_t> remove_bytes) = 0;
  // Deallocates the currently allocated buffer. The provided buffer must match
  // the latest version of the buffer.
  // If Allocate() has been called, Deallocate() must be called prior to
  // destroying the Allocator.
  virtual void Deallocate(std::span<uint8_t> buffer) = 0;
};
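
// A rough lifecycle sketch against any concrete Allocator (illustrative only;
// the sizes are arbitrary, and exactly what the returned spans cover is up to
// the implementation):
//   Allocator *allocator = ...;
//   std::span<uint8_t> buf =
//       allocator->AllocateOrDie(/*size=*/64, /*alignment=*/8, SetZero::kYes);
//   // Grow by 16 zeroed bytes just past the end of the current buffer.
//   buf = allocator
//             ->InsertBytes(buf.data() + buf.size(), 16, /*alignment_hint=*/8,
//                           SetZero::kYes)
//             .value();
//   allocator->Deallocate(buf);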

// Allocator that allocates all of its memory within a provided span. To match
// the behavior of the FlatBufferBuilder, it will start its allocations at the
// end of the provided span.
//
// Attempts to allocate more memory than is present in the provided buffer will
// fail.
class SpanAllocator : public Allocator {
 public:
  SpanAllocator(std::span<uint8_t> buffer) : buffer_(buffer) {}
  ~SpanAllocator() {
    CHECK(!allocated_)
        << ": Must deallocate before destroying the SpanAllocator.";
  }

  std::optional<std::span<uint8_t>> Allocate(size_t size, size_t /*alignment*/,
                                             SetZero set_zero) override;

  std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
                                                size_t bytes,
                                                size_t /*alignment*/,
                                                SetZero set_zero) override;

  std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;

  void Deallocate(std::span<uint8_t>) override;

 private:
  std::span<uint8_t> buffer_;
  bool allocated_ = false;
  size_t allocated_size_ = 0;
};
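
// A minimal usage sketch (the storage size and alignment are arbitrary and for
// illustration only):
//   uint8_t storage[256];
//   SpanAllocator allocator({storage, sizeof(storage)});
//   std::span<uint8_t> buf =
//       allocator.AllocateOrDie(/*size=*/64, /*alignment=*/8, SetZero::kYes);
//   // ... serialize into buf ...
//   allocator.Deallocate(buf);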
262
Austin Schuh02e0d772024-05-30 16:41:06 -0700263// Allocator that uses an AllocatorResizeableBuffer to allow arbitrary-sized
264// allocations. Aligns the end of the buffer to an alignment of
265// kChannelDataAlignment.
266class AlignedVectorAllocator : public fbs::Allocator {
267 public:
268 static constexpr size_t kAlignment = aos::kChannelDataAlignment;
269 AlignedVectorAllocator() {}
270 ~AlignedVectorAllocator();
271
272 std::optional<std::span<uint8_t>> Allocate(size_t size, size_t alignment,
273 fbs::SetZero set_zero) override;
274
275 std::optional<std::span<uint8_t>> InsertBytes(void *insertion_point,
276 size_t bytes, size_t alignment,
277 fbs::SetZero set_zero) override;
278
279 std::span<uint8_t> RemoveBytes(std::span<uint8_t> remove_bytes) override;
280
281 void Deallocate(std::span<uint8_t>) override;
282
Austin Schuha06531c2024-06-05 10:46:56 -0700283 // Releases the data which has been allocated from this allocator to the
284 // caller. This is needed because Deallocate actually frees the memory.
Austin Schuh02e0d772024-05-30 16:41:06 -0700285 aos::SharedSpan Release();
286
287 private:
288 struct SharedSpanHolder {
289 aos::AllocatorResizeableBuffer<aos::AlignedReallocator<kAlignment>> buffer;
290 absl::Span<const uint8_t> span;
291 };
292 uint8_t *data() { return buffer_.data() + buffer_.size() - allocated_size_; }
293
294 aos::AllocatorResizeableBuffer<aos::AlignedReallocator<kAlignment>> buffer_;
295
Austin Schuha06531c2024-06-05 10:46:56 -0700296 // The size of the data that has been returned from Allocate. This counts
297 // from the end of buffer_.
Austin Schuh02e0d772024-05-30 16:41:06 -0700298 size_t allocated_size_ = 0u;
Austin Schuha06531c2024-06-05 10:46:56 -0700299 // If true, the data has been released from buffer_, and we don't own it
300 // anymore. This enables Deallocate to properly handle the case when the user
301 // releases the memory, but the Builder still needs to clean up.
Austin Schuh02e0d772024-05-30 16:41:06 -0700302 bool released_ = false;
303};

// Allocates and owns a fixed-size memory buffer on the stack.
//
// This provides a convenient Allocator for use with the aos::fbs::Builder
// in realtime code instead of trying to use a heap-backed allocator such as
// AlignedVectorAllocator.
template <std::size_t N, std::size_t alignment = 64>
class FixedStackAllocator : public SpanAllocator {
 public:
  FixedStackAllocator() : SpanAllocator({buffer_, sizeof(buffer_)}) {}

 private:
  alignas(alignment) uint8_t buffer_[N];
};
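
// A minimal usage sketch (buffer size and alignment values are arbitrary; in
// practice the allocator is typically handed to an aos::fbs::Builder, which is
// defined elsewhere):
//   FixedStackAllocator<512> allocator;
//   std::span<uint8_t> buf =
//       allocator.AllocateOrDie(/*size=*/128, /*alignment=*/8, SetZero::kYes);
//   // ... build the flatbuffer in buf ...
//   allocator.Deallocate(buf);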

namespace internal {
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os);
inline void ClearSpan(std::span<uint8_t> span) {
  memset(span.data(), 0, span.size());
}
// std::span::subspan does not do bounds checking.
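// For example (illustrative): GetSubSpan(span, 4, 8) returns bytes [4, 12) of
// span and CHECK-fails if span.size() < 12, whereas a bare span.subspan(4, 8)
// on a too-short span would be undefined behavior.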
template <typename T>
inline std::span<T> GetSubSpan(std::span<T> span, size_t offset,
                               size_t count = std::dynamic_extent) {
  if (count != std::dynamic_extent) {
    CHECK_LE(offset + count, span.size());
  }
  return span.subspan(offset, count);
}
// Normal users should never move any of the special flatbuffer types that we
// provide. However, they do need to be moveable in order to support the use of
// resizeable vectors. As such, we make all the move constructors private and
// friend the TableMover struct. The TableMover struct is then used in places
// that need to have moveable objects. It should never be used by a user.
template <typename T>
struct TableMover {
  TableMover(std::span<uint8_t> buffer, ResizeableObject *parent)
      : t(buffer, parent) {}
  TableMover(std::span<uint8_t> buffer, Allocator *allocator)
      : t(buffer, allocator) {}
  TableMover(std::span<uint8_t> buffer, ::std::unique_ptr<Allocator> allocator)
      : t(buffer, ::std::move(allocator)) {}
  TableMover(TableMover &&) = default;
  T t;
};
}  // namespace internal
}  // namespace aos::fbs

#endif  // AOS_FLATBUFFERS_BASE_H_