#include "aos/flatbuffers/base.h"

#include <string.h>

#include <iomanip>

namespace aos::fbs {

namespace {
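// Returns the pointer that a flatbuffer-style offset refers to, by adding the
// offset's value to the address of the offset itself.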
void *DereferenceOffset(uoffset_t *offset) {
  return reinterpret_cast<uint8_t *>(offset) + *offset;
}
}  // namespace

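// Move constructor: takes over the other object's buffer and allocator, and
// re-points the parent pointers of all of its sub-objects at this object.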
ResizeableObject::ResizeableObject(ResizeableObject &&other)
    : buffer_(other.buffer_),
      parent_(other.parent_),
      owned_allocator_(std::move(other.owned_allocator_)),
      allocator_(other.allocator_) {
  // At this stage in the move the move constructors of the inherited types
  // have not yet been called, so we edit the state of the other object now so
  // that when everything is moved over into the new objects they will have the
  // correct pointers.
  for (size_t index = 0; index < other.NumberOfSubObjects(); ++index) {
    SubObject object = other.GetSubObject(index);
    if (object.object != nullptr) {
      object.object->parent_ = this;
    }
  }
  other.buffer_ = {};
  other.allocator_ = nullptr;
  other.parent_ = nullptr;
  // Sanity check that the std::unique_ptr move didn't reallocate/move memory
  // around.
  if (owned_allocator_.get() != nullptr) {
    CHECK_EQ(owned_allocator_.get(), allocator_);
  }
}

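// Inserts bytes into the buffer. If this object has a parent, the request is
// delegated upwards so that the root object (which owns the allocator)
// performs the actual insertion; otherwise we call into the allocator directly
// and then fix up our own state.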
bool ResizeableObject::InsertBytes(void *insertion_point, size_t bytes,
                                   SetZero set_zero) {
  // See comments on InsertBytes() declaration and in FixObjects()
  // implementation below.
  CHECK_LT(buffer_.data(), reinterpret_cast<const uint8_t *>(insertion_point))
      << ": Insertion may not be prior to the start of the buffer.";
  // Check that we started off with a properly aligned size.
  // Doing this CHECK earlier is tricky because if done in the constructor then
  // it executes prior to the Alignment() implementation being available.
  CHECK_EQ(0u, buffer_.size() % Alignment());
  // Note that we will round up the size to the current alignment, so that we
  // ultimately end up only adjusting the buffer size by a multiple of its
  // alignment, to avoid having to do any more complicated bookkeeping.
  const size_t aligned_bytes = PaddedSize(bytes, Alignment());
  if (parent_ != nullptr) {
    return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
  } else {
    std::optional<std::span<uint8_t>> new_buffer =
        CHECK_NOTNULL(allocator_)
            ->InsertBytes(insertion_point, aligned_bytes, Alignment(),
                          set_zero);
    if (!new_buffer.has_value()) {
      return false;
    }
    UpdateBuffer(new_buffer.value(),
                 new_buffer.value().data() +
                     (reinterpret_cast<const uint8_t *>(insertion_point) -
                      buffer_.data()),
                 aligned_bytes);
    return true;
  }
}

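// Adopts the new buffer, fixes up the offsets and cached buffers of all
// sub-objects affected by the modification, and then notifies the derived
// type via ObserveBufferModification().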
void ResizeableObject::UpdateBuffer(std::span<uint8_t> new_buffer,
                                    void *modification_point,
                                    ssize_t bytes_inserted) {
  buffer_ = new_buffer;
  FixObjects(modification_point, bytes_inserted);
  ObserveBufferModification();
}

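// Returns the portion of buffer_ backing an object of the given size at
// absolute_offset, zeroing out any padding bytes needed to reach
// terminal_alignment.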
std::span<uint8_t> ResizeableObject::BufferForObject(
    size_t absolute_offset, size_t size, size_t terminal_alignment) {
  const size_t padded_size = PaddedSize(size, terminal_alignment);
  std::span<uint8_t> padded_buffer =
      internal::GetSubSpan(buffer_, absolute_offset, padded_size);
  std::span<uint8_t> object_buffer =
      internal::GetSubSpan(padded_buffer, 0, size);
  std::span<uint8_t> padding = internal::GetSubSpan(padded_buffer, size);
  internal::ClearSpan(padding);
  return object_buffer;
}

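// Adjusts the stored absolute offsets, inline flatbuffer offsets, and cached
// buffers of all sub-objects to account for bytes_inserted bytes having been
// inserted at modification_point.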
void ResizeableObject::FixObjects(void *modification_point,
                                  ssize_t bytes_inserted) {
  CHECK_EQ(bytes_inserted % Alignment(), 0u)
      << ": We only support inserting N * Alignment() bytes at a time. This "
         "may change in the future.";
  for (size_t index = 0; index < NumberOfSubObjects(); ++index) {
    SubObject object = GetSubObject(index);
    const void *const absolute_offset =
        PointerForAbsoluteOffset(*object.absolute_offset);
    if (absolute_offset >= modification_point &&
        object.inline_entry < modification_point) {
      if (*object.inline_entry != 0) {
        CHECK_EQ(static_cast<const void *>(
                     static_cast<const uint8_t *>(absolute_offset) +
                     CHECK_NOTNULL(object.object)->AbsoluteOffsetOffset()),
                 DereferenceOffset(object.inline_entry));
        *object.inline_entry += bytes_inserted;
        CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
            << ": Encountered offset which points to a now-deleted section "
               "of memory. The offset should have been null'd out prior to "
               "deleting the memory.";
      } else {
        CHECK_EQ(nullptr, object.object);
      }
      *object.absolute_offset += bytes_inserted;
    }
    // We only need to update the object's buffer if it currently exists.
    if (object.object != nullptr) {
      std::span<uint8_t> subbuffer = BufferForObject(
          *object.absolute_offset, object.object->buffer_.size(),
          object.object->Alignment());
      // By convention (enforced in InsertBytes()), the modification_point
      // shall not be at the start of the subobject's data buffer; it may be
      // the byte just past the end of the buffer. This makes it unambiguous
      // which subobject(s) should get the extra space when a buffer size
      // increase is requested on the edge of a buffer.
      if (subbuffer.data() < modification_point &&
          (subbuffer.data() + subbuffer.size()) >= modification_point) {
        subbuffer = {subbuffer.data(), subbuffer.size() + bytes_inserted};
      }
      object.object->UpdateBuffer(subbuffer, modification_point,
                                  bytes_inserted);
    }
  }
}

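// Allocates by resizing the backing std::vector; only a single allocation may
// be outstanding at a time.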
std::optional<std::span<uint8_t>> VectorAllocator::Allocate(
    size_t size, size_t /*alignment*/, SetZero set_zero) {
  CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
  buffer_.resize(size);
  if (set_zero == SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }
  return std::span<uint8_t>{buffer_.data(), buffer_.size()};
}

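// Grows the vector by inserting zero-valued bytes at the insertion point; the
// SetZero argument is unused because the inserted bytes are already zeroed.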
std::optional<std::span<uint8_t>> VectorAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/, SetZero) {
  const ssize_t insertion_index =
      reinterpret_cast<uint8_t *>(insertion_point) - buffer_.data();
  CHECK_LE(0, insertion_index);
  CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
  buffer_.insert(buffer_.begin() + insertion_index, bytes, 0);
  return std::span<uint8_t>{buffer_.data(), buffer_.size()};
}

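// Erases the requested byte range from the vector and returns the shrunken
// buffer.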
std::span<uint8_t> VectorAllocator::RemoveBytes(
    std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  CHECK_LE(0, removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  buffer_.erase(buffer_.begin() + removal_index,
                buffer_.begin() + removal_index + remove_bytes.size());
  return {buffer_.data(), buffer_.size()};
}

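// Hands out the tail end of the fixed, caller-provided span, so that later
// insertions can grow the allocation towards the front of the span.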
std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
                                                          size_t /*alignment*/,
                                                          SetZero set_zero) {
  CHECK(!allocated_);
  if (size > buffer_.size()) {
    return std::nullopt;
  }
  if (set_zero == SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }
  allocated_size_ = size;
  allocated_ = true;
  return internal::GetSubSpan(buffer_, buffer_.size() - size);
}

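// Makes room for the new bytes by shifting the portion of the allocation
// before insertion_point towards the front of the span (into the unused
// capacity); fails if the span lacks enough spare room.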
std::optional<std::span<uint8_t>> SpanAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    SetZero set_zero) {
  uint8_t *insertion_point_typed = reinterpret_cast<uint8_t *>(insertion_point);
  const ssize_t insertion_index = insertion_point_typed - buffer_.data();
  CHECK_LE(0, insertion_index);
  CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
  const size_t new_size = allocated_size_ + bytes;
  if (new_size > buffer_.size()) {
    VLOG(1) << ": Insufficient space to grow by " << bytes << " bytes.";
    return std::nullopt;
  }
  const size_t old_start_index = buffer_.size() - allocated_size_;
  const size_t new_start_index = buffer_.size() - new_size;
  memmove(buffer_.data() + new_start_index, buffer_.data() + old_start_index,
          insertion_index - old_start_index);
  if (set_zero == SetZero::kYes) {
    memset(insertion_point_typed - bytes, 0, bytes);
  }
  allocated_size_ = new_size;
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

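// Removes the requested range by shifting the bytes in front of it towards
// the back of the span, shrinking the allocation at its front.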
std::span<uint8_t> SpanAllocator::RemoveBytes(std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

void SpanAllocator::Deallocate(std::span<uint8_t>) {
  CHECK(allocated_) << ": Called Deallocate() without a prior allocation.";
  allocated_ = false;
}

namespace internal {
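// Writes a hex dump of the span to os, eight bytes per row, with each row
// prefixed by its starting offset within the span.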
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
  constexpr size_t kRowSize = 8u;
  for (size_t index = 0; index < span.size(); index += kRowSize) {
    os << std::hex << std::setw(4) << std::setfill('0') << std::uppercase
       << index << ": ";
    for (size_t subindex = 0;
         subindex < kRowSize && (index + subindex) < span.size(); ++subindex) {
      os << std::setw(2) << static_cast<int>(span[index + subindex]) << " ";
    }
    os << "\n";
  }
  os << std::resetiosflags(std::ios_base::basefield | std::ios_base::uppercase);
  return os;
}
}  // namespace internal
}  // namespace aos::fbs