#include "aos/flatbuffers/base.h"

#include <string.h>

#include <iomanip>

namespace aos::fbs {

namespace {
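// Converts a flatbuffer-style relative offset into the absolute pointer it
// refers to (offsets are stored relative to their own location).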
void *DereferenceOffset(uoffset_t *offset) {
  return reinterpret_cast<uint8_t *>(offset) + *offset;
}
}  // namespace

ResizeableObject::ResizeableObject(ResizeableObject &&other)
    : buffer_(other.buffer_),
      parent_(other.parent_),
      owned_allocator_(std::move(other.owned_allocator_)),
      allocator_(other.allocator_) {
  // At this stage in the move, the move constructors of the inherited types
  // have not yet been called, so we edit the state of the other object now so
  // that when everything is moved over into the new object the subobjects will
  // have the correct parent pointers.
  for (size_t index = 0; index < other.NumberOfSubObjects(); ++index) {
    SubObject object = other.GetSubObject(index);
    if (object.object != nullptr) {
      object.object->parent_ = this;
    }
  }
  other.buffer_ = {};
  other.allocator_ = nullptr;
  other.parent_ = nullptr;
  // Sanity check that the std::unique_ptr move didn't reallocate/move memory
  // around.
  if (owned_allocator_.get() != nullptr) {
    CHECK_EQ(owned_allocator_.get(), allocator_);
  }
}

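// Rounds the requested insertion up to a multiple of Alignment() and routes it
// to the root of the object tree, which owns the allocator; on success the
// buffer views are then refreshed via UpdateBuffer().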
std::optional<std::span<uint8_t>> ResizeableObject::InsertBytes(
    void *insertion_point, size_t bytes, SetZero set_zero) {
  // See comments on InsertBytes() declaration and in FixObjects()
  // implementation below.
  CHECK_LT(reinterpret_cast<const void *>(buffer_.data()),
           reinterpret_cast<const void *>(insertion_point))
      << ": Insertion may not be prior to the start of the buffer.";
  // Note that we will round up the size to the current alignment, so that we
  // ultimately end up only adjusting the buffer size by a multiple of its
  // alignment, to avoid having to do any more complicated bookkeeping.
  const size_t aligned_bytes = AlignOffset(bytes, Alignment());
  if (parent_ != nullptr) {
    return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
  } else {
    std::optional<std::span<uint8_t>> new_buffer =
        CHECK_NOTNULL(allocator_)
            ->InsertBytes(insertion_point, aligned_bytes, Alignment(),
                          set_zero);
    if (!new_buffer.has_value()) {
      return std::nullopt;
    }
    std::span<uint8_t> inserted_data(
        new_buffer.value().data() +
            (reinterpret_cast<const uint8_t *>(insertion_point) -
             buffer_.data()),
        aligned_bytes);
    UpdateBuffer(new_buffer.value(), inserted_data.data(),
                 inserted_data.size());
    return inserted_data;
  }
}

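// Adopts the new backing buffer, patches up the offsets of any subobjects, and
// then lets the concrete type react via ObserveBufferModification().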
void ResizeableObject::UpdateBuffer(std::span<uint8_t> new_buffer,
                                    void *modification_point,
                                    ssize_t bytes_inserted) {
  buffer_ = new_buffer;
  FixObjects(modification_point, bytes_inserted);
  ObserveBufferModification();
}

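// Returns the slice of our buffer that is occupied by the subobject at the
// provided absolute offset.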
std::span<uint8_t> ResizeableObject::BufferForObject(size_t absolute_offset,
                                                     size_t size) {
  return internal::GetSubSpan(buffer_, absolute_offset, size);
}

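// Walks all of the subobjects and adjusts any offsets (both the inline
// flatbuffer offsets and the bookkeeping absolute offsets) that point at
// memory past the modification point, then recurses so that nested objects
// can fix themselves up too.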
void ResizeableObject::FixObjects(void *modification_point,
                                  ssize_t bytes_inserted) {
  CHECK_EQ(bytes_inserted % Alignment(), 0u)
      << ": We only support inserting N * Alignment() bytes at a time. This "
         "may change in the future.";
  for (size_t index = 0; index < NumberOfSubObjects(); ++index) {
    SubObject object = GetSubObject(index);
    const void *const absolute_offset =
        PointerForAbsoluteOffset(*object.absolute_offset);
    if (absolute_offset >= modification_point &&
        object.inline_entry < modification_point) {
      if (*object.inline_entry != 0) {
        CHECK_EQ(static_cast<const void *>(
                     static_cast<const uint8_t *>(absolute_offset)),
                 DereferenceOffset(object.inline_entry));
        *object.inline_entry += bytes_inserted;
        CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
            << ": Encountered offset which points to a now-deleted section "
               "of memory. The offset should have been null'd out prior to "
               "deleting the memory.";
      } else {
        CHECK_EQ(nullptr, object.object);
      }
      *object.absolute_offset += bytes_inserted;
    }
    // We only need to update the object's buffer if it currently exists.
    if (object.object != nullptr) {
      std::span<uint8_t> subbuffer = BufferForObject(
          *object.absolute_offset, object.object->buffer_.size());
      // By convention (enforced in InsertBytes()), the modification_point
      // shall not be at the start of the subobject's data buffer; it may be
      // the byte just past the end of the buffer. This makes it unambiguous
      // which subobject(s) should get the extra space when a buffer size
      // increase is requested on the edge of a buffer.
      if (subbuffer.data() < modification_point &&
          (subbuffer.data() + subbuffer.size()) >= modification_point) {
        subbuffer = {subbuffer.data(), subbuffer.size() + bytes_inserted};
      }
      object.object->UpdateBuffer(subbuffer, modification_point,
                                  bytes_inserted);
    }
  }
}

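// Allocations are carved out of the tail of the caller-provided span; the
// slack left at the front is what later lets InsertBytes() grow the
// allocation in place.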
std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
                                                          size_t alignment,
                                                          SetZero set_zero) {
  CHECK(!allocated_);
  if (size > buffer_.size()) {
    return std::nullopt;
  }
  if (set_zero == SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }
  allocated_size_ = size;
  allocated_ = true;
  CHECK_GT(alignment, 0u);
  CHECK_EQ(buffer_.size() % alignment, 0u)
      << ": Buffer isn't a multiple of alignment " << alignment << " long, is "
      << buffer_.size() << " long";
  return internal::GetSubSpan(buffer_, buffer_.size() - size);
}

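// Opens a gap at insertion_point by sliding everything before it towards the
// front of the span, keeping the allocation flush with the end of the span.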
std::optional<std::span<uint8_t>> SpanAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    SetZero set_zero) {
  uint8_t *insertion_point_typed = reinterpret_cast<uint8_t *>(insertion_point);
  const ssize_t insertion_index = insertion_point_typed - buffer_.data();
  CHECK_LE(0, insertion_index);
  CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
  const size_t new_size = allocated_size_ + bytes;
  if (new_size > buffer_.size()) {
    VLOG(1) << ": Insufficient space to grow by " << bytes << " bytes.";
    return std::nullopt;
  }
  const size_t old_start_index = buffer_.size() - allocated_size_;
  const size_t new_start_index = buffer_.size() - new_size;
  memmove(buffer_.data() + new_start_index, buffer_.data() + old_start_index,
          insertion_index - old_start_index);
  if (set_zero == SetZero::kYes) {
    memset(insertion_point_typed - bytes, 0, bytes);
  }
  allocated_size_ = new_size;
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

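// Closes the hole left by remove_bytes by sliding everything before it towards
// the end of the span, so the allocation remains flush with the end.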
std::span<uint8_t> SpanAllocator::RemoveBytes(std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

void SpanAllocator::Deallocate(std::span<uint8_t>) {
  CHECK(allocated_) << ": Called Deallocate() without a prior allocation.";
  allocated_ = false;
}

AlignedVectorAllocator::~AlignedVectorAllocator() {
  CHECK(buffer_.empty())
      << ": Must deallocate before destroying the AlignedVectorAllocator.";
}

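// Sizes the vector up to the requested size rounded up to a multiple of
// kAlignment and hands back a span covering just the requested bytes.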
std::optional<std::span<uint8_t>> AlignedVectorAllocator::Allocate(
    size_t size, size_t /*alignment*/, fbs::SetZero set_zero) {
  CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
  buffer_.resize(((size + kAlignment - 1) / kAlignment) * kAlignment);
  allocated_size_ = size;
  if (set_zero == fbs::SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }

  return std::span<uint8_t>{data(), allocated_size_};
}

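// Grows the vector, then uses two memmoves to re-pack the existing data so the
// allocation stays end-aligned with a bytes-sized gap at the insertion point.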
std::optional<std::span<uint8_t>> AlignedVectorAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    fbs::SetZero set_zero) {
  DCHECK_GE(reinterpret_cast<const uint8_t *>(insertion_point), data());
  DCHECK_LE(reinterpret_cast<const uint8_t *>(insertion_point),
            data() + allocated_size_);
  const size_t buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - data();
  // TODO(austin): This has an extra memcpy in it that isn't strictly needed
  // when we resize. Remove it if performance is a concern.
  const size_t absolute_buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - buffer_.data();
  const size_t previous_size = buffer_.size();

  buffer_.resize(((allocated_size_ + bytes + kAlignment - 1) / kAlignment) *
                 kAlignment);

  // Now we've got space both before and after the block of data. Move the data
  // after the insertion point to the end of the buffer, and the data before it
  // to the start of the new end-aligned allocation, leaving a bytes-sized gap
  // at the insertion point.

  const size_t new_space_after = buffer_.size() - previous_size;

  // Move the rest of the data to be end-aligned. If the buffer wasn't resized,
  // this will be a nop.
  memmove(buffer_.data() + absolute_buffer_offset + new_space_after,
          buffer_.data() + absolute_buffer_offset,
          previous_size - absolute_buffer_offset);

  // Now, move the data at the front to be aligned too.
  memmove(buffer_.data() + buffer_.size() - (allocated_size_ + bytes),
          buffer_.data() + previous_size - allocated_size_,
          allocated_size_ - (previous_size - absolute_buffer_offset));

  if (set_zero == fbs::SetZero::kYes) {
    memset(data() - bytes + buffer_offset, 0, bytes);
  }
  allocated_size_ += bytes;

  return std::span<uint8_t>{data(), allocated_size_};
}

std::span<uint8_t> AlignedVectorAllocator::RemoveBytes(
    std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();

  return std::span<uint8_t>{data(), allocated_size_};
}

void AlignedVectorAllocator::Deallocate(std::span<uint8_t>) {
  if (!released_) {
    CHECK(!buffer_.empty())
        << ": Called Deallocate() without a prior allocation.";
  }
  released_ = false;
  buffer_.resize(0);
}

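// Transfers ownership of the vector into a shared holder and returns a
// SharedSpan aliasing the allocated region, so the data outlives this
// allocator instance.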
aos::SharedSpan AlignedVectorAllocator::Release() {
  absl::Span<uint8_t> span{data(), allocated_size_};
  std::shared_ptr<SharedSpanHolder> result = std::make_shared<SharedSpanHolder>(
      std::move(buffer_), absl::Span<const uint8_t>());
  result->span = span;
  released_ = true;
  return aos::SharedSpan(result, &(result->span));
}

namespace internal {
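// Writes the span to the stream as rows of eight hex bytes, each row prefixed
// with its offset within the span.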
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
  constexpr size_t kRowSize = 8u;
  for (size_t index = 0; index < span.size(); index += kRowSize) {
    os << std::hex << std::setw(4) << std::setfill('0') << std::uppercase
       << index << ": ";
    for (size_t subindex = 0;
         subindex < kRowSize && (index + subindex) < span.size(); ++subindex) {
      os << std::setw(2) << static_cast<int>(span[index + subindex]) << " ";
    }
    os << "\n";
  }
  os << std::resetiosflags(std::ios_base::basefield | std::ios_base::uppercase);
  return os;
}
}  // namespace internal
}  // namespace aos::fbs