#include "aos/flatbuffers/base.h"

#include <string.h>

#include <iomanip>

#include "absl/log/check.h"
#include "absl/log/log.h"

namespace aos::fbs {

namespace {
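// Resolves a flatbuffer-style offset to the address it refers to. The stored
// uoffset_t is relative to its own location in the buffer, so the target is
// simply the offset's address plus its value.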
void *DereferenceOffset(uoffset_t *offset) {
  return reinterpret_cast<uint8_t *>(offset) + *offset;
}
}  // namespace

ResizeableObject::ResizeableObject(ResizeableObject &&other)
    : buffer_(other.buffer_),
      parent_(other.parent_),
      owned_allocator_(std::move(other.owned_allocator_)),
      allocator_(other.allocator_) {
  // At this stage in the move, the move constructors of the inherited types
  // have not yet been called, so we edit the state of the other object now;
  // that way, when everything is moved over into the new objects, they will
  // have the correct pointers.
  for (size_t index = 0; index < other.NumberOfSubObjects(); ++index) {
    SubObject object = other.GetSubObject(index);
    if (object.object != nullptr) {
      object.object->parent_ = this;
    }
  }
  other.buffer_ = {};
  other.allocator_ = nullptr;
  other.parent_ = nullptr;
  // Sanity check that the std::unique_ptr move didn't reallocate/move memory
  // around.
  if (owned_allocator_.get() != nullptr) {
    CHECK_EQ(owned_allocator_.get(), allocator_);
  }
}

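// Note on the overall flow: insertion requests are forwarded up the parent_
// chain until they reach the root object, which owns the allocator. Once the
// allocator has grown the buffer, UpdateBuffer()/FixObjects() walk back down
// the object tree so every subobject sees the new buffer and adjusted offsets.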
std::optional<std::span<uint8_t>> ResizeableObject::InsertBytes(
    void *insertion_point, size_t bytes, SetZero set_zero) {
  // See comments on InsertBytes() declaration and in FixObjects()
  // implementation below.
  CHECK_LT(reinterpret_cast<const void *>(buffer_.data()),
           reinterpret_cast<const void *>(insertion_point))
      << ": Insertion may not be prior to the start of the buffer.";
  // Note that we will round up the size to the current alignment, so that we
  // ultimately end up only adjusting the buffer size by a multiple of its
  // alignment, to avoid having to do any more complicated bookkeeping.
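  // For example, with Alignment() == 8, a request to insert 5 bytes grows the
  // buffer by a full 8 bytes.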
  const size_t aligned_bytes = AlignOffset(bytes, Alignment());
  if (parent_ != nullptr) {
    return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
  } else {
    CHECK(allocator_ != nullptr);
    std::optional<std::span<uint8_t>> new_buffer = allocator_->InsertBytes(
        insertion_point, aligned_bytes, Alignment(), set_zero);
    if (!new_buffer.has_value()) {
      return std::nullopt;
    }
    std::span<uint8_t> inserted_data(
        new_buffer.value().data() +
            (reinterpret_cast<const uint8_t *>(insertion_point) -
             buffer_.data()),
        aligned_bytes);
    UpdateBuffer(new_buffer.value(), inserted_data.data(),
                 inserted_data.size());
    return inserted_data;
  }
}

void ResizeableObject::UpdateBuffer(std::span<uint8_t> new_buffer,
                                    void *modification_point,
                                    ssize_t bytes_inserted) {
  buffer_ = new_buffer;
  FixObjects(modification_point, bytes_inserted);
  ObserveBufferModification();
}

std::span<uint8_t> ResizeableObject::BufferForObject(size_t absolute_offset,
                                                     size_t size) {
  return internal::GetSubSpan(buffer_, absolute_offset, size);
}

void ResizeableObject::FixObjects(void *modification_point,
                                  ssize_t bytes_inserted) {
  CHECK_EQ(bytes_inserted % Alignment(), 0u)
      << ": We only support inserting N * Alignment() bytes at a time. This "
         "may change in the future.";
  for (size_t index = 0; index < NumberOfSubObjects(); ++index) {
    SubObject object = GetSubObject(index);
    const void *const absolute_offset =
        PointerForAbsoluteOffset(*object.absolute_offset);
    if (absolute_offset >= modification_point &&
        object.inline_entry < modification_point) {
      if (*object.inline_entry != 0) {
        CHECK(object.object != nullptr);
        CHECK_EQ(static_cast<const void *>(
                     static_cast<const uint8_t *>(absolute_offset)),
                 DereferenceOffset(object.inline_entry));
        *object.inline_entry += bytes_inserted;
        CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
            << ": Encountered offset which points to a now-deleted section "
               "of memory. The offset should have been null'd out prior to "
               "deleting the memory.";
      } else {
        CHECK_EQ(nullptr, object.object);
      }
      *object.absolute_offset += bytes_inserted;
    }
    // We only need to update the object's buffer if it currently exists.
    if (object.object != nullptr) {
      std::span<uint8_t> subbuffer = BufferForObject(
          *object.absolute_offset, object.object->buffer_.size());
      // By convention (enforced in InsertBytes()), the modification_point
      // shall not be at the start of the subobject's data buffer; it may be
      // the byte just past the end of the buffer. This makes it unambiguous
      // which subobject(s) should get the extra space when a buffer size
      // increase is requested on the edge of a buffer.
      if (subbuffer.data() < modification_point &&
          (subbuffer.data() + subbuffer.size()) >= modification_point) {
        subbuffer = {subbuffer.data(), subbuffer.size() + bytes_inserted};
      }
      object.object->UpdateBuffer(subbuffer, modification_point,
                                  bytes_inserted);
    }
  }
}

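// The SpanAllocator hands out memory from the tail end of its backing span:
// Allocate() returns the last `size` bytes, and InsertBytes()/RemoveBytes()
// below keep the allocation flush with the end of the span by shifting the
// bytes in front of the modification point. That keeps growth and shrinkage
// to a single memmove of the prefix.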
std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
                                                          size_t alignment,
                                                          SetZero set_zero) {
  CHECK(!allocated_);
  if (size > buffer_.size()) {
    return std::nullopt;
  }
  if (set_zero == SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }
  allocated_size_ = size;
  allocated_ = true;
  CHECK_GT(alignment, 0u);
  CHECK_EQ(buffer_.size() % alignment, 0u)
      << ": Buffer isn't a multiple of alignment " << alignment << " long, is "
      << buffer_.size() << " long";
  return internal::GetSubSpan(buffer_, buffer_.size() - size);
}

std::optional<std::span<uint8_t>> SpanAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    SetZero set_zero) {
  uint8_t *insertion_point_typed = reinterpret_cast<uint8_t *>(insertion_point);
  const ssize_t insertion_index = insertion_point_typed - buffer_.data();
  CHECK_LE(0, insertion_index);
  CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
  const size_t new_size = allocated_size_ + bytes;
  if (new_size > buffer_.size()) {
    VLOG(1) << ": Insufficient space to grow by " << bytes << " bytes.";
    return std::nullopt;
  }
  const size_t old_start_index = buffer_.size() - allocated_size_;
  const size_t new_start_index = buffer_.size() - new_size;
  memmove(buffer_.data() + new_start_index, buffer_.data() + old_start_index,
          insertion_index - old_start_index);
  if (set_zero == SetZero::kYes) {
    memset(insertion_point_typed - bytes, 0, bytes);
  }
  allocated_size_ = new_size;
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

std::span<uint8_t> SpanAllocator::RemoveBytes(std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

void SpanAllocator::Deallocate(std::span<uint8_t>) {
  CHECK(allocated_) << ": Called Deallocate() without a prior allocation.";
  allocated_ = false;
}

AlignedVectorAllocator::~AlignedVectorAllocator() {
  CHECK(buffer_.empty())
      << ": Must deallocate before destroying the AlignedVectorAllocator.";
}

std::optional<std::span<uint8_t>> AlignedVectorAllocator::Allocate(
    size_t size, size_t /*alignment*/, fbs::SetZero set_zero) {
  CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
  buffer_.resize(((size + kAlignment - 1) / kAlignment) * kAlignment);
  allocated_size_ = size;
  if (set_zero == fbs::SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }

  return std::span<uint8_t>{data(), allocated_size_};
}

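// Like the SpanAllocator, the AlignedVectorAllocator keeps the allocated
// region flush with the end of its backing storage (note the
// `buffer_.size() - allocated_size_` math in RemoveBytes() below). Growing
// therefore means resizing the vector and shifting both halves of the
// existing data into their new end-aligned positions.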
std::optional<std::span<uint8_t>> AlignedVectorAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    fbs::SetZero set_zero) {
  DCHECK_GE(reinterpret_cast<const uint8_t *>(insertion_point), data());
  DCHECK_LE(reinterpret_cast<const uint8_t *>(insertion_point),
            data() + allocated_size_);
  const size_t buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - data();
  // TODO(austin): This has an extra memcpy in it that isn't strictly needed
  // when we resize. Remove it if performance is a concern.
  const size_t absolute_buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - buffer_.data();
  const size_t previous_size = buffer_.size();

  buffer_.resize(((allocated_size_ + bytes + kAlignment - 1) / kAlignment) *
                 kAlignment);

  // Now we've got space both before and after the block of data. Move the data
  // after the insertion point to the end of the buffer, and the data before it
  // to the start of the resized allocation.

  const size_t new_space_after = buffer_.size() - previous_size;

  // Move the rest of the data to be end aligned. If the buffer wasn't resized,
  // this will be a no-op.
  memmove(buffer_.data() + absolute_buffer_offset + new_space_after,
          buffer_.data() + absolute_buffer_offset,
          previous_size - absolute_buffer_offset);

  // Now, move the data at the front to be aligned too.
  memmove(buffer_.data() + buffer_.size() - (allocated_size_ + bytes),
          buffer_.data() + previous_size - allocated_size_,
          allocated_size_ - (previous_size - absolute_buffer_offset));

  if (set_zero == fbs::SetZero::kYes) {
    memset(data() - bytes + buffer_offset, 0, bytes);
  }
  allocated_size_ += bytes;

  return std::span<uint8_t>{data(), allocated_size_};
}

std::span<uint8_t> AlignedVectorAllocator::RemoveBytes(
    std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();

  return std::span<uint8_t>{data(), allocated_size_};
}

void AlignedVectorAllocator::Deallocate(std::span<uint8_t>) {
  if (!released_) {
    CHECK(!buffer_.empty())
        << ": Called Deallocate() without a prior allocation.";
  }
  released_ = false;
  buffer_.resize(0);
}

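// Release() transfers ownership of the backing vector into a heap-allocated
// SharedSpanHolder and returns a SharedSpan built with the shared_ptr aliasing
// constructor: the returned pointer refers to the span member while keeping
// the holder (and thus the vector) alive for as long as any copy exists.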
aos::SharedSpan AlignedVectorAllocator::Release() {
  absl::Span<uint8_t> span{data(), allocated_size_};
  std::shared_ptr<SharedSpanHolder> result = std::make_shared<SharedSpanHolder>(
      std::move(buffer_), absl::Span<const uint8_t>());
  result->span = span;
  released_ = true;
  return aos::SharedSpan(result, &(result->span));
}

namespace internal {
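// Dumps `span` as rows of up to eight bytes of uppercase hex, each row
// prefixed with the four-digit hex offset of its first byte, e.g.
// "0000: DE AD BE EF 00 00 00 00".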
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
  constexpr size_t kRowSize = 8u;
  for (size_t index = 0; index < span.size(); index += kRowSize) {
    os << std::hex << std::setw(4) << std::setfill('0') << std::uppercase
       << index << ": ";
    for (size_t subindex = 0;
         subindex < kRowSize && (index + subindex) < span.size(); ++subindex) {
      os << std::setw(2) << static_cast<int>(span[index + subindex]) << " ";
    }
    os << "\n";
  }
  os << std::resetiosflags(std::ios_base::basefield | std::ios_base::uppercase);
  return os;
}
}  // namespace internal
}  // namespace aos::fbs