#include "aos/flatbuffers/base.h"

#include <string.h>

#include <iomanip>

namespace aos::fbs {

namespace {
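// Resolves a flatbuffer-style relative offset to an absolute pointer;
// flatbuffer offsets are stored relative to their own location in the buffer.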
void *DereferenceOffset(uoffset_t *offset) {
  return reinterpret_cast<uint8_t *>(offset) + *offset;
}
}  // namespace

ResizeableObject::ResizeableObject(ResizeableObject &&other)
    : buffer_(other.buffer_),
      parent_(other.parent_),
      owned_allocator_(std::move(other.owned_allocator_)),
      allocator_(other.allocator_) {
  // At this stage in the move the move constructors of the inherited types
  // have not yet been called, so we edit the state of the other object now so
  // that when everything is moved over into the new objects they will have
  // the correct pointers.
  for (size_t index = 0; index < other.NumberOfSubObjects(); ++index) {
    SubObject object = other.GetSubObject(index);
    if (object.object != nullptr) {
      object.object->parent_ = this;
    }
  }
  other.buffer_ = {};
  other.allocator_ = nullptr;
  other.parent_ = nullptr;
  // Sanity check that the std::unique_ptr move didn't reallocate/move memory
  // around.
  if (owned_allocator_.get() != nullptr) {
    CHECK_EQ(owned_allocator_.get(), allocator_);
  }
}

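// Inserts bytes at insertion_point, rounding the request up to a multiple of
// Alignment(). Sub-objects forward the request to their parent; the root
// object goes to the allocator and then patches up its own buffer and offsets.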
bool ResizeableObject::InsertBytes(void *insertion_point, size_t bytes,
                                   SetZero set_zero) {
  // See comments on InsertBytes() declaration and in FixObjects()
  // implementation below.
  CHECK_LT(buffer_.data(), reinterpret_cast<const uint8_t *>(insertion_point))
      << ": Insertion may not be prior to the start of the buffer.";
  // Check that we started off with a properly aligned size.
  // Doing this CHECK earlier is tricky because if done in the constructor then
  // it executes prior to the Alignment() implementation being available.
  CHECK_EQ(0u, buffer_.size() % Alignment());
  // Note that we will round up the size to the current alignment, so that we
  // ultimately end up only adjusting the buffer size by a multiple of its
  // alignment, to avoid having to do any more complicated bookkeeping.
  const size_t aligned_bytes = PaddedSize(bytes, Alignment());
  if (parent_ != nullptr) {
    return parent_->InsertBytes(insertion_point, aligned_bytes, set_zero);
  } else {
    std::optional<std::span<uint8_t>> new_buffer =
        CHECK_NOTNULL(allocator_)
            ->InsertBytes(insertion_point, aligned_bytes, Alignment(),
                          set_zero);
    if (!new_buffer.has_value()) {
      return false;
    }
    UpdateBuffer(new_buffer.value(),
                 new_buffer.value().data() +
                     (reinterpret_cast<const uint8_t *>(insertion_point) -
                      buffer_.data()),
                 aligned_bytes);
    return true;
  }
}

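// Points this object at its new backing buffer, fixes up any sub-object
// offsets and buffers to account for the bytes inserted at modification_point,
// and then lets the subclass react via ObserveBufferModification().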
void ResizeableObject::UpdateBuffer(std::span<uint8_t> new_buffer,
                                    void *modification_point,
                                    ssize_t bytes_inserted) {
  buffer_ = new_buffer;
  FixObjects(modification_point, bytes_inserted);
  ObserveBufferModification();
}

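// Returns the buffer for a sub-object at the provided absolute offset, zeroing
// out any padding between the end of the object and the next
// terminal_alignment boundary.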
std::span<uint8_t> ResizeableObject::BufferForObject(
    size_t absolute_offset, size_t size, size_t terminal_alignment) {
  const size_t padded_size = PaddedSize(size, terminal_alignment);
  std::span<uint8_t> padded_buffer =
      internal::GetSubSpan(buffer_, absolute_offset, padded_size);
  std::span<uint8_t> object_buffer =
      internal::GetSubSpan(padded_buffer, 0, size);
  std::span<uint8_t> padding = internal::GetSubSpan(padded_buffer, size);
  internal::ClearSpan(padding);
  return object_buffer;
}

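// Walks all of the sub-objects and adjusts their absolute offsets, stored
// flatbuffer offsets, and buffer spans to account for bytes_inserted bytes
// having been inserted at modification_point.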
void ResizeableObject::FixObjects(void *modification_point,
                                  ssize_t bytes_inserted) {
  CHECK_EQ(bytes_inserted % Alignment(), 0u)
      << ": We only support inserting N * Alignment() bytes at a time. This "
         "may change in the future.";
  for (size_t index = 0; index < NumberOfSubObjects(); ++index) {
    SubObject object = GetSubObject(index);
    const void *const absolute_offset =
        PointerForAbsoluteOffset(*object.absolute_offset);
    if (absolute_offset >= modification_point &&
        object.inline_entry < modification_point) {
      if (*object.inline_entry != 0) {
        CHECK_EQ(static_cast<const void *>(
                     static_cast<const uint8_t *>(absolute_offset) +
                     CHECK_NOTNULL(object.object)->AbsoluteOffsetOffset()),
                 DereferenceOffset(object.inline_entry));
        *object.inline_entry += bytes_inserted;
        CHECK_GE(DereferenceOffset(object.inline_entry), modification_point)
            << ": Encountered offset which points to a now-deleted section "
               "of memory. The offset should have been null'd out prior to "
               "deleting the memory.";
      } else {
        CHECK_EQ(nullptr, object.object);
      }
      *object.absolute_offset += bytes_inserted;
    }
    // We only need to update the object's buffer if it currently exists.
    if (object.object != nullptr) {
      std::span<uint8_t> subbuffer = BufferForObject(
          *object.absolute_offset, object.object->buffer_.size(),
          object.object->Alignment());
      // By convention (enforced in InsertBytes()), the modification_point
      // shall not be at the start of the subobject's data buffer; it may be
      // the byte just past the end of the buffer. This makes it unambiguous
      // which subobject(s) should get the extra space when a buffer size
      // increase is requested on the edge of a buffer.
      if (subbuffer.data() < modification_point &&
          (subbuffer.data() + subbuffer.size()) >= modification_point) {
        subbuffer = {subbuffer.data(), subbuffer.size() + bytes_inserted};
      }
      object.object->UpdateBuffer(subbuffer, modification_point,
                                  bytes_inserted);
    }
  }
}

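// Allocates from the tail end of the fixed span, so that later InsertBytes()
// calls can grow the allocation towards the front of the span.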
std::optional<std::span<uint8_t>> SpanAllocator::Allocate(size_t size,
                                                          size_t alignment,
                                                          SetZero set_zero) {
  CHECK(!allocated_);
  if (size > buffer_.size()) {
    return std::nullopt;
  }
  if (set_zero == SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }
  allocated_size_ = size;
  allocated_ = true;
  CHECK_GT(alignment, 0u);
  CHECK_EQ(buffer_.size() % alignment, 0u)
      << ": Buffer isn't a multiple of alignment " << alignment << " long, is "
      << buffer_.size() << " long";
  return internal::GetSubSpan(buffer_, buffer_.size() - size);
}

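// Grows the allocation towards the front of the span: the data before
// insertion_point is shifted down to make room, while the data at and after
// the insertion point stays put.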
std::optional<std::span<uint8_t>> SpanAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    SetZero set_zero) {
  uint8_t *insertion_point_typed = reinterpret_cast<uint8_t *>(insertion_point);
  const ssize_t insertion_index = insertion_point_typed - buffer_.data();
  CHECK_LE(0, insertion_index);
  CHECK_LE(insertion_index, static_cast<ssize_t>(buffer_.size()));
  const size_t new_size = allocated_size_ + bytes;
  if (new_size > buffer_.size()) {
    VLOG(1) << ": Insufficient space to grow by " << bytes << " bytes.";
    return std::nullopt;
  }
  const size_t old_start_index = buffer_.size() - allocated_size_;
  const size_t new_start_index = buffer_.size() - new_size;
  memmove(buffer_.data() + new_start_index, buffer_.data() + old_start_index,
          insertion_index - old_start_index);
  if (set_zero == SetZero::kYes) {
    memset(insertion_point_typed - bytes, 0, bytes);
  }
  allocated_size_ = new_size;
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

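// Shrinks the allocation by shifting the data in front of the removed region
// towards the back of the span.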
std::span<uint8_t> SpanAllocator::RemoveBytes(std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();
  return internal::GetSubSpan(buffer_, buffer_.size() - allocated_size_);
}

void SpanAllocator::Deallocate(std::span<uint8_t>) {
  CHECK(allocated_) << ": Called Deallocate() without a prior allocation.";
  allocated_ = false;
}

AlignedVectorAllocator::~AlignedVectorAllocator() {
  CHECK(buffer_.empty())
      << ": Must deallocate before destroying the AlignedVectorAllocator.";
}

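// Allocates by resizing the backing vector to the requested size rounded up to
// a multiple of kAlignment; the per-call alignment argument is ignored.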
std::optional<std::span<uint8_t>> AlignedVectorAllocator::Allocate(
    size_t size, size_t /*alignment*/, fbs::SetZero set_zero) {
  CHECK(buffer_.empty()) << ": Must deallocate before calling Allocate().";
  buffer_.resize(((size + kAlignment - 1) / kAlignment) * kAlignment);
  allocated_size_ = size;
  if (set_zero == fbs::SetZero::kYes) {
    memset(buffer_.data(), 0, buffer_.size());
  }

  return std::span<uint8_t>{data(), allocated_size_};
}

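// Grows the backing vector and then shuffles the existing data so that the
// allocated region stays aligned to the end of the (kAlignment-padded) vector,
// opening a gap of `bytes` (zeroed if requested) at the insertion point.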
std::optional<std::span<uint8_t>> AlignedVectorAllocator::InsertBytes(
    void *insertion_point, size_t bytes, size_t /*alignment*/,
    fbs::SetZero set_zero) {
  DCHECK_GE(reinterpret_cast<const uint8_t *>(insertion_point), data());
  DCHECK_LE(reinterpret_cast<const uint8_t *>(insertion_point),
            data() + allocated_size_);
  const size_t buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - data();
  // TODO(austin): This has an extra memcpy in it that isn't strictly needed
  // when we resize. Remove it if performance is a concern.
  const size_t absolute_buffer_offset =
      reinterpret_cast<const uint8_t *>(insertion_point) - buffer_.data();
  const size_t previous_size = buffer_.size();

  buffer_.resize(((allocated_size_ + bytes + kAlignment - 1) / kAlignment) *
                 kAlignment);

  // Now, we've got space both before and after the block of data. Move the
  // data after to the end, and the data before to the start.

  const size_t new_space_after = buffer_.size() - previous_size;

  // Move the rest of the data to be end aligned. If the buffer wasn't
  // resized, this will be a nop.
  memmove(buffer_.data() + absolute_buffer_offset + new_space_after,
          buffer_.data() + absolute_buffer_offset,
          previous_size - absolute_buffer_offset);

  // Now, move the data at the front to be aligned too.
  memmove(buffer_.data() + buffer_.size() - (allocated_size_ + bytes),
          buffer_.data() + previous_size - allocated_size_,
          allocated_size_ - (previous_size - absolute_buffer_offset));

  if (set_zero == fbs::SetZero::kYes) {
    memset(data() - bytes + buffer_offset, 0, bytes);
  }
  allocated_size_ += bytes;

  return std::span<uint8_t>{data(), allocated_size_};
}

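// Shrinks the allocation by shifting the data in front of the removed region
// towards the back of the vector, mirroring SpanAllocator::RemoveBytes().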
std::span<uint8_t> AlignedVectorAllocator::RemoveBytes(
    std::span<uint8_t> remove_bytes) {
  const ssize_t removal_index = remove_bytes.data() - buffer_.data();
  const size_t old_start_index = buffer_.size() - allocated_size_;
  CHECK_LE(static_cast<ssize_t>(old_start_index), removal_index);
  CHECK_LE(removal_index, static_cast<ssize_t>(buffer_.size()));
  CHECK_LE(removal_index + remove_bytes.size(), buffer_.size());
  uint8_t *old_buffer_start = buffer_.data() + old_start_index;
  memmove(old_buffer_start + remove_bytes.size(), old_buffer_start,
          removal_index - old_start_index);
  allocated_size_ -= remove_bytes.size();

  return std::span<uint8_t>{data(), allocated_size_};
}

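// Releases the backing vector. If Release() was called, the buffer has already
// been handed off and may legitimately be empty here.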
void AlignedVectorAllocator::Deallocate(std::span<uint8_t>) {
  if (!released_) {
    CHECK(!buffer_.empty())
        << ": Called Deallocate() without a prior allocation.";
  }
  released_ = false;
  buffer_.resize(0);
}

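// Transfers ownership of the backing vector into a reference-counted
// SharedSpanHolder and returns a SharedSpan aliasing the allocated region, so
// the data outlives this allocator.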
aos::SharedSpan AlignedVectorAllocator::Release() {
  absl::Span<uint8_t> span{data(), allocated_size_};
  std::shared_ptr<SharedSpanHolder> result = std::make_shared<SharedSpanHolder>(
      std::move(buffer_), absl::Span<const uint8_t>());
  result->span = span;
  released_ = true;
  return aos::SharedSpan(result, &(result->span));
}

namespace internal {
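// Writes a hex dump of the provided bytes to os, 8 bytes per row, with each
// row prefixed by its starting offset.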
std::ostream &DebugBytes(std::span<const uint8_t> span, std::ostream &os) {
  constexpr size_t kRowSize = 8u;
  for (size_t index = 0; index < span.size(); index += kRowSize) {
    os << std::hex << std::setw(4) << std::setfill('0') << std::uppercase
       << index << ": ";
    for (size_t subindex = 0;
         subindex < kRowSize && (index + subindex) < span.size(); ++subindex) {
      os << std::setw(2) << static_cast<int>(span[index + subindex]) << " ";
    }
    os << "\n";
  }
  os << std::resetiosflags(std::ios_base::basefield | std::ios_base::uppercase);
  return os;
}
}  // namespace internal
}  // namespace aos::fbs