blob: 2dbaa60055a296d928a5c7450e01aaa074c1f7c3 [file] [log] [blame]
/*
 * Copyright 2021 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
18#define FLATBUFFERS_VECTOR_DOWNWARD_H_
19
James Kuszmaul3b15b0c2022-11-08 14:03:16 -080020#include <algorithm>
21
James Kuszmaul8e62b022022-03-22 09:33:25 -070022#include "flatbuffers/base.h"
23#include "flatbuffers/default_allocator.h"
24#include "flatbuffers/detached_buffer.h"
25
26namespace flatbuffers {
27
28// This is a minimal replication of std::vector<uint8_t> functionality,
Austin Schuh2dd86a92022-09-14 21:19:23 -070029// except growing from higher to lower addresses. i.e. push_back() inserts data
James Kuszmaul8e62b022022-03-22 09:33:25 -070030// in the lowest address in the vector.
31// Since this vector leaves the lower part unused, we support a "scratch-pad"
32// that can be stored there for temporary data, to share the allocated space.
33// Essentially, this supports 2 std::vectors in a single buffer.
34class vector_downward {
35 public:
36 explicit vector_downward(size_t initial_size, Allocator *allocator,
37 bool own_allocator, size_t buffer_minalign)
38 : allocator_(allocator),
39 own_allocator_(own_allocator),
40 initial_size_(initial_size),
41 buffer_minalign_(buffer_minalign),
42 reserved_(0),
43 size_(0),
44 buf_(nullptr),
45 cur_(nullptr),
46 scratch_(nullptr) {}
47
48 vector_downward(vector_downward &&other)
49 // clang-format on
50 : allocator_(other.allocator_),
51 own_allocator_(other.own_allocator_),
52 initial_size_(other.initial_size_),
53 buffer_minalign_(other.buffer_minalign_),
54 reserved_(other.reserved_),
55 size_(other.size_),
56 buf_(other.buf_),
57 cur_(other.cur_),
58 scratch_(other.scratch_) {
59 // No change in other.allocator_
60 // No change in other.initial_size_
61 // No change in other.buffer_minalign_
62 other.own_allocator_ = false;
63 other.reserved_ = 0;
64 other.buf_ = nullptr;
65 other.cur_ = nullptr;
66 other.scratch_ = nullptr;
67 }
68
69 vector_downward &operator=(vector_downward &&other) {
70 // Move construct a temporary and swap idiom
71 vector_downward temp(std::move(other));
72 swap(temp);
73 return *this;
74 }
75
76 ~vector_downward() {
77 clear_buffer();
78 clear_allocator();
79 }
80
81 void reset() {
82 clear_buffer();
83 clear();
84 }
85
86 void clear() {
87 if (buf_) {
88 cur_ = buf_ + reserved_;
89 } else {
90 reserved_ = 0;
91 cur_ = nullptr;
92 }
93 size_ = 0;
94 clear_scratch();
95 }
96
97 void clear_scratch() { scratch_ = buf_; }
98
99 void clear_allocator() {
100 if (own_allocator_ && allocator_) { delete allocator_; }
101 allocator_ = nullptr;
102 own_allocator_ = false;
103 }
104
105 void clear_buffer() {
106 if (buf_) Deallocate(allocator_, buf_, reserved_);
107 buf_ = nullptr;
108 }
109
110 // Relinquish the pointer to the caller.
111 uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
112 auto *buf = buf_;
113 allocated_bytes = reserved_;
114 offset = static_cast<size_t>(cur_ - buf_);
115
116 // release_raw only relinquishes the buffer ownership.
117 // Does not deallocate or reset the allocator. Destructor will do that.
118 buf_ = nullptr;
119 clear();
120 return buf;
121 }
122
123 // Relinquish the pointer to the caller.
124 DetachedBuffer release() {
125 // allocator ownership (if any) is transferred to DetachedBuffer.
126 DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_,
127 size());
128 if (own_allocator_) {
129 allocator_ = nullptr;
130 own_allocator_ = false;
131 }
132 buf_ = nullptr;
133 clear();
134 return fb;
135 }
136
137 size_t ensure_space(size_t len) {
138 FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
139 if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
140 // Beyond this, signed offsets may not have enough range:
141 // (FlatBuffers > 2GB not supported).
142 FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
143 return len;
144 }
145
146 inline uint8_t *make_space(size_t len) {
147 if (len) {
148 ensure_space(len);
149 cur_ -= len;
150 size_ += static_cast<uoffset_t>(len);
151 }
152 return cur_;
153 }
154
155 // Returns nullptr if using the DefaultAllocator.
156 Allocator *get_custom_allocator() { return allocator_; }
157
158 inline uoffset_t size() const { return size_; }
159
160 uoffset_t scratch_size() const {
161 return static_cast<uoffset_t>(scratch_ - buf_);
162 }
163
164 size_t capacity() const { return reserved_; }
165
166 uint8_t *data() const {
167 FLATBUFFERS_ASSERT(cur_);
168 return cur_;
169 }
170
171 uint8_t *scratch_data() const {
172 FLATBUFFERS_ASSERT(buf_);
173 return buf_;
174 }
175
176 uint8_t *scratch_end() const {
177 FLATBUFFERS_ASSERT(scratch_);
178 return scratch_;
179 }
180
181 uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }
182
183 void push(const uint8_t *bytes, size_t num) {
184 if (num > 0) { memcpy(make_space(num), bytes, num); }
185 }
186
187 // Specialized version of push() that avoids memcpy call for small data.
188 template<typename T> void push_small(const T &little_endian_t) {
189 make_space(sizeof(T));
190 *reinterpret_cast<T *>(cur_) = little_endian_t;
191 }
192
193 template<typename T> void scratch_push_small(const T &t) {
194 ensure_space(sizeof(T));
195 *reinterpret_cast<T *>(scratch_) = t;
196 scratch_ += sizeof(T);
197 }
198
199 // fill() is most frequently called with small byte counts (<= 4),
200 // which is why we're using loops rather than calling memset.
201 void fill(size_t zero_pad_bytes) {
202 make_space(zero_pad_bytes);
203 for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0;
204 }
205
206 // Version for when we know the size is larger.
207 // Precondition: zero_pad_bytes > 0
208 void fill_big(size_t zero_pad_bytes) {
209 memset(make_space(zero_pad_bytes), 0, zero_pad_bytes);
210 }
211
212 void pop(size_t bytes_to_remove) {
213 cur_ += bytes_to_remove;
214 size_ -= static_cast<uoffset_t>(bytes_to_remove);
215 }
216
217 void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
218
219 void swap(vector_downward &other) {
220 using std::swap;
221 swap(allocator_, other.allocator_);
222 swap(own_allocator_, other.own_allocator_);
223 swap(initial_size_, other.initial_size_);
224 swap(buffer_minalign_, other.buffer_minalign_);
225 swap(reserved_, other.reserved_);
226 swap(size_, other.size_);
227 swap(buf_, other.buf_);
228 swap(cur_, other.cur_);
229 swap(scratch_, other.scratch_);
230 }
231
232 void swap_allocator(vector_downward &other) {
233 using std::swap;
234 swap(allocator_, other.allocator_);
235 swap(own_allocator_, other.own_allocator_);
236 }
237
238 private:
239 // You shouldn't really be copying instances of this class.
240 FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
241 FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));
242
243 Allocator *allocator_;
244 bool own_allocator_;
245 size_t initial_size_;
246 size_t buffer_minalign_;
247 size_t reserved_;
248 uoffset_t size_;
249 uint8_t *buf_;
250 uint8_t *cur_; // Points at location between empty (below) and used (above).
251 uint8_t *scratch_; // Points to the end of the scratchpad in use.
252
253 void reallocate(size_t len) {
254 auto old_reserved = reserved_;
255 auto old_size = size();
256 auto old_scratch_size = scratch_size();
257 reserved_ +=
258 (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
259 reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
260 if (buf_) {
261 buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_,
262 old_size, old_scratch_size);
263 } else {
264 buf_ = Allocate(allocator_, reserved_);
265 }
266 cur_ = buf_ + reserved_ - old_size;
267 scratch_ = buf_ + old_scratch_size;
268 }
269};
270
271} // namespace flatbuffers
272
273#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_