/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#[cfg(not(feature = "std"))]
use alloc::{vec, vec::Vec};
use core::cmp::max;
use core::iter::{DoubleEndedIterator, ExactSizeIterator};
use core::marker::PhantomData;
use core::ptr::write_bytes;

use crate::endian_scalar::emplace_scalar;
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::read_scalar;
use crate::table::Table;
use crate::vector::Vector;
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
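///
/// # Example
///
/// A minimal end-to-end sketch (illustrative only; the string root here
/// stands in for a generated table type):
///
/// ```ignore
/// let mut builder = FlatBufferBuilder::new();
/// let root = builder.create_string("hello");
/// builder.finish_minimal(root);
/// let bytes: &[u8] = builder.finished_data();
/// assert!(!bytes.is_empty());
/// ```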
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,
    strings_pool: Vec<WIPOffset<&'fbb str>>,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }
    #[deprecated(note = "replaced with `with_capacity`", since = "0.8.5")]
    pub fn new_with_capacity(size: usize) -> Self {
        Self::with_capacity(size)
    }
    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn with_capacity(size: usize) -> Self {
        Self::from_vec(vec![0; size])
    }
    /// Create a FlatBufferBuilder that is ready for writing, reusing
    /// an existing vector.
    pub fn from_vec(buffer: Vec<u8>) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(
            buffer.len() <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );
        let head = buffer.len();
        FlatBufferBuilder {
            owned_buf: buffer,
            head,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,
            strings_pool: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
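    ///
    /// # Example
    ///
    /// A reuse sketch (illustrative; `send` is a hypothetical consumer of the
    /// finished bytes):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// for msg in ["a", "b", "c"] {
    ///     let root = builder.create_string(msg);
    ///     builder.finish_minimal(root);
    ///     send(builder.finished_data());
    ///     builder.reset(); // reuse the allocation for the next message
    /// }
    /// ```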
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = self.owned_buf[self.head..].as_mut_ptr();
            // Safety:
            // Verified ptr is valid for `to_clear` above
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
        self.strings_pool.clear();
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
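    ///
    /// A sketch (illustrative): the valid data is the tail of the vector.
    ///
    /// ```ignore
    /// let (buf, head) = builder.collapse();
    /// let data = &buf[head..];
    /// ```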
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
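    ///
    /// A sketch (illustrative): pushing a scalar writes it at the front and
    /// returns a `WIPOffset` recording its location.
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let loc = builder.push(42u32);
    /// ```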
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = self.owned_buf[self.head..].split_at_mut(sz);
            // Safety:
            // Called make_space above
            unsafe { x.push(dst, rest.len()) };
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
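    ///
    /// A hand-rolled table sketch (illustrative; generated code normally
    /// computes vtable slot offsets like the literal `4` below):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let start = builder.start_table();
    /// builder.push_slot(4, 123u32, 0u32); // field #0; skipped if default
    /// let table = builder.end_table(start);
    /// builder.finish_minimal(table);
    /// ```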
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
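    ///
    /// A manual-vector sketch (illustrative). The builder writes back-to-front,
    /// so elements are pushed in reverse order:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// builder.start_vector::<u32>(3);
    /// for x in [3u32, 2, 1] {
    ///     builder.push(x); // the last push ends up at index 0
    /// }
    /// let v = builder.end_vector::<u32>(3);
    /// ```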
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

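    /// Create a string, re-using an identical string that was written
    /// earlier, if one exists. New strings are written via
    /// `create_byte_string`; duplicates cost only a binary search through
    /// the pool of previously written strings. A sketch (illustrative):
    ///
    /// ```ignore
    /// let a = builder.create_shared_string("hello");
    /// let b = builder.create_shared_string("hello");
    /// assert_eq!(a.value(), b.value()); // deduplicated
    /// ```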
    #[inline]
    pub fn create_shared_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_shared_string can not be called when a table or vector is under construction",
        );

        // Save a reference to owned_buf, since Rust doesn't like us
        // referencing it inside the binary_search_by closure.
        let buf = &self.owned_buf;

        let found = self.strings_pool.binary_search_by(|offset| {
            let ptr = offset.value() as usize;
            // Slice starting at the written string's length prefix
            let str_memory = &buf[buf.len() - ptr..];
            // Read the written string's size from the buffer
            let size =
                u32::from_le_bytes([str_memory[0], str_memory[1], str_memory[2], str_memory[3]])
                    as usize;
            // The length prefix itself occupies 4 bytes
            let string_size: usize = 4;
            // The string's bytes follow the length prefix
            let iter = str_memory[string_size..size + string_size].iter();
            // Compare the bytes of the written string with the candidate string
            iter.cloned().cmp(s.bytes())
        });

        match found {
            Ok(index) => self.strings_pool[index],
            Err(index) => {
                let address = WIPOffset::new(self.create_byte_string(s.as_bytes()).value());
                self.strings_pool.insert(index, address);
                address
            }
        }
    }

    /// Create a UTF-8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
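    ///
    /// A sketch (illustrative):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let hello = builder.create_string("hello");
    /// ```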
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
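    ///
    /// A sketch (illustrative):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let nums = builder.create_vector(&[1u32, 2, 3]);
    /// ```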
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let slice_size = items.len() * elem_size;
        self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
        self.ensure_capacity(slice_size + UOffsetT::size());

        self.head -= slice_size;
        let mut written_len = self.owned_buf.len() - self.head;

        let buf = &mut self.owned_buf[self.head..self.head + slice_size];
        for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
            written_len -= elem_size;

            // Safety:
            // Called ensure_capacity and aligned to T above
            unsafe { item.push(out, written_len) };
        }

        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
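    ///
    /// A sketch (illustrative); any `ExactSizeIterator + DoubleEndedIterator`
    /// will do, such as a range:
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let nums = builder.create_vector_from_iter(0u32..5);
    /// ```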
    #[inline]
    pub fn create_vector_from_iter<T: Push>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        let mut actual = 0;
        for item in items.rev() {
            self.push(item);
            actual += 1;
        }
        WIPOffset::new(self.push::<UOffsetT>(actual).value())
    }

    /// Set whether default values are stored.
    ///
    /// In order to save space, fields that are set to their default value
    /// aren't stored in the buffer. Setting `force_defaults` to `true`
    /// disables this optimization.
    ///
    /// By default, `force_defaults` is `false`.
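    ///
    /// A sketch (illustrative): with `force_defaults` enabled, `push_slot`
    /// writes a field even when it equals its default value.
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// builder.force_defaults(true);
    /// ```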
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }
    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    /// # Panics
    /// Panics if the buffer is not finished.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }
    /// Returns a mutable view of a finished buffer and location of where the flatbuffer starts.
    /// Note that modifying the flatbuffer data may corrupt it.
    /// # Panics
    /// Panics if the flatbuffer is not finished.
    #[inline]
    pub fn mut_finished_buffer(&mut self) -> (&mut [u8], usize) {
        (&mut self.owned_buf, self.head)
    }
    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;

        // Safety:
        // The value of TableFinishedWIPOffset is the offset from the end of owned_buf
        // to an SOffsetT pointing to a valid VTable
        //
        // `self.owned_buf.len() = self.used_space() + self.head`
        // `self.owned_buf.len() - tab_revloc = self.used_space() - tab_revloc + self.head`
        // `self.owned_buf.len() - tab_revloc = idx + self.head`
        let tab = unsafe { Table::new(&self.owned_buf[self.head..], idx) };
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
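    ///
    /// A sketch (illustrative; `"MONS"` stands in for a schema's 4-byte file
    /// identifier, and the string root for a generated table):
    ///
    /// ```ignore
    /// let mut builder = FlatBufferBuilder::new();
    /// let root = builder.create_string("orc");
    /// builder.finish(root, Some("MONS"));
    /// let bytes = builder.finished_data();
    /// ```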
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head as usize
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0 [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let new_vt_bytes = &self.owned_buf[vt_start_pos..vt_end_pos];
        let found = self
            .written_vtable_revpos
            .binary_search_by(|old_vtable_revpos: &UOffsetT| {
                let old_vtable_pos = self.owned_buf.len() - *old_vtable_revpos as usize;
                // Safety:
                // Already written vtables are valid by construction
                let old_vtable = unsafe { VTable::init(&self.owned_buf, old_vtable_pos) };
                new_vt_bytes.cmp(old_vtable.as_bytes())
            });
        let final_vtable_revpos = match found {
            Ok(i) => {
                // The new vtable is a duplicate so clear it.
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                self.written_vtable_revpos[i]
            }
            Err(i) => {
                // This is a new vtable. Add it to the cache.
                let new_vt_revpos = self.used_space() as UOffsetT;
                self.written_vtable_revpos.insert(i, new_vt_revpos);
                new_vt_revpos
            }
        };
        // Write the signed offset from the table to its vtable.
        let table_pos = self.owned_buf.len() - object_revloc_to_vtable.value() as usize;
        if cfg!(debug_assertions) {
            // Safety:
            // Verified slice length
            let tmp_soffset_to_vt = unsafe {
                read_scalar::<UOffsetT>(&self.owned_buf[table_pos..table_pos + SIZE_UOFFSET])
            };
            assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
        }

        let buf = &mut self.owned_buf[table_pos..table_pos + SIZE_SOFFSET];
        // Safety:
        // Verified length of buf above
        unsafe {
            emplace_scalar::<SOffsetT>(
                buf,
                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = self.owned_buf[..middle].as_mut_ptr();
            // Safety:
            // ptr is byte aligned and of length middle
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }

    // with or without a size prefix changes how we load the data, so finish*
    // functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space() as usize;
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }
    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }
    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }
    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, "{}", msg);
    }
    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, "{}", msg);
    }
    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, "{}", msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
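    // Worked examples (assuming scalar_size is a power of two, as this
    // bitmask trick requires): padding_bytes(6, 4) == 2, padding_bytes(8, 4) == 0.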
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::with_capacity(0)
    }
}