/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern crate smallvec;

use std::cmp::max;
use std::iter::{DoubleEndedIterator, ExactSizeIterator};
use std::marker::PhantomData;
use std::ptr::write_bytes;
use std::slice::from_raw_parts;

use crate::endian_scalar::{emplace_scalar, read_scalar_at};
use crate::primitives::*;
use crate::push::{Push, PushAlignment};
use crate::table::Table;
use crate::vector::{SafeSliceAccess, Vector};
use crate::vtable::{field_index_to_field_offset, VTable};
use crate::vtable_writer::VTableWriter;

pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}

/// FlatBufferBuilder builds a FlatBuffer through manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
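///
/// A minimal usage sketch (building a buffer whose root object is a string;
/// real schemas normally use the generated code on top of these primitives):
///
/// ```
/// let mut fbb = flatbuffers::FlatBufferBuilder::new();
/// let root = fbb.create_string("Hello, world!");
/// fbb.finish(root, None);
/// let bytes: &[u8] = fbb.finished_data();
/// assert!(!bytes.is_empty());
/// ```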
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,
    force_defaults: bool,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::new_with_capacity(0)
    }

    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
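    ///
    /// A minimal sketch:
    ///
    /// ```
    /// // pre-allocate 1 KiB so that small buffers never need to grow:
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new_with_capacity(1024);
    /// ```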
    pub fn new_with_capacity(size: usize) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(
            size <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );

        FlatBufferBuilder {
            owned_buf: vec![0u8; size],
            head: size,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,
            force_defaults: false,

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
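    ///
    /// A minimal sketch of the re-use pattern:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// for &s in ["hello", "world"].iter() {
    ///     let root = fbb.create_string(s);
    ///     fbb.finish(root, None);
    ///     // ... consume fbb.finished_data() here ...
    ///     fbb.reset(); // re-uses the existing allocation for the next message
    /// }
    /// ```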
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
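    ///
    /// A minimal sketch (most users will go through the generated code rather
    /// than pushing raw values):
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// fbb.push(123u32); // writes four little-endian bytes at the front
    /// ```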
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op (unless `force_defaults` is enabled).
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x != default || self.force_defaults {
            self.push_slot_always(slotoff, x);
        }
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
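    ///
    /// A minimal sketch of manual table construction. The generated code
    /// normally does this; the slot offset `4` used here is the offset of
    /// field #0, i.e. `field_index_to_field_offset(0)`:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let start = fbb.start_table();
    /// fbb.push_slot::<u8>(4, 42, 0); // field #0: value 42, default 0
    /// let table = fbb.end_table(start);
    /// fbb.finish(table, None);
    /// ```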
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
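    ///
    /// A minimal sketch, equivalent to `create_vector(&[1u32, 2, 3])`. Note
    /// that items are pushed in reverse order, because the buffer is written
    /// back-to-front:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// fbb.start_vector::<u32>(3);
    /// for &x in [1u32, 2, 3].iter().rev() {
    ///     fbb.push(x);
    /// }
    /// let _vec_offset = fbb.end_vector::<u32>(3);
    /// ```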
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
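    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let _hello = fbb.create_string("hello");
    /// ```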
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
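    ///
    /// A minimal sketch, using `u8` (safe on any platform):
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let _bytes = fbb.create_vector_direct(&[1u8, 2, 3]);
    /// ```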
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));

        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
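    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let _names = fbb.create_vector_of_strings(&["foo", "bar"]);
    /// ```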
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(
        &'a mut self,
        xs: &'b [&'b str],
    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
            smallvec::SmallVec::with_capacity(xs.len());
        // every element is overwritten in the loop below before it is read:
        unsafe {
            offsets.set_len(xs.len());
        }

        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        self.create_vector(&offsets[..])
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
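    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let _nums = fbb.create_vector(&[1i64, 2, 3]);
    /// ```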
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for i in (0..items.len()).rev() {
            self.push(items[i]);
        }
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Create a vector of Push-able objects from an iterator.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
    #[inline]
    pub fn create_vector_from_iter<T: Push + Copy>(
        &mut self,
        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        let len = items.len();
        self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for item in items.rev() {
            self.push(item);
        }
        WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
    }

    /// Set whether default values are stored.
    ///
    /// In order to save space, fields that are set to their default value
    /// aren't stored in the buffer. Setting `force_defaults` to `true`
    /// disables this optimization.
    ///
    /// By default, `force_defaults` is `false`.
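    ///
    /// A minimal sketch:
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// fbb.force_defaults(true); // subsequent `push_slot` calls write even default values
    /// ```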
    #[inline]
    pub fn force_defaults(&mut self, force_defaults: bool) {
        self.force_defaults = force_defaults;
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }

    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }

    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
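    ///
    /// A minimal sketch (the file identifier, when provided, must be exactly
    /// four bytes long):
    ///
    /// ```
    /// let mut fbb = flatbuffers::FlatBufferBuilder::new();
    /// let root = fbb.create_string("root");
    /// fbb.finish(root, None); // or e.g. fbb.finish(root, Some("MONS"))
    /// let _bytes = fbb.finished_data();
    /// ```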
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write a placeholder for the vtable offset, which is the field that
        // starts every Table. We fill in its real value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0 [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets.

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(
                    vtfw.get_field_offset(fl.id),
                    0,
                    "tried to write a vtable field multiple times"
                );
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };

        let vt_use = match dup_vt_use {
            // duplicate: erase the vtable we just serialized and use the
            // existing one:
            Some(n) => {
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            // new vtable: remember its position for future deduplication:
            None => {
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };

        // replace the placeholder written earlier with the signed offset from
        // the table to its vtable:
        {
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
            debug_assert_eq!(saw, 0xF0F0_F0F0);
            emplace_scalar::<SOffsetT>(
                &mut self.owned_buf[n..n + SIZE_SOFFSET],
                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
        for &revloc in self.written_vtable_revpos.iter().rev() {
            let o = VTable::init(
                &self.owned_buf[..],
                self.head + self.used_space() - revloc as usize,
            );
            if needle == o {
                return Some(revloc);
            }
        }
        None
    }

    // Only call this when you know it is safe to double the size of the buffer.
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }

    // Whether a size prefix is present changes how the data will be loaded,
    // so the finish* functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space();
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }

    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }

    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            format!(
                "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
                fn_name
            )
        );
    }

    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, msg);
    }

    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, msg);
    }

    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
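    //
    // A worked example (added for illustration): with buf_size = 5 and
    // scalar_size = 4, the expression computes (-5) & 3 in two's complement,
    // which is 3: three bytes of padding round the 5 bytes written so far up
    // to 8, a multiple of the 4-byte alignment.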
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::new_with_capacity(0)
    }
}