Squashed 'third_party/flatbuffers/' changes from bc44fad35..8aa8b9139

8aa8b9139 Fix handling of +/-inf defaults in TS/rust/go/dart codegen (#7588)
001adf782 Add support for parsing proto map fields (#7613)
dbc58ab77 Fix help output for --gen-includes (#7611)
2facfeec7 Fix missing spaces in flatc help text (#7612)
4de2814c7 Fix: arduino platform build (#7625)
37b1acdaf Fix current official name of macOS (#7627)
a22434e2a Add missing #include <algorithm> for std::min/std::max uses, and #include <limits> for std::numeric_limits<> (#7624)
214cc9468 Bump Rust version to 22.10.26 before publication (#7622)
a4ff275d9 Added option to not require an EoF token when parsing JSON (#7620)
15f32c690 python: object generation prefix and suffix (#7565)
051afd882 Add CreateSharedString to python builder (#7608)
728c033ad Add check for presence of realpath to CMakeLists.txt to support more platforms (#7603)
4c514483d Update DartTest.sh golden files (#7606)
c2d9c2080 [TS] Add support for fixed length arrays on Typescript (#5864) (#7021) (#7581)
e34ae4c6b `build.yml`: Fix missing 'v' in version
e54536127 `build.yml` Update to Kotlin Wrapper 1.0.5
49d9f941c `release.yml` Use env var for passphrase
cefc21c1f `release.yml` Add GPG key for Maven
3e64fa724 `release.yml`: Add Maven Steps
b15f3c57e `release.yml` Use new dotnet version
ff802c680 `release.yml` Use NuGet Key directly
b401957d5 `release.yml` Changed Push to follow examples
8c8151f8f `release.yml` Fix nuget push command
ebb7c203d `release.yml` Add Nuget support
203241ed3 FlatBuffers Version 22.10.26 (#7607)
ac485609c `setup.py`: Define version directly
de5b85aa6 `release.yml`: Switch to `python` directory
de3df2d88 `release.yml`: Add publishing to PyPi
043a24f2e [Python] Fixed the issue with nested unions relying on InitFromBuf. (#7576)
5a48b0d7d release.yml: Typo
ce307556f release.yml: Remove `npm ci`
cb616e27c Create release.yml (#7605)
a54ca1e75 FlatBuffers Version 22.10.25 (#7604)
5b3fadcc1 [vector] Allow to iterate with mutables (#7586)
872a49746 [Nim] Bfbs Nim Generator (#7534)
e30170296 Make type conversions explicit. (#7595)
f7b734438 Fix LongEnum definitions (#7596)
5792623df Rust fix compilation for no_std targets #2 (#7553)
0edb27528 Update Rust version (#7574)
acc6a20d3 tests/test.cpp contains a couple of tests that are only executed (#7571)
04cd037ba Fix #7580 by documenting union schema evolution rules (#7585)
e1c5db988 Turn on clippy for Rust and fix lints for non-generated code (#7575)
b80142b90 Update documentation to mention enum value attributes (#7570)
54418f371 Add support for metadata attributes for enum values (#7567) (#7568)
c92e78a9f FlatBuffers Version 22.9.29 (#7557)
d243b904c [TS] Make strict compliant and improve typings (#7549)
374f8fb5f Rust soundness fixes (#7518)
dadbff571 Moves swift package to root of repository so it can be used directly … (#7548)
76ddae006 FlatBuffers Version 22.9.24 (#7547)
cfe157ec5 Emit internal enums when swift_implementation_only (#7545)
413115858 [Python] Python fixed size array (#7529)
88046190e Upgrade grpc to 1.49.0 and make sure it builds (#7538)
72aa85a75 [C++] Rare bad buffer content alignment if sizeof(T) != alignof(T) (#7520)
bfceebb7f Fix conform (#7532)

git-subtree-dir: third_party/flatbuffers
git-subtree-split: 8aa8b9139eb330f27816a5b8b5bbef402fbe3632
Signed-off-by: James Kuszmaul <james.kuszmaul@bluerivertech.com>
Change-Id: I943faba499baf58e9f561b1e4734922188ba8626
diff --git a/rust/flatbuffers/Cargo.toml b/rust/flatbuffers/Cargo.toml
index b3276c0..2cba5b7 100644
--- a/rust/flatbuffers/Cargo.toml
+++ b/rust/flatbuffers/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "flatbuffers"
-version = "2.1.2"
+version = "22.10.26"
 edition = "2018"
 authors = ["Robert Winslow <hello@rwinslow.com>", "FlatBuffers Maintainers"]
 license = "Apache-2.0"
@@ -12,15 +12,13 @@
 rust = "1.51"
 
 [features]
-default = ["thiserror"]
-no_std = ["core2", "thiserror_core2"]
+default = ["std"]
+std = []
 serialize = ["serde"]
 
 [dependencies]
-smallvec = "1.6.1"
 bitflags = "1.2.1"
 serde = { version = "1.0", optional = true }
-thiserror = { version = "1.0.30", optional = true }
-core2 = { version = "0.4.0", optional = true }
-# This version is compliant with mainline 1.0.30
-thiserror_core2 = { version = "2.0.0", default-features = false, optional = true }
+
+[build-dependencies]
+rustc_version = "0.4.0"
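
A note on the feature change above: the crate now follows the conventional Cargo pattern of a default `std` feature rather than an opt-in `no_std` feature, so embedded consumers opt out of defaults. A minimal downstream sketch, assuming the published 22.10.26 crate:

    // Downstream Cargo.toml (sketch):
    //   [dependencies]
    //   flatbuffers = { version = "22.10.26", default-features = false }
    #![no_std]
    extern crate alloc; // the builder allocates through alloc::vec::Vec when `std` is off

    use flatbuffers::FlatBufferBuilder;

    fn fresh_builder() -> FlatBufferBuilder<'static> {
        FlatBufferBuilder::new()
    }
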
diff --git a/rust/flatbuffers/build.rs b/rust/flatbuffers/build.rs
new file mode 100644
index 0000000..c13ed1d
--- /dev/null
+++ b/rust/flatbuffers/build.rs
@@ -0,0 +1,12 @@
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+    let version_meta = version_meta().unwrap();
+
+    // To use nightly features we declare this and then we can use
+    // #[cfg(nightly)]
+    // for nightly only features
+    if version_meta.channel == Channel::Nightly {
+        println!("cargo:rustc-cfg=nightly")
+    }
+}
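
The build script above emits a custom `nightly` cfg; within the crate it gates nightly-only items such as the `error_in_core` feature flag seen in lib.rs later in this diff. A sketch of the pattern (function name hypothetical):

    // Compiled only when build.rs detected a nightly toolchain.
    #[cfg(nightly)]
    fn core_error_supported() -> bool { true }

    // Stable fallback.
    #[cfg(not(nightly))]
    fn core_error_supported() -> bool { false }
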
diff --git a/rust/flatbuffers/src/array.rs b/rust/flatbuffers/src/array.rs
index 2ce2e47..0895574 100644
--- a/rust/flatbuffers/src/array.rs
+++ b/rust/flatbuffers/src/array.rs
@@ -37,14 +37,18 @@
 #[allow(clippy::len_without_is_empty)]
 #[allow(clippy::from_over_into)] // TODO(caspern): Go from From to Into.
 impl<'a, T: 'a, const N: usize> Array<'a, T, N> {
+    /// # Safety
+    ///
+    /// buf must be a contiguous array of `T`
+    ///
+    /// # Panics
+    ///
+    /// Panics if `buf.len()` is not `size_of::<T>() * N`
     #[inline(always)]
-    pub fn new(buf: &'a [u8]) -> Self {
-        assert!(size_of::<T>() * N == buf.len());
+    pub unsafe fn new(buf: &'a [u8]) -> Self {
+        assert_eq!(size_of::<T>() * N, buf.len());
 
-        Array {
-            0: buf,
-            1: PhantomData,
-        }
+        Array(buf, PhantomData)
     }
 
     #[inline(always)]
@@ -61,34 +65,39 @@
     pub fn get(&self, idx: usize) -> T::Inner {
         assert!(idx < N);
         let sz = size_of::<T>();
-        T::follow(self.0, sz * idx)
+        // Safety:
+        // self.0 was valid for length `N` on construction and we have verified `idx < N`
+        unsafe { T::follow(self.0, sz * idx) }
     }
 
     #[inline(always)]
     pub fn iter(&self) -> VectorIter<'a, T> {
-        VectorIter::from_slice(self.0, self.len())
+        // Safety:
+        // self.0 was valid for length N on construction
+        unsafe { VectorIter::from_slice(self.0, self.len()) }
     }
 }
 
-impl<'a, T: Follow<'a> + Debug, const N: usize> Into<[T::Inner; N]> for Array<'a, T, N> {
-    #[inline(always)]
-    fn into(self) -> [T::Inner; N] {
-        array_init(|i| self.get(i))
+impl<'a, T: Follow<'a> + Debug, const N: usize> From<Array<'a, T, N>> for [T::Inner; N] {
+    fn from(array: Array<'a, T, N>) -> Self {
+        array_init(|i| array.get(i))
     }
 }
 
-// TODO(caspern): Implement some future safe version of SafeSliceAccess.
-
 /// Implement Follow for all possible Arrays that have Follow-able elements.
 impl<'a, T: Follow<'a> + 'a, const N: usize> Follow<'a> for Array<'a, T, N> {
     type Inner = Array<'a, T, N>;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         Array::new(&buf[loc..loc + N * size_of::<T>()])
     }
 }
 
-pub fn emplace_scalar_array<T: EndianScalar, const N: usize>(
+/// Place an array of EndianScalar into the provided mutable byte slice. Performs
+/// endian conversion, if necessary.
+/// # Safety
+/// Caller must ensure `s.len() >= size_of::<[T; N]>()`
+pub unsafe fn emplace_scalar_array<T: EndianScalar, const N: usize>(
     buf: &mut [u8],
     loc: usize,
     src: &[T; N],
@@ -96,14 +105,12 @@
     let mut buf_ptr = buf[loc..].as_mut_ptr();
     for item in src.iter() {
         let item_le = item.to_little_endian();
-        unsafe {
-            core::ptr::copy_nonoverlapping(
-                &item_le as *const T as *const u8,
-                buf_ptr,
-                size_of::<T>(),
-            );
-            buf_ptr = buf_ptr.add(size_of::<T>());
-        }
+        core::ptr::copy_nonoverlapping(
+            &item_le as *const T::Scalar as *const u8,
+            buf_ptr,
+            size_of::<T::Scalar>(),
+        );
+        buf_ptr = buf_ptr.add(size_of::<T::Scalar>());
     }
 }
 
@@ -124,6 +131,8 @@
     let mut array: core::mem::MaybeUninit<[T; N]> = core::mem::MaybeUninit::uninit();
     let mut ptr_i = array.as_mut_ptr() as *mut T;
 
+    // Safety:
+    // array is aligned by T, and has length N
     unsafe {
         for i in 0..N {
             let value_i = initializer(i);
@@ -134,7 +143,7 @@
     }
 }
 
-#[cfg(feature="serialize")]
+#[cfg(feature = "serialize")]
 impl<'a, T: 'a, const N: usize> serde::ser::Serialize for Array<'a, T, N>
 where
     T: 'a + Follow<'a>,
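
Since `Array::new` is now `unsafe`, callers outside generated code carry the layout precondition themselves. A small sketch (the buffer contents are illustrative):

    use flatbuffers::Array;

    let bytes = 42u64.to_le_bytes(); // exactly size_of::<u64>() * 1 bytes
    // Safety: `bytes` is a contiguous array of one little-endian u64.
    let arr: Array<'_, u64, 1> = unsafe { Array::new(&bytes) };
    assert_eq!(arr.get(0), 42);
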
diff --git a/rust/flatbuffers/src/builder.rs b/rust/flatbuffers/src/builder.rs
index 8a8b58c..7d0f408 100644
--- a/rust/flatbuffers/src/builder.rs
+++ b/rust/flatbuffers/src/builder.rs
@@ -14,26 +14,22 @@
  * limitations under the License.
  */
 
-extern crate smallvec;
-
+#[cfg(not(feature = "std"))]
+use alloc::{vec, vec::Vec};
 use core::cmp::max;
 use core::iter::{DoubleEndedIterator, ExactSizeIterator};
 use core::marker::PhantomData;
 use core::ptr::write_bytes;
-use core::slice::from_raw_parts;
-#[cfg(feature = "no_std")]
-use alloc::{vec, vec::Vec};
 
-use crate::endian_scalar::{emplace_scalar, read_scalar_at};
+use crate::endian_scalar::emplace_scalar;
 use crate::primitives::*;
 use crate::push::{Push, PushAlignment};
+use crate::read_scalar;
 use crate::table::Table;
-use crate::vector::{SafeSliceAccess, Vector};
+use crate::vector::Vector;
 use crate::vtable::{field_index_to_field_offset, VTable};
 use crate::vtable_writer::VTableWriter;
 
-pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;
-
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
 struct FieldLoc {
     off: UOffsetT,
@@ -120,7 +116,9 @@
         // memset only the part of the buffer that could be dirty:
         {
             let to_clear = self.owned_buf.len() - self.head;
-            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
+            let ptr = self.owned_buf[self.head..].as_mut_ptr();
+            // Safety:
+            // Verified ptr is valid for `to_clear` above
             unsafe {
                 write_bytes(ptr, 0, to_clear);
             }
@@ -152,8 +150,10 @@
         self.align(sz, P::alignment());
         self.make_space(sz);
         {
-            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
-            x.push(dst, rest);
+            let (dst, rest) = self.owned_buf[self.head..].split_at_mut(sz);
+            // Safety:
+            // Called make_space above
+            unsafe { x.push(dst, rest.len()) };
         }
         WIPOffset::new(self.used_space() as UOffsetT)
     }
@@ -309,73 +309,32 @@
         WIPOffset::new(self.used_space() as UOffsetT)
     }
 
-    /// Create a vector by memcpy'ing. This is much faster than calling
-    /// `create_vector`, but the underlying type must be represented as
-    /// little-endian on the host machine. This property is encoded in the
-    /// type system through the SafeSliceAccess trait. The following types are
-    /// always safe, on any platform: bool, u8, i8, and any
-    /// FlatBuffers-generated struct.
-    #[inline]
-    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
-        &'a mut self,
-        items: &'b [T],
-    ) -> WIPOffset<Vector<'fbb, T>> {
-        self.assert_not_nested(
-            "create_vector_direct can not be called when a table or vector is under construction",
-        );
-        let elem_size = T::size();
-        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
-
-        let bytes = {
-            let ptr = items.as_ptr() as *const T as *const u8;
-            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
-        };
-        self.push_bytes_unprefixed(bytes);
-        self.push(items.len() as UOffsetT);
-
-        WIPOffset::new(self.used_space() as UOffsetT)
-    }
-
-    /// Create a vector of strings.
-    ///
-    /// Speed-sensitive users may wish to reduce memory usage by creating the
-    /// vector manually: use `start_vector`, `push`, and `end_vector`.
-    #[inline]
-    pub fn create_vector_of_strings<'a, 'b>(
-        &'a mut self,
-        xs: &'b [&'b str],
-    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
-        self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
-        // internally, smallvec can be a stack-allocated or heap-allocated vector:
-        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
-        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
-            smallvec::SmallVec::with_capacity(xs.len());
-        unsafe {
-            offsets.set_len(xs.len());
-        }
-
-        // note that this happens in reverse, because the buffer is built back-to-front:
-        for (i, &s) in xs.iter().enumerate().rev() {
-            let o = self.create_string(s);
-            offsets[i] = o;
-        }
-        self.create_vector(&offsets[..])
-    }
-
     /// Create a vector of Push-able objects.
     ///
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
+    pub fn create_vector<'a: 'b, 'b, T: Push + 'b>(
         &'a mut self,
         items: &'b [T],
     ) -> WIPOffset<Vector<'fbb, T::Output>> {
         let elem_size = T::size();
-        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
-        for i in (0..items.len()).rev() {
-            self.push(items[i]);
+        let slice_size = items.len() * elem_size;
+        self.align(slice_size, T::alignment().max_of(SIZE_UOFFSET));
+        self.ensure_capacity(slice_size + UOffsetT::size());
+
+        self.head -= slice_size;
+        let mut written_len = self.owned_buf.len() - self.head;
+
+        let buf = &mut self.owned_buf[self.head..self.head + slice_size];
+        for (item, out) in items.iter().zip(buf.chunks_exact_mut(elem_size)) {
+            written_len -= elem_size;
+
+            // Safety:
+            // Called ensure_capacity and aligned to T above
+            unsafe { item.push(out, written_len) };
         }
+
         WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
     }
 
@@ -384,17 +343,18 @@
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector_from_iter<T: Push + Copy>(
+    pub fn create_vector_from_iter<T: Push>(
         &mut self,
         items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
     ) -> WIPOffset<Vector<'fbb, T::Output>> {
         let elem_size = T::size();
-        let len = items.len();
-        self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
+        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
+        let mut actual = 0;
         for item in items.rev() {
             self.push(item);
+            actual += 1;
         }
-        WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
+        WIPOffset::new(self.push::<UOffsetT>(actual).value())
     }
 
     /// Set whether default values are stored.
@@ -443,7 +403,15 @@
         assert_msg_name: &'static str,
     ) {
         let idx = self.used_space() - tab_revloc.value() as usize;
-        let tab = Table::new(&self.owned_buf[self.head..], idx);
+
+        // Safety:
+        // The value of TableFinishedWIPOffset is the offset from the end of owned_buf
+        // to an SOffsetT pointing to a valid VTable
+        //
+        // `self.owned_buf.len() = self.used_space() + self.head`
+        // `self.owned_buf.len() - tab_revloc = self.used_space() - tab_revloc + self.head`
+        // `self.owned_buf.len() - tab_revloc = idx + self.head`
+        let tab = unsafe { Table::new(&self.owned_buf[self.head..], idx) };
         let o = tab.vtable().get(slot_byte_loc) as usize;
         assert!(o != 0, "missing required field {}", assert_msg_name);
     }
@@ -560,11 +528,15 @@
             }
         }
         let new_vt_bytes = &self.owned_buf[vt_start_pos..vt_end_pos];
-        let found = self.written_vtable_revpos.binary_search_by(|old_vtable_revpos: &UOffsetT| {
-            let old_vtable_pos = self.owned_buf.len() - *old_vtable_revpos as usize;
-            let old_vtable = VTable::init(&self.owned_buf, old_vtable_pos);
-            new_vt_bytes.cmp(old_vtable.as_bytes())
-        });
+        let found = self
+            .written_vtable_revpos
+            .binary_search_by(|old_vtable_revpos: &UOffsetT| {
+                let old_vtable_pos = self.owned_buf.len() - *old_vtable_revpos as usize;
+                // Safety:
+                // Already written vtables are valid by construction
+                let old_vtable = unsafe { VTable::init(&self.owned_buf, old_vtable_pos) };
+                new_vt_bytes.cmp(old_vtable.as_bytes())
+            });
         let final_vtable_revpos = match found {
             Ok(i) => {
                 // The new vtable is a duplicate so clear it.
@@ -581,12 +553,22 @@
         };
         // Write signed offset from table to its vtable.
         let table_pos = self.owned_buf.len() - object_revloc_to_vtable.value() as usize;
-        let tmp_soffset_to_vt = unsafe { read_scalar_at::<UOffsetT>(&self.owned_buf, table_pos) };
-        debug_assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
+        if cfg!(debug_assertions) {
+            // Safety:
+            // Verified slice length
+            let tmp_soffset_to_vt = unsafe {
+                read_scalar::<UOffsetT>(&self.owned_buf[table_pos..table_pos + SIZE_UOFFSET])
+            };
+            assert_eq!(tmp_soffset_to_vt, 0xF0F0_F0F0);
+        }
+
+        let buf = &mut self.owned_buf[table_pos..table_pos + SIZE_SOFFSET];
+        // Safety:
+        // Verified length of buf above
         unsafe {
             emplace_scalar::<SOffsetT>(
-                &mut self.owned_buf[table_pos..table_pos + SIZE_SOFFSET],
-                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT
+                buf,
+                final_vtable_revpos as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
             );
         }
 
@@ -623,7 +605,9 @@
         }
         // finally, zero out the old end data.
         {
-            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
+            let ptr = self.owned_buf[..middle].as_mut_ptr();
+            // Safety:
+            // ptr is byte aligned and of length middle
             unsafe {
                 write_bytes(ptr, 0, middle);
             }
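
With `create_vector_of_strings` and `create_vector_direct` removed, vectors of strings go through `create_string` plus the generalized `create_vector`; the blanket `impl Push for &T` later in this diff keeps the pattern ergonomic. A sketch under those assumptions:

    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    let names = ["alice", "bob"];
    let offsets: Vec<_> = names.iter().map(|s| fbb.create_string(s)).collect();
    let vector_of_strings = fbb.create_vector(&offsets);
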
diff --git a/rust/flatbuffers/src/endian_scalar.rs b/rust/flatbuffers/src/endian_scalar.rs
index 5f50cf1..48cb83e 100644
--- a/rust/flatbuffers/src/endian_scalar.rs
+++ b/rust/flatbuffers/src/endian_scalar.rs
@@ -17,6 +17,24 @@
 
 use core::mem::size_of;
 
+mod private {
+    /// Types that are trivially transmutable are those where any combination of bits
+    /// represents a valid value of that type
+    ///
+    /// For example integral types are TriviallyTransmutable as all bit patterns are valid,
+    /// however, `bool` is not trivially transmutable as only `0` and `1` are valid
+    pub trait TriviallyTransmutable {}
+
+    impl TriviallyTransmutable for i8 {}
+    impl TriviallyTransmutable for i16 {}
+    impl TriviallyTransmutable for i32 {}
+    impl TriviallyTransmutable for i64 {}
+    impl TriviallyTransmutable for u8 {}
+    impl TriviallyTransmutable for u16 {}
+    impl TriviallyTransmutable for u32 {}
+    impl TriviallyTransmutable for u64 {}
+}
+
 /// Trait for values that must be stored in little-endian byte order, but
 /// might be represented in memory as big-endian. Every type that implements
 /// EndianScalar is a valid FlatBuffers scalar value.
@@ -28,144 +46,118 @@
 /// "too much". For example, num-traits provides i128 support, but that is an
 /// invalid FlatBuffers type.
 pub trait EndianScalar: Sized + PartialEq + Copy + Clone {
-    fn to_little_endian(self) -> Self;
-    fn from_little_endian(self) -> Self;
-}
+    type Scalar: private::TriviallyTransmutable;
 
-/// Macro for implementing a no-op endian conversion. This is used for types
-/// that are one byte wide.
-macro_rules! impl_endian_scalar_noop {
-    ($ty:ident) => {
-        impl EndianScalar for $ty {
-            #[inline]
-            fn to_little_endian(self) -> Self {
-                self
-            }
-            #[inline]
-            fn from_little_endian(self) -> Self {
-                self
-            }
-        }
-    };
+    fn to_little_endian(self) -> Self::Scalar;
+
+    fn from_little_endian(v: Self::Scalar) -> Self;
 }
 
 /// Macro for implementing an endian conversion using the stdlib `to_le` and
 /// `from_le` functions. This is used for integer types. It is not used for
 /// floats, because the `to_le` and `from_le` are not implemented for them in
 /// the stdlib.
-macro_rules! impl_endian_scalar_stdlib_le_conversion {
+macro_rules! impl_endian_scalar {
     ($ty:ident) => {
         impl EndianScalar for $ty {
+            type Scalar = Self;
+
             #[inline]
-            fn to_little_endian(self) -> Self {
+            fn to_little_endian(self) -> Self::Scalar {
                 Self::to_le(self)
             }
             #[inline]
-            fn from_little_endian(self) -> Self {
-                Self::from_le(self)
+            fn from_little_endian(v: Self::Scalar) -> Self {
+                Self::from_le(v)
             }
         }
     };
 }
 
-impl_endian_scalar_noop!(bool);
-impl_endian_scalar_noop!(u8);
-impl_endian_scalar_noop!(i8);
+impl_endian_scalar!(u8);
+impl_endian_scalar!(i8);
+impl_endian_scalar!(u16);
+impl_endian_scalar!(u32);
+impl_endian_scalar!(u64);
+impl_endian_scalar!(i16);
+impl_endian_scalar!(i32);
+impl_endian_scalar!(i64);
 
-impl_endian_scalar_stdlib_le_conversion!(u16);
-impl_endian_scalar_stdlib_le_conversion!(u32);
-impl_endian_scalar_stdlib_le_conversion!(u64);
-impl_endian_scalar_stdlib_le_conversion!(i16);
-impl_endian_scalar_stdlib_le_conversion!(i32);
-impl_endian_scalar_stdlib_le_conversion!(i64);
+impl EndianScalar for bool {
+    type Scalar = u8;
+
+    fn to_little_endian(self) -> Self::Scalar {
+        self as u8
+    }
+
+    fn from_little_endian(v: Self::Scalar) -> Self {
+        v != 0
+    }
+}
 
 impl EndianScalar for f32 {
+    type Scalar = u32;
     /// Convert f32 from host endian-ness to little-endian.
     #[inline]
-    fn to_little_endian(self) -> Self {
-        #[cfg(target_endian = "little")]
-        {
-            self
-        }
-        #[cfg(not(target_endian = "little"))]
-        {
-            byte_swap_f32(self)
-        }
+    fn to_little_endian(self) -> u32 {
+        // Floats and Ints have the same endianness on all supported platforms.
+        // <https://doc.rust-lang.org/std/primitive.f32.html#method.from_bits>
+        self.to_bits().to_le()
     }
     /// Convert f32 from little-endian to host endian-ness.
     #[inline]
-    fn from_little_endian(self) -> Self {
-        #[cfg(target_endian = "little")]
-        {
-            self
-        }
-        #[cfg(not(target_endian = "little"))]
-        {
-            byte_swap_f32(self)
-        }
+    fn from_little_endian(v: u32) -> Self {
+        // Floats and Ints have the same endianness on all supported platforms.
+        // <https://doc.rust-lang.org/std/primitive.f32.html#method.from_bits>
+        f32::from_bits(u32::from_le(v))
     }
 }
 
 impl EndianScalar for f64 {
+    type Scalar = u64;
+
     /// Convert f64 from host endian-ness to little-endian.
     #[inline]
-    fn to_little_endian(self) -> Self {
-        #[cfg(target_endian = "little")]
-        {
-            self
-        }
-        #[cfg(not(target_endian = "little"))]
-        {
-            byte_swap_f64(self)
-        }
+    fn to_little_endian(self) -> u64 {
+        // Floats and Ints have the same endianness on all supported platforms.
+        // <https://doc.rust-lang.org/std/primitive.f64.html#method.from_bits>
+        self.to_bits().to_le()
     }
     /// Convert f64 from little-endian to host endian-ness.
     #[inline]
-    fn from_little_endian(self) -> Self {
-        #[cfg(target_endian = "little")]
-        {
-            self
-        }
-        #[cfg(not(target_endian = "little"))]
-        {
-            byte_swap_f64(self)
-        }
+    fn from_little_endian(v: u64) -> Self {
+        // Floats and Ints have the same endianness on all supported platforms.
+        // <https://doc.rust-lang.org/std/primitive.f64.html#method.from_bits>
+        f64::from_bits(u64::from_le(v))
     }
 }
 
-/// Swaps the bytes of an f32.
-#[allow(dead_code)]
-#[inline]
-pub fn byte_swap_f32(x: f32) -> f32 {
-    f32::from_bits(x.to_bits().swap_bytes())
-}
-
-/// Swaps the bytes of an f64.
-#[allow(dead_code)]
-#[inline]
-pub fn byte_swap_f64(x: f64) -> f64 {
-    f64::from_bits(x.to_bits().swap_bytes())
-}
-
 /// Place an EndianScalar into the provided mutable byte slice. Performs
 /// endian conversion, if necessary.
 /// # Safety
-/// Caller must ensure `s.len() > size_of::<T>()`
-/// and `x` does not overlap with `s`.
+/// Caller must ensure `s.len() >= size_of::<T>()`
 #[inline]
 pub unsafe fn emplace_scalar<T: EndianScalar>(s: &mut [u8], x: T) {
+    let size = size_of::<T::Scalar>();
+    debug_assert!(
+        s.len() >= size,
+        "insufficient capacity for emplace_scalar, needed {} got {}",
+        size,
+        s.len()
+    );
+
     let x_le = x.to_little_endian();
     core::ptr::copy_nonoverlapping(
-        &x_le as *const T as *const u8,
+        &x_le as *const T::Scalar as *const u8,
         s.as_mut_ptr() as *mut u8,
-        size_of::<T>(),
+        size,
     );
 }
 
 /// Read an EndianScalar from the provided byte slice at the specified location.
 /// Performs endian conversion, if necessary.
 /// # Safety
-/// Caller must ensure `s.len() > loc + size_of::<T>()`.
+/// Caller must ensure `s.len() >= loc + size_of::<T>()`.
 #[inline]
 pub unsafe fn read_scalar_at<T: EndianScalar>(s: &[u8], loc: usize) -> T {
     read_scalar(&s[loc..])
@@ -177,8 +169,16 @@
 /// Caller must ensure `s.len() > size_of::<T>()`.
 #[inline]
 pub unsafe fn read_scalar<T: EndianScalar>(s: &[u8]) -> T {
-    let mut mem = core::mem::MaybeUninit::<T>::uninit();
+    let size = size_of::<T::Scalar>();
+    debug_assert!(
+        s.len() >= size,
+        "insufficient capacity for emplace_scalar, needed {} got {}",
+        size,
+        s.len()
+    );
+
+    let mut mem = core::mem::MaybeUninit::<T::Scalar>::uninit();
     // Since [u8] has alignment 1, we copy it into T which may have higher alignment.
-    core::ptr::copy_nonoverlapping(s.as_ptr(), mem.as_mut_ptr() as *mut u8, size_of::<T>());
-    mem.assume_init().from_little_endian()
+    core::ptr::copy_nonoverlapping(s.as_ptr(), mem.as_mut_ptr() as *mut u8, size);
+    T::from_little_endian(mem.assume_init())
 }
diff --git a/rust/flatbuffers/src/follow.rs b/rust/flatbuffers/src/follow.rs
index d1d6483..7488487 100644
--- a/rust/flatbuffers/src/follow.rs
+++ b/rust/flatbuffers/src/follow.rs
@@ -29,7 +29,11 @@
 /// continue traversing the FlatBuffer.
 pub trait Follow<'buf> {
     type Inner;
-    fn follow(buf: &'buf [u8], loc: usize) -> Self::Inner;
+    /// # Safety
+    ///
+    /// `buf[loc..]` must contain a valid value of `Self` and anything it
+    /// transitively refers to by offset must also be valid
+    unsafe fn follow(buf: &'buf [u8], loc: usize) -> Self::Inner;
 }
 
 /// FollowStart wraps a Follow impl in a struct type. This can make certain
@@ -39,17 +43,21 @@
 impl<'a, T: Follow<'a> + 'a> FollowStart<T> {
     #[inline]
     pub fn new() -> Self {
-        Self { 0: PhantomData }
+        Self(PhantomData)
     }
+
+    /// # Safety
+    ///
+    /// `buf[loc..]` must contain a valid value of `T`
     #[inline]
-    pub fn self_follow(&'a self, buf: &'a [u8], loc: usize) -> T::Inner {
+    pub unsafe fn self_follow(&'a self, buf: &'a [u8], loc: usize) -> T::Inner {
         T::follow(buf, loc)
     }
 }
 impl<'a, T: Follow<'a>> Follow<'a> for FollowStart<T> {
     type Inner = T::Inner;
     #[inline]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         T::follow(buf, loc)
     }
 }
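
Because `Follow::follow` is now `unsafe`, any manual (non-generated) traversal states its safety obligation explicitly. A minimal sketch over a primitive:

    use flatbuffers::Follow;

    let buf = 7u32.to_le_bytes();
    // Safety: buf[0..] holds a valid little-endian u32.
    let value = unsafe { <u32 as Follow>::follow(&buf, 0) };
    assert_eq!(value, 7);
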
diff --git a/rust/flatbuffers/src/get_root.rs b/rust/flatbuffers/src/get_root.rs
index 3305efa..372d02f 100644
--- a/rust/flatbuffers/src/get_root.rs
+++ b/rust/flatbuffers/src/get_root.rs
@@ -45,6 +45,8 @@
 {
     let mut v = Verifier::new(opts, data);
     <ForwardsUOffset<T>>::run_verifier(&mut v, 0)?;
+    // Safety:
+    // Run verifier above
     Ok(unsafe { root_unchecked::<T>(data) })
 }
 
@@ -75,6 +77,8 @@
 {
     let mut v = Verifier::new(opts, data);
     <SkipSizePrefix<ForwardsUOffset<T>>>::run_verifier(&mut v, 0)?;
+    // Safety:
+    // Run verifier above
     Ok(unsafe { size_prefixed_root_unchecked::<T>(data) })
 }
 
diff --git a/rust/flatbuffers/src/lib.rs b/rust/flatbuffers/src/lib.rs
index bc114e6..deb8ff7 100644
--- a/rust/flatbuffers/src/lib.rs
+++ b/rust/flatbuffers/src/lib.rs
@@ -28,9 +28,10 @@
 //! At this time, to generate Rust code, you will need the latest `master` version of `flatc`, available from here: <https://github.com/google/flatbuffers>
 //! (On OSX, you can install FlatBuffers from `HEAD` with the Homebrew package manager.)
 
-#![cfg_attr(feature = "no_std", no_std)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(all(nightly, not(feature = "std")), feature(error_in_core))]
 
-#[cfg(feature = "no_std")]
+#[cfg(not(feature = "std"))]
 extern crate alloc;
 
 mod array;
@@ -48,14 +49,12 @@
 
 pub use crate::array::{array_init, emplace_scalar_array, Array};
 pub use crate::builder::FlatBufferBuilder;
-pub use crate::endian_scalar::{
-    byte_swap_f32, byte_swap_f64, emplace_scalar, read_scalar, read_scalar_at, EndianScalar,
-};
+pub use crate::endian_scalar::{emplace_scalar, read_scalar, read_scalar_at, EndianScalar};
 pub use crate::follow::{Follow, FollowStart};
 pub use crate::primitives::*;
 pub use crate::push::Push;
 pub use crate::table::{buffer_has_identifier, Table};
-pub use crate::vector::{follow_cast_ref, SafeSliceAccess, Vector, VectorIter};
+pub use crate::vector::{follow_cast_ref, Vector, VectorIter};
 pub use crate::verifier::{
     ErrorTraceDetail, InvalidFlatbuffer, SimpleToVerifyInSlice, Verifiable, Verifier,
     VerifierOptions,
@@ -64,6 +63,4 @@
 pub use bitflags;
 pub use get_root::*;
 
-// TODO(rw): Unify `create_vector` and `create_vector_direct` by using
-//           `Into<Vector<...>>`.
 // TODO(rw): Split fill ops in builder into fill_small, fill_big like in C++.
diff --git a/rust/flatbuffers/src/primitives.rs b/rust/flatbuffers/src/primitives.rs
index 72764b2..ac35511 100644
--- a/rust/flatbuffers/src/primitives.rs
+++ b/rust/flatbuffers/src/primitives.rs
@@ -112,10 +112,7 @@
     /// Create a new WIPOffset.
     #[inline]
     pub fn new(o: UOffsetT) -> WIPOffset<T> {
-        WIPOffset {
-            0: o,
-            1: PhantomData,
-        }
+        WIPOffset(o, PhantomData)
     }
 
     /// Return a wrapped value that brings its meaning as a union WIPOffset
@@ -135,11 +132,9 @@
     type Output = ForwardsUOffset<T>;
 
     #[inline(always)]
-    fn push(&self, dst: &mut [u8], rest: &[u8]) {
-        let n = (SIZE_UOFFSET + rest.len() - self.value() as usize) as UOffsetT;
-        unsafe {
-            emplace_scalar::<UOffsetT>(dst, n);
-        }
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize) {
+        let n = (SIZE_UOFFSET + written_len - self.value() as usize) as UOffsetT;
+        emplace_scalar::<UOffsetT>(dst, n);
     }
 }
 
@@ -147,8 +142,8 @@
     type Output = Self;
 
     #[inline(always)]
-    fn push(&self, dst: &mut [u8], rest: &[u8]) {
-        self.value().push(dst, rest);
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize) {
+        self.value().push(dst, written_len);
     }
 }
 
@@ -179,9 +174,9 @@
 impl<'a, T: Follow<'a>> Follow<'a> for ForwardsUOffset<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         let slice = &buf[loc..loc + SIZE_UOFFSET];
-        let off = unsafe { read_scalar::<u32>(slice) as usize };
+        let off = read_scalar::<u32>(slice) as usize;
         T::follow(buf, loc + off)
     }
 }
@@ -200,9 +195,9 @@
 impl<'a, T: Follow<'a>> Follow<'a> for ForwardsVOffset<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         let slice = &buf[loc..loc + SIZE_VOFFSET];
-        let off = unsafe { read_scalar::<VOffsetT>(slice) as usize };
+        let off = read_scalar::<VOffsetT>(slice) as usize;
         T::follow(buf, loc + off)
     }
 }
@@ -211,8 +206,8 @@
     type Output = Self;
 
     #[inline]
-    fn push(&self, dst: &mut [u8], rest: &[u8]) {
-        self.value().push(dst, rest);
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize) {
+        self.value().push(dst, written_len);
     }
 }
 
@@ -230,9 +225,9 @@
 impl<'a, T: Follow<'a>> Follow<'a> for BackwardsSOffset<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         let slice = &buf[loc..loc + SIZE_SOFFSET];
-        let off = unsafe { read_scalar::<SOffsetT>(slice) };
+        let off = read_scalar::<SOffsetT>(slice);
         T::follow(buf, (loc as SOffsetT - off) as usize)
     }
 }
@@ -241,8 +236,8 @@
     type Output = Self;
 
     #[inline]
-    fn push(&self, dst: &mut [u8], rest: &[u8]) {
-        self.value().push(dst, rest);
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize) {
+        self.value().push(dst, written_len);
     }
 }
 
@@ -252,7 +247,7 @@
 impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipSizePrefix<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         T::follow(buf, loc + SIZE_SIZEPREFIX)
     }
 }
@@ -263,7 +258,7 @@
 impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipRootOffset<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         T::follow(buf, loc + SIZE_UOFFSET)
     }
 }
@@ -274,7 +269,7 @@
 impl<'a> Follow<'a> for FileIdentifier {
     type Inner = &'a [u8];
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         &buf[loc..loc + FILE_IDENTIFIER_LENGTH]
     }
 }
@@ -286,7 +281,7 @@
 impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipFileIdentifier<T> {
     type Inner = T::Inner;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         T::follow(buf, loc + FILE_IDENTIFIER_LENGTH)
     }
 }
@@ -294,8 +289,8 @@
 impl<'a> Follow<'a> for bool {
     type Inner = bool;
     #[inline(always)]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
-        unsafe { read_scalar_at::<u8>(buf, loc) != 0 }
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+        read_scalar_at::<u8>(buf, loc) != 0
     }
 }
 
@@ -309,8 +304,8 @@
         impl<'a> Follow<'a> for $ty {
             type Inner = $ty;
             #[inline(always)]
-            fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
-                unsafe { read_scalar_at::<$ty>(buf, loc) }
+            unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+                read_scalar_at::<$ty>(buf, loc)
             }
         }
     };
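
The `written_len` argument replaces the old `rest: &[u8]` slice: the buffer grows back to front, so the stored forward offset is the number of bytes written after the target plus the offset's own four bytes. A worked sketch of that arithmetic (hypothetical helper mirroring the `push` bodies above):

    // revpos: the target's distance from the end of the finished buffer;
    // written_len: bytes already written after the location of this push.
    fn forwards_uoffset(written_len: usize, revpos: usize) -> u32 {
        (flatbuffers::SIZE_UOFFSET + written_len - revpos) as u32
    }

    // A target 8 bytes from the end, pushed when 8 bytes are already written,
    // stores 4: skip this u32 itself and land on the target.
    // forwards_uoffset(8, 8) == 4
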
diff --git a/rust/flatbuffers/src/push.rs b/rust/flatbuffers/src/push.rs
index 8bb8fe9..20bacd8 100644
--- a/rust/flatbuffers/src/push.rs
+++ b/rust/flatbuffers/src/push.rs
@@ -24,7 +24,11 @@
 /// types.
 pub trait Push: Sized {
     type Output;
-    fn push(&self, dst: &mut [u8], _rest: &[u8]);
+
+    /// # Safety
+    ///
+    /// dst is aligned to [`Self::alignment`] and has length greater than or equal to [`Self::size`]
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize);
     #[inline]
     fn size() -> usize {
         size_of::<Self::Output>()
@@ -35,13 +39,29 @@
     }
 }
 
+impl<'a, T: Push> Push for &'a T {
+    type Output = T::Output;
+
+    unsafe fn push(&self, dst: &mut [u8], written_len: usize) {
+        T::push(self, dst, written_len)
+    }
+
+    fn size() -> usize {
+        T::size()
+    }
+
+    fn alignment() -> PushAlignment {
+        T::alignment()
+    }
+}
+
 /// Ensure Push alignment calculations are typesafe (because this helps reduce
 /// implementation issues when using FlatBufferBuilder::align).
 pub struct PushAlignment(usize);
 impl PushAlignment {
     #[inline]
     pub fn new(x: usize) -> Self {
-        PushAlignment { 0: x }
+        PushAlignment(x)
     }
     #[inline]
     pub fn value(&self) -> usize {
@@ -60,10 +80,8 @@
             type Output = $ty;
 
             #[inline]
-            fn push(&self, dst: &mut [u8], _rest: &[u8]) {
-                unsafe {
-                    emplace_scalar::<$ty>(dst, *self);
-                }
+            unsafe fn push(&self, dst: &mut [u8], _written_len: usize) {
+                emplace_scalar::<$ty>(dst, *self);
             }
         }
     };
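
The blanket `impl Push for &T` above lets the builder push through references, e.g. feeding `create_vector_from_iter` a borrowing iterator without first copying into an owned buffer. Sketch:

    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    let xs: Vec<u32> = vec![1, 2, 3];
    // xs.iter() yields &u32, which is Push via the new blanket impl.
    let v = fbb.create_vector_from_iter(xs.iter());
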
diff --git a/rust/flatbuffers/src/table.rs b/rust/flatbuffers/src/table.rs
index cfb8559..f5001f6 100644
--- a/rust/flatbuffers/src/table.rs
+++ b/rust/flatbuffers/src/table.rs
@@ -18,23 +18,36 @@
 use crate::primitives::*;
 use crate::vtable::VTable;
 
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct Table<'a> {
-    pub buf: &'a [u8],
-    pub loc: usize,
+    buf: &'a [u8],
+    loc: usize,
 }
 
 impl<'a> Table<'a> {
+    /// # Safety
+    ///
+    /// `buf` must contain a `soffset_t` at `loc`, which points to a valid vtable
     #[inline]
-    pub fn new(buf: &'a [u8], loc: usize) -> Self {
+    pub unsafe fn new(buf: &'a [u8], loc: usize) -> Self {
         Table { buf, loc }
     }
+
     #[inline]
     pub fn vtable(&self) -> VTable<'a> {
-        <BackwardsSOffset<VTable<'a>>>::follow(self.buf, self.loc)
+        // Safety:
+        // Table::new is created with a valid buf and location
+        unsafe { <BackwardsSOffset<VTable<'a>>>::follow(self.buf, self.loc) }
     }
+
+    /// Retrieves the value at the provided `slot_byte_loc` returning `default`
+    /// if no value present
+    ///
+    /// # Safety
+    ///
+    /// The value of the corresponding slot must have type T
     #[inline]
-    pub fn get<T: Follow<'a> + 'a>(
+    pub unsafe fn get<T: Follow<'a> + 'a>(
         &self,
         slot_byte_loc: VOffsetT,
         default: Option<T::Inner>,
@@ -50,19 +63,26 @@
 impl<'a> Follow<'a> for Table<'a> {
     type Inner = Table<'a>;
     #[inline]
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         Table { buf, loc }
     }
 }
 
+/// Returns true if `data`'s file identifier (after the root offset, and the size prefix if present) matches `ident`
 #[inline]
 pub fn buffer_has_identifier(data: &[u8], ident: &str, size_prefixed: bool) -> bool {
     assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
 
     let got = if size_prefixed {
-        <SkipSizePrefix<SkipRootOffset<FileIdentifier>>>::follow(data, 0)
+        assert!(data.len() >= SIZE_SIZEPREFIX + SIZE_UOFFSET + FILE_IDENTIFIER_LENGTH);
+        // Safety:
+        // Verified data has sufficient bytes
+        unsafe { <SkipSizePrefix<SkipRootOffset<FileIdentifier>>>::follow(data, 0) }
     } else {
-        <SkipRootOffset<FileIdentifier>>::follow(data, 0)
+        assert!(data.len() >= SIZE_UOFFSET + FILE_IDENTIFIER_LENGTH);
+        // Safety:
+        // Verified data has sufficient bytes
+        unsafe { <SkipRootOffset<FileIdentifier>>::follow(data, 0) }
     };
 
     ident.as_bytes() == got
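
The added asserts above make the identifier check panic on undersized buffers instead of slicing out of bounds; usage is unchanged. Sketch with an illustrative 4-byte identifier:

    fn has_monster_ident(data: &[u8]) -> bool {
        // "MONS" is a hypothetical identifier, not one from this diff.
        flatbuffers::buffer_has_identifier(data, "MONS", false)
    }
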
diff --git a/rust/flatbuffers/src/vector.rs b/rust/flatbuffers/src/vector.rs
index da04ef6..b486ff3 100644
--- a/rust/flatbuffers/src/vector.rs
+++ b/rust/flatbuffers/src/vector.rs
@@ -17,13 +17,10 @@
 use core::fmt::{Debug, Formatter, Result};
 use core::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator};
 use core::marker::PhantomData;
-use core::mem::size_of;
-use core::slice::from_raw_parts;
+use core::mem::{align_of, size_of};
 use core::str::from_utf8_unchecked;
 
 use crate::endian_scalar::read_scalar_at;
-#[cfg(target_endian = "little")]
-use crate::endian_scalar::EndianScalar;
 use crate::follow::Follow;
 use crate::primitives::*;
 
@@ -55,6 +52,7 @@
 // and Clone for `T: Copy` and `T: Clone` respectively. However `Vector<'a, T>`
 // can always be copied, no matter that `T` you have.
 impl<'a, T> Copy for Vector<'a, T> {}
+
 impl<'a, T> Clone for Vector<'a, T> {
     fn clone(&self) -> Self {
         *self
@@ -62,32 +60,46 @@
 }
 
 impl<'a, T: 'a> Vector<'a, T> {
+    /// # Safety
+    ///
+    /// `buf` contains a valid vector at `loc` consisting of
+    ///
+    /// - UOffsetT element count
+    /// - Consecutive list of `T` elements
     #[inline(always)]
-    pub fn new(buf: &'a [u8], loc: usize) -> Self {
-        Vector {
-            0: buf,
-            1: loc,
-            2: PhantomData,
-        }
+    pub unsafe fn new(buf: &'a [u8], loc: usize) -> Self {
+        Vector(buf, loc, PhantomData)
     }
 
     #[inline(always)]
     pub fn len(&self) -> usize {
+        // Safety:
+        // Valid vector at time of construction starting with UOffsetT element count
         unsafe { read_scalar_at::<UOffsetT>(self.0, self.1) as usize }
     }
+
     #[inline(always)]
     pub fn is_empty(&self) -> bool {
         self.len() == 0
     }
+
+    #[inline(always)]
+    pub fn bytes(&self) -> &'a [u8] {
+        let sz = size_of::<T>();
+        let len = self.len();
+        &self.0[self.1 + SIZE_UOFFSET..self.1 + SIZE_UOFFSET + sz * len]
+    }
 }
 
 impl<'a, T: Follow<'a> + 'a> Vector<'a, T> {
     #[inline(always)]
     pub fn get(&self, idx: usize) -> T::Inner {
-        assert!(idx < self.len() as usize);
+        assert!(idx < self.len());
         let sz = size_of::<T>();
         debug_assert!(sz > 0);
-        T::follow(self.0, self.1 as usize + SIZE_UOFFSET + sz * idx)
+        // Safety:
+        // Valid vector at time of construction, verified that idx < element count
+        unsafe { T::follow(self.0, self.1 as usize + SIZE_UOFFSET + sz * idx) }
     }
 
     #[inline(always)]
@@ -96,84 +108,40 @@
     }
 }
 
-pub trait SafeSliceAccess {}
-impl<'a, T: SafeSliceAccess + 'a> Vector<'a, T> {
-    pub fn safe_slice(self) -> &'a [T] {
-        let buf = self.0;
-        let loc = self.1;
-        let sz = size_of::<T>();
-        debug_assert!(sz > 0);
-        let len = unsafe { read_scalar_at::<UOffsetT>(buf, loc) } as usize;
-        let data_buf = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len * sz];
-        let ptr = data_buf.as_ptr() as *const T;
-        let s: &'a [T] = unsafe { from_raw_parts(ptr, len) };
-        s
-    }
-}
-
-impl SafeSliceAccess for u8 {}
-impl SafeSliceAccess for i8 {}
-impl SafeSliceAccess for bool {}
-
-// TODO(caspern): Get rid of this. Conditional compliation is unnecessary complexity.
-// Vectors of primitives just don't work on big endian machines!!!
-#[cfg(target_endian = "little")]
-mod le_safe_slice_impls {
-    impl super::SafeSliceAccess for u16 {}
-    impl super::SafeSliceAccess for u32 {}
-    impl super::SafeSliceAccess for u64 {}
-
-    impl super::SafeSliceAccess for i16 {}
-    impl super::SafeSliceAccess for i32 {}
-    impl super::SafeSliceAccess for i64 {}
-
-    impl super::SafeSliceAccess for f32 {}
-    impl super::SafeSliceAccess for f64 {}
-}
-
-#[cfg(target_endian = "little")]
-pub use self::le_safe_slice_impls::*;
-
-pub fn follow_cast_ref<'a, T: Sized + 'a>(buf: &'a [u8], loc: usize) -> &'a T {
+/// # Safety
+///
+/// `buf` must contain a value of `T` at `loc`, and `T` must have alignment 1
+pub unsafe fn follow_cast_ref<'a, T: Sized + 'a>(buf: &'a [u8], loc: usize) -> &'a T {
+    assert_eq!(align_of::<T>(), 1);
     let sz = size_of::<T>();
     let buf = &buf[loc..loc + sz];
     let ptr = buf.as_ptr() as *const T;
-    unsafe { &*ptr }
+    // SAFETY
+    // buf contains a value at loc of type T and T has no alignment requirements
+    &*ptr
 }
 
 impl<'a> Follow<'a> for &'a str {
     type Inner = &'a str;
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
-        let len = unsafe { read_scalar_at::<UOffsetT>(buf, loc) } as usize;
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+        let len = read_scalar_at::<UOffsetT>(buf, loc) as usize;
         let slice = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len];
-        unsafe { from_utf8_unchecked(slice) }
+        from_utf8_unchecked(slice)
     }
 }
 
-#[cfg(target_endian = "little")]
-fn follow_slice_helper<T>(buf: &[u8], loc: usize) -> &[T] {
-    let sz = size_of::<T>();
-    debug_assert!(sz > 0);
-    let len = unsafe { read_scalar_at::<UOffsetT>(buf, loc) as usize };
-    let data_buf = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len * sz];
-    let ptr = data_buf.as_ptr() as *const T;
-    let s: &[T] = unsafe { from_raw_parts(ptr, len) };
-    s
-}
-
-/// Implement direct slice access if the host is little-endian.
-#[cfg(target_endian = "little")]
-impl<'a, T: EndianScalar> Follow<'a> for &'a [T] {
-    type Inner = &'a [T];
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
-        follow_slice_helper::<T>(buf, loc)
+impl<'a> Follow<'a> for &'a [u8] {
+    type Inner = &'a [u8];
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+        let len = read_scalar_at::<UOffsetT>(buf, loc) as usize;
+        &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len]
     }
 }
 
 /// Implement Follow for all possible Vectors that have Follow-able elements.
 impl<'a, T: Follow<'a> + 'a> Follow<'a> for Vector<'a, T> {
     type Inner = Vector<'a, T>;
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         Vector::new(buf, loc)
     }
 }
@@ -201,8 +169,14 @@
         }
     }
 
+    /// Creates a new `VectorIter` from the provided slice
+    ///
+    /// # Safety
+    ///
+    /// buf must contain a contiguous sequence of `items_num` values of `T`
+    ///
     #[inline]
-    pub fn from_slice(buf: &'a [u8], items_num: usize) -> Self {
+    pub unsafe fn from_slice(buf: &'a [u8], items_num: usize) -> Self {
         VectorIter {
             buf,
             loc: 0,
@@ -235,7 +209,10 @@
         if self.remaining == 0 {
             None
         } else {
-            let result = T::follow(self.buf, self.loc);
+            // Safety:
+            // VectorIter can only be created from a contiguous sequence of `items_num`
+            // And remaining is initialized to `items_num`
+            let result = unsafe { T::follow(self.buf, self.loc) };
             self.loc += sz;
             self.remaining -= 1;
             Some(result)
@@ -272,7 +249,10 @@
             None
         } else {
             self.remaining -= 1;
-            Some(T::follow(self.buf, self.loc + sz * self.remaining))
+            // Safety:
+            // VectorIter can only be created from a contiguous sequence of `items_num`
+            // And remaining is initialized to `items_num`
+            Some(unsafe { T::follow(self.buf, self.loc + sz * self.remaining) })
         }
     }
 
@@ -309,7 +289,7 @@
     }
 }
 
-#[cfg(feature="serialize")]
+#[cfg(feature = "serialize")]
 impl<'a, T> serde::ser::Serialize for Vector<'a, T>
 where
     T: 'a + Follow<'a>,
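
With `SafeSliceAccess`/`safe_slice` removed, raw byte access goes through the new `bytes()` accessor while element access stays endian-correct via `get`/`iter`. A sketch over a hand-laid-out buffer (length prefix, then elements):

    let buf = [3u8, 0, 0, 0, 10, 20, 30]; // UOffsetT len = 3, then 3 bytes
    // Safety: `buf` holds a valid vector of u8 at offset 0.
    let v = unsafe { flatbuffers::Vector::<u8>::new(&buf, 0) };
    assert_eq!(v.bytes(), &[10, 20, 30]);
    assert_eq!(v.get(1), 20);
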
diff --git a/rust/flatbuffers/src/verifier.rs b/rust/flatbuffers/src/verifier.rs
index 36a5775..047d4f6 100644
--- a/rust/flatbuffers/src/verifier.rs
+++ b/rust/flatbuffers/src/verifier.rs
@@ -1,14 +1,14 @@
-#[cfg(feature = "no_std")]
+use crate::follow::Follow;
+use crate::{ForwardsUOffset, SOffsetT, SkipSizePrefix, UOffsetT, VOffsetT, Vector, SIZE_UOFFSET};
+#[cfg(not(feature = "std"))]
 use alloc::vec::Vec;
 use core::ops::Range;
 use core::option::Option;
-use crate::follow::Follow;
-use crate::{ForwardsUOffset, SOffsetT, SkipSizePrefix, UOffsetT, VOffsetT, Vector, SIZE_UOFFSET};
 
-#[cfg(feature="no_std")]
-use thiserror_core2::Error;
-#[cfg(not(feature="no_std"))]
-use thiserror::Error;
+#[cfg(all(nightly, not(feature = "std")))]
+use core::error::Error;
+#[cfg(feature = "std")]
+use std::error::Error;
 
 /// Traces the location of data errors. Not populated for Dos detecting errors.
 /// Useful for MissingRequiredField and Utf8Error in particular, though
@@ -28,8 +28,10 @@
         position: usize,
     },
 }
+
 #[derive(PartialEq, Eq, Default, Debug, Clone)]
 pub struct ErrorTrace(Vec<ErrorTraceDetail>);
+
 impl core::convert::AsRef<[ErrorTraceDetail]> for ErrorTrace {
     #[inline]
     fn as_ref(&self) -> &[ErrorTraceDetail] {
@@ -39,64 +41,138 @@
 
 /// Describes how a flatuffer is invalid and, for data errors, roughly where. No extra tracing
 /// information is given for DoS detecting errors since it will probably be a lot.
-#[derive(Clone, Error, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum InvalidFlatbuffer {
-    #[error("Missing required field `{required}`.\n{error_trace}")]
     MissingRequiredField {
         required: &'static str,
         error_trace: ErrorTrace,
     },
-    #[error(
-        "Union exactly one of union discriminant (`{field_type}`) and value \
-             (`{field}`) are present.\n{error_trace}"
-    )]
     InconsistentUnion {
         field: &'static str,
         field_type: &'static str,
         error_trace: ErrorTrace,
     },
-    #[error("Utf8 error for string in {range:?}: {error}\n{error_trace}")]
     Utf8Error {
-        #[source]
         error: core::str::Utf8Error,
         range: Range<usize>,
         error_trace: ErrorTrace,
     },
-    #[error("String in range [{}, {}) is missing its null terminator.\n{error_trace}",
-            range.start, range.end)]
     MissingNullTerminator {
         range: Range<usize>,
         error_trace: ErrorTrace,
     },
-    #[error("Type `{unaligned_type}` at position {position} is unaligned.\n{error_trace}")]
     Unaligned {
         position: usize,
         unaligned_type: &'static str,
         error_trace: ErrorTrace,
     },
-    #[error("Range [{}, {}) is out of bounds.\n{error_trace}", range.start, range.end)]
     RangeOutOfBounds {
         range: Range<usize>,
         error_trace: ErrorTrace,
     },
-    #[error(
-        "Signed offset at position {position} has value {soffset} which points out of bounds.\
-             \n{error_trace}"
-    )]
     SignedOffsetOutOfBounds {
         soffset: SOffsetT,
         position: usize,
         error_trace: ErrorTrace,
     },
     // Dos detecting errors. These do not get error traces since it will probably be very large.
-    #[error("Too many tables.")]
     TooManyTables,
-    #[error("Apparent size too large.")]
     ApparentSizeTooLarge,
-    #[error("Nested table depth limit reached.")]
     DepthLimitReached,
 }
 
+#[cfg(any(nightly, feature = "std"))]
+impl Error for InvalidFlatbuffer {
+    fn source(&self) -> Option<&(dyn Error + 'static)> {
+        if let InvalidFlatbuffer::Utf8Error { error: source, .. } = self {
+            Some(source)
+        } else {
+            None
+        }
+    }
+}
+
+impl core::fmt::Display for InvalidFlatbuffer {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        match self {
+            InvalidFlatbuffer::MissingRequiredField {
+                required,
+                error_trace,
+            } => {
+                writeln!(f, "Missing required field `{}`.\n{}", required, error_trace)?;
+            }
+            InvalidFlatbuffer::InconsistentUnion {
+                field,
+                field_type,
+                error_trace,
+            } => {
+                writeln!(
+                    f,
+                    "Exactly one of union discriminant (`{}`) and value (`{}`) are present.\n{}",
+                    field_type, field, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::Utf8Error {
+                error,
+                range,
+                error_trace,
+            } => {
+                writeln!(
+                    f,
+                    "Utf8 error for string in {:?}: {}\n{}",
+                    range, error, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::MissingNullTerminator { range, error_trace } => {
+                writeln!(
+                    f,
+                    "String in range [{}, {}) is missing its null terminator.\n{}",
+                    range.start, range.end, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::Unaligned {
+                position,
+                unaligned_type,
+                error_trace,
+            } => {
+                writeln!(
+                    f,
+                    "Type `{}` at position {} is unaligned.\n{}",
+                    unaligned_type, position, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::RangeOutOfBounds { range, error_trace } => {
+                writeln!(
+                    f,
+                    "Range [{}, {}) is out of bounds.\n{}",
+                    range.start, range.end, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::SignedOffsetOutOfBounds {
+                soffset,
+                position,
+                error_trace,
+            } => {
+                writeln!(
+                    f,
+                    "Signed offset at position {} has value {} which points out of bounds.\n{}",
+                    position, soffset, error_trace
+                )?;
+            }
+            InvalidFlatbuffer::TooManyTables {} => {
+                writeln!(f, "Too many tables.")?;
+            }
+            InvalidFlatbuffer::ApparentSizeTooLarge {} => {
+                writeln!(f, "Apparent size too large.")?;
+            }
+            InvalidFlatbuffer::DepthLimitReached {} => {
+                writeln!(f, "Nested table depth limit reached.")?;
+            }
+        }
+        Ok(())
+    }
+}
+
 impl core::fmt::Display for ErrorTrace {
     fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
         use ErrorTraceDetail::*;
@@ -184,6 +260,7 @@
         },
     )
 }
+
 /// Adds a TableField trace detail if `res` is a data error.
 fn trace_elem<T>(res: Result<T>, index: usize, position: usize) -> Result<T> {
     append_trace(res, ErrorTraceDetail::VectorElement { index, position })
@@ -205,6 +282,7 @@
     // options to error un-recognized enums and unions? possible footgun.
     // Ignore nested flatbuffers, etc?
 }
+
 impl Default for VerifierOptions {
     fn default() -> Self {
         Self {
@@ -226,6 +304,7 @@
     num_tables: usize,
     apparent_size: usize,
 }
+
 impl<'opts, 'buf> Verifier<'opts, 'buf> {
     pub fn new(opts: &'opts VerifierOptions, buffer: &'buf [u8]) -> Self {
         Self {
@@ -247,9 +326,12 @@
     /// memory since `buffer: &[u8]` has alignment 1.
     ///
     /// ### WARNING
+    ///
     /// This does not work for flatbuffers-structs as they have alignment 1 according to
     /// `core::mem::align_of` but are meant to have higher alignment within a Flatbuffer w.r.t.
     /// `buffer[0]`. TODO(caspern).
+    ///
+    /// Note this does not impact soundness as this crate does not assume alignment of structs
     #[inline]
     fn is_aligned<T>(&self, pos: usize) -> Result<()> {
         if pos % core::mem::align_of::<T>() == 0 {
@@ -307,9 +389,9 @@
 
         // signed offsets are subtracted.
         let derefed = if offset > 0 {
-            pos.checked_sub(offset.abs() as usize)
+            pos.checked_sub(offset.unsigned_abs() as usize)
         } else {
-            pos.checked_add(offset.abs() as usize)
+            pos.checked_add(offset.unsigned_abs() as usize)
         };
         if let Some(x) = derefed {
             if x < self.buffer.len() {
@@ -372,6 +454,7 @@
     // Verifier struct which holds the surrounding state and options.
     verifier: &'ver mut Verifier<'opts, 'buf>,
 }
+
 impl<'ver, 'opts, 'buf> TableVerifier<'ver, 'opts, 'buf> {
     fn deref(&mut self, field: VOffsetT) -> Result<Option<usize>> {
         let field = field as usize;
@@ -439,7 +522,9 @@
             }
             (Some(k), Some(v)) => {
                 trace_field(Key::run_verifier(self.verifier, k), key_field_name, k)?;
-                let discriminant = Key::follow(self.verifier.buffer, k);
+                // Safety:
+                // Run verifier on `k` above
+                let discriminant = unsafe { Key::follow(self.verifier.buffer, k) };
                 trace_field(
                     verify_union(discriminant, self.verifier, v),
                     val_field_name,
@@ -486,16 +571,27 @@
 }
 
 pub trait SimpleToVerifyInSlice {}
+
 impl SimpleToVerifyInSlice for bool {}
+
 impl SimpleToVerifyInSlice for i8 {}
+
 impl SimpleToVerifyInSlice for u8 {}
+
 impl SimpleToVerifyInSlice for i16 {}
+
 impl SimpleToVerifyInSlice for u16 {}
+
 impl SimpleToVerifyInSlice for i32 {}
+
 impl SimpleToVerifyInSlice for u32 {}
+
 impl SimpleToVerifyInSlice for f32 {}
+
 impl SimpleToVerifyInSlice for i64 {}
+
 impl SimpleToVerifyInSlice for u64 {}
+
 impl SimpleToVerifyInSlice for f64 {}
 
 impl<T: SimpleToVerifyInSlice> Verifiable for Vector<'_, T> {
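
Dropping `thiserror` means `Display` and `Error` are hand-written above; on `std` (or nightly `no_std` via `error_in_core`) `InvalidFlatbuffer` still boxes into `dyn Error`. Sketch, with `MyTable` again a hypothetical generated type:

    fn load(data: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
        let _root = flatbuffers::root::<MyTable>(data)?; // converts via `?`
        Ok(())
    }
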
diff --git a/rust/flatbuffers/src/vtable.rs b/rust/flatbuffers/src/vtable.rs
index bbb7190..1516153 100644
--- a/rust/flatbuffers/src/vtable.rs
+++ b/rust/flatbuffers/src/vtable.rs
@@ -33,24 +33,42 @@
 }
 
 impl<'a> VTable<'a> {
-    pub fn init(buf: &'a [u8], loc: usize) -> Self {
+    /// # Safety
+    ///
+    /// `buf` must contain a valid vtable at `loc`
+    ///
+    /// This consists of a number of `VOffsetT`
+    /// - size of vtable in bytes including size element
+    /// - size of object in bytes including the vtable offset
+    /// - n fields where n is the number of fields in the table's schema when the code was compiled
+    pub unsafe fn init(buf: &'a [u8], loc: usize) -> Self {
         VTable { buf, loc }
     }
+
     pub fn num_fields(&self) -> usize {
         (self.num_bytes() / SIZE_VOFFSET) - 2
     }
+
     pub fn num_bytes(&self) -> usize {
+        // Safety:
+        // Valid VTable at time of construction
         unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc) as usize }
     }
+
     pub fn object_inline_num_bytes(&self) -> usize {
+        // Safety:
+        // Valid VTable at time of construction
         let n = unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + SIZE_VOFFSET) };
         n as usize
     }
+
     pub fn get_field(&self, idx: usize) -> VOffsetT {
         // TODO(rw): distinguish between None and 0?
         if idx > self.num_fields() {
             return 0;
         }
+
+        // Safety:
+        // Valid VTable at time of construction
         unsafe {
             read_scalar_at::<VOffsetT>(
                 self.buf,
@@ -58,13 +76,17 @@
             )
         }
     }
+
     pub fn get(&self, byte_loc: VOffsetT) -> VOffsetT {
         // TODO(rw): distinguish between None and 0?
-        if byte_loc as usize >= self.num_bytes() {
+        if byte_loc as usize + 2 > self.num_bytes() {
             return 0;
         }
+        // Safety:
+        // byte_loc is within bounds of vtable, which was valid at time of construction
         unsafe { read_scalar_at::<VOffsetT>(self.buf, self.loc + byte_loc as usize) }
     }
+
     pub fn as_bytes(&self) -> &[u8] {
         let len = self.num_bytes();
         &self.buf[self.loc..self.loc + len]
@@ -87,7 +109,7 @@
 
 impl<'a> Follow<'a> for VTable<'a> {
     type Inner = VTable<'a>;
-    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+    unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         VTable::init(buf, loc)
     }
 }
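
The changed check in `VTable::get` above fixes an off-by-one: a `VOffsetT` is two bytes, so a read at `byte_loc` touches `[byte_loc, byte_loc + 2)` and only fits when `byte_loc + 2 <= num_bytes()`; the old `byte_loc >= num_bytes()` test let a read start on the table's last byte and run one byte past it. Sketch of the predicate:

    fn voffset_read_in_bounds(byte_loc: usize, num_bytes: usize) -> bool {
        byte_loc + 2 <= num_bytes // the whole 2-byte read must fit
    }
    assert!(!voffset_read_in_bounds(9, 10)); // the old check (9 < 10) passed
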
diff --git a/rust/flatbuffers/src/vtable_writer.rs b/rust/flatbuffers/src/vtable_writer.rs
index 9b61dac..4bcde9a 100644
--- a/rust/flatbuffers/src/vtable_writer.rs
+++ b/rust/flatbuffers/src/vtable_writer.rs
@@ -40,8 +40,11 @@
     /// to the provided value.
     #[inline(always)]
     pub fn write_vtable_byte_length(&mut self, n: VOffsetT) {
+        let buf = &mut self.buf[..SIZE_VOFFSET];
+        // Safety:
+        // Validated range above
         unsafe {
-            emplace_scalar::<VOffsetT>(&mut self.buf[..SIZE_VOFFSET], n);
+            emplace_scalar::<VOffsetT>(buf, n);
         }
         debug_assert_eq!(n as usize, self.buf.len());
     }
@@ -49,8 +52,11 @@
     /// Writes an object length (in bytes) into the vtable.
     #[inline(always)]
     pub fn write_object_inline_size(&mut self, n: VOffsetT) {
+        let buf = &mut self.buf[SIZE_VOFFSET..2 * SIZE_VOFFSET];
+        // Safety:
+        // Validated range above
         unsafe {
-            emplace_scalar::<VOffsetT>(&mut self.buf[SIZE_VOFFSET..2 * SIZE_VOFFSET], n);
+            emplace_scalar::<VOffsetT>(buf, n);
         }
     }
 
@@ -61,8 +67,11 @@
     #[inline(always)]
     pub fn write_field_offset(&mut self, vtable_offset: VOffsetT, object_data_offset: VOffsetT) {
         let idx = vtable_offset as usize;
+        let buf = &mut self.buf[idx..idx + SIZE_VOFFSET];
+        // Safety:
+        // Validated range above
         unsafe {
-            emplace_scalar::<VOffsetT>(&mut self.buf[idx..idx + SIZE_VOFFSET], object_data_offset);
+            emplace_scalar::<VOffsetT>(buf, object_data_offset);
         }
     }
 
@@ -73,6 +82,9 @@
         // This is the closest thing to memset in Rust right now.
         let len = self.buf.len();
         let p = self.buf.as_mut_ptr() as *mut u8;
+
+        // Safety:
+        // p is byte aligned and of length `len`
         unsafe {
             write_bytes(p, 0, len);
         }