Squashed 'third_party/flatbuffers/' content from commit acc9990ab

Change-Id: I48550d40d78fea996ebe74e9723a5d1f910de491
git-subtree-dir: third_party/flatbuffers
git-subtree-split: acc9990abd2206491480291b0f85f925110102ea
diff --git a/go/BUILD.bazel b/go/BUILD.bazel
new file mode 100644
index 0000000..78bd8d8
--- /dev/null
+++ b/go/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+alias(
+    name = "go_default_library",
+    actual = ":go",
+    visibility = ["//visibility:public"],
+)
+
+go_library(
+    name = "go",
+    srcs = [
+        "builder.go",
+        "doc.go",
+        "encode.go",
+        "grpc.go",
+        "lib.go",
+        "sizes.go",
+        "struct.go",
+        "table.go",
+    ],
+    importpath = "github.com/google/flatbuffers/go",
+    visibility = ["//visibility:public"],
+)
diff --git a/go/builder.go b/go/builder.go
new file mode 100644
index 0000000..8d75700
--- /dev/null
+++ b/go/builder.go
@@ -0,0 +1,771 @@
+package flatbuffers
+
+// Builder is a state machine for creating FlatBuffer objects.
+// Use a Builder to construct object(s) starting from leaf nodes.
+//
+// A Builder constructs byte buffers in a last-first manner for simplicity and
+// performance.
+type Builder struct {
+	// `Bytes` gives raw access to the buffer. Most users will want to use
+	// FinishedBytes() instead.
+	Bytes []byte
+
+	minalign  int
+	vtable    []UOffsetT
+	objectEnd UOffsetT
+	vtables   []UOffsetT
+	head      UOffsetT
+	nested    bool
+	finished  bool
+}
+
+const fileIdentifierLength = 4
+
+// NewBuilder initializes a Builder of size `initialSize`.
+// The internal buffer is grown as needed.
+func NewBuilder(initialSize int) *Builder {
+	if initialSize <= 0 {
+		initialSize = 0
+	}
+
+	b := &Builder{}
+	b.Bytes = make([]byte, initialSize)
+	b.head = UOffsetT(initialSize)
+	b.minalign = 1
+	b.vtables = make([]UOffsetT, 0, 16) // sensible default capacity
+
+	return b
+}
+
+// Reset truncates the underlying Builder buffer, facilitating alloc-free
+// reuse of a Builder. It also resets bookkeeping data.
+func (b *Builder) Reset() {
+	if b.Bytes != nil {
+		b.Bytes = b.Bytes[:cap(b.Bytes)]
+	}
+
+	if b.vtables != nil {
+		b.vtables = b.vtables[:0]
+	}
+
+	if b.vtable != nil {
+		b.vtable = b.vtable[:0]
+	}
+
+	b.head = UOffsetT(len(b.Bytes))
+	b.minalign = 1
+	b.nested = false
+	b.finished = false
+}
+
+// FinishedBytes returns a pointer to the written data in the byte buffer.
+// Panics if the builder is not in a finished state (which is entered by
+// calling `Finish()`).
+func (b *Builder) FinishedBytes() []byte {
+	b.assertFinished()
+	return b.Bytes[b.Head():]
+}
+
+// StartObject initializes bookkeeping for writing a new object.
+func (b *Builder) StartObject(numfields int) {
+	b.assertNotNested()
+	b.nested = true
+
+	// use 32-bit offsets so that arithmetic doesn't overflow.
+	if cap(b.vtable) < numfields || b.vtable == nil {
+		b.vtable = make([]UOffsetT, numfields)
+	} else {
+		b.vtable = b.vtable[:numfields]
+		for i := 0; i < len(b.vtable); i++ {
+			b.vtable[i] = 0
+		}
+	}
+
+	b.objectEnd = b.Offset()
+}
+
+// WriteVtable serializes the vtable for the current object, if applicable.
+//
+// Before writing out the vtable, this checks pre-existing vtables for equality
+// to this one. If an equal vtable is found, point the object to the existing
+// vtable and return.
+//
+// Because vtable values are sensitive to alignment of object data, not all
+// logically-equal vtables will be deduplicated.
+//
+// A vtable has the following format:
+//   <VOffsetT: size of the vtable in bytes, including this value>
+//   <VOffsetT: size of the object in bytes, including the vtable offset>
+//   <VOffsetT: offset for a field> * N, where N is the number of fields in
+//	        the schema for this type. Includes deprecated fields.
+// Thus, a vtable is made of 2 + N elements, each SizeVOffsetT bytes wide.
+//
+// An object has the following format:
+//   <SOffsetT: offset to this object's vtable (may be negative)>
+//   <byte: data>+
+func (b *Builder) WriteVtable() (n UOffsetT) {
+	// Prepend a zero scalar to the object. Later in this function we'll
+	// write an offset here that points to the object's vtable:
+	b.PrependSOffsetT(0)
+
+	objectOffset := b.Offset()
+	existingVtable := UOffsetT(0)
+
+	// Trim vtable of trailing zeroes.
+	i := len(b.vtable) - 1
+	for ; i >= 0 && b.vtable[i] == 0; i-- {
+	}
+	b.vtable = b.vtable[:i+1]
+
+	// Search backwards through existing vtables, because similar vtables
+	// are likely to have been recently appended. See
+	// BenchmarkVtableDeduplication for a case in which this heuristic
+	// saves about 30% of the time used in writing objects with duplicate
+	// tables.
+	for i := len(b.vtables) - 1; i >= 0; i-- {
+		// Find the other vtable, which is associated with `i`:
+		vt2Offset := b.vtables[i]
+		vt2Start := len(b.Bytes) - int(vt2Offset)
+		vt2Len := GetVOffsetT(b.Bytes[vt2Start:])
+
+		metadata := VtableMetadataFields * SizeVOffsetT
+		vt2End := vt2Start + int(vt2Len)
+		vt2 := b.Bytes[vt2Start+metadata : vt2End]
+
+		// Compare the other vtable to the one under consideration.
+		// If they are equal, store the offset and break:
+		if vtableEqual(b.vtable, objectOffset, vt2) {
+			existingVtable = vt2Offset
+			break
+		}
+	}
+
+	if existingVtable == 0 {
+		// Did not find a vtable, so write this one to the buffer.
+
+		// Write out the current vtable in reverse, because
+		// serialization occurs in last-first order:
+		for i := len(b.vtable) - 1; i >= 0; i-- {
+			var off UOffsetT
+			if b.vtable[i] != 0 {
+				// Forward reference to field;
+				// use 32bit number to assert no overflow:
+				off = objectOffset - b.vtable[i]
+			}
+
+			b.PrependVOffsetT(VOffsetT(off))
+		}
+
+		// The two metadata fields are written last.
+
+		// First, store the object bytesize:
+		objectSize := objectOffset - b.objectEnd
+		b.PrependVOffsetT(VOffsetT(objectSize))
+
+		// Second, store the vtable bytesize:
+		vBytes := (len(b.vtable) + VtableMetadataFields) * SizeVOffsetT
+		b.PrependVOffsetT(VOffsetT(vBytes))
+
+		// Next, write the offset to the new vtable in the
+		// already-allocated SOffsetT at the beginning of this object:
+		objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
+		WriteSOffsetT(b.Bytes[objectStart:],
+			SOffsetT(b.Offset())-SOffsetT(objectOffset))
+
+		// Finally, store this vtable in memory for future
+		// deduplication:
+		b.vtables = append(b.vtables, b.Offset())
+	} else {
+		// Found a duplicate vtable.
+
+		objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
+		b.head = UOffsetT(objectStart)
+
+		// Write the offset to the found vtable in the
+		// already-allocated SOffsetT at the beginning of this object:
+		WriteSOffsetT(b.Bytes[b.head:],
+			SOffsetT(existingVtable)-SOffsetT(objectOffset))
+	}
+
+	b.vtable = b.vtable[:0]
+	return objectOffset
+}
+
+// EndObject writes data necessary to finish object construction.
+func (b *Builder) EndObject() UOffsetT {
+	b.assertNested()
+	n := b.WriteVtable()
+	b.nested = false
+	return n
+}
+
+// Doubles the size of the byteslice, and copies the old data towards the
+// end of the new byteslice (since we build the buffer backwards).
+func (b *Builder) growByteBuffer() {
+	if (int64(len(b.Bytes)) & int64(0xC0000000)) != 0 {
+		panic("cannot grow buffer beyond 2 gigabytes")
+	}
+	newLen := len(b.Bytes) * 2
+	if newLen == 0 {
+		newLen = 1
+	}
+
+	if cap(b.Bytes) >= newLen {
+		b.Bytes = b.Bytes[:newLen]
+	} else {
+		extension := make([]byte, newLen-len(b.Bytes))
+		b.Bytes = append(b.Bytes, extension...)
+	}
+
+	middle := newLen / 2
+	copy(b.Bytes[middle:], b.Bytes[:middle])
+}
+
+// Head gives the start of useful data in the underlying byte buffer.
+// Note: unlike other functions, this value is interpreted as from the left.
+func (b *Builder) Head() UOffsetT {
+	return b.head
+}
+
+// Offset relative to the end of the buffer.
+func (b *Builder) Offset() UOffsetT {
+	return UOffsetT(len(b.Bytes)) - b.head
+}
+
+// Pad places zeros at the current offset.
+func (b *Builder) Pad(n int) {
+	for i := 0; i < n; i++ {
+		b.PlaceByte(0)
+	}
+}
+
+// Prep prepares to write an element of `size` after `additional_bytes`
+// have been written, e.g. if you write a string, you need to align such
+// the int length field is aligned to SizeInt32, and the string data follows it
+// directly.
+// If all you need to do is align, `additionalBytes` will be 0.
+func (b *Builder) Prep(size, additionalBytes int) {
+	// Track the biggest thing we've ever aligned to.
+	if size > b.minalign {
+		b.minalign = size
+	}
+	// Find the amount of alignment needed such that `size` is properly
+	// aligned after `additionalBytes`:
+	alignSize := (^(len(b.Bytes) - int(b.Head()) + additionalBytes)) + 1
+	alignSize &= (size - 1)
+
+	// Reallocate the buffer if needed:
+	for int(b.head) <= alignSize+size+additionalBytes {
+		oldBufSize := len(b.Bytes)
+		b.growByteBuffer()
+		b.head += UOffsetT(len(b.Bytes) - oldBufSize)
+	}
+	b.Pad(alignSize)
+}
+
+// PrependSOffsetT prepends an SOffsetT, relative to where it will be written.
+func (b *Builder) PrependSOffsetT(off SOffsetT) {
+	b.Prep(SizeSOffsetT, 0) // Ensure alignment is already done.
+	if !(UOffsetT(off) <= b.Offset()) {
+		panic("unreachable: off <= b.Offset()")
+	}
+	off2 := SOffsetT(b.Offset()) - off + SOffsetT(SizeSOffsetT)
+	b.PlaceSOffsetT(off2)
+}
+
+// PrependUOffsetT prepends an UOffsetT, relative to where it will be written.
+func (b *Builder) PrependUOffsetT(off UOffsetT) {
+	b.Prep(SizeUOffsetT, 0) // Ensure alignment is already done.
+	if !(off <= b.Offset()) {
+		panic("unreachable: off <= b.Offset()")
+	}
+	off2 := b.Offset() - off + UOffsetT(SizeUOffsetT)
+	b.PlaceUOffsetT(off2)
+}
+
+// StartVector initializes bookkeeping for writing a new vector.
+//
+// A vector has the following format:
+//   <UOffsetT: number of elements in this vector>
+//   <T: data>+, where T is the type of elements of this vector.
+func (b *Builder) StartVector(elemSize, numElems, alignment int) UOffsetT {
+	b.assertNotNested()
+	b.nested = true
+	b.Prep(SizeUint32, elemSize*numElems)
+	b.Prep(alignment, elemSize*numElems) // Just in case alignment > int.
+	return b.Offset()
+}
+
+// EndVector writes data necessary to finish vector construction.
+func (b *Builder) EndVector(vectorNumElems int) UOffsetT {
+	b.assertNested()
+
+	// we already made space for this, so write without PrependUint32
+	b.PlaceUOffsetT(UOffsetT(vectorNumElems))
+
+	b.nested = false
+	return b.Offset()
+}
+
+// CreateString writes a null-terminated string as a vector.
+func (b *Builder) CreateString(s string) UOffsetT {
+	b.assertNotNested()
+	b.nested = true
+
+	b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
+	b.PlaceByte(0)
+
+	l := UOffsetT(len(s))
+
+	b.head -= l
+	copy(b.Bytes[b.head:b.head+l], s)
+
+	return b.EndVector(len(s))
+}
+
+// CreateByteString writes a byte slice as a string (null-terminated).
+func (b *Builder) CreateByteString(s []byte) UOffsetT {
+	b.assertNotNested()
+	b.nested = true
+
+	b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
+	b.PlaceByte(0)
+
+	l := UOffsetT(len(s))
+
+	b.head -= l
+	copy(b.Bytes[b.head:b.head+l], s)
+
+	return b.EndVector(len(s))
+}
+
+// CreateByteVector writes a ubyte vector
+func (b *Builder) CreateByteVector(v []byte) UOffsetT {
+	b.assertNotNested()
+	b.nested = true
+
+	b.Prep(int(SizeUOffsetT), len(v)*SizeByte)
+
+	l := UOffsetT(len(v))
+
+	b.head -= l
+	copy(b.Bytes[b.head:b.head+l], v)
+
+	return b.EndVector(len(v))
+}
+
+func (b *Builder) assertNested() {
+	// If you get this assert, you're in an object while trying to write
+	// data that belongs outside of an object.
+	// To fix this, write non-inline data (like vectors) before creating
+	// objects.
+	if !b.nested {
+		panic("Incorrect creation order: must be inside object.")
+	}
+}
+
+func (b *Builder) assertNotNested() {
+	// If you hit this, you're trying to construct a Table/Vector/String
+	// during the construction of its parent table (between the MyTableBuilder
+	// and builder.Finish()).
+	// Move the creation of these sub-objects to above the MyTableBuilder to
+	// not get this assert.
+	// Ignoring this assert may appear to work in simple cases, but the reason
+	// it is here is that storing objects in-line may cause vtable offsets
+	// to not fit anymore. It also leads to vtable duplication.
+	if b.nested {
+		panic("Incorrect creation order: object must not be nested.")
+	}
+}
+
+func (b *Builder) assertFinished() {
+	// If you get this assert, you're attempting to access a buffer
+	// which hasn't been finished yet. Be sure to call builder.Finish()
+	// with your root table.
+	// If you really need to access an unfinished buffer, use the Bytes
+	// buffer directly.
+	if !b.finished {
+		panic("Incorrect use of FinishedBytes(): must call 'Finish' first.")
+	}
+}
+
+// PrependBoolSlot prepends a bool onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependBoolSlot(o int, x, d bool) {
+	val := byte(0)
+	if x {
+		val = 1
+	}
+	def := byte(0)
+	if d {
+		def = 1
+	}
+	b.PrependByteSlot(o, val, def)
+}
+
+// PrependByteSlot prepends a byte onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependByteSlot(o int, x, d byte) {
+	if x != d {
+		b.PrependByte(x)
+		b.Slot(o)
+	}
+}
+
+// PrependUint8Slot prepends a uint8 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint8Slot(o int, x, d uint8) {
+	if x != d {
+		b.PrependUint8(x)
+		b.Slot(o)
+	}
+}
+
+// PrependUint16Slot prepends a uint16 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint16Slot(o int, x, d uint16) {
+	if x != d {
+		b.PrependUint16(x)
+		b.Slot(o)
+	}
+}
+
+// PrependUint32Slot prepends a uint32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint32Slot(o int, x, d uint32) {
+	if x != d {
+		b.PrependUint32(x)
+		b.Slot(o)
+	}
+}
+
+// PrependUint64Slot prepends a uint64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint64Slot(o int, x, d uint64) {
+	if x != d {
+		b.PrependUint64(x)
+		b.Slot(o)
+	}
+}
+
+// PrependInt8Slot prepends a int8 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt8Slot(o int, x, d int8) {
+	if x != d {
+		b.PrependInt8(x)
+		b.Slot(o)
+	}
+}
+
+// PrependInt16Slot prepends a int16 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt16Slot(o int, x, d int16) {
+	if x != d {
+		b.PrependInt16(x)
+		b.Slot(o)
+	}
+}
+
+// PrependInt32Slot prepends a int32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt32Slot(o int, x, d int32) {
+	if x != d {
+		b.PrependInt32(x)
+		b.Slot(o)
+	}
+}
+
+// PrependInt64Slot prepends a int64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt64Slot(o int, x, d int64) {
+	if x != d {
+		b.PrependInt64(x)
+		b.Slot(o)
+	}
+}
+
+// PrependFloat32Slot prepends a float32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependFloat32Slot(o int, x, d float32) {
+	if x != d {
+		b.PrependFloat32(x)
+		b.Slot(o)
+	}
+}
+
+// PrependFloat64Slot prepends a float64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependFloat64Slot(o int, x, d float64) {
+	if x != d {
+		b.PrependFloat64(x)
+		b.Slot(o)
+	}
+}
+
+// PrependUOffsetTSlot prepends an UOffsetT onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUOffsetTSlot(o int, x, d UOffsetT) {
+	if x != d {
+		b.PrependUOffsetT(x)
+		b.Slot(o)
+	}
+}
+
+// PrependStructSlot prepends a struct onto the object at vtable slot `o`.
+// Structs are stored inline, so nothing additional is being added.
+// In generated code, `d` is always 0.
+func (b *Builder) PrependStructSlot(voffset int, x, d UOffsetT) {
+	if x != d {
+		b.assertNested()
+		if x != b.Offset() {
+			panic("inline data write outside of object")
+		}
+		b.Slot(voffset)
+	}
+}
+
+// Slot sets the vtable key `voffset` to the current location in the buffer.
+func (b *Builder) Slot(slotnum int) {
+	b.vtable[slotnum] = UOffsetT(b.Offset())
+}
+
+// FinishWithFileIdentifier finalizes a buffer, pointing to the given `rootTable`.
+// as well as applies a file identifier.
+func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) {
+	if fid == nil || len(fid) != fileIdentifierLength {
+		panic("incorrect file identifier length")
+	}
+	// In order to add a file identifier to the flatbuffer message, we need
+	// to prepare an alignment and file identifier length
+	b.Prep(b.minalign, SizeInt32+fileIdentifierLength)
+	for i := fileIdentifierLength - 1; i >= 0; i-- {
+		// place the file identifier
+		b.PlaceByte(fid[i])
+	}
+	// finish
+	b.Finish(rootTable)
+}
+
+// Finish finalizes a buffer, pointing to the given `rootTable`.
+func (b *Builder) Finish(rootTable UOffsetT) {
+	b.assertNotNested()
+	b.Prep(b.minalign, SizeUOffsetT)
+	b.PrependUOffsetT(rootTable)
+	b.finished = true
+}
+
+// vtableEqual compares an unwritten vtable to a written vtable.
+func vtableEqual(a []UOffsetT, objectStart UOffsetT, b []byte) bool {
+	if len(a)*SizeVOffsetT != len(b) {
+		return false
+	}
+
+	for i := 0; i < len(a); i++ {
+		x := GetVOffsetT(b[i*SizeVOffsetT : (i+1)*SizeVOffsetT])
+
+		// Skip vtable entries that indicate a default value.
+		if x == 0 && a[i] == 0 {
+			continue
+		}
+
+		y := SOffsetT(objectStart) - SOffsetT(a[i])
+		if SOffsetT(x) != y {
+			return false
+		}
+	}
+	return true
+}
+
+// PrependBool prepends a bool to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependBool(x bool) {
+	b.Prep(SizeBool, 0)
+	b.PlaceBool(x)
+}
+
+// PrependUint8 prepends a uint8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint8(x uint8) {
+	b.Prep(SizeUint8, 0)
+	b.PlaceUint8(x)
+}
+
+// PrependUint16 prepends a uint16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint16(x uint16) {
+	b.Prep(SizeUint16, 0)
+	b.PlaceUint16(x)
+}
+
+// PrependUint32 prepends a uint32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint32(x uint32) {
+	b.Prep(SizeUint32, 0)
+	b.PlaceUint32(x)
+}
+
+// PrependUint64 prepends a uint64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint64(x uint64) {
+	b.Prep(SizeUint64, 0)
+	b.PlaceUint64(x)
+}
+
+// PrependInt8 prepends a int8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt8(x int8) {
+	b.Prep(SizeInt8, 0)
+	b.PlaceInt8(x)
+}
+
+// PrependInt16 prepends a int16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt16(x int16) {
+	b.Prep(SizeInt16, 0)
+	b.PlaceInt16(x)
+}
+
+// PrependInt32 prepends a int32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt32(x int32) {
+	b.Prep(SizeInt32, 0)
+	b.PlaceInt32(x)
+}
+
+// PrependInt64 prepends a int64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt64(x int64) {
+	b.Prep(SizeInt64, 0)
+	b.PlaceInt64(x)
+}
+
+// PrependFloat32 prepends a float32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat32(x float32) {
+	b.Prep(SizeFloat32, 0)
+	b.PlaceFloat32(x)
+}
+
+// PrependFloat64 prepends a float64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat64(x float64) {
+	b.Prep(SizeFloat64, 0)
+	b.PlaceFloat64(x)
+}
+
+// PrependByte prepends a byte to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependByte(x byte) {
+	b.Prep(SizeByte, 0)
+	b.PlaceByte(x)
+}
+
+// PrependVOffsetT prepends a VOffsetT to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependVOffsetT(x VOffsetT) {
+	b.Prep(SizeVOffsetT, 0)
+	b.PlaceVOffsetT(x)
+}
+
+// PlaceBool prepends a bool to the Builder, without checking for space.
+func (b *Builder) PlaceBool(x bool) {
+	b.head -= UOffsetT(SizeBool)
+	WriteBool(b.Bytes[b.head:], x)
+}
+
+// PlaceUint8 prepends a uint8 to the Builder, without checking for space.
+func (b *Builder) PlaceUint8(x uint8) {
+	b.head -= UOffsetT(SizeUint8)
+	WriteUint8(b.Bytes[b.head:], x)
+}
+
+// PlaceUint16 prepends a uint16 to the Builder, without checking for space.
+func (b *Builder) PlaceUint16(x uint16) {
+	b.head -= UOffsetT(SizeUint16)
+	WriteUint16(b.Bytes[b.head:], x)
+}
+
+// PlaceUint32 prepends a uint32 to the Builder, without checking for space.
+func (b *Builder) PlaceUint32(x uint32) {
+	b.head -= UOffsetT(SizeUint32)
+	WriteUint32(b.Bytes[b.head:], x)
+}
+
+// PlaceUint64 prepends a uint64 to the Builder, without checking for space.
+func (b *Builder) PlaceUint64(x uint64) {
+	b.head -= UOffsetT(SizeUint64)
+	WriteUint64(b.Bytes[b.head:], x)
+}
+
+// PlaceInt8 prepends a int8 to the Builder, without checking for space.
+func (b *Builder) PlaceInt8(x int8) {
+	b.head -= UOffsetT(SizeInt8)
+	WriteInt8(b.Bytes[b.head:], x)
+}
+
+// PlaceInt16 prepends a int16 to the Builder, without checking for space.
+func (b *Builder) PlaceInt16(x int16) {
+	b.head -= UOffsetT(SizeInt16)
+	WriteInt16(b.Bytes[b.head:], x)
+}
+
+// PlaceInt32 prepends a int32 to the Builder, without checking for space.
+func (b *Builder) PlaceInt32(x int32) {
+	b.head -= UOffsetT(SizeInt32)
+	WriteInt32(b.Bytes[b.head:], x)
+}
+
+// PlaceInt64 prepends a int64 to the Builder, without checking for space.
+func (b *Builder) PlaceInt64(x int64) {
+	b.head -= UOffsetT(SizeInt64)
+	WriteInt64(b.Bytes[b.head:], x)
+}
+
+// PlaceFloat32 prepends a float32 to the Builder, without checking for space.
+func (b *Builder) PlaceFloat32(x float32) {
+	b.head -= UOffsetT(SizeFloat32)
+	WriteFloat32(b.Bytes[b.head:], x)
+}
+
+// PlaceFloat64 prepends a float64 to the Builder, without checking for space.
+func (b *Builder) PlaceFloat64(x float64) {
+	b.head -= UOffsetT(SizeFloat64)
+	WriteFloat64(b.Bytes[b.head:], x)
+}
+
+// PlaceByte prepends a byte to the Builder, without checking for space.
+func (b *Builder) PlaceByte(x byte) {
+	b.head -= UOffsetT(SizeByte)
+	WriteByte(b.Bytes[b.head:], x)
+}
+
+// PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceVOffsetT(x VOffsetT) {
+	b.head -= UOffsetT(SizeVOffsetT)
+	WriteVOffsetT(b.Bytes[b.head:], x)
+}
+
+// PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceSOffsetT(x SOffsetT) {
+	b.head -= UOffsetT(SizeSOffsetT)
+	WriteSOffsetT(b.Bytes[b.head:], x)
+}
+
+// PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceUOffsetT(x UOffsetT) {
+	b.head -= UOffsetT(SizeUOffsetT)
+	WriteUOffsetT(b.Bytes[b.head:], x)
+}
diff --git a/go/doc.go b/go/doc.go
new file mode 100644
index 0000000..694edc7
--- /dev/null
+++ b/go/doc.go
@@ -0,0 +1,3 @@
+// Package flatbuffers provides facilities to read and write flatbuffers
+// objects.
+package flatbuffers
diff --git a/go/encode.go b/go/encode.go
new file mode 100644
index 0000000..72d4f3a
--- /dev/null
+++ b/go/encode.go
@@ -0,0 +1,238 @@
+package flatbuffers
+
+import (
+	"math"
+)
+
+type (
+	// A SOffsetT stores a signed offset into arbitrary data.
+	SOffsetT int32
+	// A UOffsetT stores an unsigned offset into vector data.
+	UOffsetT uint32
+	// A VOffsetT stores an unsigned offset in a vtable.
+	VOffsetT uint16
+)
+
+const (
+	// VtableMetadataFields is the count of metadata fields in each vtable.
+	VtableMetadataFields = 2
+)
+
+// GetByte decodes a little-endian byte from a byte slice.
+func GetByte(buf []byte) byte {
+	return byte(GetUint8(buf))
+}
+
+// GetBool decodes a little-endian bool from a byte slice.
+func GetBool(buf []byte) bool {
+	return buf[0] == 1
+}
+
+// GetUint8 decodes a little-endian uint8 from a byte slice.
+func GetUint8(buf []byte) (n uint8) {
+	n = uint8(buf[0])
+	return
+}
+
+// GetUint16 decodes a little-endian uint16 from a byte slice.
+func GetUint16(buf []byte) (n uint16) {
+	_ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+	n |= uint16(buf[0])
+	n |= uint16(buf[1]) << 8
+	return
+}
+
+// GetUint32 decodes a little-endian uint32 from a byte slice.
+func GetUint32(buf []byte) (n uint32) {
+	_ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+	n |= uint32(buf[0])
+	n |= uint32(buf[1]) << 8
+	n |= uint32(buf[2]) << 16
+	n |= uint32(buf[3]) << 24
+	return
+}
+
+// GetUint64 decodes a little-endian uint64 from a byte slice.
+func GetUint64(buf []byte) (n uint64) {
+	_ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+	n |= uint64(buf[0])
+	n |= uint64(buf[1]) << 8
+	n |= uint64(buf[2]) << 16
+	n |= uint64(buf[3]) << 24
+	n |= uint64(buf[4]) << 32
+	n |= uint64(buf[5]) << 40
+	n |= uint64(buf[6]) << 48
+	n |= uint64(buf[7]) << 56
+	return
+}
+
+// GetInt8 decodes a little-endian int8 from a byte slice.
+func GetInt8(buf []byte) (n int8) {
+	n = int8(buf[0])
+	return
+}
+
+// GetInt16 decodes a little-endian int16 from a byte slice.
+func GetInt16(buf []byte) (n int16) {
+	_ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+	n |= int16(buf[0])
+	n |= int16(buf[1]) << 8
+	return
+}
+
+// GetInt32 decodes a little-endian int32 from a byte slice.
+func GetInt32(buf []byte) (n int32) {
+	_ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+	n |= int32(buf[0])
+	n |= int32(buf[1]) << 8
+	n |= int32(buf[2]) << 16
+	n |= int32(buf[3]) << 24
+	return
+}
+
+// GetInt64 decodes a little-endian int64 from a byte slice.
+func GetInt64(buf []byte) (n int64) {
+	_ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+	n |= int64(buf[0])
+	n |= int64(buf[1]) << 8
+	n |= int64(buf[2]) << 16
+	n |= int64(buf[3]) << 24
+	n |= int64(buf[4]) << 32
+	n |= int64(buf[5]) << 40
+	n |= int64(buf[6]) << 48
+	n |= int64(buf[7]) << 56
+	return
+}
+
+// GetFloat32 decodes a little-endian float32 from a byte slice.
+func GetFloat32(buf []byte) float32 {
+	x := GetUint32(buf)
+	return math.Float32frombits(x)
+}
+
+// GetFloat64 decodes a little-endian float64 from a byte slice.
+func GetFloat64(buf []byte) float64 {
+	x := GetUint64(buf)
+	return math.Float64frombits(x)
+}
+
+// GetUOffsetT decodes a little-endian UOffsetT from a byte slice.
+func GetUOffsetT(buf []byte) UOffsetT {
+	return UOffsetT(GetInt32(buf))
+}
+
+// GetSOffsetT decodes a little-endian SOffsetT from a byte slice.
+func GetSOffsetT(buf []byte) SOffsetT {
+	return SOffsetT(GetInt32(buf))
+}
+
+// GetVOffsetT decodes a little-endian VOffsetT from a byte slice.
+func GetVOffsetT(buf []byte) VOffsetT {
+	return VOffsetT(GetUint16(buf))
+}
+
+// WriteByte encodes a little-endian uint8 into a byte slice.
+func WriteByte(buf []byte, n byte) {
+	WriteUint8(buf, uint8(n))
+}
+
+// WriteBool encodes a little-endian bool into a byte slice.
+func WriteBool(buf []byte, b bool) {
+	buf[0] = 0
+	if b {
+		buf[0] = 1
+	}
+}
+
+// WriteUint8 encodes a little-endian uint8 into a byte slice.
+func WriteUint8(buf []byte, n uint8) {
+	buf[0] = byte(n)
+}
+
+// WriteUint16 encodes a little-endian uint16 into a byte slice.
+func WriteUint16(buf []byte, n uint16) {
+	_ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+}
+
+// WriteUint32 encodes a little-endian uint32 into a byte slice.
+func WriteUint32(buf []byte, n uint32) {
+	_ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+	buf[2] = byte(n >> 16)
+	buf[3] = byte(n >> 24)
+}
+
+// WriteUint64 encodes a little-endian uint64 into a byte slice.
+func WriteUint64(buf []byte, n uint64) {
+	_ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+	buf[2] = byte(n >> 16)
+	buf[3] = byte(n >> 24)
+	buf[4] = byte(n >> 32)
+	buf[5] = byte(n >> 40)
+	buf[6] = byte(n >> 48)
+	buf[7] = byte(n >> 56)
+}
+
+// WriteInt8 encodes a little-endian int8 into a byte slice.
+func WriteInt8(buf []byte, n int8) {
+	buf[0] = byte(n)
+}
+
+// WriteInt16 encodes a little-endian int16 into a byte slice.
+func WriteInt16(buf []byte, n int16) {
+	_ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+}
+
+// WriteInt32 encodes a little-endian int32 into a byte slice.
+func WriteInt32(buf []byte, n int32) {
+	_ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+	buf[2] = byte(n >> 16)
+	buf[3] = byte(n >> 24)
+}
+
+// WriteInt64 encodes a little-endian int64 into a byte slice.
+func WriteInt64(buf []byte, n int64) {
+	_ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+	buf[0] = byte(n)
+	buf[1] = byte(n >> 8)
+	buf[2] = byte(n >> 16)
+	buf[3] = byte(n >> 24)
+	buf[4] = byte(n >> 32)
+	buf[5] = byte(n >> 40)
+	buf[6] = byte(n >> 48)
+	buf[7] = byte(n >> 56)
+}
+
+// WriteFloat32 encodes a little-endian float32 into a byte slice.
+func WriteFloat32(buf []byte, n float32) {
+	WriteUint32(buf, math.Float32bits(n))
+}
+
+// WriteFloat64 encodes a little-endian float64 into a byte slice.
+func WriteFloat64(buf []byte, n float64) {
+	WriteUint64(buf, math.Float64bits(n))
+}
+
+// WriteVOffsetT encodes a little-endian VOffsetT into a byte slice.
+func WriteVOffsetT(buf []byte, n VOffsetT) {
+	WriteUint16(buf, uint16(n))
+}
+
+// WriteSOffsetT encodes a little-endian SOffsetT into a byte slice.
+func WriteSOffsetT(buf []byte, n SOffsetT) {
+	WriteInt32(buf, int32(n))
+}
+
+// WriteUOffsetT encodes a little-endian UOffsetT into a byte slice.
+func WriteUOffsetT(buf []byte, n UOffsetT) {
+	WriteUint32(buf, uint32(n))
+}
diff --git a/go/grpc.go b/go/grpc.go
new file mode 100644
index 0000000..e7dabd3
--- /dev/null
+++ b/go/grpc.go
@@ -0,0 +1,23 @@
+package flatbuffers
+
+// Codec implements gRPC-go Codec which is used to encode and decode messages.
+var Codec = "flatbuffers"
+
+type FlatbuffersCodec struct{}
+
+func (FlatbuffersCodec) Marshal(v interface{}) ([]byte, error) {
+	return v.(*Builder).FinishedBytes(), nil
+}
+
+func (FlatbuffersCodec) Unmarshal(data []byte, v interface{}) error {
+	v.(flatbuffersInit).Init(data, GetUOffsetT(data))
+	return nil
+}
+
+func (FlatbuffersCodec) String() string {
+	return Codec
+}
+
+type flatbuffersInit interface {
+	Init(data []byte, i UOffsetT)
+}
diff --git a/go/lib.go b/go/lib.go
new file mode 100644
index 0000000..adfce52
--- /dev/null
+++ b/go/lib.go
@@ -0,0 +1,13 @@
+package flatbuffers
+
+// FlatBuffer is the interface that represents a flatbuffer.
+type FlatBuffer interface {
+	Table() Table
+	Init(buf []byte, i UOffsetT)
+}
+
+// GetRootAs is a generic helper to initialize a FlatBuffer with the provided buffer bytes and its data offset.
+func GetRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) {
+	n := GetUOffsetT(buf[offset:])
+	fb.Init(buf, n+offset)
+}
diff --git a/go/sizes.go b/go/sizes.go
new file mode 100644
index 0000000..ba22169
--- /dev/null
+++ b/go/sizes.go
@@ -0,0 +1,55 @@
+package flatbuffers
+
+import (
+	"unsafe"
+)
+
+const (
+	// See http://golang.org/ref/spec#Numeric_types
+
+	// SizeUint8 is the byte size of a uint8.
+	SizeUint8 = 1
+	// SizeUint16 is the byte size of a uint16.
+	SizeUint16 = 2
+	// SizeUint32 is the byte size of a uint32.
+	SizeUint32 = 4
+	// SizeUint64 is the byte size of a uint64.
+	SizeUint64 = 8
+
+	// SizeInt8 is the byte size of an int8.
+	SizeInt8 = 1
+	// SizeInt16 is the byte size of an int16.
+	SizeInt16 = 2
+	// SizeInt32 is the byte size of an int32.
+	SizeInt32 = 4
+	// SizeInt64 is the byte size of an int64.
+	SizeInt64 = 8
+
+	// SizeFloat32 is the byte size of a float32.
+	SizeFloat32 = 4
+	// SizeFloat64 is the byte size of a float64.
+	SizeFloat64 = 8
+
+	// SizeByte is the byte size of a byte.
+	// The `byte` type is aliased (by Go definition) to uint8.
+	SizeByte = 1
+
+	// SizeBool is the byte size of a bool.
+	// The `bool` type is aliased (by flatbuffers convention) to uint8.
+	SizeBool = 1
+
+	// SizeSOffsetT is the byte size of an SOffsetT.
+	// The `SOffsetT` type is aliased (by flatbuffers convention) to int32.
+	SizeSOffsetT = 4
+	// SizeUOffsetT is the byte size of a UOffsetT.
+	// The `UOffsetT` type is aliased (by flatbuffers convention) to uint32.
+	SizeUOffsetT = 4
+	// SizeVOffsetT is the byte size of a VOffsetT.
+	// The `VOffsetT` type is aliased (by flatbuffers convention) to uint16.
+	SizeVOffsetT = 2
+)
+
+// byteSliceToString converts a []byte to string without a heap allocation.
+//
+// The returned string aliases b's backing array. Callers must guarantee
+// that b is not mutated while the string is in use, or Go's
+// string-immutability assumption is violated.
+func byteSliceToString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
diff --git a/go/struct.go b/go/struct.go
new file mode 100644
index 0000000..11258f7
--- /dev/null
+++ b/go/struct.go
@@ -0,0 +1,8 @@
+package flatbuffers
+
+// Struct wraps a byte slice and provides read access to its data.
+//
+// Structs do not have a vtable; readers use the embedded Table's
+// absolute-offset accessors directly.
+type Struct struct {
+	Table
+}
diff --git a/go/table.go b/go/table.go
new file mode 100644
index 0000000..b273146
--- /dev/null
+++ b/go/table.go
@@ -0,0 +1,505 @@
+package flatbuffers
+
+// Table wraps a byte slice and provides read access to its data.
+//
+// The variable `Pos` indicates the root of the FlatBuffers object therein.
+type Table struct {
+	// Bytes is the underlying buffer; all offsets index into it.
+	Bytes []byte
+	// Pos is the absolute position of the object's root in Bytes.
+	Pos   UOffsetT // Always < 1<<31.
+}
+
+// Offset provides access into the Table's vtable.
+//
+// Fields which are deprecated are ignored by checking against the vtable's length.
+func (t *Table) Offset(vtableOffset VOffsetT) VOffsetT {
+	// The table at Pos begins with a signed offset pointing back to its vtable.
+	vtable := UOffsetT(SOffsetT(t.Pos) - t.GetSOffsetT(t.Pos))
+	// The first VOffsetT in a vtable is the vtable's own size in bytes;
+	// a slot beyond that size (e.g. from an older, shorter vtable) is
+	// treated as absent and yields 0.
+	if vtableOffset < t.GetVOffsetT(vtable) {
+		return t.GetVOffsetT(vtable + UOffsetT(vtableOffset))
+	}
+	return 0
+}
+
+// Indirect retrieves the relative offset stored at `offset`.
+func (t *Table) Indirect(off UOffsetT) UOffsetT {
+	// The UOffsetT stored at off is relative to off itself.
+	rel := GetUOffsetT(t.Bytes[off:])
+	return off + rel
+}
+
+// String gets a string from data stored inside the flatbuffer.
+//
+// The result aliases the buffer (see byteSliceToString): it is only
+// valid as long as t.Bytes is not mutated.
+func (t *Table) String(off UOffsetT) string {
+	b := t.ByteVector(off)
+	return byteSliceToString(b)
+}
+
+// ByteVector gets a byte slice from data stored inside the flatbuffer.
+// The returned slice aliases t.Bytes; it is not a copy.
+func (t *Table) ByteVector(off UOffsetT) []byte {
+	// Follow the relative offset to the vector; its first UOffsetT is the
+	// element count and the payload starts immediately after it.
+	off += GetUOffsetT(t.Bytes[off:])
+	start := off + UOffsetT(SizeUOffsetT)
+	length := GetUOffsetT(t.Bytes[off:])
+	return t.Bytes[start : start+length]
+}
+
+// VectorLen retrieves the length of the vector whose offset is stored at
+// "off" in this object.
+func (t *Table) VectorLen(off UOffsetT) int {
+	// off is table-relative; make it absolute, then follow the relative
+	// offset to the vector, whose first UOffsetT is the element count.
+	off += t.Pos
+	off += GetUOffsetT(t.Bytes[off:])
+	return int(GetUOffsetT(t.Bytes[off:]))
+}
+
+// Vector retrieves the start of data of the vector whose offset is stored
+// at "off" in this object.
+func (t *Table) Vector(off UOffsetT) UOffsetT {
+	// off is table-relative; convert to an absolute buffer position.
+	off += t.Pos
+	x := off + GetUOffsetT(t.Bytes[off:])
+	// data starts after metadata containing the vector length
+	x += UOffsetT(SizeUOffsetT)
+	return x
+}
+
+// Union initializes any Table-derived type to point to the union at the given
+// offset.
+func (t *Table) Union(t2 *Table, off UOffsetT) {
+	// off is table-relative; the field there holds a further relative
+	// UOffsetT to the union's actual object.
+	off += t.Pos
+	t2.Pos = off + t.GetUOffsetT(off)
+	// t2 shares the same backing buffer; no copy is made.
+	t2.Bytes = t.Bytes
+}
+
+// GetBool retrieves a bool at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetBool(off UOffsetT) bool {
+	return GetBool(t.Bytes[off:])
+}
+
+// GetByte retrieves a byte at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetByte(off UOffsetT) byte {
+	return GetByte(t.Bytes[off:])
+}
+
+// GetUint8 retrieves a uint8 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetUint8(off UOffsetT) uint8 {
+	return GetUint8(t.Bytes[off:])
+}
+
+// GetUint16 retrieves a uint16 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetUint16(off UOffsetT) uint16 {
+	return GetUint16(t.Bytes[off:])
+}
+
+// GetUint32 retrieves a uint32 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetUint32(off UOffsetT) uint32 {
+	return GetUint32(t.Bytes[off:])
+}
+
+// GetUint64 retrieves a uint64 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetUint64(off UOffsetT) uint64 {
+	return GetUint64(t.Bytes[off:])
+}
+
+// GetInt8 retrieves an int8 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetInt8(off UOffsetT) int8 {
+	return GetInt8(t.Bytes[off:])
+}
+
+// GetInt16 retrieves an int16 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetInt16(off UOffsetT) int16 {
+	return GetInt16(t.Bytes[off:])
+}
+
+// GetInt32 retrieves an int32 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetInt32(off UOffsetT) int32 {
+	return GetInt32(t.Bytes[off:])
+}
+
+// GetInt64 retrieves an int64 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetInt64(off UOffsetT) int64 {
+	return GetInt64(t.Bytes[off:])
+}
+
+// GetFloat32 retrieves a float32 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetFloat32(off UOffsetT) float32 {
+	return GetFloat32(t.Bytes[off:])
+}
+
+// GetFloat64 retrieves a float64 at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetFloat64(off UOffsetT) float64 {
+	return GetFloat64(t.Bytes[off:])
+}
+
+// GetUOffsetT retrieves a UOffsetT at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetUOffsetT(off UOffsetT) UOffsetT {
+	return GetUOffsetT(t.Bytes[off:])
+}
+
+// GetVOffsetT retrieves a VOffsetT at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetVOffsetT(off UOffsetT) VOffsetT {
+	return GetVOffsetT(t.Bytes[off:])
+}
+
+// GetSOffsetT retrieves an SOffsetT at the given offset.
+// off is an absolute byte position in t.Bytes.
+func (t *Table) GetSOffsetT(off UOffsetT) SOffsetT {
+	return GetSOffsetT(t.Bytes[off:])
+}
+
+// GetBoolSlot retrieves the bool that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetBoolSlot(slot VOffsetT, d bool) bool {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetBool(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetByteSlot retrieves the byte that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetByteSlot(slot VOffsetT, d byte) byte {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetByte(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetInt8Slot retrieves the int8 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt8Slot(slot VOffsetT, d int8) int8 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetInt8(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetUint8Slot retrieves the uint8 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint8Slot(slot VOffsetT, d uint8) uint8 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetUint8(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetInt16Slot retrieves the int16 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt16Slot(slot VOffsetT, d int16) int16 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetInt16(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetUint16Slot retrieves the uint16 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint16Slot(slot VOffsetT, d uint16) uint16 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetUint16(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetInt32Slot retrieves the int32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt32Slot(slot VOffsetT, d int32) int32 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetInt32(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetUint32Slot retrieves the uint32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint32Slot(slot VOffsetT, d uint32) uint32 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetUint32(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetInt64Slot retrieves the int64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt64Slot(slot VOffsetT, d int64) int64 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetInt64(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetUint64Slot retrieves the uint64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint64Slot(slot VOffsetT, d uint64) uint64 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetUint64(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetFloat32Slot retrieves the float32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetFloat32Slot(slot VOffsetT, d float32) float32 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetFloat32(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetFloat64Slot retrieves the float64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetFloat64Slot(slot VOffsetT, d float64) float64 {
+	if off := t.Offset(slot); off != 0 {
+		return t.GetFloat64(t.Pos + UOffsetT(off))
+	}
+	return d
+}
+
+// GetVOffsetTSlot retrieves the VOffsetT that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+//
+// Note: unlike the other *Slot getters, this returns the vtable entry
+// itself (a table-relative offset), not a value read from the table body.
+func (t *Table) GetVOffsetTSlot(slot VOffsetT, d VOffsetT) VOffsetT {
+	off := t.Offset(slot)
+	if off == 0 {
+		return d
+	}
+	return VOffsetT(off)
+}
+
+// MutateBool updates a bool at the given offset.
+// It always returns true; off is an absolute position in t.Bytes.
+func (t *Table) MutateBool(off UOffsetT, n bool) bool {
+	WriteBool(t.Bytes[off:], n)
+	return true
+}
+
+// MutateByte updates a Byte at the given offset.
+// It always returns true.
+func (t *Table) MutateByte(off UOffsetT, n byte) bool {
+	WriteByte(t.Bytes[off:], n)
+	return true
+}
+
+// MutateUint8 updates a Uint8 at the given offset.
+// It always returns true.
+func (t *Table) MutateUint8(off UOffsetT, n uint8) bool {
+	WriteUint8(t.Bytes[off:], n)
+	return true
+}
+
+// MutateUint16 updates a Uint16 at the given offset.
+// It always returns true.
+func (t *Table) MutateUint16(off UOffsetT, n uint16) bool {
+	WriteUint16(t.Bytes[off:], n)
+	return true
+}
+
+// MutateUint32 updates a Uint32 at the given offset.
+// It always returns true.
+func (t *Table) MutateUint32(off UOffsetT, n uint32) bool {
+	WriteUint32(t.Bytes[off:], n)
+	return true
+}
+
+// MutateUint64 updates a Uint64 at the given offset.
+// It always returns true.
+func (t *Table) MutateUint64(off UOffsetT, n uint64) bool {
+	WriteUint64(t.Bytes[off:], n)
+	return true
+}
+
+// MutateInt8 updates an Int8 at the given offset.
+// It always returns true.
+func (t *Table) MutateInt8(off UOffsetT, n int8) bool {
+	WriteInt8(t.Bytes[off:], n)
+	return true
+}
+
+// MutateInt16 updates an Int16 at the given offset.
+// It always returns true.
+func (t *Table) MutateInt16(off UOffsetT, n int16) bool {
+	WriteInt16(t.Bytes[off:], n)
+	return true
+}
+
+// MutateInt32 updates an Int32 at the given offset.
+// It always returns true.
+func (t *Table) MutateInt32(off UOffsetT, n int32) bool {
+	WriteInt32(t.Bytes[off:], n)
+	return true
+}
+
+// MutateInt64 updates an Int64 at the given offset.
+// It always returns true.
+func (t *Table) MutateInt64(off UOffsetT, n int64) bool {
+	WriteInt64(t.Bytes[off:], n)
+	return true
+}
+
+// MutateFloat32 updates a Float32 at the given offset.
+// It always returns true.
+func (t *Table) MutateFloat32(off UOffsetT, n float32) bool {
+	WriteFloat32(t.Bytes[off:], n)
+	return true
+}
+
+// MutateFloat64 updates a Float64 at the given offset.
+// It always returns true.
+func (t *Table) MutateFloat64(off UOffsetT, n float64) bool {
+	WriteFloat64(t.Bytes[off:], n)
+	return true
+}
+
+// MutateUOffsetT updates a UOffsetT at the given offset.
+// It always returns true.
+func (t *Table) MutateUOffsetT(off UOffsetT, n UOffsetT) bool {
+	WriteUOffsetT(t.Bytes[off:], n)
+	return true
+}
+
+// MutateVOffsetT updates a VOffsetT at the given offset.
+// It always returns true.
+func (t *Table) MutateVOffsetT(off UOffsetT, n VOffsetT) bool {
+	WriteVOffsetT(t.Bytes[off:], n)
+	return true
+}
+
+// MutateSOffsetT updates an SOffsetT at the given offset.
+// It always returns true.
+func (t *Table) MutateSOffsetT(off UOffsetT, n SOffsetT) bool {
+	WriteSOffsetT(t.Bytes[off:], n)
+	return true
+}
+
+// MutateBoolSlot updates the bool at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateBoolSlot(slot VOffsetT, n bool) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateBool(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateByteSlot updates the byte at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateByteSlot(slot VOffsetT, n byte) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateByte(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateInt8Slot updates the int8 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateInt8Slot(slot VOffsetT, n int8) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateInt8(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateUint8Slot updates the uint8 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateUint8Slot(slot VOffsetT, n uint8) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateUint8(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateInt16Slot updates the int16 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateInt16Slot(slot VOffsetT, n int16) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateInt16(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateUint16Slot updates the uint16 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateUint16Slot(slot VOffsetT, n uint16) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateUint16(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateInt32Slot updates the int32 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateInt32Slot(slot VOffsetT, n int32) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateInt32(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateUint32Slot updates the uint32 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateUint32Slot(slot VOffsetT, n uint32) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateUint32(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateInt64Slot updates the int64 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateInt64Slot(slot VOffsetT, n int64) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateInt64(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateUint64Slot updates the uint64 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateUint64Slot(slot VOffsetT, n uint64) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateUint64(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateFloat32Slot updates the float32 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateFloat32Slot(slot VOffsetT, n float32) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateFloat32(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}
+
+// MutateFloat64Slot updates the float64 at the given vtable location.
+// It returns false (without writing) if the field is not present.
+func (t *Table) MutateFloat64Slot(slot VOffsetT, n float64) bool {
+	if off := t.Offset(slot); off != 0 {
+		t.MutateFloat64(t.Pos+UOffsetT(off), n)
+		return true
+	}
+
+	return false
+}