Squashed 'third_party/flatbuffers/' changes from acc9990ab..d6a8dbd26

d6a8dbd26 Experimental fix for failing oss-fuzz coverage build (#6259)
ed391e177 BREAKING: Rust flexbuffers serde human readable set to false (#6257)
a49531414 Update to flags in fuzzing-cmake file (#6256)
de1f0342c Remove _POSIX_C_SOURCE and _XOPEN_SOURCE definitions when compiling o… (#6205)
d0d51e2a5 flatc should support --binary --schema with optional scalar fields. (#6252)
33ab26017 Bump version of rules_go to 0.24.5 (#6234)
febb9d87c Union As Accessors for C# (#6251)
8778dc7c2 Resets buffer without deallocating current pointer (#6247)
aae376e9a Add GetBufferSpan() function to bufferbuilder (#6235)
0ff047148 Modernize android build and sample (#6229)
46a8c7e95 Added required-nested-flatbuffer to monster_test and fixed rust (#6236)
bc56c553e Notify based on Labelling issues and PR (#6241)
07d7cd78a Converted globs to use single quotes (#6240)
cdef70e24 More adjustments to the auto labeler (#6239)
9dd44df35 Updated Lua labeller glob (#6238)
c9b29d088 Support size-prefixed buffers and add tests for size-prefixed messages (#6232)
fba93e0ab Removes duplicate swift in labeler (#6228)
d1a545b1f Added more labels for auto labeler (#6227)
ea92a668d [C#] Optional Scalars (#6217)
6034de286 [Label Bot] Add Java and Kotlin support for the label bot (#6226)
b08b0a440 Implement `Debug` trait for Rust flatbuffers. (#6207)
17ae48dec [Label Bot] Adds some languages to labeler bot (#6222)
fc8097925 Auto Labeler Setup, Just C# for now (#6221)
55658f523 Auto Labeler Setup, Just C# for now (#6218)
14ecfe423 Updated comments and fixed a fundamental type error. (#6214)
a0182cdb1 optional scalars for ts/js (#6215)
0dfcc0a37 Adds NetTest.bat to run .NET Core tests on Windows (#6216)
f9a18ea63 [Java] Implement optional scalars (#6212)
c7586e85a Empties the sharedString map on reset on go and csharp (#6187)
914c64601 Removed C# references from java generator. Move annotations closer to definitions (#6204)
42d7c7997 Adds readable size to asserts in read functions (#6210)
e68e8d7de Refactor idl_gen_rust (#6206)
84809be7e Fix typo in flatbuffers::span declaration. (#6202)
1606fb637 Kotlin test optional enum (#6201)
fe8e3c7e5 Mass Refactoring to use `IsString` and other BASE_TYPE helpers (#6193)
8f6fa4b71 Updated SupportsAdvancedUnionFeatures to look out for string (#6190)
b46db38f5 [JS/TS] Rewrite flexbuffers JS to TS (#6148)
9fa1d2705 Rework enums in rust. (#6098)
a402b3aba idl_gen_json_schema Fix generation of arrays of enums (#6184)
0e1415b99 fix(go_test): remove deprecated grpc call (#6183)
5cd713710 Add generation of JSON Schema to library (#6165)
5be777e1d Bump junit from 4.12 to 4.13.1 in /grpc/tests (#6173)
a49d440ec Bump junit from 4.12 to 4.13.1 in /grpc (#6172)
4ec5e8db9 [C++] Add option to not generate direct copy methods. (#6166)
04bec23a3 Add Array initialization from struct constructor (#5865) (#6147)
77d57fd07 Cast to right type for reserved_ subtraction (#6167)
543c1bbeb Fixed rust nested flatbuffers for tables other than self (#6062)
cb971eece [C++] Fix -Wnarrowing and -Woverflow due to signed bitfields on G++ ARM (#6163)
7b9e61fcc [TS] GRPC  Implementation (#6141)
3359e3042 Moved C++ to optional_scalars2 and added some tests. (#6162)
187a4787f [Rust] Upgrade flatbuffers library to 2018 edition (#6159)
08943aa26 Flatbuffer C++ UnpackTo optimization for vectors of non-bool bytes. (#6154)
5975658eb Enables optional enums in swift (#6160)
5d3cf440e Updated Lobster test for optional bools/enums
8ec8322f0 Ruopt enum (#6156)
bbcc85fd4 Fix generation of C++ code with Optional<Enum> (#6155)
0bdf2fa15 [C#] Fix and improve project files (#6142)
2eedc769d possibility to create a vector from an iterator (#6135)
ab01ae162 flatc should output a warning, when an attribute is attached more than once (#6146)
689bfafa7 [Python/JS/TS] Codegen SizeOf method for structs (#6136)
641309a5b unix2dos on tests/FlatBuffers.Test/FlatBuffers.Core.Test.csproj (#6133)
52e362879 SpanT is available in .Net Standard 2.0. (#6137)
dca12522a Add static cast to avoid implicit double promotion. (#6132)
e0bbaa6f9 [C#]Change to ENABLE_SPAN_T that doesn't require UNSAFE_BYTEBUFFER. (#6073)
ab139d6be Revert "[C#] Fix and improve project files (#6116)" (#6130)
34d67b425 Minireflect fixed array (#6129)
96d5e3597 [JS/TS] fix flatbuffers default export (#6123)
eb686a86f Add missed file generated by optional_scalar.fbs (#6125)
750281630 [C#] Fix and improve project files (#6116)
fb4e1c34f Add CharToLower and CharToUpper into util.s (#6126)
8c67b5b12 Add support of Optional<T> scalars to C++ code generator (#6092)
6228b66d3 [Kotlin] Support for optional scalars. (#6115)
e1be8aaad Bump version for latest swift version (#6121)
94873e595 [JS/TS] Modernize TypeScript / JavaScript flatbuffers support (#6095)
b8e87fafe [JS] Add getFullyQualifiedName() (#6119)
f96d1ef74 [Java] allowing larger buffer sizes when growing a byte buffer (#6118)
89435303b [Swift] Migrates struct write APIS to write directly to the buffer (#6093)
c75ae2429 Optional-ness in reflection (#6097)
338944d3d Rename Nullable scalars to Optional scalars (#6112)
f5ab24bc4 Avoid memcpy call for empty vectors (#6111)
92a8c1a0f [JS] FlexBuffers Fix for wrong type of offset and length values (#6107)
6cea45dcd fix c# json serializer commandline argument docs (#6104)
fec58aa12 Fix for issue 6100: incorrect shrinking logic in ResizeContext() (#6102)
71aca81ff [JS] FlexBuffers Support (#5973)
04d87ffec [C++] Small refactoring of the C++ code generator (#6091)
bb25956f0 Wrap verify file id condition in Check call (#6085)
49f4948f0 + Add `removable-media` plug to the snapcraft config (#6083)
eeacc53d2 Adds proper access types for swift object api & flatbuffers & grpc (#6081)
f3003e08d [Lobster] missed a test
d713a0084 [CMake] enabled multi-core builds in VS projects
77f966f89 [Lobster] optional scalars support
e86d5b8e9 [Kotlin] Attach JvmStatic annotation to each method in companion object (#6052)
db2aa9b4e [C#] Cleaned up .NET testing script for Mono (#6016)
63cc0eec4 Adds a serialize helper function to native table (#6059)
c30a87de6 [TS] Fix four bugs with imported types in TypeScript. (#6054)
a0fb30575 [Swift] Append namespace for Swift Grpc implementation (#6049)
77c18c1d6 export a __version__ variable for python module (#5309)
f1f23d08e adding fb import when no other imports are present (#6030)
f1025b284 [Feature] Checks for Nullable strings (#6050)
5d052f4e5 [Swift] RFC: Switch Swift namespace from public enum to ordinary concat with _ (#6045)
18b015d25 Rust codegen improvements and lint fixes (#6046)
d76e93f27 adds code gen for optional scalars in swift (#6038)
82fac326c [C++] Fix compiler error from deleted assignment operator (#6036) (#6047)
043b52bd4 Optional Scalars support for Rust (#6034)
c8fa0afdf Allow to run cpp tests under grpc/tests/ using bazel. (#6040)
6d0aae73c Fix git command executed in wrong folder when doing cmake superbuild (#6039)
ff1b73128 [Swift] Optional Scalars Preparation (#6028)
2e48c8dd3 tests: Check for both quiet and signaling NaN on mipsel/hppa (#6029)
6942704f2 support deprecated flag in json schema (#6022)
9ecd2e16c Flatc parser support for nullable scalars (#6026)
33e2d8079 [Dart] Generate constant values map for enums (#6025)
969d0f7a6 Using proper symbol name for reexport (#6021)
515a4052a Silence false positive "-Wstringop-overflow" on GCC 10.0 to 11.0 (#6020)
36fbe6f13 Updated FB import (#6019)
b69fc8cc9 [Java] Add support for shared strings on FlatBufferBuilder. (#6012)
ab6af18d9 Not using non-existent create method for obj api (#6015)
37a5dee10 Code cleanup + updates test and readme (#6004)
8a721f69a Serde with bytes maps to Blob (#6009)
e810635ea [Swift] FlatBuffers createMonster method doesn't treat struct properly (#5992)
4995e1527 Manage grpc dependency in Bazel's WORKSPACE file. (#5995)
60b6066fe Add warning to schema parser if field name is not snake_case. (#6005)
35d45cac7 [Rust] Flexbuffers dependency cleanup and fixes (#5998)
165a6e3d1 Re-added Evolution Schema Code Generation Command (#5999)
13d3fb2ea Fix RPM install conflict (#6003)
d64078eb2 [Swift] Initialize memory when clear ByteBuffer (#5982)
ca1190a3d [TS] Use proper TypedArray in create*Vector (#5991)
7571b2ac5 [C++] Updates real_path to be truly portable (#5787)
e5a8f76a4 [C++] Generate default member initializers for >= C++11 (#5989)
413bb9b55 [Kotlin] Fix Access to union of vector element (#5994)
f35184aef [Swift] Add parsing from unowned UnsafeMutableRawPointer for ByteBuffer (#5981)
b124b7625 Removed requirement that enums be declared in ascending order. (#5887)
0ec7600c6 Do not remove the last digit from float values (#5974)
14baf45c9 Mark GetBufferMinAlignment() const (#5985)
9abb2ec2c TypeScript/JavaScript docs improvements (#5984)
2e57d80b1 [Swift] Internal library improvements (#5965)
cfc7753a4 [Doc] Added missing letters to compiler options (#5976)
12ddc8a92 Rust Flexbuffers Documentation update (#5979)
24ad35709 [docs] typo: updates monsterdata.json to be valid json (#5978)
cc44a4442 [idl_parser] Mark typefield as deprecated (#5958)
9ab4a5c0e Deleted old stale bot
6682cfe87 Increased Operations per run in stale.yml
64922904b Adding Stale Action to clean up PR and Issues
8e505cb67 [C++] Fixed/Enabled --cpp-ptr-type std::shared_ptr [#5813] (#5959)
a28357d7a Propagate boolean default values from proto to fbs (#5964)
7cb4762a6 [Swift] Improving reallocation time by using memcpy and moving reallocation code to storage (#5960)
4e45f7c9e Fix error in SimpleQSort (#5955)
7ac026405 fix error on GRPC Python - ignore namespace tree if not specified (#5862) (#5922)
108e981db Required is now implemented in swift (#5952)
94a78e385 Fixed: Access violation and ASAN/UNSAN failures with sorted tables
53fb453e0 [rust] Add FlatBufferBuilder::force_defaults API (#5946)
17c1f35fa [FlexBuffer][Java] ReadWriteBuf and ReadBuf interface public (#5948)
2eaf57778 [Java] Grow ArrayReadWriteBuf enough to match requested capacity. (#5921)
666800da3 Adds bool support in structs + updates grpc support + CI upgrades (#5943)
38ed69eb3 fixed mutating inline values (#5942)
d026e6f07 Add static asserts to ensure that reflection API arrays are kept in sync (#5934)
988164f6e [C++] Got rid of memset's in constructors (#5938)
7179a5a8b General Codebase clean up (#5939)
a0da0c08c Add GetStringView like GetString, GetCstring (#5937)
ac203b209 [C#] Add file identifier to ObjectAPI Serialization Utility. (#5920)
8dd1bf25b not creating creation methods when using structs (#5919)
5aa443d98 [Dart] Adding FlexBuffers support (#5853)
0fa087657 [Dart] Getting tests/DartTest.sh to work on master. (#5915)
424a473e1 Schema parser: prohibit declaration of an array of pointers inside structs (#5907)
c3faa8346 Fix Cargo.toml dependencies (#5911)
91399ad05 fix union type names (#5902)
32782e4ad Update Rust Flexbuffers metadata before publishing (#5905)
e7f3b1690 [TS] Make Obj-API work with --short-names (#5898)
12ed1fe4a fixed invalid imports with --gen-all (#5895)
85ee4df7a [C#] Thread safe reads of Double and Float values from a ByteBuffer (#5900)
de89bd193 Implement flexbuffers in python (#5880)
8be05f6bd Rust Flexbuffers (#5669)
870ecbc09 [swift] Moves code to use VTablesStorage (#5888)
c2da8d5d8 [Java][FlexBuffers] Make FlexBuffersBuilder reusable by adding clear() (#5889) (#5890)
e84cbff67 Align package name to FindFlatBuffers.cmake (#5899)
f94e6c84e Small tutorial improvements - documentation only (#5894)
f12cca8bc Attempt at adding Github Actions CI
7e4124d6e Handle git program or .git folder absence (#5878)
a875d247a only add native_include_files if object based api is enabled (#5886)
6e9f5d981 Respect shared attribute in Parser (#5885)
ff1c78233 include/flatbuffers: typo fixes in comments (#5884)
2e9a19673 Updates swift docs for package management (#5883)
e3cb07d32 [Rust] idl_gen_rust.cpp: (Option/required-aware codegen for unions) (#5850)
712866d57 Propagate use_string_pooling in CopyTable (#5879)
44c919a9e Not using reexports with --gen-all (#5873)
99aa1ef21 Added INCLUDE_PREFIX option for flatbuffers_generate_headers (#5866)
40ba170c9 Fixed text in internals doc that implied structs can be root
cb4d0f72e [Swift] Object API support  (#5826)
003e16405 [TS] Add Obj API (#5788)
21cf300f4 fix cpp usage markdown error (#5845)
9655e12d6 Upgraded swift implementation for grpc (#5843)
fb96fadc2 [C#] Fix nested structs and arrays in Object API (#5765)
408f11fbd [ts] Fix empty source/dest namespaces when reexporting. (#5841)
a83caf591 Improves performance for the swift library by using structs + a storage class (#5835)
925fab6b1 [Java][FlexBuffers] Optimize Map access (#5735)
d9fecc332 [CMake] : Add precompiled header support with FLATBUFFERS_ENABLE_PCH (#5827)
e9d453240 Added flatbuffers_generate_headers and flatbuffers_generate_binary_files cmake functions. (#5830)
c37c989ed Correct calculation of vector element size (#5831)
6b271b7ec Fix Clang-trunk warnings about special members deprecated in C++20. (#5829)
90f3b8e8c Fix `float_constant` definition in './doc/Grammar.md` (#5828)
3af735934 [csharp] flatc should generate a 'Create…' method for tables with struct field… (#5818)
c4231c3cb Updated doxyfile - added missing files (#5824)
9657df184 Update Grammar.md (#5820)
97ffc590e Include CPack only when explictly packaging (#5817)
8b52af65b [C++] Add max_depth and max_tables parameters to reflection::Verify (#5815)
9b034eee1 Fix interpretation of 'nan(number)' by the idl_parser (#5810)
3e9ac3cff [Scripts] Adds swift to generated code (#5806)
697147a2e updated maven build files
6df40a247 pre-tag version bump for 1.12
0dba63909 Removes the inner loop in the endtable check written tables (#5803)
0e3fdd0ee Escape characters in jsonschema descriptions (#5644)
45a2b07cb Remove `noexcept` qualifier copy-ctor of `union` type (#5800) (#5802)
d10c16314 Replace 'assert' by 'FLATBUFFERS_ASSERT' inside idl_parser.cpp (#5799)
35abb7f89 Add non-nullable modifier to return type of functions never returning null (#5797)
9954e09ab [C++] Generate code for vector force_align attribute. (#5796)
95a21327f rust: pub export the VectorIter type (#5736)
89b6183ee Fix Python min alignment
5a98d65e8 [Rust] Add gen-name-strings for Rust (#5757)
f73d205bc Removed assert that wasn't backwards compatible.
7c37abe92 [C#] add ObjectAPI Serialization Utility (#5785)
4749e77b0 Fixed docs on how to obtain parser error.
6ff189841 Added --filename-suffix and --filename-ext to flatc (#5778)
c9a30c9ca Fixed refractoring issue in reflection/generate_code.sh. Also, mv deletes the original file, so I don't need to clean it up manually in that case. (#5777)
8c02d17be Skip writing reflection_generated.h if not changed (#5776)
34305c4ce [Swift] Adds GRPC to Swift (#5758)
cd88e6b2a [Java][FlexBuffers] Abstract buffer access from ByteBuffer (#5743)
3ec7a53c6 Adds cocoapods and a readme of how to get the package (#5771)
6d44cede7 [snap] Fix versioning (#5727)
cc08c0835 [Python] Fixed potential allignment issue (#5768)
54f8b787c Fix memory leak on cpp object api (#5761)
17557f913 [Python] Fixed issue #5499 (#5764)
d54af8cd4 [C++] Use strong enum type for vectors when scoped-enums is on. (#5750)
173e10fdf [C#] support Json Serialization (#5752)
8f56990f6 FlexBuffers: C++: scalar-only typed vectors were not aligned.
6400c9b05 Bump Rust port to 0.6.1 (#5747)
7418d8587 [C#] support Object API (#5710)
c580fa284 Adds min and max, comments, and all of swift's keywords + fix docs (#5737)
f2a127230 Use VS 2017 and 2019 on CI, fix cast issue in dart_idl (#5740)
316d7c208 Creates a flatbuffers validation function + small fix (#5725)
47026ea6b Added the code to embed the binary schema to the source (--bfbs-gen-embed). (#5701)
3f677f241 [Java][FlexBuffers] Deprecate typed vector strings due to design flaw (#5722)
a593a11e5 [Go] Implements a SharedStrings function (#5733)
7cdfc8475 [Swift] Fix padding function overflow when bufSize is 0 (#5721)
bab2b0db4 Add vcpkg installation instructions (#5732)
89418eb84 [Dart] Fix deprecated field support, inf/nan (#5724)
9cadf05d8 [typescript] Size-prefixed root accessors not taking into account size prefix (#5717)
6da1cf79d [rust] Add use declarations to Rust-generated bindings for imported FB definitions (#5645)
bee1df96d [Go] Replace references to hardcoded ”Monster" etc with idiomatic go wherever possible (#5716)
01189d7ed [C++] Fix for printing of enum in case output_enum_identifiers=1. (#5706)
c4b2b0a25 [Swift] Support create long string (#5709)
a4b2884e4 Added create function for swift (#5707)
04d80f255 [Swift] Swift implementation 🎉🎉 (#5603)
55686100a Changed direct calls to strtod to use StringToNumber
718351831 Document JSON compatibility guarantees. (#5704)
d1b34f0f2 Add CMake 'generated_code' target to simplify resolution of build dependencies (#5697)
21b706196 (Optionally) add an additional suffix namespace to generated fbs files. (#5698)
35daaf83d [Java] Replace Table.UTF8_CHARSET with StandardCharsets.UTF_8 (#5696)
3b458f7a1 Rust: Temporarily disable 2 endianness unit tests (#5695)
a5d9d0f7d [C++17] Add Traits class for Tables and Factory function within it. (#5678)
3cd9b6434 Removed code_generators.cpp from library targets
355dfd48d [rust] Make enum names public (#5690)
bcd58a159 Correct inverted logic around include prefixes. (#5689)
a2c12900a Optimize Pack method using numpy (#5662)
901b89e73 [C++] Add Builder and Table typedefs (#5685)
31f879908 Minor doc updates: FlexBuffers C#, Discord, CppUsage.
8023d99e2 Upgrade rules_go (#5684)
b4154405d Fix --incompatible_load_cc_rules_from_bzl (#5683)
04c17c7a7 Add support for absl::string_view when available (#5682)
62ec7d52c [Bazel] Add support for compatible_with and restricted_to (#5681)
7de668053 CI: New Docker tests for Python with numpy (#5677)
3a70e0b30 Fixed struct initialization error on older versions of C#
9b1320135 Fixed warnings in FlexBuffers.java
5e3916050 Fixed out of date licenses on gRPC Python files.
c95755051 Removed test proto output.
44bf71988 Add flatc '--cpp_std' switch (#5656)
3e8f15df9 Fix for FlexBuffers FBT_VECTOR_STRING size bit-width.
602721a73 Added Check to VerifyAlignment (#5675)
13c05f4da Improve import handling for proto conversion (#5673)
ce3a1c43a [Dart] Fix prepare space for writeListInt64 and writeListUint64 (#5654)
aa75e5734 Make Rust constants public (#5659)
2790fee25 Add namespace qualification to union types (#5666)
eddebec1b Bugfix for Rust generation of union fields named with language keywords (#5592)
030fee36a wrap multiple statements in do {} while(!IsConstTrue(true)) (#5655)
f9724d1bd [gRPC] Uncomment MessageBuilder (#5658)
b20801ca4 Supress unsigned-integer-overflow for PaddingBytes (#5647)
a8e800bd7 Add --force-empty-vectors option (#5653)
d7530ae96 Fixed enum min/max values not properly escaped.
99d11e279 Split Bazel targets into multiple packages (#5640)
4fd8eb214 Remove a static_assert (#5643)
65f870357 Flatbuffers Python Object API (#5616)
75823cc27 [Clang 10]: definition of implicit copy constructor for 'TableKeyComparatoris deprecated #5649 (#5650)
58e279244 [docs]: add missing semicolon (#5648)
3c964e10a [GO] Fix support for enums with underscores and Unions with imported members (#5600)
c3c32ec94 Fix ambiguity of a type deduction in TEST_EQ macro if arguments have `enum class` type. (#5630)
075e8d676 Simplify declarations of x-macro FLATBUFFERS_TD (#5638)
bcf1bd5c9 read vtable size through ReadScalar() (#5636)
136d75fa6 Changed null checks in test. Removed verifier pointer usage (#5634)
091fa1fd1 Add testing of C++ with sanitizers (CI-Docker) (#5631)
ff3781dc2 add namespace prefix in FLATBUFFERS_MAX_BUFFER_SIZE (#5629)
6beb9f49c Support for python grpc - continuing the work from the pull request #4270 #4705 (#5613)
80988ea86 Removed idl_gen_general.cpp and move contents to code_generators.cpp (#5625)
0f2ff7eaa Lua cleanup (#5624)
dda095023 [C++] Adds basic schema evolution tests (#5611)
adbcbba5d [C++, C#, Java] Separated C# and Java generators into their own classes (#5618)
cbbd6aca0 add check for root_type specified for json schema generation (#5622)
405c64e07 [Rust] Bump smallvec version to 1.0 (#5621)
42c08cbca Ran src/clang-format-all.sh (#5617)
33d5dd9bd Improved pull request & clang-format instructions.
105dd528e Change monster_extra generation to use flatbuffers::unique_ptr (#5612)
f0f0efe7b [C++] Refactor to conform to Google C++ style guide (#5608)
e837d5a29 Fixed deprecated method in GRPC Java test.
9834ee978 Fixed Apache license not using canonical version.
44b2ab087 include/flatbuffers/base.h: fix no_sanitize issue with old clang (#5610)
46ae3f80a [C++, Java, C#, TypeScript, JavaScript] Skip generation of mutable union types (#5599)
7b38aa71e flatbuffers.h: fix documentation warning (#5607)
661bedd83 Add Lua FlatbufferBuilder Clean() method to enable reuseable builders (#5606)
8526e12d7 [Kotlin] Fix union vector accessor after change in Java API (#5605)
3c7b660d6 [flatc] Remove an always true condition for flexbuffers (#5604)
964365ba6 [Go] Add UnPackTo functions (#5598)
32254b7ac [Go] Object API support (#5339)
521e255ad Rust: Add idiomatic iterator for Vector type (#5579)
1b85292fd Fix typos in comments (#5590)
480815447 C++ verifier for evolved union fields should return true (#5586)
8d5e424c6 Add ByteBuffer copy for vector of bytes in Java (#5587)
b4774d235 Rust: Fix Copy and Clone impls for a few generic types (#5577)
26f238c24 Add `--clean-first` to the cmake-build command (travis) (#5574)
e93c8c46e Fix Follow implementation for bool (#5554)
e21516b9d Fix issue #5557 (#5573)
fbc11e8ae Avoid intentional unsigned integer overflow getting caught by sanitizers (#5572)
e9d29c21a Python: Add forceDefaults opt to python Builder (#5564)
8bfafc76d Java: Don't annotate vector-of-tables item getters with @nullable. (#5562)
df3e8bf4a Fixed warnings generated by recent JSON sorting feature.
5665cfe49 [Java] byte buffer factory returned buffer capcity is used instead of the requested size (#5558)
5797540ed #5544 Fix of Array of table is not sorted if Create<type>Direct() is used (#5546)
7f1af7cb0 Fix build with gcc version 7.4.0 (#5570)
32f47ad24 Fixed JSON parser not sorting vectors of tables/structs with key.
842f672ba [FlexBuffers][Java] Cache size of Sized objects in FlexBuffers (#5551)
d4cae0a62 Fix issue #5542 (#5543)
f1147f65b Fixed Android STLPort related error.
69d3fec48 Fix namespaced struct/field name collision detection (#5540) (#5545)
cfb4ecf6f [flac] Add FlexBuffers option for generating data (#5519)
a92039687 Update Rust versions under test from 1.30.1 to 1.37.0 (#5538)
625338d09 Adds XOPEN_SOURCE for PATH_MAX and POSIX 1993 for stat (#5529)
3f8ce99c5 [FlexBuffers][Java] Add override Key::toString (#5533)
0798b7b69 [FlexBuffers][Java] Fix wrong access to a string using Reference::asString(). (#5532)
cbdf82e2f Fix Mutate() methods of Array<scalar/struct> (override 5508) (#5526)
e365c502f Java: Added access object for vector of struct and vector of tables. (#5233)
97f3aa917 Fixed DetachedBuffer self move assignment (#5521)
2f5bb2eec Fix buildifier warnings found in new bazel (#5517)
917687c7a Fixed Reflection Verifier not handling vectors of unions.
f9277e691 Fixed GenerateText not handling vectors of unions.
2706381ee Add element size parameter to __vector_as_arraysegment [c#] (#5512)
b5560fcd5 [Java][FlexBuffers] Improve documentation for FlexBuffers in Java. (#5506)
782b865c5 Annotate getters with @Pure when --java-checkerframework is specified. (#5510)
3bfc86eaf [Dart]fix: segment fault with empty namespace when generating dart file (#5507)
c0282873f Rust: Fixed cargo clippy on non-generated code (#5485)
4b870aca9 [Javascript] Fix syntax error for signed enum (#5503)
d0e3870c0 [C#] Fix retrieving enumeration vectors as arrays (#5457)
fb25eb87f Doc typo fixes (#5505)
cb35d3a0e Use all of the available space in the buffer returned by ByteBufferFactory to allow the factory to keep a pool of larger than initialsize sized buffers. (#5500)
8e6cabb31 [FlexBuffers][Java] Implementation of FlexBuffers API (#5476)
bd31dd242 Clarified value reuse in FlexBuffers
65b67d213 Fixed test build invocation of arrays_test.fbs
1fbb71132 FlexBuffers: allow any values to be shared.
cd75a3658 Android: remove app_dummy() calls
ec6b0bf29 Fixed STLPort Android compile error
c11b5d744 [bugfix]flexbuffers isvector bugfix (#5488)
4525c91be Fix incorrect padding in arrays of structs (Issue #5484) (#5491)
b97b342f5 Fixed missing generated code.
c1058a903 C++ IDL generation adds superfluous semicolon in GenTablePost, causing (#5483)
303044934 [go]add Name() for ForceCodec  interface (#5486)
a2485d4ec reflection: check for valid union enum value during object verification (#5475)
a20e71ac9 has_method support for primitive fields in java runtime. Changed: idl.h, FlatBufferBuilder.java ,  idl_gen_general.cpp, idl_parser.cpp, flatc.cpp (#5468)

Change-Id: I836f4b43e6818bb16425a87899e6234ac86242aa
git-subtree-dir: third_party/flatbuffers
git-subtree-split: d6a8dbd26ff08a8868e0d0c1b4b67d31b40e4a7f
diff --git a/rust/flatbuffers/Cargo.toml b/rust/flatbuffers/Cargo.toml
index 32d9b1b..460c552 100644
--- a/rust/flatbuffers/Cargo.toml
+++ b/rust/flatbuffers/Cargo.toml
@@ -1,6 +1,7 @@
 [package]
 name = "flatbuffers"
-version = "0.6.0"
+version = "0.7.0"
+edition = "2018"
 authors = ["Robert Winslow <hello@rwinslow.com>", "FlatBuffers Maintainers"]
 license = "Apache-2.0"
 description = "Official FlatBuffers Rust runtime library."
@@ -10,4 +11,5 @@
 categories = ["encoding", "data-structures", "memory-management"]
 
 [dependencies]
-smallvec = "0.6"
+smallvec = "1.0"
+bitflags = "1.2"
diff --git a/rust/flatbuffers/src/builder.rs b/rust/flatbuffers/src/builder.rs
index 36d6c6a..a3c15f2 100644
--- a/rust/flatbuffers/src/builder.rs
+++ b/rust/flatbuffers/src/builder.rs
@@ -17,17 +17,18 @@
 extern crate smallvec;
 
 use std::cmp::max;
+use std::iter::{DoubleEndedIterator, ExactSizeIterator};
 use std::marker::PhantomData;
 use std::ptr::write_bytes;
 use std::slice::from_raw_parts;
 
-use endian_scalar::{emplace_scalar, read_scalar_at};
-use primitives::*;
-use push::{Push, PushAlignment};
-use table::Table;
-use vector::{SafeSliceAccess, Vector};
-use vtable::{field_index_to_field_offset, VTable};
-use vtable_writer::VTableWriter;
+use crate::endian_scalar::{emplace_scalar, read_scalar_at};
+use crate::primitives::*;
+use crate::push::{Push, PushAlignment};
+use crate::table::Table;
+use crate::vector::{SafeSliceAccess, Vector};
+use crate::vtable::{field_index_to_field_offset, VTable};
+use crate::vtable_writer::VTableWriter;
 
 pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;
 
@@ -52,6 +53,7 @@
     finished: bool,
 
     min_align: usize,
+    force_defaults: bool,
 
     _phantom: PhantomData<&'fbb ()>,
 }
@@ -85,6 +87,7 @@
             finished: false,
 
             min_align: 0,
+            force_defaults: false,
 
             _phantom: PhantomData,
         }
@@ -148,10 +151,9 @@
     #[inline]
     pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
         self.assert_nested("push_slot");
-        if x == default {
-            return;
+        if x != default || self.force_defaults {
+            self.push_slot_always(slotoff, x);
         }
-        self.push_slot_always(slotoff, x);
     }
 
     /// Push a Push'able value onto the front of the in-progress data, and
@@ -327,6 +329,36 @@
         WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
     }
 
+    /// Create a vector of Push-able objects.
+    ///
+    /// Speed-sensitive users may wish to reduce memory usage by creating the
+    /// vector manually: use `start_vector`, `push`, and `end_vector`.
+    #[inline]
+    pub fn create_vector_from_iter<T: Push + Copy>(
+        &mut self,
+        items: impl ExactSizeIterator<Item = T> + DoubleEndedIterator,
+    ) -> WIPOffset<Vector<'fbb, T::Output>> {
+        let elem_size = T::size();
+        let len = items.len();
+        self.align(len * elem_size, T::alignment().max_of(SIZE_UOFFSET));
+        for item in items.rev() {
+            self.push(item);
+        }
+        WIPOffset::new(self.push::<UOffsetT>(len as UOffsetT).value())
+    }
+
+    /// Set whether default values are stored.
+    ///
+    /// In order to save space, fields that are set to their default value
+    /// aren't stored in the buffer. Setting `force_defaults` to `true`
+    /// disables this optimization.
+    ///
+    /// By default, `force_defaults` is `false`.
+    #[inline]
+    pub fn force_defaults(&mut self, force_defaults: bool) {
+        self.force_defaults = force_defaults;
+    }
+
     /// Get the byte slice for the data that has been written, regardless of
     /// whether it has been finished.
     #[inline]
@@ -389,10 +421,7 @@
 
     #[inline]
     fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
-        let fl = FieldLoc {
-            id: slot_off,
-            off: off,
-        };
+        let fl = FieldLoc { id: slot_off, off };
         self.field_locs.push(fl);
     }
 
@@ -406,7 +435,7 @@
         // Write the vtable offset, which is the start of any Table.
         // We fill its value later.
         let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
-            WIPOffset::new(self.push::<UOffsetT>(0xF0F0F0F0 as UOffsetT).value());
+            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());
 
         // Layout of the data this function will create when a new vtable is
         // needed.
@@ -496,7 +525,7 @@
         {
             let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
             let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
-            debug_assert_eq!(saw, 0xF0F0F0F0);
+            debug_assert_eq!(saw, 0xF0F0_F0F0);
             emplace_scalar::<SOffsetT>(
                 &mut self.owned_buf[n..n + SIZE_SOFFSET],
                 vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
@@ -619,7 +648,7 @@
     #[inline]
     fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
         let n = self.make_space(x.len());
-        &mut self.owned_buf[n..n + x.len()].copy_from_slice(x);
+        self.owned_buf[n..n + x.len()].copy_from_slice(x);
 
         n as UOffsetT
     }
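
For illustration only (not part of this patch): a minimal sketch of how the two builder additions above, `create_vector_from_iter` and `force_defaults`, might be used together. It relies only on APIs visible in this diff plus `FlatBufferBuilder::new` and `unfinished_data` from the existing builder.

```rust
use flatbuffers::FlatBufferBuilder;

fn main() {
    let mut fbb = FlatBufferBuilder::new();

    // Write fields even when they equal their default value, instead of
    // omitting them from the vtable (disables the space optimization).
    fbb.force_defaults(true);

    // Build a vector of u32 directly from an iterator; the iterator must be
    // ExactSizeIterator + DoubleEndedIterator because elements are pushed in
    // reverse order.
    let _offset = fbb.create_vector_from_iter((0u32..5).map(|i| i * 2));

    // Bytes written so far (the buffer has not been finished with a root table).
    assert!(!fbb.unfinished_data().is_empty());
}
```
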
diff --git a/rust/flatbuffers/src/follow.rs b/rust/flatbuffers/src/follow.rs
index 4d3eff7..8dd70da 100644
--- a/rust/flatbuffers/src/follow.rs
+++ b/rust/flatbuffers/src/follow.rs
@@ -41,7 +41,7 @@
 
 /// FollowStart wraps a Follow impl in a struct type. This can make certain
 /// programming patterns more ergonomic.
-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct FollowStart<T>(PhantomData<T>);
 impl<'a, T: Follow<'a> + 'a> FollowStart<T> {
     #[inline]
diff --git a/rust/flatbuffers/src/lib.rs b/rust/flatbuffers/src/lib.rs
index ef54132..3abd33b 100644
--- a/rust/flatbuffers/src/lib.rs
+++ b/rust/flatbuffers/src/lib.rs
@@ -38,16 +38,17 @@
 mod vtable;
 mod vtable_writer;
 
-pub use builder::FlatBufferBuilder;
-pub use endian_scalar::{
+pub use bitflags;
+pub use crate::builder::FlatBufferBuilder;
+pub use crate::endian_scalar::{
     byte_swap_f32, byte_swap_f64, emplace_scalar, read_scalar, read_scalar_at, EndianScalar,
 };
-pub use follow::{Follow, FollowStart};
-pub use primitives::*;
-pub use push::Push;
-pub use table::{buffer_has_identifier, get_root, get_size_prefixed_root, Table};
-pub use vector::{follow_cast_ref, SafeSliceAccess, Vector};
-pub use vtable::field_index_to_field_offset;
+pub use crate::follow::{Follow, FollowStart};
+pub use crate::primitives::*;
+pub use crate::push::Push;
+pub use crate::table::{buffer_has_identifier, get_root, get_size_prefixed_root, Table};
+pub use crate::vector::{follow_cast_ref, SafeSliceAccess, Vector, VectorIter};
+pub use crate::vtable::field_index_to_field_offset;
 
 // TODO(rw): Unify `create_vector` and `create_vector_direct` by using
 //           `Into<Vector<...>>`.
diff --git a/rust/flatbuffers/src/primitives.rs b/rust/flatbuffers/src/primitives.rs
index cfd4140..350e984 100644
--- a/rust/flatbuffers/src/primitives.rs
+++ b/rust/flatbuffers/src/primitives.rs
@@ -18,9 +18,9 @@
 use std::mem::size_of;
 use std::ops::Deref;
 
-use endian_scalar::{emplace_scalar, read_scalar, read_scalar_at};
-use follow::Follow;
-use push::Push;
+use crate::endian_scalar::{emplace_scalar, read_scalar, read_scalar_at};
+use crate::follow::Follow;
+use crate::push::Push;
 
 pub const FLATBUFFERS_MAX_BUFFER_SIZE: usize = (1u64 << 31) as usize;
 
@@ -49,26 +49,29 @@
 
 pub const SIZE_SIZEPREFIX: usize = SIZE_UOFFSET;
 
-/// SOffsetT is an i32 that is used by tables to reference their vtables.
+/// SOffsetT is a relative pointer from tables to their vtables.
 pub type SOffsetT = i32;
 
-/// UOffsetT is a u32 that is used by pervasively to represent both pointers
-/// and lengths of vectors.
+/// UOffsetT is used to represent both relative pointers and lengths of vectors.
 pub type UOffsetT = u32;
 
-/// VOffsetT is a i32 that is used by vtables to store field data.
-pub type VOffsetT = i16;
+/// VOffsetT is a relative pointer, stored in vtables, that points from a table to its field data.
+pub type VOffsetT = u16;
 
 /// TableFinishedWIPOffset marks a WIPOffset as being for a finished table.
+#[derive(Clone, Copy)]
 pub struct TableFinishedWIPOffset {}
 
 /// TableUnfinishedWIPOffset marks a WIPOffset as being for an unfinished table.
+#[derive(Clone, Copy)]
 pub struct TableUnfinishedWIPOffset {}
 
 /// UnionWIPOffset marks a WIPOffset as being for a union value.
+#[derive(Clone, Copy)]
 pub struct UnionWIPOffset {}
 
 /// VTableWIPOffset marks a WIPOffset as being for a vtable.
+#[derive(Clone, Copy)]
 pub struct VTableWIPOffset {}
 
 /// WIPOffset contains an UOffsetT with a special meaning: it is the location of
@@ -78,15 +81,18 @@
 #[derive(Debug)]
 pub struct WIPOffset<T>(UOffsetT, PhantomData<T>);
 
-// TODO(rw): why do we need to reimplement (with a default impl) Copy to
-//           avoid ownership errors?
+// We cannot use derive for these two impls, as the derived impls would only
+// implement `Copy` and `Clone` for `T: Copy` and `T: Clone` respectively.
+// However `WIPOffset<T>` can always be copied, no matter what `T` you
+// have.
 impl<T> Copy for WIPOffset<T> {}
 impl<T> Clone for WIPOffset<T> {
-    #[inline]
-    fn clone(&self) -> WIPOffset<T> {
-        WIPOffset::new(self.0.clone())
+    #[inline(always)]
+    fn clone(&self) -> Self {
+        *self
     }
 }
+
 impl<T> PartialEq for WIPOffset<T> {
     fn eq(&self, o: &WIPOffset<T>) -> bool {
         self.value() == o.value()
@@ -113,12 +119,12 @@
     /// Return a wrapped value that brings its meaning as a union WIPOffset
     /// into the type system.
     #[inline(always)]
-    pub fn as_union_value(&self) -> WIPOffset<UnionWIPOffset> {
+    pub fn as_union_value(self) -> WIPOffset<UnionWIPOffset> {
         WIPOffset::new(self.0)
     }
     /// Get the underlying value.
     #[inline(always)]
-    pub fn value(&self) -> UOffsetT {
+    pub fn value(self) -> UOffsetT {
         self.0
     }
 }
@@ -146,9 +152,22 @@
 /// is incremented by the value contained in this type.
 #[derive(Debug)]
 pub struct ForwardsUOffset<T>(UOffsetT, PhantomData<T>);
+
+// We cannot use derive for these two impls, as the derived impls would only
+// implement `Copy` and `Clone` for `T: Copy` and `T: Clone` respectively.
+// However `ForwardsUOffset<T>` can always be copied, no matter what `T` you
+// have.
+impl<T> Copy for ForwardsUOffset<T> {}
+impl<T> Clone for ForwardsUOffset<T> {
+    #[inline(always)]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
 impl<T> ForwardsUOffset<T> {
     #[inline(always)]
-    pub fn value(&self) -> UOffsetT {
+    pub fn value(self) -> UOffsetT {
         self.0
     }
 }
@@ -268,6 +287,14 @@
     }
 }
 
+impl<'a> Follow<'a> for bool {
+    type Inner = bool;
+    #[inline(always)]
+    fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
+        read_scalar_at::<u8>(buf, loc) != 0
+    }
+}
+
 /// Follow trait impls for primitive types.
 ///
 /// Ideally, these would be implemented as a single impl using trait bounds on
@@ -285,7 +312,6 @@
     };
 }
 
-impl_follow_for_endian_scalar!(bool);
 impl_follow_for_endian_scalar!(u8);
 impl_follow_for_endian_scalar!(u16);
 impl_follow_for_endian_scalar!(u32);
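
A standalone illustration (not part of the patch) of why the manual `Copy`/`Clone` impls above are needed: with `derive`, the generated impls carry `T: Copy`/`T: Clone` bounds even though the wrapper only stores a `PhantomData<T>`.

```rust
use std::marker::PhantomData;

// Derive adds bounds: expands to `impl<T: Copy> Copy` and `impl<T: Clone> Clone`.
#[allow(dead_code)]
#[derive(Clone, Copy)]
struct Derived<T>(u32, PhantomData<T>);

// Manual impls are unconditional, matching what the patch does for WIPOffset.
struct Manual<T>(u32, PhantomData<T>);
impl<T> Copy for Manual<T> {}
impl<T> Clone for Manual<T> {
    fn clone(&self) -> Self {
        *self
    }
}

struct NotCloneable; // a type that is neither Copy nor Clone

fn main() {
    let m = Manual(1u32, PhantomData::<NotCloneable>);
    let _first = m;  // a copy, not a move: Manual<T> is Copy for every T
    let _second = m; // still usable after the previous line

    // let d = Derived(1u32, PhantomData::<NotCloneable>);
    // let _ = d.clone(); // would not compile: `NotCloneable: Clone` is not satisfied
}
```
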
diff --git a/rust/flatbuffers/src/push.rs b/rust/flatbuffers/src/push.rs
index 1863058..c461372 100644
--- a/rust/flatbuffers/src/push.rs
+++ b/rust/flatbuffers/src/push.rs
@@ -17,7 +17,7 @@
 use std::cmp::max;
 use std::mem::{align_of, size_of};
 
-use endian_scalar::emplace_scalar;
+use crate::endian_scalar::emplace_scalar;
 
 /// Trait to abstract over functionality needed to write values (either owned
 /// or referenced). Used in FlatBufferBuilder and implemented for generated
diff --git a/rust/flatbuffers/src/table.rs b/rust/flatbuffers/src/table.rs
index 7b1c4a5..46728cd 100644
--- a/rust/flatbuffers/src/table.rs
+++ b/rust/flatbuffers/src/table.rs
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
 
-use follow::Follow;
-use primitives::*;
-use vtable::VTable;
+use crate::follow::Follow;
+use crate::primitives::*;
+use crate::vtable::VTable;
 
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct Table<'a> {
@@ -27,7 +27,7 @@
 impl<'a> Table<'a> {
     #[inline]
     pub fn new(buf: &'a [u8], loc: usize) -> Self {
-        Table { buf: buf, loc: loc }
+        Table { buf, loc }
     }
     #[inline]
     pub fn vtable(&self) -> VTable<'a> {
@@ -51,7 +51,7 @@
     type Inner = Table<'a>;
     #[inline]
     fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
-        Table { buf: buf, loc: loc }
+        Table { buf, loc }
     }
 }
 
diff --git a/rust/flatbuffers/src/vector.rs b/rust/flatbuffers/src/vector.rs
index 66653eb..5236ea1 100644
--- a/rust/flatbuffers/src/vector.rs
+++ b/rust/flatbuffers/src/vector.rs
@@ -14,20 +14,42 @@
  * limitations under the License.
  */
 
+use std::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator};
 use std::marker::PhantomData;
 use std::mem::size_of;
 use std::slice::from_raw_parts;
 use std::str::from_utf8_unchecked;
+use std::fmt::{Debug, Result, Formatter};
 
+use crate::endian_scalar::read_scalar_at;
 #[cfg(target_endian = "little")]
-use endian_scalar::EndianScalar;
-use endian_scalar::{read_scalar, read_scalar_at};
-use follow::Follow;
-use primitives::*;
+use crate::endian_scalar::EndianScalar;
+use crate::follow::Follow;
+use crate::primitives::*;
 
-#[derive(Debug)]
 pub struct Vector<'a, T: 'a>(&'a [u8], usize, PhantomData<T>);
 
+impl<'a, T> Debug for Vector<'a, T>
+where
+    T: 'a + Follow<'a>,
+    <T as Follow<'a>>::Inner : Debug
+{
+    fn fmt(&self, f: &mut Formatter) -> Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+
+// We cannot use derive for these two impls, as it would only implement Copy
+// and Clone for `T: Copy` and `T: Clone` respectively. However `Vector<'a, T>`
+// can always be copied, no matter what `T` you have.
+impl<'a, T> Copy for Vector<'a, T> {}
+impl<'a, T> Clone for Vector<'a, T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
 impl<'a, T: 'a> Vector<'a, T> {
     #[inline(always)]
     pub fn new(buf: &'a [u8], loc: usize) -> Self {
@@ -40,18 +62,27 @@
 
     #[inline(always)]
     pub fn len(&self) -> usize {
-        read_scalar::<UOffsetT>(&self.0[self.1 as usize..]) as usize
+        read_scalar_at::<UOffsetT>(&self.0, self.1) as usize
+    }
+    #[inline(always)]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
     }
 }
 
 impl<'a, T: Follow<'a> + 'a> Vector<'a, T> {
     #[inline(always)]
     pub fn get(&self, idx: usize) -> T::Inner {
-        debug_assert!(idx < read_scalar::<u32>(&self.0[self.1 as usize..]) as usize);
+        debug_assert!(idx < read_scalar_at::<u32>(&self.0, self.1) as usize);
         let sz = size_of::<T>();
         debug_assert!(sz > 0);
         T::follow(self.0, self.1 as usize + SIZE_UOFFSET + sz * idx)
     }
+
+    #[inline(always)]
+    pub fn iter(&self) -> VectorIter<'a, T> {
+        VectorIter::new(*self)
+    }
 }
 
 pub trait SafeSliceAccess {}
@@ -102,8 +133,7 @@
     fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {
         let len = read_scalar_at::<UOffsetT>(&buf, loc) as usize;
         let slice = &buf[loc + SIZE_UOFFSET..loc + SIZE_UOFFSET + len];
-        let s = unsafe { from_utf8_unchecked(slice) };
-        s
+        unsafe { from_utf8_unchecked(slice) }
     }
 }
 
@@ -134,3 +164,124 @@
         Vector::new(buf, loc)
     }
 }
+
+/// An iterator over a `Vector`.
+#[derive(Debug)]
+pub struct VectorIter<'a, T: 'a> {
+    buf: &'a [u8],
+    loc: usize,
+    remaining: usize,
+    phantom: PhantomData<T>,
+}
+
+impl<'a, T: 'a> VectorIter<'a, T> {
+    #[inline]
+    pub fn new(inner: Vector<'a, T>) -> Self {
+        VectorIter {
+            buf: inner.0,
+            // inner.1 is the location of the data for the vector.
+            // The first SIZE_UOFFSET bytes are the length. We skip
+            // that to get to the actual vector content.
+            loc: inner.1 + SIZE_UOFFSET,
+            remaining: inner.len(),
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a, T: Follow<'a> + 'a> Clone for VectorIter<'a, T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        VectorIter {
+            buf: self.buf,
+            loc: self.loc,
+            remaining: self.remaining,
+            phantom: self.phantom,
+        }
+    }
+}
+
+impl<'a, T: Follow<'a> + 'a> Iterator for VectorIter<'a, T> {
+    type Item = T::Inner;
+
+    #[inline]
+    fn next(&mut self) -> Option<T::Inner> {
+        let sz = size_of::<T>();
+        debug_assert!(sz > 0);
+
+        if self.remaining == 0 {
+            None
+        } else {
+            let result = T::follow(self.buf, self.loc);
+            self.loc += sz;
+            self.remaining -= 1;
+            Some(result)
+        }
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<T::Inner> {
+        let sz = size_of::<T>();
+        debug_assert!(sz > 0);
+
+        self.remaining = self.remaining.saturating_sub(n);
+
+        // Note that this might overflow, but that is okay because
+        // in that case self.remaining will have been set to zero.
+        self.loc = self.loc.wrapping_add(sz * n);
+
+        self.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.remaining, Some(self.remaining))
+    }
+}
+
+impl<'a, T: Follow<'a> + 'a> DoubleEndedIterator for VectorIter<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T::Inner> {
+        let sz = size_of::<T>();
+        debug_assert!(sz > 0);
+
+        if self.remaining == 0 {
+            None
+        } else {
+            self.remaining -= 1;
+            Some(T::follow(self.buf, self.loc + sz * self.remaining))
+        }
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<T::Inner> {
+        self.remaining = self.remaining.saturating_sub(n);
+        self.next_back()
+    }
+}
+
+impl<'a, T: 'a + Follow<'a>> ExactSizeIterator for VectorIter<'a, T> {
+    #[inline]
+    fn len(&self) -> usize {
+        self.remaining
+    }
+}
+
+impl<'a, T: 'a + Follow<'a>> FusedIterator for VectorIter<'a, T> {}
+
+impl<'a, T: Follow<'a> + 'a> IntoIterator for Vector<'a, T> {
+    type Item = T::Inner;
+    type IntoIter = VectorIter<'a, T>;
+    #[inline]
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, 'b, T: Follow<'a> + 'a> IntoIterator for &'b Vector<'a, T> {
+    type Item = T::Inner;
+    type IntoIter = VectorIter<'a, T>;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
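
A small sketch (not from the patch) of the new iterator support on `Vector`: with `iter()` and the `IntoIterator` impls added above, a flatbuffers vector can be consumed like any other Rust iterator. In practice a `Vector` comes from generated accessors; here a generic helper is shown instead to keep the example self-contained.

```rust
use flatbuffers::{Follow, Vector};

// Collect the elements of a flatbuffers Vector in reverse order.
// VectorIter implements DoubleEndedIterator and ExactSizeIterator,
// so `rev()`, `len()`, `nth()` etc. all work as expected.
fn collect_reversed<'a, T>(v: Vector<'a, T>) -> Vec<T::Inner>
where
    T: Follow<'a> + 'a,
{
    v.iter().rev().collect()
}
```
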
diff --git a/rust/flatbuffers/src/vtable.rs b/rust/flatbuffers/src/vtable.rs
index 5808670..98fb1e2 100644
--- a/rust/flatbuffers/src/vtable.rs
+++ b/rust/flatbuffers/src/vtable.rs
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
 
-use endian_scalar::read_scalar_at;
-use follow::Follow;
-use primitives::*;
+use crate::endian_scalar::read_scalar_at;
+use crate::follow::Follow;
+use crate::primitives::*;
 
 /// VTable encapsulates read-only usage of a vtable. It is only to be used
 /// by generated code.
@@ -34,7 +34,7 @@
 
 impl<'a> VTable<'a> {
     pub fn init(buf: &'a [u8], loc: usize) -> Self {
-        VTable { buf: buf, loc: loc }
+        VTable { buf, loc }
     }
     pub fn num_fields(&self) -> usize {
         (self.num_bytes() / SIZE_VOFFSET) - 2
diff --git a/rust/flatbuffers/src/vtable_writer.rs b/rust/flatbuffers/src/vtable_writer.rs
index d1e87dd..57380bd 100644
--- a/rust/flatbuffers/src/vtable_writer.rs
+++ b/rust/flatbuffers/src/vtable_writer.rs
@@ -16,8 +16,8 @@
 
 use std::ptr::write_bytes;
 
-use endian_scalar::{emplace_scalar, read_scalar_at};
-use primitives::*;
+use crate::endian_scalar::{emplace_scalar, read_scalar_at};
+use crate::primitives::*;
 
 /// VTableWriter compartmentalizes actions needed to create a vtable.
 #[derive(Debug)]
@@ -28,7 +28,7 @@
 impl<'a> VTableWriter<'a> {
     #[inline(always)]
     pub fn init(buf: &'a mut [u8]) -> Self {
-        VTableWriter { buf: buf }
+        VTableWriter { buf }
     }
 
     /// Writes the vtable length (in bytes) into the vtable.
diff --git a/rust/flexbuffers/.gitignore b/rust/flexbuffers/.gitignore
new file mode 100644
index 0000000..6936990
--- /dev/null
+++ b/rust/flexbuffers/.gitignore
@@ -0,0 +1,3 @@
+/target
+**/*.rs.bk
+Cargo.lock
diff --git a/rust/flexbuffers/Cargo.toml b/rust/flexbuffers/Cargo.toml
new file mode 100644
index 0000000..44737a0
--- /dev/null
+++ b/rust/flexbuffers/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "flexbuffers"
+version = "0.2.1"
+authors = ["Casper Neo <cneo@google.com>", "FlatBuffers Maintainers"]
+edition = "2018"
+license = "Apache-2.0"
+description = "Official FlexBuffers Rust runtime library."
+homepage = "https://google.github.io/flatbuffers/flexbuffers"
+repository = "https://github.com/google/flatbuffers"
+keywords = ["flatbuffers", "flexbuffers", "serialization", "zero-copy"]
+categories = ["encoding", "data-structures", "memory-management"]
+
+[features]
+# Sets serde::Serializer::is_human_readable() to true.
+# The default was changed from true to false in version "0.2.1".
+# You basically never need this to be true unless writing data for old binaries.
+serialize_human_readable = []
+# Sets serde::Deserializer::is_human_readable() to true.
+# The default was changed from true to false in version "0.2.1".
+# You basically never need this to be true unless reading data from old binaries.
+deserialize_human_readable = []
+
+
+[dependencies]
+serde = "1.0"
+serde_derive = "1.0"
+byteorder = "1.3.2"
+num_enum = "0.5.0"
+bitflags = "1.2.1"
diff --git a/rust/flexbuffers/README.md b/rust/flexbuffers/README.md
new file mode 100644
index 0000000..0b3331b
--- /dev/null
+++ b/rust/flexbuffers/README.md
@@ -0,0 +1,22 @@
+# Flexbuffers
+
+[Flexbuffers](https://google.github.io/flatbuffers/flexbuffers.html) is a
+schema-less binary format developed at Google. FlexBuffers can be accessed
+without parsing, copying, or allocation. This is a huge win for efficiency and
+memory friendliness, and it allows for unique use cases such as mmap-ing large
+amounts of free-form data.
+
+FlexBuffers' design and implementation allows for a very compact encoding,
+with automatic sizing of containers to their smallest possible representation
+(8/16/32/64 bits). Many values and offsets can be encoded in just 8 bits.
+
+FlexBuffers supports [Serde](https://serde.rs/) for automatically serializing
+Rust data structures into its binary format.
+
+## See Examples for Usage:
+* [Example](https://github.com/google/flatbuffers/blob/master/samples/sample_flexbuffers.rs)
+* [Serde Example](https://github.com/google/flatbuffers/blob/master/samples/sample_flexbuffers_serde.rs)
+* [Documentation](https://docs.rs/flexbuffers)
+
+Flexbuffers is the schema-less cousin of
+[Flatbuffers](https://google.github.io/flatbuffers/).
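
For illustration (not part of this patch): a hedged sketch of the Serde integration the README mentions, using the `FlexbufferSerializer` this crate exports. It assumes a user crate with `serde` (derive feature enabled) and `flexbuffers` as dependencies; the `Weapon` struct is made up for the example, and `view()` is assumed to expose the encoded bytes as in the linked samples.

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Weapon {
    name: String,
    damage: u16,
}

fn main() {
    let axe = Weapon { name: "axe".to_string(), damage: 5 };

    // Serialize the struct into a FlexBuffer via Serde.
    let mut s = flexbuffers::FlexbufferSerializer::new();
    axe.serialize(&mut s).unwrap();

    // The encoded FlexBuffer bytes.
    assert!(!s.view().is_empty());
}
```
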
diff --git a/rust/flexbuffers/src/bitwidth.rs b/rust/flexbuffers/src/bitwidth.rs
new file mode 100644
index 0000000..8e0bfed
--- /dev/null
+++ b/rust/flexbuffers/src/bitwidth.rs
@@ -0,0 +1,113 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::bitwidth::BitWidth::*;
+use std::slice::Iter;
+
+/// Represents the size of Flexbuffers data.
+///
+/// Flexbuffers automatically compresses numbers to the smallest possible width
+/// (`250u64` is stored as `250u8`).
+#[derive(
+    Debug,
+    Clone,
+    Copy,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Serialize,
+    Deserialize,
+    Ord,
+    num_enum::TryFromPrimitive,
+)]
+#[repr(u8)]
+pub enum BitWidth {
+    W8 = 0,
+    W16 = 1,
+    W32 = 2,
+    W64 = 3,
+}
+impl BitWidth {
+    pub(crate) fn iter() -> Iter<'static, Self> {
+        [W8, W16, W32, W64].iter()
+    }
+    pub fn n_bytes(self) -> usize {
+        1 << self as usize
+    }
+    pub fn from_nbytes(n: impl std::convert::Into<usize>) -> Option<Self> {
+        match n.into() {
+            1 => Some(W8),
+            2 => Some(W16),
+            4 => Some(W32),
+            8 => Some(W64),
+            _ => None,
+        }
+    }
+}
+
+impl Default for BitWidth {
+    fn default() -> Self {
+        W8
+    }
+}
+
+// TODO(cneo): Overloading with `from` is probably not the most readable idea in hindsight.
+macro_rules! impl_bitwidth_from {
+    ($from: ident, $w64: ident, $w32: ident, $w16: ident, $w8: ident) => {
+        impl From<$from> for BitWidth {
+            fn from(x: $from) -> BitWidth {
+                let x = x as $w64;
+                if x >= $w8::min_value() as $w64 && x <= $w8::max_value() as $w64 {
+                    return W8;
+                }
+                if x >= $w16::min_value() as $w64 && x <= $w16::max_value() as $w64 {
+                    return W16;
+                }
+                if x >= $w32::min_value() as $w64 && x <= $w32::max_value() as $w64 {
+                    return W32;
+                }
+                W64
+            }
+        }
+    };
+}
+impl_bitwidth_from!(u64, u64, u32, u16, u8);
+impl_bitwidth_from!(usize, u64, u32, u16, u8);
+impl_bitwidth_from!(i64, i64, i32, i16, i8);
+
+#[allow(clippy::float_cmp)]
+impl From<f64> for BitWidth {
+    fn from(x: f64) -> BitWidth {
+        if x != x as f32 as f64 {
+            W64
+        } else {
+            W32
+        }
+    }
+}
+impl From<f32> for BitWidth {
+    fn from(_: f32) -> BitWidth {
+        W32
+    }
+}
+
+/// Zero pad `buffer` so that a value of the given `width` will be byte aligned when pushed.
+pub fn align(buffer: &mut Vec<u8>, width: BitWidth) {
+    let bytes = 1 << width as u8;
+    let alignment = (bytes - buffer.len() % bytes) % bytes;
+    // Profiling reveals the loop is faster than Vec::resize.
+    for _ in 0..alignment as usize {
+        buffer.push(0);
+    }
+}
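
A standalone sketch (not the crate's code) of the width-selection rule described in the `BitWidth` doc comment above: a number is stored in the narrowest width that can hold it, e.g. 250 fits in 8 bits while 260 needs 16.

```rust
// Mirrors the logic of the impl_bitwidth_from! macro above, expressed directly.
fn needed_bits(x: u64) -> u8 {
    if x <= u8::max_value() as u64 {
        8
    } else if x <= u16::max_value() as u64 {
        16
    } else if x <= u32::max_value() as u64 {
        32
    } else {
        64
    }
}

fn main() {
    assert_eq!(needed_bits(250), 8);
    assert_eq!(needed_bits(260), 16);
    assert_eq!(needed_bits(1 << 40), 64);
}
```
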
diff --git a/rust/flexbuffers/src/builder/map.rs b/rust/flexbuffers/src/builder/map.rs
new file mode 100644
index 0000000..1635f64
--- /dev/null
+++ b/rust/flexbuffers/src/builder/map.rs
@@ -0,0 +1,118 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Builder, Pushable, Value, VectorBuilder};
+
+/// Builds a Flexbuffer map, returned by a [Builder](struct.Builder.html).
+///
+/// ## Side effect when dropped:
+/// When this is dropped, or `end_map` is called, the map is
+/// committed to the buffer. If this map is the root of the flexbuffer, then the
+/// root is written and the flexbuffer is complete.
+/// ## Panics:
+/// -  Duplicate keys will result in a panic in both debug and release mode.
+/// -  Keys with internal nulls result in a panic in debug mode and in silent truncation
+///    in release mode.
+pub struct MapBuilder<'a> {
+    pub(super) builder: &'a mut Builder,
+    // If the root is this map then start == None. Otherwise start is the
+    // number of values in the 'values stack' before adding this map.
+    pub(super) start: Option<usize>,
+}
+impl<'a> MapBuilder<'a> {
+    /// Push `p` onto this map with key `key`.
+    /// This will panic (in debug mode) if `key` contains internal nulls.
+    #[inline]
+    pub fn push<P: Pushable>(&mut self, key: &str, p: P) {
+        self.builder.push_key(key);
+        self.builder.push(p);
+    }
+    /// Starts a nested vector that will be pushed onto this map
+    /// with key `key` when it is dropped.
+    ///
+    /// This will panic (in debug mode) if `key` contains internal nulls.
+    #[inline]
+    pub fn start_vector(&mut self, key: &str) -> VectorBuilder {
+        // Push the key that refers to this nested vector.
+        self.builder.push_key(key);
+        // Nested vector.
+        let start = Some(self.builder.values.len());
+        VectorBuilder {
+            builder: &mut self.builder,
+            start,
+        }
+    }
+    /// Starts a nested map that will be pushed onto this map
+    /// with key `key` when it is dropped.
+    ///
+    /// This will panic (in debug mode) if `key` contains internal nulls.
+    #[inline]
+    pub fn start_map(&mut self, key: &str) -> MapBuilder {
+        // Push the key that refers to this nested map.
+        self.builder.push_key(key);
+        // Nested map.
+        let start = Some(self.builder.values.len());
+        MapBuilder {
+            builder: &mut self.builder,
+            start,
+        }
+    }
+    /// `end_map` sorts the map by key and writes it to the buffer. This happens anyway
+    /// when the map builder is dropped.
+    #[inline]
+    pub fn end_map(self) {}
+}
+impl<'a> Drop for MapBuilder<'a> {
+    #[inline]
+    fn drop(&mut self) {
+        self.builder.end_map_or_vector(true, self.start);
+    }
+}
+
+// Read known keys / strings as iterators over bytes -- skipping utf8 validation and strlen.
+pub(super) fn get_key(buffer: &[u8], address: usize) -> impl Iterator<Item = &u8> {
+    buffer[address..].iter().take_while(|&&b| b != b'\0')
+}
+
+// `values` is assumed to be of the format [key1, value1, ..., keyN, valueN].
+// The keys refer to cstrings in `buffer`. When this function returns,
+// `values` is sorted in place by key.
+pub(super) fn sort_map_by_keys(values: &mut [Value], buffer: &[u8]) {
+    debug_assert_eq!(values.len() % 2, 0);
+    debug_assert!(values.iter().step_by(2).all(Value::is_key));
+    let raw_pairs = values.as_mut_ptr() as *mut [Value; 2];
+    let pairs_len = values.len() / 2;
+    // Unsafe code needed to treat the slice as key-value pairs when sorting in place. This is
+    // preferred over custom sorting or adding another dependency. By construction, this part
+    // of the values stack must be alternating (key, value) pairs. The public API must not be
+    // able to trigger the above debug_asserts that protect this unsafe usage.
+    let pairs: &mut [[Value; 2]] =
+        unsafe { std::slice::from_raw_parts_mut(raw_pairs, pairs_len) };
+    #[rustfmt::skip]
+    pairs.sort_unstable_by(|[key1, _], [key2, _]| {
+        if let Value::Key(a1) = *key1 {
+            if let Value::Key(a2) = *key2 {
+                let s1 = get_key(buffer, a1);
+                let s2 = get_key(buffer, a2);
+                let ord = s1.cmp(s2);
+                if ord == std::cmp::Ordering::Equal {
+                    let dup: String = get_key(buffer, a1).map(|&b| b as char).collect();
+                    panic!("Duplicated key in map {:?}", dup);
+                }
+                return ord;
+            }
+        }
+        unreachable!();
+    });
+}
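
For illustration (not part of this patch): a short sketch of building a FlexBuffer map by hand with the `MapBuilder` API above. The nested-builder drop behaviour is as documented on `MapBuilder`; the `Builder` entry points used here (`Builder::default`, `start_map`, `view`) are assumed from the rest of this module, which is not shown in full in this diff.

```rust
use flexbuffers::Builder;

fn main() {
    let mut builder = Builder::default();

    let mut map = builder.start_map();
    map.push("name", "axe");
    map.push("damage", 5u8);

    // Nested vector; it is committed to the map when `tags` is dropped.
    let mut tags = map.start_vector("tags");
    tags.push("melee");
    tags.push("one-handed");
    drop(tags);

    // Sorts the keys and writes the map; since this map is the root,
    // the flexbuffer is now complete.
    map.end_map();

    let encoded: &[u8] = builder.view();
    assert!(!encoded.is_empty());
}
```
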
diff --git a/rust/flexbuffers/src/builder/mod.rs b/rust/flexbuffers/src/builder/mod.rs
new file mode 100644
index 0000000..e71acd0
--- /dev/null
+++ b/rust/flexbuffers/src/builder/mod.rs
@@ -0,0 +1,404 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::bitwidth::{align, BitWidth};
+mod value;
+use crate::FlexBufferType;
+use std::cmp::max;
+use value::{find_vector_type, store_value, Value};
+mod map;
+mod push;
+mod ser;
+mod vector;
+use map::sort_map_by_keys;
+pub use map::MapBuilder;
+pub use push::Pushable;
+pub use ser::{Error, FlexbufferSerializer};
+pub use vector::VectorBuilder;
+
+macro_rules! push_slice {
+    ($push_name: ident, $scalar: ty, $Val: ident, $new_vec: ident) => {
+        fn $push_name<T, S>(&mut self, xs: S)
+        where
+            T: Into<$scalar> + Copy,
+            S: AsRef<[T]>
+        {
+            let mut value = Value::$new_vec(xs.as_ref().len());
+            let mut width = xs.as_ref()
+                .iter()
+                .map(|x| BitWidth::from((*x).into()))
+                .max()
+                .unwrap_or_default();
+            if !value.is_fixed_length_vector() {
+                let length = Value::UInt(xs.as_ref().len() as u64);
+                width = std::cmp::max(width, length.width_or_child_width());
+                align(&mut self.buffer, width);
+                store_value(&mut self.buffer, length, width);
+            } else {
+                align(&mut self.buffer, width);
+            }
+            let address = self.buffer.len();
+            for &x in xs.as_ref().iter() {
+                store_value(&mut self.buffer, Value::$Val(x.into()), width);
+            }
+            value.set_address_or_panic(address);
+            value.set_child_width_or_panic(width);
+            self.values.push(value);
+        }
+    }
+}
+macro_rules! push_indirect {
+    ($push_name: ident, $scalar: ty, $Direct: ident, $Indirect: ident) => {
+        fn $push_name<T: Into<$scalar>>(&mut self, x: T) {
+            let x = Value::$Direct(x.into());
+            let child_width = x.width_or_child_width();
+            let address = self.buffer.len();
+            store_value(&mut self.buffer, x, child_width);
+            self.values.push(
+                Value::Reference {
+                    address,
+                    child_width,
+                    fxb_type: FlexBufferType::$Indirect,
+                }
+            );
+        }
+    }
+}
+
+bitflags! {
+    /// Options for sharing data within a flexbuffer.
+    ///
+    /// These increase serialization time but decrease the size of the resulting buffer. By
+    /// default, only `SHARE_KEYS` is enabled. You may wish to turn on `SHARE_STRINGS` if you
+    /// know your data has many duplicate strings, or `SHARE_KEY_VECTORS` if your data has
+    /// many maps with identical keys.
+    ///
+    /// ## Not Yet Implemented
+    /// - `SHARE_STRINGS`
+    /// - `SHARE_KEY_VECTORS`
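+    ///
+    /// ## Example
+    /// A minimal sketch of disabling key sharing (assuming `BuilderOptions` is
+    /// re-exported at the crate root alongside `Builder`):
+    /// ```
+    /// use flexbuffers::{Builder, BuilderOptions};
+    /// let mut builder = Builder::new(BuilderOptions::SHARE_NONE);
+    /// builder.build_singleton("a lone string");
+    /// ```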
+    pub struct BuilderOptions: u8 {
+        const SHARE_NONE = 0;
+        const SHARE_KEYS = 1;
+        const SHARE_STRINGS = 2;
+        const SHARE_KEYS_AND_STRINGS = 3;
+        const SHARE_KEY_VECTORS = 4;
+        const SHARE_ALL = 7;
+    }
+}
+impl Default for BuilderOptions {
+    fn default() -> Self {
+        Self::SHARE_KEYS
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+// Address of a Key inside of the buffer.
+struct CachedKey(usize);
+
+/// **Use this struct to build a Flexbuffer.**
+///
+/// Flexbuffers may only have a single root value, which may be constructed
+/// with one of the following functions.
+/// * `build_singleton` will push a single value to the buffer and serialize it as the root.
+/// * `start_vector` returns a `VectorBuilder`, into which many (potentially
+/// heterogeneous) values can be pushed. The vector itself is the root and is serialized
+/// when the `VectorBuilder` is dropped (or `end` is called).
+/// * `start_map` returns a `MapBuilder`, which is similar to a `VectorBuilder` except
+/// every value must be pushed with an associated key. The map is serialized when the
+/// `MapBuilder` is dropped (or `end` is called).
+///
+/// These functions reset and overwrite the Builder, which means that while there is no
+/// active `MapBuilder` or `VectorBuilder`, the internal buffer is either empty or
+/// contains a finished Flexbuffer. The internal buffer is accessed with `view`.
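+///
+/// ## Example
+/// A minimal usage sketch (assuming `Builder` is re-exported at the crate root; the
+/// exact bytes produced depend on the widths the builder selects):
+/// ```
+/// use flexbuffers::Builder;
+///
+/// let mut builder = Builder::default();
+/// {
+///     // The map is committed to the buffer when `map` is dropped (or `end_map` is called).
+///     let mut map = builder.start_map();
+///     map.push("name", "Dreaming");
+///     map.push("id", 42u8);
+/// }
+/// let data: &[u8] = builder.view();
+/// assert!(!data.is_empty());
+/// ```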
+#[derive(Debug, Clone)]
+pub struct Builder {
+    buffer: Vec<u8>,
+    values: Vec<Value>,
+    key_pool: Option<Vec<CachedKey>>,
+}
+impl Default for Builder {
+    fn default() -> Self {
+        let opts = Default::default();
+        Builder::new(opts)
+    }
+}
+
+impl<'a> Builder {
+    pub fn new(opts: BuilderOptions) -> Self {
+        let key_pool = if opts.contains(BuilderOptions::SHARE_KEYS) {
+            Some(vec![])
+        } else {
+            None
+        };
+        Builder {
+            key_pool,
+            values: Vec::new(),
+            buffer: Vec::new(),
+        }
+    }
+    /// Shows the internal flexbuffer. It will either be empty or populated with the most
+    /// recently built flexbuffer.
+    pub fn view(&self) -> &[u8] {
+        &self.buffer
+    }
+    /// Returns the internal buffer, replacing it with a new vector. The returned buffer will
+    /// either be empty or populated with the most recently built flexbuffer.
+    pub fn take_buffer(&mut self) -> Vec<u8> {
+        let mut b = Vec::new();
+        std::mem::swap(&mut self.buffer, &mut b);
+        b
+    }
+    /// Resets the internal state. Automatically called before building a new flexbuffer.
+    pub fn reset(&mut self) {
+        self.buffer.clear();
+        self.values.clear();
+        if let Some(pool) = self.key_pool.as_mut() {
+            pool.clear();
+        }
+    }
+    fn push_key(&mut self, key: &str) {
+        debug_assert!(
+            key.bytes().all(|b| b != b'\0'),
+            "Keys must not have internal nulls."
+        );
+        // Search key pool if there is one.
+        let found = self.key_pool.as_ref().map(|pool| {
+            pool.binary_search_by(|&CachedKey(addr)| {
+                let old_key = map::get_key(&self.buffer, addr);
+                old_key.cloned().cmp(key.bytes())
+            })
+        });
+        let address = if let Some(Ok(idx)) = found {
+            // Found key in key pool.
+            self.key_pool.as_ref().unwrap()[idx].0
+        } else {
+            // Key not in pool (or no pool).
+            let address = self.buffer.len();
+            self.buffer.extend_from_slice(key.as_bytes());
+            self.buffer.push(b'\0');
+            address
+        };
+        if let Some(Err(idx)) = found {
+            // Insert into key pool.
+            let pool = self.key_pool.as_mut().unwrap();
+            pool.insert(idx, CachedKey(address));
+        }
+        self.values.push(Value::Key(address));
+    }
+    fn push_uint<T: Into<u64>>(&mut self, x: T) {
+        self.values.push(Value::UInt(x.into()));
+    }
+    fn push_int<T: Into<i64>>(&mut self, x: T) {
+        self.values.push(Value::Int(x.into()));
+    }
+    fn push_float<T: Into<f64>>(&mut self, x: T) {
+        self.values.push(Value::Float(x.into()));
+    }
+    fn push_null(&mut self) {
+        self.values.push(Value::Null);
+    }
+    fn push_bool(&mut self, x: bool) {
+        self.values.push(Value::Bool(x));
+    }
+    fn store_blob(&mut self, xs: &[u8]) -> Value {
+        let length = Value::UInt(xs.len() as u64);
+        let width = length.width_or_child_width();
+        align(&mut self.buffer, width);
+        store_value(&mut self.buffer, length, width);
+        let address = self.buffer.len();
+        self.buffer.extend_from_slice(xs);
+        Value::Reference {
+            fxb_type: FlexBufferType::Blob,
+            address,
+            child_width: width,
+        }
+    }
+    fn push_str(&mut self, x: &str) {
+        let mut string = self.store_blob(x.as_bytes());
+        self.buffer.push(b'\0');
+        string.set_fxb_type_or_panic(FlexBufferType::String);
+        self.values.push(string);
+    }
+    fn push_blob(&mut self, x: &[u8]) {
+        let blob = self.store_blob(x);
+        self.values.push(blob);
+    }
+    fn push_bools(&mut self, xs: &[bool]) {
+        let length = Value::UInt(xs.len() as u64);
+        let width = length.width_or_child_width();
+        align(&mut self.buffer, width);
+        store_value(&mut self.buffer, length, width);
+        let address = self.buffer.len();
+        for &b in xs.iter() {
+            self.buffer.push(b as u8);
+            for _ in 0..width as u8 {
+                self.buffer.push(0); // Pad each bool to the element width (admittedly wasteful).
+            }
+        }
+        self.values.push(Value::Reference {
+            fxb_type: FlexBufferType::VectorBool,
+            address,
+            child_width: width,
+        });
+    }
+
+    push_slice!(push_uints, u64, UInt, new_uint_vector);
+    push_slice!(push_ints, i64, Int, new_int_vector);
+    push_slice!(push_floats, f64, Float, new_float_vector);
+    push_indirect!(push_indirect_int, i64, Int, IndirectInt);
+    push_indirect!(push_indirect_uint, u64, UInt, IndirectUInt);
+    push_indirect!(push_indirect_float, f64, Float, IndirectFloat);
+
+    /// Resets the builder and starts a new flexbuffer with a vector at the root.
+    /// The exact Flexbuffer vector type is dynamically inferred.
+    pub fn start_vector(&'a mut self) -> VectorBuilder<'a> {
+        self.reset();
+        VectorBuilder {
+            builder: self,
+            start: None,
+        }
+    }
+    /// Resets the builder and builds a new flexbuffer with a map at the root.
+    pub fn start_map(&'a mut self) -> MapBuilder<'a> {
+        self.reset();
+        MapBuilder {
+            builder: self,
+            start: None,
+        }
+    }
+    /// Resets the builder and builds a new flexbuffer with the pushed value at the root.
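+    ///
+    /// A minimal sketch (assuming `Builder` is re-exported at the crate root):
+    /// ```
+    /// use flexbuffers::Builder;
+    /// let mut builder = Builder::default();
+    /// builder.build_singleton(3.14f64);
+    /// assert!(!builder.view().is_empty());
+    /// ```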
+    pub fn build_singleton<P: Pushable>(&mut self, p: P) {
+        self.reset();
+        p.push_to_builder(self);
+        let root = self.values.pop().unwrap();
+        store_root(&mut self.buffer, root);
+    }
+    fn push<P: Pushable>(&mut self, p: P) {
+        p.push_to_builder(self);
+    }
+    /// Stores the values past `previous_end` as a map or vector depending on `is_map`.
+    /// If `previous_end` is None then this was a root map / vector and the last value
+    /// is stored as the root.
+    fn end_map_or_vector(&mut self, is_map: bool, previous_end: Option<usize>) {
+        let split = previous_end.unwrap_or(0);
+        let value = if is_map {
+            let key_vals = &mut self.values[split..];
+            sort_map_by_keys(key_vals, &self.buffer);
+            let key_vector = store_vector(&mut self.buffer, key_vals, StoreOption::MapKeys);
+            store_vector(&mut self.buffer, key_vals, StoreOption::Map(key_vector))
+        } else {
+            store_vector(&mut self.buffer, &self.values[split..], StoreOption::Vector)
+        };
+        self.values.truncate(split);
+        if previous_end.is_some() {
+            self.values.push(value);
+        } else {
+            store_root(&mut self.buffer, value);
+        }
+    }
+}
+
+/// Builds a Flexbuffer with the single pushed value as the root.
+pub fn singleton<P: Pushable>(p: P) -> Vec<u8> {
+    let mut b = Builder::default();
+    b.build_singleton(p);
+    let Builder { buffer, .. } = b;
+    buffer
+}
+
+/// Stores the root value, root type and root width.
+/// This should be called to finish the Flexbuffer.
+fn store_root(buffer: &mut Vec<u8>, root: Value) {
+    let root_width = root.width_in_vector(buffer.len(), 0);
+    align(buffer, root_width);
+    store_value(buffer, root, root_width);
+    buffer.push(root.packed_type(root_width));
+    buffer.push(root_width.n_bytes() as u8);
+}
+
+pub enum StoreOption {
+    Vector,
+    Map(Value),
+    MapKeys,
+}
+/// Writes a Flexbuffer Vector or Map.
+/// For `StoreOption::Map(keys)`, `keys` must be the `Value` previously returned from
+/// `store_vector(.., StoreOption::MapKeys)`, or this will panic.
+// #[inline(always)]
+pub fn store_vector(buffer: &mut Vec<u8>, values: &[Value], opt: StoreOption) -> Value {
+    let (skip, stride) = match opt {
+        StoreOption::Vector => (0, 1),
+        StoreOption::MapKeys => (0, 2),
+        StoreOption::Map(_) => (1, 2),
+    };
+    let iter_values = || values.iter().skip(skip).step_by(stride);
+
+    // Figure out the vector type and the length of the prefix.
+    let mut result = if let StoreOption::Map(_) = opt {
+        Value::new_map()
+    } else {
+        find_vector_type(iter_values())
+    };
+    let length_slot = if !result.is_fixed_length_vector() {
+        let length = iter_values().count();
+        Some(Value::UInt(length as u64))
+    } else {
+        None
+    };
+    // Measure required width and align to it.
+    let mut width = BitWidth::W8;
+    if let StoreOption::Map(keys) = opt {
+        width = max(width, keys.width_in_vector(buffer.len(), 0))
+    }
+    if let Some(l) = length_slot {
+        width = max(width, l.width_or_child_width());
+    }
+    let prefix_length = result.prefix_length();
+    for (i, &val) in iter_values().enumerate() {
+        width = max(width, val.width_in_vector(buffer.len(), i + prefix_length));
+    }
+    align(buffer, width);
+    #[allow(deprecated)]
+    {
+        debug_assert_ne!(
+            result.fxb_type(),
+            FlexBufferType::VectorString,
+            "VectorString is deprecated and cannot be written.\
+             (https://github.com/google/flatbuffers/issues/5627)"
+        );
+    }
+    // Write Prefix.
+    if let StoreOption::Map(keys) = opt {
+        let key_width = Value::UInt(keys.width_or_child_width().n_bytes() as u64);
+        store_value(buffer, keys, width);
+        store_value(buffer, key_width, width);
+    }
+    if let Some(len) = length_slot {
+        store_value(buffer, len, width);
+    }
+    // Write data.
+    let address = buffer.len();
+    for &v in iter_values() {
+        store_value(buffer, v, width);
+    }
+    // Write types
+    if result.is_typed_vector_or_map() {
+        for v in iter_values() {
+            buffer.push(v.packed_type(width));
+        }
+    }
+    // Return Value representing this Vector.
+    result.set_address_or_panic(address);
+    result.set_child_width_or_panic(width);
+    result
+}
diff --git a/rust/flexbuffers/src/builder/push.rs b/rust/flexbuffers/src/builder/push.rs
new file mode 100644
index 0000000..d22b47f
--- /dev/null
+++ b/rust/flexbuffers/src/builder/push.rs
@@ -0,0 +1,167 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::builder::Builder;
+use crate::private::Sealed;
+use crate::{Blob, IndirectFloat, IndirectInt, IndirectUInt};
+
+impl<'a> Sealed for Blob<'a> {}
+impl Sealed for () {}
+
+// TODO: String interning
+// TODO: Pushable for Map types?
+
+/// Types that implement the Pushable trait can be written into a Flexbuffer.
+///
+/// All of Rust's standard numbers, `u8, u16, u32, u64, i8, i16, i32, i64, f32, f64`,
+/// can be pushed. They become `FlexBufferType::{UInt, Int, Float}`.
+/// Flexbuffers chooses the smallest width that can represent the given number.
+/// Strings can be pushed; they become `FlexBufferType::String` and are stored
+/// with both a length and a null terminator.
+///
+/// * For convenience and speed, push typed vectors using Rust arrays and slices.
+/// Doing so will immediately serialize the data, skipping the `Builder`'s
+/// internal cache.
+///
+/// * Pushable cannot be implemented by any downstream crates.
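+///
+/// ## Example
+/// A minimal sketch pushing a typed slice and a string into the same vector
+/// (assuming `Builder` is re-exported at the crate root):
+/// ```
+/// use flexbuffers::Builder;
+/// let mut builder = Builder::default();
+/// let mut vector = builder.start_vector();
+/// vector.push(&[1u32, 2, 3]); // Serialized immediately as a typed vector.
+/// vector.push("a string");    // The root vector now holds mixed types.
+/// vector.end_vector();
+/// ```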
+pub trait Pushable: Sealed + Sized {
+    fn push_to_builder(self, _: &mut Builder) {}
+}
+
+impl Pushable for () {
+    fn push_to_builder(self, builder: &mut Builder) {
+        builder.push_null();
+    }
+}
+impl<'a> Pushable for Blob<'a> {
+    fn push_to_builder(self, builder: &mut Builder) {
+        builder.push_blob(self.0);
+    }
+}
+
+macro_rules! forward_to_builder {
+    ($T: ty, $method: ident) => {
+        impl Sealed for $T {}
+        impl Pushable for $T {
+            fn push_to_builder(self, builder: &mut Builder) {
+                builder.$method(self);
+            }
+        }
+    };
+    ($T: ty, $method: ident, $asT: ty) => {
+        impl Sealed for $T {}
+        impl Pushable for $T {
+            fn push_to_builder(self, builder: &mut Builder) {
+                builder.$method(self as $asT);
+            }
+        }
+    };
+}
+forward_to_builder!(&str, push_str);
+forward_to_builder!(bool, push_bool);
+forward_to_builder!(u8, push_uint);
+forward_to_builder!(u16, push_uint);
+forward_to_builder!(u32, push_uint);
+forward_to_builder!(u64, push_uint);
+forward_to_builder!(i8, push_int);
+forward_to_builder!(i16, push_int);
+forward_to_builder!(i32, push_int);
+forward_to_builder!(i64, push_int);
+forward_to_builder!(f32, push_float);
+forward_to_builder!(f64, push_float);
+forward_to_builder!(&[u8], push_uints);
+forward_to_builder!(&[u16], push_uints);
+forward_to_builder!(&[u32], push_uints);
+forward_to_builder!(&[u64], push_uints);
+forward_to_builder!(&[i8], push_ints);
+forward_to_builder!(&[i16], push_ints);
+forward_to_builder!(&[i32], push_ints);
+forward_to_builder!(&[i64], push_ints);
+forward_to_builder!(&[f32], push_floats);
+forward_to_builder!(&[f64], push_floats);
+forward_to_builder!(&[bool], push_bools);
+forward_to_builder!(&Vec<u8>, push_uints);
+forward_to_builder!(&Vec<u16>, push_uints);
+forward_to_builder!(&Vec<u32>, push_uints);
+forward_to_builder!(&Vec<u64>, push_uints);
+forward_to_builder!(&Vec<i8>, push_ints);
+forward_to_builder!(&Vec<i16>, push_ints);
+forward_to_builder!(&Vec<i32>, push_ints);
+forward_to_builder!(&Vec<i64>, push_ints);
+forward_to_builder!(&Vec<f32>, push_floats);
+forward_to_builder!(&Vec<f64>, push_floats);
+forward_to_builder!(&Vec<bool>, push_bools);
+
+macro_rules! impl_indirects {
+    ($Indirect: ident, $method: ident) => {
+        impl Sealed for $Indirect {}
+        impl Pushable for $Indirect {
+            fn push_to_builder(self, builder: &mut Builder) {
+                builder.$method(self.0);
+            }
+        }
+    };
+}
+impl_indirects!(IndirectInt, push_indirect_int);
+impl_indirects!(IndirectUInt, push_indirect_uint);
+impl_indirects!(IndirectFloat, push_indirect_float);
+
+macro_rules! impl_arrays {
+    ($num: expr) => {
+        forward_to_builder!(&[u8; $num], push_uints, &[u8]);
+        forward_to_builder!(&[u16; $num], push_uints, &[u16]);
+        forward_to_builder!(&[u32; $num], push_uints, &[u32]);
+        forward_to_builder!(&[u64; $num], push_uints, &[u64]);
+        forward_to_builder!(&[i8; $num], push_ints, &[i8]);
+        forward_to_builder!(&[i16; $num], push_ints, &[i16]);
+        forward_to_builder!(&[i32; $num], push_ints, &[i32]);
+        forward_to_builder!(&[i64; $num], push_ints, &[i64]);
+        forward_to_builder!(&[f32; $num], push_floats, &[f32]);
+        forward_to_builder!(&[f64; $num], push_floats, &[f64]);
+        forward_to_builder!(&[bool; $num], push_bools, &[bool]);
+    };
+}
+impl_arrays!(0);
+impl_arrays!(1);
+impl_arrays!(2);
+impl_arrays!(3);
+impl_arrays!(4);
+impl_arrays!(5);
+impl_arrays!(6);
+// impl_arrays!(7);
+// impl_arrays!(8);
+// impl_arrays!(9);
+// impl_arrays!(10);
+// impl_arrays!(11);
+// impl_arrays!(12);
+// impl_arrays!(13);
+// impl_arrays!(14);
+// impl_arrays!(15);
+// impl_arrays!(16);
+// impl_arrays!(17);
+// impl_arrays!(18);
+// impl_arrays!(19);
+// impl_arrays!(20);
+// impl_arrays!(21);
+// impl_arrays!(22);
+// impl_arrays!(23);
+// impl_arrays!(24);
+// impl_arrays!(25);
+// impl_arrays!(26);
+// impl_arrays!(27);
+// impl_arrays!(28);
+// impl_arrays!(29);
+// impl_arrays!(30);
+// impl_arrays!(31);
+// impl_arrays!(32);
diff --git a/rust/flexbuffers/src/builder/ser.rs b/rust/flexbuffers/src/builder/ser.rs
new file mode 100644
index 0000000..8e483ba
--- /dev/null
+++ b/rust/flexbuffers/src/builder/ser.rs
@@ -0,0 +1,533 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::Builder;
+use serde::ser;
+use serde::ser::*;
+use std::fmt::Display;
+
+// This struct internally tracks the nested vectors representing
+// nested structs and such.
+// TODO: Add an option for field names in a map.
+/// Flexbuffer Serializer. This should be used to serialize structs.
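+///
+/// ## Example
+/// A minimal sketch of serializing a struct with serde (the `Monster` struct is
+/// illustrative; this assumes serde's `derive` feature is available and that
+/// `FlexbufferSerializer` is re-exported at the crate root):
+/// ```
+/// use serde::Serialize;
+///
+/// #[derive(Serialize)]
+/// struct Monster {
+///     hp: u32,
+///     name: String,
+/// }
+///
+/// let monster = Monster { hp: 300, name: "Dreadnought".to_string() };
+/// let mut serializer = flexbuffers::FlexbufferSerializer::new();
+/// monster.serialize(&mut serializer).unwrap();
+/// let bytes = serializer.view();
+/// assert!(!bytes.is_empty());
+/// ```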
+#[derive(Debug, Default)]
+pub struct FlexbufferSerializer {
+    builder: Builder,
+    nesting: Vec<Option<usize>>,
+}
+impl FlexbufferSerializer {
+    pub fn new() -> Self {
+        Self::default()
+    }
+    pub fn view(&self) -> &[u8] {
+        self.builder.view()
+    }
+    pub fn take_buffer(&mut self) -> Vec<u8> {
+        self.builder.take_buffer()
+    }
+    fn finish_if_not_nested(&mut self) -> Result<(), Error> {
+        if self.nesting.is_empty() {
+            assert_eq!(self.builder.values.len(), 1);
+            let root = self.builder.values.pop().unwrap();
+            super::store_root(&mut self.builder.buffer, root);
+        }
+        Ok(())
+    }
+    fn start_vector(&mut self) {
+        let previous_end = if self.nesting.is_empty() {
+            None
+        } else {
+            Some(self.builder.values.len())
+        };
+        self.nesting.push(previous_end);
+    }
+    fn start_map(&mut self) {
+        let previous_end = if self.nesting.is_empty() {
+            None
+        } else {
+            Some(self.builder.values.len())
+        };
+        self.nesting.push(previous_end);
+    }
+    fn end_vector(&mut self) -> Result<(), Error> {
+        let previous_end = self.nesting.pop().unwrap();
+        self.builder.end_map_or_vector(false, previous_end);
+        Ok(())
+    }
+    fn end_map(&mut self) -> Result<(), Error> {
+        let previous_end = self.nesting.pop().unwrap();
+        self.builder.end_map_or_vector(true, previous_end);
+        Ok(())
+    }
+}
+
+#[derive(Debug)]
+/// Errors that may happen with Serde.
+pub enum Error {
+    /// Only `str` and `String` can be serialized as keys in serde maps.
+    KeyMustBeString,
+    Serde(String),
+}
+
+impl std::fmt::Display for Error {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+        write!(f, "{:?}", self)
+    }
+}
+impl std::error::Error for Error {}
+impl ser::Error for Error {
+    fn custom<T>(msg: T) -> Self
+    where
+        T: Display,
+    {
+        Self::Serde(format!("{}", msg))
+    }
+}
+impl<'a> ser::SerializeSeq for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_vector()
+    }
+}
+// This is unlike a flexbuffers map, which requires CString-like keys.
+// It's implemented as alternating keys and values (hopefully).
+impl<'a> ser::SerializeMap for &'a mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_key<T: ?Sized>(&mut self, key: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        key.serialize(MapKeySerializer(&mut **self))
+    }
+    fn serialize_value<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_map()
+    }
+}
+impl<'a> ser::SerializeTuple for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_element<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_vector()
+    }
+}
+impl<'a> ser::SerializeTupleStruct for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_vector()
+    }
+}
+impl<'a> ser::SerializeStruct for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_field<T: ?Sized>(
+        &mut self,
+        key: &'static str,
+        value: &T,
+    ) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        self.builder.push_key(key);
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_map()
+    }
+}
+impl<'a> ser::SerializeTupleVariant for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_field<T: ?Sized>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_vector()?;
+        self.end_map()
+    }
+}
+impl<'a> ser::SerializeStructVariant for &mut FlexbufferSerializer {
+    type Ok = ();
+    type Error = Error;
+    fn serialize_field<T: ?Sized>(
+        &mut self,
+        key: &'static str,
+        value: &T,
+    ) -> Result<(), Self::Error>
+    where
+        T: Serialize,
+    {
+        self.builder.push_key(key);
+        value.serialize(&mut **self)
+    }
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        self.end_map()?;
+        self.end_map()
+    }
+    // TODO: skip field?
+}
+
+impl<'a> ser::Serializer for &'a mut FlexbufferSerializer {
+    type SerializeSeq = &'a mut FlexbufferSerializer;
+    type SerializeTuple = &'a mut FlexbufferSerializer;
+    type SerializeTupleStruct = &'a mut FlexbufferSerializer;
+    type SerializeTupleVariant = &'a mut FlexbufferSerializer;
+    type SerializeMap = &'a mut FlexbufferSerializer;
+    type SerializeStruct = &'a mut FlexbufferSerializer;
+    type SerializeStructVariant = &'a mut FlexbufferSerializer;
+    type Ok = ();
+    type Error = Error;
+    fn is_human_readable(&self) -> bool {
+        cfg!(serialize_human_readable)
+    }
+    fn serialize_bool(self, v: bool) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_i8(self, v: i8) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_i16(self, v: i16) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_i32(self, v: i32) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_i64(self, v: i64) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_f32(self, v: f32) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_f64(self, v: f64) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_char(self, v: char) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v as u8);
+        self.finish_if_not_nested()
+    }
+    fn serialize_str(self, v: &str) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(v);
+        self.finish_if_not_nested()
+    }
+    fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(crate::Blob(v));
+        self.finish_if_not_nested()
+    }
+    fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(());
+        self.finish_if_not_nested()
+    }
+    fn serialize_some<T: ?Sized>(self, t: &T) -> Result<Self::Ok, Self::Error>
+    where
+        T: Serialize,
+    {
+        t.serialize(self)
+    }
+    fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(());
+        self.finish_if_not_nested()
+    }
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(());
+        self.finish_if_not_nested()
+    }
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+    ) -> Result<Self::Ok, Self::Error> {
+        self.builder.push(variant);
+        self.finish_if_not_nested()
+    }
+    fn serialize_newtype_struct<T: ?Sized>(
+        self,
+        _name: &'static str,
+        value: &T,
+    ) -> Result<Self::Ok, Self::Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(self)
+    }
+    fn serialize_newtype_variant<T: ?Sized>(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        value: &T,
+    ) -> Result<Self::Ok, Self::Error>
+    where
+        T: Serialize,
+    {
+        self.start_map();
+        self.builder.push_key(variant);
+        value.serialize(&mut *self)?;
+        self.end_map()
+    }
+    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
+        self.start_vector();
+        Ok(self)
+    }
+    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
+        self.start_vector();
+        Ok(self)
+    }
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
+        self.start_map();
+        Ok(self)
+    }
+    fn serialize_tuple_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
+        self.start_map();
+        self.builder.push_key(variant);
+        self.start_vector();
+        Ok(self)
+    }
+    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
+        self.start_map();
+        Ok(self)
+    }
+    fn serialize_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStruct, Self::Error> {
+        self.start_map();
+        Ok(self)
+    }
+    fn serialize_struct_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStructVariant, Self::Error> {
+        self.start_map();
+        self.builder.push_key(variant);
+        self.start_map();
+        Ok(self)
+    }
+}
+
+fn key_must_be_a_string<T>() -> Result<T, Error> {
+    Err(Error::KeyMustBeString)
+}
+struct MapKeySerializer<'a>(&'a mut FlexbufferSerializer);
+impl<'a> Serializer for MapKeySerializer<'a> {
+    type Ok = ();
+    type Error = Error;
+    #[inline]
+    fn serialize_str(self, value: &str) -> Result<(), Error> {
+        self.0.builder.push_key(value);
+        Ok(())
+    }
+    #[inline]
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        variant: &'static str,
+    ) -> Result<(), Error> {
+        self.0.builder.push_key(variant);
+        Ok(())
+    }
+    #[inline]
+    fn serialize_newtype_struct<T: ?Sized>(
+        self,
+        _name: &'static str,
+        value: &T,
+    ) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        value.serialize(self)
+    }
+    type SerializeSeq = Impossible<(), Error>;
+    type SerializeTuple = Impossible<(), Error>;
+    type SerializeTupleStruct = Impossible<(), Error>;
+    type SerializeTupleVariant = Impossible<(), Error>;
+    type SerializeMap = Impossible<(), Error>;
+    type SerializeStruct = Impossible<(), Error>;
+    type SerializeStructVariant = Impossible<(), Error>;
+
+    fn serialize_bool(self, _value: bool) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_i8(self, _value: i8) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_i16(self, _value: i16) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_i32(self, _value: i32) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_i64(self, _value: i64) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_u8(self, _value: u8) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_u16(self, _value: u16) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_u32(self, _value: u32) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_u64(self, _value: u64) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_f32(self, _value: f32) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_f64(self, _value: f64) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_char(self, _value: char) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_bytes(self, _value: &[u8]) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_unit(self) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_newtype_variant<T: ?Sized>(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _value: &T,
+    ) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        key_must_be_a_string()
+    }
+    fn serialize_none(self) -> Result<(), Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_some<T: ?Sized>(self, _value: &T) -> Result<(), Error>
+    where
+        T: Serialize,
+    {
+        key_must_be_a_string()
+    }
+    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleStruct, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_tuple_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleVariant, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStruct, Error> {
+        key_must_be_a_string()
+    }
+    fn serialize_struct_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStructVariant, Error> {
+        key_must_be_a_string()
+    }
+}
diff --git a/rust/flexbuffers/src/builder/value.rs b/rust/flexbuffers/src/builder/value.rs
new file mode 100644
index 0000000..f230c34
--- /dev/null
+++ b/rust/flexbuffers/src/builder/value.rs
@@ -0,0 +1,306 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use byteorder::{LittleEndian, WriteBytesExt};
+
+use crate::bitwidth::BitWidth;
+use crate::bitwidth::BitWidth::*;
+use crate::flexbuffer_type::FlexBufferType;
+use crate::flexbuffer_type::FlexBufferType::*;
+
+/// Internal representation of FlexBuffer Types and Data before writing.
+/// These get placed on the builder's stack and are eventually committed.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Value {
+    // Inline types
+    Null,
+    Int(i64),
+    UInt(u64),
+    Float(f64),
+    Bool(bool),
+    /// Null-terminated C string. Only used with `Map`s.
+    Key(usize),
+    /// The other ~20 or so types.
+    Reference {
+        address: usize,
+        child_width: BitWidth,
+        fxb_type: FlexBufferType,
+    },
+}
+
+macro_rules! new_typed_vector {
+    ($name: ident, $v2: ident, $v3: ident, $v4: ident, $vn: ident) => {
+        /// Returns a typed vector, fixed length if possible.
+        /// Address and child width are zero initialized and must be set.
+        pub fn $name(n: usize) -> Value {
+            let address = 0;
+            let child_width = W8;
+            match n {
+                2 => Value::Reference {
+                    address,
+                    child_width,
+                    fxb_type: $v2,
+                },
+                3 => Value::Reference {
+                    address,
+                    child_width,
+                    fxb_type: $v3,
+                },
+                4 => Value::Reference {
+                    address,
+                    child_width,
+                    fxb_type: $v4,
+                },
+                _ => Value::Reference {
+                    address,
+                    child_width,
+                    fxb_type: $vn,
+                },
+            }
+        }
+    };
+}
+
+impl Value {
+    pub fn new_vector() -> Self {
+        Value::Reference {
+            address: 0,
+            child_width: W8,
+            fxb_type: Vector,
+        }
+    }
+    pub fn new_map() -> Self {
+        Value::Reference {
+            address: 0,
+            child_width: W8,
+            fxb_type: Map,
+        }
+    }
+    new_typed_vector!(
+        new_int_vector,
+        VectorInt2,
+        VectorInt3,
+        VectorInt4,
+        VectorInt
+    );
+    new_typed_vector!(
+        new_uint_vector,
+        VectorUInt2,
+        VectorUInt3,
+        VectorUInt4,
+        VectorUInt
+    );
+    new_typed_vector!(
+        new_float_vector,
+        VectorFloat2,
+        VectorFloat3,
+        VectorFloat4,
+        VectorFloat
+    );
+    pub fn fxb_type(&self) -> FlexBufferType {
+        match *self {
+            Value::Null => Null,
+            Value::Int(_) => Int,
+            Value::UInt(_) => UInt,
+            Value::Float(_) => Float,
+            Value::Bool(_) => Bool,
+            Value::Key(_) => Key,
+            Value::Reference { fxb_type, .. } => fxb_type,
+        }
+    }
+    pub fn is_fixed_length_vector(&self) -> bool {
+        self.fxb_type().is_fixed_length_vector()
+    }
+    pub fn is_inline(&self) -> bool {
+        self.fxb_type().is_inline()
+    }
+    pub fn is_reference(&self) -> bool {
+        !self.is_inline()
+    }
+    pub fn is_key(&self) -> bool {
+        match self {
+            Value::Key(_) => true,
+            _ => false,
+        }
+    }
+    pub fn is_typed_vector_or_map(&self) -> bool {
+        if let Value::Reference { fxb_type, .. } = self {
+            fxb_type.is_heterogenous()
+        } else {
+            false
+        }
+    }
+    pub fn prefix_length(&self) -> usize {
+        if self.is_fixed_length_vector() || self.is_inline() {
+            return 0;
+        }
+        if let Value::Reference { fxb_type, .. } = self {
+            if *fxb_type == Map {
+                return 3;
+            }
+        }
+        1
+    }
+    pub fn set_fxb_type_or_panic(&mut self, new_type: FlexBufferType) {
+        if let Value::Reference { fxb_type, .. } = self {
+            *fxb_type = new_type;
+        } else {
+            panic!("`set_fxb_type_or_panic` called on {:?}", self)
+        }
+    }
+    pub fn set_child_width_or_panic(&mut self, new_width: BitWidth) {
+        if let Value::Reference { child_width, .. } = self {
+            *child_width = new_width;
+        } else {
+            panic!("`set_child_width_or_panic` called on {:?}", self);
+        }
+    }
+    pub fn get_address(&self) -> Option<usize> {
+        if let Value::Reference { address, .. } | Value::Key(address) = self {
+            Some(*address)
+        } else {
+            None
+        }
+    }
+    pub fn set_address_or_panic(&mut self, new_address: usize) {
+        if let Value::Reference { address, .. } | Value::Key(address) = self {
+            *address = new_address;
+        } else {
+            panic!("`set_address_or_panic` called on {:?}", self);
+        }
+    }
+    /// For inline types, the width of the value to be stored.
+    /// For reference types, the width of the data referred to.
+    /// Note that Key types always refer to 8-bit data.
+    pub fn width_or_child_width(&self) -> BitWidth {
+        match *self {
+            Value::Int(x) => x.into(),
+            Value::UInt(x) => x.into(),
+            Value::Float(x) => x.into(),
+            Value::Key(_) | Value::Bool(_) | Value::Null => W8,
+            Value::Reference { child_width, .. } => child_width,
+        }
+    }
+    pub fn relative_address(self, written_at: usize) -> Option<Value> {
+        self.get_address().map(|address| {
+            let offset = written_at
+                .checked_sub(address)
+                .expect("Error: References may only refer backwards in buffer.");
+            Value::UInt(offset as u64)
+        })
+    }
+    /// Computes the minimum required width of `value` when stored in a vector
+    /// starting at `vector_start` at index `idx` (this index includes the prefix).
+    /// `Value::Reference{..}` variants require location information because
+    /// offsets are relative.
+    pub fn width_in_vector(self, vector_start: usize, idx: usize) -> BitWidth {
+        match self {
+            Value::Bool(_) => W8,
+            Value::Null => W8,
+            Value::Int(x) => x.into(),
+            Value::UInt(x) => x.into(),
+            Value::Float(x) => x.into(),
+            _ => {
+                debug_assert!(self.is_reference());
+                for &width in BitWidth::iter() {
+                    let bytes = width as usize + 1;
+                    let alignment = (bytes - vector_start % bytes) % bytes;
+                    let written_at = vector_start + alignment + idx * bytes;
+                    // This match must always succeed.
+                    if let Some(Value::UInt(offset)) = self.relative_address(written_at) {
+                        if BitWidth::from(offset) == width {
+                            return width;
+                        }
+                    }
+                }
+                unreachable!()
+            }
+        }
+    }
+    pub fn packed_type(self, parent_width: BitWidth) -> u8 {
+        let width = if self.is_inline() {
+            std::cmp::max(parent_width, self.width_or_child_width())
+        } else {
+            self.width_or_child_width()
+        };
+        (self.fxb_type() as u8) << 2 | width as u8
+    }
+}
+
+pub fn find_vector_type<'a, T>(mut values: T) -> Value
+where
+    T: std::iter::Iterator<Item = &'a Value>,
+{
+    let first = values.next();
+    if first.is_none() {
+        return Value::new_vector();
+    }
+    let mut len = 1;
+    let init = first.unwrap().fxb_type();
+    for v in values {
+        if v.fxb_type() != init {
+            return Value::new_vector();
+        }
+        len += 1;
+    }
+    let vector_type = match init {
+        Bool => VectorBool,
+        UInt => return Value::new_uint_vector(len),
+        Int => return Value::new_int_vector(len),
+        Float => return Value::new_float_vector(len),
+        Key => VectorKey,
+        // Note that VectorString is deprecated for writing
+        _ => return Value::new_vector(),
+    };
+    Value::Reference {
+        address: 0,
+        child_width: W8,
+        fxb_type: vector_type,
+    }
+}
+
+#[inline]
+pub fn store_value(buffer: &mut Vec<u8>, mut value: Value, width: BitWidth) {
+    // Remap to number types.
+    use Value::*;
+    if let Some(offset) = value.relative_address(buffer.len()) {
+        value = offset;
+    } else {
+        value = match value {
+            Bool(x) => UInt(x.into()),
+            Null => UInt(0), // Should this be 0 bytes?
+            _ => value,
+        }
+    }
+    let write_result = match (value, width) {
+        (UInt(x), W8) => buffer.write_u8(x as u8),
+        (UInt(x), W16) => buffer.write_u16::<LittleEndian>(x as u16),
+        (UInt(x), W32) => buffer.write_u32::<LittleEndian>(x as u32),
+        (UInt(x), W64) => buffer.write_u64::<LittleEndian>(x),
+        (Int(x), W8) => buffer.write_i8(x as i8),
+        (Int(x), W16) => buffer.write_i16::<LittleEndian>(x as i16),
+        (Int(x), W32) => buffer.write_i32::<LittleEndian>(x as i32),
+        (Int(x), W64) => buffer.write_i64::<LittleEndian>(x),
+        (Float(x), W32) => buffer.write_f32::<LittleEndian>(x as f32),
+        (Float(x), W64) => buffer.write_f64::<LittleEndian>(x),
+        (Float(_), _) => unreachable!("Error: Flexbuffers does not support 8- and 16-bit floats."),
+        _ => unreachable!("Variant not considered: {:?}", value),
+    };
+    write_result.unwrap_or_else(|err| {
+        panic!(
+            "Error writing value {:?} with width {:?}: {:?}",
+            value, width, err
+        )
+    });
+}
diff --git a/rust/flexbuffers/src/builder/vector.rs b/rust/flexbuffers/src/builder/vector.rs
new file mode 100644
index 0000000..4d73da3
--- /dev/null
+++ b/rust/flexbuffers/src/builder/vector.rs
@@ -0,0 +1,65 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Builder, MapBuilder, Pushable};
+
+/// Builds a Flexbuffer vector, returned by a [Builder](struct.Builder.html).
+///
+/// ## Side effect when dropped:
+/// When this is dropped, or `end_vector` is called, the vector is
+/// committed to the buffer. If this vector is the root of the flexbuffer, then the
+/// root is written and the flexbuffer is complete. The FlexBufferType of this vector
+/// is determined by the pushed values when this is dropped. The most compact vector type is
+/// automatically chosen.
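+///
+/// ## Example
+/// A minimal usage sketch (assuming `Builder` is re-exported at the crate root):
+/// ```
+/// use flexbuffers::Builder;
+/// let mut builder = Builder::default();
+/// let mut vector = builder.start_vector();
+/// vector.push(7u8);
+/// {
+///     // The nested vector is committed and pushed onto its parent when dropped.
+///     let mut nested = vector.start_vector();
+///     nested.push("a");
+///     nested.push("b");
+/// }
+/// vector.end_vector(); // The root vector is written to the buffer here.
+/// ```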
+pub struct VectorBuilder<'a> {
+    pub(crate) builder: &'a mut Builder,
+    // If the root is this vector then start == None. Otherwise start is the
+    // number of values in the 'values stack' before adding this vector.
+    pub(crate) start: Option<usize>,
+}
+impl<'a> VectorBuilder<'a> {
+    /// Pushes `p` onto the vector.
+    #[inline]
+    pub fn push<P: Pushable>(&mut self, p: P) {
+        self.builder.push(p);
+    }
+    /// Starts a nested vector that will be pushed onto this vector when it is dropped.
+    #[inline]
+    pub fn start_vector(&mut self) -> VectorBuilder {
+        let start = Some(self.builder.values.len());
+        VectorBuilder {
+            builder: &mut self.builder,
+            start,
+        }
+    }
+    /// Starts a nested map that will be pushed onto this vector when it is dropped.
+    #[inline]
+    pub fn start_map(&mut self) -> MapBuilder {
+        let start = Some(self.builder.values.len());
+        MapBuilder {
+            builder: &mut self.builder,
+            start,
+        }
+    }
+    /// `end_vector` determines the type of the vector and writes it to the buffer.
+    /// This will happen automatically if the VectorBuilder is dropped.
+    #[inline]
+    pub fn end_vector(self) {}
+}
+impl<'a> Drop for VectorBuilder<'a> {
+    #[inline]
+    fn drop(&mut self) {
+        self.builder.end_map_or_vector(false, self.start);
+    }
+}
diff --git a/rust/flexbuffers/src/flexbuffer_type.rs b/rust/flexbuffers/src/flexbuffer_type.rs
new file mode 100644
index 0000000..5b57de8
--- /dev/null
+++ b/rust/flexbuffers/src/flexbuffer_type.rs
@@ -0,0 +1,240 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#![allow(deprecated)]
+/// Represents all the valid types in a flexbuffer.
+///
+/// Flexbuffers supports
+/// heterogeneous maps, heterogeneous vectors, typed vectors, and fixed-length
+/// typed vectors for some lengths and types. Rust types are converted into
+/// Flexbuffers via the [Pushable](trait.Pushable.html) trait.
+///
+/// For exact details see the [internals document](
+/// https://google.github.io/flatbuffers/flatbuffers_internals.html)
+///
+/// ### Notes:
+/// * In the binary format, each element of a `Map` or (heterogeneous) `Vector`
+/// is stored with a byte describing its FlexBufferType and BitWidth.
+///
+/// * Typed vectors do not store this extra type information and fixed length
+/// typed vectors do not store length. Whether a vector is stored as a typed
+/// vector or a fixed-length typed vector is determined dynamically from the
+/// given data.
+///
+/// * Indirect numbers are stored as an offset instead of inline. Using
+/// indirect numbers instead of their inline counterparts in maps and typed
+/// vectors can reduce the minimum element width and therefore bytes used.
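+///
+/// A small sketch of querying type properties (assuming `FlexBufferType` is
+/// re-exported at the crate root):
+/// ```
+/// use flexbuffers::FlexBufferType;
+/// assert!(FlexBufferType::UInt.is_inline());
+/// assert!(FlexBufferType::Map.is_reference());
+/// assert_eq!(FlexBufferType::VectorInt3.fixed_length_vector_length(), Some(3));
+/// ```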
+
+#[repr(u8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, num_enum::TryFromPrimitive)]
+pub enum FlexBufferType {
+    /// Nulls are represented with `()` in Rust.
+    Null = 0,
+    /// Variable width signed integer: `i8, i16, i32, i64`
+    Int = 1,
+    /// Variable width unsigned integer: `u8, u16, u32, u64`
+    UInt = 2,
+    /// Variable width floating point: `f32, f64`
+    Float = 3,
+    Bool = 26,
+    /// Null-terminated, UTF-8 string. Typically used with `Map`s.
+    Key = 4,
+    /// Stored with an unsigned integer length, then UTF-8 bytes, and an extra null terminator that
+    /// is not counted with the length.
+    String = 5,
+    /// An Int, stored by offset rather than inline. Indirect types can keep the bitwidth of a
+    /// vector or map small when the inline value would have increased the bitwidth.
+    IndirectInt = 6,
+    /// A UInt, stored by offset rather than inline. Indirect types can keep the bitwidth of a
+    /// vector or map small when the inline value would have increased the bitwidth.
+    IndirectUInt = 7,
+    /// A Float, stored by offset rather than inline. Indirect types can keep the bitwidth of a
+    /// vector or map small when the inline value would have increased the bitwidth.
+    IndirectFloat = 8,
+    /// Maps are like Vectors except elements are associated with, and sorted by, keys.
+    Map = 9,
+    /// Heterogenous Vector (stored with a type table).
+    Vector = 10,
+    /// Homogenous Vector of Ints.
+    VectorInt = 11,
+    /// Homogenous Vector of UInts.
+    VectorUInt = 12,
+    /// Homogenous Vector of Floats.
+    VectorFloat = 13,
+    /// Homogenous Vector of Keys.
+    VectorKey = 14,
+    /// Homogenous Vector of Strings.
+    #[deprecated(
+        note = "Please use Vector or VectorKey instead. See https://github.com/google/flatbuffers/issues/5627"
+    )]
+    VectorString = 15,
+    /// Since the elements of a vector use the same `BitWidth` as the length,
+    /// Blob is more efficient for >255 element boolean vectors.
+    VectorBool = 36,
+    /// Homogenous vector of two Ints
+    VectorInt2 = 16,
+    /// Homogenous vector of two UInts
+    VectorUInt2 = 17,
+    /// Homogenous vector of two Floats
+    VectorFloat2 = 18,
+    /// Homogenous vector of three Ints
+    VectorInt3 = 19,
+    /// Homogenous vector of three UInts
+    VectorUInt3 = 20,
+    /// Homogenous vector of three Floats
+    VectorFloat3 = 21,
+    /// Homogenous vector of four Ints
+    VectorInt4 = 22,
+    /// Homogenous vector of four UInts
+    VectorUInt4 = 23,
+    /// Homogenous vector of four Floats
+    VectorFloat4 = 24,
+    /// An array of bytes. Stored with a variable width length.
+    Blob = 25,
+}
+use FlexBufferType::*;
+
+impl Default for FlexBufferType {
+    fn default() -> Self {
+        Null
+    }
+}
+
+macro_rules! is_ty {
+    ($is_T: ident, $FTy: ident) => {
+        #[inline(always)]
+        pub fn $is_T(self) -> bool {
+            self == $FTy
+        }
+    };
+}
+
+impl FlexBufferType {
+    /// Returns true for flexbuffer types that are stored inline.
+    pub fn is_inline(self) -> bool {
+        match self {
+            Null | Int | UInt | Float | Bool => true,
+            _ => false,
+        }
+    }
+    /// Returns true for flexbuffer types that are stored by offset.
+    pub fn is_reference(self) -> bool {
+        !self.is_inline()
+    }
+    /// Returns true if called on a map, vector, typed vector, or fixed length typed vector.
+    pub fn is_vector(self) -> bool {
+        let d = self as u8;
+        9 <= d && d < 25 || self == VectorBool
+    }
+    /// True iff the binary format stores the length.
+    /// This applies to Blob, String, Maps, and Vectors of variable length.
+    pub fn has_length_slot(self) -> bool {
+        !self.is_fixed_length_vector() && self.is_vector() || self == String || self == Blob
+    }
+    /// Returns true if called on a fixed length typed vector.
+    pub fn is_fixed_length_vector(self) -> bool {
+        self.fixed_length_vector_length().is_some()
+    }
+    /// If called on a fixed type vector, returns the type of the elements.
+    pub fn typed_vector_type(self) -> Option<FlexBufferType> {
+        match self {
+            VectorInt | VectorInt2 | VectorInt3 | VectorInt4 => Some(Int),
+            VectorUInt | VectorUInt2 | VectorUInt3 | VectorUInt4 => Some(UInt),
+            VectorFloat | VectorFloat2 | VectorFloat3 | VectorFloat4 => Some(Float),
+            VectorKey => Some(Key),
+            // Treat them as keys because we do not know the width of the length slot.
+            // See the deprecation link above.
+            VectorString => Some(Key),
+            VectorBool => Some(Bool),
+            _ => None,
+        }
+    }
+    /// Returns the length of a fixed-length vector, or None for other types.
+    pub fn fixed_length_vector_length(self) -> Option<usize> {
+        match self {
+            VectorInt2 | VectorUInt2 | VectorFloat2 => Some(2),
+            VectorInt3 | VectorUInt3 | VectorFloat3 => Some(3),
+            VectorInt4 | VectorUInt4 | VectorFloat4 => Some(4),
+            _ => None,
+        }
+    }
+    /// Returns true if self is a Map or Vector. Typed vectors are not heterogeneous.
+    pub fn is_heterogenous(self) -> bool {
+        self == Map || self == Vector
+    }
+    /// If `self` is an indirect scalar, returns the corresponding direct scalar type. Otherwise returns None.
+    pub fn to_direct(self) -> Option<Self> {
+        match self {
+            IndirectInt => Some(Int),
+            IndirectUInt => Some(UInt),
+            IndirectFloat => Some(Float),
+            _ => None,
+        }
+    }
+    // returns true if and only if the flexbuffer type is `Null`.
+    is_ty!(is_null, Null);
+    // returns true if and only if the flexbuffer type is `Int`.
+    is_ty!(is_int, Int);
+    // returns true if and only if the flexbuffer type is `UInt`.
+    is_ty!(is_uint, UInt);
+    // returns true if and only if the flexbuffer type is `Float`.
+    is_ty!(is_float, Float);
+    // returns true if and only if the flexbuffer type is `Bool`.
+    is_ty!(is_bool, Bool);
+    // returns true if and only if the flexbuffer type is `Key`.
+    is_ty!(is_key, Key);
+    // returns true if and only if the flexbuffer type is `String`.
+    is_ty!(is_string, String);
+    // returns true if and only if the flexbuffer type is `IndirectInt`.
+    is_ty!(is_indirect_int, IndirectInt);
+    // returns true if and only if the flexbuffer type is `IndirectUInt`.
+    is_ty!(is_indirect_uint, IndirectUInt);
+    // returns true if and only if the flexbuffer type is `IndirectFloat`.
+    is_ty!(is_indirect_float, IndirectFloat);
+    // returns true if and only if the flexbuffer type is `Map`.
+    is_ty!(is_map, Map);
+    // returns true if and only if the flexbuffer type is `Vector`.
+    is_ty!(is_heterogenous_vector, Vector);
+    // returns true if and only if the flexbuffer type is `VectorInt`.
+    is_ty!(is_vector_int, VectorInt);
+    // returns true if and only if the flexbuffer type is `VectorUInt`.
+    is_ty!(is_vector_uint, VectorUInt);
+    // returns true if and only if the flexbuffer type is `VectorFloat`.
+    is_ty!(is_vector_float, VectorFloat);
+    // returns true if and only if the flexbuffer type is `VectorKey`.
+    is_ty!(is_vector_key, VectorKey);
+    // returns true if and only if the flexbuffer type is `VectorString`.
+    is_ty!(is_vector_string, VectorString);
+    // returns true if and only if the flexbuffer type is `VectorBool`.
+    is_ty!(is_vector_bool, VectorBool);
+    // returns true if and only if the flexbuffer type is `VectorInt2`.
+    is_ty!(is_vector_int2, VectorInt2);
+    // returns true if and only if the flexbuffer type is `VectorUInt2`.
+    is_ty!(is_vector_uint2, VectorUInt2);
+    // returns true if and only if the flexbuffer type is `VectorFloat2`.
+    is_ty!(is_vector_float2, VectorFloat2);
+    // returns true if and only if the flexbuffer type is `VectorInt3`.
+    is_ty!(is_vector_int3, VectorInt3);
+    // returns true if and only if the flexbuffer type is `VectorUInt3`.
+    is_ty!(is_vector_uint3, VectorUInt3);
+    // returns true if and only if the flexbuffer type is `VectorFloat3`.
+    is_ty!(is_vector_float3, VectorFloat3);
+    // returns true if and only if the flexbuffer type is `VectorInt4`.
+    is_ty!(is_vector_int4, VectorInt4);
+    // returns true if and only if the flexbuffer type is `VectorUInt4`.
+    is_ty!(is_vector_uint4, VectorUInt4);
+    // returns true if and only if the flexbuffer type is `VectorFloat4`.
+    is_ty!(is_vector_float4, VectorFloat4);
+    // returns true if and only if the flexbuffer type is `Blob`.
+    is_ty!(is_blob, Blob);
+}
diff --git a/rust/flexbuffers/src/lib.rs b/rust/flexbuffers/src/lib.rs
new file mode 100644
index 0000000..20983b7
--- /dev/null
+++ b/rust/flexbuffers/src/lib.rs
@@ -0,0 +1,94 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Flexbuffers is a high-performance, schemaless binary data format designed at Google.
+//! It is complementary to the schema-based format [Flatbuffers](http://docs.rs/flatbuffers/).
+//! See [Flexbuffer Internals](https://google.github.io/flatbuffers/flatbuffers_internals.html)
+//! for details on the binary format.
+//!
+//! See the examples for usage:
+//! * [Example](https://github.com/google/flatbuffers/blob/master/samples/sample_flexbuffers.rs)
+//! * [Serde Example](https://github.com/google/flatbuffers/blob/master/samples/sample_flexbuffers_serde.rs)
+//!
+//! This Rust implementation is in progress and, until the 1.0 release, breaking API changes may
+//! happen between minor versions.
+// TODO(cneo): serde stuff is behind a default-on feature flag
+//             Reader to Json is behind a default-off feature flag
+//             Serializable structs are Pushable
+//             Serde with maps - field names and type names.
+
+#[macro_use]
+extern crate bitflags;
+extern crate byteorder;
+#[macro_use]
+extern crate serde_derive;
+extern crate num_enum;
+extern crate serde;
+
+mod bitwidth;
+mod builder;
+mod flexbuffer_type;
+mod reader;
+pub use bitwidth::BitWidth;
+pub use builder::Error as SerializationError;
+pub use builder::{
+    singleton, Builder, BuilderOptions, FlexbufferSerializer, MapBuilder, Pushable, VectorBuilder,
+};
+pub use flexbuffer_type::FlexBufferType;
+pub use reader::Error as ReaderError;
+pub use reader::{DeserializationError, MapReader, Reader, ReaderIterator, VectorReader};
+use serde::{Deserialize, Serialize};
+
+mod private {
+    pub trait Sealed {}
+}
+
+/// Serialize as a flexbuffer into a vector.
+pub fn to_vec<T: Serialize>(x: T) -> Result<Vec<u8>, SerializationError> {
+    let mut s = FlexbufferSerializer::new();
+    x.serialize(&mut s)?;
+    Ok(s.take_buffer())
+}
+/// Deserialize a type from a flexbuffer.
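+///
+/// A minimal round-trip sketch using `to_vec` above (any `Serialize`/`Deserialize` type works
+/// the same way):
+/// ```
+/// let buf = flexbuffers::to_vec(vec![1u32, 2, 3]).unwrap();
+/// let back: Vec<u32> = flexbuffers::from_slice(&buf).unwrap();
+/// assert_eq!(back, vec![1, 2, 3]);
+/// ```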
+pub fn from_slice<'de, T: Deserialize<'de>>(buf: &'de [u8]) -> Result<T, DeserializationError> {
+    let r = Reader::get_root(buf)?;
+    T::deserialize(r)
+}
+
+/// This struct, when pushed, will be serialized as a `FlexBufferType::Blob`.
+///
+/// A `Blob` is a variable width `length` followed by that many bytes of data.
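+///
+/// A hedged sketch, assuming `Blob` implements `Pushable` (as the wording above implies) so it
+/// can be handed to the builder's `singleton` helper:
+/// ```
+/// let buf = flexbuffers::singleton(flexbuffers::Blob(&[1u8, 2, 3][..]));
+/// let root = flexbuffers::Reader::get_root(&buf).unwrap();
+/// assert_eq!(root.get_blob().unwrap().0, &[1u8, 2, 3][..]);
+/// ```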
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct Blob<'a>(pub &'a [u8]);
+
+/// This struct, when pushed, will be serialized as a `FlexBufferType::IndirectUInt`.
+///
+/// It is an unsigned integer stored by reference in the flexbuffer. This can reduce the
+/// size of vectors and maps containing the `IndirectUInt`.
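+///
+/// A hedged sketch, assuming `IndirectUInt` is `Pushable` into the builder's `singleton` helper;
+/// on the read side the indirection is transparent:
+/// ```
+/// let buf = flexbuffers::singleton(flexbuffers::IndirectUInt(u64::max_value()));
+/// let root = flexbuffers::Reader::get_root(&buf).unwrap();
+/// // Indirect scalars are dereferenced by the Reader, so this behaves like a plain UInt.
+/// assert_eq!(root.as_u64(), u64::max_value());
+/// ```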
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct IndirectUInt(pub u64);
+
+/// This struct, when pushed, will be serialized as a `FlexBufferType::IndirectInt`.
+///
+/// It is a signed integer stored by reference in the flexbuffer. This can reduce the
+/// size of vectors and maps containing the `IndirectInt`.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct IndirectInt(pub i64);
+
+/// This struct, when pushed, will be serialized as a `FlexBufferType::IndirectFloat`.
+///
+/// It is a floating point stored by reference in the flexbuffer. This can reduce the
+/// size of vectors and maps containing the `IndirectFloat`.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct IndirectFloat(pub f64);
diff --git a/rust/flexbuffers/src/reader/de.rs b/rust/flexbuffers/src/reader/de.rs
new file mode 100644
index 0000000..8cc3b6e
--- /dev/null
+++ b/rust/flexbuffers/src/reader/de.rs
@@ -0,0 +1,254 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::Error;
+use crate::{FlexBufferType, Reader, ReaderIterator};
+use serde::de::{
+    DeserializeSeed, Deserializer, EnumAccess, IntoDeserializer, MapAccess, SeqAccess,
+    VariantAccess, Visitor,
+};
+
+/// Errors that may happen when deserializing a flexbuffer with serde.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum DeserializationError {
+    Reader(Error),
+    Serde(String),
+}
+
+impl std::error::Error for DeserializationError {}
+impl std::fmt::Display for DeserializationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+        match self {
+            Self::Reader(r) => write!(f, "Flexbuffer Read Error: {:?}", r),
+            Self::Serde(s) => write!(f, "Serde Error: {}", s),
+        }
+    }
+}
+impl serde::de::Error for DeserializationError {
+    fn custom<T>(msg: T) -> Self
+    where
+        T: std::fmt::Display,
+    {
+        Self::Serde(format!("{}", msg))
+    }
+}
+impl std::convert::From<super::Error> for DeserializationError {
+    fn from(e: super::Error) -> Self {
+        Self::Reader(e)
+    }
+}
+
+impl<'de> SeqAccess<'de> for ReaderIterator<'de> {
+    type Error = DeserializationError;
+    fn next_element_seed<T>(
+        &mut self,
+        seed: T,
+    ) -> Result<Option<<T as DeserializeSeed<'de>>::Value>, Self::Error>
+    where
+        T: DeserializeSeed<'de>,
+    {
+        if let Some(elem) = self.next() {
+            seed.deserialize(elem).map(Some)
+        } else {
+            Ok(None)
+        }
+    }
+    fn size_hint(&self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+struct EnumReader<'de> {
+    variant: &'de str,
+    value: Option<Reader<'de>>,
+}
+
+impl<'de> EnumAccess<'de> for EnumReader<'de> {
+    type Error = DeserializationError;
+    type Variant = Reader<'de>;
+    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
+    where
+        V: DeserializeSeed<'de>,
+    {
+        seed.deserialize(self.variant.into_deserializer())
+            .map(|v| (v, self.value.unwrap_or_default()))
+    }
+}
+
+struct MapAccessor<'de> {
+    keys: ReaderIterator<'de>,
+    vals: ReaderIterator<'de>,
+}
+impl<'de> MapAccess<'de> for MapAccessor<'de> {
+    type Error = DeserializationError;
+
+    fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
+    where
+        K: DeserializeSeed<'de>,
+    {
+        if let Some(k) = self.keys.next() {
+            seed.deserialize(k).map(Some)
+        } else {
+            Ok(None)
+        }
+    }
+    fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
+    where
+        V: DeserializeSeed<'de>,
+    {
+        let val = self.vals.next().ok_or(Error::IndexOutOfBounds)?;
+        seed.deserialize(val)
+    }
+}
+
+impl<'de> VariantAccess<'de> for Reader<'de> {
+    type Error = DeserializationError;
+    fn unit_variant(self) -> Result<(), Self::Error> {
+        Ok(())
+    }
+    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, Self::Error>
+    where
+        T: DeserializeSeed<'de>,
+    {
+        seed.deserialize(self)
+    }
+    // Tuple variants have an internally tagged representation. They are vectors where Index 0 is
+    // the discriminant and index N is field N-1.
+    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_seq(self.as_vector().iter())
+    }
+    // Struct variants have an internally tagged representation. They are vectors where Index 0 is
+    // the discriminant and index N is field N-1.
+    fn struct_variant<V>(
+        self,
+        _fields: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        let m = self.get_map()?;
+        visitor.visit_map(MapAccessor {
+            keys: m.keys_vector().iter(),
+            vals: m.iter_values(),
+        })
+    }
+}
+
+impl<'de> Deserializer<'de> for crate::Reader<'de> {
+    type Error = DeserializationError;
+    fn is_human_readable(&self) -> bool {
+        cfg!(deserialize_human_readable)
+    }
+
+    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        use crate::BitWidth::*;
+        use crate::FlexBufferType::*;
+        match (self.flexbuffer_type(), self.bitwidth()) {
+            (Bool, _) => visitor.visit_bool(self.as_bool()),
+            (UInt, W8) => visitor.visit_u8(self.as_u8()),
+            (UInt, W16) => visitor.visit_u16(self.as_u16()),
+            (UInt, W32) => visitor.visit_u32(self.as_u32()),
+            (UInt, W64) => visitor.visit_u64(self.as_u64()),
+            (Int, W8) => visitor.visit_i8(self.as_i8()),
+            (Int, W16) => visitor.visit_i16(self.as_i16()),
+            (Int, W32) => visitor.visit_i32(self.as_i32()),
+            (Int, W64) => visitor.visit_i64(self.as_i64()),
+            (Float, W32) => visitor.visit_f32(self.as_f32()),
+            (Float, W64) => visitor.visit_f64(self.as_f64()),
+            (Float, _) => Err(Error::InvalidPackedType.into()), // f8 and f16 are not supported.
+            (Null, _) => visitor.visit_unit(),
+            (String, _) | (Key, _) => visitor.visit_borrowed_str(self.as_str()),
+            (Blob, _) => visitor.visit_borrowed_bytes(self.get_blob()?.0),
+            (Map, _) => {
+                let m = self.get_map()?;
+                visitor.visit_map(MapAccessor {
+                    keys: m.keys_vector().iter(),
+                    vals: m.iter_values(),
+                })
+            }
+            (ty, _) if ty.is_vector() => visitor.visit_seq(self.as_vector().iter()),
+            (ty, bw) => unreachable!("TODO deserialize_any {:?} {:?}.", ty, bw),
+        }
+    }
+    serde::forward_to_deserialize_any! {
+        bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 str unit unit_struct bytes
+        ignored_any map identifier struct tuple tuple_struct seq string
+    }
+    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_char(self.as_u8() as char)
+    }
+    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_byte_buf(self.get_blob()?.0.to_vec())
+    }
+    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        if self.flexbuffer_type() == FlexBufferType::Null {
+            visitor.visit_none()
+        } else {
+            visitor.visit_some(self)
+        }
+    }
+    fn deserialize_newtype_struct<V>(
+        self,
+        _name: &'static str,
+        visitor: V,
+    ) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        visitor.visit_newtype_struct(self)
+    }
+    fn deserialize_enum<V>(
+        self,
+        _name: &'static str,
+        _variants: &'static [&'static str],
+        visitor: V,
+    ) -> Result<V::Value, Self::Error>
+    where
+        V: Visitor<'de>,
+    {
+        let (variant, value) = match self.fxb_type {
+            FlexBufferType::String => (self.as_str(), None),
+            FlexBufferType::Map => {
+                let m = self.get_map()?;
+                let variant = m.keys_vector().idx(0).get_key()?;
+                let value = Some(m.idx(0));
+                (variant, value)
+            }
+            _ => {
+                return Err(Error::UnexpectedFlexbufferType {
+                    expected: FlexBufferType::Map,
+                    actual: self.fxb_type,
+                }
+                .into());
+            }
+        };
+        visitor.visit_enum(EnumReader { variant, value })
+    }
+}
diff --git a/rust/flexbuffers/src/reader/iter.rs b/rust/flexbuffers/src/reader/iter.rs
new file mode 100644
index 0000000..8e06171
--- /dev/null
+++ b/rust/flexbuffers/src/reader/iter.rs
@@ -0,0 +1,63 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Reader, VectorReader};
+use std::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+
+/// Iterates over a flexbuffer vector, typed vector, or map. Yields [Readers](struct.Reader.html).
+///
+/// If any error occurs, the Reader is defaulted to a Null flexbuffer Reader.
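+///
+/// A short sketch (the buffer is built with this crate's serde helper `to_vec`, assumed to
+/// store a `Vec` as a flexbuffer vector):
+/// ```
+/// let buf = flexbuffers::to_vec(vec![1u8, 2, 3]).unwrap();
+/// let vector = flexbuffers::Reader::get_root(&buf).unwrap().as_vector();
+/// let total: u64 = vector.iter().map(|r| r.as_u64()).sum();
+/// assert_eq!(total, 6);
+/// ```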
+pub struct ReaderIterator<'de> {
+    pub(super) reader: VectorReader<'de>,
+    pub(super) front: usize,
+    end: usize,
+}
+impl<'de> ReaderIterator<'de> {
+    pub(super) fn new(reader: VectorReader<'de>) -> Self {
+        let end = reader.len();
+        ReaderIterator {
+            reader,
+            front: 0,
+            end,
+        }
+    }
+}
+impl<'de> Iterator for ReaderIterator<'de> {
+    type Item = Reader<'de>;
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.front < self.end {
+            let r = self.reader.idx(self.front);
+            self.front += 1;
+            Some(r)
+        } else {
+            None
+        }
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let remaining = self.end - self.front;
+        (remaining, Some(remaining))
+    }
+}
+impl<'de> DoubleEndedIterator for ReaderIterator<'de> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.front < self.end {
+            self.end -= 1;
+            Some(self.reader.idx(self.end))
+        } else {
+            None
+        }
+    }
+}
+impl<'de> ExactSizeIterator for ReaderIterator<'de> {}
+impl<'de> FusedIterator for ReaderIterator<'de> {}
diff --git a/rust/flexbuffers/src/reader/map.rs b/rust/flexbuffers/src/reader/map.rs
new file mode 100644
index 0000000..cdf59e0
--- /dev/null
+++ b/rust/flexbuffers/src/reader/map.rs
@@ -0,0 +1,157 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{deref_offset, unpack_type, Error, Reader, ReaderIterator, VectorReader};
+use crate::BitWidth;
+use std::cmp::Ordering;
+use std::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+
+/// Allows indexing on a flexbuffer map.
+///
+/// MapReaders may be indexed with strings or usizes. `index` returns a result type,
+/// which may indicate failure due to a missing key or bad data, while `idx` returns a Null Reader
+/// in case of error.
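+///
+/// A hedged sketch, assuming a struct serialized with this crate's `to_vec` is stored as a Map
+/// keyed by field name:
+/// ```
+/// use serde_derive::Serialize;
+///
+/// #[derive(Serialize)]
+/// struct Foo { a: u8, b: u8 }
+///
+/// let buf = flexbuffers::to_vec(Foo { a: 5, b: 6 }).unwrap();
+/// let map = flexbuffers::Reader::get_root(&buf).unwrap().as_map();
+/// assert_eq!(map.idx("a").as_u8(), 5);     // `idx` falls back to a Null Reader on failure
+/// assert!(map.index("missing").is_err());  // `index` reports KeyNotFound instead
+/// ```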
+#[derive(Default, Clone)]
+pub struct MapReader<'de> {
+    pub(super) buffer: &'de [u8],
+    pub(super) values_address: usize,
+    pub(super) keys_address: usize,
+    pub(super) values_width: BitWidth,
+    pub(super) keys_width: BitWidth,
+    pub(super) length: usize,
+}
+
+// manual implementation of Debug because buffer slice can't be automatically displayed
+impl<'de> std::fmt::Debug for MapReader<'de> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // skips buffer field
+        f.debug_struct("MapReader")
+            .field("values_address", &self.values_address)
+            .field("keys_address", &self.keys_address)
+            .field("values_width", &self.values_width)
+            .field("keys_width", &self.keys_width)
+            .field("length", &self.length)
+            .finish()
+    }
+}
+
+impl<'de> MapReader<'de> {
+    /// Returns the number of key/value pairs in the map.
+    pub fn len(&self) -> usize {
+        self.length
+    }
+    /// Returns true if the map has zero key/value pairs.
+    pub fn is_empty(&self) -> bool {
+        self.length == 0
+    }
+    // Using &CStr will eagerly compute the length of the key. &str needs length info AND utf8
+    // validation. This version is faster than both.
+    fn lazy_strcmp(&self, key_addr: usize, key: &str) -> Ordering {
+        // TODO: Can we know this won't OOB and panic?
+        let k = self.buffer[key_addr..].iter().take_while(|&&b| b != b'\0');
+        k.cmp(key.as_bytes().iter())
+    }
+    /// Returns the index of a given key in the map.
+    pub fn index_key(&self, key: &str) -> Option<usize> {
+        let (mut low, mut high) = (0, self.length);
+        while low < high {
+            let i = (low + high) / 2;
+            let key_offset_address = self.keys_address + i * self.keys_width.n_bytes();
+            let key_address =
+                deref_offset(self.buffer, key_offset_address, self.keys_width).ok()?;
+            match self.lazy_strcmp(key_address, key) {
+                Ordering::Equal => return Some(i),
+                Ordering::Less => low = if i == low { i + 1 } else { i },
+                Ordering::Greater => high = i,
+            }
+        }
+        None
+    }
+    /// Index into a map with a key or usize.
+    pub fn index<I: MapReaderIndexer>(&self, i: I) -> Result<Reader<'de>, Error> {
+        i.index_map_reader(self)
+    }
+    /// Index into a map with a key or usize. If any errors occur a Null reader is returned.
+    pub fn idx<I: MapReaderIndexer>(&self, i: I) -> Reader<'de> {
+        i.index_map_reader(self).unwrap_or_default()
+    }
+    fn usize_index(&self, i: usize) -> Result<Reader<'de>, Error> {
+        if i >= self.length {
+            return Err(Error::IndexOutOfBounds);
+        }
+        let data_address = self.values_address + self.values_width.n_bytes() * i;
+        let type_address = self.values_address + self.values_width.n_bytes() * self.length + i;
+        let (fxb_type, width) = self
+            .buffer
+            .get(type_address)
+            .ok_or(Error::FlexbufferOutOfBounds)
+            .and_then(|&b| unpack_type(b))?;
+        Reader::new(
+            &self.buffer,
+            data_address,
+            fxb_type,
+            width,
+            self.values_width,
+        )
+    }
+    fn key_index(&self, k: &str) -> Result<Reader<'de>, Error> {
+        let i = self.index_key(k).ok_or(Error::KeyNotFound)?;
+        self.usize_index(i)
+    }
+    /// Iterate over the values of the map.
+    pub fn iter_values(&self) -> ReaderIterator<'de> {
+        ReaderIterator::new(VectorReader {
+            reader: Reader {
+                buffer: self.buffer,
+                fxb_type: crate::FlexBufferType::Map,
+                width: self.values_width,
+                address: self.values_address,
+            },
+            length: self.length,
+        })
+    }
+    /// Iterate over the keys of the map.
+    pub fn iter_keys(
+        &self,
+    ) -> impl Iterator<Item = &'de str> + DoubleEndedIterator + ExactSizeIterator + FusedIterator
+    {
+        self.keys_vector().iter().map(|k| k.as_str())
+    }
+    pub fn keys_vector(&self) -> VectorReader<'de> {
+        VectorReader {
+            reader: Reader {
+                buffer: self.buffer,
+                fxb_type: crate::FlexBufferType::VectorKey,
+                width: self.keys_width,
+                address: self.keys_address,
+            },
+            length: self.length,
+        }
+    }
+}
+pub trait MapReaderIndexer {
+    fn index_map_reader<'de>(self, r: &MapReader<'de>) -> Result<Reader<'de>, Error>;
+}
+impl MapReaderIndexer for usize {
+    #[inline]
+    fn index_map_reader<'de>(self, r: &MapReader<'de>) -> Result<Reader<'de>, Error> {
+        r.usize_index(self)
+    }
+}
+impl MapReaderIndexer for &str {
+    #[inline]
+    fn index_map_reader<'de>(self, r: &MapReader<'de>) -> Result<Reader<'de>, Error> {
+        r.key_index(self)
+    }
+}
diff --git a/rust/flexbuffers/src/reader/mod.rs b/rust/flexbuffers/src/reader/mod.rs
new file mode 100644
index 0000000..4a3f472
--- /dev/null
+++ b/rust/flexbuffers/src/reader/mod.rs
@@ -0,0 +1,604 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::bitwidth::BitWidth;
+use crate::flexbuffer_type::FlexBufferType;
+use crate::Blob;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::ops::Rem;
+use std::str::FromStr;
+mod de;
+mod iter;
+mod map;
+mod vector;
+pub use de::DeserializationError;
+pub use iter::ReaderIterator;
+pub use map::{MapReader, MapReaderIndexer};
+pub use vector::VectorReader;
+
+/// All the possible errors when reading a flexbuffer.
+#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
+pub enum Error {
+    /// One of the following data errors occurred:
+    ///
+    /// *    The read flexbuffer had an offset that pointed outside the flexbuffer.
+    /// *    The 'negative indices' where length and map keys are stored were out of bounds.
+    /// *    The buffer was too small to contain a flexbuffer root.
+    FlexbufferOutOfBounds,
+    /// Failed to parse a valid FlexbufferType and Bitwidth from a type byte.
+    InvalidPackedType,
+    /// Flexbuffer type of the read data does not match function used.
+    UnexpectedFlexbufferType {
+        expected: FlexBufferType,
+        actual: FlexBufferType,
+    },
+    /// BitWidth type of the read data does not match function used.
+    UnexpectedBitWidth {
+        expected: BitWidth,
+        actual: BitWidth,
+    },
+    /// Read a flexbuffer offset or length that overflowed usize.
+    ReadUsizeOverflowed,
+    /// Tried to index a type that's not one of the Flexbuffer vector types.
+    CannotIndexAsVector,
+    /// Tried to index a Flexbuffer vector or map out of bounds.
+    IndexOutOfBounds,
+    /// A Map was indexed with a key that it did not contain.
+    KeyNotFound,
+    /// Failed to parse a Utf8 string.
+    /// The Option will be `None` if and only if this Error was deserialized.
+    // NOTE: std::str::Utf8Error does not implement Serialize, Deserialize, nor Default. We tell
+    // serde to skip the field and default to None. We prefer to have the boxed error so it can be
+    // used with std::error::Error::source, though another (worse) option could be to drop that
+    // information.
+    Utf8Error(#[serde(skip)] Option<Box<std::str::Utf8Error>>),
+    /// get_slice failed because the given data buffer is misaligned.
+    AlignmentError,
+    InvalidRootWidth,
+    InvalidMapKeysVectorWidth,
+}
+impl std::convert::From<std::str::Utf8Error> for Error {
+    fn from(e: std::str::Utf8Error) -> Self {
+        Self::Utf8Error(Some(Box::new(e)))
+    }
+}
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::UnexpectedBitWidth { expected, actual } => write!(
+                f,
+                "Error reading flexbuffer: Expected bitwidth: {:?}, found bitwidth: {:?}",
+                expected, actual
+            ),
+            Self::UnexpectedFlexbufferType { expected, actual } => write!(
+                f,
+                "Error reading flexbuffer: Expected type: {:?}, found type: {:?}",
+                expected, actual
+            ),
+            _ => write!(f, "Error reading flexbuffer: {:?}", self),
+        }
+    }
+}
+impl std::error::Error for Error {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        if let Self::Utf8Error(Some(e)) = self {
+            Some(e)
+        } else {
+            None
+        }
+    }
+}
+
+pub trait ReadLE: crate::private::Sealed + std::marker::Sized {
+    const VECTOR_TYPE: FlexBufferType;
+    const WIDTH: BitWidth;
+}
+macro_rules! rle {
+    ($T: ty, $VECTOR_TYPE: ident, $WIDTH: ident) => {
+        impl ReadLE for $T {
+            const VECTOR_TYPE: FlexBufferType = FlexBufferType::$VECTOR_TYPE;
+            const WIDTH: BitWidth = BitWidth::$WIDTH;
+        }
+    };
+}
+rle!(u8, VectorUInt, W8);
+rle!(u16, VectorUInt, W16);
+rle!(u32, VectorUInt, W32);
+rle!(u64, VectorUInt, W64);
+rle!(i8, VectorInt, W8);
+rle!(i16, VectorInt, W16);
+rle!(i32, VectorInt, W32);
+rle!(i64, VectorInt, W64);
+rle!(f32, VectorFloat, W32);
+rle!(f64, VectorFloat, W64);
+
+macro_rules! as_default {
+    ($as: ident, $get: ident, $T: ty) => {
+        pub fn $as(&self) -> $T {
+            self.$get().unwrap_or_default()
+        }
+    };
+}
+
+/// `Reader`s allow access to data stored in a Flexbuffer.
+///
+/// Each reader represents a single address in the buffer so data is read lazily. Start a reader
+/// by calling `get_root` on your flexbuffer `&[u8]`.
+///
+/// - The `get_T` methods return a `Result<T, Error>`. They return an OK value if and only if the
+/// flexbuffer type matches `T`. This is analogous to the behavior of Rust's json library, though
+/// with Result instead of Option.
+/// - The `as_T` methods will try their best to return a value of type `T`
+/// (by casting or even parsing a string if necessary) but ultimately return `T::default` if that
+/// fails. This behavior is analogous to that of the C++ flexbuffers API.
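+///
+/// A minimal sketch of the two accessor families, assuming a `u8` serialized via `to_vec` is
+/// stored as a `UInt`:
+/// ```
+/// let buf = flexbuffers::to_vec(42u8).unwrap();
+/// let root = flexbuffers::Reader::get_root(&buf).unwrap();
+/// assert_eq!(root.get_u64().unwrap(), 42); // `get_` succeeds only on an exact type match
+/// assert_eq!(root.as_i32(), 42);           // `as_` casts, defaulting to 0 on failure
+/// assert!(root.get_str().is_err());
+/// ```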
+#[derive(Default, Clone)]
+pub struct Reader<'de> {
+    fxb_type: FlexBufferType,
+    width: BitWidth,
+    address: usize,
+    buffer: &'de [u8],
+}
+
+// manual implementation of Debug because buffer slice can't be automatically displayed
+impl<'de> std::fmt::Debug for Reader<'de> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // skips buffer field
+        f.debug_struct("Reader")
+            .field("fxb_type", &self.fxb_type)
+            .field("width", &self.width)
+            .field("address", &self.address)
+            .finish()
+    }
+}
+
+macro_rules! try_cast_fn {
+    ($name: ident, $full_width: ident, $Ty: ident) => {
+        pub fn $name(&self) -> $Ty {
+            self.$full_width().try_into().unwrap_or_default()
+        }
+    }
+}
+
+fn safe_sub(a: usize, b: usize) -> Result<usize, Error> {
+    a.checked_sub(b).ok_or(Error::FlexbufferOutOfBounds)
+}
+
+fn deref_offset(buffer: &[u8], address: usize, width: BitWidth) -> Result<usize, Error> {
+    let off = read_usize(buffer, address, width);
+    safe_sub(address, off)
+}
+
+impl<'de> Reader<'de> {
+    fn new(
+        buffer: &'de [u8],
+        mut address: usize,
+        mut fxb_type: FlexBufferType,
+        width: BitWidth,
+        parent_width: BitWidth,
+    ) -> Result<Self, Error> {
+        if fxb_type.is_reference() {
+            address = deref_offset(buffer, address, parent_width)?;
+            // Indirects were dereferenced.
+            if let Some(t) = fxb_type.to_direct() {
+                fxb_type = t;
+            }
+        }
+        Ok(Reader {
+            address,
+            fxb_type,
+            width,
+            buffer,
+        })
+    }
+    /// Parses the flexbuffer from the given buffer. Assumes the buffer ends with the root value,
+    /// followed by its packed type byte and, last of all, the root byte width.
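+    ///
+    /// For example, the 3-byte flexbuffer `[13, 4, 1]` stores the integer `13`: the trailing `1`
+    /// is the root width in bytes, `4` is the packed root type (`Int` at 8-bit width), and `13`
+    /// is the root value itself.
+    /// ```
+    /// let root = flexbuffers::Reader::get_root(&[13u8, 4, 1]).unwrap();
+    /// assert_eq!(root.as_i64(), 13);
+    /// ```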
+    pub fn get_root(buffer: &'de [u8]) -> Result<Self, Error> {
+        let end = buffer.len();
+        if end < 3 {
+            return Err(Error::FlexbufferOutOfBounds);
+        }
+        // Last byte is the root width.
+        let root_width = BitWidth::from_nbytes(buffer[end - 1]).ok_or(Error::InvalidRootWidth)?;
+        // Second last byte is root type.
+        let (fxb_type, width) = unpack_type(buffer[end - 2])?;
+        // Location of root data. (BitWidth bits before root type)
+        let address = safe_sub(end - 2, root_width.n_bytes())?;
+        Self::new(buffer, address, fxb_type, width, root_width)
+    }
+    /// Returns the FlexBufferType of this Reader.
+    pub fn flexbuffer_type(&self) -> FlexBufferType {
+        self.fxb_type
+    }
+    /// Returns the bitwidth of this Reader.
+    pub fn bitwidth(&self) -> BitWidth {
+        self.width
+    }
+    /// Returns the length of the Flexbuffer. If the type has no length, or if an error occurs,
+    /// 0 is returned.
+    pub fn length(&self) -> usize {
+        if let Some(len) = self.fxb_type.fixed_length_vector_length() {
+            len
+        } else if self.fxb_type.has_length_slot() && self.address >= self.width.n_bytes() {
+            read_usize(self.buffer, self.address - self.width.n_bytes(), self.width)
+        } else {
+            0
+        }
+    }
+    /// Returns true if the flexbuffer is aligned to 8 bytes. This guarantees, for valid
+    /// flexbuffers, that the data is correctly aligned in memory and slices can be read directly,
+    /// e.g. with `get_slice::<f64>()` or `get_slice::<i16>()`.
+    pub fn is_aligned(&self) -> bool {
+        (self.buffer.as_ptr() as usize).rem(8) == 0
+    }
+    as_default!(as_vector, get_vector, VectorReader<'de>);
+    as_default!(as_map, get_map, MapReader<'de>);
+
+    fn expect_type(&self, ty: FlexBufferType) -> Result<(), Error> {
+        if self.fxb_type == ty {
+            Ok(())
+        } else {
+            Err(Error::UnexpectedFlexbufferType {
+                expected: ty,
+                actual: self.fxb_type,
+            })
+        }
+    }
+    fn expect_bw(&self, bw: BitWidth) -> Result<(), Error> {
+        if self.width == bw {
+            Ok(())
+        } else {
+            Err(Error::UnexpectedBitWidth {
+                expected: bw,
+                actual: self.width,
+            })
+        }
+    }
+    /// Directly reads a slice of type `T`, where `T` is one of `u8,u16,u32,u64,i8,i16,i32,i64,f32,f64`.
+    /// Returns Err if the type, bitwidth, or memory alignment does not match. Since the bitwidth is
+    /// dynamic, it's better to use a VectorReader unless you know your data and performance is critical.
+    #[cfg(target_endian = "little")]
+    pub fn get_slice<T: ReadLE>(&self) -> Result<&'de [T], Error> {
+        if self.flexbuffer_type().typed_vector_type() != T::VECTOR_TYPE.typed_vector_type() {
+            self.expect_type(T::VECTOR_TYPE)?;
+        }
+        if self.bitwidth().n_bytes() != std::mem::size_of::<T>() {
+            self.expect_bw(T::WIDTH)?;
+        }
+        let end = self.address + self.length() * std::mem::size_of::<T>();
+        let slice = &self
+            .buffer
+            .get(self.address..end)
+            .ok_or(Error::FlexbufferOutOfBounds)?;
+        // `align_to` is required because the point of this function is to directly hand back a
+        // slice of scalars. This can fail because Rust's default allocator is not 16-byte aligned
+        // (though in practice this only happens for small buffers).
+        let (pre, mid, suf) = unsafe { slice.align_to::<T>() };
+        if pre.is_empty() && suf.is_empty() {
+            Ok(mid)
+        } else {
+            Err(Error::AlignmentError)
+        }
+    }
+
+    pub fn get_bool(&self) -> Result<bool, Error> {
+        self.expect_type(FlexBufferType::Bool)?;
+        Ok(
+            self.buffer[self.address..self.address + self.width.n_bytes()]
+                .iter()
+                .any(|&b| b != 0),
+        )
+    }
+    pub fn get_key(&self) -> Result<&'de str, Error> {
+        self.expect_type(FlexBufferType::Key)?;
+        let (length, _) = self.buffer[self.address..]
+            .iter()
+            .enumerate()
+            .find(|(_, &b)| b == b'\0')
+            .unwrap_or((0, &0));
+        let bytes = &self.buffer[self.address..self.address + length];
+        Ok(std::str::from_utf8(bytes)?)
+    }
+    pub fn get_blob(&self) -> Result<Blob<'de>, Error> {
+        self.expect_type(FlexBufferType::Blob)?;
+        Ok(Blob(
+            &self.buffer[self.address..self.address + self.length()],
+        ))
+    }
+    pub fn as_blob(&self) -> Blob<'de> {
+        self.get_blob().unwrap_or(Blob(&[]))
+    }
+    pub fn get_str(&self) -> Result<&'de str, Error> {
+        self.expect_type(FlexBufferType::String)?;
+        let bytes = &self.buffer[self.address..self.address + self.length()];
+        Ok(std::str::from_utf8(bytes)?)
+    }
+    fn get_map_info(&self) -> Result<(usize, BitWidth), Error> {
+        self.expect_type(FlexBufferType::Map)?;
+        if 3 * self.width.n_bytes() >= self.address {
+            return Err(Error::FlexbufferOutOfBounds);
+        }
+        let keys_offset_address = self.address - 3 * self.width.n_bytes();
+        let keys_width = {
+            let kw_addr = self.address - 2 * self.width.n_bytes();
+            let kw = read_usize(self.buffer, kw_addr, self.width);
+            BitWidth::from_nbytes(kw).ok_or(Error::InvalidMapKeysVectorWidth)
+        }?;
+        Ok((keys_offset_address, keys_width))
+    }
+    pub fn get_map(&self) -> Result<MapReader<'de>, Error> {
+        let (keys_offset_address, keys_width) = self.get_map_info()?;
+        let keys_address = deref_offset(self.buffer, keys_offset_address, self.width)?;
+        // TODO(cneo): Check that vectors length equals keys length.
+        Ok(MapReader {
+            buffer: self.buffer,
+            values_address: self.address,
+            values_width: self.width,
+            keys_address,
+            keys_width,
+            length: self.length(),
+        })
+    }
+    /// Tries to read a FlexBufferType::UInt. Returns Err if the type is not a UInt or if the
+    /// address is out of bounds.
+    pub fn get_u64(&self) -> Result<u64, Error> {
+        self.expect_type(FlexBufferType::UInt)?;
+        let cursor = self
+            .buffer
+            .get(self.address..self.address + self.width.n_bytes());
+        match self.width {
+            BitWidth::W8 => cursor.map(|s| s[0] as u8).map(Into::into),
+            BitWidth::W16 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<u16>::from_le_bytes)
+                .map(Into::into),
+            BitWidth::W32 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<u32>::from_le_bytes)
+                .map(Into::into),
+            BitWidth::W64 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<u64>::from_le_bytes),
+        }
+        .ok_or(Error::FlexbufferOutOfBounds)
+    }
+    /// Tries to read a FlexBufferType::Int. Returns Err if the type is not an Int or if the
+    /// address is out of bounds.
+    pub fn get_i64(&self) -> Result<i64, Error> {
+        self.expect_type(FlexBufferType::Int)?;
+        let cursor = self
+            .buffer
+            .get(self.address..self.address + self.width.n_bytes());
+        match self.width {
+            BitWidth::W8 => cursor.map(|s| s[0] as i8).map(Into::into),
+            BitWidth::W16 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<i16>::from_le_bytes)
+                .map(Into::into),
+            BitWidth::W32 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<i32>::from_le_bytes)
+                .map(Into::into),
+            BitWidth::W64 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(<i64>::from_le_bytes),
+        }
+        .ok_or(Error::FlexbufferOutOfBounds)
+    }
+    /// Tries to read a FlexBufferType::Float. Returns Err if the type is not a Float, if the
+    /// address is out of bounds, or if it is an f16 or f8 (not currently supported).
+    pub fn get_f64(&self) -> Result<f64, Error> {
+        self.expect_type(FlexBufferType::Float)?;
+        let cursor = self
+            .buffer
+            .get(self.address..self.address + self.width.n_bytes());
+        match self.width {
+            BitWidth::W8 | BitWidth::W16 => return Err(Error::InvalidPackedType),
+            BitWidth::W32 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(f32_from_le_bytes)
+                .map(Into::into),
+            BitWidth::W64 => cursor
+                .and_then(|s| s.try_into().ok())
+                .map(f64_from_le_bytes),
+        }
+        .ok_or(Error::FlexbufferOutOfBounds)
+    }
+    pub fn as_bool(&self) -> bool {
+        use FlexBufferType::*;
+        match self.fxb_type {
+            Bool => self.get_bool().unwrap_or_default(),
+            UInt => self.as_u64() != 0,
+            Int => self.as_i64() != 0,
+            Float => self.as_f64().abs() > std::f64::EPSILON,
+            String | Key => !self.as_str().is_empty(),
+            Null => false,
+            Blob => self.length() != 0,
+            ty if ty.is_vector() => self.length() != 0,
+            _ => unreachable!(),
+        }
+    }
+    /// Returns a u64, casting if necessary. For Maps and Vectors, their length is
+    /// returned. If anything fails, 0 is returned.
+    pub fn as_u64(&self) -> u64 {
+        match self.fxb_type {
+            FlexBufferType::UInt => self.get_u64().unwrap_or_default(),
+            FlexBufferType::Int => self
+                .get_i64()
+                .unwrap_or_default()
+                .try_into()
+                .unwrap_or_default(),
+            FlexBufferType::Float => self.get_f64().unwrap_or_default() as u64,
+            FlexBufferType::String => {
+                if let Ok(s) = self.get_str() {
+                    if let Ok(f) = u64::from_str(s) {
+                        return f;
+                    }
+                }
+                0
+            }
+            _ if self.fxb_type.is_vector() => self.length() as u64,
+            _ => 0,
+        }
+    }
+    try_cast_fn!(as_u32, as_u64, u32);
+    try_cast_fn!(as_u16, as_u64, u16);
+    try_cast_fn!(as_u8, as_u64, u8);
+
+    /// Returns an i64, casting if necessary. For Maps and Vectors, their length is
+    /// returned. If anything fails, 0 is returned.
+    pub fn as_i64(&self) -> i64 {
+        match self.fxb_type {
+            FlexBufferType::Int => self.get_i64().unwrap_or_default(),
+            FlexBufferType::UInt => self
+                .get_u64()
+                .unwrap_or_default()
+                .try_into()
+                .unwrap_or_default(),
+            FlexBufferType::Float => self.get_f64().unwrap_or_default() as i64,
+            FlexBufferType::String => {
+                if let Ok(s) = self.get_str() {
+                    if let Ok(f) = i64::from_str(s) {
+                        return f;
+                    }
+                }
+                0
+            }
+            _ if self.fxb_type.is_vector() => self.length() as i64,
+            _ => 0,
+        }
+    }
+    try_cast_fn!(as_i32, as_i64, i32);
+    try_cast_fn!(as_i16, as_i64, i16);
+    try_cast_fn!(as_i8, as_i64, i8);
+
+    /// Returns an f64, casting if necessary. For Maps and Vectors, their length is
+    /// returned. If anything fails, 0 is returned.
+    pub fn as_f64(&self) -> f64 {
+        match self.fxb_type {
+            FlexBufferType::Int => self.get_i64().unwrap_or_default() as f64,
+            FlexBufferType::UInt => self.get_u64().unwrap_or_default() as f64,
+            FlexBufferType::Float => self.get_f64().unwrap_or_default(),
+            FlexBufferType::String => {
+                if let Ok(s) = self.get_str() {
+                    if let Ok(f) = f64::from_str(s) {
+                        return f;
+                    }
+                }
+                0.0
+            }
+            _ if self.fxb_type.is_vector() => self.length() as f64,
+            _ => 0.0,
+        }
+    }
+    pub fn as_f32(&self) -> f32 {
+        self.as_f64() as f32
+    }
+
+    /// Returns the string, or an empty string if the type is not a String or Key.
+    pub fn as_str(&self) -> &'de str {
+        match self.fxb_type {
+            FlexBufferType::String => self.get_str().unwrap_or_default(),
+            FlexBufferType::Key => self.get_key().unwrap_or_default(),
+            _ => "",
+        }
+    }
+    pub fn get_vector(&self) -> Result<VectorReader<'de>, Error> {
+        if !self.fxb_type.is_vector() {
+            self.expect_type(FlexBufferType::Vector)?;
+        };
+        Ok(VectorReader {
+            reader: self.clone(),
+            length: self.length(),
+        })
+    }
+}
+
+impl<'de> fmt::Display for Reader<'de> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use FlexBufferType::*;
+        match self.flexbuffer_type() {
+            Null => write!(f, "null"),
+            UInt => write!(f, "{}", self.as_u64()),
+            Int => write!(f, "{}", self.as_i64()),
+            Float => write!(f, "{}", self.as_f64()),
+            Key | String => write!(f, "{:?}", self.as_str()),
+            Bool => write!(f, "{}", self.as_bool()),
+            Blob => write!(f, "blob"),
+            Map => {
+                write!(f, "{{")?;
+                let m = self.as_map();
+                let mut pairs = m.iter_keys().zip(m.iter_values());
+                if let Some((k, v)) = pairs.next() {
+                    write!(f, "{:?}: {}", k, v)?;
+                    for (k, v) in pairs {
+                        write!(f, ", {:?}: {}", k, v)?;
+                    }
+                }
+                write!(f, "}}")
+            }
+            t if t.is_vector() => {
+                write!(f, "[")?;
+                let mut elems = self.as_vector().iter();
+                if let Some(first) = elems.next() {
+                    write!(f, "{}", first)?;
+                    for e in elems {
+                        write!(f, ", {}", e)?;
+                    }
+                }
+                write!(f, "]")
+            }
+            _ => unreachable!("Display not implemented for {:?}", self),
+        }
+    }
+}
+
+// TODO(cneo): Use <f..>::from_le_bytes when we move past rustc 1.39.
+fn f32_from_le_bytes(bytes: [u8; 4]) -> f32 {
+    let bits = <u32>::from_le_bytes(bytes);
+    <f32>::from_bits(bits)
+}
+fn f64_from_le_bytes(bytes: [u8; 8]) -> f64 {
+    let bits = <u64>::from_le_bytes(bytes);
+    <f64>::from_bits(bits)
+}
+
+fn read_usize(buffer: &[u8], address: usize, width: BitWidth) -> usize {
+    let cursor = &buffer[address..];
+    match width {
+        BitWidth::W8 => cursor[0] as usize,
+        BitWidth::W16 => cursor
+            .get(0..2)
+            .and_then(|s| s.try_into().ok())
+            .map(<u16>::from_le_bytes)
+            .unwrap_or_default() as usize,
+        BitWidth::W32 => cursor
+            .get(0..4)
+            .and_then(|s| s.try_into().ok())
+            .map(<u32>::from_le_bytes)
+            .unwrap_or_default() as usize,
+        BitWidth::W64 => cursor
+            .get(0..8)
+            .and_then(|s| s.try_into().ok())
+            .map(<u64>::from_le_bytes)
+            .unwrap_or_default() as usize,
+    }
+}
+
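+// A packed type byte stores the FlexBufferType in its upper six bits and the BitWidth in its
+// lower two bits, i.e. `packed = (type << 2) | width`. For instance, assuming the discriminants
+// from the spec (`Int == 1`, `W8 == 0`), the byte `4` unpacks to an 8-bit inline Int.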
+fn unpack_type(ty: u8) -> Result<(FlexBufferType, BitWidth), Error> {
+    let w = BitWidth::try_from(ty & 3u8).map_err(|_| Error::InvalidPackedType)?;
+    let t = FlexBufferType::try_from(ty >> 2).map_err(|_| Error::InvalidPackedType)?;
+    Ok((t, w))
+}
diff --git a/rust/flexbuffers/src/reader/vector.rs b/rust/flexbuffers/src/reader/vector.rs
new file mode 100644
index 0000000..8ba8fe5
--- /dev/null
+++ b/rust/flexbuffers/src/reader/vector.rs
@@ -0,0 +1,74 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{unpack_type, Error, Reader, ReaderIterator};
+use crate::{BitWidth, FlexBufferType};
+
+#[derive(Default, Clone)]
+/// Allows indexing on any flexbuffer vector type (heterogeneous vector, typed vector, or
+/// fixed-length typed vector).
+///
+/// VectorReaders may be indexed with a usize. `index` returns a result type,
+/// which may indicate failure due to indexing out of bounds or bad data, while `idx` returns a
+/// Null Reader in the event of any failure.
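+///
+/// A short sketch of the two indexing styles (the buffer is built with this crate's serde
+/// helper `to_vec`, assumed to store a `Vec` as a flexbuffer vector):
+/// ```
+/// let buf = flexbuffers::to_vec(vec![7u8, 8, 9]).unwrap();
+/// let v = flexbuffers::Reader::get_root(&buf).unwrap().as_vector();
+/// assert_eq!(v.idx(2).as_u8(), 9);
+/// assert!(v.index(3).is_err());                   // IndexOutOfBounds via `index`
+/// assert!(v.idx(3).flexbuffer_type().is_null());  // the same failure defaults to Null via `idx`
+/// ```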
+pub struct VectorReader<'de> {
+    pub(super) reader: Reader<'de>,
+    // Cache the length because read_usize can be slow.
+    pub(super) length: usize,
+}
+
+impl<'de> VectorReader<'de> {
+    /// Returns the number of elements in the vector.
+    pub fn len(&self) -> usize {
+        self.length
+    }
+    /// Returns true if there are 0 elements in the vector.
+    pub fn is_empty(&self) -> bool {
+        self.length == 0
+    }
+    fn get_elem_type(&self, i: usize) -> Result<(FlexBufferType, BitWidth), Error> {
+        if let Some(ty) = self.reader.fxb_type.typed_vector_type() {
+            Ok((ty, self.reader.width))
+        } else {
+            let types_addr = self.reader.address + self.length * self.reader.width.n_bytes();
+            self.reader
+                .buffer
+                .get(types_addr + i)
+                .ok_or(Error::FlexbufferOutOfBounds)
+                .and_then(|&t| unpack_type(t))
+        }
+    }
+    /// Index into a flexbuffer vector. Any errors are defaulted to Null Readers.
+    pub fn idx(&self, i: usize) -> Reader<'de> {
+        self.index(i).unwrap_or_default()
+    }
+    /// Index into a flexbuffer.
+    pub fn index(&self, i: usize) -> Result<Reader<'de>, Error> {
+        if i >= self.length {
+            return Err(Error::IndexOutOfBounds);
+        }
+        let (fxb_type, bw) = self.get_elem_type(i)?;
+        let data_address = self.reader.address + self.reader.width.n_bytes() * i;
+        Reader::new(
+            self.reader.buffer,
+            data_address,
+            fxb_type,
+            bw,
+            self.reader.width,
+        )
+    }
+    pub fn iter(&self) -> ReaderIterator<'de> {
+        ReaderIterator::new(self.clone())
+    }
+}