/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or the size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  FBT_NULL = 0,
  FBT_INT = 1,
  FBT_UINT = 2,
  FBT_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  FBT_KEY = 4,
  FBT_STRING = 5,
  FBT_INDIRECT_INT = 6,
  FBT_INDIRECT_UINT = 7,
  FBT_INDIRECT_FLOAT = 8,
  FBT_MAP = 9,
  FBT_VECTOR = 10,      // Untyped.
  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
  FBT_VECTOR_UINT = 12,
  FBT_VECTOR_FLOAT = 13,
  FBT_VECTOR_KEY = 14,
  // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
  // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
  FBT_VECTOR_STRING_DEPRECATED = 15,
  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  FBT_VECTOR_UINT2 = 17,
  FBT_VECTOR_FLOAT2 = 18,
  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  FBT_VECTOR_UINT3 = 20,
  FBT_VECTOR_FLOAT3 = 21,
  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  FBT_VECTOR_UINT4 = 23,
  FBT_VECTOR_FLOAT4 = 24,
  FBT_BLOB = 25,
  FBT_BOOL = 26,
  FBT_VECTOR_BOOL =
      36,  // 36 so the type-to-typed-vector conversion below works for bool.
};
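
// A type byte packs a Type in its upper 6 bits and a BitWidth in its lower
// 2 bits (see PackedType() further down). An illustrative decode by hand:
//   uint8_t packed = PackedType(BIT_WIDTH_16, FBT_VECTOR);  // == 41
//   auto t = static_cast<Type>(packed >> 2);                // FBT_VECTOR
//   auto bw = static_cast<BitWidth>(packed & 3);            // BIT_WIDTH_16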

inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) ||
         t == FBT_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
  }
}

inline Type ToTypedVectorElementType(Type t) {
  FLATBUFFERS_ASSERT(IsTypedVector(t));
  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
  auto fixed_type = t - FBT_VECTOR_INT2;
  *len = static_cast<uint8_t>(fixed_type / 3 +
                              2);  // 3 types each, starting from length 2.
  return static_cast<Type>(fixed_type % 3 + FBT_INT);
}
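
// Worked example of the mapping above: FBT_VECTOR_FLOAT3 (21) minus
// FBT_VECTOR_INT2 (16) gives fixed_type 5, so the length is 5 / 3 + 2 == 3
// and the element type is 5 % 3 + FBT_INT == FBT_FLOAT.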

// TODO: implement proper support for 8/16bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template<typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
             ? (byte_width < 2
                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
             : (byte_width < 8
                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count
  // is a constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
  // clang-format off
  #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86)
  uint64_t u = 0;
  __movsb(reinterpret_cast<uint8_t *>(&u),
          reinterpret_cast<const uint8_t *>(data), byte_width);
  return flatbuffers::EndianScalar(u);
  #else
  return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
      data, byte_width);
  #endif
  // clang-format on
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \
  { \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}
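
// Illustrative values for the helpers above: WidthU(255) is BIT_WIDTH_8 but
// WidthU(256) is BIT_WIDTH_16. WidthI() makes room for the sign bit by
// shifting left once (and complementing negatives), so WidthI(127) is
// BIT_WIDTH_8 while both WidthI(128) and WidthI(-129) need BIT_WIDTH_16.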

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
      : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Object that has a size, obtained either from size prefix, or elsewhere.
class Sized : public Object {
 public:
  // Size prefix.
  Sized(const uint8_t *data, uint8_t byte_width)
      : Object(data, byte_width), size_(read_size()) {}
  // Manual size.
  Sized(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Object(data, byte_width), size_(sz) {}
  size_t size() const { return size_; }
  // Access size stored in `byte_width_` bytes before data_ pointer.
  size_t read_size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }

 protected:
  size_t size_;
};

class String : public Sized {
 public:
  // Size prefix.
  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
  // Manual size.
  String(const uint8_t *data, uint8_t byte_width, size_t sz)
      : Sized(data, byte_width, sz) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), size()); }

  static String EmptyString() {
    static const char *empty_string = "";
    return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
      : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0 /*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0 /*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
      : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

  friend Reference;

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
      : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       FBT_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
};

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted) {
  s += "[ ";
  for (size_t i = 0; i < v.size(); i++) {
    if (i) s += ", ";
    v[i].ToString(true, keys_quoted, s);
  }
  s += " ]";
}

class Reference {
 public:
  Reference()
      : data_(nullptr),
        parent_width_(0),
        byte_width_(BIT_WIDTH_8),
        type_(FBT_NULL) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(byte_width),
        type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
      : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const {
    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
  }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const {
    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
  }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  bool IsFixedTypedVector() const {
    return flexbuffers::IsFixedTypedVector(type_);
  }
  bool IsAnyVector() const {
    return (IsTypedVector() || IsFixedTypedVector() || IsVector());
  }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }
  bool AsBool() const {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
                              : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails; does the most sensible
  // conversion: floats are truncated, strings are parsed as a number where
  // possible, vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == FBT_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_UINT: return ReadUInt64(data_, parent_width_);
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
        case FBT_BOOL: return ReadInt64(data_, parent_width_);
        default:
          // Convert other things to int.
          return 0;
      }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == FBT_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_INT: return ReadInt64(data_, parent_width_);
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
        default:
          // Convert other things to uint.
          return 0;
      }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }

  double AsDouble() const {
    if (type_ == FBT_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
        case FBT_INT:
          return static_cast<double>(ReadInt64(data_, parent_width_));
        case FBT_UINT:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        case FBT_INDIRECT_INT:
          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
        case FBT_INDIRECT_UINT:
          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
        case FBT_NULL: return 0.0;
        case FBT_STRING: {
          double d;
          flatbuffers::StringToNumber(AsString().c_str(), &d);
          return d;
        }
        case FBT_VECTOR: return static_cast<double>(AsVector().size());
        case FBT_BOOL:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        default:
          // Convert strings and other things to float.
          return 0;
      }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == FBT_KEY || type_ == FBT_STRING) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something that
  // is not a string or key.
  String AsString() const {
    if (type_ == FBT_STRING) {
      return String(Indirect(), byte_width_);
    } else if (type_ == FBT_KEY) {
      auto key = Indirect();
      return String(key, byte_width_,
                    strlen(reinterpret_cast<const char *>(key)));
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    std::string s;
    ToString(false, false, s);
    return s;
  }

  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  // TODO(wvo): add further options to have indentation/newlines.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    if (type_ == FBT_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{ ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        keys[i].ToString(true, keys_quoted, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s);
        if (i < keys.size() - 1) s += ", ";
      }
      s += " }";
    } else if (IsVector()) {
      AppendToString<Vector>(s, AsVector(), keys_quoted);
    } else if (IsTypedVector()) {
      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
    } else if (IsFixedTypedVector()) {
      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
    } else if (IsBlob()) {
      auto blob = AsBlob();
      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
                                blob.size(), &s, true, false);
    } else {
      s += "(?)";
    }
  }
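
  // For example (an illustrative sketch, assuming the buffer's root is the
  // map { "a": [ 1, 2 ] }):
  //   std::string s;
  //   ref.ToString(true, true, s);  // s == "{ \"a\": [ 1, 2 ] }"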

  // This function returns the empty blob if you try to read a not-blob.
  // Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read a not-vector.
  // Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector()) {
      auto tv =
          TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
      if (tv.type_ == FBT_STRING) {
        // These can't be accessed as strings, since we don't know the
        // bit-width of the size field, see the declaration of
        // FBT_VECTOR_STRING_DEPRECATED above for details.
        // We change the type here to be keys, which are a subtype of strings,
        // and will ignore the size field. This will truncate strings with
        // embedded nulls.
        tv.type_ = FBT_KEY;
      }
      return tv;
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector()) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == FBT_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As() const;

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
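  // A sketch of that flow (illustrative; "counter" is a made-up key):
  //   builder.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);
  //   ... build a map containing "counter", then Finish() ...
  //   auto root = flexbuffers::GetRoot(builder.GetBuffer());
  //   root.AsMap()["counter"].MutateInt(999);  // Returns true if it fit.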
  bool MutateInt(int64_t i) {
    if (type_ == FBT_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == FBT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == FBT_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == FBT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
              BitWidth value_width) {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
                byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
               BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

// Template specialization for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}
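
// Note the layout the operator above relies on: an untyped vector stores its
// `len` elements (each `byte_width_` bytes), immediately followed by one
// packed type byte per element, so element i's type byte sits at
// data_ + len * byte_width_ + i.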

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
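
// An end-to-end reading sketch (illustrative; the keys are made up):
//   auto root = flexbuffers::GetRoot(buf);  // buf is a std::vector<uint8_t>.
//   auto map = root.AsMap();
//   std::string name = map["name"].AsString().str();
//   int32_t age = map["age"].AsInt32();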

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};
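
// For example, a Builder that pools strings as well as keys (a sketch; pick
// flags that match your data):
//   flexbuffers::Builder fbb(
//       512, flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);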

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is;
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict =
        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
                auto as = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + a.key.u_);
                auto bs = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + b.key.u_);
                auto comp = strcmp(as, bs);
                // If this assertion hits, you've added two keys with the same
                // value to this map.
                // TODO: Have to check for pointer equality, as some sort
                // implementations apparently call this function with the same
                // element?? Why?
                FLATBUFFERS_ASSERT(comp || &a == &b);
                return comp < 0;
              });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }
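
  // For example (illustrative):
  //   fbb.Vector([&]() {
  //     fbb.Int(1);
  //     fbb.Add("text");
  //   });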

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. Anything bigger use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }
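
  // For example (illustrative; key names are made up):
  //   fbb.Map([&]() {
  //     fbb.Int("x", 1);
  //     fbb.Double("y", 2.5);
  //   });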
| 1250 | |
Austin Schuh | 272c613 | 2020-11-14 16:37:52 -0800 | [diff] [blame^] | 1251 | // If you wish to share a value explicitly (a value not shared automatically |
| 1252 | // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these |
| 1253 | // functions. Or if you wish to turn those flags off for performance reasons |
| 1254 | // and still do some explicit sharing. For example: |
| 1255 | // builder.IndirectDouble(M_PI); |
| 1256 | // auto id = builder.LastValue(); // Remember where we stored it. |
| 1257 | // .. more code goes here .. |
| 1258 | // builder.ReuseValue(id); // Refers to same double by offset. |
| 1259 | // LastValue works regardless of whether the value has a key or not. |
| 1260 | // Works on any data type. |
| 1261 | struct Value; |
| 1262 | Value LastValue() { return stack_.back(); } |
| 1263 | void ReuseValue(Value v) { stack_.push_back(v); } |
| 1264 | void ReuseValue(const char *key, Value v) { |
| 1265 | Key(key); |
| 1266 | ReuseValue(v); |
| 1267 | } |
| 1268 | |
Austin Schuh | e89fa2d | 2019-08-14 20:24:23 -0700 | [diff] [blame] | 1269 | // Overloaded Add that tries to call the correct function above. |
| 1270 | void Add(int8_t i) { Int(i); } |
| 1271 | void Add(int16_t i) { Int(i); } |
| 1272 | void Add(int32_t i) { Int(i); } |
| 1273 | void Add(int64_t i) { Int(i); } |
| 1274 | void Add(uint8_t u) { UInt(u); } |
| 1275 | void Add(uint16_t u) { UInt(u); } |
| 1276 | void Add(uint32_t u) { UInt(u); } |
| 1277 | void Add(uint64_t u) { UInt(u); } |
| 1278 | void Add(float f) { Float(f); } |
| 1279 | void Add(double d) { Double(d); } |
| 1280 | void Add(bool b) { Bool(b); } |
| 1281 | void Add(const char *str) { String(str); } |
| 1282 | void Add(const std::string &str) { String(str); } |
| 1283 | void Add(const flexbuffers::String &str) { String(str); } |
| 1284 | |
| 1285 | template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); } |
| 1286 | |
| 1287 | template<typename T> void Add(const char *key, const T &t) { |
| 1288 | Key(key); |
| 1289 | Add(t); |
| 1290 | } |
| 1291 | |
| 1292 | template<typename T> void Add(const std::map<std::string, T> &map) { |
| 1293 | Map(map); |
| 1294 | } |
| 1295 | |
| 1296 | template<typename T> void operator+=(const T &t) { Add(t); } |
| 1297 | |
| 1298 | // This function is useful in combination with the Mutate* functions above. |
| 1299 | // It forces elements of vectors and maps to have a minimum size, such that |
| 1300 | // they can later be updated without failing. |
| 1301 | // Call with no arguments to reset. |
| 1302 | void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { |
| 1303 | force_min_bit_width_ = bw; |
| 1304 | } |

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
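
  // A typical end-to-end sequence (a minimal sketch; the key and value are
  // illustrative):
  //
  //   flexbuffers::Builder builder;
  //   builder.Map([&]() { builder.Int("x", 42); });
  //   builder.Finish();
  //   auto &buf = builder.GetBuffer();  // The finished bytes.
  //   auto x = flexbuffers::GetRoot(buf).AsMap()["x"].AsInt32();  // == 42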

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return static_cast<uint8_t>(byte_width);
  }
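
  // E.g. with buf_.size() == 5 and alignment == BIT_WIDTH_32: byte_width is
  // 4, PaddingBytes(5, 4) is 3, so three zero bytes are appended and the
  // next write starts at offset 8.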

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }
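
  // E.g. Write<int64_t>(1, 2) stores only the two lowest bytes, 0x01 0x00:
  // EndianScalar makes the in-memory representation little-endian regardless
  // of host byte order, so truncating keeps the low-order bytes.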

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      // case 2: Write(static_cast<half>(f), byte_width); break;
      // case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: FLATBUFFERS_ASSERT(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }
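
  // Offsets are stored relative to where they are written: e.g. a value at
  // absolute offset 8, referenced when the buffer is 20 bytes long, is
  // stored as the distance 12. Readers subtract rather than add, which
  // keeps offsets small and unsigned.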

  template<typename T> void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return flatbuffers::is_floating_point<T>::value
               ? FBT_FLOAT
               : flatbuffers::is_same<T, bool>::value
                     ? FBT_BOOL
                     : (flatbuffers::is_unsigned<T>::value ? FBT_UINT
                                                           : FBT_INT);
  }
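
  // E.g. GetScalarType<double>() == FBT_FLOAT, GetScalarType<bool>() ==
  // FBT_BOOL, GetScalarType<uint16_t>() == FBT_UINT, and
  // GetScalarType<int32_t>() == FBT_INT.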

 public:
  // This was really intended to be private, except for LastValue/ReuseValue.
  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: the width of the value itself; for vectors: of their
    // elements; for strings: of the length field.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
        : f_(static_cast<double>(f)),
          type_(FBT_FLOAT),
          min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if ((static_cast<size_t>(1U) << bit_width) == byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }
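
    // Worked example: an element at absolute offset u_ == 2, with the buffer
    // currently 10 bytes, stored as element 0. For byte_width 1 the offset
    // would be written at 10, a distance of 8, which fits in one byte, so
    // BIT_WIDTH_8 is returned. Had the distance been 300, the 1-byte attempt
    // would fail and the loop would retry with byte_width 2 at a re-aligned
    // location.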

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

 private:
  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }
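
  // Both Blob() and String() funnel into this; a string is just a blob with
  // one trailing zero byte (trailing == 1) and type FBT_STRING. Public API
  // usage (a minimal sketch; `builder` is illustrative):
  //
  //   uint8_t raw[] = { 1, 2, 3 };
  //   builder.Blob(raw, sizeof(raw));  // Length prefix, then the 3 bytes.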

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, we could write the vector with larger
    // elements, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    Align(bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }
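
  // The typed-vector fast path in action (a minimal sketch; `builder` is
  // illustrative):
  //
  //   int16_t xs[] = { 1, 2, 3 };
  //   builder.Vector(xs, 3);            // FBT_VECTOR_INT, 2-byte elements.
  //   // ...or with no length field stored at all:
  //   builder.FixedTypedVector(xs, 3);  // FBT_VECTOR_INT3.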

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    FLATBUFFERS_ASSERT(
        !fixed ||
        typed);  // typed=false, fixed=true combination is not supported.
    // Figure out the smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix it with an offset to
      // the keys vector (and with the byte width of that vector's elements).
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width =
          stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }
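
  // Resulting layout for a map (each cell byte_width bytes wide, except the
  // packed types, which are always 1 byte each):
  //
  //   [offset to keys vector][keys' byte width][length][values...][types...]
  //
  // The returned Value points at the first element, so the length and the
  // map prefix sit at negative offsets from it; that is how readers locate
  // them.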

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
      auto strb =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + b.first);
      // Compare one byte past the shorter length so the terminating 0 is
      // included: a string that is a strict prefix of another then compares
      // as smaller rather than equal.
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_