/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#if defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable : 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or the size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};
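
// The byte width a BitWidth denotes is 1 << BitWidth, as used throughout this
// header: BIT_WIDTH_8 denotes 1-byte storage, BIT_WIDTH_32 denotes 4-byte
// storage, and so on.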

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  FBT_NULL = 0,
  FBT_INT = 1,
  FBT_UINT = 2,
  FBT_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  FBT_KEY = 4,
  FBT_STRING = 5,
  FBT_INDIRECT_INT = 6,
  FBT_INDIRECT_UINT = 7,
  FBT_INDIRECT_FLOAT = 8,
  FBT_MAP = 9,
  FBT_VECTOR = 10,      // Untyped.
  FBT_VECTOR_INT = 11,  // Typed any size (stores no type table).
  FBT_VECTOR_UINT = 12,
  FBT_VECTOR_FLOAT = 13,
  FBT_VECTOR_KEY = 14,
  FBT_VECTOR_STRING = 15,
  FBT_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  FBT_VECTOR_UINT2 = 17,
  FBT_VECTOR_FLOAT2 = 18,
  FBT_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  FBT_VECTOR_UINT3 = 20,
  FBT_VECTOR_FLOAT3 = 21,
  FBT_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  FBT_VECTOR_UINT4 = 23,
  FBT_VECTOR_FLOAT4 = 24,
  FBT_BLOB = 25,
  FBT_BOOL = 26,
  // 36 is kept out of sequence so that the same arithmetic that converts a
  // scalar type to its vector type (see ToTypedVector below) also works for
  // bools: FBT_BOOL - FBT_INT + FBT_VECTOR_INT == 36.
  FBT_VECTOR_BOOL = 36,
};
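
// A type field packs a Type together with a BitWidth in one byte, as
// implemented by PackedType() further down: packed = (type << 2) | bit_width.
// For example (a worked illustration): FBT_VECTOR (10) stored with
// BIT_WIDTH_16 (1) packs to (10 << 2) | 1 == 41.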

inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING) ||
         t == FBT_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
    case 2: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
    case 3: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
    case 4: return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
    default: FLATBUFFERS_ASSERT(0); return FBT_NULL;
  }
}

inline Type ToTypedVectorElementType(Type t) {
  FLATBUFFERS_ASSERT(IsTypedVector(t));
  return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
  auto fixed_type = t - FBT_VECTOR_INT2;
  *len = static_cast<uint8_t>(fixed_type / 3 +
                              2);  // 3 types each, starting from length 2.
  return static_cast<Type>(fixed_type % 3 + FBT_INT);
}
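
// For example (a worked illustration of the arithmetic above):
// FBT_VECTOR_FLOAT3 (21) - FBT_VECTOR_INT2 (16) == 5, so *len becomes
// 5 / 3 + 2 == 3 and the element type is 5 % 3 + FBT_INT == FBT_FLOAT.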

// TODO: implement proper support for 8/16bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template<typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
             ? (byte_width < 2
                    ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
             : (byte_width < 8
                    ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                    : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count
  // is a constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
  // clang-format off
  #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
    uint64_t u = 0;
    __movsb(reinterpret_cast<uint8_t *>(&u),
            reinterpret_cast<const uint8_t *>(data), byte_width);
    return flatbuffers::EndianScalar(u);
  #else
    return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
        data, byte_width);
  #endif
  // clang-format on
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width)                   \
  {                                                                     \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}
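
// Worked examples for the width helpers above: WidthU(255) == BIT_WIDTH_8 and
// WidthU(256) == BIT_WIDTH_16. WidthI() shifts the magnitude left by one to
// reserve room for the sign bit, so WidthI(127) == BIT_WIDTH_8 but
// WidthI(128) == BIT_WIDTH_16, while WidthI(-128) still fits BIT_WIDTH_8.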

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
      : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Stores size in `byte_width_` bytes before data_ pointer.
class Sized : public Object {
 public:
  Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width) {}
  size_t size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }
};

class String : public Sized {
 public:
  String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), length()); }

  static String EmptyString() {
    static const uint8_t empty_string[] = { 0 /*len*/, 0 /*terminator*/ };
    return String(empty_string + 1, 1);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
      : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0 /*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0 /*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
      : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0 /*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
      : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0 /* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                           ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       FBT_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
};
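
// A sketch of the map layout implied by Keys()/Values() above. The keys
// vector lives elsewhere in the buffer and is reached via a stored offset;
// relative to data_ (all fields byte_width_ bytes wide):
//
//   [keys offset][keys byte width][length]  <- the 3 fields before data_
//   [values...]                             <- data_ points here
//   [packed value types...]                 <- one type byte per value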

template<typename T>
void AppendToString(std::string &s, T &&v, bool keys_quoted) {
  s += "[ ";
  for (size_t i = 0; i < v.size(); i++) {
    if (i) s += ", ";
    v[i].ToString(true, keys_quoted, s);
  }
  s += " ]";
}

class Reference {
 public:
  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
      : data_(data),
        parent_width_(parent_width),
        byte_width_(byte_width),
        type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
      : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == FBT_NULL; }
  bool IsBool() const { return type_ == FBT_BOOL; }
  bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
  bool IsUInt() const {
    return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT;
  }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const {
    return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT;
  }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == FBT_STRING; }
  bool IsKey() const { return type_ == FBT_KEY; }
  bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
  bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
  bool IsFixedTypedVector() const {
    return flexbuffers::IsFixedTypedVector(type_);
  }
  bool IsAnyVector() const {
    return IsTypedVector() || IsFixedTypedVector() || IsVector();
  }
  bool IsMap() const { return type_ == FBT_MAP; }
  bool IsBlob() const { return type_ == FBT_BLOB; }

  bool AsBool() const {
    return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_)
                              : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion: floats are truncated, strings are parsed for a number,
  // vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == FBT_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_UINT: return ReadUInt64(data_, parent_width_);
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<int64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<int64_t>(AsVector().size());
        case FBT_BOOL: return ReadInt64(data_, parent_width_);
        default:
          // Convert other things to int.
          return 0;
      }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == FBT_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
        case FBT_INT: return ReadInt64(data_, parent_width_);
        case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
        case FBT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
        case FBT_INDIRECT_FLOAT:
          return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
        case FBT_NULL: return 0;
        case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str());
        case FBT_VECTOR: return static_cast<uint64_t>(AsVector().size());
        case FBT_BOOL: return ReadUInt64(data_, parent_width_);
        default:
          // Convert other things to uint.
          return 0;
      }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }

  double AsDouble() const {
    if (type_ == FBT_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else
      switch (type_) {
        case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
        case FBT_INT:
          return static_cast<double>(ReadInt64(data_, parent_width_));
        case FBT_UINT:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        case FBT_INDIRECT_INT:
          return static_cast<double>(ReadInt64(Indirect(), byte_width_));
        case FBT_INDIRECT_UINT:
          return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
        case FBT_NULL: return 0.0;
        case FBT_STRING: return strtod(AsString().c_str(), nullptr);
        case FBT_VECTOR: return static_cast<double>(AsVector().size());
        case FBT_BOOL:
          return static_cast<double>(ReadUInt64(data_, parent_width_));
        default:
          // Convert other things to float.
          return 0;
      }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == FBT_KEY) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read a non-string.
  String AsString() const {
    if (type_ == FBT_STRING) {
      return String(Indirect(), byte_width_);
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() const {
    std::string s;
    ToString(false, false, s);
    return s;
  }

  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  // TODO(wvo): add further options to have indentation/newlines.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    if (type_ == FBT_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true, false);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{ ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        keys[i].ToString(true, keys_quoted, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s);
        if (i < keys.size() - 1) s += ", ";
      }
      s += " }";
    } else if (IsVector()) {
      AppendToString<Vector>(s, AsVector(), keys_quoted);
    } else if (IsTypedVector()) {
      AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
    } else if (IsFixedTypedVector()) {
      AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
    } else if (IsBlob()) {
      auto blob = AsBlob();
      flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()),
                                blob.size(), &s, true, false);
    } else {
      s += "(?)";
    }
  }
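
  // For example (an illustrative sketch): a map { foo: 13, vec: [ -100,
  // "Fred" ] } converted with ToString(false, true, s) yields something like
  //   { "foo": 13, "vec": [ -100, "Fred" ] }
  // since keys are stored sorted and nested values always quote strings.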

  // This function returns the empty blob if you try to read a non-blob.
  // Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == FBT_BLOB || type_ == FBT_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read a non-vector.
  // Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == FBT_VECTOR || type_ == FBT_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector()) {
      return TypedVector(Indirect(), byte_width_,
                         ToTypedVectorElementType(type_));
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector()) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == FBT_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As() const;

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
  bool MutateInt(int64_t i) {
    if (type_ == FBT_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == FBT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == FBT_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == FBT_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == FBT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == FBT_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == FBT_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == FBT_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }
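
  // A minimal mutation sketch (the key name "x" is illustrative; assumes a
  // finished buffer `buf`):
  //
  //   auto x = flexbuffers::GetRoot(buf).AsMap()["x"];
  //   if (!x.MutateInt(99999)) {
  //     // 99999 needs more bits than were used to store "x"; rebuild the
  //     // buffer with Builder::ForceMinimumBitWidth to reserve room.
  //   }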

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T>
  bool Mutate(const uint8_t *dest, T t, size_t byte_width,
              BitWidth value_width) {
    auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
                byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T>
  bool MutateF(const uint8_t *dest, T t, size_t byte_width,
               BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    FLATBUFFERS_ASSERT(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};

// Template specialization for As().
template<> inline bool Reference::As<bool>() const { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() const {
  return AsUInt16();
}
template<> inline uint32_t Reference::As<uint32_t>() const {
  return AsUInt32();
}
template<> inline uint64_t Reference::As<uint64_t>() const {
  return AsUInt64();
}

template<> inline double Reference::As<double>() const { return AsDouble(); }
template<> inline float Reference::As<float>() const { return AsFloat(); }

template<> inline String Reference::As<String>() const { return AsString(); }
template<> inline std::string Reference::As<std::string>() const {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() const { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() const { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() const {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() const {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() const { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}
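
// Untyped vector layout (a sketch derived from operator[] above): the length
// sits byte_width_ bytes before data_, elements start at data_, and one
// packed type byte per element follows the last element:
//   [length][elem 0]...[elem len-1][type 0]...[type len-1]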

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
  }
  auto res =
      std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
  if (!res) return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from
  // there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
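
// A minimal read sketch (key names and Use() are illustrative placeholders;
// assumes `buf` is a std::vector<uint8_t> holding a finished FlexBuffer):
//
//   auto map = flexbuffers::GetRoot(buf).AsMap();
//   auto i = map["an_int"].AsInt64();
//   auto vec = map["a_vector"].AsVector();
//   for (size_t j = 0; j < vec.size(); j++) Use(vec[j].AsDouble());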

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
      : buf_(initial_size),
        finished_(false),
        flags_(flags),
        force_min_bit_width_(BIT_WIDTH_8),
        key_pool(KeyOffsetCompare(buf_)),
        string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const { return buf_.size(); }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is.
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for inside
  // vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) {
    Key(key);
    Null();
  }

  void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) {
    Key(key);
    Int(i);
  }

  void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) {
    Key(key);
    UInt(u);
  }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) {
    Key(key);
    Float(f);
  }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) {
    Key(key);
    Double(d);
  }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) {
    Key(key);
    Bool(b);
  }

  void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove key we just serialized, and use
        // existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, FBT_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove string we just serialized, and use
        // existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) { return String(str, strlen(str)); }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, FBT_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) {
    Key(key);
    return stack_.size();
  }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) {
    Key(key);
    return stack_.size();
  }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    FLATBUFFERS_ASSERT(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue {
      Value key;
      Value val;
    };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make sure
    // we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict =
        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
                auto as = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + a.key.u_);
                auto bs = reinterpret_cast<const char *>(
                    flatbuffers::vector_data(buf_) + b.key.u_);
                auto comp = strcmp(as, bs);
                // If this assertion hits, you've added two keys with the same
                // value to this map.
                // TODO: We have to check for pointer equality, as some sort
                // implementations apparently call this function with the same
                // element?? Why?
                FLATBUFFERS_ASSERT(comp || &a == &b);
                return comp < 0;
              });
    // First create a vector out of all keys.
    // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
    // the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T>
  size_t Vector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T>
  void Vector(const char *key, const T *elems, size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T>
  size_t TypedVector(const char *key, F f, T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. For anything bigger, use a
    // regular typed vector.
    FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
    // And only scalar values.
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return ScalarVector(elems, len, true);
  }

  template<typename T>
  size_t FixedTypedVector(const char *key, const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f, T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) { Vector(vec); }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) { Add(t); }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }
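
  // For example (a sketch; the key name "x" is illustrative):
  //
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
  //   fbb.Map([&]() { fbb.Int("x", 1); });  // "x" gets 4 bytes, not 1.
  //   fbb.Finish();
  //   // Later, mutating in-place is now guaranteed to fit 32-bit values:
  //   flexbuffers::GetRoot(fbb.GetBuffer()).AsMap()["x"].MutateInt(100000);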

  void Finish() {
    // If you hit this assert, you likely have objects that were never included
    // in a parent. You need to have exactly one root to finish a buffer.
    // Check your Start/End calls are matched, and all objects are inside
    // some other object.
    FLATBUFFERS_ASSERT(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
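
  // A minimal end-to-end sketch of building a buffer (values illustrative):
  //
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.Int("an_int", 100);
  //     fbb.Vector("a_vector", [&]() {
  //       fbb.Int(-100); fbb.String("Fred"); fbb.Double(4.0);
  //     });
  //   });
  //   fbb.Finish();
  //   const std::vector<uint8_t> &buf = fbb.GetBuffer();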

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer
    // which hasn't been finished yet. Be sure to call
    // Builder::Finish with your root object.
    FLATBUFFERS_ASSERT(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width),
                0);
    return static_cast<uint8_t>(byte_width);
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      // case 2: Write(static_cast<half>(f), byte_width); break;
      // case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: FLATBUFFERS_ASSERT(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
    Write(reloff, byte_width);
  }

  template<typename T>
  void PushIndirect(T val, Type type, BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
    return flatbuffers::is_floating_point<T>::value
               ? FBT_FLOAT
               : flatbuffers::is_same<T, bool>::value
                     ? FBT_BOOL
                     : (flatbuffers::is_unsigned<T>::value ? FBT_UINT
                                                           : FBT_INT);
  }

  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
        : u_(static_cast<uint64_t>(b)),
          type_(FBT_BOOL),
          min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
        : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
        : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f) : f_(f), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
    Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc = buf_size +
                            flatbuffers::PaddingBytes(buf_size, byte_width) +
                            elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
              byte_width)
            return bit_width;
        }
        FLATBUFFERS_ASSERT(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case FBT_NULL:
      case FBT_INT: Write(val.i_, byte_width); break;
      case FBT_BOOL:
      case FBT_UINT: Write(val.u_, byte_width); break;
      case FBT_FLOAT: WriteDouble(val.f_, byte_width); break;
      default: WriteOffset(val.u_, byte_width); break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T>
  size_t ScalarVector(const T *elems, size_t len, bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, could write vector with larger elements
    // instead, though that would be wasteful.
    FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    // The typed=false, fixed=true combination is not supported.
    FLATBUFFERS_ASSERT(!fixed || typed);
    // Figure out smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix it with an offset to
      // the keys vector.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = FBT_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys ? FBT_MAP
                      : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                               : FBT_VECTOR),
                 bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
      auto strb =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    explicit StringOffsetCompare(const std::vector<uint8_t> &buf)
        : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + b.first);
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_