/*
 * Copyright 2015 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flatbuffers/reflection.h"
#include "flatbuffers/util.h"

// Helper functionality for reflection.

namespace flatbuffers {

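// Reads a scalar stored at "data" with the given reflection base type and
// widens it to int64_t. Strings are parsed as integers; other non-scalar
// types yield 0.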
int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data) {
  // clang-format off
  #define FLATBUFFERS_GET(T) static_cast<int64_t>(ReadScalar<T>(data))
  switch (type) {
    case reflection::UType:
    case reflection::Bool:
    case reflection::UByte:  return FLATBUFFERS_GET(uint8_t);
    case reflection::Byte:   return FLATBUFFERS_GET(int8_t);
    case reflection::Short:  return FLATBUFFERS_GET(int16_t);
    case reflection::UShort: return FLATBUFFERS_GET(uint16_t);
    case reflection::Int:    return FLATBUFFERS_GET(int32_t);
    case reflection::UInt:   return FLATBUFFERS_GET(uint32_t);
    case reflection::Long:   return FLATBUFFERS_GET(int64_t);
    case reflection::ULong:  return FLATBUFFERS_GET(uint64_t);
    case reflection::Float:  return FLATBUFFERS_GET(float);
    case reflection::Double: return FLATBUFFERS_GET(double);
    case reflection::String: {
      auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
                                                data);
      return s ? StringToInt(s->c_str()) : 0;
    }
    default: return 0;  // Tables & vectors do not make sense.
  }
  #undef FLATBUFFERS_GET
  // clang-format on
}

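// Reads the value stored at "data" as a double. Floating point types are read
// directly, strings are parsed with strtod, and integer types go through
// GetAnyValueI.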
double GetAnyValueF(reflection::BaseType type, const uint8_t *data) {
  switch (type) {
    case reflection::Float: return static_cast<double>(ReadScalar<float>(data));
    case reflection::Double: return ReadScalar<double>(data);
    case reflection::String: {
      auto s =
          reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) + data);
      return s ? strtod(s->c_str(), nullptr) : 0.0;
    }
    default: return static_cast<double>(GetAnyValueI(type, data));
  }
}

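// Converts the value stored at "data" to a string. Scalars go through
// NumToString, strings are returned as-is, and tables are pretty-printed
// (which needs "schema" and "type_index" to look up the table's fields).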
std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
                         const reflection::Schema *schema, int type_index) {
  switch (type) {
    case reflection::Float:
    case reflection::Double: return NumToString(GetAnyValueF(type, data));
    case reflection::String: {
      auto s =
          reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) + data);
      return s ? s->c_str() : "";
    }
    case reflection::Obj:
      if (schema) {
        // Convert the table to a string. This is mostly for debugging purposes,
        // and does NOT promise to be JSON compliant.
        // Also prefixes the type.
        auto &objectdef = *schema->objects()->Get(type_index);
        auto s = objectdef.name()->str();
        if (objectdef.is_struct()) {
          s += "(struct)";  // TODO: implement this as well.
        } else {
          auto table_field = reinterpret_cast<const Table *>(
              ReadScalar<uoffset_t>(data) + data);
          s += " { ";
          auto fielddefs = objectdef.fields();
          for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
            auto &fielddef = **it;
            if (!table_field->CheckField(fielddef.offset())) continue;
            auto val = GetAnyFieldS(*table_field, fielddef, schema);
            if (fielddef.type()->base_type() == reflection::String) {
              std::string esc;
              flatbuffers::EscapeString(val.c_str(), val.length(), &esc, true,
                                        false);
              val = esc;
            }
            s += fielddef.name()->str();
            s += ": ";
            s += val;
            s += ", ";
          }
          s += "}";
        }
        return s;
      } else {
        return "(table)";
      }
    case reflection::Vector:
      return "[(elements)]";  // TODO: implement this as well.
    case reflection::Union: return "(union)";  // TODO: implement this as well.
    default: return NumToString(GetAnyValueI(type, data));
  }
}

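// Writes "val" to "data", narrowing it to the scalar width implied by the
// reflection base type. Non-scalar types (including strings) are ignored.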
void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val) {
  // clang-format off
  #define FLATBUFFERS_SET(T) WriteScalar(data, static_cast<T>(val))
  switch (type) {
    case reflection::UType:
    case reflection::Bool:
    case reflection::UByte:  FLATBUFFERS_SET(uint8_t ); break;
    case reflection::Byte:   FLATBUFFERS_SET(int8_t  ); break;
    case reflection::Short:  FLATBUFFERS_SET(int16_t ); break;
    case reflection::UShort: FLATBUFFERS_SET(uint16_t); break;
    case reflection::Int:    FLATBUFFERS_SET(int32_t ); break;
    case reflection::UInt:   FLATBUFFERS_SET(uint32_t); break;
    case reflection::Long:   FLATBUFFERS_SET(int64_t ); break;
    case reflection::ULong:  FLATBUFFERS_SET(uint64_t); break;
    case reflection::Float:  FLATBUFFERS_SET(float   ); break;
    case reflection::Double: FLATBUFFERS_SET(double  ); break;
    // TODO: support strings
    default: break;
  }
  #undef FLATBUFFERS_SET
  // clang-format on
}

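// Writes "val" to "data" as the floating point type implied by the reflection
// base type; integer types fall back to SetAnyValueI with truncation.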
void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val) {
  switch (type) {
    case reflection::Float: WriteScalar(data, static_cast<float>(val)); break;
    case reflection::Double: WriteScalar(data, val); break;
    // TODO: support strings.
    default: SetAnyValueI(type, data, static_cast<int64_t>(val)); break;
  }
}

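// Parses the C string "val" and writes it to "data" as the scalar type implied
// by the reflection base type. String fields themselves are not handled here;
// they may require resizing the buffer (see SetString below).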
void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val) {
  switch (type) {
    case reflection::Float:
    case reflection::Double:
      SetAnyValueF(type, data, strtod(val, nullptr));
      break;
    // TODO: support strings.
    default: SetAnyValueI(type, data, StringToInt(val)); break;
  }
}

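// A minimal usage sketch for the generic accessors above, reading a field by
// name through a loaded binary schema (.bfbs). The buffer variables and the
// field name "name" below are hypothetical; error handling is omitted.
//
//   auto &schema = *reflection::GetSchema(bfbs_contents);
//   auto root = flatbuffers::GetAnyRoot(flatbuf_contents);
//   auto fields = schema.root_table()->fields();
//   auto field = fields->LookupByKey("name");  // Hypothetical field.
//   if (field) {
//     std::string value = flatbuffers::GetAnyFieldS(*root, *field, &schema);
//   }
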
// Resize a FlatBuffer in-place by iterating through all offsets in the buffer
// and adjusting them by "delta" if they straddle the start offset.
// Once that is done, bytes can now be inserted/deleted safely.
// "delta" may be negative (shrinking).
// Unless "delta" is a multiple of the largest alignment, you'll create a small
// amount of garbage space in the buffer (usually 0..7 bytes).
// If your FlatBuffer's root table is not the schema's root table, you should
// pass in your root_table type as well.
class ResizeContext {
 public:
  ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
                std::vector<uint8_t> *flatbuf,
                const reflection::Object *root_table = nullptr)
      : schema_(schema),
        startptr_(vector_data(*flatbuf) + start),
        delta_(delta),
        buf_(*flatbuf),
        dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
    auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
    delta_ = (delta_ + mask) & ~mask;
    if (!delta_) return;  // We can't shrink by less than largest_scalar_t.
    // Now change all the offsets by delta_.
    auto root = GetAnyRoot(vector_data(buf_));
    Straddle<uoffset_t, 1>(vector_data(buf_), root, vector_data(buf_));
    ResizeTable(root_table ? *root_table : *schema.root_table(), root);
    // We can now add or remove bytes at start.
    if (delta_ > 0)
      buf_.insert(buf_.begin() + start, delta_, 0);
    else
      buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
  }

  // Check if the range between first (lower address) and second straddles
  // the insertion point. If it does, change the offset at offsetloc (of
  // type T, with direction D).
  template<typename T, int D>
  void Straddle(const void *first, const void *second, void *offsetloc) {
    if (first <= startptr_ && second >= startptr_) {
      WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
      DagCheck(offsetloc) = true;
    }
  }

  // This returns a boolean that records if the corresponding offset location
  // has been modified already. If so, we can't even read the corresponding
  // offset, since it is pointing to a location that is illegal until the
  // resize actually happens.
  // This must be checked for every offset, since we can't know which offsets
  // will straddle and which won't.
  uint8_t &DagCheck(const void *offsetloc) {
    auto dag_idx = reinterpret_cast<const uoffset_t *>(offsetloc) -
                   reinterpret_cast<const uoffset_t *>(vector_data(buf_));
    return dag_check_[dag_idx];
  }

  void ResizeTable(const reflection::Object &objectdef, Table *table) {
    if (DagCheck(table)) return;  // Table already visited.
    auto vtable = table->GetVTable();
    // Early out: since all fields inside the table must point forwards in
    // memory, if the insertion point is before the table we can stop here.
    auto tableloc = reinterpret_cast<uint8_t *>(table);
    if (startptr_ <= tableloc) {
      // Check if insertion point is between the table and a vtable that
      // precedes it. This can't happen in current construction code, but check
      // just in case we ever change the way flatbuffers are built.
      Straddle<soffset_t, -1>(vtable, table, table);
    } else {
      // Check each field.
      auto fielddefs = objectdef.fields();
      for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
        auto &fielddef = **it;
        auto base_type = fielddef.type()->base_type();
        // Ignore scalars.
        if (base_type <= reflection::Double) continue;
        // Ignore fields that are not stored.
        auto offset = table->GetOptionalFieldOffset(fielddef.offset());
        if (!offset) continue;
        // Ignore structs.
        auto subobjectdef =
            base_type == reflection::Obj
                ? schema_.objects()->Get(fielddef.type()->index())
                : nullptr;
        if (subobjectdef && subobjectdef->is_struct()) continue;
        // Get this field's offset, and read it if safe.
        auto offsetloc = tableloc + offset;
        if (DagCheck(offsetloc)) continue;  // This offset already visited.
        auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
        Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
        // Recurse.
        switch (base_type) {
          case reflection::Obj: {
            ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::Vector: {
            auto elem_type = fielddef.type()->element();
            if (elem_type != reflection::Obj && elem_type != reflection::String)
              break;
            auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
            auto elemobjectdef =
                elem_type == reflection::Obj
                    ? schema_.objects()->Get(fielddef.type()->index())
                    : nullptr;
            if (elemobjectdef && elemobjectdef->is_struct()) break;
            for (uoffset_t i = 0; i < vec->size(); i++) {
              auto loc = vec->Data() + i * sizeof(uoffset_t);
              if (DagCheck(loc)) continue;  // This offset already visited.
              auto dest = loc + vec->Get(i);
              Straddle<uoffset_t, 1>(loc, dest, loc);
              if (elemobjectdef)
                ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
            }
            break;
          }
          case reflection::Union: {
            ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
                        reinterpret_cast<Table *>(ref));
            break;
          }
          case reflection::String: break;
          default: FLATBUFFERS_ASSERT(false);
        }
      }
      // Check if the vtable offset points beyond the insertion point.
      // Must do this last, since GetOptionalFieldOffset above still reads
      // this value.
      Straddle<soffset_t, -1>(table, vtable, table);
    }
  }

  void operator=(const ResizeContext &rc);

 private:
  const reflection::Schema &schema_;
  uint8_t *startptr_;
  int delta_;
  std::vector<uint8_t> &buf_;
  std::vector<uint8_t> dag_check_;
};

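// Replaces the contents of "str" inside "flatbuf" with "val", resizing the
// whole buffer in-place via ResizeContext if the lengths differ. Any pointers
// into the buffer may be invalidated by this call.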
void SetString(const reflection::Schema &schema, const std::string &val,
               const String *str, std::vector<uint8_t> *flatbuf,
               const reflection::Object *root_table) {
  auto delta = static_cast<int>(val.size()) - static_cast<int>(str->size());
  auto str_start = static_cast<uoffset_t>(
      reinterpret_cast<const uint8_t *>(str) - vector_data(*flatbuf));
  auto start = str_start + static_cast<uoffset_t>(sizeof(uoffset_t));
  if (delta) {
    // Clear the old string, since we don't want parts of it remaining.
    memset(vector_data(*flatbuf) + start, 0, str->size());
    // Different size, we must expand (or contract).
    ResizeContext(schema, start, delta, flatbuf, root_table);
    // Set the new length.
    WriteScalar(vector_data(*flatbuf) + str_start,
                static_cast<uoffset_t>(val.size()));
  }
  // Copy new data. Safe because we created the right amount of space.
  memcpy(vector_data(*flatbuf) + start, val.c_str(), val.size() + 1);
}

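// Resizes the vector "vec" inside "flatbuf" to "newsize" elements, adjusting
// the whole buffer in-place. Shrinking zeroes out the discarded elements;
// growing appends zeroed elements for the caller to fill in. Returns a pointer
// just past the original elements, i.e. where newly added elements start.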
uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
                         const VectorOfAny *vec, uoffset_t num_elems,
                         uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
                         const reflection::Object *root_table) {
  auto delta_elem = static_cast<int>(newsize) - static_cast<int>(num_elems);
  auto delta_bytes = delta_elem * static_cast<int>(elem_size);
  auto vec_start =
      reinterpret_cast<const uint8_t *>(vec) - vector_data(*flatbuf);
  auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
                                      elem_size * num_elems);
  if (delta_bytes) {
    if (delta_elem < 0) {
      // Clear elements we're throwing away, since some might remain in the
      // buffer.
      auto size_clear = -delta_elem * elem_size;
      memset(vector_data(*flatbuf) + start - size_clear, 0, size_clear);
    }
    ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
    WriteScalar(vector_data(*flatbuf) + vec_start, newsize);  // Length field.
    // Set new elements to 0; the caller can overwrite them afterwards.
    if (delta_elem > 0) {
      memset(vector_data(*flatbuf) + start, 0, delta_elem * elem_size);
    }
  }
  return vector_data(*flatbuf) + start;
}

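// Appends the contents of "newbuf" (a complete FlatBuffer of "newlen" bytes)
// to "flatbuf", minus its root offset, and returns a pointer to the root
// table of the copied data inside "flatbuf".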
const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
                             const uint8_t *newbuf, size_t newlen) {
  // Align to sizeof(uoffset_t) past sizeof(largest_scalar_t) since we're
  // going to chop off the root offset.
  while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
         !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
    flatbuf.push_back(0);
  }
  auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
  // Insert the entire FlatBuffer minus the root pointer.
  flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t), newbuf + newlen);
  auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
  return vector_data(flatbuf) + insertion_point + root_offset;
}

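// Copies "size" bytes of in-line data (a scalar or struct field) from "table"
// into the builder with the given alignment, and records it as the current
// value for that field.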
void CopyInline(FlatBufferBuilder &fbb, const reflection::Field &fielddef,
                const Table &table, size_t align, size_t size) {
  fbb.Align(align);
  fbb.PushBytes(table.GetStruct<const uint8_t *>(fielddef.offset()), size);
  fbb.TrackField(fielddef.offset(), fbb.GetSize());
}

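// Deep-copies "table" (described by "objectdef" in "schema") into "fbb" in two
// passes: first all sub-objects (strings, sub-tables, vectors, unions) are
// copied and their offsets collected, then the table itself is built,
// referencing those offsets and copying in-line scalars and structs.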
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                const reflection::Schema &schema,
                                const reflection::Object &objectdef,
                                const Table &table, bool use_string_pooling) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = use_string_pooling
                     ? fbb.CreateSharedString(GetFieldS(table, fielddef)).o
                     : fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (!subobjectdef.is_struct()) {
          offset =
              CopyTable(fbb, schema, subobjectdef, *GetFieldT(table, fielddef))
                  .o;
        }
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        offset =
            CopyTable(fbb, schema, subobjectdef, *GetFieldT(table, fielddef)).o;
        break;
      }
      case reflection::Vector: {
        auto vec =
            table.GetPointer<const Vector<Offset<Table>> *>(fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef =
            element_base_type == reflection::Obj
                ? schema.objects()->Get(fielddef.type()->index())
                : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = use_string_pooling
                                ? fbb.CreateSharedString(vec_s->Get(i)).o
                                : fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                elements[i] =
                    CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
          }
            FLATBUFFERS_FALLTHROUGH();  // fall thru
          default: {  // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            fbb.StartVector(vec->size(), element_size);
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default:  // Scalars.
        break;
    }
    if (offset) { offsets.push_back(offset); }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct() ? fbb.StartStruct(objectdef.minalign())
                                     : fbb.StartTable();
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
      }
        FLATBUFFERS_FALLTHROUGH();  // fall thru
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: {  // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  FLATBUFFERS_ASSERT(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start);
  }
}

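// Verifies that the struct stored in-line at "field_offset" of "parent_table"
// lies within the verifier's buffer bounds. A missing field passes unless
// "required" is set.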
bool VerifyStruct(flatbuffers::Verifier &v,
                  const flatbuffers::Table &parent_table,
                  voffset_t field_offset, const reflection::Object &obj,
                  bool required) {
  auto offset = parent_table.GetOptionalFieldOffset(field_offset);
  if (required && !offset) { return false; }

  return !offset ||
         v.Verify(reinterpret_cast<const uint8_t *>(&parent_table), offset,
                  obj.bytesize());
}

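// Verifies a vector of structs stored at "field_offset" of "parent_table":
// the length prefix and all element bytes must be inside the buffer. A missing
// vector passes unless "required" is set.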
bool VerifyVectorOfStructs(flatbuffers::Verifier &v,
                           const flatbuffers::Table &parent_table,
                           voffset_t field_offset,
                           const reflection::Object &obj, bool required) {
  auto p = parent_table.GetPointer<const uint8_t *>(field_offset);
  if (required && !p) { return false; }

  return !p || v.VerifyVectorOrString(p, obj.bytesize());
}

// forward declare to resolve cyclic deps between VerifyObject and VerifyVector
bool VerifyObject(flatbuffers::Verifier &v, const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table, bool required);

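// Verifies the vector field "vec_field" of "table": its offset, its length
// prefix, every element, and (for vectors of tables) each element table
// recursively. Vectors of vectors and vectors of unions are not supported.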
bool VerifyVector(flatbuffers::Verifier &v, const reflection::Schema &schema,
                  const flatbuffers::Table &table,
                  const reflection::Field &vec_field) {
  FLATBUFFERS_ASSERT(vec_field.type()->base_type() == reflection::Vector);
  if (!table.VerifyField<uoffset_t>(v, vec_field.offset())) return false;

  switch (vec_field.type()->element()) {
    case reflection::None: FLATBUFFERS_ASSERT(false); break;
    case reflection::UType:
      return v.VerifyVector(flatbuffers::GetFieldV<uint8_t>(table, vec_field));
    case reflection::Bool:
    case reflection::Byte:
    case reflection::UByte:
      return v.VerifyVector(flatbuffers::GetFieldV<int8_t>(table, vec_field));
    case reflection::Short:
    case reflection::UShort:
      return v.VerifyVector(flatbuffers::GetFieldV<int16_t>(table, vec_field));
    case reflection::Int:
    case reflection::UInt:
      return v.VerifyVector(flatbuffers::GetFieldV<int32_t>(table, vec_field));
    case reflection::Long:
    case reflection::ULong:
      return v.VerifyVector(flatbuffers::GetFieldV<int64_t>(table, vec_field));
    case reflection::Float:
      return v.VerifyVector(flatbuffers::GetFieldV<float>(table, vec_field));
    case reflection::Double:
      return v.VerifyVector(flatbuffers::GetFieldV<double>(table, vec_field));
    case reflection::String: {
      auto vec_string =
          flatbuffers::GetFieldV<flatbuffers::Offset<flatbuffers::String>>(
              table, vec_field);
      if (v.VerifyVector(vec_string) && v.VerifyVectorOfStrings(vec_string)) {
        return true;
      } else {
        return false;
      }
    }
    case reflection::Vector: FLATBUFFERS_ASSERT(false); break;
    case reflection::Obj: {
      auto obj = schema.objects()->Get(vec_field.type()->index());
      if (obj->is_struct()) {
        if (!VerifyVectorOfStructs(v, table, vec_field.offset(), *obj,
                                   vec_field.required())) {
          return false;
        }
      } else {
        auto vec =
            flatbuffers::GetFieldV<flatbuffers::Offset<flatbuffers::Table>>(
                table, vec_field);
        if (!v.VerifyVector(vec)) return false;
        if (vec) {
          for (uoffset_t j = 0; j < vec->size(); j++) {
            if (!VerifyObject(v, schema, *obj, vec->Get(j), true)) {
              return false;
            }
          }
        }
      }
      return true;
    }
    case reflection::Union: FLATBUFFERS_ASSERT(false); break;
    default: FLATBUFFERS_ASSERT(false); break;
  }

  return false;
}

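// Recursively verifies the table pointed to by "table" against its schema
// definition "obj": the vtable, each present field, and all sub-objects.
// A null table passes unless "required" is set.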
bool VerifyObject(flatbuffers::Verifier &v, const reflection::Schema &schema,
                  const reflection::Object &obj,
                  const flatbuffers::Table *table, bool required) {
  if (!table) {
    if (!required)
      return true;
    else
      return false;
  }

  if (!table->VerifyTableStart(v)) return false;

  for (uoffset_t i = 0; i < obj.fields()->size(); i++) {
    auto field_def = obj.fields()->Get(i);
    switch (field_def->type()->base_type()) {
      case reflection::None: FLATBUFFERS_ASSERT(false); break;
      case reflection::UType:
        if (!table->VerifyField<uint8_t>(v, field_def->offset())) return false;
        break;
      case reflection::Bool:
      case reflection::Byte:
      case reflection::UByte:
        if (!table->VerifyField<int8_t>(v, field_def->offset())) return false;
        break;
      case reflection::Short:
      case reflection::UShort:
        if (!table->VerifyField<int16_t>(v, field_def->offset())) return false;
        break;
      case reflection::Int:
      case reflection::UInt:
        if (!table->VerifyField<int32_t>(v, field_def->offset())) return false;
        break;
      case reflection::Long:
      case reflection::ULong:
        if (!table->VerifyField<int64_t>(v, field_def->offset())) return false;
        break;
      case reflection::Float:
        if (!table->VerifyField<float>(v, field_def->offset())) return false;
        break;
      case reflection::Double:
        if (!table->VerifyField<double>(v, field_def->offset())) return false;
        break;
      case reflection::String:
        if (!table->VerifyField<uoffset_t>(v, field_def->offset()) ||
            !v.VerifyString(flatbuffers::GetFieldS(*table, *field_def))) {
          return false;
        }
        break;
      case reflection::Vector:
        if (!VerifyVector(v, schema, *table, *field_def)) return false;
        break;
      case reflection::Obj: {
        auto child_obj = schema.objects()->Get(field_def->type()->index());
        if (child_obj->is_struct()) {
          if (!VerifyStruct(v, *table, field_def->offset(), *child_obj,
                            field_def->required())) {
            return false;
          }
        } else {
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      case reflection::Union: {
        // Get the union type from the _type field that precedes this one.
        voffset_t utype_offset = field_def->offset() - sizeof(voffset_t);
        auto utype = table->GetField<uint8_t>(utype_offset, 0);
        if (utype != 0) {
          // A non-zero type value means this union field is present.
          auto fb_enum = schema.enums()->Get(field_def->type()->index());
          auto child_obj = fb_enum->values()->Get(utype)->object();
          if (!VerifyObject(v, schema, *child_obj,
                            flatbuffers::GetFieldT(*table, *field_def),
                            field_def->required())) {
            return false;
          }
        }
        break;
      }
      default: FLATBUFFERS_ASSERT(false); break;
    }
  }

  if (!v.EndTable()) return false;

  return true;
}

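// Verifies an arbitrary buffer "buf" of "length" bytes against "root" (a
// table definition from "schema"), using only reflection data. A minimal
// usage sketch (the variable names below are hypothetical):
//
//   auto &schema = *reflection::GetSchema(bfbs_contents);
//   bool ok = flatbuffers::Verify(schema, *schema.root_table(), buffer_data,
//                                 buffer_length);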
bool Verify(const reflection::Schema &schema, const reflection::Object &root,
            const uint8_t *buf, size_t length) {
  Verifier v(buf, length);
  return VerifyObject(v, schema, root, flatbuffers::GetAnyRoot(buf), true);
}

}  // namespace flatbuffers