/*
 * Copyright 2017 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FLATBUFFERS_FLEXBUFFERS_H_
#define FLATBUFFERS_FLEXBUFFERS_H_

#include <map>
// Used to select STL variant.
#include "flatbuffers/base.h"
// We use the basic binary writing functions from the regular FlatBuffers.
#include "flatbuffers/util.h"

#ifdef _MSC_VER
#include <intrin.h>
#endif

#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4127)  // C4127: conditional expression is constant
#endif

namespace flexbuffers {

class Reference;
class Map;

// These are used in the lower 2 bits of a type field to determine the size of
// the elements (and/or the size field) of the item pointed to (e.g. vector).
enum BitWidth {
  BIT_WIDTH_8 = 0,
  BIT_WIDTH_16 = 1,
  BIT_WIDTH_32 = 2,
  BIT_WIDTH_64 = 3,
};

// These are used as the upper 6 bits of a type field to indicate the actual
// type.
enum Type {
  TYPE_NULL = 0,
  TYPE_INT = 1,
  TYPE_UINT = 2,
  TYPE_FLOAT = 3,
  // Types above stored inline, types below store an offset.
  TYPE_KEY = 4,
  TYPE_STRING = 5,
  TYPE_INDIRECT_INT = 6,
  TYPE_INDIRECT_UINT = 7,
  TYPE_INDIRECT_FLOAT = 8,
  TYPE_MAP = 9,
  TYPE_VECTOR = 10,      // Untyped.
  TYPE_VECTOR_INT = 11,  // Typed any size (stores no type table).
  TYPE_VECTOR_UINT = 12,
  TYPE_VECTOR_FLOAT = 13,
  TYPE_VECTOR_KEY = 14,
  TYPE_VECTOR_STRING = 15,
  TYPE_VECTOR_INT2 = 16,  // Typed tuple (no type table, no size field).
  TYPE_VECTOR_UINT2 = 17,
  TYPE_VECTOR_FLOAT2 = 18,
  TYPE_VECTOR_INT3 = 19,  // Typed triple (no type table, no size field).
  TYPE_VECTOR_UINT3 = 20,
  TYPE_VECTOR_FLOAT3 = 21,
  TYPE_VECTOR_INT4 = 22,  // Typed quad (no type table, no size field).
  TYPE_VECTOR_UINT4 = 23,
  TYPE_VECTOR_FLOAT4 = 24,
  TYPE_BLOB = 25,
  TYPE_BOOL = 26,
  TYPE_VECTOR_BOOL = 36,  // To allow the same type of conversion of type to vector type.
};

inline bool IsInline(Type t) { return t <= TYPE_FLOAT || t == TYPE_BOOL; }

inline bool IsTypedVectorElementType(Type t) {
  return (t >= TYPE_INT && t <= TYPE_STRING) || t == TYPE_BOOL;
}

inline bool IsTypedVector(Type t) {
  return (t >= TYPE_VECTOR_INT && t <= TYPE_VECTOR_STRING) ||
         t == TYPE_VECTOR_BOOL;
}

inline bool IsFixedTypedVector(Type t) {
  return t >= TYPE_VECTOR_INT2 && t <= TYPE_VECTOR_FLOAT4;
}

inline Type ToTypedVector(Type t, size_t fixed_len = 0) {
  assert(IsTypedVectorElementType(t));
  switch (fixed_len) {
    case 0: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT);
    case 2: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT2);
    case 3: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT3);
    case 4: return static_cast<Type>(t - TYPE_INT + TYPE_VECTOR_INT4);
    default: assert(0); return TYPE_NULL;
  }
}

inline Type ToTypedVectorElementType(Type t) {
  assert(IsTypedVector(t));
  return static_cast<Type>(t - TYPE_VECTOR_INT + TYPE_INT);
}

inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) {
  assert(IsFixedTypedVector(t));
  auto fixed_type = t - TYPE_VECTOR_INT2;
  *len = static_cast<uint8_t>(fixed_type / 3 + 2);  // 3 types each, starting from length 2.
  return static_cast<Type>(fixed_type % 3 + TYPE_INT);
}

// TODO: implement proper support for 8/16bit floats, or decide not to
// support them.
typedef int16_t half;
typedef int8_t quarter;

// TODO: can we do this without conditionals using intrinsics or inline asm
// on some platforms? Given branch prediction the method below should be
// decently quick, but it is the most frequently executed function.
// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
// which that doesn't work (or where we'd read into un-owned memory).
template <typename R, typename T1, typename T2, typename T4, typename T8>
R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) {
  return byte_width < 4
    ? (byte_width < 2 ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
                      : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
    : (byte_width < 8 ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
                      : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
}

inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(
      data, byte_width);
}

inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) {
  // This is the "hottest" function (all offset lookups use this), so worth
  // optimizing if possible.
  // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count
  // is a constant, which here it isn't. Test if memcpy is still faster than
  // the conditionals in ReadSizedScalar. Can also use inline asm.
#ifdef _MSC_VER
  uint64_t u = 0;
  __movsb(reinterpret_cast<uint8_t *>(&u),
          reinterpret_cast<const uint8_t *>(data), byte_width);
  return flatbuffers::EndianScalar(u);
#else
  return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(
      data, byte_width);
#endif
}

inline double ReadDouble(const uint8_t *data, uint8_t byte_width) {
  return ReadSizedScalar<double, quarter, half, float, double>(data,
                                                               byte_width);
}

inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) {
  return offset - ReadUInt64(offset, byte_width);
}

template<typename T> const uint8_t *Indirect(const uint8_t *offset) {
  return offset - flatbuffers::ReadScalar<T>(offset);
}

inline BitWidth WidthU(uint64_t u) {
#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) { \
    if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \
  }
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
  FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
  return BIT_WIDTH_64;
}

inline BitWidth WidthI(int64_t i) {
  auto u = static_cast<uint64_t>(i) << 1;
  return WidthU(i >= 0 ? u : ~u);
}

inline BitWidth WidthF(double f) {
  return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32
                                                         : BIT_WIDTH_64;
}
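
// A few illustrative values for the width helpers above (a sketch, not part
// of the API): WidthU picks the smallest width the unsigned value fits in,
// WidthI doubles the magnitude to make room for the sign, and WidthF checks
// whether the double survives a round-trip through float.
//
//   WidthU(255) == BIT_WIDTH_8    WidthU(256) == BIT_WIDTH_16
//   WidthI(-1)  == BIT_WIDTH_8    WidthI(128) == BIT_WIDTH_16
//   WidthF(0.5) == BIT_WIDTH_32   WidthF(0.1) == BIT_WIDTH_64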

// Base class of all types below.
// Points into the data buffer and allows access to one type.
class Object {
 public:
  Object(const uint8_t *data, uint8_t byte_width)
    : data_(data), byte_width_(byte_width) {}

 protected:
  const uint8_t *data_;
  uint8_t byte_width_;
};

// Stores size in `byte_width_` bytes before data_ pointer.
class Sized : public Object {
 public:
  Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width) {}
  size_t size() const {
    return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
  }
};

class String : public Sized {
 public:
  String(const uint8_t *data, uint8_t byte_width)
    : Sized(data, byte_width) {}

  size_t length() const { return size(); }
  const char *c_str() const { return reinterpret_cast<const char *>(data_); }
  std::string str() const { return std::string(c_str(), length()); }

  static String EmptyString() {
    static const uint8_t empty_string[] = { 0/*len*/, 0/*terminator*/ };
    return String(empty_string + 1, 1);
  }
  bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
};

class Blob : public Sized {
 public:
  Blob(const uint8_t *data_buf, uint8_t byte_width)
    : Sized(data_buf, byte_width) {}

  static Blob EmptyBlob() {
    static const uint8_t empty_blob[] = { 0/*len*/ };
    return Blob(empty_blob + 1, 1);
  }
  bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
  const uint8_t *data() const { return data_; }
};

class Vector : public Sized {
 public:
  Vector(const uint8_t *data, uint8_t byte_width)
    : Sized(data, byte_width) {}

  Reference operator[](size_t i) const;

  static Vector EmptyVector() {
    static const uint8_t empty_vector[] = { 0/*len*/ };
    return Vector(empty_vector + 1, 1);
  }
  bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
};

class TypedVector : public Sized {
 public:
  TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
    : Sized(data, byte_width), type_(element_type) {}

  Reference operator[](size_t i) const;

  static TypedVector EmptyTypedVector() {
    static const uint8_t empty_typed_vector[] = { 0/*len*/ };
    return TypedVector(empty_typed_vector + 1, 1, TYPE_INT);
  }
  bool IsTheEmptyVector() const {
    return data_ == TypedVector::EmptyTypedVector().data_;
  }

  Type ElementType() { return type_; }

 private:
  Type type_;

  friend Map;
};

class FixedTypedVector : public Object {
 public:
  FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type,
                   uint8_t len)
    : Object(data, byte_width), type_(element_type), len_(len) {}

  Reference operator[](size_t i) const;

  static FixedTypedVector EmptyFixedTypedVector() {
    static const uint8_t fixed_empty_vector[] = { 0/* unused */ };
    return FixedTypedVector(fixed_empty_vector, 1, TYPE_INT, 0);
  }
  bool IsTheEmptyFixedTypedVector() const {
    return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
  }

  Type ElementType() { return type_; }
  uint8_t size() { return len_; }

 private:
  Type type_;
  uint8_t len_;
};

class Map : public Vector {
 public:
  Map(const uint8_t *data, uint8_t byte_width)
    : Vector(data, byte_width) {}

  Reference operator[](const char *key) const;
  Reference operator[](const std::string &key) const;

  Vector Values() const { return Vector(data_, byte_width_); }

  TypedVector Keys() const {
    const size_t num_prefixed_fields = 3;
    auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
    return TypedVector(Indirect(keys_offset, byte_width_),
                       static_cast<uint8_t>(
                         ReadUInt64(keys_offset + byte_width_, byte_width_)),
                       TYPE_KEY);
  }

  static Map EmptyMap() {
    static const uint8_t empty_map[] = {
      0/*keys_len*/, 0/*keys_offset*/, 1/*keys_width*/, 0/*len*/
    };
    return Map(empty_map + 4, 1);
  }

  bool IsTheEmptyMap() const {
    return data_ == EmptyMap().data_;
  }
};

class Reference {
 public:
  Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width,
            Type type)
    : data_(data), parent_width_(parent_width), byte_width_(byte_width),
      type_(type) {}

  Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
    : data_(data), parent_width_(parent_width) {
    byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
    type_ = static_cast<Type>(packed_type >> 2);
  }

  Type GetType() const { return type_; }

  bool IsNull() const { return type_ == TYPE_NULL; }
  bool IsBool() const { return type_ == TYPE_BOOL; }
  bool IsInt() const { return type_ == TYPE_INT ||
                              type_ == TYPE_INDIRECT_INT; }
  bool IsUInt() const { return type_ == TYPE_UINT ||
                               type_ == TYPE_INDIRECT_UINT; }
  bool IsIntOrUint() const { return IsInt() || IsUInt(); }
  bool IsFloat() const { return type_ == TYPE_FLOAT ||
                                type_ == TYPE_INDIRECT_FLOAT; }
  bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
  bool IsString() const { return type_ == TYPE_STRING; }
  bool IsKey() const { return type_ == TYPE_KEY; }
  bool IsVector() const { return type_ == TYPE_VECTOR || type_ == TYPE_MAP; }
  bool IsMap() const { return type_ == TYPE_MAP; }
  bool IsBlob() const { return type_ == TYPE_BLOB; }

  bool AsBool() const {
    return (type_ == TYPE_BOOL ? ReadUInt64(data_, parent_width_)
                               : AsUInt64()) != 0;
  }

  // Reads any type as an int64_t. Never fails, does the most sensible
  // conversion: floats are truncated, strings are parsed for a number,
  // vectors/maps return their size. Returns 0 if all else fails.
  int64_t AsInt64() const {
    if (type_ == TYPE_INT) {
      // A fast path for the common case.
      return ReadInt64(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
      case TYPE_UINT: return ReadUInt64(data_, parent_width_);
      case TYPE_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
      case TYPE_FLOAT: return static_cast<int64_t>(
                                ReadDouble(data_, parent_width_));
      case TYPE_INDIRECT_FLOAT: return static_cast<int64_t>(
                                         ReadDouble(Indirect(), byte_width_));
      case TYPE_NULL: return 0;
      case TYPE_STRING: return flatbuffers::StringToInt(AsString().c_str());
      case TYPE_VECTOR: return static_cast<int64_t>(AsVector().size());
      case TYPE_BOOL: return ReadInt64(data_, parent_width_);
      default:
        // Convert other things to int.
        return 0;
    }
  }

  // TODO: could specialize these to not use AsInt64() if that saves
  // extension ops in generated code, and use a faster op than ReadInt64.
  int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
  int8_t  AsInt8()  const { return static_cast<int8_t> (AsInt64()); }

  uint64_t AsUInt64() const {
    if (type_ == TYPE_UINT) {
      // A fast path for the common case.
      return ReadUInt64(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_);
      case TYPE_INT: return ReadInt64(data_, parent_width_);
      case TYPE_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_);
      case TYPE_FLOAT: return static_cast<uint64_t>(
                                ReadDouble(data_, parent_width_));
      case TYPE_INDIRECT_FLOAT: return static_cast<uint64_t>(
                                         ReadDouble(Indirect(), byte_width_));
      case TYPE_NULL: return 0;
      case TYPE_STRING: return flatbuffers::StringToUInt(AsString().c_str());
      case TYPE_VECTOR: return static_cast<uint64_t>(AsVector().size());
      case TYPE_BOOL: return ReadUInt64(data_, parent_width_);
      default:
        // Convert other things to uint.
        return 0;
    }
  }

  uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
  uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
  uint8_t  AsUInt8()  const { return static_cast<uint8_t> (AsUInt64()); }

  double AsDouble() const {
    if (type_ == TYPE_FLOAT) {
      // A fast path for the common case.
      return ReadDouble(data_, parent_width_);
    } else switch (type_) {
      case TYPE_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_);
      case TYPE_INT: return static_cast<double>(
                              ReadInt64(data_, parent_width_));
      case TYPE_UINT: return static_cast<double>(
                               ReadUInt64(data_, parent_width_));
      case TYPE_INDIRECT_INT: return static_cast<double>(
                                       ReadInt64(Indirect(), byte_width_));
      case TYPE_INDIRECT_UINT: return static_cast<double>(
                                        ReadUInt64(Indirect(), byte_width_));
      case TYPE_NULL: return 0.0;
      case TYPE_STRING: return strtod(AsString().c_str(), nullptr);
      case TYPE_VECTOR: return static_cast<double>(AsVector().size());
      case TYPE_BOOL: return static_cast<double>(
                               ReadUInt64(data_, parent_width_));
      default:
        // Convert other things to float.
        return 0;
    }
  }

  float AsFloat() const { return static_cast<float>(AsDouble()); }

  const char *AsKey() const {
    if (type_ == TYPE_KEY) {
      return reinterpret_cast<const char *>(Indirect());
    } else {
      return "";
    }
  }

  // This function returns the empty string if you try to read something
  // that is not a string.
  String AsString() const {
    if (type_ == TYPE_STRING) {
      return String(Indirect(), byte_width_);
    } else {
      return String::EmptyString();
    }
  }

  // Unlike AsString(), this will convert any type to a std::string.
  std::string ToString() {
    std::string s;
    ToString(false, false, s);
    return s;
  }
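
  // A sketch of the output (assuming a map built as { "a": [ 1, 2 ], "b": "hi" }
  // with the Builder further down):
  //   ToString()               -> { a: [ 1, 2 ], b: "hi" }
  //   ToString(true, true, s)  -> { "a": [ 1, 2 ], "b": "hi" }   (valid JSON)
  // Nested strings are always quoted; the flags on the overload below only
  // affect top-level strings and keys.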
  // Convert any type to a JSON-like string. strings_quoted determines if
  // string values at the top level receive "" quotes (inside other values
  // they always do). keys_quoted determines if keys are quoted, at any level.
  // TODO(wvo): add further options to have indentation/newlines.
  void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const {
    if (type_ == TYPE_STRING) {
      String str(Indirect(), byte_width_);
      if (strings_quoted) {
        flatbuffers::EscapeString(str.c_str(), str.length(), &s, true);
      } else {
        s.append(str.c_str(), str.length());
      }
    } else if (IsKey()) {
      auto str = AsKey();
      if (keys_quoted) {
        flatbuffers::EscapeString(str, strlen(str), &s, true);
      } else {
        s += str;
      }
    } else if (IsInt()) {
      s += flatbuffers::NumToString(AsInt64());
    } else if (IsUInt()) {
      s += flatbuffers::NumToString(AsUInt64());
    } else if (IsFloat()) {
      s += flatbuffers::NumToString(AsDouble());
    } else if (IsNull()) {
      s += "null";
    } else if (IsBool()) {
      s += AsBool() ? "true" : "false";
    } else if (IsMap()) {
      s += "{ ";
      auto m = AsMap();
      auto keys = m.Keys();
      auto vals = m.Values();
      for (size_t i = 0; i < keys.size(); i++) {
        keys[i].ToString(true, keys_quoted, s);
        s += ": ";
        vals[i].ToString(true, keys_quoted, s);
        if (i < keys.size() - 1) s += ", ";
      }
      s += " }";
    } else if (IsVector()) {
      s += "[ ";
      auto v = AsVector();
      for (size_t i = 0; i < v.size(); i++) {
        v[i].ToString(true, keys_quoted, s);
        if (i < v.size() - 1) s += ", ";
      }
      s += " ]";
    } else {
      s += "(?)";
    }
  }

  // This function returns the empty blob if you try to read something that
  // is not a blob. Strings can be viewed as blobs too.
  Blob AsBlob() const {
    if (type_ == TYPE_BLOB || type_ == TYPE_STRING) {
      return Blob(Indirect(), byte_width_);
    } else {
      return Blob::EmptyBlob();
    }
  }

  // This function returns the empty vector if you try to read something that
  // is not a vector. Maps can be viewed as vectors too.
  Vector AsVector() const {
    if (type_ == TYPE_VECTOR || type_ == TYPE_MAP) {
      return Vector(Indirect(), byte_width_);
    } else {
      return Vector::EmptyVector();
    }
  }

  TypedVector AsTypedVector() const {
    if (IsTypedVector(type_)) {
      return TypedVector(Indirect(), byte_width_,
                         ToTypedVectorElementType(type_));
    } else {
      return TypedVector::EmptyTypedVector();
    }
  }

  FixedTypedVector AsFixedTypedVector() const {
    if (IsFixedTypedVector(type_)) {
      uint8_t len = 0;
      auto vtype = ToFixedTypedVectorElementType(type_, &len);
      return FixedTypedVector(Indirect(), byte_width_, vtype, len);
    } else {
      return FixedTypedVector::EmptyFixedTypedVector();
    }
  }

  Map AsMap() const {
    if (type_ == TYPE_MAP) {
      return Map(Indirect(), byte_width_);
    } else {
      return Map::EmptyMap();
    }
  }

  template<typename T> T As();

  // Experimental: Mutation functions.
  // These allow scalars in an already created buffer to be updated in-place.
  // Since by default scalars are stored in the smallest possible space,
  // the new value may not fit, in which case these functions return false.
  // To avoid this, you can construct the values you intend to mutate using
  // Builder::ForceMinimumBitWidth.
  bool MutateInt(int64_t i) {
    if (type_ == TYPE_INT) {
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == TYPE_INDIRECT_INT) {
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else if (type_ == TYPE_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == TYPE_INDIRECT_UINT) {
      auto u = static_cast<uint64_t>(i);
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else {
      return false;
    }
  }

  bool MutateBool(bool b) {
    return type_ == TYPE_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
  }

  bool MutateUInt(uint64_t u) {
    if (type_ == TYPE_UINT) {
      return Mutate(data_, u, parent_width_, WidthU(u));
    } else if (type_ == TYPE_INDIRECT_UINT) {
      return Mutate(Indirect(), u, byte_width_, WidthU(u));
    } else if (type_ == TYPE_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(data_, i, parent_width_, WidthI(i));
    } else if (type_ == TYPE_INDIRECT_INT) {
      auto i = static_cast<int64_t>(u);
      return Mutate(Indirect(), i, byte_width_, WidthI(i));
    } else {
      return false;
    }
  }

  bool MutateFloat(float f) {
    if (type_ == TYPE_FLOAT) {
      return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
    } else if (type_ == TYPE_INDIRECT_FLOAT) {
      return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
    } else {
      return false;
    }
  }

  bool MutateFloat(double d) {
    if (type_ == TYPE_FLOAT) {
      return MutateF(data_, d, parent_width_, WidthF(d));
    } else if (type_ == TYPE_INDIRECT_FLOAT) {
      return MutateF(Indirect(), d, byte_width_, WidthF(d));
    } else {
      return false;
    }
  }

  bool MutateString(const char *str, size_t len) {
    auto s = AsString();
    if (s.IsTheEmptyString()) return false;
    // This is very strict, could allow shorter strings, but that creates
    // garbage.
    if (s.length() != len) return false;
    memcpy(const_cast<char *>(s.c_str()), str, len);
    return true;
  }
  bool MutateString(const char *str) {
    return MutateString(str, strlen(str));
  }
  bool MutateString(const std::string &str) {
    return MutateString(str.data(), str.length());
  }

 private:
  const uint8_t *Indirect() const {
    return flexbuffers::Indirect(data_, parent_width_);
  }

  template<typename T> bool Mutate(const uint8_t *dest, T t, size_t byte_width,
                                   BitWidth value_width) {
    auto fits =
        static_cast<size_t>(static_cast<size_t>(1U) << value_width) <=
        byte_width;
    if (fits) {
      t = flatbuffers::EndianScalar(t);
      memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
    }
    return fits;
  }

  template<typename T> bool MutateF(const uint8_t *dest, T t,
                                    size_t byte_width, BitWidth value_width) {
    if (byte_width == sizeof(double))
      return Mutate(dest, static_cast<double>(t), byte_width, value_width);
    if (byte_width == sizeof(float))
      return Mutate(dest, static_cast<float>(t), byte_width, value_width);
    assert(false);
    return false;
  }

  const uint8_t *data_;
  uint8_t parent_width_;
  uint8_t byte_width_;
  Type type_;
};
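
// A minimal reading sketch (assumes a buffer produced by the Builder defined
// further down, with a map root such as { "age": 42, "name": "joe" }):
//
//   auto root = flexbuffers::GetRoot(buf, size).AsMap();
//   int64_t age = root["age"].AsInt64();            // 42
//   std::string name = root["name"].AsString().str();
//   double also_age = root["age"].AsDouble();       // conversions never fail;
//   auto missing = root["nope"];                    // missing keys yield Null,
//   int64_t zero = missing.AsInt64();               // which converts to 0 / "".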

// Template specialization for As().
template<> inline bool Reference::As<bool>() { return AsBool(); }

template<> inline int8_t Reference::As<int8_t>() { return AsInt8(); }
template<> inline int16_t Reference::As<int16_t>() { return AsInt16(); }
template<> inline int32_t Reference::As<int32_t>() { return AsInt32(); }
template<> inline int64_t Reference::As<int64_t>() { return AsInt64(); }

template<> inline uint8_t Reference::As<uint8_t>() { return AsUInt8(); }
template<> inline uint16_t Reference::As<uint16_t>() { return AsUInt16(); }
template<> inline uint32_t Reference::As<uint32_t>() { return AsUInt32(); }
template<> inline uint64_t Reference::As<uint64_t>() { return AsUInt64(); }

template<> inline double Reference::As<double>() { return AsDouble(); }
template<> inline float Reference::As<float>() { return AsFloat(); }

template<> inline String Reference::As<String>() { return AsString(); }
template<> inline std::string Reference::As<std::string>() {
  return AsString().str();
}

template<> inline Blob Reference::As<Blob>() { return AsBlob(); }
template<> inline Vector Reference::As<Vector>() { return AsVector(); }
template<> inline TypedVector Reference::As<TypedVector>() {
  return AsTypedVector();
}
template<> inline FixedTypedVector Reference::As<FixedTypedVector>() {
  return AsFixedTypedVector();
}
template<> inline Map Reference::As<Map>() { return AsMap(); }

inline uint8_t PackedType(BitWidth bit_width, Type type) {
  return static_cast<uint8_t>(bit_width | (type << 2));
}

inline uint8_t NullPackedType() {
  return PackedType(BIT_WIDTH_8, TYPE_NULL);
}

// Vector accessors.
// Note: if you try to access outside of bounds, you get a Null value back
// instead. Normally this would be an assert, but since this is "dynamically
// typed" data, you may not want that (someone sends you a 2d vector and you
// wanted 3d).
// The Null converts seamlessly into a default value for any other type.
// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
inline Reference Vector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto packed_type = (data_ + len * byte_width_)[i];
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, packed_type);
}

inline Reference TypedVector::operator[](size_t i) const {
  auto len = size();
  if (i >= len) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}

inline Reference FixedTypedVector::operator[](size_t i) const {
  if (i >= len_) return Reference(nullptr, 1, NullPackedType());
  auto elem = data_ + i * byte_width_;
  return Reference(elem, byte_width_, 1, type_);
}
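
// For example (a sketch): if v is a 2-element Vector, then
//   v[5].IsNull()   == true
//   v[5].AsInt64()  == 0
//   v[5].AsString() is the empty string
// The same applies to TypedVector, FixedTypedVector and the Map lookup below.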

template<typename T> int KeyCompare(const void *key, const void *elem) {
  auto str_elem = reinterpret_cast<const char *>(
      Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
  auto skey = reinterpret_cast<const char *>(key);
  return strcmp(skey, str_elem);
}

inline Reference Map::operator[](const char *key) const {
  auto keys = Keys();
  // We can't pass keys.byte_width_ to the comparison function, so we have
  // to pick the right one ahead of time.
  int (*comp)(const void *, const void *) = nullptr;
  switch (keys.byte_width_) {
    case 1: comp = KeyCompare<uint8_t>; break;
    case 2: comp = KeyCompare<uint16_t>; break;
    case 4: comp = KeyCompare<uint32_t>; break;
    case 8: comp = KeyCompare<uint64_t>; break;
  }
  auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_,
                          comp);
  if (!res)
    return Reference(nullptr, 1, NullPackedType());
  auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
  return (*static_cast<const Vector *>(this))[i];
}

inline Reference Map::operator[](const std::string &key) const {
  return (*this)[key.c_str()];
}

inline Reference GetRoot(const uint8_t *buffer, size_t size) {
  // See Finish() below for the serialization counterpart of this.
  // The root starts at the end of the buffer, so we parse backwards from
  // there.
  auto end = buffer + size;
  auto byte_width = *--end;
  auto packed_type = *--end;
  end -= byte_width;  // The root data item.
  return Reference(end, byte_width, packed_type);
}

inline Reference GetRoot(const std::vector<uint8_t> &buffer) {
  return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
}
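
// A small worked example (a sketch): the Builder below, given just Int(1) and
// Finish(), produces the three bytes 0x01 0x04 0x01 -- the root value, its
// packed type (TYPE_INT << 2 | BIT_WIDTH_8), and the root byte width -- so
// GetRoot() only needs the buffer's tail to locate and type the root:
//
//   auto root = flexbuffers::GetRoot(buf, 3);
//   root.AsInt64();  // == 1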

// Flags that configure how the Builder behaves.
// The "Share" flags determine if the Builder automatically tries to pool
// this type. Pooling can reduce the size of serialized data if there are
// multiple maps of the same kind, at the expense of slightly slower
// serialization (the cost of lookups) and more memory use (std::set).
// By default this is on for keys, but off for strings.
// Turn keys off if you have e.g. only one map.
// Turn strings on if you expect many non-unique string values.
// Additionally, sharing key vectors can save space if you have maps with
// identical field populations.
enum BuilderFlag {
  BUILDER_FLAG_NONE = 0,
  BUILDER_FLAG_SHARE_KEYS = 1,
  BUILDER_FLAG_SHARE_STRINGS = 2,
  BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
  BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
  BUILDER_FLAG_SHARE_ALL = 7,
};

class Builder FLATBUFFERS_FINAL_CLASS {
 public:
  Builder(size_t initial_size = 256,
          BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
    : buf_(initial_size), finished_(false), flags_(flags),
      force_min_bit_width_(BIT_WIDTH_8), key_pool(KeyOffsetCompare(buf_)),
      string_pool(StringOffsetCompare(buf_)) {
    buf_.clear();
  }

  /// @brief Get the serialized buffer (after you call `Finish()`).
  /// @return Returns a vector owned by this class.
  const std::vector<uint8_t> &GetBuffer() const {
    Finished();
    return buf_;
  }

  // Size of the buffer. Does not include unfinished values.
  size_t GetSize() const {
    return buf_.size();
  }

  // Reset all state so we can re-use the buffer.
  void Clear() {
    buf_.clear();
    stack_.clear();
    finished_ = false;
    // flags_ remains as-is.
    force_min_bit_width_ = BIT_WIDTH_8;
    key_pool.clear();
    string_pool.clear();
  }

  // All value constructing functions below have two versions: one that
  // takes a key (for placement inside a map) and one that doesn't (for
  // inside vectors and elsewhere).

  void Null() { stack_.push_back(Value()); }
  void Null(const char *key) { Key(key); Null(); }

  void Int(int64_t i) { stack_.push_back(Value(i, TYPE_INT, WidthI(i))); }
  void Int(const char *key, int64_t i) { Key(key); Int(i); }

  void UInt(uint64_t u) { stack_.push_back(Value(u, TYPE_UINT, WidthU(u))); }
  void UInt(const char *key, uint64_t u) { Key(key); UInt(u); }

  void Float(float f) { stack_.push_back(Value(f)); }
  void Float(const char *key, float f) { Key(key); Float(f); }

  void Double(double f) { stack_.push_back(Value(f)); }
  void Double(const char *key, double d) { Key(key); Double(d); }

  void Bool(bool b) { stack_.push_back(Value(b)); }
  void Bool(const char *key, bool b) { Key(key); Bool(b); }

  void IndirectInt(int64_t i) {
    PushIndirect(i, TYPE_INDIRECT_INT, WidthI(i));
  }
  void IndirectInt(const char *key, int64_t i) {
    Key(key);
    IndirectInt(i);
  }

  void IndirectUInt(uint64_t u) {
    PushIndirect(u, TYPE_INDIRECT_UINT, WidthU(u));
  }
  void IndirectUInt(const char *key, uint64_t u) {
    Key(key);
    IndirectUInt(u);
  }

  void IndirectFloat(float f) {
    PushIndirect(f, TYPE_INDIRECT_FLOAT, BIT_WIDTH_32);
  }
  void IndirectFloat(const char *key, float f) {
    Key(key);
    IndirectFloat(f);
  }

  void IndirectDouble(double f) {
    PushIndirect(f, TYPE_INDIRECT_FLOAT, WidthF(f));
  }
  void IndirectDouble(const char *key, double d) {
    Key(key);
    IndirectDouble(d);
  }

  size_t Key(const char *str, size_t len) {
    auto sloc = buf_.size();
    WriteBytes(str, len + 1);
    if (flags_ & BUILDER_FLAG_SHARE_KEYS) {
      auto it = key_pool.find(sloc);
      if (it != key_pool.end()) {
        // Already in the buffer. Remove the key we just serialized, and use
        // the existing offset instead.
        buf_.resize(sloc);
        sloc = *it;
      } else {
        key_pool.insert(sloc);
      }
    }
    stack_.push_back(Value(static_cast<uint64_t>(sloc), TYPE_KEY,
                           BIT_WIDTH_8));
    return sloc;
  }

  size_t Key(const char *str) { return Key(str, strlen(str)); }
  size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }

  size_t String(const char *str, size_t len) {
    auto reset_to = buf_.size();
    auto sloc = CreateBlob(str, len, 1, TYPE_STRING);
    if (flags_ & BUILDER_FLAG_SHARE_STRINGS) {
      StringOffset so(sloc, len);
      auto it = string_pool.find(so);
      if (it != string_pool.end()) {
        // Already in the buffer. Remove the string we just serialized, and
        // use the existing offset instead.
        buf_.resize(reset_to);
        sloc = it->first;
        stack_.back().u_ = sloc;
      } else {
        string_pool.insert(so);
      }
    }
    return sloc;
  }
  size_t String(const char *str) {
    return String(str, strlen(str));
  }
  size_t String(const std::string &str) {
    return String(str.c_str(), str.size());
  }
  void String(const flexbuffers::String &str) {
    String(str.c_str(), str.length());
  }

  void String(const char *key, const char *str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const std::string &str) {
    Key(key);
    String(str);
  }
  void String(const char *key, const flexbuffers::String &str) {
    Key(key);
    String(str);
  }

  size_t Blob(const void *data, size_t len) {
    return CreateBlob(data, len, 0, TYPE_BLOB);
  }
  size_t Blob(const std::vector<uint8_t> &v) {
    return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, TYPE_BLOB);
  }

  // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
  // e.g. Vector etc. Also in overloaded versions.
  // Also some FlatBuffers types?

  size_t StartVector() { return stack_.size(); }
  size_t StartVector(const char *key) { Key(key); return stack_.size(); }
  size_t StartMap() { return stack_.size(); }
  size_t StartMap(const char *key) { Key(key); return stack_.size(); }

  // TODO(wvo): allow this to specify an alignment greater than the natural
  // alignment.
  size_t EndVector(size_t start, bool typed, bool fixed) {
    auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
    // Remove temp elements and return vector.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }
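
  // A sketch of the manual Start/End pattern (the lambda-based Vector()/Map()
  // helpers further down wrap exactly this):
  //   auto start = fbb.StartVector();
  //   fbb.Int(1);
  //   fbb.String("two");
  //   fbb.EndVector(start, /*typed=*/false, /*fixed=*/false);
  // For maps, push alternating Key()/value pairs between StartMap() and
  // EndMap().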

  size_t EndMap(size_t start) {
    // We should have interleaved keys and values on the stack.
    // Make sure it is an even number:
    auto len = stack_.size() - start;
    assert(!(len & 1));
    len /= 2;
    // Make sure keys are all strings:
    for (auto key = start; key < stack_.size(); key += 2) {
      assert(stack_[key].type_ == TYPE_KEY);
    }
    // Now sort values, so later we can do a binary search lookup.
    // We want to sort 2 array elements at a time.
    struct TwoValue { Value key; Value val; };
    // TODO(wvo): strict aliasing?
    // TODO(wvo): allow the caller to indicate the data is already sorted
    // for maximum efficiency? With an assert to check sortedness to make
    // sure we're not breaking binary search.
    // Or, we can track if the map is sorted as keys are added, which would
    // be quite cheap (cheaper than checking it here), so we can skip this
    // step automatically when applicable, and encourage people to write in
    // sorted fashion.
    // std::sort is typically already a lot faster on sorted data though.
    auto dict =
        reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) +
                                     start);
    std::sort(dict, dict + len,
              [&](const TwoValue &a, const TwoValue &b) -> bool {
      auto as = reinterpret_cast<const char *>(
          flatbuffers::vector_data(buf_) + a.key.u_);
      auto bs = reinterpret_cast<const char *>(
          flatbuffers::vector_data(buf_) + b.key.u_);
      auto comp = strcmp(as, bs);
      // If this assertion hits, you've added two keys with the same value
      // to this map.
      // TODO: Have to check for pointer equality, as some sort
      // implementations apparently call this function with the same
      // element?? Why?
      assert(comp || &a == &b);
      return comp < 0;
    });
    // First create a vector out of all keys.
    // TODO(wvo): if BUILDER_FLAG_SHARE_KEY_VECTORS is set, see if we can
    // share the first vector.
    auto keys = CreateVector(start, len, 2, true, false);
    auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
    // Remove temp elements and return map.
    stack_.resize(start);
    stack_.push_back(vec);
    return static_cast<size_t>(vec.u_);
  }

  template<typename F> size_t Vector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, false, false);
  }
  template<typename F> size_t Vector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, false, false);
  }
  template<typename F, typename T> size_t Vector(const char *key, F f,
                                                 T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, false, false);
  }

  template<typename T> void Vector(const T *elems, size_t len) {
    if (flatbuffers::is_scalar<T>::value) {
      // This path should be a lot quicker and use less space.
      ScalarVector(elems, len, false);
    } else {
      auto start = StartVector();
      for (size_t i = 0; i < len; i++) Add(elems[i]);
      EndVector(start, false, false);
    }
  }
  template<typename T> void Vector(const char *key, const T *elems,
                                   size_t len) {
    Key(key);
    Vector(elems, len);
  }
  template<typename T> void Vector(const std::vector<T> &vec) {
    Vector(flatbuffers::vector_data(vec), vec.size());
  }

  template<typename F> size_t TypedVector(F f) {
    auto start = StartVector();
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(F f, T &state) {
    auto start = StartVector();
    f(state);
    return EndVector(start, true, false);
  }
  template<typename F> size_t TypedVector(const char *key, F f) {
    auto start = StartVector(key);
    f();
    return EndVector(start, true, false);
  }
  template<typename F, typename T> size_t TypedVector(const char *key, F f,
                                                      T &state) {
    auto start = StartVector(key);
    f(state);
    return EndVector(start, true, false);
  }

  template<typename T> size_t FixedTypedVector(const T *elems, size_t len) {
    // We only support a few fixed vector lengths. Anything bigger, use a
    // regular typed vector.
    assert(len >= 2 && len <= 4);
    // And only scalar values.
    assert(flatbuffers::is_scalar<T>::value);
    return ScalarVector(elems, len, true);
  }

  template<typename T> size_t FixedTypedVector(const char *key,
                                               const T *elems, size_t len) {
    Key(key);
    return FixedTypedVector(elems, len);
  }

  template<typename F> size_t Map(F f) {
    auto start = StartMap();
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(F f, T &state) {
    auto start = StartMap();
    f(state);
    return EndMap(start);
  }
  template<typename F> size_t Map(const char *key, F f) {
    auto start = StartMap(key);
    f();
    return EndMap(start);
  }
  template<typename F, typename T> size_t Map(const char *key, F f,
                                              T &state) {
    auto start = StartMap(key);
    f(state);
    return EndMap(start);
  }
  template<typename T> void Map(const std::map<std::string, T> &map) {
    auto start = StartMap();
    for (auto it = map.begin(); it != map.end(); ++it)
      Add(it->first.c_str(), it->second);
    EndMap(start);
  }

  // Overloaded Add that tries to call the correct function above.
  void Add(int8_t i) { Int(i); }
  void Add(int16_t i) { Int(i); }
  void Add(int32_t i) { Int(i); }
  void Add(int64_t i) { Int(i); }
  void Add(uint8_t u) { UInt(u); }
  void Add(uint16_t u) { UInt(u); }
  void Add(uint32_t u) { UInt(u); }
  void Add(uint64_t u) { UInt(u); }
  void Add(float f) { Float(f); }
  void Add(double d) { Double(d); }
  void Add(bool b) { Bool(b); }
  void Add(const char *str) { String(str); }
  void Add(const std::string &str) { String(str); }
  void Add(const flexbuffers::String &str) { String(str); }

  template<typename T> void Add(const std::vector<T> &vec) {
    Vector(vec);
  }

  template<typename T> void Add(const char *key, const T &t) {
    Key(key);
    Add(t);
  }

  template<typename T> void Add(const std::map<std::string, T> &map) {
    Map(map);
  }

  template<typename T> void operator+=(const T &t) {
    Add(t);
  }

  // This function is useful in combination with the Mutate* functions above.
  // It forces elements of vectors and maps to have a minimum size, such that
  // they can later be updated without failing.
  // Call with no arguments to reset.
  void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) {
    force_min_bit_width_ = bw;
  }

  void Finish() {
    // If you hit this assert, you likely have objects that were never
    // included in a parent. You need to have exactly one root to finish a
    // buffer. Check that your Start/End calls are matched, and that all
    // objects are inside some other object.
    assert(stack_.size() == 1);

    // Write root value.
    auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
    WriteAny(stack_[0], byte_width);
    // Write root type.
    Write(stack_[0].StoredPackedType(), 1);
    // Write root size. Normally determined by parent, but root has no
    // parent :)
    Write(byte_width, 1);

    finished_ = true;
  }
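
  // Typical usage (a sketch):
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.Int("age", 42);
  //     fbb.String("name", "joe");
  //     fbb.Vector("scores", [&]() { fbb.Int(1); fbb.Int(2); fbb.Int(3); });
  //   });
  //   fbb.Finish();
  //   const std::vector<uint8_t> &buf = fbb.GetBuffer();
  //   // Read it back via GetRoot(buf).AsMap() (see GetRoot above).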

 private:
  void Finished() const {
    // If you get this assert, you're attempting to access a buffer which
    // hasn't been finished yet. Be sure to call Builder::Finish with your
    // root object.
    assert(finished_);
  }

  // Align to prepare for writing a scalar with a certain size.
  uint8_t Align(BitWidth alignment) {
    auto byte_width = 1U << alignment;
    buf_.insert(buf_.end(),
                flatbuffers::PaddingBytes(buf_.size(), byte_width), 0);
    return static_cast<uint8_t>(byte_width);
  }

  void WriteBytes(const void *val, size_t size) {
    buf_.insert(buf_.end(),
                reinterpret_cast<const uint8_t *>(val),
                reinterpret_cast<const uint8_t *>(val) + size);
  }

  template<typename T> void Write(T val, size_t byte_width) {
    assert(sizeof(T) >= byte_width);
    val = flatbuffers::EndianScalar(val);
    WriteBytes(&val, byte_width);
  }

  void WriteDouble(double f, uint8_t byte_width) {
    switch (byte_width) {
      case 8: Write(f, byte_width); break;
      case 4: Write(static_cast<float>(f), byte_width); break;
      //case 2: Write(static_cast<half>(f), byte_width); break;
      //case 1: Write(static_cast<quarter>(f), byte_width); break;
      default: assert(0);
    }
  }

  void WriteOffset(uint64_t o, uint8_t byte_width) {
    auto reloff = buf_.size() - o;
    assert(reloff < 1ULL << (byte_width * 8) || byte_width == 8);
    Write(reloff, byte_width);
  }

  template<typename T> void PushIndirect(T val, Type type,
                                         BitWidth bit_width) {
    auto byte_width = Align(bit_width);
    auto iloc = buf_.size();
    Write(val, byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
  }

  static BitWidth WidthB(size_t byte_width) {
    switch (byte_width) {
      case 1: return BIT_WIDTH_8;
      case 2: return BIT_WIDTH_16;
      case 4: return BIT_WIDTH_32;
      case 8: return BIT_WIDTH_64;
      default: assert(false); return BIT_WIDTH_64;
    }
  }

  template<typename T> static Type GetScalarType() {
    assert(flatbuffers::is_scalar<T>::value);
    return flatbuffers::is_floating_point<T>::value
        ? TYPE_FLOAT
        : flatbuffers::is_same<T, bool>::value
            ? TYPE_BOOL
            : (flatbuffers::is_unsigned<T>::value ? TYPE_UINT : TYPE_INT);
  }

  struct Value {
    union {
      int64_t i_;
      uint64_t u_;
      double f_;
    };

    Type type_;

    // For scalars: of itself, for vector: of its elements, for string: length.
    BitWidth min_bit_width_;

    Value() : i_(0), type_(TYPE_NULL), min_bit_width_(BIT_WIDTH_8) {}

    Value(bool b)
      : u_(static_cast<uint64_t>(b)), type_(TYPE_BOOL),
        min_bit_width_(BIT_WIDTH_8) {}

    Value(int64_t i, Type t, BitWidth bw)
      : i_(i), type_(t), min_bit_width_(bw) {}
    Value(uint64_t u, Type t, BitWidth bw)
      : u_(u), type_(t), min_bit_width_(bw) {}

    Value(float f)
      : f_(f), type_(TYPE_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
    Value(double f)
      : f_(f), type_(TYPE_FLOAT), min_bit_width_(WidthF(f)) {}

    uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      return PackedType(StoredWidth(parent_bit_width_), type_);
    }

    BitWidth ElemWidth(size_t buf_size, size_t elem_index) const {
      if (IsInline(type_)) {
        return min_bit_width_;
      } else {
        // We have an absolute offset, but want to store a relative offset
        // elem_index elements beyond the current buffer end. Since whether
        // the relative offset fits in a certain byte_width depends on
        // the size of the elements before it (and their alignment), we have
        // to test for each size in turn.
        for (size_t byte_width = 1;
             byte_width <= sizeof(flatbuffers::largest_scalar_t);
             byte_width *= 2) {
          // Where are we going to write this offset?
          auto offset_loc =
              buf_size +
              flatbuffers::PaddingBytes(buf_size, byte_width) +
              elem_index * byte_width;
          // Compute relative offset.
          auto offset = offset_loc - u_;
          // Does it fit?
          auto bit_width = WidthU(offset);
          if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) ==
              byte_width)
            return bit_width;
        }
        assert(false);  // Must match one of the sizes above.
        return BIT_WIDTH_64;
      }
    }

    BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const {
      if (IsInline(type_)) {
        return (std::max)(min_bit_width_, parent_bit_width_);
      } else {
        return min_bit_width_;
      }
    }
  };

  void WriteAny(const Value &val, uint8_t byte_width) {
    switch (val.type_) {
      case TYPE_NULL:
      case TYPE_INT:
        Write(val.i_, byte_width);
        break;
      case TYPE_BOOL:
      case TYPE_UINT:
        Write(val.u_, byte_width);
        break;
      case TYPE_FLOAT:
        WriteDouble(val.f_, byte_width);
        break;
      default:
        WriteOffset(val.u_, byte_width);
        break;
    }
  }

  size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) {
    auto bit_width = WidthU(len);
    auto byte_width = Align(bit_width);
    Write<uint64_t>(len, byte_width);
    auto sloc = buf_.size();
    WriteBytes(data, len + trailing);
    stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
    return sloc;
  }

  template<typename T> size_t ScalarVector(const T *elems, size_t len,
                                           bool fixed) {
    auto vector_type = GetScalarType<T>();
    auto byte_width = sizeof(T);
    auto bit_width = WidthB(byte_width);
    // If you get this assert, you're trying to write a vector with a size
    // field that is bigger than the scalars you're trying to write (e.g. a
    // byte vector > 255 elements). For such types, write a "blob" instead.
    // TODO: instead of asserting, we could write the vector with larger
    // elements, though that would be wasteful.
    assert(WidthU(len) <= bit_width);
    if (!fixed) Write<uint64_t>(len, byte_width);
    auto vloc = buf_.size();
    for (size_t i = 0; i < len; i++) Write(elems[i], byte_width);
    stack_.push_back(Value(static_cast<uint64_t>(vloc),
                           ToTypedVector(vector_type, fixed ? len : 0),
                           bit_width));
    return vloc;
  }

  Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed,
                     bool fixed, const Value *keys = nullptr) {
    // Figure out the smallest bit width we can store this vector with.
    auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
    auto prefix_elems = 1;
    if (keys) {
      // If this vector is part of a map, we will prefix an offset to the
      // keys to this vector.
      bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
      prefix_elems += 2;
    }
    Type vector_type = TYPE_KEY;
    // Check bit widths and types for all elements.
    for (size_t i = start; i < stack_.size(); i += step) {
      auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems);
      bit_width = (std::max)(bit_width, elem_width);
      if (typed) {
        if (i == start) {
          vector_type = stack_[i].type_;
        } else {
          // If you get this assert, you are writing a typed vector with
          // elements that are not all the same type.
          assert(vector_type == stack_[i].type_);
        }
      }
    }
    // If you get this assert, your fixed types are not one of:
    // Int / UInt / Float / Key.
    assert(IsTypedVectorElementType(vector_type));
    auto byte_width = Align(bit_width);
    // Write vector. First the keys width/offset if available, and size.
    if (keys) {
      WriteOffset(keys->u_, byte_width);
      Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
    }
    if (!fixed) Write<uint64_t>(vec_len, byte_width);
    // Then the actual data.
    auto vloc = buf_.size();
    for (size_t i = start; i < stack_.size(); i += step) {
      WriteAny(stack_[i], byte_width);
    }
    // Then the types.
    if (!typed) {
      for (size_t i = start; i < stack_.size(); i += step) {
        buf_.push_back(stack_[i].StoredPackedType(bit_width));
      }
    }
    return Value(static_cast<uint64_t>(vloc),
                 keys
                   ? TYPE_MAP
                   : (typed
                      ? ToTypedVector(vector_type, fixed ? vec_len : 0)
                      : TYPE_VECTOR),
                 bit_width);
  }

  // You shouldn't really be copying instances of this class.
  Builder(const Builder &);
  Builder &operator=(const Builder &);

  std::vector<uint8_t> buf_;
  std::vector<Value> stack_;

  bool finished_;

  BuilderFlag flags_;

  BitWidth force_min_bit_width_;

  struct KeyOffsetCompare {
    KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(size_t a, size_t b) const {
      auto stra =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
      auto strb =
          reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
      return strcmp(stra, strb) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::pair<size_t, size_t> StringOffset;
  struct StringOffsetCompare {
    StringOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
    bool operator()(const StringOffset &a, const StringOffset &b) const {
      auto stra = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + a.first);
      auto strb = reinterpret_cast<const char *>(
          flatbuffers::vector_data(*buf_) + b.first);
      return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
    }
    const std::vector<uint8_t> *buf_;
  };

  typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
  typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;

  KeyOffsetMap key_pool;
  StringOffsetMap string_pool;
};

}  // namespace flexbuffers

#if defined(_MSC_VER)
#pragma warning(pop)
#endif

#endif  // FLATBUFFERS_FLEXBUFFERS_H_