// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_UTILS_H_
#define V8_UTILS_H_

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <cmath>
#include <string>
#include <type_traits>

#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/v8-fallthrough.h"
#include "src/globals.h"
#include "src/vector.h"

#if defined(V8_OS_AIX)
#include <fenv.h>  // NOLINT(build/c++11)
#endif

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// General helper functions

// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
  c -= '0';
  if (static_cast<unsigned>(c) <= 9) return c;
  c = (c | 0x20) - ('a' - '0');  // detect 0x11..0x16 and 0x31..0x36.
  if (static_cast<unsigned>(c) <= 5) return c + 10;
  return -1;
}

inline char HexCharOfValue(int value) {
  DCHECK(0 <= value && value <= 16);
  if (value < 10) return value + '0';
  return value - 10 + 'A';
}

inline int BoolToInt(bool b) { return b ? 1 : 0; }

// Same as strcmp(s1, s2) == 0, but handles NULL arguments.
inline bool CStringEquals(const char* s1, const char* s2) {
  return (s1 == s2) || (s1 != nullptr && s2 != nullptr && strcmp(s1, s2) == 0);
}

// X must be a power of 2. Returns the number of trailing zeros.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline int WhichPowerOf2(T x) {
  DCHECK(base::bits::IsPowerOfTwo(x));
  int bits = 0;
#ifdef DEBUG
  const T original_x = x;
#endif
  constexpr int max_bits = sizeof(T) * 8;
  static_assert(max_bits <= 64, "integral types are not bigger than 64 bits");
// Avoid shifting by more than the bit width of x to avoid compiler warnings.
#define CHECK_BIGGER(s)                                      \
  if (max_bits > s && x >= T{1} << (max_bits > s ? s : 0)) { \
    bits += s;                                               \
    x >>= max_bits > s ? s : 0;                              \
  }
  CHECK_BIGGER(32)
  CHECK_BIGGER(16)
  CHECK_BIGGER(8)
  CHECK_BIGGER(4)
#undef CHECK_BIGGER
  switch (x) {
    default: UNREACHABLE();
    case 8:
      bits++;
      V8_FALLTHROUGH;
    case 4:
      bits++;
      V8_FALLTHROUGH;
    case 2:
      bits++;
      V8_FALLTHROUGH;
    case 1: break;
  }
  DCHECK_EQ(T{1} << bits, original_x);
  return bits;
}

inline int MostSignificantBit(uint32_t x) {
  static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
  int nibble = 0;
  if (x & 0xffff0000) {
    nibble += 16;
    x >>= 16;
  }
  if (x & 0xff00) {
    nibble += 8;
    x >>= 8;
  }
  if (x & 0xf0) {
    nibble += 4;
    x >>= 4;
  }
  return nibble + msb4[x];
}
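
// Illustrative values for the two helpers above (a sketch, not an exhaustive
// specification):
//   WhichPowerOf2(1) == 0, WhichPowerOf2(64) == 6
//   MostSignificantBit(0) == 0, MostSignificantBit(1) == 1,
//   MostSignificantBit(0x80000000u) == 32  // 1-based position of the top bit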

template <typename T>
static T ArithmeticShiftRight(T x, int shift) {
  DCHECK_LE(0, shift);
  if (x < 0) {
    // Right shift of signed values is implementation defined. Simulate a
    // true arithmetic right shift by adding leading sign bits.
    using UnsignedT = typename std::make_unsigned<T>::type;
    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
    return (static_cast<UnsignedT>(x) >> shift) | mask;
  } else {
    return x >> shift;
  }
}

template <typename T>
int Compare(const T& a, const T& b) {
  if (a == b)
    return 0;
  else if (a < b)
    return -1;
  else
    return 1;
}

// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
// handles.
template <typename T> class Handle;  // Forward declaration.
template <typename T>
int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
  return Compare<T*>(*(*a), *(*b));
}


template <typename T, typename U>
inline bool IsAligned(T value, U alignment) {
  return (value & (alignment - 1)) == 0;
}


// Returns true if (addr + offset) is aligned.
inline bool IsAddressAligned(Address addr,
                             intptr_t alignment,
                             int offset = 0) {
  intptr_t offs = OffsetFrom(addr + offset);
  return IsAligned(offs, alignment);
}


// Returns the maximum of the two parameters.
template <typename T>
constexpr T Max(T a, T b) {
  return a < b ? b : a;
}


// Returns the minimum of the two parameters.
template <typename T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}

// Returns the maximum of the two parameters according to JavaScript semantics.
template <typename T>
T JSMax(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return x;
  return x > y ? x : y;
}

// Returns the minimum of the two parameters according to JavaScript semantics.
template <typename T>
T JSMin(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return y;
  return x > y ? y : x;
}
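
// Sketch of the JavaScript semantics implemented by JSMax/JSMin above
// (illustrative values only):
//   JSMax(+0.0, -0.0) == +0.0   and   JSMin(+0.0, -0.0) == -0.0
//   JSMax(1.0, NaN) and JSMin(1.0, NaN) both return NaN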

// Returns the absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
typename std::make_unsigned<T>::type Abs(T a) {
  // This is a branch-free implementation of the absolute value function and is
  // described in Warren's "Hacker's Delight", chapter 2. It avoids undefined
  // behavior with the arithmetic negation operation on signed values as well.
  typedef typename std::make_unsigned<T>::type unsignedT;
  unsignedT x = static_cast<unsignedT>(a);
  unsignedT y = static_cast<unsignedT>(a >> (sizeof(T) * 8 - 1));
  return (x ^ y) - y;
}

// Returns the negative absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
T Nabs(T a) {
  return a < 0 ? a : -a;
}

// Floor(-0.0) == 0.0
inline double Floor(double x) {
#if V8_CC_MSVC
  if (x == 0) return x;  // Fix for issue 3477.
#endif
  return std::floor(x);
}

inline double Modulo(double x, double y) {
#if defined(V8_OS_WIN)
  // Workaround MS fmod bugs. ECMA-262 says:
  // dividend is finite and divisor is an infinity => result equals dividend
  // dividend is a zero and divisor is nonzero finite => result equals dividend
  if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
      !(x == 0 && (y != 0 && std::isfinite(y)))) {
    x = fmod(x, y);
  }
  return x;
#elif defined(V8_OS_AIX)
  // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE)
  feclearexcept(FE_ALL_EXCEPT);
  double result = std::fmod(x, y);
  int exception = fetestexcept(FE_UNDERFLOW);
  return (exception ? x : result);
#else
  return std::fmod(x, y);
#endif
}

inline double Pow(double x, double y) {
  if (y == 0.0) return 1.0;
  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
    return std::numeric_limits<double>::quiet_NaN();
  }
#if (defined(__MINGW64_VERSION_MAJOR) &&                              \
     (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
    defined(V8_OS_AIX)
  // MinGW64 and AIX have a custom implementation for pow. This handles certain
  // special cases that are different.
  if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
    double f;
    double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
    /* retain sign if odd integer exponent */
    return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
               ? copysign(result, x)
               : result;
  }

  if (x == 2.0) {
    int y_int = static_cast<int>(y);
    if (y == y_int) {
      return std::ldexp(1.0, y_int);
    }
  }
#endif
  return std::pow(x, y);
}

template <typename T>
T SaturateAdd(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a > 0 && b > 0) {
      if (a > std::numeric_limits<T>::max() - b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b < 0) {
      if (a < std::numeric_limits<T>::min() - b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a > std::numeric_limits<T>::max() - b) {
      return std::numeric_limits<T>::max();
    }
  }
  return a + b;
}

template <typename T>
T SaturateSub(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a >= 0 && b < 0) {
      if (a > std::numeric_limits<T>::max() + b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b > 0) {
      if (a < std::numeric_limits<T>::min() + b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a < b) {
      return static_cast<T>(0);
    }
  }
  return a - b;
}
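
// Saturation sketch (illustrative values only):
//   SaturateAdd<uint8_t>(200, 100) == 255   // clamped to the type maximum
//   SaturateAdd<int8_t>(100, 100) == 127
//   SaturateSub<uint8_t>(10, 20) == 0       // unsigned subtraction clamps at 0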

// ----------------------------------------------------------------------------
// BitField is a helper template for encoding and decoding bit fields with
// unsigned content.

template <class T, int shift, int size, class U>
class BitFieldBase {
 public:
  typedef T FieldType;

  // A type U mask of the bit field. To use all bits of a type U of x bits
  // in a bitfield without compiler warnings we have to compute 2^x
  // without using a shift count of x in the computation.
  static const U kOne = static_cast<U>(1U);
  static const U kMask = ((kOne << shift) << size) - (kOne << shift);
  static const U kShift = shift;
  static const U kSize = size;
  static const U kNext = kShift + kSize;
  static const U kNumValues = kOne << size;

  // Value for the field with all bits set.
  static const T kMax = static_cast<T>(kNumValues - 1);

  // Tells whether the provided value fits into the bit field.
  static constexpr bool is_valid(T value) {
    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
  }

  // Returns a type U with the bit field value encoded.
  static U encode(T value) {
    DCHECK(is_valid(value));
    return static_cast<U>(value) << shift;
  }

  // Returns a type U with the bit field value updated.
  static U update(U previous, T value) {
    return (previous & ~kMask) | encode(value);
  }

  // Extracts the bit field from the value.
  static T decode(U value) {
    return static_cast<T>((value & kMask) >> shift);
  }

  STATIC_ASSERT((kNext - 1) / 8 < sizeof(U));
};

template <class T, int shift, int size>
class BitField8 : public BitFieldBase<T, shift, size, uint8_t> {};


template <class T, int shift, int size>
class BitField16 : public BitFieldBase<T, shift, size, uint16_t> {};


template <class T, int shift, int size>
class BitField : public BitFieldBase<T, shift, size, uint32_t> {};


template <class T, int shift, int size>
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> {};
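
// Usage sketch (FooBits is a hypothetical field occupying bits 3..6 of a
// uint32_t word; the values shown are illustrative):
//   class FooBits : public BitField<int, 3, 4> {};
//   uint32_t word = FooBits::encode(9);   // == 9 << 3
//   FooBits::decode(word);                // == 9
//   word = FooBits::update(word, 5);      // replaces the field, keeps all
//                                         // other bits of |word| unchanged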

// Helper macros for defining a contiguous sequence of bit fields. Example:
// (backslashes at the ends of respective lines of this multi-line macro
// definition are omitted here to please the compiler)
//
// #define MAP_BIT_FIELD1(V, _)
// V(IsAbcBit, bool, 1, _)
// V(IsBcdBit, bool, 1, _)
// V(CdeBits, int, 5, _)
// V(DefBits, MutableMode, 1, _)
//
// DEFINE_BIT_FIELDS(MAP_BIT_FIELD1)
// or
// DEFINE_BIT_FIELDS_64(MAP_BIT_FIELD1)
//
#define DEFINE_BIT_FIELD_RANGE_TYPE(Name, Type, Size, _) \
  k##Name##Start, k##Name##End = k##Name##Start + Size - 1,

#define DEFINE_BIT_RANGES(LIST_MACRO)                               \
  struct LIST_MACRO##_Ranges {                                      \
    enum { LIST_MACRO(DEFINE_BIT_FIELD_RANGE_TYPE, _) kBitsCount }; \
  };

#define DEFINE_BIT_FIELD_TYPE(Name, Type, Size, RangesName) \
  typedef BitField<Type, RangesName::k##Name##Start, Size> Name;

#define DEFINE_BIT_FIELD_64_TYPE(Name, Type, Size, RangesName) \
  typedef BitField64<Type, RangesName::k##Name##Start, Size> Name;

#define DEFINE_BIT_FIELDS(LIST_MACRO) \
  DEFINE_BIT_RANGES(LIST_MACRO)       \
  LIST_MACRO(DEFINE_BIT_FIELD_TYPE, LIST_MACRO##_Ranges)

#define DEFINE_BIT_FIELDS_64(LIST_MACRO) \
  DEFINE_BIT_RANGES(LIST_MACRO)          \
  LIST_MACRO(DEFINE_BIT_FIELD_64_TYPE, LIST_MACRO##_Ranges)

// ----------------------------------------------------------------------------
// BitSetComputer is a helper template for encoding and decoding information
// for a variable number of items in an array.
//
// To encode boolean data in a smi array you would use:
// typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
//
template <class T, int kBitsPerItem, int kBitsPerWord, class U>
class BitSetComputer {
 public:
  static const int kItemsPerWord = kBitsPerWord / kBitsPerItem;
  static const int kMask = (1 << kBitsPerItem) - 1;

  // The number of array elements required to embed T information for each item.
  static int word_count(int items) {
    if (items == 0) return 0;
    return (items - 1) / kItemsPerWord + 1;
  }

  // The array index to look at for item.
  static int index(int base_index, int item) {
    return base_index + item / kItemsPerWord;
  }

  // Extract T data for a given item from data.
  static T decode(U data, int item) {
    return static_cast<T>((data >> shift(item)) & kMask);
  }

  // Return the encoding for a store of value for item in previous.
  static U encode(U previous, int item, T value) {
    int shift_value = shift(item);
    int set_bits = (static_cast<int>(value) << shift_value);
    return (previous & ~(kMask << shift_value)) | set_bits;
  }

  static int shift(int item) { return (item % kItemsPerWord) * kBitsPerItem; }
};
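
// Usage sketch (a BoolComputer as in the comment above, here with plain
// 32-bit words; the numbers are illustrative):
//   typedef BitSetComputer<bool, 1, 32, uint32_t> BoolComputer;
//   BoolComputer::word_count(40) == 2;   // 40 one-bit items need two words
//   BoolComputer::index(0, 37) == 1;     // item 37 lives in the second word
//   uint32_t word = BoolComputer::encode(0, 5, true);
//   BoolComputer::decode(word, 5) == true;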

// Helper macros for defining a contiguous sequence of field offset constants.
// Example: (backslashes at the ends of respective lines of this multi-line
// macro definition are omitted here to please the compiler)
//
// #define MAP_FIELDS(V)
// V(kField1Offset, kPointerSize)
// V(kField2Offset, kIntSize)
// V(kField3Offset, kIntSize)
// V(kField4Offset, kPointerSize)
// V(kSize, 0)
//
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
//
#define DEFINE_ONE_FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,

#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
  enum {                                                       \
    LIST_MACRO##_StartOffset = StartOffset - 1,                \
    LIST_MACRO(DEFINE_ONE_FIELD_OFFSET)                        \
  };

// ----------------------------------------------------------------------------
// Hash function.

static const uint64_t kZeroHashSeed = 0;

// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
inline uint32_t ComputeIntegerHash(uint32_t key, uint64_t seed) {
  uint32_t hash = key;
  hash = hash ^ static_cast<uint32_t>(seed);
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}

inline uint32_t ComputeIntegerHash(uint32_t key) {
  return ComputeIntegerHash(key, kZeroHashSeed);
}

inline uint32_t ComputeLongHash(uint64_t key) {
  uint64_t hash = key;
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  return static_cast<uint32_t>(hash);
}


inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeIntegerHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}

inline uint32_t ComputeAddressHash(Address address) {
  return ComputeIntegerHash(static_cast<uint32_t>(address & 0xFFFFFFFFul));
}

// ----------------------------------------------------------------------------
// Generated memcpy/memmove

// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions(Isolate* isolate);

#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;

// Copy memory area. No restrictions.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);

// Keep the distinction of "move" vs. "copy" for the benefit of other
// architectures.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  MemMove(dest, src, size);
}
#elif defined(V8_HOST_ARCH_ARM)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
                                         size_t size) {
  memmove(dest, src, size);
}

typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
                                           size_t size);
extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
                               size_t chars);
// For values < 12, the assembler function is slower than the inlined C code.
const int kMinComplexConvertMemCopy = 12;
V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
                                  size_t size) {
  (*memcopy_uint16_uint8_function)(dest, src, size);
}
#elif defined(V8_HOST_ARCH_MIPS)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
                                         size_t size) {
  memmove(dest, src, size);
}
#else
// Copy memory area to disjoint memory area.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
                                         size_t size) {
  memmove(dest, src, size);
}
const int kMinComplexMemCopy = 8;
#endif  // V8_TARGET_ARCH_IA32
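
// Quick usage note for the copy helpers above (illustrative, with a local
// buffer):
//   char buf[8] = "abcdefg";
//   MemMove(buf + 1, buf, 6);   // overlapping ranges are fine: "aabcdef"
// MemCopy has the same signature but requires the source and destination
// ranges to be disjoint.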


// ----------------------------------------------------------------------------
// Miscellaneous

// Memory offset for lower and higher bits in a 64 bit integer.
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 0;
static const int kInt64UpperHalfMemoryOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 4;
static const int kInt64UpperHalfMemoryOffset = 0;
#endif  // V8_TARGET_LITTLE_ENDIAN

// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false) {}

 private:
  template <typename S> friend class Access;
  T instance_;
  bool is_reserved_;
};


// Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  explicit Access(StaticResource<T>* resource)
      : resource_(resource),
        instance_(&resource->instance_) {
    DCHECK(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = nullptr;
    instance_ = nullptr;
  }

  T* value() { return instance_; }
  T* operator->() { return instance_; }

 private:
  StaticResource<T>* resource_;
  T* instance_;
};

// A pointer that can only be set once and doesn't allow NULL values.
template <typename T>
class SetOncePointer {
 public:
  SetOncePointer() = default;

  bool is_set() const { return pointer_ != nullptr; }

  T* get() const {
    DCHECK_NOT_NULL(pointer_);
    return pointer_;
  }

  void set(T* value) {
    DCHECK(pointer_ == nullptr && value != nullptr);
    pointer_ = value;
  }

  T* operator=(T* value) {
    set(value);
    return value;
  }

  bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
  bool operator!=(std::nullptr_t) const { return pointer_ != nullptr; }

 private:
  T* pointer_ = nullptr;
};


template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) {}

  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
    for (int i = 0; i < kSize; ++i) {
      buffer_[i] = initial_value;
    }
  }

  // When copying, make the underlying Vector reference our buffer.
  EmbeddedVector(const EmbeddedVector& rhs)
      : Vector<T>(rhs) {
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
  }

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
    return *this;
  }

 private:
  T buffer_[kSize];
};
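
// A common usage sketch for EmbeddedVector together with the SNPrintF helper
// declared further down in this header (count is a hypothetical variable):
//   EmbeddedVector<char, 64> buffer;
//   SNPrintF(buffer, "processed %d items", count);
//   PrintF("%s\n", buffer.start());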

// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
                                size_t chars) {
  const lchar* limit = lhs + chars;
  if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
    // memcmp compares byte-by-byte, yielding wrong results for two-byte
    // strings on little-endian systems.
    return memcmp(lhs, rhs, chars);
  }
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
    ++lhs;
    ++rhs;
  }
  return 0;
}

template <typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
  DCHECK_LE(sizeof(lchar), 2);
  DCHECK_LE(sizeof(rchar), 2);
  if (sizeof(lchar) == 1) {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  } else {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  }
}
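
// Sketch: CompareChars works across character widths, e.g. comparing a
// Latin-1 buffer against a two-byte buffer element by element (illustrative):
//   const uint8_t one_byte[] = {'f', 'o', 'o'};
//   const uint16_t two_byte[] = {'f', 'o', 'o'};
//   CompareChars(one_byte, two_byte, 3) == 0;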


// Calculate 10^exponent.
inline int TenToThe(int exponent) {
  DCHECK_LE(exponent, 9);
  DCHECK_GE(exponent, 1);
  int answer = 10;
  for (int i = 1; i < exponent; i++) answer *= 10;
  return answer;
}


template <typename ElementType, int NumElements>
class EmbeddedContainer {
 public:
  EmbeddedContainer() : elems_() {}

  int length() const { return NumElements; }
  const ElementType& operator[](int i) const {
    DCHECK(i < length());
    return elems_[i];
  }
  ElementType& operator[](int i) {
    DCHECK(i < length());
    return elems_[i];
  }

 private:
  ElementType elems_[NumElements];
};


template <typename ElementType>
class EmbeddedContainer<ElementType, 0> {
 public:
  int length() const { return 0; }
  const ElementType& operator[](int i) const {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
  ElementType& operator[](int i) {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
};


// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the
// buffer bounds on all operations in debug mode.
// This simple base class does not allow formatted output.
class SimpleStringBuilder {
 public:
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit SimpleStringBuilder(int size);

  SimpleStringBuilder(char* buffer, int size)
      : buffer_(buffer, size), position_(0) {}

  ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }

  int size() const { return buffer_.length(); }

  // Get the current position in the builder.
  int position() const {
    DCHECK(!is_finalized());
    return position_;
  }

  // Reset the position.
  void Reset() { position_ = 0; }

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  // instead.
  void AddCharacter(char c) {
    DCHECK_NE(c, '\0');
    DCHECK(!is_finalized() && position_ < buffer_.length());
    buffer_[position_++] = c;
  }

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given 0-terminated string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Add the decimal representation of the value.
  void AddDecimalInteger(int value);

  // Finalize the string by 0-terminating it and returning the buffer.
  char* Finalize();

 protected:
  Vector<char> buffer_;
  int position_;

  bool is_finalized() const { return position_ < 0; }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
};
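
// Usage sketch (illustrative; the caller owns the finalized buffer):
//   SimpleStringBuilder builder(32);
//   builder.AddString("x = ");
//   builder.AddDecimalInteger(42);
//   char* result = builder.Finalize();   // "x = 42"
//   DeleteArray(result);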


// A poor man's version of STL's bitset: A bit set of enums E (without explicit
// values), fitting into an integral type T.
template <class E, class T = int>
class EnumSet {
 public:
  explicit EnumSet(T bits = 0) : bits_(bits) {}
  bool IsEmpty() const { return bits_ == 0; }
  bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
  bool ContainsAnyOf(const EnumSet& set) const {
    return (bits_ & set.bits_) != 0;
  }
  void Add(E element) { bits_ |= Mask(element); }
  void Add(const EnumSet& set) { bits_ |= set.bits_; }
  void Remove(E element) { bits_ &= ~Mask(element); }
  void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
  void RemoveAll() { bits_ = 0; }
  void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
  T ToIntegral() const { return bits_; }
  bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
  bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
  EnumSet operator|(const EnumSet& set) const {
    return EnumSet(bits_ | set.bits_);
  }

 private:
  static_assert(std::is_enum<E>::value, "EnumSet can only be used with enums");

  T Mask(E element) const {
    DCHECK_GT(sizeof(T) * CHAR_BIT, static_cast<int>(element));
    return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
  }

  T bits_;
};

// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  return (x << (31 - msb)) >> (lsb + 31 - msb);
}

inline int signed_bitextract_64(int msb, int lsb, int x) {
  // TODO(jbramley): This is broken for big bitfields.
  return (x << (63 - msb)) >> (lsb + 63 - msb);
}

// Check number width.
inline bool is_intn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < 64));
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return !(x >> n);
}

template <class T>
inline T truncate_to_intn(T x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V)                      \
  V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8) \
  V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)\
  V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)\
  V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)\
  V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)\
  V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)\
  V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)\
  V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N) \
  inline bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N) \
  template <class T>         \
  inline bool is_uint##N(T x) { return is_uintn(x, N); }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
  template <class T>                 \
  inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
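
// Width-check sketch (illustrative values):
//   is_int8(127) == true,  is_int8(128) == false,  is_int8(-128) == true
//   is_uint4(15) == true,  is_uint4(16) == false
//   truncate_to_int8(0x1ff) == 0xff   // keeps only the low 8 bits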

class FeedbackSlot {
 public:
  FeedbackSlot() : id_(kInvalidSlot) {}
  explicit FeedbackSlot(int id) : id_(id) {}

  int ToInt() const { return id_; }

  static FeedbackSlot Invalid() { return FeedbackSlot(); }
  bool IsInvalid() const { return id_ == kInvalidSlot; }

  bool operator==(FeedbackSlot that) const { return this->id_ == that.id_; }
  bool operator!=(FeedbackSlot that) const { return !(*this == that); }

  friend size_t hash_value(FeedbackSlot slot) { return slot.ToInt(); }
  friend std::ostream& operator<<(std::ostream& os, FeedbackSlot);

 private:
  static const int kInvalidSlot = -1;

  int id_;
};


class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) {}
  int ToInt() const { return id_; }

  static BailoutId None() { return BailoutId(kNoneId); }
  static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
  static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
  static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
  static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
  static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }

  // Special bailout id support for deopting into the {JSConstructStub} stub.
  // The following hard-coded deoptimization points are supported by the stub:
  // - {ConstructStubCreate} maps to {construct_stub_create_deopt_pc_offset}.
  // - {ConstructStubInvoke} maps to {construct_stub_invoke_deopt_pc_offset}.
  static BailoutId ConstructStubCreate() { return BailoutId(1); }
  static BailoutId ConstructStubInvoke() { return BailoutId(2); }
  bool IsValidForConstructStub() const {
    return id_ == ConstructStubCreate().ToInt() ||
           id_ == ConstructStubInvoke().ToInt();
  }

  bool IsNone() const { return id_ == kNoneId; }
  bool operator==(const BailoutId& other) const { return id_ == other.id_; }
  bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
  friend size_t hash_value(BailoutId);
  V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&, BailoutId);

 private:
  friend class Builtins;

  static const int kNoneId = -1;

  // Using 0 could disguise errors.
  static const int kScriptContextId = 1;
  static const int kFunctionContextId = 2;
  static const int kFunctionEntryId = 3;

  // This AST id identifies the point after the declarations have been visited.
  // We need it to capture the environment effects of declarations that emit
  // code (function declarations).
  static const int kDeclarationsId = 4;

  // Every FunctionState starts with this id.
  static const int kFirstUsableId = 5;

  // Every compiled stub starts with this id.
  static const int kStubEntryId = 6;

  // Builtin continuations bailout ids start here. If you need to add a
  // non-builtin BailoutId, add it before this id so that this Id has the
  // highest number.
  static const int kFirstBuiltinContinuationId = 7;

  int id_;
};


// ----------------------------------------------------------------------------
// I/O support.

// Our version of printf().
V8_EXPORT_PRIVATE void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);

// Prepends the current process ID to the output.
void PRINTF_FORMAT(1, 2) PrintPID(const char* format, ...);

// Prepends the current process ID and given isolate pointer to the output.
void PRINTF_FORMAT(2, 3) PrintIsolate(void* isolate, const char* format, ...);

// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
int PRINTF_FORMAT(2, 3) SNPrintF(Vector<char> str, const char* format, ...);
V8_EXPORT_PRIVATE int PRINTF_FORMAT(2, 0)
    VSNPrintF(Vector<char> str, const char* format, va_list args);
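
// Truncation sketch for SNPrintF (illustrative): printing into a buffer that
// is too small returns -1 but still leaves a NUL-terminated prefix behind.
//   EmbeddedVector<char, 4> small;
//   int written = SNPrintF(small, "%s", "abcdef");   // written == -1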

void StrNCpy(Vector<char> dest, const char* src, size_t n);

// Our version of fflush.
void Flush(FILE* out);

inline void Flush() {
  Flush(stdout);
}


// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed of with DeleteArray by the caller.
char* ReadLine(const char* prompt);


// Append size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int AppendChars(const char* filename,
                const char* str,
                int size,
                bool verbose = true);


// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename,
               const char* str,
               int size,
               bool verbose = true);


// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename,
               const byte* bytes,
               int size,
               bool verbose = true);


// Write the C code
// const char* <varname> = "<str>";
// const int <varname>_len = <len>;
// to the file given by filename. Only the first size chars are written.
int WriteAsCFile(const char* filename, const char* varname,
                 const char* str, int size, bool verbose = true);


// ----------------------------------------------------------------------------
// Memory

// Copies words from |src| to |dst|. The data spans must not overlap.
template <typename T>
inline void CopyWords(T* dst, const T* src, size_t num_words) {
  STATIC_ASSERT(sizeof(T) == kPointerSize);
  DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
         Max(dst, const_cast<T*>(src)));
  DCHECK_GT(num_words, 0);

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const size_t kBlockCopyLimit = 16;

  if (num_words < kBlockCopyLimit) {
    do {
      num_words--;
      *dst++ = *src++;
    } while (num_words > 0);
  } else {
    MemCopy(dst, src, num_words * kPointerSize);
  }
}


// Copies words from |src| to |dst|. No restrictions.
template <typename T>
inline void MoveWords(T* dst, const T* src, size_t num_words) {
  STATIC_ASSERT(sizeof(T) == kPointerSize);
  DCHECK_GT(num_words, 0);

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const size_t kBlockCopyLimit = 16;

  if (num_words < kBlockCopyLimit &&
      ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
    T* end = dst + num_words;
    do {
      num_words--;
      *dst++ = *src++;
    } while (num_words > 0);
  } else {
    MemMove(dst, src, num_words * kPointerSize);
  }
}


// Copies data from |src| to |dst|. The data spans must not overlap.
template <typename T>
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
  STATIC_ASSERT(sizeof(T) == 1);
  DCHECK(Min(dst, const_cast<T*>(src)) + num_bytes <=
         Max(dst, const_cast<T*>(src)));
  if (num_bytes == 0) return;

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = kMinComplexMemCopy;

  if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
    do {
      num_bytes--;
      *dst++ = *src++;
    } while (num_bytes > 0);
  } else {
    MemCopy(dst, src, num_bytes);
  }
}
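
// Sketch for the word copy helper above (illustrative; CopyWords requires
// pointer-sized elements and non-overlapping spans):
//   intptr_t from[3] = {1, 2, 3};
//   intptr_t to[3];
//   CopyWords(to, from, 3);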


template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
  T* a = nullptr;
  U* b = nullptr;
  a = b;  // Fake assignment to check assignability.
  USE(a);
#endif  // DEBUG
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#if V8_HOST_ARCH_32_BIT
#define STOS "addr32 stosl"
#else
#define STOS "stosq"
#endif
#endif

#if defined(MEMORY_SANITIZER)
// MemorySanitizer does not understand inline assembly.
#undef STOS
#endif

#if defined(__GNUC__) && defined(STOS)
  asm volatile(
      "cld;"
      "rep ; " STOS
      : "+&c" (counter), "+&D" (dest)
      : "a" (value)
      : "memory", "cc");
#else
  for (int i = 0; i < counter; i++) {
    dest[i] = value;
  }
#endif

#undef STOS
}

// Simple support to read a file into std::string.
// On return, *exists tells whether the file existed.
V8_EXPORT_PRIVATE std::string ReadFile(const char* filename, bool* exists,
                                       bool verbose = true);
std::string ReadFile(FILE* file, bool* exists, bool verbose = true);

template <typename sourcechar, typename sinkchar>
V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
                                        size_t chars);
#if defined(V8_HOST_ARCH_ARM)
V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
                                 size_t chars);
V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
                                 size_t chars);
V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                                 size_t chars);
#elif defined(V8_HOST_ARCH_MIPS)
V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
                                 size_t chars);
V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                                 size_t chars);
#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
V8_INLINE void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src,
                                 size_t chars);
V8_INLINE void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                                 size_t chars);
#endif

// Copy from 8bit/16bit chars to 8bit/16bit chars.
template <typename sourcechar, typename sinkchar>
V8_INLINE void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars);

template <typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
  DCHECK_LE(sizeof(sourcechar), 2);
  DCHECK_LE(sizeof(sinkchar), 2);
  if (sizeof(sinkchar) == 1) {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  } else {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  }
}

template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
  sinkchar* limit = dest + chars;
  if ((sizeof(*dest) == sizeof(*src)) &&
      (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
    MemCopy(dest, src, chars * sizeof(*dest));
  } else {
    while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
  }
}
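
// Widening-copy sketch for CopyChars (illustrative):
//   const uint8_t one_byte[] = {'h', 'i'};
//   uint16_t two_byte[2];
//   CopyChars(two_byte, one_byte, 2);   // each Latin-1 char becomes a UC16 unit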


#if defined(V8_HOST_ARCH_ARM)
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 2);
      break;
    case 3:
      memcpy(dest, src, 3);
      break;
    case 4:
      memcpy(dest, src, 4);
      break;
    case 5:
      memcpy(dest, src, 5);
      break;
    case 6:
      memcpy(dest, src, 6);
      break;
    case 7:
      memcpy(dest, src, 7);
      break;
    case 8:
      memcpy(dest, src, 8);
      break;
    case 9:
      memcpy(dest, src, 9);
      break;
    case 10:
      memcpy(dest, src, 10);
      break;
    case 11:
      memcpy(dest, src, 11);
      break;
    case 12:
      memcpy(dest, src, 12);
      break;
    case 13:
      memcpy(dest, src, 13);
      break;
    case 14:
      memcpy(dest, src, 14);
      break;
    case 15:
      memcpy(dest, src, 15);
      break;
    default:
      MemCopy(dest, src, chars);
      break;
  }
}


void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
  if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
    MemCopyUint16Uint8(dest, src, chars);
  } else {
    MemCopyUint16Uint8Wrapper(dest, src, chars);
  }
}


void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 4);
      break;
    case 3:
      memcpy(dest, src, 6);
      break;
    case 4:
      memcpy(dest, src, 8);
      break;
    case 5:
      memcpy(dest, src, 10);
      break;
    case 6:
      memcpy(dest, src, 12);
      break;
    case 7:
      memcpy(dest, src, 14);
      break;
    default:
      MemCopy(dest, src, chars * sizeof(*dest));
      break;
  }
}


#elif defined(V8_HOST_ARCH_MIPS)
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars);
  } else {
    MemCopy(dest, src, chars);
  }
}

void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars * sizeof(*dest));
  } else {
    MemCopy(dest, src, chars * sizeof(*dest));
  }
}
#elif defined(V8_HOST_ARCH_PPC) || defined(V8_HOST_ARCH_S390)
#define CASE(n)           \
  case n:                 \
    memcpy(dest, src, n); \
    break
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    CASE(17);
    CASE(18);
    CASE(19);
    CASE(20);
    CASE(21);
    CASE(22);
    CASE(23);
    CASE(24);
    CASE(25);
    CASE(26);
    CASE(27);
    CASE(28);
    CASE(29);
    CASE(30);
    CASE(31);
    CASE(32);
    CASE(33);
    CASE(34);
    CASE(35);
    CASE(36);
    CASE(37);
    CASE(38);
    CASE(39);
    CASE(40);
    CASE(41);
    CASE(42);
    CASE(43);
    CASE(44);
    CASE(45);
    CASE(46);
    CASE(47);
    CASE(48);
    CASE(49);
    CASE(50);
    CASE(51);
    CASE(52);
    CASE(53);
    CASE(54);
    CASE(55);
    CASE(56);
    CASE(57);
    CASE(58);
    CASE(59);
    CASE(60);
    CASE(61);
    CASE(62);
    CASE(63);
    CASE(64);
    default:
      memcpy(dest, src, chars);
      break;
  }
}
#undef CASE

#define CASE(n)               \
  case n:                     \
    memcpy(dest, src, n * 2); \
    break
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    CASE(17);
    CASE(18);
    CASE(19);
    CASE(20);
    CASE(21);
    CASE(22);
    CASE(23);
    CASE(24);
    CASE(25);
    CASE(26);
    CASE(27);
    CASE(28);
    CASE(29);
    CASE(30);
    CASE(31);
    CASE(32);
    default:
      memcpy(dest, src, chars * 2);
      break;
  }
}
#undef CASE
#endif


class StringBuilder : public SimpleStringBuilder {
 public:
  explicit StringBuilder(int size) : SimpleStringBuilder(size) {}
  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) {}

  // Add formatted contents to the builder just like printf().
  void PRINTF_FORMAT(2, 3) AddFormatted(const char* format, ...);

  // Add formatted contents like printf based on a va_list.
  void PRINTF_FORMAT(2, 0) AddFormattedList(const char* format, va_list list);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};


bool DoubleToBoolean(double d);

template <typename Stream>
bool StringToArrayIndex(Stream* stream, uint32_t* index);

// Returns the current stack top. Works correctly with ASAN and SafeStack.
// GetCurrentStackPosition() should not be inlined, because it works on stack
// frames: if it were inlined into a function with a huge stack frame it would
// return an address significantly above the actual current stack position.
V8_NOINLINE uintptr_t GetCurrentStackPosition();

template <typename V>
static inline V ByteReverse(V value) {
  size_t size_of_v = sizeof(value);
  switch (size_of_v) {
    case 2:
#if V8_HAS_BUILTIN_BSWAP16
      return static_cast<V>(__builtin_bswap16(static_cast<uint16_t>(value)));
#else
      return value << 8 | (value >> 8 & 0x00FF);
#endif
    case 4:
#if V8_HAS_BUILTIN_BSWAP32
      return static_cast<V>(__builtin_bswap32(static_cast<uint32_t>(value)));
#else
    {
      size_t bits_of_v = size_of_v * kBitsPerByte;
      return value << (bits_of_v - 8) |
             ((value << (bits_of_v - 24)) & 0x00FF0000) |
             ((value >> (bits_of_v - 24)) & 0x0000FF00) |
             ((value >> (bits_of_v - 8)) & 0x00000FF);
    }
#endif
    case 8:
#if V8_HAS_BUILTIN_BSWAP64
      return static_cast<V>(__builtin_bswap64(static_cast<uint64_t>(value)));
#else
    {
      size_t bits_of_v = size_of_v * kBitsPerByte;
      return value << (bits_of_v - 8) |
             ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
             ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
             ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
             ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
             ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
             ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
             ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
    }
#endif
    default:
      UNREACHABLE();
  }
}
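
// Byte-swap sketch (illustrative values):
//   ByteReverse<uint16_t>(0xBEEF) == 0xEFBE
//   ByteReverse<uint32_t>(0x12345678u) == 0x78563412u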

// Represents a linked list that threads through the nodes in the linked list.
// Entries in the list are pointers to nodes. The nodes need to have a T**
// next() method that returns the location where the next value is stored.
template <typename T>
class ThreadedList final {
 public:
  ThreadedList() : head_(nullptr), tail_(&head_) {}
  void Add(T* v) {
    DCHECK_NULL(*tail_);
    DCHECK_NULL(*v->next());
    *tail_ = v;
    tail_ = v->next();
  }

  void Clear() {
    head_ = nullptr;
    tail_ = &head_;
  }

  class Iterator final {
   public:
    Iterator& operator++() {
      entry_ = (*entry_)->next();
      return *this;
    }
    bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
    T* operator*() { return *entry_; }
    T* operator->() { return *entry_; }
    Iterator& operator=(T* entry) {
      T* next = *(*entry_)->next();
      *entry->next() = next;
      *entry_ = entry;
      return *this;
    }

   private:
    explicit Iterator(T** entry) : entry_(entry) {}

    T** entry_;

    friend class ThreadedList;
  };

  class ConstIterator final {
   public:
    ConstIterator& operator++() {
      entry_ = (*entry_)->next();
      return *this;
    }
    bool operator!=(const ConstIterator& other) {
      return entry_ != other.entry_;
    }
    const T* operator*() const { return *entry_; }

   private:
    explicit ConstIterator(T* const* entry) : entry_(entry) {}

    T* const* entry_;

    friend class ThreadedList;
  };

  Iterator begin() { return Iterator(&head_); }
  Iterator end() { return Iterator(tail_); }

  ConstIterator begin() const { return ConstIterator(&head_); }
  ConstIterator end() const { return ConstIterator(tail_); }

  void Rewind(Iterator reset_point) {
    tail_ = reset_point.entry_;
    *tail_ = nullptr;
  }

  void MoveTail(ThreadedList<T>* parent, Iterator location) {
    if (parent->end() != location) {
      DCHECK_NULL(*tail_);
      *tail_ = *location;
      tail_ = parent->tail_;
      parent->Rewind(location);
    }
  }

  bool is_empty() const { return head_ == nullptr; }

  // Slow. For testing purposes.
  int LengthForTest() {
    int result = 0;
    for (Iterator t = begin(); t != end(); ++t) ++result;
    return result;
  }
  T* AtForTest(int i) {
    Iterator t = begin();
    while (i-- > 0) ++t;
    return *t;
  }

 private:
  T* head_;
  T** tail_;
  DISALLOW_COPY_AND_ASSIGN(ThreadedList);
};

V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
                                    Vector<const char> filter);

// Zap the specified area with a specific byte pattern. This currently defaults
// to int3 on x64 and ia32. On other architectures this will produce unspecified
// instruction sequences.
// TODO(jgruber): Better support for other architectures.
V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
  static constexpr int kZapByte = 0xCC;
  std::memset(reinterpret_cast<void*>(addr), kZapByte, size_in_bytes);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_UTILS_H_