// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_UTILS_H_
#define V8_UTILS_H_

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <cmath>

#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/list.h"
#include "src/vector.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// General helper functions

// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
  c -= '0';
  if (static_cast<unsigned>(c) <= 9) return c;
  // Fold upper case to lower case ('A' | 0x20 == 'a'), then map 'a'..'f'
  // onto 0..5; everything else falls outside the unsigned range check.
  c = (c | 0x20) - ('a' - '0');  // detect 0x11..0x16 and 0x31..0x36.
  if (static_cast<unsigned>(c) <= 5) return c + 10;
  return -1;
}


inline int BoolToInt(bool b) { return b ? 1 : 0; }


// Returns true if the two C strings are equal (or both NULL).  Unlike
// strcmp, NULL arguments are handled; note this returns equality, not an
// ordering.
inline bool CStringEquals(const char* s1, const char* s2) {
  return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
}


// X must be a power of 2.  Returns the number of trailing zeros.
inline int WhichPowerOf2(uint32_t x) {
  DCHECK(base::bits::IsPowerOfTwo32(x));
  int bits = 0;
#ifdef DEBUG
  // Keep the input around in debug builds so the result can be re-checked.
  uint32_t original_x = x;
#endif
  // Binary-search the set bit down into the low nibble.
  if (x >= 0x10000) {
    bits += 16;
    x >>= 16;
  }
  if (x >= 0x100) {
    bits += 8;
    x >>= 8;
  }
  if (x >= 0x10) {
    bits += 4;
    x >>= 4;
  }
  // x is now one of 1, 2, 4, 8 (it was a power of two and all higher bits
  // have been shifted out above).
  switch (x) {
    default: UNREACHABLE();
    case 8: bits++;  // Fall through.
    case 4: bits++;  // Fall through.
    case 2: bits++;  // Fall through.
    case 1: break;
  }
  DCHECK_EQ(static_cast<uint32_t>(1) << bits, original_x);
  return bits;
}


// X must be a power of 2.  Returns the number of trailing zeros.
// 64-bit variant of WhichPowerOf2 above.
inline int WhichPowerOf2_64(uint64_t x) {
  DCHECK(base::bits::IsPowerOfTwo64(x));
  int bits = 0;
#ifdef DEBUG
  uint64_t original_x = x;
#endif
  if (x >= 0x100000000L) {
    bits += 32;
    x >>= 32;
  }
  if (x >= 0x10000) {
    bits += 16;
    x >>= 16;
  }
  if (x >= 0x100) {
    bits += 8;
    x >>= 8;
  }
  if (x >= 0x10) {
    bits += 4;
    x >>= 4;
  }
  switch (x) {
    default: UNREACHABLE();
    case 8: bits++;  // Fall through.
    case 4: bits++;  // Fall through.
    case 2: bits++;  // Fall through.
    case 1: break;
  }
  DCHECK_EQ(static_cast<uint64_t>(1) << bits, original_x);
  return bits;
}


// Returns the number of bits needed to represent x, i.e. one plus the index
// of the most significant set bit (per the msb4 table below); returns 0 for
// x == 0.
inline int MostSignificantBit(uint32_t x) {
  static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
  int nibble = 0;
  if (x & 0xffff0000) {
    nibble += 16;
    x >>= 16;
  }
  if (x & 0xff00) {
    nibble += 8;
    x >>= 8;
  }
  if (x & 0xf0) {
    nibble += 4;
    x >>= 4;
  }
  return nibble + msb4[x];
}


// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
inline int ArithmeticShiftRight(int x, int s) {
  return x >> s;
}


// Three-way comparison: 0 if equal, -1 if a < b, +1 otherwise.  T only
// needs operator== and operator<.
template <typename T>
int Compare(const T& a, const T& b) {
  if (a == b)
    return 0;
  else if (a < b)
    return -1;
  else
    return 1;
}


// Three-way comparison of the pointees (not the pointer values).
template <typename T>
int PointerValueCompare(const T* a, const T* b) {
  return Compare<T>(*a, *b);
}


// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
// handles.
template<typename T> class Handle;  // Forward declaration.
163 template <typename T> 164 int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) { 165 return Compare<T*>(*(*a), *(*b)); 166 } 167 168 169 template <typename T, typename U> 170 inline bool IsAligned(T value, U alignment) { 171 return (value & (alignment - 1)) == 0; 172 } 173 174 175 // Returns true if (addr + offset) is aligned. 176 inline bool IsAddressAligned(Address addr, 177 intptr_t alignment, 178 int offset = 0) { 179 intptr_t offs = OffsetFrom(addr + offset); 180 return IsAligned(offs, alignment); 181 } 182 183 184 // Returns the maximum of the two parameters. 185 template <typename T> 186 T Max(T a, T b) { 187 return a < b ? b : a; 188 } 189 190 191 // Returns the minimum of the two parameters. 192 template <typename T> 193 T Min(T a, T b) { 194 return a < b ? a : b; 195 } 196 197 198 // Returns the absolute value of its argument. 199 template <typename T> 200 T Abs(T a) { 201 return a < 0 ? -a : a; 202 } 203 204 205 // Floor(-0.0) == 0.0 206 inline double Floor(double x) { 207 #if V8_CC_MSVC 208 if (x == 0) return x; // Fix for issue 3477. 209 #endif 210 return std::floor(x); 211 } 212 213 214 // TODO(svenpanne) Clean up the whole power-of-2 mess. 215 inline int32_t WhichPowerOf2Abs(int32_t x) { 216 return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x)); 217 } 218 219 220 // Obtains the unsigned type corresponding to T 221 // available in C++11 as std::make_unsigned 222 template<typename T> 223 struct make_unsigned { 224 typedef T type; 225 }; 226 227 228 // Template specializations necessary to have make_unsigned work 229 template<> struct make_unsigned<int32_t> { 230 typedef uint32_t type; 231 }; 232 233 234 template<> struct make_unsigned<int64_t> { 235 typedef uint64_t type; 236 }; 237 238 239 // ---------------------------------------------------------------------------- 240 // BitField is a help template for encoding and decode bitfield with 241 // unsigned content. 

template<class T, int shift, int size, class U>
class BitFieldBase {
 public:
  // A type U mask of bit field. To use all bits of a type U of x bits
  // in a bitfield without compiler warnings we have to compute 2^x
  // without using a shift count of x in the computation.
  static const U kOne = static_cast<U>(1U);
  static const U kMask = ((kOne << shift) << size) - (kOne << shift);
  static const U kShift = shift;
  static const U kSize = size;
  // First bit position after this field; convenient for chaining fields.
  static const U kNext = kShift + kSize;

  // Value for the field with all bits set.
  static const T kMax = static_cast<T>((kOne << size) - 1);

  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
  }

  // Returns a type U with the bit field value encoded.
  static U encode(T value) {
    DCHECK(is_valid(value));
    return static_cast<U>(value) << shift;
  }

  // Returns a type U with the bit field value updated.
  static U update(U previous, T value) {
    return (previous & ~kMask) | encode(value);
  }

  // Extracts the bit field from the value.
  static T decode(U value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};


// Convenience aliases of BitFieldBase for the common storage widths.
template <class T, int shift, int size>
class BitField8 : public BitFieldBase<T, shift, size, uint8_t> {};


template <class T, int shift, int size>
class BitField16 : public BitFieldBase<T, shift, size, uint16_t> {};


template<class T, int shift, int size>
class BitField : public BitFieldBase<T, shift, size, uint32_t> { };


template<class T, int shift, int size>
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };


// ----------------------------------------------------------------------------
// BitSetComputer is a help template for encoding and decoding information for
// a variable number of items in an array.
//
// To encode boolean data in a smi array you would use:
//  typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
//
template <class T, int kBitsPerItem, int kBitsPerWord, class U>
class BitSetComputer {
 public:
  static const int kItemsPerWord = kBitsPerWord / kBitsPerItem;
  static const int kMask = (1 << kBitsPerItem) - 1;

  // The number of array elements required to embed T information for each item.
  static int word_count(int items) {
    if (items == 0) return 0;
    return (items - 1) / kItemsPerWord + 1;
  }

  // The array index to look at for item.
  static int index(int base_index, int item) {
    return base_index + item / kItemsPerWord;
  }

  // Extract T data for a given item from data.
  static T decode(U data, int item) {
    return static_cast<T>((data >> shift(item)) & kMask);
  }

  // Return the encoding for a store of value for item in previous.
  static U encode(U previous, int item, T value) {
    int shift_value = shift(item);
    int set_bits = (static_cast<int>(value) << shift_value);
    return (previous & ~(kMask << shift_value)) | set_bits;
  }

  // Bit offset of |item| within its word.
  static int shift(int item) { return (item % kItemsPerWord) * kBitsPerItem; }
};


// ----------------------------------------------------------------------------
// Hash function.

static const uint32_t kZeroHashSeed = 0;

// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
// Mixes the bits of |key| (xor'ed with |seed|) and masks the result to
// 30 bits.
inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key;
  hash = hash ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}


// Mixes a 64-bit key down to a 32-bit hash (same family as above).
inline uint32_t ComputeLongHash(uint64_t key) {
  uint64_t hash = key;
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  return static_cast<uint32_t>(hash);
}


// Hashes a pointer by feeding its (truncated) address bits to
// ComputeIntegerHash with the zero seed.
inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeIntegerHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
      v8::internal::kZeroHashSeed);
}


// ----------------------------------------------------------------------------
// Generated memcpy/memmove

// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions(Isolate* isolate);

#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;

// Copy memory area. No restrictions.
void MemMove(void* dest, const void* src, size_t size);
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);

// Keep the distinction of "move" vs. "copy" for the benefit of other
// architectures.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  MemMove(dest, src, size);
}
#elif defined(V8_HOST_ARCH_ARM)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
extern MemCopyUint8Function memcopy_uint8_function;
// Plain memcpy with the MemCopyUint8Function signature, so it can be
// installed as memcopy_uint8_function.
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}

// Widening copy: each uint8_t source byte is stored into a uint16_t slot.
typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
                                           size_t size);
extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
                               size_t chars);
// For values < 12, the assembler function is slower than the inlined C code.
const int kMinComplexConvertMemCopy = 12;
V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
                                  size_t size) {
  (*memcopy_uint16_uint8_function)(dest, src, size);
}
#elif defined(V8_HOST_ARCH_MIPS)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}
#else
// Copy memory area to disjoint memory area.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}
const int kMinComplexMemCopy = 16 * kPointerSize;
#endif  // V8_TARGET_ARCH_IA32


// ----------------------------------------------------------------------------
// Miscellaneous

// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false) {}

 private:
  // Only Access may reserve/release the instance.
  template <typename S> friend class Access;
  T instance_;
  bool is_reserved_;
};


// Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  // Reserves the resource for the lifetime of this Access object; it is an
  // error (DCHECK) if the resource is already reserved.
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    DCHECK(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  // Releases the reservation on destruction.
  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = NULL;
    instance_ = NULL;
  }

  T* value() { return instance_; }
  T* operator -> () { return instance_; }

 private:
  StaticResource<T>* resource_;
  T* instance_;
};


// A pointer that can only be set once and doesn't allow NULL values.
template<typename T>
class SetOncePointer {
 public:
  SetOncePointer() : pointer_(NULL) { }

  bool is_set() const { return pointer_ != NULL; }

  // The pointer must have been set (DCHECK) before it is read.
  T* get() const {
    DCHECK(pointer_ != NULL);
    return pointer_;
  }

  // May be called exactly once, and only with a non-NULL value (DCHECK).
  void set(T* value) {
    DCHECK(pointer_ == NULL && value != NULL);
    pointer_ = value;
  }

 private:
  T* pointer_;
};


// A Vector whose storage is a fixed-size inline buffer, avoiding heap
// allocation for up to kSize elements.
template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) { }

  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
    for (int i = 0; i < kSize; ++i) {
      buffer_[i] = initial_value;
    }
  }

  // When copying, make underlying Vector to reference our buffer.
  EmbeddedVector(const EmbeddedVector& rhs)
      : Vector<T>(rhs) {
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
  }

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
    return *this;
  }

 private:
  T buffer_[kSize];
};


/*
 * A class that collects values into a backing store.
 * Specialized versions of the class can allow access to the backing store
 * in different ways.
 * There is no guarantee that the backing store is contiguous (and, as a
 * consequence, no guarantees that consecutively added elements are adjacent
 * in memory). The collector may move elements unless it has guaranteed not
 * to.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
 public:
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    current_chunk_ = Vector<T>::New(initial_capacity);
  }

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
  }

  // Add a single element.
  inline void Add(T value) {
    if (index_ >= current_chunk_.length()) {
      Grow(1);
    }
    current_chunk_[index_] = value;
    index_++;
    size_++;
  }

  // Add a block of contiguous elements and return a Vector backed by the
  // memory area.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(int size, T initial_value) {
    DCHECK(size > 0);
    if (size > current_chunk_.length() - index_) {
      Grow(size);
    }
    T* position = current_chunk_.start() + index_;
    index_ += size;
    size_ += size;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    }
    return Vector<T>(position, size);
  }


  // Add a contiguous block of elements and return a vector backed
  // by the added block.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(Vector<const T> source) {
    if (source.length() > current_chunk_.length() - index_) {
      Grow(source.length());
    }
    T* position = current_chunk_.start() + index_;
    index_ += source.length();
    size_ += source.length();
    for (int i = 0; i < source.length(); i++) {
      position[i] = source[i];
    }
    return Vector<T>(position, source.length());
  }


  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    DCHECK(size_ <= destination.length());
    int position = 0;
    // Full chunks first, in allocation order, then the partially filled
    // current chunk.
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
        position++;
      }
    }
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];
      position++;
    }
  }

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);
    WriteTo(new_store);
    return new_store;
  }

  // Resets the collector to be empty.
  virtual void Reset() {
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
    chunks_.Rewind(0);
    index_ = 0;
    size_ = 0;
  }

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

 protected:
  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.

  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    DCHECK(growth_factor > 1);
    int new_capacity;
    int current_length = current_chunk_.length();
    if (current_length < kMinCapacity) {
      // The collector started out as empty.
      new_capacity = min_capacity * growth_factor;
      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
    } else {
      // Grow geometrically, but cap the increment at max_growth and always
      // make room for at least min_capacity new elements.
      int growth = current_length * (growth_factor - 1);
      if (growth > max_growth) {
        growth = max_growth;
      }
      new_capacity = current_length + growth;
      if (new_capacity < min_capacity) {
        new_capacity = min_capacity + growth;
      }
    }
    NewChunk(new_capacity);
    DCHECK(index_ + min_capacity <= current_chunk_.length());
  }

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual void NewChunk(int new_capacity) {
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    if (index_ > 0) {
      chunks_.Add(current_chunk_.SubVector(0, index_));
    } else {
      // Nothing written into the current chunk yet; it can be freed.
      current_chunk_.Dispose();
    }
    current_chunk_ = new_chunk;
    index_ = 0;
  }
};


/*
 * A collector that allows sequences of values to be guaranteed to
 * stay consecutive.
 * If the backing store grows while a sequence is active, the current
 * sequence might be moved, but after the sequence is ended, it will
 * not move again.
 * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
 * as well, if inside an active sequence where another element is added.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
 public:
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  // Marks the current position as the start of a sequence; only one
  // sequence may be active at a time (DCHECK).
  void StartSequence() {
    DCHECK(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
  }

  // Ends the active sequence and returns a vector over its elements
  // (empty if nothing was added since StartSequence).
  Vector<T> EndSequence() {
    DCHECK(sequence_start_ != kNoSequence);
    int sequence_start = sequence_start_;
    sequence_start_ = kNoSequence;
    if (sequence_start == this->index_) return Vector<T>();
    return this->current_chunk_.SubVector(sequence_start, this->index_);
  }

  // Drops the currently added sequence, and all collected elements in it.
  void DropSequence() {
    DCHECK(sequence_start_ != kNoSequence);
    int sequence_length = this->index_ - sequence_start_;
    this->index_ = sequence_start_;
    this->size_ -= sequence_length;
    sequence_start_ = kNoSequence;
  }

  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();
  }

 private:
  static const int kNoSequence = -1;
  int sequence_start_;

  // Move the currently active sequence to the new chunk.
  virtual void NewChunk(int new_capacity) {
    if (sequence_start_ == kNoSequence) {
      // Fall back on default behavior if no sequence has been started.
      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
      return;
    }
    // Allocate a chunk large enough for the copied sequence plus the
    // requested capacity, and copy the active sequence to its front.
    int sequence_length = this->index_ - sequence_start_;
    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
    DCHECK(sequence_length < new_chunk.length());
    for (int i = 0; i < sequence_length; i++) {
      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
    }
    if (sequence_start_ > 0) {
      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
    } else {
      this->current_chunk_.Dispose();
    }
    this->current_chunk_ = new_chunk;
    this->index_ = sequence_length;
    sequence_start_ = 0;
  }
};


// Compare 8bit/16bit chars to 8bit/16bit chars.
// Three-way comparison of |chars| characters; both pointers must refer to at
// least |chars| valid elements.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
                                size_t chars) {
  const lchar* limit = lhs + chars;
  if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
    // memcmp compares byte-by-byte, yielding wrong results for two-byte
    // strings on little-endian systems.  It is therefore only used when
    // both sides are single-byte.
    return memcmp(lhs, rhs, chars);
  }
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
    ++lhs;
    ++rhs;
  }
  return 0;
}

// Dispatches to CompareCharsUnsigned with the arguments reinterpreted as
// unsigned 8/16-bit characters, so comparison is by code unit value.
template <typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
  DCHECK(sizeof(lchar) <= 2);
  DCHECK(sizeof(rchar) <= 2);
  if (sizeof(lchar) == 1) {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  } else {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  }
}


// Calculate 10^exponent.
// Only valid for 1 <= exponent <= 9 (DCHECKed); the result fits in an int.
inline int TenToThe(int exponent) {
  DCHECK(exponent <= 9);
  DCHECK(exponent >= 1);
  int answer = 10;
  for (int i = 1; i < exponent; i++) answer *= 10;
  return answer;
}


// A fixed-size inline array with bounds-checked (DCHECK) indexing.
template<typename ElementType, int NumElements>
class EmbeddedContainer {
 public:
  EmbeddedContainer() : elems_() { }

  int length() const { return NumElements; }
  const ElementType& operator[](int i) const {
    DCHECK(i < length());
    return elems_[i];
  }
  ElementType& operator[](int i) {
    DCHECK(i < length());
    return elems_[i];
  }

 private:
  ElementType elems_[NumElements];
};


// Specialization for zero elements: a zero-length array member would be
// ill-formed, so indexing is simply unreachable.
template<typename ElementType>
class EmbeddedContainer<ElementType, 0> {
 public:
  int length() const { return 0; }
  const ElementType& operator[](int i) const {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
  ElementType& operator[](int i) {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
};


// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that checks the
// buffer bounds on all operations in debug mode.
// This simple base class does not allow formatted output.
class SimpleStringBuilder {
 public:
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit SimpleStringBuilder(int size);

  // Wraps a caller-owned buffer; the builder does not take ownership.
  SimpleStringBuilder(char* buffer, int size)
      : buffer_(buffer, size), position_(0) { }

  ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }

  int size() const { return buffer_.length(); }

  // Get the current position in the builder.
  int position() const {
    DCHECK(!is_finalized());
    return position_;
  }

  // Reset the position.
  void Reset() { position_ = 0; }

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  // instead.
  void AddCharacter(char c) {
    DCHECK(c != '\0');
    DCHECK(!is_finalized() && position_ < buffer_.length());
    buffer_[position_++] = c;
  }

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given 0-terminated string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Add the decimal representation of the value.
  void AddDecimalInteger(int value);

  // Finalize the string by 0-terminating it and returning the buffer.
  char* Finalize();

 protected:
  Vector<char> buffer_;
  int position_;

  // A negative position_ marks the builder as finalized.
  bool is_finalized() const { return position_ < 0; }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
};


// A poor man's version of STL's bitset: A bit set of enums E (without explicit
// values), fitting into an integral type T.
946 template <class E, class T = int> 947 class EnumSet { 948 public: 949 explicit EnumSet(T bits = 0) : bits_(bits) {} 950 bool IsEmpty() const { return bits_ == 0; } 951 bool Contains(E element) const { return (bits_ & Mask(element)) != 0; } 952 bool ContainsAnyOf(const EnumSet& set) const { 953 return (bits_ & set.bits_) != 0; 954 } 955 void Add(E element) { bits_ |= Mask(element); } 956 void Add(const EnumSet& set) { bits_ |= set.bits_; } 957 void Remove(E element) { bits_ &= ~Mask(element); } 958 void Remove(const EnumSet& set) { bits_ &= ~set.bits_; } 959 void RemoveAll() { bits_ = 0; } 960 void Intersect(const EnumSet& set) { bits_ &= set.bits_; } 961 T ToIntegral() const { return bits_; } 962 bool operator==(const EnumSet& set) { return bits_ == set.bits_; } 963 bool operator!=(const EnumSet& set) { return bits_ != set.bits_; } 964 EnumSet<E, T> operator|(const EnumSet& set) const { 965 return EnumSet<E, T>(bits_ | set.bits_); 966 } 967 968 private: 969 T Mask(E element) const { 970 // The strange typing in DCHECK is necessary to avoid stupid warnings, see: 971 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680 972 DCHECK(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT)); 973 return static_cast<T>(1) << element; 974 } 975 976 T bits_; 977 }; 978 979 // Bit field extraction. 980 inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) { 981 return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1); 982 } 983 984 inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) { 985 return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1); 986 } 987 988 inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) { 989 return (x << (31 - msb)) >> (lsb + 31 - msb); 990 } 991 992 inline int signed_bitextract_64(int msb, int lsb, int x) { 993 // TODO(jbramley): This is broken for big bitfields. 994 return (x << (63 - msb)) >> (lsb + 63 - msb); 995 } 996 997 // Check number width. 
// Returns true if |x| is representable as a signed |n|-bit two's-complement
// integer, i.e. -2^(n-1) <= x < 2^(n-1). Requires 0 < n < 64.
inline bool is_intn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < 64));
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

// Returns true if |x| is representable as an unsigned |n|-bit integer.
// NOTE(review): for negative |x| the right shift of a signed value is
// implementation-defined; on the supported compilers it sign-extends, so the
// result is a correct false — confirm if porting to a new toolchain.
inline bool is_uintn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return !(x >> n);
}

// Returns the low |n| bits of |x|.
template <class T>
inline T truncate_to_intn(T x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
}

// Expands V(n) for every n in [1, 63]; used to stamp out the width-specific
// helpers below.
#define INT_1_TO_63_LIST(V) \
V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

// Generates is_int1()..is_int63(), is_uint1()..is_uint63() and
// truncate_to_int1()..truncate_to_int63().
#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N) \
template <class T> \
inline bool is_uint##N(T x) { return is_uintn(x, N); }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
template <class T> \
inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N

// Wrapper around an integer id of a type-feedback site. kNoneId (-1) marks
// "no feedback".
class TypeFeedbackId {
 public:
  explicit TypeFeedbackId(int id) : id_(id) { }
  int ToInt() const { return id_; }

  static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
  bool IsNone() const { return id_ == kNoneId; }

 private:
  static const int kNoneId = -1;

  int id_;
};

// Ordering on TypeFeedbackIds follows the underlying integer ids.
inline bool operator<(TypeFeedbackId lhs, TypeFeedbackId rhs) {
  return lhs.ToInt() < rhs.ToInt();
}
inline bool operator>(TypeFeedbackId lhs, TypeFeedbackId rhs) {
  return lhs.ToInt() > rhs.ToInt();
}


// Index of a slot in a feedback vector. kInvalidSlot (-1) marks an invalid
// slot; the default constructor produces one.
class FeedbackVectorSlot {
 public:
  FeedbackVectorSlot() : id_(kInvalidSlot) {}
  explicit FeedbackVectorSlot(int id) : id_(id) {}

  int ToInt() const { return id_; }

  static FeedbackVectorSlot Invalid() { return FeedbackVectorSlot(); }
  bool IsInvalid() const { return id_ == kInvalidSlot; }

  bool operator==(FeedbackVectorSlot that) const {
    return this->id_ == that.id_;
  }
  bool operator!=(FeedbackVectorSlot that) const { return !(*this == that); }

  friend size_t hash_value(FeedbackVectorSlot slot) { return slot.ToInt(); }
  friend std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot);

 private:
  static const int kInvalidSlot = -1;

  int id_;
};


// Integer id naming a point in the AST/code at which deoptimized execution
// can resume. A handful of small ids are reserved for well-known entry
// points; kFirstUsableId and up are handed out to ordinary AST nodes.
class BailoutId {
 public:
  explicit BailoutId(int id) : id_(id) { }
  int ToInt() const { return id_; }

  static BailoutId None() { return BailoutId(kNoneId); }
  static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
  static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
  static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
  static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
  static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
  static BailoutId StubEntry() { return BailoutId(kStubEntryId); }

  bool IsNone() const { return id_ == kNoneId; }
  bool operator==(const BailoutId& other) const { return id_ == other.id_; }
  bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
  friend size_t hash_value(BailoutId);
  friend std::ostream& operator<<(std::ostream&, BailoutId);

 private:
  static const int kNoneId = -1;

  // Using 0 could disguise errors.
  static const int kScriptContextId = 1;
  static const int kFunctionContextId = 2;
  static const int kFunctionEntryId = 3;

  // This AST id identifies the point after the declarations have been visited.
  // We need it to capture the environment effects of declarations that emit
  // code (function declarations).
  static const int kDeclarationsId = 4;

  // Every FunctionState starts with this id.
  static const int kFirstUsableId = 5;

  // Every compiled stub starts with this id.
  static const int kStubEntryId = 6;

  int id_;
};


// ----------------------------------------------------------------------------
// I/O support.

#if __GNUC__ >= 4
// On gcc we can ask the compiler to check the types of %d-style format
// specifiers and their associated arguments. TODO(erikcorry) fix this
// so it works on MacOSX.
#if defined(__MACH__) && defined(__APPLE__)
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
#else  // MacOsX.
// The (format, m, n) arguments name the 1-based positions of the format
// string and of the first variadic argument; the *_METHOD_* variants shift
// both by one to account for the implicit |this| parameter.
#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
#endif
#else
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
#endif

// Our version of printf().
void PRINTF_CHECKING PrintF(const char* format, ...);
void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);

// Prepends the current process ID to the output.
void PRINTF_CHECKING PrintPID(const char* format, ...);

// Prepends the current process ID and given isolate pointer to the output.
void PrintIsolate(void* isolate, const char* format, ...);

// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
int VSNPrintF(Vector<char> str, const char* format, va_list args);

// Copies at most |n| chars from |src| into |dest|; see the implementation
// for the exact termination behavior.
void StrNCpy(Vector<char> dest, const char* src, size_t n);

// Our version of fflush.
void Flush(FILE* out);

// Convenience overload: flush stdout.
inline void Flush() {
  Flush(stdout);
}


// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed off with DeleteArray by the caller.
char* ReadLine(const char* prompt);


// Read and return the raw bytes in a file. the size of the buffer is returned
// in size.
// The returned buffer must be freed by the caller.
byte* ReadBytes(const char* filename, int* size, bool verbose = true);


// Append size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int AppendChars(const char* filename,
                const char* str,
                int size,
                bool verbose = true);


// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename,
               const char* str,
               int size,
               bool verbose = true);


// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename,
               const byte* bytes,
               int size,
               bool verbose = true);


// Write the C code
// const char* <varname> = "<str>";
// const int <varname>_len = <len>;
// to the file given by filename. Only the first len chars are written.
1218 int WriteAsCFile(const char* filename, const char* varname, 1219 const char* str, int size, bool verbose = true); 1220 1221 1222 // ---------------------------------------------------------------------------- 1223 // Memory 1224 1225 // Copies words from |src| to |dst|. The data spans must not overlap. 1226 template <typename T> 1227 inline void CopyWords(T* dst, const T* src, size_t num_words) { 1228 STATIC_ASSERT(sizeof(T) == kPointerSize); 1229 // TODO(mvstanton): disabled because mac builds are bogus failing on this 1230 // assert. They are doing a signed comparison. Investigate in 1231 // the morning. 1232 // DCHECK(Min(dst, const_cast<T*>(src)) + num_words <= 1233 // Max(dst, const_cast<T*>(src))); 1234 DCHECK(num_words > 0); 1235 1236 // Use block copying MemCopy if the segment we're copying is 1237 // enough to justify the extra call/setup overhead. 1238 static const size_t kBlockCopyLimit = 16; 1239 1240 if (num_words < kBlockCopyLimit) { 1241 do { 1242 num_words--; 1243 *dst++ = *src++; 1244 } while (num_words > 0); 1245 } else { 1246 MemCopy(dst, src, num_words * kPointerSize); 1247 } 1248 } 1249 1250 1251 // Copies words from |src| to |dst|. No restrictions. 1252 template <typename T> 1253 inline void MoveWords(T* dst, const T* src, size_t num_words) { 1254 STATIC_ASSERT(sizeof(T) == kPointerSize); 1255 DCHECK(num_words > 0); 1256 1257 // Use block copying MemCopy if the segment we're copying is 1258 // enough to justify the extra call/setup overhead. 1259 static const size_t kBlockCopyLimit = 16; 1260 1261 if (num_words < kBlockCopyLimit && 1262 ((dst < src) || (dst >= (src + num_words * kPointerSize)))) { 1263 T* end = dst + num_words; 1264 do { 1265 num_words--; 1266 *dst++ = *src++; 1267 } while (num_words > 0); 1268 } else { 1269 MemMove(dst, src, num_words * kPointerSize); 1270 } 1271 } 1272 1273 1274 // Copies data from |src| to |dst|. The data spans must not overlap. 
// Copies |num_bytes| bytes from |src| to |dst|; T must be a byte-sized type.
// The data spans must not overlap.
template <typename T>
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
  STATIC_ASSERT(sizeof(T) == 1);
  DCHECK(Min(dst, const_cast<T*>(src)) + num_bytes <=
         Max(dst, const_cast<T*>(src)));
  if (num_bytes == 0) return;

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = kMinComplexMemCopy;

  if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
    do {
      num_bytes--;
      *dst++ = *src++;
    } while (num_bytes > 0);
  } else {
    MemCopy(dst, src, num_bytes);
  }
}


// Fills dest[0..counter-1] with |value|. U* must be assignable to T*
// (checked at compile time in debug builds via the fake assignment below).
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
  T* a = NULL;
  U* b = NULL;
  a = b;  // Fake assignment to check assignability.
  USE(a);
#endif  // DEBUG
// Pick the pointer-sized x86 store instruction for the rep-stos fast path.
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#if V8_HOST_ARCH_32_BIT
#define STOS "addr32 stosl"
#else
#define STOS "stosq"
#endif
#endif
#if defined(__native_client__)
// This STOS sequence does not validate for x86_64 Native Client.
// Here we #undef STOS to force use of the slower C version.
// TODO(bradchen): Profile V8 and implement a faster REP STOS
// here if the profile indicates it matters.
#undef STOS
#endif

#if defined(MEMORY_SANITIZER)
// MemorySanitizer does not understand inline assembly.
#undef STOS
#endif

#if defined(__GNUC__) && defined(STOS)
  // rep stos stores the accumulator ("a" = value) |counter| times ("c") at
  // the destination index ("D" = dest), advancing it; cld ensures the copy
  // runs forward.
  asm volatile(
      "cld;"
      "rep ; " STOS
      : "+&c" (counter), "+&D" (dest)
      : "a" (value)
      : "memory", "cc");
#else
  // Portable fallback for non-GNU compilers and non-x86 hosts.
  for (int i = 0; i < counter; i++) {
    dest[i] = value;
  }
#endif

#undef STOS
}


// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
Vector<const char> ReadFile(const char* filename,
                            bool* exists,
                            bool verbose = true);
Vector<const char> ReadFile(FILE* file,
                            bool* exists,
                            bool verbose = true);


// Generic character-copy primitive; the declarations below are specialized
// per host architecture further down in this file.
template <typename sourcechar, typename sinkchar>
INLINE(static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
                                     size_t chars));
#if defined(V8_HOST_ARCH_ARM)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src,
                              size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                              size_t chars));
#elif defined(V8_HOST_ARCH_MIPS)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                              size_t chars));
#elif defined(V8_HOST_ARCH_PPC)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src,
                              size_t chars));
#endif

// Copy from 8bit/16bit chars to 8bit/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars));

// Copies |chars| characters from |src| to |dest|, widening or narrowing
// between one- and two-byte representations as dictated by the template
// arguments. The sizeof comparisons are compile-time constants, so each
// instantiation reduces to a single CopyCharsUnsigned call.
template <typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, size_t chars) {
  DCHECK(sizeof(sourcechar) <= 2);
  DCHECK(sizeof(sinkchar) <= 2);
  if (sizeof(sinkchar) == 1) {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  } else {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  }
}

// Generic fallback: element-wise copy (with per-element cast) for short or
// width-changing runs, block MemCopy for long same-width runs.
template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, size_t chars) {
  sinkchar* limit = dest + chars;
  if ((sizeof(*dest) == sizeof(*src)) &&
      // NOTE(review): the cast to int mixes signed/unsigned in this
      // comparison; presumably harmless for realistic values of
      // kMinComplexMemCopy — confirm before reusing elsewhere.
      (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
    MemCopy(dest, src, chars * sizeof(*dest));
  } else {
    while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
  }
}


#if defined(V8_HOST_ARCH_ARM)
// ARM: dispatch small copies to fixed-size memcpy calls (which the compiler
// can expand inline); anything larger goes through MemCopy.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 2);
      break;
    case 3:
      memcpy(dest, src, 3);
      break;
    case 4:
      memcpy(dest, src, 4);
      break;
    case 5:
      memcpy(dest, src, 5);
      break;
    case 6:
      memcpy(dest, src, 6);
      break;
    case 7:
      memcpy(dest, src, 7);
      break;
    case 8:
      memcpy(dest, src, 8);
      break;
    case 9:
      memcpy(dest, src, 9);
      break;
    case 10:
      memcpy(dest, src, 10);
      break;
    case 11:
      memcpy(dest, src, 11);
      break;
    case 12:
      memcpy(dest, src, 12);
      break;
    case 13:
      memcpy(dest, src, 13);
      break;
    case 14:
      memcpy(dest, src, 14);
      break;
    case 15:
      memcpy(dest, src, 15);
      break;
    default:
      MemCopy(dest, src, chars);
      break;
  }
}


// Widening 8->16 bit copy; long runs use the assembler-backed
// MemCopyUint16Uint8, short runs the plain wrapper.
void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, size_t chars) {
  if (chars >= static_cast<size_t>(kMinComplexConvertMemCopy)) {
    MemCopyUint16Uint8(dest, src, chars);
  } else {
    MemCopyUint16Uint8Wrapper(dest, src, chars);
  }
}


// 16->16 bit copy; note the memcpy sizes are in bytes (2 * chars).
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 4);
      break;
    case 3:
      memcpy(dest, src, 6);
      break;
    case 4:
      memcpy(dest, src, 8);
      break;
    case 5:
      memcpy(dest, src, 10);
      break;
    case 6:
      memcpy(dest, src, 12);
      break;
    case 7:
      memcpy(dest, src, 14);
      break;
    default:
      MemCopy(dest, src, chars * sizeof(*dest));
      break;
  }
}


#elif defined(V8_HOST_ARCH_MIPS)
// MIPS: plain memcpy for short runs, MemCopy for long ones.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars);
  } else {
    MemCopy(dest, src, chars);
  }
}

void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars * sizeof(*dest));
  } else {
    MemCopy(dest, src, chars * sizeof(*dest));
  }
}
#elif defined(V8_HOST_ARCH_PPC)
// PPC: expand every fixed size up to 64 bytes into its own memcpy case so
// the compiler emits straight-line code for it.
#define CASE(n) \
  case n: \
    memcpy(dest, src, n); \
    break
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    CASE(17);
    CASE(18);
    CASE(19);
    CASE(20);
    CASE(21);
    CASE(22);
    CASE(23);
    CASE(24);
    CASE(25);
    CASE(26);
    CASE(27);
    CASE(28);
    CASE(29);
    CASE(30);
    CASE(31);
    CASE(32);
    CASE(33);
    CASE(34);
    CASE(35);
    CASE(36);
    CASE(37);
    CASE(38);
    CASE(39);
    CASE(40);
    CASE(41);
    CASE(42);
    CASE(43);
    CASE(44);
    CASE(45);
    CASE(46);
    CASE(47);
    CASE(48);
    CASE(49);
    CASE(50);
    CASE(51);
    CASE(52);
    CASE(53);
    CASE(54);
    CASE(55);
    CASE(56);
    CASE(57);
    CASE(58);
    CASE(59);
    CASE(60);
    CASE(61);
    CASE(62);
    CASE(63);
    CASE(64);
    default:
      memcpy(dest, src, chars);
      break;
  }
}
#undef CASE

// 16-bit variant: case n copies n characters = n * 2 bytes.
#define CASE(n) \
  case n: \
    memcpy(dest, src, n * 2); \
    break
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    CASE(2);
    CASE(3);
    CASE(4);
    CASE(5);
    CASE(6);
    CASE(7);
    CASE(8);
    CASE(9);
    CASE(10);
    CASE(11);
    CASE(12);
    CASE(13);
    CASE(14);
    CASE(15);
    CASE(16);
    CASE(17);
    CASE(18);
    CASE(19);
    CASE(20);
    CASE(21);
    CASE(22);
    CASE(23);
    CASE(24);
    CASE(25);
    CASE(26);
    CASE(27);
    CASE(28);
    CASE(29);
    CASE(30);
    CASE(31);
    CASE(32);
    default:
      memcpy(dest, src, chars * 2);
      break;
  }
}
#undef CASE
#endif


// SimpleStringBuilder extended with printf-style formatted appends.
class StringBuilder : public SimpleStringBuilder {
 public:
  explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add formatted contents like printf based on a va_list.
  void AddFormattedList(const char* format, va_list list);
 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};


bool DoubleToBoolean(double d);

// Parses the characters produced by |stream| as an unsigned 32-bit array
// index into *index. Returns false on a non-digit, a leading zero followed
// by more characters, or overflow.
template <typename Stream>
bool StringToArrayIndex(Stream* stream, uint32_t* index) {
  uint16_t ch = stream->GetNext();

  // If the string begins with a '0' character, it must only consist
  // of it to be a legal array index.
  if (ch == '0') {
    *index = 0;
    return !stream->HasMore();
  }

  // Convert string to uint32 array index; character by character.
  int d = ch - '0';
  if (d < 0 || d > 9) return false;
  uint32_t result = d;
  while (stream->HasMore()) {
    d = stream->GetNext() - '0';
    if (d < 0 || d > 9) return false;
    // Check that the new result is below the 32 bit limit.
    // 429496729 == 0xFFFFFFFF / 10; the ((d + 3) >> 3) term evaluates to 1
    // exactly when d > 4, tightening the bound so result * 10 + d cannot
    // wrap around (0xFFFFFFFF itself, which is not a valid array index, is
    // also rejected).
    if (result > 429496729U - ((d + 3) >> 3)) return false;
    result = (result * 10) + d;
  }

  *index = result;
  return true;
}


// Returns current value of top of the stack. Works correctly with ASAN.
DISABLE_ASAN
inline uintptr_t GetCurrentStackPosition() {
  // Takes the address of the limit variable in order to find out where
  // the top of stack is right now.
  uintptr_t limit = reinterpret_cast<uintptr_t>(&limit);
  return limit;
}

// Reads a double from |p|, which may not be 8-byte aligned on MIPS.
static inline double ReadDoubleValue(const void* p) {
#ifndef V8_TARGET_ARCH_MIPS
  return *reinterpret_cast<const double*>(p);
#else  // V8_TARGET_ARCH_MIPS
  // Prevent compiler from using load-double (mips ldc1) on (possibly)
  // non-64-bit aligned address.
  union conversion {
    double d;
    uint32_t u[2];
  } c;
  const uint32_t* ptr = reinterpret_cast<const uint32_t*>(p);
  c.u[0] = *ptr;
  c.u[1] = *(ptr + 1);
  return c.d;
#endif  // V8_TARGET_ARCH_MIPS
}


// Writes a double to |p|, which may not be 8-byte aligned on MIPS.
static inline void WriteDoubleValue(void* p, double value) {
#ifndef V8_TARGET_ARCH_MIPS
  *(reinterpret_cast<double*>(p)) = value;
#else  // V8_TARGET_ARCH_MIPS
  // Prevent compiler from using store-double (mips sdc1) on (possibly)
  // non-64-bit aligned address.
  union conversion {
    double d;
    uint32_t u[2];
  } c;
  c.d = value;
  uint32_t* ptr = reinterpret_cast<uint32_t*>(p);
  *ptr = c.u[0];
  *(ptr + 1) = c.u[1];
#endif  // V8_TARGET_ARCH_MIPS
}


// Reads a uint16_t from |p|, which may not be 2-byte aligned on MIPS.
static inline uint16_t ReadUnalignedUInt16(const void* p) {
#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
  return *reinterpret_cast<const uint16_t*>(p);
#else   // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // Prevent compiler from using load-half (mips lh) on (possibly)
  // non-16-bit aligned address.
  union conversion {
    uint16_t h;
    uint8_t b[2];
  } c;
  const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
  c.b[0] = *ptr;
  c.b[1] = *(ptr + 1);
  return c.h;
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
}


// Writes a uint16_t to |p|, which may not be 2-byte aligned on MIPS.
static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
  *(reinterpret_cast<uint16_t*>(p)) = value;
#else   // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // Prevent compiler from using store-half (mips sh) on (possibly)
  // non-16-bit aligned address.
  union conversion {
    uint16_t h;
    uint8_t b[2];
  } c;
  c.h = value;
  uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
  *ptr = c.b[0];
  *(ptr + 1) = c.b[1];
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
}

}  // namespace internal
}  // namespace v8

#endif  // V8_UTILS_H_