// Home | History | Annotate | Download | only in src  (code-viewer artifact)
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef V8_UTILS_H_
      6 #define V8_UTILS_H_
      7 
      8 #include <limits.h>
      9 #include <stdlib.h>
     10 #include <string.h>
     11 
     12 #include "src/allocation.h"
     13 #include "src/base/macros.h"
     14 #include "src/checks.h"
     15 #include "src/globals.h"
     16 #include "src/list.h"
     17 #include "src/platform.h"
     18 #include "src/vector.h"
     19 
     20 namespace v8 {
     21 namespace internal {
     22 
     23 // ----------------------------------------------------------------------------
     24 // General helper functions
     25 
// Returns true iff x is a power of 2. Cannot be used with the maximally
// negative value of the type T (the -1 overflows).
template <typename T>
inline bool IsPowerOf2(T x) {
  // Delegates to the IS_POWER_OF_TWO macro (declared in src/base/macros.h).
  return IS_POWER_OF_TWO(x);
}
     32 
     33 
     34 // X must be a power of 2.  Returns the number of trailing zeros.
     35 inline int WhichPowerOf2(uint32_t x) {
     36   ASSERT(IsPowerOf2(x));
     37   int bits = 0;
     38 #ifdef DEBUG
     39   int original_x = x;
     40 #endif
     41   if (x >= 0x10000) {
     42     bits += 16;
     43     x >>= 16;
     44   }
     45   if (x >= 0x100) {
     46     bits += 8;
     47     x >>= 8;
     48   }
     49   if (x >= 0x10) {
     50     bits += 4;
     51     x >>= 4;
     52   }
     53   switch (x) {
     54     default: UNREACHABLE();
     55     case 8: bits++;  // Fall through.
     56     case 4: bits++;  // Fall through.
     57     case 2: bits++;  // Fall through.
     58     case 1: break;
     59   }
     60   ASSERT_EQ(1 << bits, original_x);
     61   return bits;
     62   return 0;
     63 }
     64 
     65 
     66 inline int MostSignificantBit(uint32_t x) {
     67   static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
     68   int nibble = 0;
     69   if (x & 0xffff0000) {
     70     nibble += 16;
     71     x >>= 16;
     72   }
     73   if (x & 0xff00) {
     74     nibble += 8;
     75     x >>= 8;
     76   }
     77   if (x & 0xf0) {
     78     nibble += 4;
     79     x >>= 4;
     80   }
     81   return nibble + msb4[x];
     82 }
     83 
     84 
     85 // The C++ standard leaves the semantics of '>>' undefined for
     86 // negative signed operands. Most implementations do the right thing,
     87 // though.
     88 inline int ArithmeticShiftRight(int x, int s) {
     89   return x >> s;
     90 }
     91 
     92 
     93 // Compute the 0-relative offset of some absolute value x of type T.
     94 // This allows conversion of Addresses and integral types into
     95 // 0-relative int offsets.
     96 template <typename T>
     97 inline intptr_t OffsetFrom(T x) {
     98   return x - static_cast<T>(0);
     99 }
    100 
    101 
    102 // Compute the absolute value of type T for some 0-relative offset x.
    103 // This allows conversion of 0-relative int offsets into Addresses and
    104 // integral types.
    105 template <typename T>
    106 inline T AddressFrom(intptr_t x) {
    107   return static_cast<T>(static_cast<T>(0) + x);
    108 }
    109 
    110 
    111 // Return the largest multiple of m which is <= x.
    112 template <typename T>
    113 inline T RoundDown(T x, intptr_t m) {
    114   ASSERT(IsPowerOf2(m));
    115   return AddressFrom<T>(OffsetFrom(x) & -m);
    116 }
    117 
    118 
    119 // Return the smallest multiple of m which is >= x.
    120 template <typename T>
    121 inline T RoundUp(T x, intptr_t m) {
    122   return RoundDown<T>(static_cast<T>(x + m - 1), m);
    123 }
    124 
    125 
    126 // Increment a pointer until it has the specified alignment.
    127 // This works like RoundUp, but it works correctly on pointer types where
    128 // sizeof(*pointer) might not be 1.
    129 template<class T>
    130 T AlignUp(T pointer, size_t alignment) {
    131   ASSERT(sizeof(pointer) == sizeof(uintptr_t));
    132   uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
    133   return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
    134 }
    135 
    136 
    137 template <typename T>
    138 int Compare(const T& a, const T& b) {
    139   if (a == b)
    140     return 0;
    141   else if (a < b)
    142     return -1;
    143   else
    144     return 1;
    145 }
    146 
    147 
// Three-way comparison of the values pointed to by a and b (not of the
// pointers themselves); neither pointer may be NULL.
template <typename T>
int PointerValueCompare(const T* a, const T* b) {
  return Compare<T>(*a, *b);
}
    152 
    153 
// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
// handles.
template<typename T> class Handle;  // Forward declaration.
template <typename T>
int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
  // Dereferencing a Handle yields the raw T*; the comparison is therefore
  // on pointer identity, not on object contents.
  return Compare<T*>(*(*a), *(*b));
}
    162 
    163 
    164 // Returns the smallest power of two which is >= x. If you pass in a
    165 // number that is already a power of two, it is returned as is.
    166 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
    167 // figure 3-3, page 48, where the function is called clp2.
    168 inline uint32_t RoundUpToPowerOf2(uint32_t x) {
    169   ASSERT(x <= 0x80000000u);
    170   x = x - 1;
    171   x = x | (x >> 1);
    172   x = x | (x >> 2);
    173   x = x | (x >> 4);
    174   x = x | (x >> 8);
    175   x = x | (x >> 16);
    176   return x + 1;
    177 }
    178 
    179 
    180 inline uint32_t RoundDownToPowerOf2(uint32_t x) {
    181   uint32_t rounded_up = RoundUpToPowerOf2(x);
    182   if (rounded_up > x) return rounded_up >> 1;
    183   return rounded_up;
    184 }
    185 
    186 
    187 template <typename T, typename U>
    188 inline bool IsAligned(T value, U alignment) {
    189   return (value & (alignment - 1)) == 0;
    190 }
    191 
    192 
// Returns true if (addr + offset) is aligned.
inline bool IsAddressAligned(Address addr,
                             intptr_t alignment,
                             int offset = 0) {
  // Convert the address to a 0-relative integral offset before testing.
  intptr_t offs = OffsetFrom(addr + offset);
  return IsAligned(offs, alignment);
}
    200 
    201 
    202 // Returns the maximum of the two parameters.
    203 template <typename T>
    204 T Max(T a, T b) {
    205   return a < b ? b : a;
    206 }
    207 
    208 
    209 // Returns the minimum of the two parameters.
    210 template <typename T>
    211 T Min(T a, T b) {
    212   return a < b ? a : b;
    213 }
    214 
    215 
    216 // Returns the absolute value of its argument.
    217 template <typename T>
    218 T Abs(T a) {
    219   return a < 0 ? -a : a;
    220 }
    221 
    222 
    223 // Returns the negative absolute value of its argument.
    224 template <typename T>
    225 T NegAbs(T a) {
    226   return a < 0 ? a : -a;
    227 }
    228 
    229 
// TODO(svenpanne) Clean up the whole power-of-2 mess.
inline int32_t WhichPowerOf2Abs(int32_t x) {
  // kMinInt's magnitude (2^31) is not representable as an int32_t, so
  // Abs(kMinInt) would overflow; that case is answered directly with 31.
  return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
}
    234 
    235 
// Obtains the unsigned type corresponding to T
// available in C++11 as std::make_unsigned
template<typename T>
struct make_unsigned {
  // Fallback: types without a specialization below map to themselves
  // (correct for types that are already unsigned).
  typedef T type;
};


// Template specializations necessary to have make_unsigned work
template<> struct make_unsigned<int32_t> {
  typedef uint32_t type;
};


template<> struct make_unsigned<int64_t> {
  typedef uint64_t type;
};
    253 
    254 
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.

template<class T, int shift, int size, class U>
class BitFieldBase {
 public:
  // A type U mask of bit field.  To use all bits of a type U of x bits
  // in a bitfield without compiler warnings we have to compute 2^x
  // without using a shift count of x in the computation.
  static const U kOne = static_cast<U>(1U);
  // Mask covering bits [shift, shift + size): computed as
  // 2^(shift+size) - 2^shift, never shifting by the full width of U.
  static const U kMask = ((kOne << shift) << size) - (kOne << shift);
  static const U kShift = shift;
  static const U kSize = size;
  // First bit position after this field; lets adjacent fields be chained.
  static const U kNext = kShift + kSize;

  // Value for the field with all bits set.
  static const T kMax = static_cast<T>((1U << size) - 1);

  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
  }

  // Returns a type U with the bit field value encoded.
  static U encode(T value) {
    ASSERT(is_valid(value));
    return static_cast<U>(value) << shift;
  }

  // Returns a type U with the bit field value updated.
  static U update(U previous, T value) {
    return (previous & ~kMask) | encode(value);
  }

  // Extracts the bit field from the value.
  static T decode(U value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};
    295 
    296 
// BitFieldBase over a 32-bit storage word.
template<class T, int shift, int size>
class BitField : public BitFieldBase<T, shift, size, uint32_t> { };


// BitFieldBase over a 64-bit storage word.
template<class T, int shift, int size>
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
    303 
    304 
    305 // ----------------------------------------------------------------------------
    306 // Hash function.
    307 
    308 static const uint32_t kZeroHashSeed = 0;
    309 
    310 // Thomas Wang, Integer Hash Functions.
    311 // http://www.concentric.net/~Ttwang/tech/inthash.htm
    312 inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    313   uint32_t hash = key;
    314   hash = hash ^ seed;
    315   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
    316   hash = hash ^ (hash >> 12);
    317   hash = hash + (hash << 2);
    318   hash = hash ^ (hash >> 4);
    319   hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
    320   hash = hash ^ (hash >> 16);
    321   return hash;
    322 }
    323 
    324 
    325 inline uint32_t ComputeLongHash(uint64_t key) {
    326   uint64_t hash = key;
    327   hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
    328   hash = hash ^ (hash >> 31);
    329   hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
    330   hash = hash ^ (hash >> 11);
    331   hash = hash + (hash << 6);
    332   hash = hash ^ (hash >> 22);
    333   return static_cast<uint32_t>(hash);
    334 }
    335 
    336 
// Hashes an arbitrary pointer by feeding its bits through
// ComputeIntegerHash with the zero seed.  On 64-bit targets the pointer
// value is truncated to its low 32 bits by the static_cast.
inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeIntegerHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
      v8::internal::kZeroHashSeed);
}
    342 
    343 
// ----------------------------------------------------------------------------
// Generated memcpy/memmove

// Initializes the codegen support that depends on CPU features. This is
// called after CPU initialization.
void init_memcopy_functions();

#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;

// Copy memory area. No restrictions.
void MemMove(void* dest, const void* src, size_t size);
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);

// Keep the distinction of "move" vs. "copy" for the benefit of other
// architectures.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  MemMove(dest, src, size);
}
#elif defined(V8_HOST_ARCH_ARM)
// Copy routine reached through a function pointer; NOTE(review): presumably
// installed by init_memcopy_functions() — verify against src/platform.
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
extern MemCopyUint8Function memcopy_uint8_function;
// Plain memcpy fallback with the same signature as the generated routine.
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}

// Widening copy: uint8_t source elements into uint16_t destination slots.
typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
                                           size_t size);
extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
                               size_t chars);
// For values < 12, the assembler function is slower than the inlined C code.
const int kMinComplexConvertMemCopy = 12;
V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
                                  size_t size) {
  (*memcopy_uint16_uint8_function)(dest, src, size);
}
#elif defined(V8_HOST_ARCH_MIPS)
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
                                     size_t size);
extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                   size_t chars) {
  memcpy(dest, src, chars);
}
// For values < 16, the assembler function is slower than the inlined C code.
const int kMinComplexMemCopy = 16;
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                            reinterpret_cast<const uint8_t*>(src), size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}
#else
// Generic fallback: defer to the C library.
// Copy memory area to disjoint memory area.
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}
const int kMinComplexMemCopy = 16 * kPointerSize;
#endif  // V8_TARGET_ARCH_IA32
    421 
    422 
    423 // ----------------------------------------------------------------------------
    424 // Miscellaneous
    425 
// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access.  Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false)  {}

 private:
  // Only Access may touch the instance and the reservation flag.
  template <typename S> friend class Access;
  T instance_;
  bool is_reserved_;  // True while an Access object holds the resource.
};
    439 
    440 
// Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  // Reserves the resource for the lifetime of this object; double
  // reservation is caught by the ASSERT in debug builds only.
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    ASSERT(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  // Releases the reservation and clears the cached pointers.
  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = NULL;
    instance_ = NULL;
  }

  T* value()  { return instance_; }
  T* operator -> ()  { return instance_; }

 private:
  StaticResource<T>* resource_;  // The resource being held.
  T* instance_;                  // Cached pointer to the resource's instance.
};
    465 
    466 
// A pointer that can only be set once and doesn't allow NULL values.
template<typename T>
class SetOncePointer {
 public:
  SetOncePointer() : pointer_(NULL) { }

  bool is_set() const { return pointer_ != NULL; }

  // Returns the pointer; must not be called before set() (debug-checked).
  T* get() const {
    ASSERT(pointer_ != NULL);
    return pointer_;
  }

  // Sets the pointer; value must be non-NULL and set() must not have been
  // called before (both checked in debug builds only).
  void set(T* value) {
    ASSERT(pointer_ == NULL && value != NULL);
    pointer_ = value;
  }

 private:
  T* pointer_;
};
    488 
    489 
// A Vector whose backing store is embedded in the object itself (no heap
// allocation); the size is fixed at compile time.
template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) { }

  // Fills the vector with kSize copies of initial_value.
  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
    for (int i = 0; i < kSize; ++i) {
      buffer_[i] = initial_value;
    }
  }

  // When copying, make underlying Vector to reference our buffer.
  // NOTE(review): MemCopy performs a bitwise copy; this assumes T is
  // trivially copyable — confirm for new uses.
  EmbeddedVector(const EmbeddedVector& rhs)
      : Vector<T>(rhs) {
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    set_start(buffer_);
  }

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
    return *this;
  }

 private:
  T buffer_[kSize];
};
    519 
    520 
/*
 * A class that collects values into a backing store.
 * Specialized versions of the class can allow access to the backing store
 * in different ways.
 * There is no guarantee that the backing store is contiguous (and, as a
 * consequence, no guarantees that consecutively added elements are adjacent
 * in memory). The collector may move elements unless it has guaranteed not
 * to.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
 public:
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    current_chunk_ = Vector<T>::New(initial_capacity);
  }

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
  }

  // Add a single element.
  inline void Add(T value) {
    if (index_ >= current_chunk_.length()) {
      Grow(1);
    }
    current_chunk_[index_] = value;
    index_++;
    size_++;
  }

  // Add a block of contiguous elements and return a Vector backed by the
  // memory area.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(int size, T initial_value) {
    ASSERT(size > 0);
    if (size > current_chunk_.length() - index_) {
      Grow(size);
    }
    T* position = current_chunk_.start() + index_;
    index_ += size;
    size_ += size;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    }
    return Vector<T>(position, size);
  }


  // Add a contiguous block of elements and return a vector backed
  // by the added block.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(Vector<const T> source) {
    if (source.length() > current_chunk_.length() - index_) {
      Grow(source.length());
    }
    T* position = current_chunk_.start() + index_;
    index_ += source.length();
    size_ += source.length();
    for (int i = 0; i < source.length(); i++) {
      position[i] = source[i];
    }
    return Vector<T>(position, source.length());
  }


  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    ASSERT(size_ <= destination.length());
    int position = 0;
    // Older (already sealed) chunks first, then the partially filled
    // current chunk.
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
        position++;
      }
    }
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];
      position++;
    }
  }

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);
    WriteTo(new_store);
    return new_store;
  }

  // Resets the collector to be empty.
  virtual void Reset();

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

 protected:
  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;  // Earlier, already-sealed backing chunks.
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.

  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    ASSERT(growth_factor > 1);
    int new_capacity;
    int current_length = current_chunk_.length();
    if (current_length < kMinCapacity) {
      // The collector started out as empty.
      new_capacity = min_capacity * growth_factor;
      if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
    } else {
      // Grow geometrically, but cap a single growth step at max_growth.
      int growth = current_length * (growth_factor - 1);
      if (growth > max_growth) {
        growth = max_growth;
      }
      new_capacity = current_length + growth;
      if (new_capacity < min_capacity) {
        new_capacity = min_capacity + growth;
      }
    }
    NewChunk(new_capacity);
    ASSERT(index_ + min_capacity <= current_chunk_.length());
  }

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual void NewChunk(int new_capacity)  {
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    if (index_ > 0) {
      chunks_.Add(current_chunk_.SubVector(0, index_));
    } else {
      // Nothing written yet: the old chunk can simply be released.
      current_chunk_.Dispose();
    }
    current_chunk_ = new_chunk;
    index_ = 0;
  }
};
    671 
    672 
/*
 * A collector that allows sequences of values to be guaranteed to
 * stay consecutive.
 * If the backing store grows while a sequence is active, the current
 * sequence might be moved, but after the sequence is ended, it will
 * not move again.
 * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
 * as well, if inside an active sequence where another element is added.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
 public:
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  // Marks the current position as the start of a sequence; only one
  // sequence may be active at a time (debug-checked).
  void StartSequence() {
    ASSERT(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
  }

  // Ends the active sequence and returns a vector over its elements
  // (an empty Vector if nothing was added since StartSequence).
  Vector<T> EndSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_start = sequence_start_;
    sequence_start_ = kNoSequence;
    if (sequence_start == this->index_) return Vector<T>();
    return this->current_chunk_.SubVector(sequence_start, this->index_);
  }

  // Drops the currently added sequence, and all collected elements in it.
  void DropSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_length = this->index_ - sequence_start_;
    this->index_ = sequence_start_;
    this->size_ -= sequence_length;
    sequence_start_ = kNoSequence;
  }

  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();
  }

 private:
  static const int kNoSequence = -1;
  int sequence_start_;  // Index of the active sequence start, or kNoSequence.

  // Move the currently active sequence to the new chunk.
  virtual void NewChunk(int new_capacity) {
    if (sequence_start_ == kNoSequence) {
      // Fall back on default behavior if no sequence has been started.
      this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
      return;
    }
    // Allocate room for the active sequence in addition to the requested
    // capacity, and copy the sequence to the front of the new chunk so it
    // stays contiguous.
    int sequence_length = this->index_ - sequence_start_;
    Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
    ASSERT(sequence_length < new_chunk.length());
    for (int i = 0; i < sequence_length; i++) {
      new_chunk[i] = this->current_chunk_[sequence_start_ + i];
    }
    if (sequence_start_ > 0) {
      this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
    } else {
      this->current_chunk_.Dispose();
    }
    this->current_chunk_ = new_chunk;
    this->index_ = sequence_length;
    sequence_start_ = 0;
  }
};
    745 
    746 
    747 // Compare ASCII/16bit chars to ASCII/16bit chars.
    748 template <typename lchar, typename rchar>
    749 inline int CompareCharsUnsigned(const lchar* lhs,
    750                                 const rchar* rhs,
    751                                 int chars) {
    752   const lchar* limit = lhs + chars;
    753 #ifdef V8_HOST_CAN_READ_UNALIGNED
    754   if (sizeof(*lhs) == sizeof(*rhs)) {
    755     // Number of characters in a uintptr_t.
    756     static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
    757     while (lhs <= limit - kStepSize) {
    758       if (*reinterpret_cast<const uintptr_t*>(lhs) !=
    759           *reinterpret_cast<const uintptr_t*>(rhs)) {
    760         break;
    761       }
    762       lhs += kStepSize;
    763       rhs += kStepSize;
    764     }
    765   }
    766 #endif
    767   while (lhs < limit) {
    768     int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    769     if (r != 0) return r;
    770     ++lhs;
    771     ++rhs;
    772   }
    773   return 0;
    774 }
    775 
// Compares character sequences of possibly different widths (1 or 2 bytes
// per char) by dispatching to CompareCharsUnsigned with the operands
// reinterpreted as unsigned 8- or 16-bit character pointers.
template<typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
  ASSERT(sizeof(lchar) <= 2);
  ASSERT(sizeof(rchar) <= 2);
  if (sizeof(lchar) == 1) {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  } else {
    if (sizeof(rchar) == 1) {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint8_t*>(rhs),
                                  chars);
    } else {
      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
                                  reinterpret_cast<const uint16_t*>(rhs),
                                  chars);
    }
  }
}
    802 
    803 
    804 // Calculate 10^exponent.
    805 inline int TenToThe(int exponent) {
    806   ASSERT(exponent <= 9);
    807   ASSERT(exponent >= 1);
    808   int answer = 10;
    809   for (int i = 1; i < exponent; i++) answer *= 10;
    810   return answer;
    811 }
    812 
    813 
    814 // The type-based aliasing rule allows the compiler to assume that pointers of
    815 // different types (for some definition of different) never alias each other.
    816 // Thus the following code does not work:
    817 //
    818 // float f = foo();
    819 // int fbits = *(int*)(&f);
    820 //
    821 // The compiler 'knows' that the int pointer can't refer to f since the types
    822 // don't match, so the compiler may cache f in a register, leaving random data
    823 // in fbits.  Using C++ style casts makes no difference, however a pointer to
    824 // char data is assumed to alias any other pointer.  This is the 'memcpy
    825 // exception'.
    826 //
    827 // Bit_cast uses the memcpy exception to move the bits from a variable of one
    828 // type of a variable of another type.  Of course the end result is likely to
    829 // be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
    830 // will completely optimize BitCast away.
    831 //
    832 // There is an additional use for BitCast.
    833 // Recent gccs will warn when they see casts that may result in breakage due to
    834 // the type-based aliasing rule.  If you have checked that there is no breakage
    835 // you can use BitCast to cast one pointer type to another.  This confuses gcc
    836 // enough that it can no longer see that you have cast one pointer type to
    837 // another thus avoiding the warning.
    838 
    839 // We need different implementations of BitCast for pointer and non-pointer
    840 // values. We use partial specialization of auxiliary struct to work around
    841 // issues with template functions overloading.
// Generic case: copy the object representation of |source| into a Dest of
// the same size via memcpy, the sanctioned way to type-pun (see the comment
// block above).
template <class Dest, class Source>
struct BitCastHelper {
  // Source and destination must have identical sizes for a bit copy.
  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));

  INLINE(static Dest cast(const Source& source)) {
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }
};
    852 
// Partial specialization for pointer sources: reinterpret the pointer as a
// uintptr_t first, then reuse the generic memcpy-based helper above.
template <class Dest, class Source>
struct BitCastHelper<Dest, Source*> {
  INLINE(static Dest cast(Source* source)) {
    return BitCastHelper<Dest, uintptr_t>::
        cast(reinterpret_cast<uintptr_t>(source));
  }
};
    860 
// Reinterprets the bits of |source| as a value of type Dest. Implemented in
// terms of BitCastHelper; see the comment block above for why memcpy is used.
template <class Dest, class Source>
INLINE(Dest BitCast(const Source& source));

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  return BitCastHelper<Dest, Source>::cast(source);
}
    868 
    869 
// A fixed-size, value-initialized array of NumElements elements with
// debug-mode bounds checking on element access.
template<typename ElementType, int NumElements>
class EmbeddedContainer {
 public:
  EmbeddedContainer() : elems_() { }

  // Number of elements (the template argument).
  int length() const { return NumElements; }
  const ElementType& operator[](int i) const {
    // NOTE(review): only the upper bound is asserted; negative i is not
    // caught here.
    ASSERT(i < length());
    return elems_[i];
  }
  ElementType& operator[](int i) {
    ASSERT(i < length());
    return elems_[i];
  }

 private:
  ElementType elems_[NumElements];
};
    888 
    889 
// Specialization for zero elements: any element access is a bug and traps
// with UNREACHABLE in debug builds.
template<typename ElementType>
class EmbeddedContainer<ElementType, 0> {
 public:
  int length() const { return 0; }
  const ElementType& operator[](int i) const {
    UNREACHABLE();
    // Never meaningfully executed; exists so the function has a value to
    // return when UNREACHABLE compiles to a no-op.
    static ElementType t = 0;
    return t;
  }
  ElementType& operator[](int i) {
    UNREACHABLE();
    static ElementType t = 0;
    return t;
  }
};
    905 
    906 
    907 // Helper class for building result strings in a character buffer. The
    908 // purpose of the class is to use safe operations that checks the
    909 // buffer bounds on all operations in debug mode.
    910 // This simple base class does not allow formatted output.
class SimpleStringBuilder {
 public:
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit SimpleStringBuilder(int size);

  // Create a string builder over the externally owned |buffer| of
  // |size| characters; the builder does not take ownership.
  SimpleStringBuilder(char* buffer, int size)
      : buffer_(buffer, size), position_(0) { }

  // 0-terminates the string if the caller has not already finalized it.
  ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }

  // Total capacity of the underlying buffer.
  int size() const { return buffer_.length(); }

  // Get the current position in the builder.
  int position() const {
    ASSERT(!is_finalized());
    return position_;
  }

  // Reset the position.
  void Reset() { position_ = 0; }

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  // instead. The buffer must have room for the character.
  void AddCharacter(char c) {
    ASSERT(c != '\0');
    ASSERT(!is_finalized() && position_ < buffer_.length());
    buffer_[position_++] = c;
  }

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Add the decimal representation of the value.
  void AddDecimalInteger(int value);

  // Finalize the string by 0-terminating it and returning the buffer.
  char* Finalize();

 protected:
  Vector<char> buffer_;
  // Write cursor; a negative value signals "finalized" (see is_finalized).
  int position_;

  // Finalization is signalled by a negative position_; the exact encoding
  // is chosen by Finalize() in the .cc file -- confirm there.
  bool is_finalized() const { return position_ < 0; }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
};
    970 
    971 
    972 // A poor man's version of STL's bitset: A bit set of enums E (without explicit
    973 // values), fitting into an integral type T.
    974 template <class E, class T = int>
    975 class EnumSet {
    976  public:
    977   explicit EnumSet(T bits = 0) : bits_(bits) {}
    978   bool IsEmpty() const { return bits_ == 0; }
    979   bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
    980   bool ContainsAnyOf(const EnumSet& set) const {
    981     return (bits_ & set.bits_) != 0;
    982   }
    983   void Add(E element) { bits_ |= Mask(element); }
    984   void Add(const EnumSet& set) { bits_ |= set.bits_; }
    985   void Remove(E element) { bits_ &= ~Mask(element); }
    986   void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
    987   void RemoveAll() { bits_ = 0; }
    988   void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
    989   T ToIntegral() const { return bits_; }
    990   bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
    991   bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
    992   EnumSet<E, T> operator|(const EnumSet& set) const {
    993     return EnumSet<E, T>(bits_ | set.bits_);
    994   }
    995 
    996  private:
    997   T Mask(E element) const {
    998     // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
    999     // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
   1000     ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
   1001     return static_cast<T>(1) << element;
   1002   }
   1003 
   1004   T bits_;
   1005 };
   1006 
   1007 // Bit field extraction.
   1008 inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
   1009   return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
   1010 }
   1011 
   1012 inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
   1013   return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
   1014 }
   1015 
   1016 inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
   1017   return (x << (31 - msb)) >> (lsb + 31 - msb);
   1018 }
   1019 
   1020 inline int signed_bitextract_64(int msb, int lsb, int x) {
   1021   // TODO(jbramley): This is broken for big bitfields.
   1022   return (x << (63 - msb)) >> (lsb + 63 - msb);
   1023 }
   1024 
   1025 // Check number width.
   1026 inline bool is_intn(int64_t x, unsigned n) {
   1027   ASSERT((0 < n) && (n < 64));
   1028   int64_t limit = static_cast<int64_t>(1) << (n - 1);
   1029   return (-limit <= x) && (x < limit);
   1030 }
   1031 
// Returns true iff |x| is representable as an unsigned n-bit integer, for
// 0 < n < 64. Negative |x| never qualifies (the arithmetic right shift
// leaves the sign bits set).
inline bool is_uintn(int64_t x, unsigned n) {
  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return !(x >> n);
}
   1036 
// Returns the low n bits of |x|, for 0 < n < (number of bits in T).
template <class T>
inline T truncate_to_intn(T x, unsigned n) {
  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
}
   1042 
// Invokes V(n) for every n in [1, 63].
#define INT_1_TO_63_LIST(V)                                                    \
V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)                                 \
V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)                                \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)                                \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)                                \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)                                \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)                                \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)                                \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

// Generate fixed-width convenience wrappers around is_intn, is_uintn and
// truncate_to_intn: is_int1(x)..is_int63(x), is_uint1(x)..is_uint63(x) and
// truncate_to_int1(x)..truncate_to_int63(x).
#define DECLARE_IS_INT_N(N)                                                    \
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N)                                                   \
template <class T>                                                             \
inline bool is_uint##N(T x) { return is_uintn(x, N); }
#define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
template <class T>                                                             \
inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
   1067 
   1068 class TypeFeedbackId {
   1069  public:
   1070   explicit TypeFeedbackId(int id) : id_(id) { }
   1071   int ToInt() const { return id_; }
   1072 
   1073   static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
   1074   bool IsNone() const { return id_ == kNoneId; }
   1075 
   1076  private:
   1077   static const int kNoneId = -1;
   1078 
   1079   int id_;
   1080 };
   1081 
   1082 
   1083 class BailoutId {
   1084  public:
   1085   explicit BailoutId(int id) : id_(id) { }
   1086   int ToInt() const { return id_; }
   1087 
   1088   static BailoutId None() { return BailoutId(kNoneId); }
   1089   static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
   1090   static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
   1091   static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
   1092   static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
   1093 
   1094   bool IsNone() const { return id_ == kNoneId; }
   1095   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
   1096   bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
   1097 
   1098  private:
   1099   static const int kNoneId = -1;
   1100 
   1101   // Using 0 could disguise errors.
   1102   static const int kFunctionEntryId = 2;
   1103 
   1104   // This AST id identifies the point after the declarations have been visited.
   1105   // We need it to capture the environment effects of declarations that emit
   1106   // code (function declarations).
   1107   static const int kDeclarationsId = 3;
   1108 
   1109   // Every FunctionState starts with this id.
   1110   static const int kFirstUsableId = 4;
   1111 
   1112   // Every compiled stub starts with this id.
   1113   static const int kStubEntryId = 5;
   1114 
   1115   int id_;
   1116 };
   1117 
   1118 
   1119 template <class C>
   1120 class ContainerPointerWrapper {
   1121  public:
   1122   typedef typename C::iterator iterator;
   1123   typedef typename C::reverse_iterator reverse_iterator;
   1124   explicit ContainerPointerWrapper(C* container) : container_(container) {}
   1125   iterator begin() { return container_->begin(); }
   1126   iterator end() { return container_->end(); }
   1127   reverse_iterator rbegin() { return container_->rbegin(); }
   1128   reverse_iterator rend() { return container_->rend(); }
   1129  private:
   1130   C* container_;
   1131 };
   1132 
   1133 
   1134 // ----------------------------------------------------------------------------
   1135 // I/O support.
   1136 
#if __GNUC__ >= 4
// On gcc we can ask the compiler to check the types of %d-style format
// specifiers and their associated arguments.  TODO(erikcorry) fix this
// so it works on MacOSX.
#if defined(__MACH__) && defined(__APPLE__)
// Format checking is disabled on Mac; see the TODO above.
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
#else  // MacOsX.
// In format(printf, m, n): parameter m is the format string and parameter n
// is the first argument checked against it. The *_METHOD_* variants are
// offset by one to account for the implicit 'this' parameter.
#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
#endif
#else
// Non-gcc compilers: format checking is a no-op.
#define PRINTF_CHECKING
#define FPRINTF_CHECKING
#define PRINTF_METHOD_CHECKING
#define FPRINTF_METHOD_CHECKING
#endif
   1158 
   1159 // Our version of printf().
   1160 void PRINTF_CHECKING PrintF(const char* format, ...);
   1161 void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
   1162 
   1163 // Prepends the current process ID to the output.
   1164 void PRINTF_CHECKING PrintPID(const char* format, ...);
   1165 
   1166 // Safe formatting print. Ensures that str is always null-terminated.
   1167 // Returns the number of chars written, or -1 if output was truncated.
   1168 int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
   1169 int VSNPrintF(Vector<char> str, const char* format, va_list args);
   1170 
   1171 void StrNCpy(Vector<char> dest, const char* src, size_t n);
   1172 
   1173 // Our version of fflush.
   1174 void Flush(FILE* out);
   1175 
   1176 inline void Flush() {
   1177   Flush(stdout);
   1178 }
   1179 
   1180 
   1181 // Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed of with DeleteArray by the caller.
   1183 char* ReadLine(const char* prompt);
   1184 
   1185 
   1186 // Read and return the raw bytes in a file. the size of the buffer is returned
   1187 // in size.
   1188 // The returned buffer must be freed by the caller.
   1189 byte* ReadBytes(const char* filename, int* size, bool verbose = true);
   1190 
   1191 
   1192 // Append size chars from str to the file given by filename.
   1193 // The file is overwritten. Returns the number of chars written.
   1194 int AppendChars(const char* filename,
   1195                 const char* str,
   1196                 int size,
   1197                 bool verbose = true);
   1198 
   1199 
   1200 // Write size chars from str to the file given by filename.
   1201 // The file is overwritten. Returns the number of chars written.
   1202 int WriteChars(const char* filename,
   1203                const char* str,
   1204                int size,
   1205                bool verbose = true);
   1206 
   1207 
   1208 // Write size bytes to the file given by filename.
   1209 // The file is overwritten. Returns the number of bytes written.
   1210 int WriteBytes(const char* filename,
   1211                const byte* bytes,
   1212                int size,
   1213                bool verbose = true);
   1214 
   1215 
   1216 // Write the C code
   1217 // const char* <varname> = "<str>";
   1218 // const int <varname>_len = <len>;
   1219 // to the file given by filename. Only the first len chars are written.
   1220 int WriteAsCFile(const char* filename, const char* varname,
   1221                  const char* str, int size, bool verbose = true);
   1222 
   1223 
   1224 // ----------------------------------------------------------------------------
   1225 // Data structures
   1226 
// Reinterprets an array of Handle<T> as a Vector of generic Handle<Object>.
// NOTE(review): assumes every Handle<T> instantiation has the identical
// single-location representation so the reinterpret_cast is layout-safe --
// confirm against the Handle definition.
template <typename T>
inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
                                             int length) {
  return Vector< Handle<Object> >(
      reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
}
   1233 
   1234 
   1235 // ----------------------------------------------------------------------------
   1236 // Memory
   1237 
// Copies |num_words| pointer-sized words from |src| to |dst|. The data
// spans must not overlap (use MoveWords below for overlapping spans), and
// num_words must be non-zero.
template <typename T>
inline void CopyWords(T* dst, const T* src, size_t num_words) {
  STATIC_ASSERT(sizeof(T) == kPointerSize);
  // TODO(mvstanton): disabled because mac builds are bogus failing on this
  // assert. They are doing a signed comparison. Investigate in
  // the morning.
  // ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
  //       Max(dst, const_cast<T*>(src)));
  ASSERT(num_words > 0);

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const size_t kBlockCopyLimit = 16;

  if (num_words < kBlockCopyLimit) {
    // Small copies: a simple word loop avoids the MemCopy call overhead.
    do {
      num_words--;
      *dst++ = *src++;
    } while (num_words > 0);
  } else {
    MemCopy(dst, src, num_words * kPointerSize);
  }
}
   1262 
   1263 
   1264 // Copies words from |src| to |dst|. No restrictions.
   1265 template <typename T>
   1266 inline void MoveWords(T* dst, const T* src, size_t num_words) {
   1267   STATIC_ASSERT(sizeof(T) == kPointerSize);
   1268   ASSERT(num_words > 0);
   1269 
   1270   // Use block copying MemCopy if the segment we're copying is
   1271   // enough to justify the extra call/setup overhead.
   1272   static const size_t kBlockCopyLimit = 16;
   1273 
   1274   if (num_words < kBlockCopyLimit &&
   1275       ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
   1276     T* end = dst + num_words;
   1277     do {
   1278       num_words--;
   1279       *dst++ = *src++;
   1280     } while (num_words > 0);
   1281   } else {
   1282     MemMove(dst, src, num_words * kPointerSize);
   1283   }
   1284 }
   1285 
   1286 
// Copies |num_bytes| bytes from |src| to |dst|. T must be a one-byte type
// and the data spans must not overlap.
template <typename T>
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
  STATIC_ASSERT(sizeof(T) == 1);
  // Asserts that the lower of the two spans ends at or before the start of
  // the higher one, i.e. that they do not overlap.
  ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
         Max(dst, const_cast<T*>(src)));
  if (num_bytes == 0) return;

  // Use block copying MemCopy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = kMinComplexMemCopy;

  if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
    // Small copies: byte loop avoids the MemCopy call overhead.
    do {
      num_bytes--;
      *dst++ = *src++;
    } while (num_bytes > 0);
  } else {
    MemCopy(dst, src, num_bytes);
  }
}
   1308 
   1309 
// Assigns |value| to each of the first |counter| pointer slots of |dest|.
// U* must be assignable to T*. On x86/x64 gcc builds this uses a
// "rep stos" instruction; otherwise it falls back to a plain loop.
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
  // Fake assignment so the compiler verifies that U* converts to T*.
  T* a = NULL;
  U* b = NULL;
  a = b;  // Fake assignment to check assignability.
  USE(a);
#endif  // DEBUG
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#define STOS "stosq"
#endif
#if defined(__native_client__)
  // This STOS sequence does not validate for x86_64 Native Client.
  // Here we #undef STOS to force use of the slower C version.
  // TODO(bradchen): Profile V8 and implement a faster REP STOS
  // here if the profile indicates it matters.
#undef STOS
#endif

#if defined(MEMORY_SANITIZER)
  // MemorySanitizer does not understand inline assembly.
#undef STOS
#endif

#if defined(__GNUC__) && defined(STOS)
  // rep stos stores EAX/RAX to [EDI/RDI], ECX/RCX times; the "+&c"/"+&D"
  // constraints let the instruction consume counter and dest in place.
  asm volatile(
      "cld;"
      "rep ; " STOS
      : "+&c" (counter), "+&D" (dest)
      : "a" (value)
      : "memory", "cc");
#else
  for (int i = 0; i < counter; i++) {
    dest[i] = value;
  }
#endif

#undef STOS
}
   1351 
   1352 
// Simple wrapper that allows an ExternalString to refer to a
// Vector<const char>. Doesn't assume ownership of the data; the vector's
// backing store must outlive this adapter.
class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
 public:
  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}

  // Start of the character data (not necessarily 0-terminated).
  virtual const char* data() const { return data_.start(); }

  // Number of characters in the data.
  virtual size_t length() const { return data_.length(); }

 private:
  Vector<const char> data_;
};
   1366 
   1367 
   1368 // Simple support to read a file into a 0-terminated C-string.
   1369 // The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
   1371 Vector<const char> ReadFile(const char* filename,
   1372                             bool* exists,
   1373                             bool verbose = true);
   1374 Vector<const char> ReadFile(FILE* file,
   1375                             bool* exists,
   1376                             bool verbose = true);
   1377 
   1378 
   1379 template <typename sourcechar, typename sinkchar>
   1380 INLINE(static void CopyCharsUnsigned(sinkchar* dest,
   1381                                      const sourcechar* src,
   1382                                      int chars));
   1383 #if defined(V8_HOST_ARCH_ARM)
   1384 INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
   1385 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
   1386 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
   1387 #elif defined(V8_HOST_ARCH_MIPS)
   1388 INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
   1389 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
   1390 #endif
   1391 
   1392 // Copy from ASCII/16bit chars to ASCII/16bit chars.
   1393 template <typename sourcechar, typename sinkchar>
   1394 INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
   1395 
template<typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
  // Only one- and two-byte character types are supported.
  ASSERT(sizeof(sourcechar) <= 2);
  ASSERT(sizeof(sinkchar) <= 2);
  // Dispatch on the (compile-time constant) character widths so each
  // combination reaches the matching CopyCharsUnsigned overload; the casts
  // only reinterpret signedness, not width.
  if (sizeof(sinkchar) == 1) {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  } else {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  }
}
   1422 
   1423 template <typename sourcechar, typename sinkchar>
   1424 void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
   1425   sinkchar* limit = dest + chars;
   1426 #ifdef V8_HOST_CAN_READ_UNALIGNED
   1427   if (sizeof(*dest) == sizeof(*src)) {
   1428     if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
   1429       MemCopy(dest, src, chars * sizeof(*dest));
   1430       return;
   1431     }
   1432     // Number of characters in a uintptr_t.
   1433     static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest);  // NOLINT
   1434     ASSERT(dest + kStepSize > dest);  // Check for overflow.
   1435     while (dest + kStepSize <= limit) {
   1436       *reinterpret_cast<uintptr_t*>(dest) =
   1437           *reinterpret_cast<const uintptr_t*>(src);
   1438       dest += kStepSize;
   1439       src += kStepSize;
   1440     }
   1441   }
   1442 #endif
   1443   while (dest < limit) {
   1444     *dest++ = static_cast<sinkchar>(*src++);
   1445   }
   1446 }
   1447 
   1448 
   1449 #if defined(V8_HOST_ARCH_ARM)
// One-byte to one-byte copy. The switch turns each small length into a
// fixed-size memcpy, which the compiler can expand inline; longer copies use
// the optimized MemCopy. The unsigned cast folds negative lengths into the
// default case.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 2);
      break;
    case 3:
      memcpy(dest, src, 3);
      break;
    case 4:
      memcpy(dest, src, 4);
      break;
    case 5:
      memcpy(dest, src, 5);
      break;
    case 6:
      memcpy(dest, src, 6);
      break;
    case 7:
      memcpy(dest, src, 7);
      break;
    case 8:
      memcpy(dest, src, 8);
      break;
    case 9:
      memcpy(dest, src, 9);
      break;
    case 10:
      memcpy(dest, src, 10);
      break;
    case 11:
      memcpy(dest, src, 11);
      break;
    case 12:
      memcpy(dest, src, 12);
      break;
    case 13:
      memcpy(dest, src, 13);
      break;
    case 14:
      memcpy(dest, src, 14);
      break;
    case 15:
      memcpy(dest, src, 15);
      break;
    default:
      MemCopy(dest, src, chars);
      break;
  }
}
   1504 
   1505 
// Widening copy: each 8-bit character becomes a 16-bit character. Long
// copies use the optimized MemCopyUint16Uint8, short ones its wrapper.
void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
  if (chars >= kMinComplexConvertMemCopy) {
    MemCopyUint16Uint8(dest, src, chars);
  } else {
    MemCopyUint16Uint8Wrapper(dest, src, chars);
  }
}
   1513 
   1514 
// Two-byte to two-byte copy. As in the one-byte overload, small lengths are
// unrolled into fixed-size memcpy calls (byte counts are chars * 2); the
// unsigned cast folds negative lengths into the default case.
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 4);
      break;
    case 3:
      memcpy(dest, src, 6);
      break;
    case 4:
      memcpy(dest, src, 8);
      break;
    case 5:
      memcpy(dest, src, 10);
      break;
    case 6:
      memcpy(dest, src, 12);
      break;
    case 7:
      memcpy(dest, src, 14);
      break;
    default:
      MemCopy(dest, src, chars * sizeof(*dest));
      break;
  }
}
   1545 
   1546 
   1547 #elif defined(V8_HOST_ARCH_MIPS)
// One-byte to one-byte copy: plain memcpy for short inputs, the optimized
// MemCopy beyond kMinComplexMemCopy.
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars);
  } else {
    MemCopy(dest, src, chars);
  }
}
   1555 
// Two-byte to two-byte copy; byte count is chars * sizeof(uint16_t).
void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars * sizeof(*dest));
  } else {
    MemCopy(dest, src, chars * sizeof(*dest));
  }
}
   1563 #endif
   1564 
   1565 
// A SimpleStringBuilder that additionally supports printf-style formatted
// appends.
class StringBuilder : public SimpleStringBuilder {
 public:
  explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add formatted contents like printf based on a va_list.
  void AddFormattedList(const char* format, va_list list);
 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
   1579 
   1580 
   1581 } }  // namespace v8::internal
   1582 
   1583 #endif  // V8_UTILS_H_
   1584