// (Code-viewer navigation residue, not part of the source: Home | History | Annotate | Download | only in src)
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include <string.h>
      6 
      7 #include "src/v8.h"
      8 #include "src/zone-inl.h"
      9 
     10 namespace v8 {
     11 namespace internal {
     12 
     13 
     14 // Segments represent chunks of memory: They have starting address
     15 // (encoded in the this pointer) and a size in bytes. Segments are
     16 // chained together forming a LIFO structure with the newest segment
     17 // available as segment_head_. Segments are allocated using malloc()
     18 // and de-allocated using free().
     19 
     20 class Segment {
     21  public:
     22   void Initialize(Segment* next, int size) {
     23     next_ = next;
     24     size_ = size;
     25   }
     26 
     27   Segment* next() const { return next_; }
     28   void clear_next() { next_ = NULL; }
     29 
     30   int size() const { return size_; }
     31   int capacity() const { return size_ - sizeof(Segment); }
     32 
     33   Address start() const { return address(sizeof(Segment)); }
     34   Address end() const { return address(size_); }
     35 
     36  private:
     37   // Computes the address of the nth byte in this segment.
     38   Address address(int n) const {
     39     return Address(this) + n;
     40   }
     41 
     42   Segment* next_;
     43   int size_;
     44 };
     45 
     46 
     47 Zone::Zone(Isolate* isolate)
     48     : allocation_size_(0),
     49       segment_bytes_allocated_(0),
     50       position_(0),
     51       limit_(0),
     52       segment_head_(NULL),
     53       isolate_(isolate) {
     54 }
     55 
     56 
     57 Zone::~Zone() {
     58   DeleteAll();
     59   DeleteKeptSegment();
     60 
     61   DCHECK(segment_bytes_allocated_ == 0);
     62 }
     63 
     64 
     65 void* Zone::New(int size) {
     66   // Round up the requested size to fit the alignment.
     67   size = RoundUp(size, kAlignment);
     68 
     69   // If the allocation size is divisible by 8 then we return an 8-byte aligned
     70   // address.
     71   if (kPointerSize == 4 && kAlignment == 4) {
     72     position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
     73   } else {
     74     DCHECK(kAlignment >= kPointerSize);
     75   }
     76 
     77   // Check if the requested size is available without expanding.
     78   Address result = position_;
     79 
     80   int size_with_redzone =
     81 #ifdef V8_USE_ADDRESS_SANITIZER
     82       size + kASanRedzoneBytes;
     83 #else
     84       size;
     85 #endif
     86 
     87   if (size_with_redzone > limit_ - position_) {
     88      result = NewExpand(size_with_redzone);
     89   } else {
     90      position_ += size_with_redzone;
     91   }
     92 
     93 #ifdef V8_USE_ADDRESS_SANITIZER
     94   Address redzone_position = result + size;
     95   DCHECK(redzone_position + kASanRedzoneBytes == position_);
     96   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
     97 #endif
     98 
     99   // Check that the result has the proper alignment and return it.
    100   DCHECK(IsAddressAligned(result, kAlignment, 0));
    101   allocation_size_ += size;
    102   return reinterpret_cast<void*>(result);
    103 }
    104 
    105 
    106 void Zone::DeleteAll() {
    107 #ifdef DEBUG
    108   // Constant byte value used for zapping dead memory in debug mode.
    109   static const unsigned char kZapDeadByte = 0xcd;
    110 #endif
    111 
    112   // Find a segment with a suitable size to keep around.
    113   Segment* keep = NULL;
    114   // Traverse the chained list of segments, zapping (in debug mode)
    115   // and freeing every segment except the one we wish to keep.
    116   for (Segment* current = segment_head_; current != NULL; ) {
    117     Segment* next = current->next();
    118     if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) {
    119       // Unlink the segment we wish to keep from the list.
    120       keep = current;
    121       keep->clear_next();
    122     } else {
    123       int size = current->size();
    124 #ifdef DEBUG
    125       // Un-poison first so the zapping doesn't trigger ASan complaints.
    126       ASAN_UNPOISON_MEMORY_REGION(current, size);
    127       // Zap the entire current segment (including the header).
    128       memset(current, kZapDeadByte, size);
    129 #endif
    130       DeleteSegment(current, size);
    131     }
    132     current = next;
    133   }
    134 
    135   // If we have found a segment we want to keep, we must recompute the
    136   // variables 'position' and 'limit' to prepare for future allocate
    137   // attempts. Otherwise, we must clear the position and limit to
    138   // force a new segment to be allocated on demand.
    139   if (keep != NULL) {
    140     Address start = keep->start();
    141     position_ = RoundUp(start, kAlignment);
    142     limit_ = keep->end();
    143     // Un-poison so we can re-use the segment later.
    144     ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
    145 #ifdef DEBUG
    146     // Zap the contents of the kept segment (but not the header).
    147     memset(start, kZapDeadByte, keep->capacity());
    148 #endif
    149   } else {
    150     position_ = limit_ = 0;
    151   }
    152 
    153   // Update the head segment to be the kept segment (if any).
    154   segment_head_ = keep;
    155 }
    156 
    157 
    158 void Zone::DeleteKeptSegment() {
    159 #ifdef DEBUG
    160   // Constant byte value used for zapping dead memory in debug mode.
    161   static const unsigned char kZapDeadByte = 0xcd;
    162 #endif
    163 
    164   DCHECK(segment_head_ == NULL || segment_head_->next() == NULL);
    165   if (segment_head_ != NULL) {
    166     int size = segment_head_->size();
    167 #ifdef DEBUG
    168     // Un-poison first so the zapping doesn't trigger ASan complaints.
    169     ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
    170     // Zap the entire kept segment (including the header).
    171     memset(segment_head_, kZapDeadByte, size);
    172 #endif
    173     DeleteSegment(segment_head_, size);
    174     segment_head_ = NULL;
    175   }
    176 
    177   DCHECK(segment_bytes_allocated_ == 0);
    178 }
    179 
    180 
    181 // Creates a new segment, sets it size, and pushes it to the front
    182 // of the segment chain. Returns the new segment.
    183 Segment* Zone::NewSegment(int size) {
    184   Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
    185   adjust_segment_bytes_allocated(size);
    186   if (result != NULL) {
    187     result->Initialize(segment_head_, size);
    188     segment_head_ = result;
    189   }
    190   return result;
    191 }
    192 
    193 
    194 // Deletes the given segment. Does not touch the segment chain.
    195 void Zone::DeleteSegment(Segment* segment, int size) {
    196   adjust_segment_bytes_allocated(-size);
    197   Malloced::Delete(segment);
    198 }
    199 
    200 
    201 Address Zone::NewExpand(int size) {
    202   // Make sure the requested size is already properly aligned and that
    203   // there isn't enough room in the Zone to satisfy the request.
    204   DCHECK(size == RoundDown(size, kAlignment));
    205   DCHECK(size > limit_ - position_);
    206 
    207   // Compute the new segment size. We use a 'high water mark'
    208   // strategy, where we increase the segment size every time we expand
    209   // except that we employ a maximum segment size when we delete. This
    210   // is to avoid excessive malloc() and free() overhead.
    211   Segment* head = segment_head_;
    212   const size_t old_size = (head == NULL) ? 0 : head->size();
    213   static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
    214   const size_t new_size_no_overhead = size + (old_size << 1);
    215   size_t new_size = kSegmentOverhead + new_size_no_overhead;
    216   const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
    217   // Guard against integer overflow.
    218   if (new_size_no_overhead < static_cast<size_t>(size) ||
    219       new_size < static_cast<size_t>(kSegmentOverhead)) {
    220     V8::FatalProcessOutOfMemory("Zone");
    221     return NULL;
    222   }
    223   if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
    224     new_size = kMinimumSegmentSize;
    225   } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
    226     // Limit the size of new segments to avoid growing the segment size
    227     // exponentially, thus putting pressure on contiguous virtual address space.
    228     // All the while making sure to allocate a segment large enough to hold the
    229     // requested size.
    230     new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
    231   }
    232   if (new_size > INT_MAX) {
    233     V8::FatalProcessOutOfMemory("Zone");
    234     return NULL;
    235   }
    236   Segment* segment = NewSegment(static_cast<int>(new_size));
    237   if (segment == NULL) {
    238     V8::FatalProcessOutOfMemory("Zone");
    239     return NULL;
    240   }
    241 
    242   // Recompute 'top' and 'limit' based on the new segment.
    243   Address result = RoundUp(segment->start(), kAlignment);
    244   position_ = result + size;
    245   // Check for address overflow.
    246   // (Should not happen since the segment is guaranteed to accomodate
    247   // size bytes + header and alignment padding)
    248   if (reinterpret_cast<uintptr_t>(position_)
    249       < reinterpret_cast<uintptr_t>(result)) {
    250     V8::FatalProcessOutOfMemory("Zone");
    251     return NULL;
    252   }
    253   limit_ = segment->end();
    254   DCHECK(position_ <= limit_);
    255   return result;
    256 }
    257 
    258 
    259 } }  // namespace v8::internal
    260