// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone.h"

#include <cstring>

#include "src/v8.h"

#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // V8_USE_ADDRESS_SANITIZER

namespace v8 {
namespace internal {

namespace {

#if V8_USE_ADDRESS_SANITIZER

const size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.

#else

#define ASAN_POISON_MEMORY_REGION(start, size) \
  do {                                         \
    USE(start);                                \
    USE(size);                                 \
  } while (false)

#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
  do {                                           \
    USE(start);                                  \
    USE(size);                                   \
  } while (false)

const size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace

// Segments represent chunks of memory: They have a starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together forming a LIFO structure with the newest segment
// available as segment_head_. Segments are allocated using malloc()
// and de-allocated using free().

class Segment {
 public:
  void Initialize(Segment* next, size_t size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = nullptr; }

  size_t size() const { return size_; }
  size_t capacity() const { return size_ - sizeof(Segment); }

  Address start() const { return address(sizeof(Segment)); }
  Address end() const { return address(size_); }

 private:
  // Computes the address of the nth byte in this segment.
  Address address(size_t n) const { return Address(this) + n; }

  Segment* next_;
  size_t size_;
};
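
// Illustration of the resulting memory layout: each segment is one malloc()ed
// block whose first sizeof(Segment) bytes hold the header itself, so
//
//   [ Segment header | payload ... ]
//   ^ this            ^ start()      end() == Address(this) + size()
//
// and capacity() == size() - sizeof(Segment) is the number of payload bytes
// the Zone can hand out from this segment.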


Zone::Zone()
    : allocation_size_(0),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      segment_head_(nullptr) {}


Zone::~Zone() {
  DeleteAll();
  DeleteKeptSegment();

  DCHECK(segment_bytes_allocated_ == 0);
}


void* Zone::New(size_t size) {
  // Round up the requested size to fit the alignment.
  size = RoundUp(size, kAlignment);

  // If the allocation size is divisible by 8 then we return an 8-byte aligned
  // address.
  if (kPointerSize == 4 && kAlignment == 4) {
    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
  } else {
    DCHECK(kAlignment >= kPointerSize);
  }
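  // Worked example of the branchless adjustment above: with kAlignment == 4,
  // suppose size == 16 (divisible by 8) and position_ ends in binary ...0100
  // (4-byte but not 8-byte aligned). Then ((~size) & 4) == 4 and
  // (position_ & 4) == 4, so position_ is bumped by 4 and the block we return
  // is 8-byte aligned. If size is not divisible by 8, ((~size) & 4) == 0 and
  // position_ is left unchanged.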

  // Check if the requested size is available without expanding.
  Address result = position_;

  const size_t size_with_redzone = size + kASanRedzoneBytes;
  if (limit_ < position_ + size_with_redzone) {
    result = NewExpand(size_with_redzone);
  } else {
    position_ += size_with_redzone;
  }

  Address redzone_position = result + size;
  DCHECK(redzone_position + kASanRedzoneBytes == position_);
  ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);

  // Check that the result has the proper alignment and return it.
  DCHECK(IsAddressAligned(result, kAlignment, 0));
  allocation_size_ += size;
  return reinterpret_cast<void*>(result);
}
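
// A minimal usage sketch (assuming the Zone interface declared in zone.h):
//
//   Zone zone;
//   // Raw storage; it stays valid until the zone is reset or destroyed and
//   // is never freed individually.
//   int* counters = static_cast<int*>(zone.New(10 * sizeof(int)));
//
// Zone-allocated objects are typically created through a placement operator
// new that forwards to Zone::New rather than by calling New() directly.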


void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a segment with a suitable size to keep around.
  Segment* keep = nullptr;
  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every segment except the one we wish to keep.
  for (Segment* current = segment_head_; current;) {
    Segment* next = current->next();
    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
      // Unlink the segment we wish to keep from the list.
      keep = current;
      keep->clear_next();
    } else {
      size_t size = current->size();
#ifdef DEBUG
      // Un-poison first so the zapping doesn't trigger ASan complaints.
      ASAN_UNPOISON_MEMORY_REGION(current, size);
      // Zap the entire current segment (including the header).
      memset(current, kZapDeadByte, size);
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we have found a segment we want to keep, we must recompute the
  // variables 'position' and 'limit' to prepare for future allocation
  // attempts. Otherwise, we must clear the position and limit to
  // force a new segment to be allocated on demand.
  if (keep) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
    // Un-poison so we can re-use the segment later.
    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
    // Zap the contents of the kept segment (but not the header).
    memset(start, kZapDeadByte, keep->capacity());
#endif
  } else {
    position_ = limit_ = 0;
  }

  allocation_size_ = 0;
  // Update the head segment to be the kept segment (if any).
  segment_head_ = keep;
}
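
// Sketch of the reset-and-reuse pattern this enables (constants such as
// kMaximumKeptSegmentSize are defined in zone.h):
//
//   Zone zone;
//   void* a = zone.New(128);   // may allocate a fresh segment
//   zone.DeleteAll();          // frees all segments except one small one
//   void* b = zone.New(128);   // likely served from the kept segment
//   zone.DeleteKeptSegment();  // releases the remaining segment as well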


void Zone::DeleteKeptSegment() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
  if (segment_head_ != nullptr) {
    size_t size = segment_head_->size();
#ifdef DEBUG
    // Un-poison first so the zapping doesn't trigger ASan complaints.
    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
    // Zap the entire kept segment (including the header).
    memset(segment_head_, kZapDeadByte, size);
#endif
    DeleteSegment(segment_head_, size);
    segment_head_ = nullptr;
  }

  DCHECK(segment_bytes_allocated_ == 0);
}


// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t size) {
  Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
  segment_bytes_allocated_ += size;
  if (result != nullptr) {
    result->Initialize(segment_head_, size);
    segment_head_ = result;
  }
  return result;
}


// Deletes the given segment. Does not touch the segment chain.
void Zone::DeleteSegment(Segment* segment, size_t size) {
  segment_bytes_allocated_ -= size;
  Malloced::Delete(segment);
}


Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignment));
  DCHECK_LT(limit_, position_ + size);

  // Compute the new segment size. We use a 'high water mark'
  // strategy, where we increase the segment size every time we expand
  // except that we employ a maximum segment size when we delete. This
  // is to avoid excessive malloc() and free() overhead.
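  // Illustrative numbers (the actual bounds kMinimumSegmentSize and
  // kMaximumSegmentSize are defined in zone.h): if the current head segment
  // is 8 KB and the aligned request is 64 bytes, the computation below asks
  // for roughly sizeof(Segment) + kAlignment + 64 + 16 KB. Segment sizes
  // thus roughly double on each expansion until the kMaximumSegmentSize
  // clamp takes over, after which new segments stay near that cap (but are
  // always at least large enough for the request).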
  Segment* head = segment_head_;
  const size_t old_size = (head == nullptr) ? 0 : head->size();
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, which would put pressure on contiguous virtual address
    // space, while still allocating a segment large enough to hold the
    // requested size.
    new_size = Max(min_new_size, kMaximumSegmentSize);
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  Segment* segment = NewSegment(new_size);
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }

  // Recompute 'position' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes plus header and alignment padding.)
  DCHECK(reinterpret_cast<uintptr_t>(position_) >=
         reinterpret_cast<uintptr_t>(result));
  limit_ = segment->end();
  DCHECK(position_ <= limit_);
  return result;
}

}  // namespace internal
}  // namespace v8