// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

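// A PageIterator walks the page chain of a space: prev_page_ is NULL before
// the first call to next(), and iteration is exhausted once prev_page_
// reaches stop_page_.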
bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
               ? space_->first_page_
               : prev_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return heap_->isolate()->memory_allocator()->GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  return owner->PageAllocationTop(this);
}


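// The allocation watermark of a page.  While this page is the one the owning
// space is currently allocating from, the space's top is the freshest value;
// otherwise the offset packed into flags_ is used.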
Address Page::AllocationWatermark() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  if (this == owner->AllocationTopPage()) {
    return owner->top();
  }
  return address() + AllocationWatermarkOffset();
}


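// The watermark is stored as a page offset in the upper bits of flags_,
// above the page flag bits (see kAllocationWatermarkOffsetShift).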
uint32_t Page::AllocationWatermarkOffset() {
  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                               kAllocationWatermarkOffsetShift);
}


void Page::SetAllocationWatermark(Address allocation_watermark) {
  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
    // When iterating over intergenerational references during a scavenge
    // we might decide to promote an encountered young object.  We will
    // allocate space for such an object and put it into the promotion
    // queue to process later.
    // If the space for the object was allocated beyond the allocation
    // watermark, this might cause garbage pointers to appear below the
    // watermark.  To avoid visiting them during the dirty-regions
    // iteration, which might still be in progress, we store a valid
    // allocation watermark value and mark this page as having an invalid
    // watermark.
    SetCachedAllocationWatermark(AllocationWatermark());
    InvalidateWatermark(true);
  }

  flags_ = (flags_ & kFlagsMask) |
           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
  ASSERT(AllocationWatermarkOffset()
         == static_cast<uint32_t>(Offset(allocation_watermark)));
}


void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
  mc_first_forwarded = allocation_watermark;
}


Address Page::CachedAllocationWatermark() {
  return mc_first_forwarded;
}


uint32_t Page::GetRegionMarks() {
  return dirty_regions_;
}


void Page::SetRegionMarks(uint32_t marks) {
  dirty_regions_ = marks;
}


int Page::GetRegionNumberForAddress(Address addr) {
  // Each page is divided into 256-byte regions.  Each region has a
  // corresponding dirty mark bit in the page header.  A region can contain
  // intergenerational references iff its dirty mark is set.
  // A normal 8K page contains exactly 32 regions, so all region marks fit
  // into a 32-bit integer field.  To calculate a region number we simply
  // divide the offset inside the page by the region size.
  // A large page can contain more than 32 regions.  But we want to avoid
  // additional write barrier code for distinguishing between large and
  // normal pages, so we simply ignore the fact that addr points into a large
  // page and calculate the region number as if addr pointed into a normal
  // 8K page.  This way we get a region number modulo 32, so for large pages
  // several regions might be mapped to a single dirty mark.
  ASSERT_PAGE_ALIGNED(this->address());
  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

  // We use masking with kPageAlignmentMask instead of Page::Offset() to get
  // the offset from the beginning of the 8K page containing addr, not from
  // the beginning of the actual page, which can be bigger than 8K.
  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}


uint32_t Page::GetRegionMaskForAddress(Address addr) {
  return 1 << GetRegionNumberForAddress(addr);
}


uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
  uint32_t result = 0;
  if (length_in_bytes >= kPageSize) {
    result = kAllRegionsDirtyMarks;
  } else if (length_in_bytes > 0) {
    int start_region = GetRegionNumberForAddress(start);
    int end_region =
        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
    uint32_t start_mask = (~0) << start_region;
    uint32_t end_mask = ~((~1) << end_region);
    result = start_mask & end_mask;
    // If end_region < start_region the region numbers have wrapped around
    // (modulo 32) and the two masks do not overlap, so OR them instead.
    if (result == 0) result = start_mask | end_mask;
  }
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    uint32_t expected = 0;
    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
      expected |= GetRegionMaskForAddress(a);
    }
    ASSERT(expected == result);
  }
#endif
  return result;
}


void Page::MarkRegionDirty(Address address) {
  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}


bool Page::IsRegionDirty(Address address) {
  return GetRegionMarks() & GetRegionMaskForAddress(address);
}


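// Clears dirty marks for regions covered by [start, end).  The first region
// is cleared only if start falls on a region boundary (or at the object area
// start); the region containing end is never cleared.  Note that the
// end += 1 adjustment below occurs after rend has already been computed, so
// it does not change which marks are cleared.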
void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
  int rstart = GetRegionNumberForAddress(start);
  int rend = GetRegionNumberForAddress(end);

  if (reaches_limit) {
    end += 1;
  }

  if ((rend - rstart) == 0) {
    return;
  }

  uint32_t bitmask = 0;

  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
      || (start == ObjectAreaStart())) {
    // First region is fully covered.
    bitmask = 1 << rstart;
  }

  while (++rstart < rend) {
    bitmask |= 1 << rstart;
  }

  if (bitmask) {
    SetRegionMarks(GetRegionMarks() & ~bitmask);
  }
}


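// Instead of clearing the WATERMARK_INVALIDATED bit on every page after a
// scavenge, the heap flips the interpretation of that bit (the value that
// counts as "invalidated"), which updates the validity of all pages at once.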
void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}


bool Page::IsWatermarkValid() {
  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
      heap_->page_watermark_invalidated_mark_;
}


void Page::InvalidateWatermark(bool value) {
  if (value) {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             heap_->page_watermark_invalidated_mark_;
  } else {
    flags_ =
        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
        (heap_->page_watermark_invalidated_mark_ ^
         (1 << WATERMARK_INVALIDATED));
  }

  ASSERT(IsWatermarkValid() == !value);
}


bool Page::GetPageFlag(PageFlag flag) {
  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags_ |= static_cast<intptr_t>(1 << flag);
  } else {
    flags_ &= ~static_cast<intptr_t>(1 << flag);
  }
}


void Page::ClearPageFlags() {
  flags_ = 0;
}


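// Resets the per-page GC bookkeeping: the watermark is marked invalid and
// reset to the object area start, and all region dirty marks are cleared.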
void Page::ClearGCFields() {
  InvalidateWatermark(true);
  SetAllocationWatermark(ObjectAreaStart());
  if (heap_->gc_state() == Heap::SCAVENGE) {
    SetCachedAllocationWatermark(ObjectAreaStart());
  }
  SetRegionMarks(kAllRegionsCleanMarks);
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}


bool Page::IsPageExecutable() {
  return GetPageFlag(IS_EXECUTABLE);
}


void Page::SetIsPageExecutable(bool is_page_executable) {
  SetPageFlag(IS_EXECUTABLE, is_page_executable);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

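// Initializes the bookkeeping record for a chunk: base address, size, owning
// space, and the owner's executability and space identity (defaults are used
// when there is no owner).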
void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
  address_ = a;
  size_ = s;
  owner_ = o;
  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
}


bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


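// A page's opaque_header packs two values: the page-aligned address of the
// next page in the chunk's page list (the high bits) and the id of the chunk
// the page belongs to (the low kPageAlignmentMask bits).  The accessors
// below extract and update those two fields.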
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace

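// A paged space contains an address iff the address lies on a valid page
// whose chunk is owned by this space.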
bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g., move to the next page or try free list
// allocation), so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}


// Reallocating (and promoting) objects during a compacting collection.
MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

Address LargeObjectChunk::GetStartAddress() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the start of the object area in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return page->ObjectAreaStart();
}


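// Releases the memory backing this chunk.  The isolate is recovered via the
// heap_ field of the chunk's first page before the raw memory is freed.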
void LargeObjectChunk::Free(Executability executable) {
  Isolate* isolate =
      Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
  isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
}

// -----------------------------------------------------------------------------
// NewSpace

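// Bump-pointer allocation for the new space.  Returns a retry-after-GC
// failure when the requested size does not fit below the limit of the given
// allocation info; the DEBUG check below verifies that the top and limit
// stay inside the corresponding semispace.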
MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
                                           AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


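// An estimate of the space still available for large objects, based on the
// memory allocator's remaining capacity.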
intptr_t LargeObjectSpace::Available() {
  return LargeObjectChunk::ObjectSizeFor(
      heap()->isolate()->memory_allocator()->Available());
}


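// Shrinks a sequential string that was the most recent allocation in new
// space: the allocation top is moved back to the end of the shortened
// string, reclaiming the tail without leaving a filler object behind.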
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
}


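// Free list nodes are disguised as heap objects: a node carries the map of a
// byte array or of a one- or two-pointer filler, which is what this
// predicate checks for.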
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == HEAP->raw_unchecked_byte_array_map()
      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_