// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset to
// the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

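// Illustrative sketch only (not part of the V8 sources): a trivial callback
// matching the IsAliveFunction signature above. It treats every object as
// live and reports its heap size; a real callback would consult the mark
// bits and could also update *offset.
//
//   static bool EverythingIsAlive(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();
//     return true;
//   }
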
// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
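
// Illustrative sketch only (not part of the V8 sources): classifying an
// object's color with the helpers above. The two adjacent mark bits encode
// white (00), black (10) and grey (11); the pattern 01 is impossible by
// construction.
//
//   MarkBit bit = Marking::MarkBitFrom(object);
//   if (Marking::IsWhite(bit)) {
//     Marking::WhiteToGrey(bit);   // Newly discovered: push on the deque.
//   } else if (Marking::IsGrey(bit)) {
//     // Discovered but not yet scanned.
//   } else {
//     ASSERT(Marking::IsBlack(bit));  // Fully scanned.
//   }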

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
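
// Illustrative sketch only (not part of the V8 sources): the shape of the
// marking worklist loop against the interface above. `low` and `high` are
// assumed to delimit a power-of-two sized scratch buffer provided by the
// caller.
//
//   MarkingDeque deque;
//   deque.Initialize(low, high);
//   deque.PushGrey(root);
//   while (!deque.IsEmpty()) {
//     HeapObject* object = deque.Pop();
//     // ... visit the object's fields, pushing newly discovered objects ...
//   }
//   if (deque.overflowed()) {
//     // Some grey objects were dropped; rescan the heap and refill.
//   }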


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address,
                           ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
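
// Illustrative sketch only (not part of the V8 sources): recording an untyped
// and a typed slot through the AddTo overloads above, then replaying them
// after evacuation. `slot` (an Object**), `pc` (an Address) and `heap` are
// assumed to come from the caller. Because the zero page is assumed to be
// unmapped, a buffer element smaller than NUMBER_OF_SLOT_TYPES can only be
// the SlotType half of a typed pair.
//
//   SlotsBufferAllocator allocator;
//   SlotsBuffer* buffer = NULL;
//   SlotsBuffer::AddTo(&allocator, &buffer, slot,
//                      SlotsBuffer::FAIL_ON_OVERFLOW);
//   SlotsBuffer::AddTo(&allocator, &buffer, SlotsBuffer::CODE_TARGET_SLOT,
//                      pc, SlotsBuffer::IGNORE_OVERFLOW);
//   SlotsBuffer::UpdateSlotsRecordedIn(heap, buffer, false);
//   allocator.DeallocateChain(&buffer);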


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    ASSERT(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
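
// Illustrative sketch only (not part of the V8 sources): the intended call
// pattern for the flusher above during a full collection. Candidates are
// threaded into singly linked lists through next_function_link (JSFunction),
// code()->gc_metadata() (SharedFunctionInfo) and the kNextMapIndex entry of
// the optimized code map.
//
//   CodeFlusher* flusher = collector->code_flusher();
//   flusher->AddCandidate(shared_info);   // During marking.
//   flusher->AddCandidate(closure);
//   flusher->ProcessCandidates();         // After marking has completed.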


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces.  Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding.  There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* object,
                                             int object_size);

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address.  For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page.  For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);

  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information.  Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    LAZY_CONSERVATIVE,
    PARALLEL_CONSERVATIVE,
    CONCURRENT_CONSERVATIVE,
    PRECISE
  };

  enum SweepingParallelism {
    SWEEP_SEQUENTIALLY,
    SWEEP_IN_PARALLEL
  };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedMapsInOptimizedCode();
  void VerifyOmittedMapChecks();
#endif

  // Sweep a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  template<SweepingParallelism type>
  static intptr_t SweepConservatively(PagedSpace* space,
                                      FreeList* free_list,
                                      Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates, thus we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_collections() {
    return encountered_weak_collections_;
  }
  inline void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support.
  void SweepInParallel(PagedSpace* space,
                       FreeList* private_free_list,
                       FreeList* free_list);

  void WaitUntilSweepingCompleted();

  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  bool IsConcurrentSweepingInProgress();

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const {
    return sequential_sweeping_;
  }

  // Parallel marking support.
  void MarkInParallel();

  void WaitUntilMarkingCompleted();

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void UnlinkEvacuationCandidates();
  void ReleaseEvacuationCandidates();

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_pending_;

  bool sequential_sweeping_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit references groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit references' groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  void ClearAndDeoptimizeDependentCode(Map* map);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // after collection. This is either done during the
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked; non-live regions have been added to
  //          their space's free list. The active eden semispace is compacted
  //          by evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
                                           NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_collections_;
  Object* code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  friend class Heap;
};
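
// Illustrative sketch only (not part of the V8 sources): the order in which
// the heap drives a full mark-compact cycle through the public interface
// above. `collector`, `flags` and `tracer` are assumed to come from the
// embedding Heap code.
//
//   collector->SetFlags(flags);   // Must happen before Prepare().
//   collector->Prepare(tracer);   // Reset relocation info, pick candidates.
//   collector->CollectGarbage();  // Mark, sweep/compact, update pointers.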


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk)
      : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
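
// Illustrative sketch only (not part of the V8 sources): walking the mark
// bitmap of a chunk cell by cell; each cell covers 32 consecutive
// pointer-size words of the chunk's area.
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // ... decode live object starts from the bits set in *cell ...
//   }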


class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector) :
    collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() {
    collector_->set_sequential_sweeping(false);
  }

 private:
  MarkCompactCollector* collector_;
};
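
// Illustrative sketch only (not part of the V8 sources): the RAII scope above
// forces sequential sweeping for the duration of a block. `collector` is an
// assumed MarkCompactCollector* supplied by the caller.
//
//   {
//     SequentialSweepingScope scope(collector);
//     // ... work that must not run concurrently with sweeper threads ...
//   }  // sequential_sweeping() reverts to false here.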


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_