// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "src/compiler-intrinsics.h"
#include "src/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

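// Marking uses two adjacent mark bits per object: the object's mark bit and
// the bit that follows it (mark_bit.Next()). The bit patterns recognized by
// the predicates below are:
//   white:      00  (unmarked)
//   grey:       11  (marked, but not yet scanned in the usual tri-color sense)
//   black:      10  (marked and scanned)
//   impossible: 01  (must never occur)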
class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
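  // The mark bits of |from| are copied onto those of |to| (which is assumed
  // to start out white): a black source makes |to| black and returns true,
  // while a grey or white source leaves |to| grey or white and returns false.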
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
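// The deque is a fixed-size ring buffer over externally provided memory
// (see Initialize). The capacity is rounded down to a power of two so that
// indices can wrap with a simple mask. When the deque fills up, objects are
// left grey, the overflowed_ flag is set, and marking later rescans the heap
// to pick them up again (see MarkCompactCollector::RefillMarkingDeque).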
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the deque as overflowed and wait for a rescan of the
  // heap.
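  // Note: on overflow the (black) object is reverted to grey, so its size is
  // subtracted from the page's live-byte count here; it will be counted again
  // when the object is blackened once more during the rescan.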
  INLINE(void PushBlack(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
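  // For example, with mask_ == 7 the backing array has 8 slots but holds at
  // most 7 objects, because ((top_ + 1) & mask_) == bottom_ already counts
  // as full.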
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
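// For example, a buffer holding two untyped slots followed by one typed slot
// would store: addr1, addr2, CODE_TARGET_SLOT, addr3.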
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

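  // Buffers are chained through next_, with the newest (partially filled)
  // buffer at the head; every buffer further down the chain is full, which is
  // what makes the size computation below valid.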
  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

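  // Adds |slot| to the chain rooted at |*buffer_address|, allocating a new
  // head buffer when the current one is full. In FAIL_ON_OVERFLOW mode the
  // whole chain is deallocated and false is returned once the chain length
  // threshold has been reached; the caller is expected to handle that failure.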
  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address,
                           ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
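// The candidate lists below are intrusive: they are threaded through fields
// of the candidates themselves (a JSFunction's next_function_link field, the
// gc_metadata slot of a SharedFunctionInfo's code, and the kNextMapIndex
// entry of the optimized code map), so no extra memory is needed during GC.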
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    ASSERT(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Sets the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information.  Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determines the type of the object and emits a deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    PARALLEL_CONSERVATIVE,
    CONCURRENT_CONSERVATIVE,
    PRECISE
  };

  enum SweepingParallelism {
    SWEEP_SEQUENTIALLY,
    SWEEP_IN_PARALLEL
  };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  // Sweeps a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  template<SweepingParallelism type>
  static intptr_t SweepConservatively(PagedSpace* space,
                                      FreeList* free_list,
                                      Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates, so we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

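  // The Record* functions below add slots that point into evacuation
  // candidates to the slots buffers, so that the pointers can be updated
  // after the evacuated objects have been moved.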
  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot,
                         Object** slot,
                         Object* object,
                         SlotsBuffer::AdditionMode mode =
                             SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst,
                     HeapObject* src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support.
  void SweepInParallel(PagedSpace* space);

  void WaitUntilSweepingCompleted();

  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  bool IsConcurrentSweepingInProgress();

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const {
    return sequential_sweeping_;
  }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

 private:
  class SweeperTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_pending_;

  Semaphore pending_sweeper_jobs_semaphore_;

  bool sequential_sweeping_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit references groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  void ClearDependentCode(DependentCode* dependent_code);
  void ClearDependentICList(Object* head);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);
  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                       int start, int end, int new_start);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
                                           NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space's page list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};

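// Iterates over the mark bitmap of a memory chunk one cell at a time. Each
// cell is a MarkBit::CellType word of 32 mark bits, so advancing by one cell
// moves cell_base_ forward by 32 * kPointerSize bytes of chunk area.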
class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk)
      : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};

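// RAII scope that forces the collector into sequential sweeping for its
// lifetime: the flag is set in the constructor and cleared again in the
// destructor.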
class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector) :
    collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() {
    collector_->set_sequential_sweeping(false);
  }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_