// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/base/platform/condition-variable.h"
#include "src/cancelable-task.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

enum class MarkCompactMode { FULL, YOUNG_GENERATION };

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in |size|. It optionally updates the offset to
// the first live object on the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
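
// Illustrative sketch only (this function is hypothetical and not part of
// this header): a callback matching MarkObjectFunction that marks a white
// object black and pushes it onto the marking deque.
//
//   void ExampleMarkObject(Heap* heap, HeapObject* object) {
//     if (ObjectMarking::IsWhite(object)) {
//       ObjectMarking::WhiteToBlack(object);  // also accounts for live bytes
//       heap->mark_compact_collector()->marking_deque()->Push(object);
//     }
//   }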

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
template <MarkCompactMode mode>
class RootMarkingVisitor;

class ObjectMarking : public AllStatic {
 public:
  V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj) {
    const Address address = obj->address();
    MemoryChunk* p = MemoryChunk::FromAddress(address);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
  }

  static Marking::ObjectColor Color(HeapObject* obj) {
    return Marking::Color(ObjectMarking::MarkBitFrom(obj));
  }

  V8_INLINE static bool IsImpossible(HeapObject* obj) {
    return Marking::IsImpossible(MarkBitFrom(obj));
  }

  V8_INLINE static bool IsBlack(HeapObject* obj) {
    return Marking::IsBlack(MarkBitFrom(obj));
  }

  V8_INLINE static bool IsWhite(HeapObject* obj) {
    return Marking::IsWhite(MarkBitFrom(obj));
  }

  V8_INLINE static bool IsGrey(HeapObject* obj) {
    return Marking::IsGrey(MarkBitFrom(obj));
  }

  V8_INLINE static bool IsBlackOrGrey(HeapObject* obj) {
    return Marking::IsBlackOrGrey(MarkBitFrom(obj));
  }

  V8_INLINE static void ClearMarkBit(HeapObject* obj) {
    Marking::MarkWhite(MarkBitFrom(obj));
  }

  V8_INLINE static void BlackToWhite(HeapObject* obj) {
    DCHECK(IsBlack(obj));
    MarkBit markbit = MarkBitFrom(obj);
    Marking::BlackToWhite(markbit);
    MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
  }

  V8_INLINE static void GreyToWhite(HeapObject* obj) {
    DCHECK(IsGrey(obj));
    Marking::GreyToWhite(MarkBitFrom(obj));
  }

  V8_INLINE static void BlackToGrey(HeapObject* obj) {
    DCHECK(IsBlack(obj));
    MarkBit markbit = MarkBitFrom(obj);
    Marking::BlackToGrey(markbit);
    MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
  }

  V8_INLINE static void WhiteToGrey(HeapObject* obj) {
    DCHECK(IsWhite(obj));
    Marking::WhiteToGrey(MarkBitFrom(obj));
  }

  V8_INLINE static void WhiteToBlack(HeapObject* obj) {
    DCHECK(IsWhite(obj));
    MarkBit markbit = MarkBitFrom(obj);
    Marking::WhiteToBlack(markbit);
    MemoryChunk::IncrementLiveBytes(obj, obj->Size());
  }

  V8_INLINE static void GreyToBlack(HeapObject* obj) {
    DCHECK(IsGrey(obj));
    MarkBit markbit = MarkBitFrom(obj);
    Marking::GreyToBlack(markbit);
    MemoryChunk::IncrementLiveBytes(obj, obj->Size());
  }

  V8_INLINE static void AnyToGrey(HeapObject* obj) {
    MarkBit markbit = MarkBitFrom(obj);
    if (Marking::IsBlack(markbit)) {
      MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
    }
    Marking::AnyToGrey(markbit);
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
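
// Illustrative sketch of the tri-color protocol these helpers implement
// (typical usage is an assumption for illustration; see the marking phase
// in MarkCompactCollector below for the real flow):
//
//   // A newly discovered white object becomes grey and is queued; scanning
//   // it later turns it black, which accounts for its live bytes.
//   if (ObjectMarking::IsWhite(obj)) {
//     ObjectMarking::WhiteToGrey(obj);
//     // ... push obj onto the marking deque ...
//   }
//   // When obj is popped and its fields have been visited:
//   ObjectMarking::GreyToBlack(obj);  // increments live bytes by obj->Size()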

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  explicit MarkingDeque(Heap* heap)
      : backing_store_(nullptr),
        backing_store_committed_size_(0),
        array_(nullptr),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false),
        uncommit_task_pending_(false),
        heap_(heap) {}

  void SetUp();
  void TearDown();

  // Ensures that the marking deque is committed and will stay committed until
  // StopUsing() is called.
  void StartUsing();
  void StopUsing();
  void Clear();

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  // This task uncommits the marking deque backing store if
  // marking_deque_->in_use_ is false.
  class UncommitTask : public CancelableTask {
   public:
    explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
        : CancelableTask(isolate), marking_deque_(marking_deque) {}

   private:
    // CancelableTask override.
    void RunInternal() override {
      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
      if (!marking_deque_->in_use_) {
        marking_deque_->Uncommit();
      }
      marking_deque_->uncommit_task_pending_ = false;
    }

    MarkingDeque* marking_deque_;
    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
  };

  static const size_t kMaxSize = 4 * MB;
  static const size_t kMinSize = 256 * KB;

  // Must be called with mutex lock.
  void EnsureCommitted();

  // Must be called with mutex lock.
  void Uncommit();

  // Must be called with mutex lock.
  void StartUncommitTask();

  base::Mutex mutex_;

  base::VirtualMemory* backing_store_;
  size_t backing_store_committed_size_;
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
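  // Worked example (illustrative): with mask_ == 7 (an 8-entry array),
  // top_ == 7 and bottom_ == 0 give ((top_ + 1) & mask_) == bottom_, so the
  // deque is full while holding 7 elements; one slot always stays empty to
  // distinguish full from empty.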
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  // in_use_ == true after taking mutex lock implies that the marking deque is
  // committed and will stay committed at least until in_use_ == false.
  bool in_use_;
  bool uncommit_task_pending_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
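
// Illustrative usage sketch for MarkingDeque (the surrounding calls are an
// assumption for illustration; the collector drives this from
// MarkLiveObjects and ProcessMarkingDeque below):
//
//   MarkingDeque deque(heap);
//   deque.SetUp();
//   deque.StartUsing();
//   if (!deque.Push(object)) {
//     // The deque overflowed; the overflow flag is now set and the heap
//     // must be rescanned later (see RefillMarkingDeque).
//   }
//   while (!deque.IsEmpty()) {
//     HeapObject* obj = deque.Pop();
//     // ... visit obj's fields ...
//   }
//   deque.StopUsing();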


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that were
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
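
// Illustrative candidate lifecycle for CodeFlusher (a sketch; the exact call
// sites are an assumption, but the collector performs these steps during and
// after marking):
//
//   CodeFlusher* flusher = collector->code_flusher();
//   flusher->AddCandidate(shared_info);  // collected while marking
//   ...
//   // After marking: resets code references of candidates whose code was
//   // not marked as active, making that code flushable.
//   flusher->ProcessCandidates();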


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
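
// Illustrative iteration sketch for MarkBitCellIterator (an assumed usage
// pattern; LiveObjectIterator below wraps the same loop):
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // ... decode set bits in *cell relative to base ...
//   }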

// Grey objects can occur on black pages when black objects transition to
// grey, e.g., when calling RecordWrites on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {}

  HeapObject* Next();

 private:
  inline Heap* heap() { return chunk_->heap(); }

  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
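
// Illustrative usage sketch for LiveObjectIterator (an assumption for
// illustration; Next() returns nullptr once all matching objects on the
// chunk have been visited):
//
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... object is live (marked black) on this chunk ...
//   }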

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
    enum ClearOldToNewSlotsMode {
      DO_NOT_CLEAR,
      CLEAR_REGULAR_SLOTS,
      CLEAR_TYPED_SLOTS
    };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                        FreeSpaceTreatmentMode free_space_mode);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }

    void AddPage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    // After calling this function, sweeping is considered to be in progress
    // and the main thread can sweep lazily, but the background sweeper tasks
    // are not running yet.
    void StartSweeping();
    void StartSweeperTasks();
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool AreSweeperTasksRunning();
    bool IsSweepingCompleted(AllocationSpace space);
    void SweepOrWaitUntilSweepingCompleted(Page* page);
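
    // Illustrative ordering sketch (an assumption derived from the comment
    // above, not a prescribed contract):
    //
    //   sweeper.StartSweeping();      // sweeping in progress; the main
    //                                 // thread may now sweep lazily
    //   sweeper.StartSweeperTasks();  // background sweeper tasks join in
    //   ...
    //   sweeper.EnsureCompleted();    // blocks until sweeping has finished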

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  static SlotCallbackResult CheckAndMarkObject(Heap* heap,
                                               Address slot_address);

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  bool StartCompaction();

  void AbortCompaction();

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  Sweeper& sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

 private:
  template <PageEvacuationMode mode>
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;
  class ObjectStatsVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void VisitAllObjects(HeapObjectVisitor* visitor);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  template <MarkCompactMode mode>
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;
  friend class StaticYoungGenerationMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();
  // Mark the young generation.
  void MarkLiveObjectsInYoungGeneration();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  template <MarkCompactMode mode>
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups,
  //      implicit reference groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  template <MarkCompactMode mode>
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  template <MarkCompactMode mode>
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and, if needed, trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue();
  void EvacuateEpilogue();
  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingDeque marking_deque_;

  CodeFlusher* code_flusher_;

  // Candidates for pages that should be evacuated.
  List<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  List<Page*> old_space_evacuation_pages_;
  List<Page*> new_space_evacuation_pages_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
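
// Illustrative RAII usage sketch for EvacuationScope (an assumed pattern;
// the collector creates such scopes around evacuation work):
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true within this scope ...
//   }  // evacuation() reverts to false here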

V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_