// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
    return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
                       obj->address());
  }

  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
    return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
        p->AddressToMarkbitIndex(addr));
  }

  Marking::ObjectColor Color(HeapObject* obj) {
    return Marking::Color(MarkBitFrom(obj));
  }

  V8_INLINE bool IsImpossible(HeapObject* obj) {
    return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlack(HeapObject* obj) {
    return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsWhite(HeapObject* obj) {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsGrey(HeapObject* obj) {
    return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool IsBlackOrGrey(HeapObject* obj) {
    return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToGrey(HeapObject* obj) {
    return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
  }

  V8_INLINE bool WhiteToBlack(HeapObject* obj) {
    return WhiteToGrey(obj) && GreyToBlack(obj);
  }

  V8_INLINE bool GreyToBlack(HeapObject* obj) {
    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
    MarkBit markbit = MarkBitFrom(p, obj->address());
    if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
    static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
    return true;
  }

  void ClearLiveness(MemoryChunk* chunk) {
    static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
    static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
  }
};
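
// Illustrative sketch (not part of the API surface above): a marker typically
// uses the tri-color transitions as follows, assuming |state| is a concrete
// marking state and |worklist| is a marking worklist.
//
//   if (state->WhiteToGrey(obj)) {
//     worklist->Push(obj);            // newly discovered, visit later
//   }
//   ...
//   while (HeapObject* obj = worklist->Pop()) {
//     if (state->GreyToBlack(obj)) {  // also accounts live bytes
//       VisitBody(obj);               // hypothetical visitation step
//     }
//   }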

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
    DCHECK(Bitmap::IsCellAligned(
        chunk_->AddressToMarkbitIndex(chunk_->area_start())));
    DCHECK(Bitmap::IsCellAligned(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->area_start();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
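
// Illustrative sketch: a typical scan walks the bitmap cell by cell, decoding
// set bits into object addresses. The decoding step is elided here.
//
//   MarkBitCellIterator it(chunk, bitmap);
//   while (!it.Done()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // ... decode set bits in |cell| relative to |base| ...
//     if (!it.Advance()) break;
//   }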

enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject*, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    MemoryChunk* const chunk_;
    Map* const one_word_filler_map_;
    Map* const two_word_filler_map_;
    Map* const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject* current_object_;
    int current_size_;
  };

  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {}

  inline iterator begin();
  inline iterator end();

 private:
  MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
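
// Illustrative sketch: because LiveObjectRange provides begin()/end(), live
// objects on a chunk can be walked with a range-based for loop. |chunk|,
// |bitmap|, and |Process| are assumed names for illustration only.
//
//   for (auto object_and_size :
//        LiveObjectRange<kBlackObjects>(chunk, bitmap)) {
//     HeapObject* object = object_and_size.first;
//     int size = object_and_size.second;
//     Process(object, size);
//   }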

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false|
  // for an object. If IterationMode::kClearMarkbits is passed, the markbits
  // and slots of each successfully visited object are cleared.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject** failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
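
// Illustrative sketch: VisitBlackObjects stops at the first object the
// visitor rejects and reports it via |failed_object|, so callers typically
// fall back to a per-page recovery path. Names other than the declarations
// above are assumptions.
//
//   HeapObject* failed_object = nullptr;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, marking_state, &visitor,
//           LiveObjectVisitor::kClearMarkbits, &failed_object)) {
//     // e.g. abort evacuation of this page and record |failed_object|.
//   }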

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() {}

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  static const int kMainThread = 0;
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void ProcessMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                  Address start,
                                                  Address end) = 0;
  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(
      Collector* collector, ItemParallelJob* job,
      RecordMigratedSlotVisitor* record_visitor,
      MigrationObserver* migration_observer, const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes);

  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
                                        IterateableSpace* space,
                                        RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks(int pages);
  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
  int NumberOfParallelToSpacePointerUpdateTasks(int pages);

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};
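
// Illustrative sketch, not the actual implementation: a concrete collector's
// CollectGarbage() is expected to sequence the virtual phases declared above
// roughly as follows.
//
//   void CollectGarbage() override {
//     MarkLiveObjects();         // roots + ProcessMarkingWorklist()
//     ClearNonLiveReferences();  // weak data structures
//     Evacuate();                // EvacuatePrologue/PagesInParallel/Epilogue,
//                                // then UpdatePointersAfterEvacuation()
//   }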

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return chunk->young_generation_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
    : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  // Concurrent marking uses local live bytes.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) const {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};

struct Ephemeron {
  HeapObject* key;
  HeapObject* value;
};

typedef Worklist<Ephemeron, 64> EphemeronWorklist;

// Weak objects encountered during marking.
struct WeakObjects {
  Worklist<TransitionArray*, 64> transition_arrays;

  // Keep track of all EphemeronHashTables in the heap to process
  // them in the atomic pause.
  Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;

  // Keep track of all ephemerons for concurrent marking tasks. Only store
  // ephemerons in these Worklists if both key and value are unreachable at the
  // moment.
  //
  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
  // worklists.
  //
  // current_ephemerons is used as the draining worklist in the current
  // fixpoint iteration.
  EphemeronWorklist current_ephemerons;

  // Stores ephemerons to visit in the next fixpoint iteration.
  EphemeronWorklist next_ephemerons;

  // When draining the marking worklist, newly discovered ephemerons are
  // pushed into this worklist.
  EphemeronWorklist discovered_ephemerons;

  // TODO(marja): For old space, we only need the slot, not the host
  // object. Optimize this by adding a different storage for old space.
  Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
  Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
};
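
// Illustrative sketch of the fixpoint described above; an assumption about
// how the worklists interact, not a copy of the implementation. Each
// iteration drains current_ephemerons and the marking worklist, may push
// into discovered_ephemerons, and defers unresolved entries to
// next_ephemerons for the following iteration.
//
//   do {
//     // move next_ephemerons into current_ephemerons (details elided)
//     work_left = ProcessEphemerons();  // one iteration: drains
//                                       // current_ephemerons, the marking
//                                       // worklist and discovered_ephemerons
//   } while (work_left);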

struct EphemeronMarking {
  std::vector<HeapObject*> newly_discovered;
  bool newly_discovered_overflowed;
  size_t newly_discovered_limit;
};

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
#ifdef V8_CONCURRENT_MARKING
  using MarkingState = IncrementalMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_CONCURRENT_MARKING
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;
  // Wrapper for the shared, bailout, and on-hold worklists.
  class MarkingWorklist {
   public:
    using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;

    // The heap parameter is not used but needed to match the sequential case.
    explicit MarkingWorklist(Heap* heap) {}

    void Push(HeapObject* object) {
      bool success = shared_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    void PushBailout(HeapObject* object) {
      bool success = bailout_.Push(kMainThread, object);
      USE(success);
      DCHECK(success);
    }

    HeapObject* Pop() {
      HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      if (shared_.Pop(kMainThread, &result)) return result;
#ifdef V8_CONCURRENT_MARKING
      // The expectation is that this work list is empty almost all the time
      // and we can thus avoid the emptiness checks by putting it last.
      if (on_hold_.Pop(kMainThread, &result)) return result;
#endif
      return nullptr;
    }

    HeapObject* PopBailout() {
      HeapObject* result;
#ifdef V8_CONCURRENT_MARKING
      if (bailout_.Pop(kMainThread, &result)) return result;
#endif
      return nullptr;
    }

    void Clear() {
      bailout_.Clear();
      shared_.Clear();
      on_hold_.Clear();
    }

    bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }

    bool IsEmpty() {
      return bailout_.IsLocalEmpty(kMainThread) &&
             shared_.IsLocalEmpty(kMainThread) &&
             on_hold_.IsLocalEmpty(kMainThread) &&
             bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
             on_hold_.IsGlobalPoolEmpty();
    }

    int Size() {
      return static_cast<int>(bailout_.LocalSize(kMainThread) +
                              shared_.LocalSize(kMainThread) +
                              on_hold_.LocalSize(kMainThread));
    }

    // Calls the specified callback on each element of the worklists and
    // replaces the element with the result of the callback. If the callback
    // returns nullptr then the element is removed from the worklist.
    // The callback must accept HeapObject* and return HeapObject*.
    template <typename Callback>
    void Update(Callback callback) {
      bailout_.Update(callback);
      shared_.Update(callback);
      on_hold_.Update(callback);
    }

    ConcurrentMarkingWorklist* shared() { return &shared_; }
    ConcurrentMarkingWorklist* bailout() { return &bailout_; }
    ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }

    void Print() {
      PrintWorklist("shared", &shared_);
      PrintWorklist("bailout", &bailout_);
      PrintWorklist("on_hold", &on_hold_);
    }

   private:
    // Prints the stats about the global pool of the worklist.
    void PrintWorklist(const char* worklist_name,
                       ConcurrentMarkingWorklist* worklist);

    // Worklist used for most objects.
    ConcurrentMarkingWorklist shared_;

    // Concurrent marking uses this worklist to bail out of concurrently
    // marking certain object types. These objects are handled later in a STW
    // pause after concurrent marking has finished.
    ConcurrentMarkingWorklist bailout_;

    // Concurrent marking uses this worklist to bail out of marking objects
    // in new space's linear allocation area. Used to avoid black allocation
    // for new space. This allows the compiler to remove write barriers
    // for freshly allocated objects.
    ConcurrentMarkingWorklist on_hold_;
  };
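
  // Illustrative sketch: the main thread drains the wrapper with Pop(), which
  // prefers bailout entries, then shared, then on-hold entries (see Pop()
  // above). |Visit| is an assumed per-object visitation step.
  //
  //   MarkingWorklist* worklist = collector->marking_worklist();
  //   while (HeapObject* object = worklist->Pop()) {
  //     Visit(object);  // may Push() newly discovered grey objects
  //   }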

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stop concurrent marking (either by preempting it right away or waiting for
  // it to complete as requested by |stop_request|).
  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);

  bool StartCompaction();

  void AbortCompaction();

  static inline bool IsOnEvacuationCandidate(Object* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
                                   HeapObject* target);
  V8_INLINE static void RecordSlot(HeapObject* object,
                                   HeapObjectReference** slot,
                                   HeapObject* target);
  void RecordLiveSlotsOnPage(Page* page);

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklist* marking_worklist() { return &marking_worklist_; }

  WeakObjects* weak_objects() { return &weak_objects_; }

  void AddTransitionArray(TransitionArray* array) {
    weak_objects_.transition_arrays.Push(kMainThread, array);
  }

  void AddEphemeronHashTable(EphemeronHashTable* table) {
    weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
  }

  void AddEphemeron(HeapObject* key, HeapObject* value) {
    weak_objects_.discovered_ephemerons.Push(kMainThread,
                                             Ephemeron{key, value});
  }

  void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
    weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
  }

  void AddWeakObjectInCode(HeapObject* object, Code* code) {
    weak_objects_.weak_objects_in_code.Push(kMainThread,
                                            std::make_pair(object, code));
  }

  void AddNewlyDiscovered(HeapObject* object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(PagedSpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
#endif

 private:
  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool WillBeDeoptimized(Code* code);

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject* obj);

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject* obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(ObjectVisitor* visitor);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong, as
  // otherwise they can die and try to deoptimize the underlying code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void ProcessMarkingWorklist() override;

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  template <MarkingWorklistProcessingMode mode>
  void ProcessMarkingWorklistInternal();

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool VisitEphemeron(HeapObject* key, HeapObject* value);

  // Marks ephemerons and drains marking worklist iteratively
  // until a fixpoint is reached.
  void ProcessEphemeronsUntilFixpoint();

  // Drains ephemeron and marking worklists. Single iteration of the
  // fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map* dead_target);
  void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();
  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);

  void ClearMarkbitsInPagedSpace(PagedSpace* space);
  void ClearMarkbitsInNewSpace(NewSpace* space);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingWorklist marking_worklist_;
  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;

  Sweeper* sweeper_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class EphemeronHashTableMarkingTask;
  friend class FullEvacuator;
  friend class Heap;
  friend class RecordMigratedSlotVisitor;
};

template <FixedArrayVisitationMode fixed_array_mode,
          TraceRetainingPathMode retaining_path_mode, typename MarkingState>
class MarkingVisitor final
    : public HeapVisitor<
          int,
          MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
 public:
  typedef HeapVisitor<
      int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
      Parent;

  V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
                           MarkingState* marking_state);

  V8_INLINE bool ShouldVisitMapPointer() { return false; }

  V8_INLINE int VisitAllocationSite(Map* map, AllocationSite* object);
  V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
  V8_INLINE int VisitCodeDataContainer(Map* map, CodeDataContainer* object);
  V8_INLINE int VisitEphemeronHashTable(Map* map, EphemeronHashTable* object);
  V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
  V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
  V8_INLINE int VisitJSFunction(Map* map, JSFunction* object);
  V8_INLINE int VisitMap(Map* map, Map* object);
  V8_INLINE int VisitNativeContext(Map* map, Context* object);
  V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);

  // ObjectVisitor implementation.
  V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
  V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
  V8_INLINE void VisitPointers(HeapObject* host, Object** start,
                               Object** end) final;
  V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
                               MaybeObject** end) final;
  V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
  V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;

 private:
  // Granularity in which FixedArrays are scanned if |fixed_array_mode|
  // is true.
  static const int kProgressBarScanningChunk = 32 * 1024;

  V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);

  V8_INLINE void MarkMapContents(Map* map);

  // Marks the object black without pushing it on the marking work list. Returns
  // true if the object needed marking and false otherwise.
  V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);

  // Marks the object grey and pushes it on the marking work list.
  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);

  MarkingState* marking_state() { return marking_state_; }

  MarkCompactCollector::MarkingWorklist* marking_worklist() const {
    return collector_->marking_worklist();
  }

  Heap* const heap_;
  MarkCompactCollector* const collector_;
  MarkingState* const marking_state_;
};

class EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
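
// Illustrative sketch: EvacuationScope is a RAII helper, so the evacuation
// flag is set for exactly the duration of a block. |collector| is assumed to
// be a MarkCompactCollector*.
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... evacuate pages; collector->evacuation() is true here ...
//   }  // flag is reset automatically on scope exit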

#ifdef ENABLE_MINOR_MC

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector();

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject* obj);
  void ProcessMarkingWorklist() override;
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
                                          Address end) override;
  UpdatingItem* CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);

  int NumberOfParallelMarkingTasks(int pages);

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingVisitor;
};

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_