// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
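
// Illustrative sketch (not part of this header): a callback conforming to
// IsAliveFunction might look like the following, using the Marking helpers
// declared below; it reports the object's size and ignores *offset.
//
//   static bool IsAliveExample(HeapObject* obj, int* size, int* offset) {
//     if (!Marking::IsBlackOrGrey(Marking::MarkBitFrom(obj))) return false;
//     *size = obj->Size();
//     return true;
//   }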

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
      return true;
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
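
// Illustrative sketch of the two-bit color encoding above (white 00,
// grey 10, black 11): a hypothetical marking step only moves an object
// forward through white -> grey -> black, e.g.
//
//   MarkBit bit = Marking::MarkBitFrom(object);
//   if (Marking::IsWhite(bit)) Marking::WhiteToGrey(bit);
//   // ... the object is pushed on the marking deque and later drained ...
//   if (Marking::IsGrey(bit)) Marking::GreyToBlack(bit);
//   DCHECK(Marking::IsBlack(bit));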

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when (top_ + 1) == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
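
// Illustrative sketch (hypothetical caller) of the deque protocol above: a
// failed Push sets the overflow flag, which is cleared again after the heap
// has been rescanned for grey objects.
//
//   if (!marking_deque->Push(object)) {
//     // Deque is full; the object stays grey and is recovered by a rescan.
//   }
//   while (!marking_deque->IsEmpty()) {
//     HeapObject* obj = marking_deque->Pop();
//     // ... visit obj ...
//   }
//   if (marking_deque->overflowed()) {
//     // Refill from the heap, then marking_deque->ClearOverflowed().
//   }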


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
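
// Illustrative sketch (hypothetical call order, mirroring the comment above):
// candidates are collected while marking and flushed afterwards.
//
//   code_flusher->AddCandidate(shared_info);   // during marking
//   code_flusher->AddCandidate(js_function);
//   code_flusher->ProcessCandidates();         // after marking has completed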


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell, or 0 if there is no next cell.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
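
// Illustrative sketch (hypothetical caller): walking the mark bit cells of a
// chunk, one cell covering 32 mark bits (i.e. 32 * kPointerSize bytes).
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // ... scan the set bits in |cell|; offsets are relative to |base| ...
//   }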

// Grey objects can occur on black pages when black objects transition to
// grey, e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
    // Black pages cannot be iterated.
    DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
  }

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
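
// Illustrative sketch (hypothetical caller): iterating the black objects of a
// non-black page; Next() returns nullptr once the page is exhausted.
//
//   LiveObjectIterator<kBlackObjects> it(page);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process |object| ...
//   }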

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
    enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
              SkipListRebuildingMode skip_list_mode,
              FreeListRebuildingMode free_list_mode,
              FreeSpaceTreatmentMode free_space_mode>
    static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          late_pages_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }
    bool contains_late_pages() { return late_pages_; }

    void AddPage(AllocationSpace space, Page* page);
    void AddLatePage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    void StartSweeping();
    void StartSweepingHelper(AllocationSpace space_to_start);
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool IsSweepingCompleted();
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    bool late_pages_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };
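
  // Illustrative sketch (hypothetical call order) of the Sweeper life cycle:
  // pages are added per space, sweeping is started, and the collector later
  // helps out and waits for completion.
  //
  //   sweeper.AddPage(OLD_SPACE, page);
  //   sweeper.StartSweeping();
  //   int freed = sweeper.ParallelSweepSpace(OLD_SPACE, required_freed_bytes);
  //   sweeper.EnsureCompleted();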

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following two methods can only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while the mark bits are still intact.
  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
  HeapObject* FindBlackObjectBySlotSlow(Address slot);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  Sweeper& sweeper() { return sweeper_; }

  void RegisterWrappersWithEmbedderHeapTracer();

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);

  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }

  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }

  void TracePossibleWrapper(JSObject* js_object);

  void RegisterExternallyReferencedObject(Object** object);

 private:
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void ClearInvalidRememberedSetSlots();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references that are only
  // considered in the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups,
  //      implicit reference groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and, if needed, trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  std::vector<std::pair<void*, void*>> wrappers_to_trace_;

  CodeFlusher* code_flusher_;

  EmbedderHeapTracer* embedder_heap_tracer_;

  List<Page*> evacuation_candidates_;
  List<Page*> newspace_evacuation_candidates_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
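
// Illustrative usage sketch: the scope flags the collector as evacuating for
// the duration of a block, e.g.
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... evacuate pages; collector->evacuation() is true here ...
//   }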


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_