// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to
// the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  static inline MarkBit MarkBitFrom(Address addr);

  static inline MarkBit MarkBitFrom(HeapObject* obj) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  static inline bool IsImpossible(MarkBit mark_bit) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  static inline bool IsBlack(MarkBit mark_bit) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  static inline bool IsWhite(MarkBit mark_bit) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  static inline bool IsGrey(MarkBit mark_bit) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  static inline void MarkBlack(MarkBit mark_bit) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  static inline void BlackToGrey(MarkBit markbit) {
    markbit.Next().Set();
  }

  static inline void WhiteToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }

  static inline void GreyToBlack(MarkBit markbit) {
    markbit.Next().Clear();
  }

  static inline void BlackToGrey(HeapObject* obj) {
    BlackToGrey(MarkBitFrom(obj));
  }

  static inline void AnyToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }
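
  // Illustrative summary (not additional API): an object's color lives in
  // two adjacent mark bits -- white 00, impossible 01, black 10, grey 11 --
  // and is read and changed through the helpers above, e.g.
  //   MarkBit bit = Marking::MarkBitFrom(obj);
  //   if (Marking::IsWhite(bit)) Marking::WhiteToGrey(bit);  // 00 -> 11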

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.

class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
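    // The capacity is the region size rounded down to a power of two so
    // that indices can wrap with a simple mask; e.g. (illustrative) a
    // region of 1000 slots yields a ring of 512 entries and mask_ == 511.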
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  inline void PushBlack(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
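      // The object will be re-marked black (and its size re-counted) when
      // the deque is refilled from the heap, so undo its contribution to
      // the live byte count here.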
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline void PushGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline HeapObject* Pop() {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  inline void UnshiftGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque
  // is empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
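  // Illustrative example: with mask_ == 7 the ring has eight slots but
  // holds at most seven elements; at top_ == 7, bottom_ == 0 we get
  // (top_ + 1) & mask_ == bottom_, i.e. one slot always stays unused to
  // distinguish a full deque from an empty one.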
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that have to be updated
// after live objects have been relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, it
// is the first element of a typed slot's pair.
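// For example (an illustrative sketch of that iteration; see IsTypedSlot
// below), a scan over the buffer can branch as follows:
//   if (reinterpret_cast<intptr_t>(slots_[i]) < NUMBER_OF_SLOT_TYPES) {
//     // Typed slot: slots_[i] encodes the SlotType, slots_[i + 1] holds
//     // the slot address.
//   } else {
//     // Untyped slot: slots_[i] is the slot address itself.
//   }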
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

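  // Illustrative arithmetic: a chain of three buffers whose head buffer
  // holds 10 slots has SizeOfChain == 10 + 2 * kNumberOfElements == 2052.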
  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    ObjectSlot slot,
                    AdditionMode mode) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }
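
  // Illustrative call site (a sketch with hypothetical names, not
  // prescribed usage): a caller recording slots with FAIL_ON_OVERFLOW can
  // react to an over-long chain by giving up on the page, e.g.
  //   if (!SlotsBuffer::AddTo(&allocator, &buffer, slot,
  //                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
  //     collector->EvictEvacuationCandidate(page);
  //   }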

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces.  Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding.  There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* object,
                                             int object_size);
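
  // A minimal sketch of such a function (illustrative only; assumes the
  // PagedSpace::AllocateRaw interface from spaces.h):
  //   static MaybeObject* AllocateInOldDataSpace(Heap* heap,
  //                                              HeapObject* object,
  //                                              int object_size) {
  //     return heap->old_data_space()->AllocateRaw(object_size);
  //   }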

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address.  For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page.  For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);

  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information.  Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    LAZY_CONSERVATIVE,
    PRECISE
  };

#ifdef DEBUG
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
#endif

  // Sweep a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  static intptr_t SweepConservatively(PagedSpace* space, Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  void EvictEvacuationCandidate(Page* page) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, so we have to rescan the page after
    // evacuation to discover and update all pointers to evacuated
    // objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
  inline void set_encountered_weak_maps(Object* weak_map) {
    encountered_weak_maps_ = weak_map;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);


#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  bool collect_maps_;

  bool flush_monomorphic_ics_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.


  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class StaticMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark non-optimized code for functions inlined into the given optimized
  // code. This prevents it from being flushed.
  void MarkInlinedFunctionsCode(Code* code);

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
  INLINE(void MarkObjectAndPush(HeapObject* value));

  // Marks the object black.  This is for non-incremental marking.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  void ProcessNewlyMarkedObject(HeapObject* obj);

  // Creates back pointers for all map transitions, stores them in
  // the prototype field.  The original prototype pointers are restored
  // in ClearNonLiveTransitions().  All JSObject maps
  // connected by map transitions have the same prototype object, which
  // is why we can use this field temporarily for back pointers.
  void CreateBackPointers();

  // Mark a Map and its DescriptorArray together, skipping transitions.
  void MarkMapContents(Map* map);
  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
  void MarkDescriptorArray(DescriptorArray* descriptors);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the symbol table specially.  References to symbols from the
  // symbol table are weak.
  void MarkSymbolTable();

  // Mark objects in object groups that have at least one object in the
  // group marked.
  void MarkObjectGroups();

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark all objects which are reachable due to host application
  // logic like object groups or implicit reference groups.
  void ProcessExternalMarking();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveTransitions();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // after collection. This is done either during the
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak maps encountered
  // so far.  This might push new objects or even new weak maps onto the
  // marking stack.
  void ProcessWeakMaps();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakMaps();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_maps_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  friend class Heap;
};


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_