// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_ = 0;
int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_cell_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();

    RebuildRSets();

  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

#ifdef DEBUG
  if (compacting_collection_) {
    // We will write bookkeeping information to the remembered set area
    // starting now.
    Page::set_rset_state(Page::NOT_IN_USE);
  }
#endif

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_ = 0;
  live_old_pointer_objects_ = 0;
  live_old_data_objects_ = 0;
  live_code_objects_ = 0;
  live_map_objects_ = 0;
  live_cell_objects_ = 0;
  live_lo_objects_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (ie, we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}
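

// A worked example of the heuristic above (illustrative numbers, not
// measurements): with old_gen_used = 20 MB and old_gen_recoverable = 4 MB,
// fragmentation is (4 * 100.0) / 20 = 20 percent.  Since 20 >
// kFragmentationLimit and 4 MB > kFragmentationAllowed, compact_on_next_gc_
// is set and the next collection will compact.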


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.

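// A minimal sketch of the fixed-point loop described above; the actual
// implementation is split across EmptyMarkingStack, RefillMarkingStack,
// and ProcessMarkingStack below:
//
//   EmptyMarkingStack(visitor);    // Drain the stack; may set the overflow
//                                  // flag and leave overflowed objects.
//   while (marking_stack.overflowed()) {
//     RefillMarkingStack();        // Sweep the heap for overflowed objects.
//     EmptyMarkingStack(visitor);  // Drain again; may overflow again.
//   }
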
static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // remembered set.  Therefore, we only replace the string with its left
  // substring when the remembered set does not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
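

// An illustration of the shortcut above: for a cons string c with
// c->first() == "foo" and c->second() == Heap::empty_string(), the slot
// holding c is updated in place to point directly at "foo", so later phases
// never see the dead cons wrapper.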


// Helper class for marking pointers in HeapObjects.
class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence());
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

 private:
  // Mark object pointed to by p.
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Tells whether the mark sweep collection will perform compaction.
  bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }

  // Visit an unmarked object.
  void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }
};


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  MarkingVisitor* stack_visitor() { return &stack_visitor_; }

 private:
  MarkingVisitor stack_visitor_;

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);
    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        &stack_visitor_);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  MarkingVisitor visitor;  // Has no state or contents.
  visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
                        HeapObject::RawField(map, Map::kSize));
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs.  If the details say
  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // or NULL_DESCRIPTOR, we don't mark the value as live.  Only for
  // type MAP_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}
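

// For illustration, the contents array referenced above is laid out as
// (value, details) pairs:
//   contents: [ value0, details0, value1, details1, ... ]
// Only value slots whose details type is below FIRST_PHANTOM_PROPERTY_TYPE
// are treated as strong references and pushed on the marking stack.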


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack(&marker);
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}
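

// An example of the group semantics above: for a group {a, b, c} in which
// only a is currently marked, b and c are marked (colored gray) as well,
// since group members keep each other alive.  A group with no marked member
// is left untouched and may be collected as a whole.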


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        visitor);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.  After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
  EmptyMarkingStack(visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor);
  }
}


void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack(visitor);
  }
}


void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked.  Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked.  Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table.  Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_++;
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_++;
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_++;
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_++;
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_++;
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_++;
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_++;
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions.  At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once.  This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}
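

// A worked example of the walk above: suppose map C was transitioned from
// B, and B from A, so the back pointers (temporarily stored in the
// prototype fields) run C -> B -> A -> P, where P is the real prototype.
// If C and B are dead but A is live, the first loop finds P, and the second
// loop, on reaching the live map A from the dead path, clears A's
// transitions to the dead maps and restores P as the prototype of each map.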

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object).  A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid.  We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}
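

// A worked example of the encoding above: a free region of exactly kIntSize
// bytes is encoded as the single word kSingleFreeEncoding (0); a free region
// of, say, 12 bytes is encoded as kMultiFreeEncoding (1) in its first word
// and the total size 12 in its second word.  Both values are below
// Page::kObjectStartOffset, so they can never be confused with a valid map
// page offset in a forwarding-address encoding.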


// Try to promote all objects in new space.  Heap numbers and sequential
// strings are promoted to old data space and all others to old pointer
// space (see Heap::TargetSpace).  Objects too large for a page are not
// promoted and are instead reallocated in new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}
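

// An example of the scheme above: an object at offset 64 from the bottom of
// to space has its forwarding address written at offset 64 from the bottom
// of from space.  From space is free for this bookkeeping because the
// marking phase, which used it as the marking stack, has completed.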


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}
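

// A worked example of the encoding above: if the first three live objects
// on a page have sizes 24, 16, and 32 bytes, they are encoded with live-byte
// offsets 0, 24, and 40, and the page's mc_first_forwarded field records the
// new address of the first of them.  A later phase can then recover any
// forwarding address as mc_first_forwarded plus the encoded offset.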


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses.  As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered.  A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object.  Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}
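

// An example pass over a range containing live objects L1, L2, dead objects
// D1, D2, and a live object L3, in that order: L1 and L2 are unmarked and
// receive forwarding addresses; reaching D1 records the start of a free
// region; reaching L3 triggers the non-live-to-live transition that encodes
// the free region covering D1 and D2.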


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


static void SweepSpace(NewSpace* space) {
  HeapObject* object;
  for (Address current = space->bottom();
       current < space->top();
       current += object->Size()) {
    object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
    } else {
      // We give non-live objects a map that will correctly give their size,
      // since their existing map might not be live after the collection.
      int size = object->Size();
      if (size >= ByteArray::kHeaderSize) {
        object->set_map(Heap::raw_unchecked_byte_array_map());
        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
      } else {
        ASSERT(size == kPointerSize);
        object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
      }
      ASSERT(object->Size() == size);
    }
    // The object is now unmarked for the call to Size() at the top of the
    // loop.
  }
}


static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();
        if (!is_previous_alive) {  // Transition from free to live.
          dealloc(free_start, static_cast<int>(current - free_start));
          is_previous_alive = true;
        }
      } else {
        MarkCompactCollector::ReportDeleteIfNeeded(object);
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    // If the last region was not live we need to deallocate from
    // free_start to the allocation top in the page.
    if (!is_previous_alive) {
      int free_size = static_cast<int>(p->AllocationTop() - free_start);
      if (free_size > 0) {
        dealloc(free_start, free_size);
      }
    }
  }
}


void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes) {
  Heap::ClearRSetRange(start, size_in_bytes);
  Heap::old_pointer_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes) {
  Heap::old_data_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes) {
  Heap::code_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes) {
  // Objects in map space are assumed to have size Map::kSize and a
  // valid map in their first word.  Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Heap::ClearRSetRange(start, size_in_bytes);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a);
  }
}


void MarkCompactCollector::DeallocateCellBlock(Address start,
                                               int size_in_bytes) {
  // Free-list elements in cell space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = Heap::cell_space()->object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Heap::ClearRSetRange(start, size_in_bytes);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Heap::cell_space()->Free(a);
  }
}


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted).  Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute new space next to last after the old and code spaces have been
  // compacted.  Objects in new space can be promoted to old space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects.  Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later.  This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
    : live_maps_(live_maps),
      to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
      map_to_evacuate_it_(to_evacuate_start_),
      first_map_to_evacuate_(
          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
   1265     for (Map* next_vacant_map = NextVacantMap();
   1266          next_vacant_map;
   1267          next_vacant_map = NextVacantMap()) {
   1268       EvacuateMap(next_vacant_map, NextMapToEvacuate());
   1269     }
   1270 
   1271 #ifdef DEBUG
   1272     CheckNoMapsToEvacuate();
   1273 #endif
   1274   }
   1275 
   1276   void UpdateMapPointersInRoots() {
   1277     Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
   1278     GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
   1279   }
   1280 
   1281   void FinishMapSpace() {
   1282     // Iterate through to space and finish move.
   1283     MapIterator it;
   1284     HeapObject* o = it.next();
   1285     for (; o != first_map_to_evacuate_; o = it.next()) {
   1286       ASSERT(o != NULL);
   1287       Map* map = reinterpret_cast<Map*>(o);
   1288       ASSERT(!map->IsMarked());
   1289       ASSERT(!map->IsOverflowed());
   1290       ASSERT(map->IsMap());
   1291       Heap::UpdateRSet(map);
   1292     }
   1293   }
   1294 
  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word.  They are the
      // only objects whose map word is overflowed, since marking is
      // already complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    memcpy(
        reinterpret_cast<void*>(vacant_map->address()),
        reinterpret_cast<void*>(map_to_evacuate->address()),
        Map::kSize);
    ASSERT(vacant_map->IsMap());  // Due to memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
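

// Illustrative sketch (not part of the collector): the map compaction above
// is a two-finger algorithm.  One iterator walks forward over the vacant
// (free list) slots below the post-compaction top; the other walks the live
// maps at or above it.  Each evacuated map leaves its new address, tagged
// with the overflow bit, behind as a forwarding pointer.  The standalone
// model below uses plain arrays instead of heap words; all names and the
// guard macro are hypothetical and for illustration only.
#ifdef MARK_COMPACT_ILLUSTRATIVE_SKETCHES
static void SimulateMapCompaction(bool* live, int* forwarding,
                                  int n_slots, int n_live) {
  // Slots [0, n_live) form the compacted area; live maps at or above
  // n_live are moved down into vacant slots, both fingers in address order.
  int evacuate = n_live;
  for (int vacant = 0; vacant < n_live; vacant++) {
    if (live[vacant]) continue;                    // Slot already occupied.
    while (evacuate < n_slots && !live[evacuate]) evacuate++;
    if (evacuate == n_slots) break;                // Nothing left to move.
    forwarding[evacuate] = vacant;  // Plays the role of the overflowed map
                                    // word left in the evacuated slot.
    live[vacant] = true;
    live[evacuate] = false;
  }
}
#endif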


void MarkCompactCollector::SweepSpaces() {
  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
  SweepSpace(Heap::new_space());
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
  int live_maps = Heap::map_space()->Size() / Map::kSize;
  ASSERT(live_map_objects_ == live_maps);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    map_compact.FinishMapSpace();
    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate over the live objects in a range of addresses (e.g., a page or a
// semispace).  Non-live regions in the range have been overwritten with
// free-space encodings: either a one-word filler or a multi-word filler
// whose second word holds the size of the free region.  The callback
// function is used to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      live_objects++;
      current += size_func(HeapObject::FromAddress(current));
    }
  }
  return live_objects;
}
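

// Illustrative sketch (not part of the collector): the walk above depends on
// dead regions having been overwritten with one of two encodings -- a
// one-word filler, or a multi-word filler whose second word holds the size
// of the free region.  The standalone model below walks an int array using
// hypothetical sentinel values and a fixed object size.
#ifdef MARK_COMPACT_ILLUSTRATIVE_SKETCHES
static const int kSketchSingleFree = -1;  // Hypothetical one-word filler.
static const int kSketchMultiFree = -2;   // Hypothetical multi-word filler.

static int SketchCountLiveObjects(const int* words, int n_words,
                                  int words_per_object) {
  int live = 0;
  int i = 0;
  while (i < n_words) {
    if (words[i] == kSketchSingleFree) {
      i += 1;                 // Skip a one-word hole.
    } else if (words[i] == kSketchMultiFree) {
      i += words[i + 1];      // The next word holds the hole size in words.
    } else {
      live++;                 // A live object starts here; the real code
      i += words_per_object;  // asks size_func for its size instead.
    }
  }
  return live;
}
#endif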


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence());
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};
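

// Illustrative sketch (not part of the collector): for new-space objects the
// forwarding address is not read from the object itself.  The semispaces are
// the same size, so the forwarding pointer of the object at offset k in
// to-space was recorded at offset k in from-space during the encoding phase.
// A standalone model of the lookup, with hypothetical names:
#ifdef MARK_COMPACT_ILLUSTRATIVE_SKETCHES
static char* SketchNewSpaceForwardingAddress(char* to_space_low,
                                             char* from_space_low,
                                             char* old_addr) {
  // Mirror the object's to-space offset into from-space and load the
  // forwarding pointer stored there.
  return *reinterpret_cast<char**>(from_space_low + (old_addr - to_space_low));
}
#endif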


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps = IterateLiveObjects(Heap::map_space(),
                                     &UpdatePointersInOldObject);
  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
                                             &UpdatePointersInOldObject);
  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
                                          &UpdatePointersInOldObject);
  int live_codes = IterateLiveObjects(Heap::code_space(),
                                      &UpdatePointersInOldObject);
  int live_cells = IterateLiveObjects(Heap::cell_space(),
                                      &UpdatePointersInOldObject);
  int live_news = IterateLiveObjects(Heap::new_space(),
                                     &UpdatePointersInNewObject);

  // Large objects do not move, so the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    UpdatePointersInNewObject(obj);

  USE(live_maps);
  USE(live_pointer_olds);
  USE(live_data_olds);
  USE(live_codes);
  USE(live_cells);
  USE(live_news);
  ASSERT(live_maps == live_map_objects_);
  ASSERT(live_data_olds == live_old_data_objects_);
  ASSERT(live_pointer_olds == live_old_pointer_objects_);
  ASSERT(live_codes == live_code_objects_);
  ASSERT(live_cells == live_cell_objects_);
  ASSERT(live_news == live_young_objects_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word at map_addr is also encoded, so we
  // cannot cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}
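

// Illustrative sketch (not part of the collector): while forwarding
// addresses are encoded, a live object's map word does double duty, packing
// both the identity of its map and the object's offset from the first live
// object on its page into a single word.  The sketch below packs and unpacks
// such a pair with hypothetical field widths; it does not reproduce V8's
// actual MapWord bit layout.
#ifdef MARK_COMPACT_ILLUSTRATIVE_SKETCHES
static const unsigned kSketchOffsetBits = 13;  // Hypothetical field width.

static unsigned SketchEncodeMapWord(unsigned map_index, unsigned offset) {
  // The offset must fit in the low kSketchOffsetBits bits.
  return (map_index << kSketchOffsetBits) | offset;
}

static unsigned SketchDecodeMapIndex(unsigned encoded) {
  return encoded >> kSketchOffsetBits;
}

static unsigned SketchDecodeOffset(unsigned encoded) {
  return encoded & ((1u << kSketchOffsetBits) - 1);
}
#endif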


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
  MapWord encoding = obj->map_word();

  // Offset of this object relative to the first live object's forwarding
  // address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of the forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  Address mc_top = forwarded_page->mc_relocation_top;
  int mc_top_offset = forwarded_page->Offset(mc_top);

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // It must be in the next page.  Note that this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);

  return next_page->OffsetToAddress(offset);
}
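

// Illustrative sketch (not part of the collector): the page-crossing
// arithmetic above in miniature.  Suppose the first live object on this page
// forwards to offset 96 of its destination page, the destination page's
// relocation top is at offset 128, and this object's encoded offset is 48.
// Then 96 + 48 is past the top, so the object lands in the next page at
// 48 - (128 - 96) = 16 bytes past the object area start.  All numbers and
// names are hypothetical.
#ifdef MARK_COMPACT_ILLUSTRATIVE_SKETCHES
static int SketchForwardingOffset(int forwarded_offset,  // Destination offset
                                                         // of first live obj.
                                  int mc_top_offset,     // Destination page's
                                                         // relocation top.
                                  int offset,            // Encoded offset of
                                                         // this object.
                                  int object_start_offset,  // First usable
                                                            // page offset.
                                  bool* in_next_page) {
  if (forwarded_offset + offset < mc_top_offset) {
    *in_next_page = false;  // Fits in the same page as first_forwarded.
    return forwarded_offset + offset;
  }
  // Spills over: carry the remainder into the next page's object area.
  *in_next_page = true;
  return offset - (mc_top_offset - forwarded_offset) + object_start_offset;
}
#endif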


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects, always relocating map objects first.  Relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
                                             &RelocateOldPointerObject);
  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
                                          &RelocateOldDataObject);
  int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
  int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);

  USE(live_maps);
  USE(live_data_olds);
  USE(live_pointer_olds);
  USE(live_codes);
  USE(live_cells);
  USE(live_news);
  ASSERT(live_maps == live_map_objects_);
  ASSERT(live_data_olds == live_old_data_objects_);
  ASSERT(live_pointer_olds == live_old_pointer_objects_);
  ASSERT(live_codes == live_code_objects_);
  ASSERT(live_cells == live_cell_objects_);
  ASSERT(live_news == live_young_objects_);

  // Flip the from and to spaces.
  Heap::new_space()->Flip();

  // Set the age mark to the bottom of to-space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  Heap::new_space()->MCCommitRelocationInfo();
#ifdef DEBUG
  // It is safe to write to the remembered sets on a page-by-page basis
  // after committing the m-c forwarding pointer.
  Page::set_rset_state(Page::IN_USE);
#endif
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.  The meta map object may not be copied yet, so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    memmove(new_addr, old_addr, Map::kSize);  // Copy contents.
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset the map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    memmove(new_addr, old_addr, obj_size);  // Copy contents.
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    LOG(FunctionMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    memmove(new_addr, old_addr, obj_size);  // Copy contents.
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache targets.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    LOG(CodeMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  memcpy(reinterpret_cast<void*>(new_addr),
         reinterpret_cast<void*>(old_addr),
         obj_size);

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    LOG(FunctionMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}


// -------------------------------------------------------------------------
// Phase 5: Rebuild remembered sets

void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
  ASSERT(state_ == RELOCATE_OBJECTS);
  state_ = REBUILD_RSETS;
#endif
  Heap::RebuildRSets();
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    LOG(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    LOG(FunctionDeleteEvent(obj->address()));
  }
#endif
}

} }  // namespace v8::internal