// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {

static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(V8_TARGET_ARCH_X64)
#define LUMP_OF_MEMORY (2 * MB)
      code_range_size_(512*MB),
#else
#define LUMP_OF_MEMORY MB
      code_range_size_(0),
#endif
#if defined(ANDROID)
      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
#else
      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
      max_executable_size_(256l * LUMP_OF_MEMORY),
#endif

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      global_ic_age_(0),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      new_space_high_promotion_mode_active_(false),
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      old_gen_limit_factor_(1),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      store_buffer_(this),
      marking_(this),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      ms_count_at_last_idle_notification_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      promotion_queue_(this),
      configured_(false),
      chunks_queued_for_free_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}

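// Returns the size of an old-space object even in the middle of a GC,
// when the object's map word may be overloaded with marking state and the
// regular size computation via the map would not be safe.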
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}

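// Selects the collector for the next GC.  A scavenge is the default; a
// full mark-compact is chosen when old space is explicitly requested, when
// enough data has been promoted, when old-generation allocation has
// already failed, or when there may not be enough free old-generation
// memory to guarantee that a scavenge can succeed.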
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double
  // logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the statistics before GC, we use some complicated logic to
  // ensure that NewSpace statistics are logged exactly once when --log-gc
  // is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

#if defined(DEBUG)
  ReportStatisticsBeforeGC();
#endif  // DEBUG

  LiveObjectList::GCPrologue();
  store_buffer()->GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT
}


void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetFlags(flags);
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
  mark_compact_collector_.SetFlags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // A major GC invokes weak handle callbacks on weakly reachable handles,
  // but it does not collect the weakly reachable objects until the next
  // major GC.  Therefore, if weak handle callbacks have been invoked during
  // an aggressive collection, we rerun the major GC to release the objects
  // that have become garbage.
  // Note: since weak callbacks can execute arbitrary code, we cannot assume
  // that there will eventually be no further weak callback invocations.
  // Therefore we stop recollecting after several attempts.
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
      break;
    }
  }
  mark_compact_collector()->SetFlags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
  Shrink();
  incremental_marking()->UncommitMarkingDeque();
}


bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollector collector,
                          const char* gc_reason,
                          const char* collector_reason) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
      !mark_compact_collector()->abort_incremental_marking_ &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete()) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this, gc_reason, collector_reason);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
      incremental_marking()->Start();
    }
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this, NULL, NULL);
  if (incremental_marking()->IsStopped()) {
    PerformGarbageCollection(SCAVENGER, &tracer);
  } else {
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
  }
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


static bool AbortIncrementalMarkingAndCollectGarbage(
    Heap* heap,
    AllocationSpace space,
    const char* gc_reason = NULL) {
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
  bool result = heap->CollectGarbage(space, gc_reason);
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  return result;
}

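// Tries to reserve the requested amount of space in each of the heap's
// spaces.  Every failed reservation triggers a GC in the corresponding
// space; after kThreshold unsuccessful rounds we give up and report OOM.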
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE,
                           "failed to reserve space in the new space");
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
          "failed to reserve space in the old pointer space");
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
          "failed to reserve space in the old data space");
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
          "failed to reserve space in the code space");
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
          "failed to reserve space in the map space");
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
          "failed to reserve space in the cell space");
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
          "failed to reserve space in the large object space");
      gc_performed = true;
    }
  }

  if (gc_performed) {
    // Failed to reserve the space after several attempts.
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}
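
// Worked example (illustrative numbers): if 2 MB out of a 16 MB new space
// survived the last scavenge, survival_rate is 12.5.  A drop of more than
// kYoungSurvivalRateAllowedDeviation percentage points relative to the
// previous rate counts as DECREASING, a comparable rise as INCREASING,
// and anything in between as STABLE.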

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  if (FLAG_verify_heap) {
    VerifySymbolTable();
  }
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collections indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collections and thus trade memory
      // space for mutation speed.
      old_gen_limit_factor_ = 2;
    } else {
      old_gen_limit_factor_ = 1;
    }

    old_gen_promotion_limit_ =
        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
    old_gen_allocation_limit_ =
        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  if (!new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() &&
      IsHighSurvivalRate()) {
    // Stable high survival rates even though the young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit the maximal capacity of the young generation.
    new_space_high_promotion_mode_active_ = true;
    if (FLAG_trace_gc) {
      PrintF("Limited new space size due to high promotion rate: %d MB\n",
             new_space_.InitialCapacity() / MB);
    }
  } else if (new_space_high_promotion_mode_active_ &&
      IsStableOrDecreasingSurvivalTrend() &&
      IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    new_space_high_promotion_mode_active_ = false;
    if (FLAG_trace_gc) {
      PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
             new_space_.MaximumCapacity() / MB);
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.Capacity() > new_space_.InitialCapacity()) {
    new_space_.Shrink();
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  if (FLAG_verify_heap) {
    VerifySymbolTable();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  isolate_->set_context_exit_happened(false);
}


void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  StringSplitCache::Clear(string_split_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  return isolate()->inner_pointer_to_code_cache()->
      GcSafeFindCodeForInnerPointer(a);
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space is normally swept conservatively, in which case the
  // iterator does not work, so we normally have to skip this check.
  if (!HEAP->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(HEAP->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}

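// During a scavenge, an object that has already been copied has a
// forwarding address installed in its map word; a new-space object
// without one has not (yet) been reached.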
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}

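// Rebuilds the store buffer as its pages are scanned during a scavenge.
// Pages that contribute too many entries, or that overflow the buffer
// outright, are put into scan-on-scavenge mode and their entries are
// wiped from the buffer.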
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this iteration.
      if (current_page_->scan_on_scavenge()) {
        // Then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page?  The heuristic
        // is that no page can take more than 1/5 of the remaining slots in
        // the store buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots in
        // the store buffer.  It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again.  Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again.  This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects.  These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  } else {
    UNREACHABLE();
  }
}

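// The promotion queue lives at the unused top end of to-space: front_ and
// rear_ start at ToSpaceEnd() and grow downwards towards limit_ at
// ToSpaceStart(), each entry being a pair of intptr_t values.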
void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a whole number of promotion
  // queue entries (where each is a pair of intptr_t).  This allows us to
  // simplify the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
  guard_ = false;
}

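// Evacuates the queue entries that lie on the current page (between rear_
// and either front_ or the end of the page) into an emergency stack
// allocated on the C++ heap, so that this part of to-space can be reused.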
void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  CheckNewSpaceExpansionCriteria();

  SelectScavengingVisitorsTable();

  incremental_marking()->PrepareForScavenge();

  AdvanceSweepers(static_cast<int>(new_space_.Size()));

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

#ifdef DEBUG
  store_buffer()->Clean();
#endif

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this,
                                  store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL; cell = cell_iterator.Next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge objects reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  LiveObjectList::UpdateReferencesForScavengeGC();
  if (!FLAG_watch_ic_patching) {
    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
  }
  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;

  scavenges_since_last_idle_round_++;
}

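// After a scavenge, the map word of a surviving new-space object holds its
// forwarding address; a string in the table without one was not reached
// and can therefore be finalized.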
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {

  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}

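// Walks the weak list of optimized functions hanging off a context,
// dropping entries the retainer does not keep and relinking the survivors.
// Returns the new head of the list.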
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(retain);
      }
      // Retained function is new tail.
      candidate_function = reinterpret_cast<JSFunction*>(retain);
      tail = candidate_function;

      ASSERT(retain->IsUndefined() || retain->IsJSFunction());

      if (retain == undefined) break;
    }

    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if it has one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(undefined);
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* undefined = undefined_value();
  Object* head = undefined;
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retain;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            retain,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      candidate_context = reinterpret_cast<Context*>(retain);
      tail = candidate_context;

      if (retain == undefined) break;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }

    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

   1400   // Terminate the list if there is one or more elements.
   1401   if (tail != NULL) {
   1402     tail->set_unchecked(this,
   1403                         Context::NEXT_CONTEXT_LINK,
   1404                         Heap::undefined_value(),
   1405                         UPDATE_WRITE_BARRIER);
   1406   }
   1407 
   1408   // Update the head of the list of contexts.
   1409   global_contexts_list_ = head;
   1410 }


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  AssertNoAllocation no_allocation;

  class VisitorAdapter : public ObjectVisitor {
   public:
    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
        : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsExternalString()) {
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } visitor_adapter(visitor);
  external_string_table_.Iterate(&visitor_adapter);
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


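// Processes the queue of objects copied during a scavenge.  The region
// between new_space_front and new_space_.top() acts as a work list:
// visiting an object may copy further objects, growing the queue, and
// promoted objects are drained from the promotion queue as they appear.
// Returns the final value of new_space_front.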
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
          NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // A promoted object might already have been partially visited
        // during old space pointer iteration.  Thus we search specifically
        // for pointers into from-space instead of looking for pointers
        // into new space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };


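// Dispatch table of scavenging callbacks, specialized on whether mark
// bits must be transferred to the copied object (needed while
// incremental marking is active) and on whether logging/profiling hooks
// need to be invoked.  Four instantiations cover the combinations; see
// InitializeScavengingVisitorsTables() below.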
template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(kVisitJSFunction,
                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
                          template VisitSpecialized<JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by EvacuateObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the
  // source object.
  INLINE(static void MigrateObject(Heap* heap,
                                   HeapObject* source,
                                   HeapObject* target,
                                   int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }

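  // Evacuates an object either by promoting it to old space (using the
  // large object space when it exceeds the page limit) or, if promotion
  // is not wanted or fails, by copying it within new space.  Promoted
  // pointer-containing objects are queued so their fields can be
  // rescanned for new-space references.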
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    SLOW_ASSERT((size_restriction != SMALL) ||
                (object_size <= Page::kMaxNonCodeHeapObjectSize));
    SLOW_ASSERT(object->Size() == object_size);

    Heap* heap = map->GetHeap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRaw(object_size,
                                                     NOT_EXECUTABLE);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);

        // Order is important: slot might be inside of the target if target
        // was allocated over a dead object and slot comes from the store
        // buffer.
        *slot = target;
        MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
    Object* result = allocation->ToObjectUnchecked();
    HeapObject* target = HeapObject::cast(result);

    // Order is important: slot might be inside of the target if target
    // was allocated over a dead object and slot comes from the store
    // buffer.
    *slot = target;
    MigrateObject(heap, object, target, object_size);
    return;
  }


  static inline void EvacuateJSFunction(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::
        template VisitSpecialized<JSFunction::kSize>(map, slot, object);

    HeapObject* target = *slot;
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction
      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
      // will miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->
          RecordCodeEntrySlot(code_entry_slot, code);
    }
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
                                              slot,
                                              object,
                                              object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

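  // Short-circuits a cons string whose second part is the empty string:
  // instead of evacuating the cons wrapper, the slot is redirected to the
  // (possibly already forwarded) first part.  Shortcutting is only done
  // when marks are ignored; with TRANSFER_MARKS the string is evacuated
  // like an ordinary cons string.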
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() ==
        heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;


static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}


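// Selects the scavenging dispatch table that matches the current
// incremental marking and logging/profiling state.  When incremental
// compaction is in progress, cons-string shortcutting is additionally
// disabled by routing shortcut candidates through the regular
// cons-string visitor.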
void Heap::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling());

  if (!incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // Scavenging code relies on the fact that a new space object
      // cannot be evacuated into an evacuation candidate, and
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->GetHeap()->DoScavengeObject(map, p, object);
}


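// Allocates a map that is only partially initialized.  Used while
// bootstrapping, before the objects a complete map depends on (such as
// the empty fixed array and the empty descriptor array) exist; the
// partial maps are completed later in CreateInitialMaps().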
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to the uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
        StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}


MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_no_write_barrier(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  map->set_elements_kind(elements_kind);

  // If the map object is aligned, fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  CodeCache* code_cache;
  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
  }
  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}


MaybeObject* Heap::AllocateAccessorPair() {
  AccessorPair* accessors;
  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
  }
  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
  return accessors;
}


MaybeObject* Heap::AllocateTypeFeedbackInfo() {
  TypeFeedbackInfo* info;
  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
    if (!maybe_info->To(&info)) return maybe_info;
  }
  info->set_ic_total_count(0);
  info->set_ic_with_type_info_count(0);
  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                SKIP_WRITE_BARRIER);
  return info;
}


MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
  AliasedArgumentsEntry* entry;
  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
    if (!maybe_entry->To(&entry)) return maybe_entry;
  }
  entry->set_aliased_context_slot(aliased_context_slot);
  return entry;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                     \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents)                               \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


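// Bootstraps the map hierarchy: the meta map and a few partial maps come
// first, then the empty fixed array and descriptor array they require,
// after which the partial maps are completed and the remaining initial
// maps are allocated.  Returns false if any allocation fails.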
bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to the uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->init_instance_descriptors();
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_prototype_transitions(empty_fixed_array());

  fixed_array_map()->init_instance_descriptors();
  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_prototype_transitions(empty_fixed_array());

  oddball_map()->init_instance_descriptors();
  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_prototype_transitions(empty_fixed_array());

  // Fix the prototype object for the existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_free_space_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_module_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}


MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map_no_write_barrier(
      global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use Smi-only elements optimizations for objects with the neander
  // map.  There are too many cases where element values are set directly,
  // with no bottleneck to trap the Smi-only -> fast elements transition,
  // and there appears to be no benefit in optimizing this case.
  Map* new_neander_map = Map::cast(obj);
  new_neander_map->set_elements_kind(FAST_ELEMENTS);
  set_neander_map(new_neander_map);

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs.  They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookups in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has problems generating correct code for the following snippet:
  // {  JSEntryStub stub;
  //    js_entry_code_ = *stub.GetCode();
  // }
  // {  JSConstructEntryStub stub;
  //    js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, use separate functions that are not inlined.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner, so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime();
}


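// Allocates the initial non-map objects: the canonical numbers and
// oddballs, the symbol table, the root symbols, and the various caches.
// Ordering matters: for example, -0 must exist before NumberFromDouble
// works, and the oddballs are finished only after the symbol table exists.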
bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(HeapNumber::cast(obj));
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(HeapNumber::cast(obj));

  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_infinity_value(HeapNumber::cast(obj));

  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the symbol table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Finish initializing the oddballs after creating the symbol table.
  { MaybeObject* maybe_obj =
        undefined_value()->Initialize("undefined",
                                      nan_value(),
                                      Oddball::kUndefined);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  // Initialize the null_value.
  { MaybeObject* maybe_obj =
        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(Oddball::cast(obj));

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects.  The hash code has a special value so that it will not match
  // the empty string when searching for the property.  It cannot be part of
  // the loop above because it needs to be allocated manually with the special
  // hash code in place.  The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary.  The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(UnseededNumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc.  The initial
  // size is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_number_string_cache(FixedArray::cast(obj));

  // Allocate the cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate the cache for string split.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_string_split_cache(FixedArray::cast(obj));

  // Allocate the cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize the keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize the context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize the descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize the compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


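// The string split cache maps (string, pattern) pairs to the resulting
// array of substrings.  Each hash bucket holds one entry, with the
// following bucket used as a second probe; a Smi zero marks an empty
// slot or a miss.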
Object* StringSplitCache::Lookup(
    FixedArray* cache, String* string, String* pattern) {
  if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
  uint32_t hash = string->Hash();
  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
      ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}


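// Inserts an entry, preferring the primary bucket and falling back to the
// secondary bucket; if both are occupied, the secondary entry is evicted
// and the new entry takes the primary slot.  The result array is
// symbolized (for arrays of fewer than 100 elements) and marked
// copy-on-write.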
   2637 void StringSplitCache::Enter(Heap* heap,
   2638                              FixedArray* cache,
   2639                              String* string,
   2640                              String* pattern,
   2641                              FixedArray* array) {
   2642   if (!string->IsSymbol() || !pattern->IsSymbol()) return;
   2643   uint32_t hash = string->Hash();
   2644   uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
   2645       ~(kArrayEntriesPerCacheEntry - 1));
   2646   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
   2647     cache->set(index + kStringOffset, string);
   2648     cache->set(index + kPatternOffset, pattern);
   2649     cache->set(index + kArrayOffset, array);
   2650   } else {
   2651     uint32_t index2 =
   2652         ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
   2653     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
   2654       cache->set(index2 + kStringOffset, string);
   2655       cache->set(index2 + kPatternOffset, pattern);
   2656       cache->set(index2 + kArrayOffset, array);
   2657     } else {
   2658       cache->set(index2 + kStringOffset, Smi::FromInt(0));
   2659       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
   2660       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
   2661       cache->set(index + kStringOffset, string);
   2662       cache->set(index + kPatternOffset, pattern);
   2663       cache->set(index + kArrayOffset, array);
   2664     }
   2665   }
   2666   if (array->length() < 100) {  // Limit how many new symbols we want to make.
   2667     for (int i = 0; i < array->length(); i++) {
   2668       String* str = String::cast(array->get(i));
   2669       Object* symbol;
   2670       MaybeObject* maybe_symbol = heap->LookupSymbol(str);
   2671       if (maybe_symbol->ToObject(&symbol)) {
   2672         array->set(i, symbol);
   2673       }
   2674     }
   2675   }
   2676   array->set_map_no_write_barrier(heap->fixed_cow_array_map());
   2677 }
   2678 
   2679 
   2680 void StringSplitCache::Clear(FixedArray* cache) {
   2681   for (int i = 0; i < kStringSplitCacheSize; i++) {
   2682     cache->set(i, Smi::FromInt(0));
   2683   }
   2684 }
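
// Illustrative sketch of the cache geometry used by Lookup and Enter above:
// the FixedArray acts as a two-way set-associative cache in which each bucket
// spans kArrayEntriesPerCacheEntry consecutive slots and a colliding entry
// falls back to the next bucket.  The helper names below are hypothetical,
// and both constants must be powers of two for the masking to be valid.
#if 0
static uint32_t PrimaryBucket(uint32_t hash) {
  // Keep the low bits of the hash, then round down to a bucket boundary.
  return (hash & (StringSplitCache::kStringSplitCacheSize - 1)) &
         ~(StringSplitCache::kArrayEntriesPerCacheEntry - 1);
}

static uint32_t SecondaryBucket(uint32_t primary) {
  // The overflow probe is the next bucket, wrapping around the cache.
  return (primary + StringSplitCache::kArrayEntriesPerCacheEntry) &
         (StringSplitCache::kStringSplitCacheSize - 1);
}
#endif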
   2685 
   2686 
   2687 MaybeObject* Heap::AllocateInitialNumberStringCache() {
   2688   MaybeObject* maybe_obj =
   2689       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
   2690   return maybe_obj;
   2691 }
   2692 
   2693 
   2694 int Heap::FullSizeNumberStringCacheLength() {
   2695   // Compute the size of the number string cache based on the max new-space
   2696   // size.  The cache has a minimum size of twice the initial cache size to
   2697   // ensure that it is bigger after being made 'full size'.
   2698   int number_string_cache_size = max_semispace_size_ / 512;
   2699   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
   2700                                  Min(0x4000, number_string_cache_size));
   2701   // There is a string and a number per entry so the length is twice the number
   2702   // of entries.
   2703   return number_string_cache_size * 2;
   2704 }
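
// A worked example of the sizing above, with illustrative numbers: for an
// 8 MB max semispace, 8 MB / 512 == 16384 entries, which Min leaves at
// 16384 (== 0x4000), so the returned FixedArray length is 32768 slots.  For
// a sufficiently small semispace, Max raises the entry count to the floor of
// kInitialNumberStringCacheSize * 2.
#if 0
int entries = Max(kInitialNumberStringCacheSize * 2,
                  Min(0x4000, (8 * MB) / 512));
int length = entries * 2;  // one (number, string) pair per entry
#endif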
   2705 
   2706 
   2707 void Heap::AllocateFullSizeNumberStringCache() {
   2708   // The idea is to have a small number string cache in the snapshot to keep
   2709   // boot-time memory usage down.  Expanding the cache while the snapshot is
   2710   // being created would defeat that purpose, hence the assert below.
   2711   ASSERT(!Serializer::enabled());
   2712   MaybeObject* maybe_obj =
   2713       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
   2714   Object* new_cache;
   2715   if (maybe_obj->ToObject(&new_cache)) {
   2716     // We don't bother to repopulate the cache with entries from the old cache.
   2717     // It will be repopulated soon enough with new strings.
   2718     set_number_string_cache(FixedArray::cast(new_cache));
   2719   }
   2720   // If allocation fails then we just return without doing anything.  It is only
   2721   // a cache, so best effort is OK here.
   2722 }
   2723 
   2724 
   2725 void Heap::FlushNumberStringCache() {
   2726   // Flush the number-to-string cache.
   2727   int len = number_string_cache()->length();
   2728   for (int i = 0; i < len; i++) {
   2729     number_string_cache()->set_undefined(this, i);
   2730   }
   2731 }
   2732 
   2733 
   2734 static inline int double_get_hash(double d) {
   2735   DoubleRepresentation rep(d);
   2736   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
   2737 }
   2738 
   2739 
   2740 static inline int smi_get_hash(Smi* smi) {
   2741   return smi->value();
   2742 }
   2743 
   2744 
   2745 Object* Heap::GetNumberStringCache(Object* number) {
   2746   int hash;
   2747   int mask = (number_string_cache()->length() >> 1) - 1;
   2748   if (number->IsSmi()) {
   2749     hash = smi_get_hash(Smi::cast(number)) & mask;
   2750   } else {
   2751     hash = double_get_hash(number->Number()) & mask;
   2752   }
   2753   Object* key = number_string_cache()->get(hash * 2);
   2754   if (key == number) {
   2755     return String::cast(number_string_cache()->get(hash * 2 + 1));
   2756   } else if (key->IsHeapNumber() &&
   2757              number->IsHeapNumber() &&
   2758              key->Number() == number->Number()) {
   2759     return String::cast(number_string_cache()->get(hash * 2 + 1));
   2760   }
   2761   return undefined_value();
   2762 }
   2763 
   2764 
   2765 void Heap::SetNumberStringCache(Object* number, String* string) {
   2766   int hash;
   2767   int mask = (number_string_cache()->length() >> 1) - 1;
   2768   if (number->IsSmi()) {
   2769     hash = smi_get_hash(Smi::cast(number)) & mask;
   2770   } else {
   2771     hash = double_get_hash(number->Number()) & mask;
   2772   }
   2773   if (number_string_cache()->get(hash * 2) != undefined_value() &&
   2774       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
   2775     // The first time we have a hash collision, we move to the full sized
   2776     // number string cache.
   2777     AllocateFullSizeNumberStringCache();
   2778     return;
   2779   }
   2780   number_string_cache()->set(hash * 2, number);
   2781   number_string_cache()->set(hash * 2 + 1, string);
   2782 }
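
// Illustrative layout of the number string cache used by the two functions
// above: the FixedArray stores (key, value) pairs, so a cache of N entries
// has length 2 * N, and a number hashing to h occupies slots 2 * h and
// 2 * h + 1.  A hypothetical read-side probe for a Smi, mirroring
// GetNumberStringCache:
#if 0
int entries = number_string_cache()->length() >> 1;  // pairs, not slots
int mask = entries - 1;                              // entries is a power of 2
int hash = smi_get_hash(Smi::cast(number)) & mask;
Object* key = number_string_cache()->get(hash * 2);        // cached number
Object* value = number_string_cache()->get(hash * 2 + 1);  // cached string
#endif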
   2783 
   2784 
   2785 MaybeObject* Heap::NumberToString(Object* number,
   2786                                   bool check_number_string_cache) {
   2787   isolate_->counters()->number_to_string_runtime()->Increment();
   2788   if (check_number_string_cache) {
   2789     Object* cached = GetNumberStringCache(number);
   2790     if (cached != undefined_value()) {
   2791       return cached;
   2792     }
   2793   }
   2794 
   2795   char arr[100];
   2796   Vector<char> buffer(arr, ARRAY_SIZE(arr));
   2797   const char* str;
   2798   if (number->IsSmi()) {
   2799     int num = Smi::cast(number)->value();
   2800     str = IntToCString(num, buffer);
   2801   } else {
   2802     double num = HeapNumber::cast(number)->value();
   2803     str = DoubleToCString(num, buffer);
   2804   }
   2805 
   2806   Object* js_string;
   2807   MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
   2808   if (maybe_js_string->ToObject(&js_string)) {
   2809     SetNumberStringCache(number, String::cast(js_string));
   2810   }
   2811   return maybe_js_string;
   2812 }
   2813 
   2814 
   2815 MaybeObject* Heap::Uint32ToString(uint32_t value,
   2816                                   bool check_number_string_cache) {
   2817   Object* number;
   2818   MaybeObject* maybe = NumberFromUint32(value);
   2819   if (!maybe->To<Object>(&number)) return maybe;
   2820   return NumberToString(number, check_number_string_cache);
   2821 }
   2822 
   2823 
   2824 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   2825   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
   2826 }
   2827 
   2828 
   2829 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
   2830     ExternalArrayType array_type) {
   2831   switch (array_type) {
   2832     case kExternalByteArray:
   2833       return kExternalByteArrayMapRootIndex;
   2834     case kExternalUnsignedByteArray:
   2835       return kExternalUnsignedByteArrayMapRootIndex;
   2836     case kExternalShortArray:
   2837       return kExternalShortArrayMapRootIndex;
   2838     case kExternalUnsignedShortArray:
   2839       return kExternalUnsignedShortArrayMapRootIndex;
   2840     case kExternalIntArray:
   2841       return kExternalIntArrayMapRootIndex;
   2842     case kExternalUnsignedIntArray:
   2843       return kExternalUnsignedIntArrayMapRootIndex;
   2844     case kExternalFloatArray:
   2845       return kExternalFloatArrayMapRootIndex;
   2846     case kExternalDoubleArray:
   2847       return kExternalDoubleArrayMapRootIndex;
   2848     case kExternalPixelArray:
   2849       return kExternalPixelArrayMapRootIndex;
   2850     default:
   2851       UNREACHABLE();
   2852       return kUndefinedValueRootIndex;
   2853   }
   2854 }
   2855 
   2856 
   2857 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
   2858   // We need to distinguish the minus-zero value, and this cannot be
   2859   // done after conversion to int.  Comparing bit patterns is faster
   2860   // than using fpclassify() et al.
   2861   static const DoubleRepresentation minus_zero(-0.0);
   2862 
   2863   DoubleRepresentation rep(value);
   2864   if (rep.bits == minus_zero.bits) {
   2865     return AllocateHeapNumber(-0.0, pretenure);
   2866   }
   2867 
   2868   int int_value = FastD2I(value);
   2869   if (value == int_value && Smi::IsValid(int_value)) {
   2870     return Smi::FromInt(int_value);
   2871   }
   2872 
   2873   // Materialize the value in the heap.
   2874   return AllocateHeapNumber(value, pretenure);
   2875 }
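
// Why NumberFromDouble compares bit patterns: IEEE 754 defines -0.0 == 0.0
// as true, so an arithmetic comparison cannot tell the two apart, while the
// sign bit makes their representations differ.  A hypothetical standalone
// illustration:
#if 0
DoubleRepresentation plus_zero(0.0);
DoubleRepresentation minus_zero(-0.0);
bool same_value = (0.0 == -0.0);                       // true
bool same_bits = (plus_zero.bits == minus_zero.bits);  // false: sign bit set
#endif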
   2876 
   2877 
   2878 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   2879   // Statically ensure that it is safe to allocate foreigns in paged spaces.
   2880   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   2881   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   2882   Foreign* result;
   2883   MaybeObject* maybe_result = Allocate(foreign_map(), space);
   2884   if (!maybe_result->To(&result)) return maybe_result;
   2885   result->set_foreign_address(address);
   2886   return result;
   2887 }
   2888 
   2889 
   2890 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
   2891   SharedFunctionInfo* share;
   2892   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
   2893   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
   2894 
   2895   // Set pointer fields.
   2896   share->set_name(name);
   2897   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   2898   share->set_code(illegal);
   2899   share->set_scope_info(ScopeInfo::Empty());
   2900   Code* construct_stub =
   2901       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   2902   share->set_construct_stub(construct_stub);
   2903   share->set_instance_class_name(Object_symbol());
   2904   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
   2905   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
   2906   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
   2907   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
   2908   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
   2909   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
   2910   share->set_ast_node_count(0);
   2911   share->set_deopt_counter(FLAG_deopt_every_n_times);
   2912   share->set_ic_age(0);
   2913 
   2914   // Set integer fields (smi or int, depending on the architecture).
   2915   share->set_length(0);
   2916   share->set_formal_parameter_count(0);
   2917   share->set_expected_nof_properties(0);
   2918   share->set_num_literals(0);
   2919   share->set_start_position_and_type(0);
   2920   share->set_end_position(0);
   2921   share->set_function_token_position(0);
   2922   // All compiler hints default to false or 0.
   2923   share->set_compiler_hints(0);
   2924   share->set_this_property_assignments_count(0);
   2925   share->set_opt_count(0);
   2926 
   2927   return share;
   2928 }
   2929 
   2930 
   2931 MaybeObject* Heap::AllocateJSMessageObject(String* type,
   2932                                            JSArray* arguments,
   2933                                            int start_position,
   2934                                            int end_position,
   2935                                            Object* script,
   2936                                            Object* stack_trace,
   2937                                            Object* stack_frames) {
   2938   Object* result;
   2939   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
   2940     if (!maybe_result->ToObject(&result)) return maybe_result;
   2941   }
   2942   JSMessageObject* message = JSMessageObject::cast(result);
   2943   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   2944   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   2945   message->set_type(type);
   2946   message->set_arguments(arguments);
   2947   message->set_start_position(start_position);
   2948   message->set_end_position(end_position);
   2949   message->set_script(script);
   2950   message->set_stack_trace(stack_trace);
   2951   message->set_stack_frames(stack_frames);
   2952   return result;
   2953 }
   2954 
   2955 
   2957 // Returns true for a character in a range.  Both limits are inclusive.
   2958 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
   2959   // This makes uses of the the unsigned wraparound.
   2960   return character - from <= to - from;
   2961 }
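
// Worked example of the wraparound trick: with unsigned arithmetic a single
// comparison replaces (from <= character && character <= to), because any
// character below 'from' wraps around to a huge value.  Hypothetical checks:
#if 0
ASSERT(Between('5', '0', '9'));   // '5' - '0' == 5, and 5 <= 9
ASSERT(!Between('/', '0', '9'));  // '/' - '0' wraps to 0xFFFFFFFF
ASSERT(!Between(':', '0', '9'));  // ':' - '0' == 10, and 10 > 9
#endif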
   2962 
   2963 
   2964 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
   2965     Heap* heap,
   2966     uint32_t c1,
   2967     uint32_t c2) {
   2968   String* symbol;
   2969   // Numeric strings have a different hash algorithm not known by
   2970   // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
   2971   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
   2972       heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
   2973     return symbol;
   2974   // Now that we know the length is 2, we might as well make use of that
   2975   // fact when building the new string.
   2976   } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
   2977     ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
   2978     Object* result;
   2979     { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
   2980       if (!maybe_result->ToObject(&result)) return maybe_result;
   2981     }
   2982     char* dest = SeqAsciiString::cast(result)->GetChars();
   2983     dest[0] = c1;
   2984     dest[1] = c2;
   2985     return result;
   2986   } else {
   2987     Object* result;
   2988     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
   2989       if (!maybe_result->ToObject(&result)) return maybe_result;
   2990     }
   2991     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
   2992     dest[0] = c1;
   2993     dest[1] = c2;
   2994     return result;
   2995   }
   2996 }
   2997 
   2998 
   2999 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
   3000   int first_length = first->length();
   3001   if (first_length == 0) {
   3002     return second;
   3003   }
   3004 
   3005   int second_length = second->length();
   3006   if (second_length == 0) {
   3007     return first;
   3008   }
   3009 
   3010   int length = first_length + second_length;
   3011 
   3012   // Optimization for 2-byte strings often used as keys in a decompression
   3013   // dictionary.  Check whether we already have the string in the symbol
   3014   // table to prevent creation of many unnecessary strings.
   3015   if (length == 2) {
   3016     unsigned c1 = first->Get(0);
   3017     unsigned c2 = second->Get(0);
   3018     return MakeOrFindTwoCharacterString(this, c1, c2);
   3019   }
   3020 
   3021   bool first_is_ascii = first->IsAsciiRepresentation();
   3022   bool second_is_ascii = second->IsAsciiRepresentation();
   3023   bool is_ascii = first_is_ascii && second_is_ascii;
   3024 
   3025   // Make sure that an out of memory exception is thrown if the length
   3026   // of the new cons string is too large.
   3027   if (length > String::kMaxLength || length < 0) {
   3028     isolate()->context()->mark_out_of_memory();
   3029     return Failure::OutOfMemoryException();
   3030   }
   3031 
   3032   bool is_ascii_data_in_two_byte_string = false;
   3033   if (!is_ascii) {
   3034     // At least one of the strings uses two-byte representation so we
   3035     // can't use the fast case code for short ASCII strings below, but
   3036     // we can try to save memory if all chars actually fit in ASCII.
   3037     is_ascii_data_in_two_byte_string =
   3038         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
   3039     if (is_ascii_data_in_two_byte_string) {
   3040       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
   3041     }
   3042   }
   3043 
   3044   // If the resulting string is small make a flat string.
   3045   if (length < ConsString::kMinLength) {
   3046     // Note that neither of the two inputs can be a slice because:
   3047     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
   3048     ASSERT(first->IsFlat());
   3049     ASSERT(second->IsFlat());
   3050     if (is_ascii) {
   3051       Object* result;
   3052       { MaybeObject* maybe_result = AllocateRawAsciiString(length);
   3053         if (!maybe_result->ToObject(&result)) return maybe_result;
   3054       }
   3055       // Copy the characters into the new object.
   3056       char* dest = SeqAsciiString::cast(result)->GetChars();
   3057       // Copy first part.
   3058       const char* src;
   3059       if (first->IsExternalString()) {
   3060         src = ExternalAsciiString::cast(first)->GetChars();
   3061       } else {
   3062         src = SeqAsciiString::cast(first)->GetChars();
   3063       }
   3064       for (int i = 0; i < first_length; i++) *dest++ = src[i];
   3065       // Copy second part.
   3066       if (second->IsExternalString()) {
   3067         src = ExternalAsciiString::cast(second)->GetChars();
   3068       } else {
   3069         src = SeqAsciiString::cast(second)->GetChars();
   3070       }
   3071       for (int i = 0; i < second_length; i++) *dest++ = src[i];
   3072       return result;
   3073     } else {
   3074       if (is_ascii_data_in_two_byte_string) {
   3075         Object* result;
   3076         { MaybeObject* maybe_result = AllocateRawAsciiString(length);
   3077           if (!maybe_result->ToObject(&result)) return maybe_result;
   3078         }
   3079         // Copy the characters into the new object.
   3080         char* dest = SeqAsciiString::cast(result)->GetChars();
   3081         String::WriteToFlat(first, dest, 0, first_length);
   3082         String::WriteToFlat(second, dest + first_length, 0, second_length);
   3083         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
   3084         return result;
   3085       }
   3086 
   3087       Object* result;
   3088       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
   3089         if (!maybe_result->ToObject(&result)) return maybe_result;
   3090       }
   3091       // Copy the characters into the new object.
   3092       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
   3093       String::WriteToFlat(first, dest, 0, first_length);
   3094       String::WriteToFlat(second, dest + first_length, 0, second_length);
   3095       return result;
   3096     }
   3097   }
   3098 
   3099   Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
   3100       cons_ascii_string_map() : cons_string_map();
   3101 
   3102   Object* result;
   3103   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3104     if (!maybe_result->ToObject(&result)) return maybe_result;
   3105   }
   3106 
   3107   AssertNoAllocation no_gc;
   3108   ConsString* cons_string = ConsString::cast(result);
   3109   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
   3110   cons_string->set_length(length);
   3111   cons_string->set_hash_field(String::kEmptyHashField);
   3112   cons_string->set_first(first, mode);
   3113   cons_string->set_second(second, mode);
   3114   return result;
   3115 }
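
// Summary of the dispatch in AllocateConsString above, keyed on the result
// length (the thresholds are the class constants referenced in the code):
//   length == 2                      -> symbol table hit or tiny flat string,
//   length <  ConsString::kMinLength -> flat SeqString, both halves copied,
//   length >= ConsString::kMinLength -> O(1) ConsString holding both halves.
// The ASCII/two-byte choice is made per branch, so two-byte inputs whose
// characters all fit in ASCII still produce an ASCII result.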
   3116 
   3117 
   3118 MaybeObject* Heap::AllocateSubString(String* buffer,
   3119                                      int start,
   3120                                      int end,
   3121                                      PretenureFlag pretenure) {
   3122   int length = end - start;
   3123   if (length <= 0) {
   3124     return empty_string();
   3125   } else if (length == 1) {
   3126     return LookupSingleCharacterStringFromCode(buffer->Get(start));
   3127   } else if (length == 2) {
   3128     // Optimization for 2-byte strings often used as keys in a decompression
   3129     // dictionary.  Check whether we already have the string in the symbol
   3130   // table to prevent creation of many unnecessary strings.
   3131     unsigned c1 = buffer->Get(start);
   3132     unsigned c2 = buffer->Get(start + 1);
   3133     return MakeOrFindTwoCharacterString(this, c1, c2);
   3134   }
   3135 
   3136   // Make an attempt to flatten the buffer to reduce access time.
   3137   buffer = buffer->TryFlattenGetString();
   3138 
   3139   if (!FLAG_string_slices ||
   3140       !buffer->IsFlat() ||
   3141       length < SlicedString::kMinLength ||
   3142       pretenure == TENURED) {
   3143     Object* result;
   3144     // WriteToFlat takes care of the case when an indirect string has a
   3145     // different encoding from its underlying string.  These encodings may
   3146     // differ because of externalization.
   3147     bool is_ascii = buffer->IsAsciiRepresentation();
   3148     { MaybeObject* maybe_result = is_ascii
   3149                                   ? AllocateRawAsciiString(length, pretenure)
   3150                                   : AllocateRawTwoByteString(length, pretenure);
   3151       if (!maybe_result->ToObject(&result)) return maybe_result;
   3152     }
   3153     String* string_result = String::cast(result);
   3154     // Copy the characters into the new object.
   3155     if (is_ascii) {
   3156       ASSERT(string_result->IsAsciiRepresentation());
   3157       char* dest = SeqAsciiString::cast(string_result)->GetChars();
   3158       String::WriteToFlat(buffer, dest, start, end);
   3159     } else {
   3160       ASSERT(string_result->IsTwoByteRepresentation());
   3161       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
   3162       String::WriteToFlat(buffer, dest, start, end);
   3163     }
   3164     return result;
   3165   }
   3166 
   3167   ASSERT(buffer->IsFlat());
   3168 #ifdef DEBUG
   3169   if (FLAG_verify_heap) {
   3170     buffer->StringVerify();
   3171   }
   3172 #endif
   3173 
   3174   Object* result;
   3175   // When slicing an indirect string we use its encoding for a newly created
   3176   // slice and don't check the encoding of the underlying string.  This is safe
   3177   // even if the encodings are different because of externalization.  If an
   3178   // indirect ASCII string is pointing to a two-byte string, the two-byte char
   3179   // codes of the underlying string must still fit into ASCII (because
   3180   // externalization must not change char codes).
   3181   { Map* map = buffer->IsAsciiRepresentation()
   3182                  ? sliced_ascii_string_map()
   3183                  : sliced_string_map();
   3184     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3185     if (!maybe_result->ToObject(&result)) return maybe_result;
   3186   }
   3187 
   3188   AssertNoAllocation no_gc;
   3189   SlicedString* sliced_string = SlicedString::cast(result);
   3190   sliced_string->set_length(length);
   3191   sliced_string->set_hash_field(String::kEmptyHashField);
   3192   if (buffer->IsConsString()) {
   3193     ConsString* cons = ConsString::cast(buffer);
   3194     ASSERT(cons->second()->length() == 0);
   3195     sliced_string->set_parent(cons->first());
   3196     sliced_string->set_offset(start);
   3197   } else if (buffer->IsSlicedString()) {
   3198     // Prevent nesting sliced strings.
   3199     SlicedString* parent_slice = SlicedString::cast(buffer);
   3200     sliced_string->set_parent(parent_slice->parent());
   3201     sliced_string->set_offset(start + parent_slice->offset());
   3202   } else {
   3203     sliced_string->set_parent(buffer);
   3204     sliced_string->set_offset(start);
   3205   }
   3206   ASSERT(sliced_string->parent()->IsSeqString() ||
   3207          sliced_string->parent()->IsExternalString());
   3208   return result;
   3209 }
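
// Illustrative offset arithmetic for the sliced-string case above: slicing a
// SlicedString re-points the new slice at the ultimate parent, so slices
// never nest.  With hypothetical values:
//   s  = slice of parent at [10, 90)  ->  {parent: parent, offset: 10}
//   s2 = slice of s at [5, 25)        ->  {parent: parent, offset: 15}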
   3210 
   3211 
   3212 MaybeObject* Heap::AllocateExternalStringFromAscii(
   3213     const ExternalAsciiString::Resource* resource) {
   3214   size_t length = resource->length();
   3215   if (length > static_cast<size_t>(String::kMaxLength)) {
   3216     isolate()->context()->mark_out_of_memory();
   3217     return Failure::OutOfMemoryException();
   3218   }
   3219 
   3220   Map* map = external_ascii_string_map();
   3221   Object* result;
   3222   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3223     if (!maybe_result->ToObject(&result)) return maybe_result;
   3224   }
   3225 
   3226   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
   3227   external_string->set_length(static_cast<int>(length));
   3228   external_string->set_hash_field(String::kEmptyHashField);
   3229   external_string->set_resource(resource);
   3230 
   3231   return result;
   3232 }
   3233 
   3234 
   3235 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
   3236     const ExternalTwoByteString::Resource* resource) {
   3237   size_t length = resource->length();
   3238   if (length > static_cast<size_t>(String::kMaxLength)) {
   3239     isolate()->context()->mark_out_of_memory();
   3240     return Failure::OutOfMemoryException();
   3241   }
   3242 
   3243   // For small strings we check whether the resource contains only
   3244   // ASCII characters.  If so, we use a different string map.
   3245   static const size_t kAsciiCheckLengthLimit = 32;
   3246   bool is_ascii = length <= kAsciiCheckLengthLimit &&
   3247       String::IsAscii(resource->data(), static_cast<int>(length));
   3248   Map* map = is_ascii ?
   3249       external_string_with_ascii_data_map() : external_string_map();
   3250   Object* result;
   3251   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3252     if (!maybe_result->ToObject(&result)) return maybe_result;
   3253   }
   3254 
   3255   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
   3256   external_string->set_length(static_cast<int>(length));
   3257   external_string->set_hash_field(String::kEmptyHashField);
   3258   external_string->set_resource(resource);
   3259 
   3260   return result;
   3261 }
   3262 
   3263 
   3264 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
   3265   if (code <= String::kMaxAsciiCharCode) {
   3266     Object* value = single_character_string_cache()->get(code);
   3267     if (value != undefined_value()) return value;
   3268 
   3269     char buffer[1];
   3270     buffer[0] = static_cast<char>(code);
   3271     Object* result;
   3272     MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
   3273 
   3274     if (!maybe_result->ToObject(&result)) return maybe_result;
   3275     single_character_string_cache()->set(code, result);
   3276     return result;
   3277   }
   3278 
   3279   Object* result;
   3280   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
   3281     if (!maybe_result->ToObject(&result)) return maybe_result;
   3282   }
   3283   String* answer = String::cast(result);
   3284   answer->Set(0, code);
   3285   return answer;
   3286 }
   3287 
   3288 
   3289 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
   3290   if (length < 0 || length > ByteArray::kMaxLength) {
   3291     return Failure::OutOfMemoryException();
   3292   }
   3293   if (pretenure == NOT_TENURED) {
   3294     return AllocateByteArray(length);
   3295   }
   3296   int size = ByteArray::SizeFor(length);
   3297   Object* result;
   3298   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
   3299                    ? old_data_space_->AllocateRaw(size)
   3300                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
   3301     if (!maybe_result->ToObject(&result)) return maybe_result;
   3302   }
   3303 
   3304   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
   3305       byte_array_map());
   3306   reinterpret_cast<ByteArray*>(result)->set_length(length);
   3307   return result;
   3308 }
   3309 
   3310 
   3311 MaybeObject* Heap::AllocateByteArray(int length) {
   3312   if (length < 0 || length > ByteArray::kMaxLength) {
   3313     return Failure::OutOfMemoryException();
   3314   }
   3315   int size = ByteArray::SizeFor(length);
   3316   AllocationSpace space =
   3317       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   3318   Object* result;
   3319   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
   3320     if (!maybe_result->ToObject(&result)) return maybe_result;
   3321   }
   3322 
   3323   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
   3324       byte_array_map());
   3325   reinterpret_cast<ByteArray*>(result)->set_length(length);
   3326   return result;
   3327 }
   3328 
   3329 
   3330 void Heap::CreateFillerObjectAt(Address addr, int size) {
   3331   if (size == 0) return;
   3332   HeapObject* filler = HeapObject::FromAddress(addr);
   3333   if (size == kPointerSize) {
   3334     filler->set_map_no_write_barrier(one_pointer_filler_map());
   3335   } else if (size == 2 * kPointerSize) {
   3336     filler->set_map_no_write_barrier(two_pointer_filler_map());
   3337   } else {
   3338     filler->set_map_no_write_barrier(free_space_map());
   3339     FreeSpace::cast(filler)->set_size(size);
   3340   }
   3341 }
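
// Illustrative filler selection in CreateFillerObjectAt, by size:
//   kPointerSize     -> one_pointer_filler_map (the map pointer is the
//                       entire object),
//   2 * kPointerSize -> two_pointer_filler_map,
//   anything larger  -> free_space_map plus an explicit size field, so heap
//                       iterators can step over the filler like a FreeSpace.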
   3342 
   3343 
   3344 MaybeObject* Heap::AllocateExternalArray(int length,
   3345                                          ExternalArrayType array_type,
   3346                                          void* external_pointer,
   3347                                          PretenureFlag pretenure) {
   3348   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   3349   Object* result;
   3350   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
   3351                                             space,
   3352                                             OLD_DATA_SPACE);
   3353     if (!maybe_result->ToObject(&result)) return maybe_result;
   3354   }
   3355 
   3356   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
   3357       MapForExternalArrayType(array_type));
   3358   reinterpret_cast<ExternalArray*>(result)->set_length(length);
   3359   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
   3360       external_pointer);
   3361 
   3362   return result;
   3363 }
   3364 
   3365 
   3366 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   3367                               Code::Flags flags,
   3368                               Handle<Object> self_reference,
   3369                               bool immovable) {
   3370   // Allocate ByteArray before the Code object, so that we do not risk
   3371   // leaving an uninitialized Code object (and breaking the heap).
   3372   ByteArray* reloc_info;
   3373   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
   3374   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
   3375 
   3376   // Compute size.
   3377   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
   3378   int obj_size = Code::SizeFor(body_size);
   3379   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
   3380   MaybeObject* maybe_result;
   3381   // Large code objects and code objects which should stay at a fixed address
   3382   // are allocated in large object space.
   3383   if (obj_size > code_space()->AreaSize() || immovable) {
   3384     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   3385   } else {
   3386     maybe_result = code_space_->AllocateRaw(obj_size);
   3387   }
   3388 
   3389   Object* result;
   3390   if (!maybe_result->ToObject(&result)) return maybe_result;
   3391 
   3392   // Initialize the object.
   3393   HeapObject::cast(result)->set_map_no_write_barrier(code_map());
   3394   Code* code = Code::cast(result);
   3395   ASSERT(!isolate_->code_range()->exists() ||
   3396       isolate_->code_range()->contains(code->address()));
   3397   code->set_instruction_size(desc.instr_size);
   3398   code->set_relocation_info(reloc_info);
   3399   code->set_flags(flags);
   3400   if (code->is_call_stub() || code->is_keyed_call_stub()) {
   3401     code->set_check_type(RECEIVER_MAP_CHECK);
   3402   }
   3403   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
   3404   code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
   3405   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
   3406   code->set_gc_metadata(Smi::FromInt(0));
   3407   code->set_ic_age(global_ic_age_);
   3408   // Allow self-references to the created code object by patching the handle
   3409   // to point to the newly allocated Code object.
   3410   if (!self_reference.is_null()) {
   3411     *(self_reference.location()) = code;
   3412   }
   3413   // Migrate generated code.
   3414   // The generated code can contain Object** values (typically from handles)
   3415   // that are dereferenced during the copy to point directly to the actual heap
   3416   // objects. These pointers can include references to the code object itself,
   3417   // through the self_reference parameter.
   3418   code->CopyFrom(desc);
   3419 
   3420 #ifdef DEBUG
   3421   if (FLAG_verify_heap) {
   3422     code->Verify();
   3423   }
   3424 #endif
   3425   return code;
   3426 }
   3427 
   3428 
   3429 MaybeObject* Heap::CopyCode(Code* code) {
   3430   // Allocate an object the same size as the code object.
   3431   int obj_size = code->Size();
   3432   MaybeObject* maybe_result;
   3433   if (obj_size > code_space()->AreaSize()) {
   3434     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   3435   } else {
   3436     maybe_result = code_space_->AllocateRaw(obj_size);
   3437   }
   3438 
   3439   Object* result;
   3440   if (!maybe_result->ToObject(&result)) return maybe_result;
   3441 
   3442   // Copy code object.
   3443   Address old_addr = code->address();
   3444   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
   3445   CopyBlock(new_addr, old_addr, obj_size);
   3446   // Relocate the copy.
   3447   Code* new_code = Code::cast(result);
   3448   ASSERT(!isolate_->code_range()->exists() ||
   3449       isolate_->code_range()->contains(code->address()));
   3450   new_code->Relocate(new_addr - old_addr);
   3451   return new_code;
   3452 }
   3453 
   3454 
   3455 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   3456   // Allocate ByteArray before the Code object, so that we do not risk
   3457   // leaving an uninitialized Code object (and breaking the heap).
   3458   Object* reloc_info_array;
   3459   { MaybeObject* maybe_reloc_info_array =
   3460         AllocateByteArray(reloc_info.length(), TENURED);
   3461     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
   3462       return maybe_reloc_info_array;
   3463     }
   3464   }
   3465 
   3466   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
   3467 
   3468   int new_obj_size = Code::SizeFor(new_body_size);
   3469 
   3470   Address old_addr = code->address();
   3471 
   3472   size_t relocation_offset =
   3473       static_cast<size_t>(code->instruction_end() - old_addr);
   3474 
   3475   MaybeObject* maybe_result;
   3476   if (new_obj_size > code_space()->AreaSize()) {
   3477     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   3478   } else {
   3479     maybe_result = code_space_->AllocateRaw(new_obj_size);
   3480   }
   3481 
   3482   Object* result;
   3483   if (!maybe_result->ToObject(&result)) return maybe_result;
   3484 
   3485   // Copy code object.
   3486   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
   3487 
   3488   // Copy header and instructions.
   3489   memcpy(new_addr, old_addr, relocation_offset);
   3490 
   3491   Code* new_code = Code::cast(result);
   3492   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
   3493 
   3494   // Copy patched rinfo.
   3495   memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
   3496 
   3497   // Relocate the copy.
   3498   ASSERT(!isolate_->code_range()->exists() ||
   3499       isolate_->code_range()->contains(code->address()));
   3500   new_code->Relocate(new_addr - old_addr);
   3501 
   3502 #ifdef DEBUG
   3503   if (FLAG_verify_heap) {
   3504     code->Verify();
   3505   }
   3506 #endif
   3507   return new_code;
   3508 }
   3509 
   3510 
   3511 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
   3512   ASSERT(gc_state_ == NOT_IN_GC);
   3513   ASSERT(map->instance_type() != MAP_TYPE);
   3514   // If allocation failures are disallowed, we may allocate in a different
   3515   // space when new space is full and the object is not a large object.
   3516   AllocationSpace retry_space =
   3517       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
   3518   Object* result;
   3519   { MaybeObject* maybe_result =
   3520         AllocateRaw(map->instance_size(), space, retry_space);
   3521     if (!maybe_result->ToObject(&result)) return maybe_result;
   3522   }
   3523   // No need for write barrier since object is white and map is in old space.
   3524   HeapObject::cast(result)->set_map_no_write_barrier(map);
   3525   return result;
   3526 }
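
// A hypothetical caller of Heap::Allocate, showing the usual MaybeObject
// handling and the retry behaviour described above: if 'space' is NEW_SPACE
// and new space is full, the allocation is retried in the target old space
// for the map's instance type.
#if 0
Object* obj;
{ MaybeObject* maybe_obj = heap->Allocate(map, NEW_SPACE);
  if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // propagate the failure
}
#endif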
   3527 
   3528 
   3529 void Heap::InitializeFunction(JSFunction* function,
   3530                               SharedFunctionInfo* shared,
   3531                               Object* prototype) {
   3532   ASSERT(!prototype->IsMap());
   3533   function->initialize_properties();
   3534   function->initialize_elements();
   3535   function->set_shared(shared);
   3536   function->set_code(shared->code());
   3537   function->set_prototype_or_initial_map(prototype);
   3538   function->set_context(undefined_value());
   3539   function->set_literals_or_bindings(empty_fixed_array());
   3540   function->set_next_function_link(undefined_value());
   3541 }
   3542 
   3543 
   3544 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
   3545   // Allocate the prototype.  Make sure to use the object function
   3546   // from the function's context, since the function can be from a
   3547   // different context.
   3548   JSFunction* object_function =
   3549       function->context()->global_context()->object_function();
   3550 
   3551   // Each function prototype gets a copy of the object function map.
   3552   // This avoids unwanted sharing of maps between prototypes of different
   3553   // constructors.
   3554   Map* new_map;
   3555   ASSERT(object_function->has_initial_map());
   3556   { MaybeObject* maybe_map =
   3557         object_function->initial_map()->CopyDropTransitions();
   3558     if (!maybe_map->To<Map>(&new_map)) return maybe_map;
   3559   }
   3560   Object* prototype;
   3561   { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
   3562     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   3563   }
   3564   // When creating the prototype for the function we must set its
   3565   // constructor to the function.
   3566   Object* result;
   3567   { MaybeObject* maybe_result =
   3568         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
   3569             constructor_symbol(), function, DONT_ENUM);
   3570     if (!maybe_result->ToObject(&result)) return maybe_result;
   3571   }
   3572   return prototype;
   3573 }
   3574 
   3575 
   3576 MaybeObject* Heap::AllocateFunction(Map* function_map,
   3577                                     SharedFunctionInfo* shared,
   3578                                     Object* prototype,
   3579                                     PretenureFlag pretenure) {
   3580   AllocationSpace space =
   3581       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
   3582   Object* result;
   3583   { MaybeObject* maybe_result = Allocate(function_map, space);
   3584     if (!maybe_result->ToObject(&result)) return maybe_result;
   3585   }
   3586   InitializeFunction(JSFunction::cast(result), shared, prototype);
   3587   return result;
   3588 }
   3589 
   3590 
   3591 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
   3592   // To get fast allocation and map sharing for arguments objects we
   3593   // allocate them based on an arguments boilerplate.
   3594 
   3595   JSObject* boilerplate;
   3596   int arguments_object_size;
   3597   bool strict_mode_callee = callee->IsJSFunction() &&
   3598       !JSFunction::cast(callee)->shared()->is_classic_mode();
   3599   if (strict_mode_callee) {
   3600     boilerplate =
   3601         isolate()->context()->global_context()->
   3602             strict_mode_arguments_boilerplate();
   3603     arguments_object_size = kArgumentsObjectSizeStrict;
   3604   } else {
   3605     boilerplate =
   3606         isolate()->context()->global_context()->arguments_boilerplate();
   3607     arguments_object_size = kArgumentsObjectSize;
   3608   }
   3609 
   3610   // This calls Copy directly rather than using Heap::AllocateRaw so we
   3611   // duplicate the check here.
   3612   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   3613 
   3614   // Check that the size of the boilerplate matches our
   3615   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
   3616   // on the size being a known constant.
   3617   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
   3618 
   3619   // Do the allocation.
   3620   Object* result;
   3621   { MaybeObject* maybe_result =
   3622         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
   3623     if (!maybe_result->ToObject(&result)) return maybe_result;
   3624   }
   3625 
   3626   // Copy the content. The arguments boilerplate doesn't have any
   3627   // fields that point to new space so it's safe to skip the write
   3628   // barrier here.
   3629   CopyBlock(HeapObject::cast(result)->address(),
   3630             boilerplate->address(),
   3631             JSObject::kHeaderSize);
   3632 
   3633   // Set the length property.
   3634   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
   3635                                                 Smi::FromInt(length),
   3636                                                 SKIP_WRITE_BARRIER);
   3637   // Set the callee property for non-strict mode arguments object only.
   3638   if (!strict_mode_callee) {
   3639     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
   3640                                                   callee);
   3641   }
   3642 
   3643   // Check the state of the object.
   3644   ASSERT(JSObject::cast(result)->HasFastProperties());
   3645   ASSERT(JSObject::cast(result)->HasFastElements());
   3646 
   3647   return result;
   3648 }
   3649 
   3650 
   3651 static bool HasDuplicates(DescriptorArray* descriptors) {
   3652   int count = descriptors->number_of_descriptors();
   3653   if (count > 1) {
   3654     String* prev_key = descriptors->GetKey(0);
   3655     for (int i = 1; i != count; i++) {
   3656       String* current_key = descriptors->GetKey(i);
   3657       if (prev_key == current_key) return true;
   3658       prev_key = current_key;
   3659     }
   3660   }
   3661   return false;
   3662 }
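
// HasDuplicates relies on the descriptors being sorted by key, so that equal
// names are adjacent and one linear scan of neighbours suffices.  Sketch of
// the intended use, mirroring AllocateInitialMap below:
#if 0
descriptors->SortUnchecked(witness);
if (HasDuplicates(descriptors)) {
  // The compiler does not guarantee unique property names, so reject.
}
#endif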
   3663 
   3664 
   3665 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
   3666   ASSERT(!fun->has_initial_map());
   3667 
   3668   // First create a new map with the size and number of in-object properties
   3669   // suggested by the function.
   3670   int instance_size = fun->shared()->CalculateInstanceSize();
   3671   int in_object_properties = fun->shared()->CalculateInObjectProperties();
   3672   Object* map_obj;
   3673   { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
   3674     if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
   3675   }
   3676 
   3677   // Fetch or allocate prototype.
   3678   Object* prototype;
   3679   if (fun->has_instance_prototype()) {
   3680     prototype = fun->instance_prototype();
   3681   } else {
   3682     { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
   3683       if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   3684     }
   3685   }
   3686   Map* map = Map::cast(map_obj);
   3687   map->set_inobject_properties(in_object_properties);
   3688   map->set_unused_property_fields(in_object_properties);
   3689   map->set_prototype(prototype);
   3690   ASSERT(map->has_fast_elements());
   3691 
   3692   // If the function has only simple this-property assignments, add
   3693   // field descriptors for these to the initial map as the object
   3694   // cannot be constructed without having these properties.  Guard by
   3695   // the inline_new flag so we only change the map if we generate a
   3696   // specialized construct stub.
   3697   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
   3698   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
   3699     int count = fun->shared()->this_property_assignments_count();
   3700     if (count > in_object_properties) {
   3701       // Inline constructor can only handle inobject properties.
   3702       fun->shared()->ForbidInlineConstructor();
   3703     } else {
   3704       DescriptorArray* descriptors;
   3705       { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
   3706         if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
   3707           return maybe_descriptors_obj;
   3708         }
   3709       }
   3710       DescriptorArray::WhitenessWitness witness(descriptors);
   3711       for (int i = 0; i < count; i++) {
   3712         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
   3713         ASSERT(name->IsSymbol());
   3714         FieldDescriptor field(name, i, NONE);
   3715         field.SetEnumerationIndex(i);
   3716         descriptors->Set(i, &field, witness);
   3717       }
   3718       descriptors->SetNextEnumerationIndex(count);
   3719       descriptors->SortUnchecked(witness);
   3720 
   3721       // The descriptors may contain duplicates because the compiler does not
   3722   // guarantee the uniqueness of property names (doing so would have required
   3723   // quadratic time).  Once the descriptors are sorted, we can check for
   3724       // duplicates in linear time.
   3725       if (HasDuplicates(descriptors)) {
   3726         fun->shared()->ForbidInlineConstructor();
   3727       } else {
   3728         map->set_instance_descriptors(descriptors);
   3729         map->set_pre_allocated_property_fields(count);
   3730         map->set_unused_property_fields(in_object_properties - count);
   3731       }
   3732     }
   3733   }
   3734 
   3735   fun->shared()->StartInobjectSlackTracking(map);
   3736 
   3737   return map;
   3738 }
   3739 
   3740 
   3741 void Heap::InitializeJSObjectFromMap(JSObject* obj,
   3742                                      FixedArray* properties,
   3743                                      Map* map) {
   3744   obj->set_properties(properties);
   3745   obj->initialize_elements();
   3746   // TODO(1240798): Initialize the object's body using valid initial values
   3747   // according to the object's initial map.  For example, if the map's
   3748   // instance type is JS_ARRAY_TYPE, the length field should be initialized
   3749   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
   3750   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
   3751   // verification code has to cope with (temporarily) invalid objects.  See
   3752   // for example, JSArray::JSArrayVerify.
   3753   Object* filler;
   3754   // We cannot always fill with one_pointer_filler_map because objects
   3755   // created from API functions expect their internal fields to be initialized
   3756   // with undefined_value.
   3757   // Pre-allocated fields need to be initialized with undefined_value as well
   3758   // so that object accesses before the constructor completes (e.g. in the
   3759   // debugger) will not cause a crash.
   3760   if (map->constructor()->IsJSFunction() &&
   3761       JSFunction::cast(map->constructor())->shared()->
   3762           IsInobjectSlackTrackingInProgress()) {
   3763     // We might want to shrink the object later.
   3764     ASSERT(obj->GetInternalFieldCount() == 0);
   3765     filler = Heap::one_pointer_filler_map();
   3766   } else {
   3767     filler = Heap::undefined_value();
   3768   }
   3769   obj->InitializeBody(map, Heap::undefined_value(), filler);
   3770 }
   3771 
   3772 
   3773 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
   3774   // JSFunctions should be allocated using AllocateFunction to be
   3775   // properly initialized.
   3776   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
   3777 
   3778   // Both types of global objects should be allocated using
   3779   // AllocateGlobalObject to be properly initialized.
   3780   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
   3781   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
   3782 
   3783   // Allocate the backing storage for the properties.
   3784   int prop_size =
   3785       map->pre_allocated_property_fields() +
   3786       map->unused_property_fields() -
   3787       map->inobject_properties();
   3788   ASSERT(prop_size >= 0);
   3789   Object* properties;
   3790   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
   3791     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
   3792   }
   3793 
   3794   // Allocate the JSObject.
   3795   AllocationSpace space =
   3796       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
   3797   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   3798   Object* obj;
   3799   { MaybeObject* maybe_obj = Allocate(map, space);
   3800     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   3801   }
   3802 
   3803   // Initialize the JSObject.
   3804   InitializeJSObjectFromMap(JSObject::cast(obj),
   3805                             FixedArray::cast(properties),
   3806                             map);
   3807   ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
   3808          JSObject::cast(obj)->HasFastElements());
   3809   return obj;
   3810 }
   3811 
   3812 
   3813 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
   3814                                     PretenureFlag pretenure) {
   3815   // Allocate the initial map if absent.
   3816   if (!constructor->has_initial_map()) {
   3817     Object* initial_map;
   3818     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
   3819       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
   3820     }
   3821     constructor->set_initial_map(Map::cast(initial_map));
   3822     Map::cast(initial_map)->set_constructor(constructor);
   3823   }
   3824   // Allocate the object based on the constructor's initial map.
   3825   MaybeObject* result = AllocateJSObjectFromMap(
   3826       constructor->initial_map(), pretenure);
   3827 #ifdef DEBUG
   3828   // Make sure the result is NOT a global object if the allocation succeeded.
   3829   Object* non_failure;
   3830   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
   3831 #endif
   3832   return result;
   3833 }
   3834 
   3835 
   3836 MaybeObject* Heap::AllocateJSArrayAndStorage(
   3837     ElementsKind elements_kind,
   3838     int length,
   3839     int capacity,
   3840     ArrayStorageAllocationMode mode,
   3841     PretenureFlag pretenure) {
   3842   ASSERT(capacity >= length);
   3843   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
   3844   JSArray* array;
   3845   if (!maybe_array->To(&array)) return maybe_array;
   3846 
   3847   if (capacity == 0) {
   3848     array->set_length(Smi::FromInt(0));
   3849     array->set_elements(empty_fixed_array());
   3850     return array;
   3851   }
   3852 
   3853   FixedArrayBase* elms;
   3854   MaybeObject* maybe_elms = NULL;
   3855   if (elements_kind == FAST_DOUBLE_ELEMENTS) {
   3856     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
   3857       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
   3858     } else {
   3859       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
   3860       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
   3861     }
   3862   } else {
   3863     ASSERT(elements_kind == FAST_ELEMENTS ||
   3864            elements_kind == FAST_SMI_ONLY_ELEMENTS);
   3865     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
   3866       maybe_elms = AllocateUninitializedFixedArray(capacity);
   3867     } else {
   3868       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
   3869       maybe_elms = AllocateFixedArrayWithHoles(capacity);
   3870     }
   3871   }
   3872   if (!maybe_elms->To(&elms)) return maybe_elms;
   3873 
   3874   array->set_elements(elms);
   3875   array->set_length(Smi::FromInt(length));
   3876   return array;
   3877 }
   3878 
   3879 
   3880 MaybeObject* Heap::AllocateJSArrayWithElements(
   3881     FixedArrayBase* elements,
   3882     ElementsKind elements_kind,
   3883     PretenureFlag pretenure) {
   3884   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
   3885   JSArray* array;
   3886   if (!maybe_array->To(&array)) return maybe_array;
   3887 
   3888   array->set_elements(elements);
   3889   array->set_length(Smi::FromInt(elements->length()));
   3890   return array;
   3891 }
   3892 
   3893 
   3894 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
   3895   // Allocate map.
   3896   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
   3897   // maps. Will probably depend on the identity of the handler object, too.
   3898   Map* map;
   3899   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
   3900   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
   3901   map->set_prototype(prototype);
   3902 
   3903   // Allocate the proxy object.
   3904   JSProxy* result;
   3905   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3906   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   3907   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   3908   result->set_handler(handler);
   3909   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   3910   return result;
   3911 }
   3912 
   3913 
   3914 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
   3915                                            Object* call_trap,
   3916                                            Object* construct_trap,
   3917                                            Object* prototype) {
   3918   // Allocate map.
   3919   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
   3920   // maps. Will probably depend on the identity of the handler object, too.
   3921   Map* map;
   3922   MaybeObject* maybe_map_obj =
   3923       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
   3924   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
   3925   map->set_prototype(prototype);
   3926 
   3927   // Allocate the proxy object.
   3928   JSFunctionProxy* result;
   3929   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
   3930   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   3931   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   3932   result->set_handler(handler);
   3933   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   3934   result->set_call_trap(call_trap);
   3935   result->set_construct_trap(construct_trap);
   3936   return result;
   3937 }
   3938 
   3939 
   3940 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   3941   ASSERT(constructor->has_initial_map());
   3942   Map* map = constructor->initial_map();
   3943 
   3944   // Make sure no field properties are described in the initial map.
    3945   // This guarantees that normalizing the properties does not
   3946   // require us to change property values to JSGlobalPropertyCells.
   3947   ASSERT(map->NextFreePropertyIndex() == 0);
   3948 
   3949   // Make sure we don't have a ton of pre-allocated slots in the
   3950   // global objects. They will be unused once we normalize the object.
   3951   ASSERT(map->unused_property_fields() == 0);
   3952   ASSERT(map->inobject_properties() == 0);
   3953 
   3954   // Initial size of the backing store to avoid resize of the storage during
    3955   // bootstrapping. The size differs between the JS global object and the
   3956   // builtins object.
   3957   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
   3958 
   3959   // Allocate a dictionary object for backing storage.
   3960   Object* obj;
   3961   { MaybeObject* maybe_obj =
   3962         StringDictionary::Allocate(
   3963             map->NumberOfDescribedProperties() * 2 + initial_size);
   3964     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   3965   }
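           // Capacity note (illustrative): with, say, 8 accessors described on
           // the map, the builtins object gets room for 8 * 2 + 512 entries up
           // front, so bootstrapping should not force a dictionary resize.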
   3966   StringDictionary* dictionary = StringDictionary::cast(obj);
   3967 
   3968   // The global object might be created from an object template with accessors.
   3969   // Fill these accessors into the dictionary.
   3970   DescriptorArray* descs = map->instance_descriptors();
   3971   for (int i = 0; i < descs->number_of_descriptors(); i++) {
   3972     PropertyDetails details(descs->GetDetails(i));
   3973     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
   3974     PropertyDetails d =
   3975         PropertyDetails(details.attributes(), CALLBACKS, details.index());
   3976     Object* value = descs->GetCallbacksObject(i);
   3977     { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
   3978       if (!maybe_value->ToObject(&value)) return maybe_value;
   3979     }
   3980 
   3981     Object* result;
   3982     { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
   3983       if (!maybe_result->ToObject(&result)) return maybe_result;
   3984     }
   3985     dictionary = StringDictionary::cast(result);
   3986   }
   3987 
   3988   // Allocate the global object and initialize it with the backing store.
   3989   { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
   3990     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   3991   }
   3992   JSObject* global = JSObject::cast(obj);
   3993   InitializeJSObjectFromMap(global, dictionary, map);
   3994 
   3995   // Create a new map for the global object.
   3996   { MaybeObject* maybe_obj = map->CopyDropDescriptors();
   3997     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   3998   }
   3999   Map* new_map = Map::cast(obj);
   4000 
   4001   // Set up the global object as a normalized object.
   4002   global->set_map(new_map);
   4003   global->map()->clear_instance_descriptors();
   4004   global->set_properties(dictionary);
   4005 
    4006   // Make sure the result is a global object with properties in dictionary mode.
   4007   ASSERT(global->IsGlobalObject());
   4008   ASSERT(!global->HasFastProperties());
   4009   return global;
   4010 }
   4011 
   4012 
   4013 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   4014   // Never used to copy functions.  If functions need to be copied we
   4015   // have to be careful to clear the literals array.
   4016   SLOW_ASSERT(!source->IsJSFunction());
   4017 
   4018   // Make the clone.
   4019   Map* map = source->map();
   4020   int object_size = map->instance_size();
   4021   Object* clone;
   4022 
   4023   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
   4024 
   4025   // If we're forced to always allocate, we use the general allocation
   4026   // functions which may leave us with an object in old space.
   4027   if (always_allocate()) {
   4028     { MaybeObject* maybe_clone =
   4029           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
   4030       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
   4031     }
   4032     Address clone_address = HeapObject::cast(clone)->address();
   4033     CopyBlock(clone_address,
   4034               source->address(),
   4035               object_size);
   4036     // Update write barrier for all fields that lie beyond the header.
   4037     RecordWrites(clone_address,
   4038                  JSObject::kHeaderSize,
   4039                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   4040   } else {
   4041     wb_mode = SKIP_WRITE_BARRIER;
   4042     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
   4043       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
   4044     }
   4045     SLOW_ASSERT(InNewSpace(clone));
   4046     // Since we know the clone is allocated in new space, we can copy
   4047     // the contents without worrying about updating the write barrier.
   4048     CopyBlock(HeapObject::cast(clone)->address(),
   4049               source->address(),
   4050               object_size);
   4051   }
   4052 
   4053   SLOW_ASSERT(
   4054       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   4055   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   4056   FixedArray* properties = FixedArray::cast(source->properties());
   4057   // Update elements if necessary.
   4058   if (elements->length() > 0) {
   4059     Object* elem;
   4060     { MaybeObject* maybe_elem;
   4061       if (elements->map() == fixed_cow_array_map()) {
   4062         maybe_elem = FixedArray::cast(elements);
   4063       } else if (source->HasFastDoubleElements()) {
   4064         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
   4065       } else {
   4066         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
   4067       }
   4068       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
   4069     }
   4070     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
   4071   }
   4072   // Update properties if necessary.
   4073   if (properties->length() > 0) {
   4074     Object* prop;
   4075     { MaybeObject* maybe_prop = CopyFixedArray(properties);
   4076       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
   4077     }
   4078     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
   4079   }
   4080   // Return the new clone.
   4081   return clone;
   4082 }
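         // Barrier recap for the two paths above (illustrative): an old-space
         // clone is byte-copied and then RecordWrites() re-registers every pointer
         // field past the header, since the raw copy may have planted old-to-new
         // pointers the store buffer has not seen; a new-space clone needs no
         // barrier, as only old-space objects can hold tracked old-to-new pointers.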
   4083 
   4084 
   4085 MaybeObject* Heap::ReinitializeJSReceiver(
   4086     JSReceiver* object, InstanceType type, int size) {
   4087   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
   4088 
   4089   // Allocate fresh map.
   4090   // TODO(rossberg): Once we optimize proxies, cache these maps.
   4091   Map* map;
   4092   MaybeObject* maybe = AllocateMap(type, size);
   4093   if (!maybe->To<Map>(&map)) return maybe;
   4094 
   4095   // Check that the receiver has at least the size of the fresh object.
   4096   int size_difference = object->map()->instance_size() - map->instance_size();
   4097   ASSERT(size_difference >= 0);
   4098 
   4099   map->set_prototype(object->map()->prototype());
   4100 
   4101   // Allocate the backing storage for the properties.
   4102   int prop_size = map->unused_property_fields() - map->inobject_properties();
   4103   Object* properties;
   4104   maybe = AllocateFixedArray(prop_size, TENURED);
   4105   if (!maybe->ToObject(&properties)) return maybe;
   4106 
   4107   // Functions require some allocation, which might fail here.
   4108   SharedFunctionInfo* shared = NULL;
   4109   if (type == JS_FUNCTION_TYPE) {
   4110     String* name;
   4111     maybe = LookupAsciiSymbol("<freezing call trap>");
   4112     if (!maybe->To<String>(&name)) return maybe;
   4113     maybe = AllocateSharedFunctionInfo(name);
   4114     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
   4115   }
   4116 
   4117   // Because of possible retries of this function after failure,
   4118   // we must NOT fail after this point, where we have changed the type!
   4119 
   4120   // Reset the map for the object.
   4121   object->set_map(map);
   4122   JSObject* jsobj = JSObject::cast(object);
   4123 
   4124   // Reinitialize the object from the constructor map.
   4125   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
   4126 
   4127   // Functions require some minimal initialization.
   4128   if (type == JS_FUNCTION_TYPE) {
   4129     map->set_function_with_prototype(true);
   4130     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
   4131     JSFunction::cast(object)->set_context(
   4132         isolate()->context()->global_context());
   4133   }
   4134 
   4135   // Put in filler if the new object is smaller than the old.
   4136   if (size_difference > 0) {
   4137     CreateFillerObjectAt(
   4138         object->address() + map->instance_size(), size_difference);
   4139   }
   4140 
   4141   return object;
   4142 }
   4143 
   4144 
   4145 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
   4146                                              JSGlobalProxy* object) {
   4147   ASSERT(constructor->has_initial_map());
   4148   Map* map = constructor->initial_map();
   4149 
   4150   // Check that the already allocated object has the same size and type as
   4151   // objects allocated using the constructor.
   4152   ASSERT(map->instance_size() == object->map()->instance_size());
   4153   ASSERT(map->instance_type() == object->map()->instance_type());
   4154 
   4155   // Allocate the backing storage for the properties.
   4156   int prop_size = map->unused_property_fields() - map->inobject_properties();
   4157   Object* properties;
   4158   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
   4159     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
   4160   }
   4161 
   4162   // Reset the map for the object.
   4163   object->set_map(constructor->initial_map());
   4164 
   4165   // Reinitialize the object from the constructor map.
   4166   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
   4167   return object;
   4168 }
   4169 
   4170 
   4171 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
   4172                                            PretenureFlag pretenure) {
   4173   if (string.length() == 1) {
   4174     return Heap::LookupSingleCharacterStringFromCode(string[0]);
   4175   }
   4176   Object* result;
   4177   { MaybeObject* maybe_result =
   4178         AllocateRawAsciiString(string.length(), pretenure);
   4179     if (!maybe_result->ToObject(&result)) return maybe_result;
   4180   }
   4181 
   4182   // Copy the characters into the new object.
   4183   SeqAsciiString* string_result = SeqAsciiString::cast(result);
   4184   for (int i = 0; i < string.length(); i++) {
   4185     string_result->SeqAsciiStringSet(i, string[i]);
   4186   }
   4187   return result;
   4188 }
   4189 
   4190 
   4191 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
   4192                                               PretenureFlag pretenure) {
    4193   // Count the number of UTF-16 code units needed for the UTF-8 string;
    4194   // characters outside the BMP take two units (a surrogate pair).
   4195   Access<UnicodeCache::Utf8Decoder>
   4196       decoder(isolate_->unicode_cache()->utf8_decoder());
   4197   decoder->Reset(string.start(), string.length());
   4198   int chars = 0;
   4199   while (decoder->has_more()) {
   4200     uint32_t r = decoder->GetNext();
   4201     if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
   4202       chars++;
   4203     } else {
   4204       chars += 2;
   4205     }
   4206   }
   4207 
   4208   Object* result;
   4209   { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
   4210     if (!maybe_result->ToObject(&result)) return maybe_result;
   4211   }
   4212 
   4213   // Convert and copy the characters into the new object.
   4214   String* string_result = String::cast(result);
   4215   decoder->Reset(string.start(), string.length());
   4216   int i = 0;
   4217   while (i < chars) {
   4218     uint32_t r = decoder->GetNext();
   4219     if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
   4220       string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
   4221       string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
   4222     } else {
   4223       string_result->Set(i++, r);
   4224     }
   4225   }
   4226   return result;
   4227 }
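         // Worked example (illustrative): the supplementary code point U+1D11E
         // counts as two UTF-16 units above and is written as the surrogate pair
         //   lead  = 0xD800 + ((0x1D11E - 0x10000) >> 10)    == 0xD834
         //   trail = 0xDC00 + ((0x1D11E - 0x10000) & 0x3FF)  == 0xDD1E
         // which is what the LeadSurrogate/TrailSurrogate calls are expected to
         // produce.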
   4228 
   4229 
   4230 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
   4231                                              PretenureFlag pretenure) {
   4232   // Check if the string is an ASCII string.
   4233   MaybeObject* maybe_result;
   4234   if (String::IsAscii(string.start(), string.length())) {
   4235     maybe_result = AllocateRawAsciiString(string.length(), pretenure);
   4236   } else {  // It's not an ASCII string.
   4237     maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
   4238   }
   4239   Object* result;
   4240   if (!maybe_result->ToObject(&result)) return maybe_result;
   4241 
   4242   // Copy the characters into the new object, which may be either ASCII or
   4243   // UTF-16.
   4244   String* string_result = String::cast(result);
   4245   for (int i = 0; i < string.length(); i++) {
   4246     string_result->Set(i, string[i]);
   4247   }
   4248   return result;
   4249 }
   4250 
   4251 
   4252 Map* Heap::SymbolMapForString(String* string) {
   4253   // If the string is in new space it cannot be used as a symbol.
   4254   if (InNewSpace(string)) return NULL;
   4255 
   4256   // Find the corresponding symbol map for strings.
   4257   switch (string->map()->instance_type()) {
   4258     case STRING_TYPE: return symbol_map();
   4259     case ASCII_STRING_TYPE: return ascii_symbol_map();
   4260     case CONS_STRING_TYPE: return cons_symbol_map();
   4261     case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
   4262     case EXTERNAL_STRING_TYPE: return external_symbol_map();
   4263     case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
   4264     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
   4265       return external_symbol_with_ascii_data_map();
   4266     case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
   4267     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
   4268       return short_external_ascii_symbol_map();
   4269     case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
   4270       return short_external_symbol_with_ascii_data_map();
   4271     default: return NULL;  // No match found.
   4272   }
   4273 }
   4274 
   4275 
   4276 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
   4277                                           int chars,
   4278                                           uint32_t hash_field) {
   4279   ASSERT(chars >= 0);
    4280   // Ensure that 'chars' matches the number of characters in the buffer.
   4281   ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
   4282   // Determine whether the string is ASCII.
   4283   bool is_ascii = true;
   4284   while (buffer->has_more()) {
   4285     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
   4286       is_ascii = false;
   4287       break;
   4288     }
   4289   }
   4290   buffer->Rewind();
   4291 
   4292   // Compute map and object size.
   4293   int size;
   4294   Map* map;
   4295 
   4296   if (is_ascii) {
   4297     if (chars > SeqAsciiString::kMaxLength) {
   4298       return Failure::OutOfMemoryException();
   4299     }
   4300     map = ascii_symbol_map();
   4301     size = SeqAsciiString::SizeFor(chars);
   4302   } else {
   4303     if (chars > SeqTwoByteString::kMaxLength) {
   4304       return Failure::OutOfMemoryException();
   4305     }
   4306     map = symbol_map();
   4307     size = SeqTwoByteString::SizeFor(chars);
   4308   }
   4309 
   4310   // Allocate string.
   4311   Object* result;
   4312   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
   4313                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
   4314                    : old_data_space_->AllocateRaw(size);
   4315     if (!maybe_result->ToObject(&result)) return maybe_result;
   4316   }
   4317 
   4318   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   4319   // Set length and hash fields of the allocated string.
   4320   String* answer = String::cast(result);
   4321   answer->set_length(chars);
   4322   answer->set_hash_field(hash_field);
   4323 
   4324   ASSERT_EQ(size, answer->Size());
   4325 
   4326   // Fill in the characters.
   4327   int i = 0;
   4328   while (i < chars) {
   4329     uint32_t character = buffer->GetNext();
   4330     if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
   4331       answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
   4332       answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
   4333     } else {
   4334       answer->Set(i++, character);
   4335     }
   4336   }
   4337   return answer;
   4338 }
   4339 
   4340 
   4341 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   4342   if (length < 0 || length > SeqAsciiString::kMaxLength) {
   4343     return Failure::OutOfMemoryException();
   4344   }
   4345 
   4346   int size = SeqAsciiString::SizeFor(length);
   4347   ASSERT(size <= SeqAsciiString::kMaxSize);
   4348 
   4349   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   4350   AllocationSpace retry_space = OLD_DATA_SPACE;
   4351 
   4352   if (space == NEW_SPACE) {
   4353     if (size > kMaxObjectSizeInNewSpace) {
    4354       // Allocate in large object space; the retry space will be ignored.
   4355       space = LO_SPACE;
   4356     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
   4357       // Allocate in new space, retry in large object space.
   4358       retry_space = LO_SPACE;
   4359     }
   4360   } else if (space == OLD_DATA_SPACE &&
   4361              size > Page::kMaxNonCodeHeapObjectSize) {
   4362     space = LO_SPACE;
   4363   }
   4364   Object* result;
   4365   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   4366     if (!maybe_result->ToObject(&result)) return maybe_result;
   4367   }
   4368 
   4369   // Partially initialize the object.
   4370   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
   4371   String::cast(result)->set_length(length);
   4372   String::cast(result)->set_hash_field(String::kEmptyHashField);
   4373   ASSERT_EQ(size, HeapObject::cast(result)->Size());
   4374   return result;
   4375 }
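         // Space selection above, summarized (illustrative):
         //   NOT_TENURED and the string fits on a regular page -> NEW_SPACE,
         //       retrying in OLD_DATA_SPACE if new space is full;
         //   NOT_TENURED and larger than a page but still at most
         //       kMaxObjectSizeInNewSpace -> NEW_SPACE, retrying in LO_SPACE;
         //   NOT_TENURED and larger than kMaxObjectSizeInNewSpace -> LO_SPACE;
         //   TENURED -> OLD_DATA_SPACE, or LO_SPACE for page-exceeding sizes.
         // AllocateRawTwoByteString below follows the same scheme.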
   4376 
   4377 
   4378 MaybeObject* Heap::AllocateRawTwoByteString(int length,
   4379                                             PretenureFlag pretenure) {
   4380   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
   4381     return Failure::OutOfMemoryException();
   4382   }
   4383   int size = SeqTwoByteString::SizeFor(length);
   4384   ASSERT(size <= SeqTwoByteString::kMaxSize);
   4385   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   4386   AllocationSpace retry_space = OLD_DATA_SPACE;
   4387 
   4388   if (space == NEW_SPACE) {
   4389     if (size > kMaxObjectSizeInNewSpace) {
    4390       // Allocate in large object space; the retry space will be ignored.
   4391       space = LO_SPACE;
   4392     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
   4393       // Allocate in new space, retry in large object space.
   4394       retry_space = LO_SPACE;
   4395     }
   4396   } else if (space == OLD_DATA_SPACE &&
   4397              size > Page::kMaxNonCodeHeapObjectSize) {
   4398     space = LO_SPACE;
   4399   }
   4400   Object* result;
   4401   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
   4402     if (!maybe_result->ToObject(&result)) return maybe_result;
   4403   }
   4404 
   4405   // Partially initialize the object.
   4406   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
   4407   String::cast(result)->set_length(length);
   4408   String::cast(result)->set_hash_field(String::kEmptyHashField);
   4409   ASSERT_EQ(size, HeapObject::cast(result)->Size());
   4410   return result;
   4411 }
   4412 
   4413 
   4414 MaybeObject* Heap::AllocateJSArray(
   4415     ElementsKind elements_kind,
   4416     PretenureFlag pretenure) {
   4417   Context* global_context = isolate()->context()->global_context();
   4418   JSFunction* array_function = global_context->array_function();
   4419   Map* map = array_function->initial_map();
   4420   if (elements_kind == FAST_DOUBLE_ELEMENTS) {
   4421     map = Map::cast(global_context->double_js_array_map());
   4422   } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
   4423     map = Map::cast(global_context->object_js_array_map());
   4424   } else {
   4425     ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
   4426     ASSERT(map == global_context->smi_js_array_map());
   4427   }
   4428 
   4429   return AllocateJSObjectFromMap(map, pretenure);
   4430 }
   4431 
   4432 
   4433 MaybeObject* Heap::AllocateEmptyFixedArray() {
   4434   int size = FixedArray::SizeFor(0);
   4435   Object* result;
   4436   { MaybeObject* maybe_result =
   4437         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
   4438     if (!maybe_result->ToObject(&result)) return maybe_result;
   4439   }
   4440   // Initialize the object.
   4441   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
   4442       fixed_array_map());
   4443   reinterpret_cast<FixedArray*>(result)->set_length(0);
   4444   return result;
   4445 }
   4446 
   4447 
   4448 MaybeObject* Heap::AllocateRawFixedArray(int length) {
   4449   if (length < 0 || length > FixedArray::kMaxLength) {
   4450     return Failure::OutOfMemoryException();
   4451   }
   4452   ASSERT(length > 0);
   4453   // Use the general function if we're forced to always allocate.
   4454   if (always_allocate()) return AllocateFixedArray(length, TENURED);
   4455   // Allocate the raw data for a fixed array.
   4456   int size = FixedArray::SizeFor(length);
   4457   return size <= kMaxObjectSizeInNewSpace
   4458       ? new_space_.AllocateRaw(size)
   4459       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
   4460 }
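         // Usage note (illustrative): the raw block above has neither map nor
         // length yet, so a caller must install both before the next allocation
         // can trigger a GC; AllocateFixedArray() below shows the usual pattern.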
   4461 
   4462 
   4463 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   4464   int len = src->length();
   4465   Object* obj;
   4466   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
   4467     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   4468   }
   4469   if (InNewSpace(obj)) {
   4470     HeapObject* dst = HeapObject::cast(obj);
   4471     dst->set_map_no_write_barrier(map);
   4472     CopyBlock(dst->address() + kPointerSize,
   4473               src->address() + kPointerSize,
   4474               FixedArray::SizeFor(len) - kPointerSize);
   4475     return obj;
   4476   }
   4477   HeapObject::cast(obj)->set_map_no_write_barrier(map);
   4478   FixedArray* result = FixedArray::cast(obj);
   4479   result->set_length(len);
   4480 
    4481   // Copy the contents.
   4482   AssertNoAllocation no_gc;
   4483   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   4484   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
   4485   return result;
   4486 }
   4487 
   4488 
   4489 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
   4490                                                Map* map) {
   4491   int len = src->length();
   4492   Object* obj;
   4493   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
   4494     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   4495   }
   4496   HeapObject* dst = HeapObject::cast(obj);
   4497   dst->set_map_no_write_barrier(map);
   4498   CopyBlock(
   4499       dst->address() + FixedDoubleArray::kLengthOffset,
   4500       src->address() + FixedDoubleArray::kLengthOffset,
   4501       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
   4502   return obj;
   4503 }
   4504 
   4505 
   4506 MaybeObject* Heap::AllocateFixedArray(int length) {
   4507   ASSERT(length >= 0);
   4508   if (length == 0) return empty_fixed_array();
   4509   Object* result;
   4510   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
   4511     if (!maybe_result->ToObject(&result)) return maybe_result;
   4512   }
   4513   // Initialize header.
   4514   FixedArray* array = reinterpret_cast<FixedArray*>(result);
   4515   array->set_map_no_write_barrier(fixed_array_map());
   4516   array->set_length(length);
   4517   // Initialize body.
   4518   ASSERT(!InNewSpace(undefined_value()));
   4519   MemsetPointer(array->data_start(), undefined_value(), length);
   4520   return result;
   4521 }
   4522 
   4523 
   4524 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
   4525   if (length < 0 || length > FixedArray::kMaxLength) {
   4526     return Failure::OutOfMemoryException();
   4527   }
   4528 
   4529   AllocationSpace space =
   4530       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
   4531   int size = FixedArray::SizeFor(length);
   4532   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
   4533     // Too big for new space.
   4534     space = LO_SPACE;
   4535   } else if (space == OLD_POINTER_SPACE &&
   4536              size > Page::kMaxNonCodeHeapObjectSize) {
   4537     // Too big for old pointer space.
   4538     space = LO_SPACE;
   4539   }
   4540 
   4541   AllocationSpace retry_space =
   4542       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
   4543 
   4544   return AllocateRaw(size, space, retry_space);
   4545 }
   4546 
   4547 
   4548 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
   4549     Heap* heap,
   4550     int length,
   4551     PretenureFlag pretenure,
   4552     Object* filler) {
   4553   ASSERT(length >= 0);
   4554   ASSERT(heap->empty_fixed_array()->IsFixedArray());
   4555   if (length == 0) return heap->empty_fixed_array();
   4556 
   4557   ASSERT(!heap->InNewSpace(filler));
   4558   Object* result;
   4559   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
   4560     if (!maybe_result->ToObject(&result)) return maybe_result;
   4561   }
   4562 
   4563   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
   4564   FixedArray* array = FixedArray::cast(result);
   4565   array->set_length(length);
   4566   MemsetPointer(array->data_start(), filler, length);
   4567   return array;
   4568 }
   4569 
   4570 
   4571 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
   4572   return AllocateFixedArrayWithFiller(this,
   4573                                       length,
   4574                                       pretenure,
   4575                                       undefined_value());
   4576 }
   4577 
   4578 
   4579 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
   4580                                                PretenureFlag pretenure) {
   4581   return AllocateFixedArrayWithFiller(this,
   4582                                       length,
   4583                                       pretenure,
   4584                                       the_hole_value());
   4585 }
   4586 
   4587 
   4588 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
   4589   if (length == 0) return empty_fixed_array();
   4590 
   4591   Object* obj;
   4592   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
   4593     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   4594   }
   4595 
   4596   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
   4597       fixed_array_map());
   4598   FixedArray::cast(obj)->set_length(length);
   4599   return obj;
   4600 }
   4601 
   4602 
   4603 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
   4604   int size = FixedDoubleArray::SizeFor(0);
   4605   Object* result;
   4606   { MaybeObject* maybe_result =
   4607         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
   4608     if (!maybe_result->ToObject(&result)) return maybe_result;
   4609   }
   4610   // Initialize the object.
   4611   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
   4612       fixed_double_array_map());
   4613   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
   4614   return result;
   4615 }
   4616 
   4617 
   4618 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
   4619     int length,
   4620     PretenureFlag pretenure) {
   4621   if (length == 0) return empty_fixed_array();
   4622 
   4623   Object* elements_object;
   4624   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
   4625   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
   4626   FixedDoubleArray* elements =
   4627       reinterpret_cast<FixedDoubleArray*>(elements_object);
   4628 
   4629   elements->set_map_no_write_barrier(fixed_double_array_map());
   4630   elements->set_length(length);
   4631   return elements;
   4632 }
   4633 
   4634 
   4635 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
   4636     int length,
   4637     PretenureFlag pretenure) {
   4638   if (length == 0) return empty_fixed_array();
   4639 
   4640   Object* elements_object;
   4641   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
   4642   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
   4643   FixedDoubleArray* elements =
   4644       reinterpret_cast<FixedDoubleArray*>(elements_object);
   4645 
   4646   for (int i = 0; i < length; ++i) {
   4647     elements->set_the_hole(i);
   4648   }
   4649 
   4650   elements->set_map_no_write_barrier(fixed_double_array_map());
   4651   elements->set_length(length);
   4652   return elements;
   4653 }
   4654 
   4655 
   4656 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   4657                                                PretenureFlag pretenure) {
   4658   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
   4659     return Failure::OutOfMemoryException();
   4660   }
   4661 
   4662   AllocationSpace space =
   4663       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   4664   int size = FixedDoubleArray::SizeFor(length);
   4665   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
   4666     // Too big for new space.
   4667     space = LO_SPACE;
   4668   } else if (space == OLD_DATA_SPACE &&
   4669              size > Page::kMaxNonCodeHeapObjectSize) {
   4670     // Too big for old data space.
   4671     space = LO_SPACE;
   4672   }
   4673 
   4674   AllocationSpace retry_space =
   4675       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
   4676 
   4677   return AllocateRaw(size, space, retry_space);
   4678 }
   4679 
   4680 
   4681 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
   4682   Object* result;
   4683   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
   4684     if (!maybe_result->ToObject(&result)) return maybe_result;
   4685   }
   4686   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
   4687       hash_table_map());
   4688   ASSERT(result->IsHashTable());
   4689   return result;
   4690 }
   4691 
   4692 
   4693 MaybeObject* Heap::AllocateGlobalContext() {
   4694   Object* result;
   4695   { MaybeObject* maybe_result =
   4696         AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
   4697     if (!maybe_result->ToObject(&result)) return maybe_result;
   4698   }
   4699   Context* context = reinterpret_cast<Context*>(result);
   4700   context->set_map_no_write_barrier(global_context_map());
   4701   context->set_smi_js_array_map(undefined_value());
   4702   context->set_double_js_array_map(undefined_value());
   4703   context->set_object_js_array_map(undefined_value());
   4704   ASSERT(context->IsGlobalContext());
   4705   ASSERT(result->IsContext());
   4706   return result;
   4707 }
   4708 
   4709 
   4710 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   4711   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
   4712   Object* result;
   4713   { MaybeObject* maybe_result = AllocateFixedArray(length);
   4714     if (!maybe_result->ToObject(&result)) return maybe_result;
   4715   }
   4716   Context* context = reinterpret_cast<Context*>(result);
   4717   context->set_map_no_write_barrier(function_context_map());
   4718   context->set_closure(function);
   4719   context->set_previous(function->context());
   4720   context->set_extension(NULL);
   4721   context->set_global(function->context()->global());
   4722   return context;
   4723 }
   4724 
   4725 
   4726 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
   4727                                         Context* previous,
   4728                                         String* name,
   4729                                         Object* thrown_object) {
   4730   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
   4731   Object* result;
   4732   { MaybeObject* maybe_result =
   4733         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
   4734     if (!maybe_result->ToObject(&result)) return maybe_result;
   4735   }
   4736   Context* context = reinterpret_cast<Context*>(result);
   4737   context->set_map_no_write_barrier(catch_context_map());
   4738   context->set_closure(function);
   4739   context->set_previous(previous);
   4740   context->set_extension(name);
   4741   context->set_global(previous->global());
   4742   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
   4743   return context;
   4744 }
   4745 
   4746 
   4747 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
   4748                                        Context* previous,
   4749                                        JSObject* extension) {
   4750   Object* result;
   4751   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
   4752     if (!maybe_result->ToObject(&result)) return maybe_result;
   4753   }
   4754   Context* context = reinterpret_cast<Context*>(result);
   4755   context->set_map_no_write_barrier(with_context_map());
   4756   context->set_closure(function);
   4757   context->set_previous(previous);
   4758   context->set_extension(extension);
   4759   context->set_global(previous->global());
   4760   return context;
   4761 }
   4762 
   4763 
   4764 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
   4765                                         Context* previous,
   4766                                         ScopeInfo* scope_info) {
   4767   Object* result;
   4768   { MaybeObject* maybe_result =
   4769         AllocateFixedArrayWithHoles(scope_info->ContextLength());
   4770     if (!maybe_result->ToObject(&result)) return maybe_result;
   4771   }
   4772   Context* context = reinterpret_cast<Context*>(result);
   4773   context->set_map_no_write_barrier(block_context_map());
   4774   context->set_closure(function);
   4775   context->set_previous(previous);
   4776   context->set_extension(scope_info);
   4777   context->set_global(previous->global());
   4778   return context;
   4779 }
   4780 
   4781 
   4782 MaybeObject* Heap::AllocateScopeInfo(int length) {
   4783   FixedArray* scope_info;
   4784   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
   4785   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
   4786   scope_info->set_map_no_write_barrier(scope_info_map());
   4787   return scope_info;
   4788 }
   4789 
   4790 
   4791 MaybeObject* Heap::AllocateStruct(InstanceType type) {
   4792   Map* map;
   4793   switch (type) {
   4794 #define MAKE_CASE(NAME, Name, name) \
   4795     case NAME##_TYPE: map = name##_map(); break;
   4796 STRUCT_LIST(MAKE_CASE)
   4797 #undef MAKE_CASE
   4798     default:
   4799       UNREACHABLE();
   4800       return Failure::InternalError();
   4801   }
   4802   int size = map->instance_size();
   4803   AllocationSpace space =
   4804       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
   4805   Object* result;
   4806   { MaybeObject* maybe_result = Allocate(map, space);
   4807     if (!maybe_result->ToObject(&result)) return maybe_result;
   4808   }
   4809   Struct::cast(result)->InitializeBody(size);
   4810   return result;
   4811 }
   4812 
   4813 
   4814 bool Heap::IsHeapIterable() {
   4815   return (!old_pointer_space()->was_swept_conservatively() &&
   4816           !old_data_space()->was_swept_conservatively());
   4817 }
   4818 
   4819 
   4820 void Heap::EnsureHeapIsIterable() {
   4821   ASSERT(IsAllocationAllowed());
   4822   if (!IsHeapIterable()) {
   4823     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
   4824   }
   4825   ASSERT(IsHeapIterable());
   4826 }
   4827 
   4828 
   4829 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
   4830   incremental_marking()->Step(step_size,
   4831                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   4832 
   4833   if (incremental_marking()->IsComplete()) {
   4834     bool uncommit = false;
   4835     if (gc_count_at_last_idle_gc_ == gc_count_) {
    4836       // No GC since the last full GC; the mutator is probably not active.
   4837       isolate_->compilation_cache()->Clear();
   4838       uncommit = true;
   4839     }
   4840     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
   4841     gc_count_at_last_idle_gc_ = gc_count_;
   4842     if (uncommit) {
   4843       new_space_.Shrink();
   4844       UncommitFromSpace();
   4845     }
   4846   }
   4847 }
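         // Heuristic note (illustrative): gc_count_at_last_idle_gc_ == gc_count_
         // means no GC has happened since the previous idle-time finalization, so
         // the mutator is assumed inactive and it is deemed safe to also drop the
         // compilation cache and return from-space memory to the OS.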
   4848 
   4849 
   4850 bool Heap::IdleNotification(int hint) {
   4851   const int kMaxHint = 1000;
   4852   intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
   4853   // The size factor is in range [3..100].
   4854   intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
   4855 
   4856   if (contexts_disposed_ > 0) {
   4857     if (hint >= kMaxHint) {
    4858       // The embedder is requesting a lot of GC work after context disposal,
    4859       // so we age inline caches to keep them from holding objects of the
    4860       // old context alive.
   4861       AgeInlineCaches();
   4862     }
   4863     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
   4864     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
   4865         incremental_marking()->IsStopped()) {
   4866       HistogramTimerScope scope(isolate_->counters()->gc_context());
   4867       CollectAllGarbage(kReduceMemoryFootprintMask,
   4868                         "idle notification: contexts disposed");
   4869     } else {
   4870       AdvanceIdleIncrementalMarking(step_size);
   4871       contexts_disposed_ = 0;
   4872     }
   4873     // Make sure that we have no pending context disposals.
   4874     // Take into account that we might have decided to delay full collection
   4875     // because incremental marking is in progress.
   4876     ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
   4877     return false;
   4878   }
   4879 
   4880   if (hint >= kMaxHint || !FLAG_incremental_marking ||
   4881       FLAG_expose_gc || Serializer::enabled()) {
   4882     return IdleGlobalGC();
   4883   }
   4884 
    4885   // By doing small chunks of GC work in each IdleNotification, we perform
    4886   // a round of incremental GCs and then wait until the mutator creates
    4887   // enough garbage to justify a new round.
   4888   // An incremental GC progresses as follows:
   4889   // 1. many incremental marking steps,
   4890   // 2. one old space mark-sweep-compact,
   4891   // 3. many lazy sweep steps.
   4892   // Use mark-sweep-compact events to count incremental GCs in a round.
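           // Bookkeeping sketch (illustrative): mark_sweeps_since_idle_round_started_
           // counts the full GCs seen in the current round; once it reaches
           // kMaxMarkSweepsInIdleRound the round is finished, and a new round only
           // starts after EnoughGarbageSinceLastIdleRound() holds again.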
   4893 
   4894 
   4895   if (incremental_marking()->IsStopped()) {
   4896     if (!IsSweepingComplete() &&
   4897         !AdvanceSweepers(static_cast<int>(step_size))) {
   4898       return false;
   4899     }
   4900   }
   4901 
   4902   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
   4903     if (EnoughGarbageSinceLastIdleRound()) {
   4904       StartIdleRound();
   4905     } else {
   4906       return true;
   4907     }
   4908   }
   4909 
   4910   int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
   4911   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
   4912   ms_count_at_last_idle_notification_ = ms_count_;
   4913 
   4914   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
   4915     FinishIdleRound();
   4916     return true;
   4917   }
   4918 
   4919   if (incremental_marking()->IsStopped()) {
   4920     if (!WorthStartingGCWhenIdle()) {
   4921       FinishIdleRound();
   4922       return true;
   4923     }
   4924     incremental_marking()->Start();
   4925   }
   4926 
   4927   AdvanceIdleIncrementalMarking(step_size);
   4928   return false;
   4929 }
   4930 
   4931 
   4932 bool Heap::IdleGlobalGC() {
   4933   static const int kIdlesBeforeScavenge = 4;
   4934   static const int kIdlesBeforeMarkSweep = 7;
   4935   static const int kIdlesBeforeMarkCompact = 8;
   4936   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
   4937   static const unsigned int kGCsBetweenCleanup = 4;
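           // Illustrative schedule, assuming notifications keep arriving with fewer
           // than kGCsBetweenCleanup GCs in between: notification 4 triggers a new
           // space scavenge, notification 7 a mark-sweep (after clearing the
           // compilation cache), and notification 8 a final mark-compact, after
           // which the round reports itself finished.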
   4938 
   4939   if (!last_idle_notification_gc_count_init_) {
   4940     last_idle_notification_gc_count_ = gc_count_;
   4941     last_idle_notification_gc_count_init_ = true;
   4942   }
   4943 
   4944   bool uncommit = true;
   4945   bool finished = false;
   4946 
   4947   // Reset the number of idle notifications received when a number of
   4948   // GCs have taken place. This allows another round of cleanup based
   4949   // on idle notifications if enough work has been carried out to
   4950   // provoke a number of garbage collections.
   4951   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
   4952     number_idle_notifications_ =
   4953         Min(number_idle_notifications_ + 1, kMaxIdleCount);
   4954   } else {
   4955     number_idle_notifications_ = 0;
   4956     last_idle_notification_gc_count_ = gc_count_;
   4957   }
   4958 
   4959   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
   4960     CollectGarbage(NEW_SPACE, "idle notification");
   4961     new_space_.Shrink();
   4962     last_idle_notification_gc_count_ = gc_count_;
   4963   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
   4964     // Before doing the mark-sweep collections we clear the
   4965     // compilation cache to avoid hanging on to source code and
   4966     // generated code for cached functions.
   4967     isolate_->compilation_cache()->Clear();
   4968 
   4969     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
   4970     new_space_.Shrink();
   4971     last_idle_notification_gc_count_ = gc_count_;
   4972 
   4973   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
   4974     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
   4975     new_space_.Shrink();
   4976     last_idle_notification_gc_count_ = gc_count_;
   4977     number_idle_notifications_ = 0;
   4978     finished = true;
   4979   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
   4980     // If we have received more than kIdlesBeforeMarkCompact idle
   4981     // notifications we do not perform any cleanup because we don't
   4982     // expect to gain much by doing so.
   4983     finished = true;
   4984   }
   4985 
   4986   if (uncommit) UncommitFromSpace();
   4987 
   4988   return finished;
   4989 }
   4990 
   4991 
   4992 #ifdef DEBUG
   4993 
   4994 void Heap::Print() {
   4995   if (!HasBeenSetUp()) return;
   4996   isolate()->PrintStack();
   4997   AllSpaces spaces;
   4998   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
   4999     space->Print();
   5000 }
   5001 
   5002 
   5003 void Heap::ReportCodeStatistics(const char* title) {
   5004   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
   5005   PagedSpace::ResetCodeStatistics();
   5006   // We do not look for code in new space, map space, or old space.  If code
   5007   // somehow ends up in those spaces, we would miss it here.
   5008   code_space_->CollectCodeStatistics();
   5009   lo_space_->CollectCodeStatistics();
   5010   PagedSpace::ReportCodeStatistics();
   5011 }
   5012 
   5013 
   5014 // This function expects that NewSpace's allocated objects histogram is
   5015 // populated (via a call to CollectStatistics or else as a side effect of a
   5016 // just-completed scavenge collection).
   5017 void Heap::ReportHeapStatistics(const char* title) {
   5018   USE(title);
   5019   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
   5020          title, gc_count_);
   5021   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
   5022          old_gen_promotion_limit_);
   5023   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
   5024          old_gen_allocation_limit_);
   5025   PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
   5026 
   5027   PrintF("\n");
   5028   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
   5029   isolate_->global_handles()->PrintStats();
   5030   PrintF("\n");
   5031 
   5032   PrintF("Heap statistics : ");
   5033   isolate_->memory_allocator()->ReportStatistics();
   5034   PrintF("To space : ");
   5035   new_space_.ReportStatistics();
   5036   PrintF("Old pointer space : ");
   5037   old_pointer_space_->ReportStatistics();
   5038   PrintF("Old data space : ");
   5039   old_data_space_->ReportStatistics();
   5040   PrintF("Code space : ");
   5041   code_space_->ReportStatistics();
   5042   PrintF("Map space : ");
   5043   map_space_->ReportStatistics();
   5044   PrintF("Cell space : ");
   5045   cell_space_->ReportStatistics();
   5046   PrintF("Large object space : ");
   5047   lo_space_->ReportStatistics();
   5048   PrintF(">>>>>> ========================================= >>>>>>\n");
   5049 }
   5050 
   5051 #endif  // DEBUG
   5052 
   5053 bool Heap::Contains(HeapObject* value) {
   5054   return Contains(value->address());
   5055 }
   5056 
   5057 
   5058 bool Heap::Contains(Address addr) {
   5059   if (OS::IsOutsideAllocatedSpace(addr)) return false;
   5060   return HasBeenSetUp() &&
   5061     (new_space_.ToSpaceContains(addr) ||
   5062      old_pointer_space_->Contains(addr) ||
   5063      old_data_space_->Contains(addr) ||
   5064      code_space_->Contains(addr) ||
   5065      map_space_->Contains(addr) ||
   5066      cell_space_->Contains(addr) ||
   5067      lo_space_->SlowContains(addr));
   5068 }
   5069 
   5070 
   5071 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
   5072   return InSpace(value->address(), space);
   5073 }
   5074 
   5075 
   5076 bool Heap::InSpace(Address addr, AllocationSpace space) {
   5077   if (OS::IsOutsideAllocatedSpace(addr)) return false;
   5078   if (!HasBeenSetUp()) return false;
   5079 
   5080   switch (space) {
   5081     case NEW_SPACE:
   5082       return new_space_.ToSpaceContains(addr);
   5083     case OLD_POINTER_SPACE:
   5084       return old_pointer_space_->Contains(addr);
   5085     case OLD_DATA_SPACE:
   5086       return old_data_space_->Contains(addr);
   5087     case CODE_SPACE:
   5088       return code_space_->Contains(addr);
   5089     case MAP_SPACE:
   5090       return map_space_->Contains(addr);
   5091     case CELL_SPACE:
   5092       return cell_space_->Contains(addr);
   5093     case LO_SPACE:
   5094       return lo_space_->SlowContains(addr);
   5095   }
   5096 
   5097   return false;
   5098 }
   5099 
   5100 
   5101 #ifdef DEBUG
   5102 void Heap::Verify() {
   5103   ASSERT(HasBeenSetUp());
   5104 
   5105   store_buffer()->Verify();
   5106 
   5107   VerifyPointersVisitor visitor;
   5108   IterateRoots(&visitor, VISIT_ONLY_STRONG);
   5109 
   5110   new_space_.Verify();
   5111 
   5112   old_pointer_space_->Verify(&visitor);
   5113   map_space_->Verify(&visitor);
   5114 
   5115   VerifyPointersVisitor no_dirty_regions_visitor;
   5116   old_data_space_->Verify(&no_dirty_regions_visitor);
   5117   code_space_->Verify(&no_dirty_regions_visitor);
   5118   cell_space_->Verify(&no_dirty_regions_visitor);
   5119 
   5120   lo_space_->Verify();
   5121 
   5122   VerifyNoAccessorPairSharing();
   5123 }
   5124 
   5125 
   5126 void Heap::VerifyNoAccessorPairSharing() {
    5127   // Verification is done in two phases: first we mark all AccessorPairs,
    5128   // checking that we only mark unmarked pairs; then we clear all marks,
    5129   // restoring the initial state. We use the Smi tag of the AccessorPair's
    5130   // getter as the marking bit, because we can never see a Smi as the getter.
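           // Bit-level sketch (illustrative, assuming the usual one-bit tagging
           // with kSmiTag == 0 and kHeapObjectTag == 1): a getter stored as the
           // tagged pointer 0x...25 becomes 0x...24 in phase 0, so it now reads
           // as a Smi ("marked"), and phase 1 sets the low bit back to restore
           // 0x...25.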
   5131   for (int phase = 0; phase < 2; phase++) {
   5132     HeapObjectIterator iter(map_space());
   5133     for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
   5134       if (obj->IsMap()) {
   5135         DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
   5136         for (int i = 0; i < descs->number_of_descriptors(); i++) {
   5137           if (descs->GetType(i) == CALLBACKS &&
   5138               descs->GetValue(i)->IsAccessorPair()) {
   5139             AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
    5140             uintptr_t before = reinterpret_cast<uintptr_t>(accessors->getter());
   5141             uintptr_t after = (phase == 0) ?
   5142                 ((before & ~kSmiTagMask) | kSmiTag) :
   5143                 ((before & ~kHeapObjectTag) | kHeapObjectTag);
   5144             CHECK(before != after);
   5145             accessors->set_getter(reinterpret_cast<Object*>(after));
   5146           }
   5147         }
   5148       }
   5149     }
   5150   }
   5151 }
   5152 #endif  // DEBUG
   5153 
   5154 
   5155 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
   5156   Object* symbol = NULL;
   5157   Object* new_table;
   5158   { MaybeObject* maybe_new_table =
   5159         symbol_table()->LookupSymbol(string, &symbol);
   5160     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   5161   }
   5162   // Can't use set_symbol_table because SymbolTable::cast knows that
   5163   // SymbolTable is a singleton and checks for identity.
   5164   roots_[kSymbolTableRootIndex] = new_table;
   5165   ASSERT(symbol != NULL);
   5166   return symbol;
   5167 }
   5168 
   5169 
   5170 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
   5171   Object* symbol = NULL;
   5172   Object* new_table;
   5173   { MaybeObject* maybe_new_table =
   5174         symbol_table()->LookupAsciiSymbol(string, &symbol);
   5175     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   5176   }
   5177   // Can't use set_symbol_table because SymbolTable::cast knows that
   5178   // SymbolTable is a singleton and checks for identity.
   5179   roots_[kSymbolTableRootIndex] = new_table;
   5180   ASSERT(symbol != NULL);
   5181   return symbol;
   5182 }
   5183 
   5184 
   5185 MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
   5186                                      int from,
   5187                                      int length) {
   5188   Object* symbol = NULL;
   5189   Object* new_table;
   5190   { MaybeObject* maybe_new_table =
   5191         symbol_table()->LookupSubStringAsciiSymbol(string,
   5192                                                    from,
   5193                                                    length,
   5194                                                    &symbol);
   5195     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   5196   }
   5197   // Can't use set_symbol_table because SymbolTable::cast knows that
   5198   // SymbolTable is a singleton and checks for identity.
   5199   roots_[kSymbolTableRootIndex] = new_table;
   5200   ASSERT(symbol != NULL);
   5201   return symbol;
   5202 }
   5203 
   5204 
   5205 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
   5206   Object* symbol = NULL;
   5207   Object* new_table;
   5208   { MaybeObject* maybe_new_table =
   5209         symbol_table()->LookupTwoByteSymbol(string, &symbol);
   5210     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   5211   }
   5212   // Can't use set_symbol_table because SymbolTable::cast knows that
   5213   // SymbolTable is a singleton and checks for identity.
   5214   roots_[kSymbolTableRootIndex] = new_table;
   5215   ASSERT(symbol != NULL);
   5216   return symbol;
   5217 }
   5218 
   5219 
   5220 MaybeObject* Heap::LookupSymbol(String* string) {
   5221   if (string->IsSymbol()) return string;
   5222   Object* symbol = NULL;
   5223   Object* new_table;
   5224   { MaybeObject* maybe_new_table =
   5225         symbol_table()->LookupString(string, &symbol);
   5226     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
   5227   }
   5228   // Can't use set_symbol_table because SymbolTable::cast knows that
   5229   // SymbolTable is a singleton and checks for identity.
   5230   roots_[kSymbolTableRootIndex] = new_table;
   5231   ASSERT(symbol != NULL);
   5232   return symbol;
   5233 }
   5234 
   5235 
   5236 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
   5237   if (string->IsSymbol()) {
   5238     *symbol = string;
   5239     return true;
   5240   }
   5241   return symbol_table()->LookupSymbolIfExists(string, symbol);
   5242 }
   5243 
   5244 
   5245 #ifdef DEBUG
   5246 void Heap::ZapFromSpace() {
   5247   NewSpacePageIterator it(new_space_.FromSpaceStart(),
   5248                           new_space_.FromSpaceEnd());
   5249   while (it.has_next()) {
   5250     NewSpacePage* page = it.next();
   5251     for (Address cursor = page->area_start(), limit = page->area_end();
   5252          cursor < limit;
   5253          cursor += kPointerSize) {
   5254       Memory::Address_at(cursor) = kFromSpaceZapValue;
   5255     }
   5256   }
   5257 }
   5258 #endif  // DEBUG
   5259 
   5260 
   5261 void Heap::IterateAndMarkPointersToFromSpace(Address start,
   5262                                              Address end,
   5263                                              ObjectSlotCallback callback) {
   5264   Address slot_address = start;
   5265 
    5266   // We are not collecting slots on new space objects during mutation,
    5267   // thus we have to scan for pointers to evacuation candidates when we
    5268   // promote objects. But we should not record any slots in non-black
    5269   // objects: a grey object's slots would be rescanned anyway, and a white
    5270   // object might not survive until the end of the collection, so recording
    5271   // its slots would violate the invariant.
   5272   bool record_slots = false;
   5273   if (incremental_marking()->IsCompacting()) {
   5274     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
   5275     record_slots = Marking::IsBlack(mark_bit);
   5276   }
   5277 
   5278   while (slot_address < end) {
   5279     Object** slot = reinterpret_cast<Object**>(slot_address);
   5280     Object* object = *slot;
   5281     // If the store buffer becomes overfull we mark pages as being exempt from
   5282     // the store buffer.  These pages are scanned to find pointers that point
   5283     // to the new space.  In that case we may hit newly promoted objects and
   5284     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
   5285     if (object->IsHeapObject()) {
   5286       if (Heap::InFromSpace(object)) {
   5287         callback(reinterpret_cast<HeapObject**>(slot),
   5288                  HeapObject::cast(object));
   5289         Object* new_object = *slot;
   5290         if (InNewSpace(new_object)) {
   5291           SLOW_ASSERT(Heap::InToSpace(new_object));
   5292           SLOW_ASSERT(new_object->IsHeapObject());
   5293           store_buffer_.EnterDirectlyIntoStoreBuffer(
   5294               reinterpret_cast<Address>(slot));
   5295         }
   5296         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
   5297       } else if (record_slots &&
   5298                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
   5299         mark_compact_collector()->RecordSlot(slot, slot, object);
   5300       }
   5301     }
   5302     slot_address += kPointerSize;
   5303   }
   5304 }
   5305 
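// A minimal sketch of the scanning pattern used above, with hypothetical
// names and plain C types rather than V8's API: walk a range one
// pointer-sized word at a time and test each word against a
// [from_start, from_end) semispace, invoking a callback on hits.
//
//   #include <stdint.h>
//   typedef void (*SlotCallback)(uintptr_t* slot);
//   void ScanRange(uintptr_t* start, uintptr_t* end,
//                  uintptr_t from_start, uintptr_t from_end,
//                  SlotCallback callback) {
//     for (uintptr_t* slot = start; slot < end; slot++) {
//       uintptr_t value = *slot;
//       // A word is treated as interesting only if it falls inside the
//       // from-space range.
//       if (value >= from_start && value < from_end) callback(slot);
//     }
//   }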
   5306 
   5307 #ifdef DEBUG
   5308 typedef bool (*CheckStoreBufferFilter)(Object** addr);
   5309 
   5310 
   5311 bool IsAMapPointerAddress(Object** addr) {
   5312   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
   5313   int mod = a % Map::kSize;
   5314   return mod >= Map::kPointerFieldsBeginOffset &&
   5315          mod < Map::kPointerFieldsEndOffset;
   5316 }
   5317 
   5318 
   5319 bool EverythingsAPointer(Object** addr) {
   5320   return true;
   5321 }
   5322 
   5323 
   5324 static void CheckStoreBuffer(Heap* heap,
   5325                              Object** current,
   5326                              Object** limit,
   5327                              Object**** store_buffer_position,
   5328                              Object*** store_buffer_top,
   5329                              CheckStoreBufferFilter filter,
   5330                              Address special_garbage_start,
   5331                              Address special_garbage_end) {
   5332   Map* free_space_map = heap->free_space_map();
   5333   for ( ; current < limit; current++) {
   5334     Object* o = *current;
   5335     Address current_address = reinterpret_cast<Address>(current);
   5336     // Skip free space.
   5337     if (o == free_space_map) {
   5339       FreeSpace* free_space =
   5340           FreeSpace::cast(HeapObject::FromAddress(current_address));
   5341       int skip = free_space->Size();
   5342       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
   5343       ASSERT(skip > 0);
   5344       current_address += skip - kPointerSize;
   5345       current = reinterpret_cast<Object**>(current_address);
   5346       continue;
   5347     }
    5348     // Skip the current linear allocation space between top and limit,
    5349     // which is not marked with the free space map but can contain junk.
   5350     if (current_address == special_garbage_start &&
   5351         special_garbage_end != special_garbage_start) {
   5352       current_address = special_garbage_end - kPointerSize;
   5353       current = reinterpret_cast<Object**>(current_address);
   5354       continue;
   5355     }
   5356     if (!(*filter)(current)) continue;
   5357     ASSERT(current_address < special_garbage_start ||
   5358            current_address >= special_garbage_end);
   5359     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
   5360     // We have to check that the pointer does not point into new space
   5361     // without trying to cast it to a heap object since the hash field of
   5362     // a string can contain values like 1 and 3 which are tagged null
   5363     // pointers.
   5364     if (!heap->InNewSpace(o)) continue;
   5365     while (**store_buffer_position < current &&
   5366            *store_buffer_position < store_buffer_top) {
   5367       (*store_buffer_position)++;
   5368     }
   5369     if (**store_buffer_position != current ||
   5370         *store_buffer_position == store_buffer_top) {
   5371       Object** obj_start = current;
   5372       while (!(*obj_start)->IsMap()) obj_start--;
   5373       UNREACHABLE();
   5374     }
   5375   }
   5376 }
   5377 
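// CheckStoreBuffer() above is essentially a merge of two sorted sequences:
// slot addresses discovered by linearly scanning a page, and the sorted,
// deduplicated store buffer contents. A hedged sketch of that two-pointer
// walk in plain C++ (hypothetical names, not V8's types):
//
//   #include <assert.h>
//   #include <stdint.h>
//   void CheckAllRecorded(const uintptr_t* found, int found_count,
//                         const uintptr_t* recorded, int recorded_count) {
//     int r = 0;
//     for (int f = 0; f < found_count; f++) {
//       // Advance past recorded slots that are below the next find.
//       while (r < recorded_count && recorded[r] < found[f]) r++;
//       // Every discovered intergenerational slot must appear in the
//       // store buffer; otherwise the invariant is broken.
//       assert(r < recorded_count && recorded[r] == found[f]);
//     }
//   }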
   5378 
   5379 // Check that the store buffer contains all intergenerational pointers by
   5380 // scanning a page and ensuring that all pointers to young space are in the
   5381 // store buffer.
   5382 void Heap::OldPointerSpaceCheckStoreBuffer() {
   5383   OldSpace* space = old_pointer_space();
   5384   PageIterator pages(space);
   5385 
   5386   store_buffer()->SortUniq();
   5387 
   5388   while (pages.has_next()) {
   5389     Page* page = pages.next();
   5390     Object** current = reinterpret_cast<Object**>(page->area_start());
   5391 
   5392     Address end = page->area_end();
   5393 
   5394     Object*** store_buffer_position = store_buffer()->Start();
   5395     Object*** store_buffer_top = store_buffer()->Top();
   5396 
   5397     Object** limit = reinterpret_cast<Object**>(end);
   5398     CheckStoreBuffer(this,
   5399                      current,
   5400                      limit,
   5401                      &store_buffer_position,
   5402                      store_buffer_top,
   5403                      &EverythingsAPointer,
   5404                      space->top(),
   5405                      space->limit());
   5406   }
   5407 }
   5408 
   5409 
   5410 void Heap::MapSpaceCheckStoreBuffer() {
   5411   MapSpace* space = map_space();
   5412   PageIterator pages(space);
   5413 
   5414   store_buffer()->SortUniq();
   5415 
   5416   while (pages.has_next()) {
   5417     Page* page = pages.next();
   5418     Object** current = reinterpret_cast<Object**>(page->area_start());
   5419 
   5420     Address end = page->area_end();
   5421 
   5422     Object*** store_buffer_position = store_buffer()->Start();
   5423     Object*** store_buffer_top = store_buffer()->Top();
   5424 
   5425     Object** limit = reinterpret_cast<Object**>(end);
   5426     CheckStoreBuffer(this,
   5427                      current,
   5428                      limit,
   5429                      &store_buffer_position,
   5430                      store_buffer_top,
   5431                      &IsAMapPointerAddress,
   5432                      space->top(),
   5433                      space->limit());
   5434   }
   5435 }
   5436 
   5437 
   5438 void Heap::LargeObjectSpaceCheckStoreBuffer() {
   5439   LargeObjectIterator it(lo_space());
   5440   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
   5441     // We only have code, sequential strings, or fixed arrays in large
   5442     // object space, and only fixed arrays can possibly contain pointers to
   5443     // the young generation.
   5444     if (object->IsFixedArray()) {
   5445       Object*** store_buffer_position = store_buffer()->Start();
   5446       Object*** store_buffer_top = store_buffer()->Top();
   5447       Object** current = reinterpret_cast<Object**>(object->address());
   5448       Object** limit =
   5449           reinterpret_cast<Object**>(object->address() + object->Size());
   5450       CheckStoreBuffer(this,
   5451                        current,
   5452                        limit,
   5453                        &store_buffer_position,
   5454                        store_buffer_top,
   5455                        &EverythingsAPointer,
   5456                        NULL,
   5457                        NULL);
   5458     }
   5459   }
   5460 }
   5461 #endif
   5462 
   5463 
   5464 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   5465   IterateStrongRoots(v, mode);
   5466   IterateWeakRoots(v, mode);
   5467 }
   5468 
   5469 
   5470 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   5471   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   5472   v->Synchronize(VisitorSynchronization::kSymbolTable);
   5473   if (mode != VISIT_ALL_IN_SCAVENGE &&
   5474       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
   5475     // Scavenge collections have special processing for this.
   5476     external_string_table_.Iterate(v);
   5477   }
   5478   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
   5479 }
   5480 
   5481 
   5482 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   5483   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   5484   v->Synchronize(VisitorSynchronization::kStrongRootList);
   5485 
   5486   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
   5487   v->Synchronize(VisitorSynchronization::kSymbol);
   5488 
   5489   isolate_->bootstrapper()->Iterate(v);
   5490   v->Synchronize(VisitorSynchronization::kBootstrapper);
   5491   isolate_->Iterate(v);
   5492   v->Synchronize(VisitorSynchronization::kTop);
   5493   Relocatable::Iterate(v);
   5494   v->Synchronize(VisitorSynchronization::kRelocatable);
   5495 
   5496 #ifdef ENABLE_DEBUGGER_SUPPORT
   5497   isolate_->debug()->Iterate(v);
   5498   if (isolate_->deoptimizer_data() != NULL) {
   5499     isolate_->deoptimizer_data()->Iterate(v);
   5500   }
   5501 #endif
   5502   v->Synchronize(VisitorSynchronization::kDebug);
   5503   isolate_->compilation_cache()->Iterate(v);
   5504   v->Synchronize(VisitorSynchronization::kCompilationCache);
   5505 
   5506   // Iterate over local handles in handle scopes.
   5507   isolate_->handle_scope_implementer()->Iterate(v);
   5508   v->Synchronize(VisitorSynchronization::kHandleScope);
   5509 
   5510   // Iterate over the builtin code objects and code stubs in the
   5511   // heap. Note that it is not necessary to iterate over code objects
   5512   // on scavenge collections.
   5513   if (mode != VISIT_ALL_IN_SCAVENGE) {
   5514     isolate_->builtins()->IterateBuiltins(v);
   5515   }
   5516   v->Synchronize(VisitorSynchronization::kBuiltins);
   5517 
   5518   // Iterate over global handles.
   5519   switch (mode) {
   5520     case VISIT_ONLY_STRONG:
   5521       isolate_->global_handles()->IterateStrongRoots(v);
   5522       break;
   5523     case VISIT_ALL_IN_SCAVENGE:
   5524       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
   5525       break;
   5526     case VISIT_ALL_IN_SWEEP_NEWSPACE:
   5527     case VISIT_ALL:
   5528       isolate_->global_handles()->IterateAllRoots(v);
   5529       break;
   5530   }
   5531   v->Synchronize(VisitorSynchronization::kGlobalHandles);
   5532 
   5533   // Iterate over pointers being held by inactive threads.
   5534   isolate_->thread_manager()->Iterate(v);
   5535   v->Synchronize(VisitorSynchronization::kThreadManager);
   5536 
   5537   // Iterate over the pointers the Serialization/Deserialization code is
   5538   // holding.
   5539   // During garbage collection this keeps the partial snapshot cache alive.
   5540   // During deserialization of the startup snapshot this creates the partial
   5541   // snapshot cache and deserializes the objects it refers to.  During
   5542   // serialization this does nothing, since the partial snapshot cache is
   5543   // empty.  However the next thing we do is create the partial snapshot,
   5544   // filling up the partial snapshot cache with objects it needs as we go.
   5545   SerializerDeserializer::Iterate(v);
   5546   // We don't do a v->Synchronize call here, because in debug mode that will
   5547   // output a flag to the snapshot.  However at this point the serializer and
   5548   // deserializer are deliberately a little unsynchronized (see above) so the
   5549   // checking of the sync flag in the snapshot would fail.
   5550 }
   5551 
   5552 
   5553 // TODO(1236194): Since the heap size is configurable on the command line
   5554 // and through the API, we should gracefully handle the case that the heap
   5555 // size is not big enough to fit all the initial objects.
   5556 bool Heap::ConfigureHeap(int max_semispace_size,
   5557                          intptr_t max_old_gen_size,
   5558                          intptr_t max_executable_size) {
   5559   if (HasBeenSetUp()) return false;
   5560 
   5561   if (max_semispace_size > 0) {
   5562     if (max_semispace_size < Page::kPageSize) {
   5563       max_semispace_size = Page::kPageSize;
   5564       if (FLAG_trace_gc) {
   5565         PrintF("Max semispace size cannot be less than %dkbytes\n",
   5566                Page::kPageSize >> 10);
   5567       }
   5568     }
   5569     max_semispace_size_ = max_semispace_size;
   5570   }
   5571 
   5572   if (Snapshot::IsEnabled()) {
   5573     // If we are using a snapshot we always reserve the default amount
   5574     // of memory for each semispace because code in the snapshot has
   5575     // write-barrier code that relies on the size and alignment of new
   5576     // space.  We therefore cannot use a larger max semispace size
   5577     // than the default reserved semispace size.
   5578     if (max_semispace_size_ > reserved_semispace_size_) {
   5579       max_semispace_size_ = reserved_semispace_size_;
   5580       if (FLAG_trace_gc) {
   5581         PrintF("Max semispace size cannot be more than %dkbytes\n",
   5582                reserved_semispace_size_ >> 10);
   5583       }
   5584     }
   5585   } else {
   5586     // If we are not using snapshots we reserve space for the actual
   5587     // max semispace size.
   5588     reserved_semispace_size_ = max_semispace_size_;
   5589   }
   5590 
   5591   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
   5592   if (max_executable_size > 0) {
   5593     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
   5594   }
   5595 
   5596   // The max executable size must be less than or equal to the max old
   5597   // generation size.
   5598   if (max_executable_size_ > max_old_generation_size_) {
   5599     max_executable_size_ = max_old_generation_size_;
   5600   }
   5601 
   5602   // The new space size must be a power of two to support single-bit testing
   5603   // for containment.
   5604   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
   5605   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
   5606   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   5607   external_allocation_limit_ = 10 * max_semispace_size_;
   5608 
   5609   // The old generation is paged and needs at least one page for each space.
   5610   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
   5611   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
   5612                                                        Page::kPageSize),
   5613                                  RoundUp(max_old_generation_size_,
   5614                                          Page::kPageSize));
   5615 
   5616   configured_ = true;
   5617   return true;
   5618 }
   5619 
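// Why ConfigureHeap() rounds the semispace size up to a power of two: for a
// size-aligned space whose size is a power of two, containment can be
// tested with a single mask-and-compare. A hedged sketch (hypothetical
// names, not V8's actual test):
//
//   #include <stdint.h>
//   bool InSpace(uintptr_t addr, uintptr_t base, uintptr_t size) {
//     // Assumes size is a power of two and base is size-aligned.
//     return (addr & ~(size - 1)) == base;
//   }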
   5620 
   5621 bool Heap::ConfigureHeapDefault() {
   5622   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
   5623                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
   5624                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
   5625 }
   5626 
   5627 
   5628 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   5629   *stats->start_marker = HeapStats::kStartMarker;
   5630   *stats->end_marker = HeapStats::kEndMarker;
   5631   *stats->new_space_size = new_space_.SizeAsInt();
   5632   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
   5633   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
   5634   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
   5635   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
   5636   *stats->old_data_space_capacity = old_data_space_->Capacity();
   5637   *stats->code_space_size = code_space_->SizeOfObjects();
   5638   *stats->code_space_capacity = code_space_->Capacity();
   5639   *stats->map_space_size = map_space_->SizeOfObjects();
   5640   *stats->map_space_capacity = map_space_->Capacity();
   5641   *stats->cell_space_size = cell_space_->SizeOfObjects();
   5642   *stats->cell_space_capacity = cell_space_->Capacity();
   5643   *stats->lo_space_size = lo_space_->Size();
   5644   isolate_->global_handles()->RecordStats(stats);
   5645   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
   5646   *stats->memory_allocator_capacity =
   5647       isolate()->memory_allocator()->Size() +
   5648       isolate()->memory_allocator()->Available();
    5649   *stats->os_error = OS::GetLastError();
   5651   if (take_snapshot) {
   5652     HeapIterator iterator;
   5653     for (HeapObject* obj = iterator.next();
   5654          obj != NULL;
   5655          obj = iterator.next()) {
   5656       InstanceType type = obj->map()->instance_type();
   5657       ASSERT(0 <= type && type <= LAST_TYPE);
   5658       stats->objects_per_type[type]++;
   5659       stats->size_per_type[type] += obj->Size();
   5660     }
   5661   }
   5662 }
   5663 
   5664 
   5665 intptr_t Heap::PromotedSpaceSize() {
   5666   return old_pointer_space_->Size()
   5667       + old_data_space_->Size()
   5668       + code_space_->Size()
   5669       + map_space_->Size()
   5670       + cell_space_->Size()
   5671       + lo_space_->Size();
   5672 }
   5673 
   5674 
   5675 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   5676   return old_pointer_space_->SizeOfObjects()
   5677       + old_data_space_->SizeOfObjects()
   5678       + code_space_->SizeOfObjects()
   5679       + map_space_->SizeOfObjects()
   5680       + cell_space_->SizeOfObjects()
   5681       + lo_space_->SizeOfObjects();
   5682 }
   5683 
   5684 
   5685 int Heap::PromotedExternalMemorySize() {
   5686   if (amount_of_external_allocated_memory_
   5687       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   5688   return amount_of_external_allocated_memory_
   5689       - amount_of_external_allocated_memory_at_last_global_gc_;
   5690 }
   5691 
   5692 #ifdef DEBUG
   5693 
   5694 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
   5695 static const int kMarkTag = 2;
   5696 
   5697 
   5698 class HeapDebugUtils {
   5699  public:
   5700   explicit HeapDebugUtils(Heap* heap)
   5701     : search_for_any_global_(false),
   5702       search_target_(NULL),
   5703       found_target_(false),
   5704       object_stack_(20),
   5705       heap_(heap) {
   5706   }
   5707 
   5708   class MarkObjectVisitor : public ObjectVisitor {
   5709    public:
   5710     explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
   5711 
   5712     void VisitPointers(Object** start, Object** end) {
    5713       // Visit all HeapObject pointers in [start, end).
   5714       for (Object** p = start; p < end; p++) {
   5715         if ((*p)->IsHeapObject())
   5716           utils_->MarkObjectRecursively(p);
   5717       }
   5718     }
   5719 
   5720     HeapDebugUtils* utils_;
   5721   };
   5722 
   5723   void MarkObjectRecursively(Object** p) {
   5724     if (!(*p)->IsHeapObject()) return;
   5725 
   5726     HeapObject* obj = HeapObject::cast(*p);
   5727 
   5728     Object* map = obj->map();
   5729 
   5730     if (!map->IsHeapObject()) return;  // visited before
   5731 
   5732     if (found_target_) return;  // stop if target found
   5733     object_stack_.Add(obj);
   5734     if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
   5735         (!search_for_any_global_ && (obj == search_target_))) {
   5736       found_target_ = true;
   5737       return;
   5738     }
   5739 
   5740     // not visited yet
   5741     Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
   5742 
   5743     Address map_addr = map_p->address();
   5744 
   5745     obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
   5746 
   5747     MarkObjectRecursively(&map);
   5748 
   5749     MarkObjectVisitor mark_visitor(this);
   5750 
   5751     obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
   5752                      &mark_visitor);
   5753 
   5754     if (!found_target_)  // don't pop if found the target
   5755       object_stack_.RemoveLast();
   5756   }
   5757 
   5758 
   5759   class UnmarkObjectVisitor : public ObjectVisitor {
   5760    public:
   5761     explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
   5762 
   5763     void VisitPointers(Object** start, Object** end) {
   5764       // Copy all HeapObject pointers in [start, end)
   5765       for (Object** p = start; p < end; p++) {
   5766         if ((*p)->IsHeapObject())
   5767           utils_->UnmarkObjectRecursively(p);
   5768       }
   5769     }
   5770 
   5771     HeapDebugUtils* utils_;
   5772   };
   5773 
   5774 
   5775   void UnmarkObjectRecursively(Object** p) {
   5776     if (!(*p)->IsHeapObject()) return;
   5777 
   5778     HeapObject* obj = HeapObject::cast(*p);
   5779 
   5780     Object* map = obj->map();
   5781 
   5782     if (map->IsHeapObject()) return;  // unmarked already
   5783 
   5784     Address map_addr = reinterpret_cast<Address>(map);
   5785 
   5786     map_addr -= kMarkTag;
   5787 
   5788     ASSERT_TAG_ALIGNED(map_addr);
   5789 
   5790     HeapObject* map_p = HeapObject::FromAddress(map_addr);
   5791 
   5792     obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
   5793 
   5794     UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
   5795 
   5796     UnmarkObjectVisitor unmark_visitor(this);
   5797 
   5798     obj->IterateBody(Map::cast(map_p)->instance_type(),
   5799                      obj->SizeFromMap(Map::cast(map_p)),
   5800                      &unmark_visitor);
   5801   }
   5802 
   5803 
   5804   void MarkRootObjectRecursively(Object** root) {
   5805     if (search_for_any_global_) {
   5806       ASSERT(search_target_ == NULL);
   5807     } else {
   5808       ASSERT(search_target_->IsHeapObject());
   5809     }
   5810     found_target_ = false;
   5811     object_stack_.Clear();
   5812 
   5813     MarkObjectRecursively(root);
   5814     UnmarkObjectRecursively(root);
   5815 
   5816     if (found_target_) {
   5817       PrintF("=====================================\n");
   5818       PrintF("====        Path to object       ====\n");
   5819       PrintF("=====================================\n\n");
   5820 
   5821       ASSERT(!object_stack_.is_empty());
   5822       for (int i = 0; i < object_stack_.length(); i++) {
   5823         if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
   5824         Object* obj = object_stack_[i];
   5825         obj->Print();
   5826       }
   5827       PrintF("=====================================\n");
   5828     }
   5829   }
   5830 
   5831   // Helper class for visiting HeapObjects recursively.
   5832   class MarkRootVisitor: public ObjectVisitor {
   5833    public:
   5834     explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
   5835 
   5836     void VisitPointers(Object** start, Object** end) {
   5837       // Visit all HeapObject pointers in [start, end)
   5838       for (Object** p = start; p < end; p++) {
   5839         if ((*p)->IsHeapObject())
   5840           utils_->MarkRootObjectRecursively(p);
   5841       }
   5842     }
   5843 
   5844     HeapDebugUtils* utils_;
   5845   };
   5846 
   5847   bool search_for_any_global_;
   5848   Object* search_target_;
   5849   bool found_target_;
   5850   List<Object*> object_stack_;
   5851   Heap* heap_;
   5852 
   5853   friend class Heap;
   5854 };
   5855 
   5856 #endif
   5857 
   5858 bool Heap::SetUp(bool create_heap_objects) {
   5859 #ifdef DEBUG
   5860   allocation_timeout_ = FLAG_gc_interval;
   5861   debug_utils_ = new HeapDebugUtils(this);
   5862 #endif
   5863 
   5864   // Initialize heap spaces and initial maps and objects. Whenever something
   5865   // goes wrong, just return false. The caller should check the results and
   5866   // call Heap::TearDown() to release allocated memory.
   5867   //
   5868   // If the heap is not yet configured (e.g. through the API), configure it.
   5869   // Configuration is based on the flags new-space-size (really the semispace
   5870   // size) and old-space-size if set or the initial values of semispace_size_
   5871   // and old_generation_size_ otherwise.
   5872   if (!configured_) {
   5873     if (!ConfigureHeapDefault()) return false;
   5874   }
   5875 
   5876   gc_initializer_mutex.Pointer()->Lock();
   5877   static bool initialized_gc = false;
   5878   if (!initialized_gc) {
    5879     initialized_gc = true;
    5880     InitializeScavengingVisitorsTables();
    5881     NewSpaceScavenger::Initialize();
    5882     MarkCompactCollector::Initialize();
   5883   }
   5884   gc_initializer_mutex.Pointer()->Unlock();
   5885 
   5886   MarkMapPointersAsEncoded(false);
   5887 
   5888   // Set up memory allocator.
   5889   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
   5890       return false;
   5891 
   5892   // Set up new space.
   5893   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
   5894     return false;
   5895   }
   5896 
   5897   // Initialize old pointer space.
   5898   old_pointer_space_ =
   5899       new OldSpace(this,
   5900                    max_old_generation_size_,
   5901                    OLD_POINTER_SPACE,
   5902                    NOT_EXECUTABLE);
   5903   if (old_pointer_space_ == NULL) return false;
   5904   if (!old_pointer_space_->SetUp()) return false;
   5905 
   5906   // Initialize old data space.
   5907   old_data_space_ =
   5908       new OldSpace(this,
   5909                    max_old_generation_size_,
   5910                    OLD_DATA_SPACE,
   5911                    NOT_EXECUTABLE);
   5912   if (old_data_space_ == NULL) return false;
   5913   if (!old_data_space_->SetUp()) return false;
   5914 
   5915   // Initialize the code space, set its maximum capacity to the old
   5916   // generation size. It needs executable memory.
   5917   // On 64-bit platform(s), we put all code objects in a 2 GB range of
   5918   // virtual address space, so that they can call each other with near calls.
   5919   if (code_range_size_ > 0) {
   5920     if (!isolate_->code_range()->SetUp(code_range_size_)) {
   5921       return false;
   5922     }
   5923   }
   5924 
   5925   code_space_ =
   5926       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   5927   if (code_space_ == NULL) return false;
   5928   if (!code_space_->SetUp()) return false;
   5929 
   5930   // Initialize map space.
   5931   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
   5932   if (map_space_ == NULL) return false;
   5933   if (!map_space_->SetUp()) return false;
   5934 
   5935   // Initialize global property cell space.
   5936   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   5937   if (cell_space_ == NULL) return false;
   5938   if (!cell_space_->SetUp()) return false;
   5939 
    5940   // The large object space may contain code or data.  We set the memory
   5941   // to be non-executable here for safety, but this means we need to enable it
   5942   // explicitly when allocating large code objects.
   5943   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
   5944   if (lo_space_ == NULL) return false;
   5945   if (!lo_space_->SetUp()) return false;
   5946 
   5947   // Set up the seed that is used to randomize the string hash function.
   5948   ASSERT(hash_seed() == 0);
   5949   if (FLAG_randomize_hashes) {
   5950     if (FLAG_hash_seed == 0) {
   5951       set_hash_seed(
   5952           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
   5953     } else {
   5954       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
   5955     }
   5956   }
   5957 
   5958   if (create_heap_objects) {
   5959     // Create initial maps.
   5960     if (!CreateInitialMaps()) return false;
   5961     if (!CreateApiObjects()) return false;
   5962 
   5963     // Create initial objects
   5964     if (!CreateInitialObjects()) return false;
   5965 
   5966     global_contexts_list_ = undefined_value();
   5967   }
   5968 
   5969   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   5970   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
   5971 
   5972   store_buffer()->SetUp();
   5973 
   5974   return true;
   5975 }
   5976 
   5977 
   5978 void Heap::SetStackLimits() {
   5979   ASSERT(isolate_ != NULL);
   5980   ASSERT(isolate_ == isolate());
    5981   // On 64-bit machines, pointers are generally out of range of Smis.  We
    5982   // write something that looks like an out-of-range Smi to the GC.
   5983 
   5984   // Set up the special root array entries containing the stack limits.
   5985   // These are actually addresses, but the tag makes the GC ignore it.
   5986   roots_[kStackLimitRootIndex] =
   5987       reinterpret_cast<Object*>(
   5988           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
   5989   roots_[kRealStackLimitRootIndex] =
   5990       reinterpret_cast<Object*>(
   5991           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
   5992 }
   5993 
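// The masking trick above in isolation: clearing the low tag bit and
// setting the smi tag makes an arbitrary address parse as a (possibly
// out-of-range) smi, so the GC treats the root entry as an immediate
// value rather than a pointer to be followed. A sketch with assumed
// V8-style tag values (smis carry tag 0 in the low bit):
//
//   #include <stdint.h>
//   const uintptr_t kSmiTagMaskSketch = 1;  // low bit is the tag
//   const uintptr_t kSmiTagSketch = 0;      // smi tag value
//   uintptr_t DisguiseAsSmi(uintptr_t addr) {
//     return (addr & ~kSmiTagMaskSketch) | kSmiTagSketch;
//   }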
   5994 
   5995 void Heap::TearDown() {
   5996   if (FLAG_print_cumulative_gc_stat) {
   5997     PrintF("\n\n");
   5998     PrintF("gc_count=%d ", gc_count_);
   5999     PrintF("mark_sweep_count=%d ", ms_count_);
   6000     PrintF("max_gc_pause=%d ", get_max_gc_pause());
   6001     PrintF("min_in_mutator=%d ", get_min_in_mutator());
   6002     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
   6003            get_max_alive_after_gc());
   6004     PrintF("\n\n");
   6005   }
   6006 
   6007   isolate_->global_handles()->TearDown();
   6008 
   6009   external_string_table_.TearDown();
   6010 
   6011   new_space_.TearDown();
   6012 
   6013   if (old_pointer_space_ != NULL) {
   6014     old_pointer_space_->TearDown();
   6015     delete old_pointer_space_;
   6016     old_pointer_space_ = NULL;
   6017   }
   6018 
   6019   if (old_data_space_ != NULL) {
   6020     old_data_space_->TearDown();
   6021     delete old_data_space_;
   6022     old_data_space_ = NULL;
   6023   }
   6024 
   6025   if (code_space_ != NULL) {
   6026     code_space_->TearDown();
   6027     delete code_space_;
   6028     code_space_ = NULL;
   6029   }
   6030 
   6031   if (map_space_ != NULL) {
   6032     map_space_->TearDown();
   6033     delete map_space_;
   6034     map_space_ = NULL;
   6035   }
   6036 
   6037   if (cell_space_ != NULL) {
   6038     cell_space_->TearDown();
   6039     delete cell_space_;
   6040     cell_space_ = NULL;
   6041   }
   6042 
   6043   if (lo_space_ != NULL) {
   6044     lo_space_->TearDown();
   6045     delete lo_space_;
   6046     lo_space_ = NULL;
   6047   }
   6048 
   6049   store_buffer()->TearDown();
   6050   incremental_marking()->TearDown();
   6051 
   6052   isolate_->memory_allocator()->TearDown();
   6053 
   6054 #ifdef DEBUG
   6055   delete debug_utils_;
   6056   debug_utils_ = NULL;
   6057 #endif
   6058 }
   6059 
   6060 
   6061 void Heap::Shrink() {
   6062   // Try to shrink all paged spaces.
   6063   PagedSpaces spaces;
   6064   for (PagedSpace* space = spaces.next();
   6065        space != NULL;
   6066        space = spaces.next()) {
   6067     space->ReleaseAllUnusedPages();
   6068   }
   6069 }
   6070 
   6071 
   6072 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
   6073   ASSERT(callback != NULL);
   6074   GCPrologueCallbackPair pair(callback, gc_type);
   6075   ASSERT(!gc_prologue_callbacks_.Contains(pair));
    6076   gc_prologue_callbacks_.Add(pair);
   6077 }
   6078 
   6079 
   6080 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
   6081   ASSERT(callback != NULL);
   6082   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
   6083     if (gc_prologue_callbacks_[i].callback == callback) {
   6084       gc_prologue_callbacks_.Remove(i);
   6085       return;
   6086     }
   6087   }
   6088   UNREACHABLE();
   6089 }
   6090 
   6091 
   6092 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
   6093   ASSERT(callback != NULL);
   6094   GCEpilogueCallbackPair pair(callback, gc_type);
   6095   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
    6096   gc_epilogue_callbacks_.Add(pair);
   6097 }
   6098 
   6099 
   6100 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
   6101   ASSERT(callback != NULL);
   6102   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
   6103     if (gc_epilogue_callbacks_[i].callback == callback) {
   6104       gc_epilogue_callbacks_.Remove(i);
   6105       return;
   6106     }
   6107   }
   6108   UNREACHABLE();
   6109 }
   6110 
   6111 
   6112 #ifdef DEBUG
   6113 
   6114 class PrintHandleVisitor: public ObjectVisitor {
   6115  public:
   6116   void VisitPointers(Object** start, Object** end) {
   6117     for (Object** p = start; p < end; p++)
   6118       PrintF("  handle %p to %p\n",
   6119              reinterpret_cast<void*>(p),
   6120              reinterpret_cast<void*>(*p));
   6121   }
   6122 };
   6123 
   6124 void Heap::PrintHandles() {
   6125   PrintF("Handles:\n");
   6126   PrintHandleVisitor v;
   6127   isolate_->handle_scope_implementer()->Iterate(&v);
   6128 }
   6129 
   6130 #endif
   6131 
   6132 
   6133 Space* AllSpaces::next() {
   6134   switch (counter_++) {
   6135     case NEW_SPACE:
   6136       return HEAP->new_space();
   6137     case OLD_POINTER_SPACE:
   6138       return HEAP->old_pointer_space();
   6139     case OLD_DATA_SPACE:
   6140       return HEAP->old_data_space();
   6141     case CODE_SPACE:
   6142       return HEAP->code_space();
   6143     case MAP_SPACE:
   6144       return HEAP->map_space();
   6145     case CELL_SPACE:
   6146       return HEAP->cell_space();
   6147     case LO_SPACE:
   6148       return HEAP->lo_space();
   6149     default:
   6150       return NULL;
   6151   }
   6152 }
   6153 
   6154 
   6155 PagedSpace* PagedSpaces::next() {
   6156   switch (counter_++) {
   6157     case OLD_POINTER_SPACE:
   6158       return HEAP->old_pointer_space();
   6159     case OLD_DATA_SPACE:
   6160       return HEAP->old_data_space();
   6161     case CODE_SPACE:
   6162       return HEAP->code_space();
   6163     case MAP_SPACE:
   6164       return HEAP->map_space();
   6165     case CELL_SPACE:
   6166       return HEAP->cell_space();
   6167     default:
   6168       return NULL;
   6169   }
   6170 }
   6171 
   6172 
   6174 OldSpace* OldSpaces::next() {
   6175   switch (counter_++) {
   6176     case OLD_POINTER_SPACE:
   6177       return HEAP->old_pointer_space();
   6178     case OLD_DATA_SPACE:
   6179       return HEAP->old_data_space();
   6180     case CODE_SPACE:
   6181       return HEAP->code_space();
   6182     default:
   6183       return NULL;
   6184   }
   6185 }
   6186 
   6187 
   6188 SpaceIterator::SpaceIterator()
   6189     : current_space_(FIRST_SPACE),
   6190       iterator_(NULL),
   6191       size_func_(NULL) {
   6192 }
   6193 
   6194 
   6195 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
   6196     : current_space_(FIRST_SPACE),
   6197       iterator_(NULL),
   6198       size_func_(size_func) {
   6199 }
   6200 
   6201 
   6202 SpaceIterator::~SpaceIterator() {
   6203   // Delete active iterator if any.
   6204   delete iterator_;
   6205 }
   6206 
   6207 
   6208 bool SpaceIterator::has_next() {
   6209   // Iterate until no more spaces.
   6210   return current_space_ != LAST_SPACE;
   6211 }
   6212 
   6213 
   6214 ObjectIterator* SpaceIterator::next() {
   6215   if (iterator_ != NULL) {
   6216     delete iterator_;
   6217     iterator_ = NULL;
   6218     // Move to the next space
   6219     current_space_++;
   6220     if (current_space_ > LAST_SPACE) {
   6221       return NULL;
   6222     }
   6223   }
   6224 
   6225   // Return iterator for the new current space.
   6226   return CreateIterator();
   6227 }
   6228 
   6229 
   6230 // Create an iterator for the space to iterate.
   6231 ObjectIterator* SpaceIterator::CreateIterator() {
   6232   ASSERT(iterator_ == NULL);
   6233 
   6234   switch (current_space_) {
   6235     case NEW_SPACE:
   6236       iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
   6237       break;
   6238     case OLD_POINTER_SPACE:
   6239       iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
   6240       break;
   6241     case OLD_DATA_SPACE:
   6242       iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
   6243       break;
   6244     case CODE_SPACE:
   6245       iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
   6246       break;
   6247     case MAP_SPACE:
   6248       iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
   6249       break;
   6250     case CELL_SPACE:
   6251       iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
   6252       break;
   6253     case LO_SPACE:
   6254       iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
   6255       break;
   6256   }
   6257 
    6258   // Return the newly allocated iterator.
   6259   ASSERT(iterator_ != NULL);
   6260   return iterator_;
   6261 }
   6262 
   6263 
   6264 class HeapObjectsFilter {
   6265  public:
   6266   virtual ~HeapObjectsFilter() {}
   6267   virtual bool SkipObject(HeapObject* object) = 0;
   6268 };
   6269 
   6270 
   6271 class UnreachableObjectsFilter : public HeapObjectsFilter {
   6272  public:
   6273   UnreachableObjectsFilter() {
   6274     MarkReachableObjects();
   6275   }
   6276 
   6277   ~UnreachableObjectsFilter() {
   6278     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
   6279   }
   6280 
   6281   bool SkipObject(HeapObject* object) {
   6282     MarkBit mark_bit = Marking::MarkBitFrom(object);
   6283     return !mark_bit.Get();
   6284   }
   6285 
   6286  private:
   6287   class MarkingVisitor : public ObjectVisitor {
   6288    public:
   6289     MarkingVisitor() : marking_stack_(10) {}
   6290 
   6291     void VisitPointers(Object** start, Object** end) {
   6292       for (Object** p = start; p < end; p++) {
   6293         if (!(*p)->IsHeapObject()) continue;
   6294         HeapObject* obj = HeapObject::cast(*p);
   6295         MarkBit mark_bit = Marking::MarkBitFrom(obj);
   6296         if (!mark_bit.Get()) {
   6297           mark_bit.Set();
   6298           marking_stack_.Add(obj);
   6299         }
   6300       }
   6301     }
   6302 
   6303     void TransitiveClosure() {
   6304       while (!marking_stack_.is_empty()) {
   6305         HeapObject* obj = marking_stack_.RemoveLast();
   6306         obj->Iterate(this);
   6307       }
   6308     }
   6309 
   6310    private:
   6311     List<HeapObject*> marking_stack_;
   6312   };
   6313 
   6314   void MarkReachableObjects() {
   6315     Heap* heap = Isolate::Current()->heap();
   6316     MarkingVisitor visitor;
   6317     heap->IterateRoots(&visitor, VISIT_ALL);
   6318     visitor.TransitiveClosure();
   6319   }
   6320 
   6321   AssertNoAllocation no_alloc;
   6322 };
   6323 
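// MarkReachableObjects() above drives the same worklist algorithm the
// collector uses: seed with the roots, then take a transitive closure with
// an explicit mark stack instead of recursion. The pattern over a toy
// graph (hedged sketch, standard C++ only):
//
//   #include <vector>
//   struct Node {
//     bool marked;
//     std::vector<Node*> edges;
//   };
//   void MarkReachable(Node* root) {
//     std::vector<Node*> stack;
//     root->marked = true;
//     stack.push_back(root);
//     while (!stack.empty()) {
//       Node* n = stack.back();
//       stack.pop_back();
//       for (size_t i = 0; i < n->edges.size(); i++) {
//         Node* m = n->edges[i];
//         if (!m->marked) {  // mark each node at most once
//           m->marked = true;
//           stack.push_back(m);
//         }
//       }
//     }
//   }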
   6324 
   6325 HeapIterator::HeapIterator()
   6326     : filtering_(HeapIterator::kNoFiltering),
   6327       filter_(NULL) {
   6328   Init();
   6329 }
   6330 
   6331 
   6332 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
   6333     : filtering_(filtering),
   6334       filter_(NULL) {
   6335   Init();
   6336 }
   6337 
   6338 
   6339 HeapIterator::~HeapIterator() {
   6340   Shutdown();
   6341 }
   6342 
   6343 
   6344 void HeapIterator::Init() {
   6345   // Start the iteration.
   6346   space_iterator_ = new SpaceIterator;
   6347   switch (filtering_) {
   6348     case kFilterUnreachable:
   6349       filter_ = new UnreachableObjectsFilter;
   6350       break;
   6351     default:
   6352       break;
   6353   }
   6354   object_iterator_ = space_iterator_->next();
   6355 }
   6356 
   6357 
   6358 void HeapIterator::Shutdown() {
   6359 #ifdef DEBUG
   6360   // Assert that in filtering mode we have iterated through all
   6361   // objects. Otherwise, heap will be left in an inconsistent state.
   6362   if (filtering_ != kNoFiltering) {
   6363     ASSERT(object_iterator_ == NULL);
   6364   }
   6365 #endif
   6366   // Make sure the last iterator is deallocated.
   6367   delete space_iterator_;
   6368   space_iterator_ = NULL;
   6369   object_iterator_ = NULL;
   6370   delete filter_;
   6371   filter_ = NULL;
   6372 }
   6373 
   6374 
   6375 HeapObject* HeapIterator::next() {
   6376   if (filter_ == NULL) return NextObject();
   6377 
   6378   HeapObject* obj = NextObject();
   6379   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
   6380   return obj;
   6381 }
   6382 
   6383 
   6384 HeapObject* HeapIterator::NextObject() {
   6385   // No iterator means we are done.
   6386   if (object_iterator_ == NULL) return NULL;
   6387 
   6388   if (HeapObject* obj = object_iterator_->next_object()) {
   6389     // If the current iterator has more objects we are fine.
   6390     return obj;
   6391   } else {
    6392     // Go through the spaces looking for one that has objects.
   6393     while (space_iterator_->has_next()) {
   6394       object_iterator_ = space_iterator_->next();
   6395       if (HeapObject* obj = object_iterator_->next_object()) {
   6396         return obj;
   6397       }
   6398     }
   6399   }
   6400   // Done with the last space.
   6401   object_iterator_ = NULL;
   6402   return NULL;
   6403 }
   6404 
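// NextObject() above flattens an iterator of iterators: drain the current
// per-space iterator, then advance the space iterator until another space
// yields an object. The same shape in miniature (hedged sketch, standard
// C++ only; returns -1 when exhausted):
//
//   #include <vector>
//   int NextFlattened(const std::vector<std::vector<int> >& spaces,
//                     size_t* outer, size_t* inner) {
//     while (*outer < spaces.size()) {
//       if (*inner < spaces[*outer].size()) return spaces[*outer][(*inner)++];
//       ++*outer;   // current space exhausted, move to the next one
//       *inner = 0;
//     }
//     return -1;    // done with the last space
//   }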
   6405 
   6406 void HeapIterator::reset() {
   6407   // Restart the iterator.
   6408   Shutdown();
   6409   Init();
   6410 }
   6411 
   6412 
   6413 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
   6414 
   6415 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
   6416 
   6417 class PathTracer::MarkVisitor: public ObjectVisitor {
   6418  public:
   6419   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
   6420   void VisitPointers(Object** start, Object** end) {
   6421     // Scan all HeapObject pointers in [start, end)
   6422     for (Object** p = start; !tracer_->found() && (p < end); p++) {
   6423       if ((*p)->IsHeapObject())
   6424         tracer_->MarkRecursively(p, this);
   6425     }
   6426   }
   6427 
   6428  private:
   6429   PathTracer* tracer_;
   6430 };
   6431 
   6432 
   6433 class PathTracer::UnmarkVisitor: public ObjectVisitor {
   6434  public:
   6435   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
   6436   void VisitPointers(Object** start, Object** end) {
   6437     // Scan all HeapObject pointers in [start, end)
   6438     for (Object** p = start; p < end; p++) {
   6439       if ((*p)->IsHeapObject())
   6440         tracer_->UnmarkRecursively(p, this);
   6441     }
   6442   }
   6443 
   6444  private:
   6445   PathTracer* tracer_;
   6446 };
   6447 
   6448 
   6449 void PathTracer::VisitPointers(Object** start, Object** end) {
   6450   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
   6451   // Visit all HeapObject pointers in [start, end)
   6452   for (Object** p = start; !done && (p < end); p++) {
   6453     if ((*p)->IsHeapObject()) {
   6454       TracePathFrom(p);
   6455       done = ((what_to_find_ == FIND_FIRST) && found_target_);
   6456     }
   6457   }
   6458 }
   6459 
   6460 
   6461 void PathTracer::Reset() {
   6462   found_target_ = false;
   6463   object_stack_.Clear();
   6464 }
   6465 
   6466 
   6467 void PathTracer::TracePathFrom(Object** root) {
   6468   ASSERT((search_target_ == kAnyGlobalObject) ||
   6469          search_target_->IsHeapObject());
   6470   found_target_in_trace_ = false;
   6471   object_stack_.Clear();
   6472 
   6473   MarkVisitor mark_visitor(this);
   6474   MarkRecursively(root, &mark_visitor);
   6475 
   6476   UnmarkVisitor unmark_visitor(this);
   6477   UnmarkRecursively(root, &unmark_visitor);
   6478 
   6479   ProcessResults();
   6480 }
   6481 
   6482 
   6483 static bool SafeIsGlobalContext(HeapObject* obj) {
   6484   return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
   6485 }
   6486 
   6487 
   6488 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   6489   if (!(*p)->IsHeapObject()) return;
   6490 
   6491   HeapObject* obj = HeapObject::cast(*p);
   6492 
   6493   Object* map = obj->map();
   6494 
   6495   if (!map->IsHeapObject()) return;  // visited before
   6496 
   6497   if (found_target_in_trace_) return;  // stop if target found
   6498   object_stack_.Add(obj);
   6499   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
   6500       (obj == search_target_)) {
   6501     found_target_in_trace_ = true;
   6502     found_target_ = true;
   6503     return;
   6504   }
   6505 
   6506   bool is_global_context = SafeIsGlobalContext(obj);
   6507 
   6508   // not visited yet
   6509   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
   6510 
   6511   Address map_addr = map_p->address();
   6512 
   6513   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
   6514 
   6515   // Scan the object body.
   6516   if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    6517     // This is specialized to scan Contexts properly.
   6518     Object** start = reinterpret_cast<Object**>(obj->address() +
   6519                                                 Context::kHeaderSize);
   6520     Object** end = reinterpret_cast<Object**>(obj->address() +
   6521         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
   6522     mark_visitor->VisitPointers(start, end);
   6523   } else {
   6524     obj->IterateBody(map_p->instance_type(),
   6525                      obj->SizeFromMap(map_p),
   6526                      mark_visitor);
   6527   }
   6528 
   6529   // Scan the map after the body because the body is a lot more interesting
   6530   // when doing leak detection.
   6531   MarkRecursively(&map, mark_visitor);
   6532 
   6533   if (!found_target_in_trace_)  // don't pop if found the target
   6534     object_stack_.RemoveLast();
   6535 }
   6536 
   6537 
   6538 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
   6539   if (!(*p)->IsHeapObject()) return;
   6540 
   6541   HeapObject* obj = HeapObject::cast(*p);
   6542 
   6543   Object* map = obj->map();
   6544 
   6545   if (map->IsHeapObject()) return;  // unmarked already
   6546 
   6547   Address map_addr = reinterpret_cast<Address>(map);
   6548 
   6549   map_addr -= kMarkTag;
   6550 
   6551   ASSERT_TAG_ALIGNED(map_addr);
   6552 
   6553   HeapObject* map_p = HeapObject::FromAddress(map_addr);
   6554 
   6555   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
   6556 
   6557   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
   6558 
   6559   obj->IterateBody(Map::cast(map_p)->instance_type(),
   6560                    obj->SizeFromMap(Map::cast(map_p)),
   6561                    unmark_visitor);
   6562 }
   6563 
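// Mark/UnmarkRecursively above borrow storage from the map word: adding
// kMarkTag to the map address makes the map() slot stop looking like a
// heap object, which doubles as the "already visited" test, and
// subtracting it restores the original pointer. The reversible tagging in
// isolation (hedged sketch, hypothetical constant name):
//
//   #include <stdint.h>
//   const uintptr_t kMarkTagSketch = 2;  // assumed otherwise-unused tag
//   uintptr_t MarkMapWord(uintptr_t map_word) {
//     return map_word + kMarkTagSketch;
//   }
//   uintptr_t UnmarkMapWord(uintptr_t map_word) {
//     return map_word - kMarkTagSketch;
//   }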
   6564 
   6565 void PathTracer::ProcessResults() {
   6566   if (found_target_) {
   6567     PrintF("=====================================\n");
   6568     PrintF("====        Path to object       ====\n");
   6569     PrintF("=====================================\n\n");
   6570 
   6571     ASSERT(!object_stack_.is_empty());
   6572     for (int i = 0; i < object_stack_.length(); i++) {
   6573       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
   6574       Object* obj = object_stack_[i];
   6575 #ifdef OBJECT_PRINT
   6576       obj->Print();
   6577 #else
   6578       obj->ShortPrint();
   6579 #endif
   6580     }
   6581     PrintF("=====================================\n");
   6582   }
   6583 }
   6584 #endif  // DEBUG || LIVE_OBJECT_LIST
   6585 
   6586 
   6587 #ifdef DEBUG
   6588 // Triggers a depth-first traversal of reachable objects from roots
   6589 // and finds a path to a specific heap object and prints it.
   6590 void Heap::TracePathToObject(Object* target) {
   6591   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
   6592   IterateRoots(&tracer, VISIT_ONLY_STRONG);
   6593 }
   6594 
   6595 
   6596 // Triggers a depth-first traversal of reachable objects from roots
   6597 // and finds a path to any global object and prints it. Useful for
   6598 // determining the source for leaks of global objects.
   6599 void Heap::TracePathToGlobal() {
   6600   PathTracer tracer(PathTracer::kAnyGlobalObject,
   6601                     PathTracer::FIND_ALL,
   6602                     VISIT_ALL);
   6603   IterateRoots(&tracer, VISIT_ONLY_STRONG);
   6604 }
   6605 #endif
   6606 
   6607 
   6608 static intptr_t CountTotalHolesSize() {
   6609   intptr_t holes_size = 0;
   6610   OldSpaces spaces;
   6611   for (OldSpace* space = spaces.next();
   6612        space != NULL;
   6613        space = spaces.next()) {
   6614     holes_size += space->Waste() + space->Available();
   6615   }
   6616   return holes_size;
   6617 }
   6618 
   6619 
   6620 GCTracer::GCTracer(Heap* heap,
   6621                    const char* gc_reason,
   6622                    const char* collector_reason)
   6623     : start_time_(0.0),
   6624       start_object_size_(0),
   6625       start_memory_size_(0),
   6626       gc_count_(0),
   6627       full_gc_count_(0),
   6628       allocated_since_last_gc_(0),
   6629       spent_in_mutator_(0),
   6630       promoted_objects_size_(0),
   6631       heap_(heap),
   6632       gc_reason_(gc_reason),
   6633       collector_reason_(collector_reason) {
   6634   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   6635   start_time_ = OS::TimeCurrentMillis();
   6636   start_object_size_ = heap_->SizeOfObjects();
   6637   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
   6638 
   6639   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
   6640     scopes_[i] = 0;
   6641   }
   6642 
   6643   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
   6644 
   6645   allocated_since_last_gc_ =
   6646       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
   6647 
   6648   if (heap_->last_gc_end_timestamp_ > 0) {
   6649     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   6650   }
   6651 
   6652   steps_count_ = heap_->incremental_marking()->steps_count();
   6653   steps_took_ = heap_->incremental_marking()->steps_took();
   6654   longest_step_ = heap_->incremental_marking()->longest_step();
   6655   steps_count_since_last_gc_ =
   6656       heap_->incremental_marking()->steps_count_since_last_gc();
   6657   steps_took_since_last_gc_ =
   6658       heap_->incremental_marking()->steps_took_since_last_gc();
   6659 }
   6660 
   6661 
   6662 GCTracer::~GCTracer() {
   6663   // Printf ONE line iff flag is set.
   6664   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   6665 
   6666   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
   6667 
   6668   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
   6669   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
   6670 
   6671   int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
   6672 
   6673   // Update cumulative GC statistics if required.
   6674   if (FLAG_print_cumulative_gc_stat) {
   6675     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
   6676     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
   6677                                      heap_->alive_after_last_gc_);
   6678     if (!first_gc) {
   6679       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
   6680                                    static_cast<int>(spent_in_mutator_));
   6681     }
   6682   }
   6683 
   6684   PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
   6685 
   6686   if (!FLAG_trace_gc_nvp) {
   6687     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
   6688 
   6689     double end_memory_size_mb =
   6690         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
   6691 
   6692     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
   6693            CollectorString(),
   6694            static_cast<double>(start_object_size_) / MB,
   6695            static_cast<double>(start_memory_size_) / MB,
   6696            SizeOfHeapObjects(),
   6697            end_memory_size_mb);
   6698 
   6699     if (external_time > 0) PrintF("%d / ", external_time);
   6700     PrintF("%d ms", time);
   6701     if (steps_count_ > 0) {
   6702       if (collector_ == SCAVENGER) {
   6703         PrintF(" (+ %d ms in %d steps since last GC)",
   6704                static_cast<int>(steps_took_since_last_gc_),
   6705                steps_count_since_last_gc_);
   6706       } else {
   6707         PrintF(" (+ %d ms in %d steps since start of marking, "
   6708                    "biggest step %f ms)",
   6709                static_cast<int>(steps_took_),
   6710                steps_count_,
   6711                longest_step_);
   6712       }
   6713     }
   6714 
   6715     if (gc_reason_ != NULL) {
   6716       PrintF(" [%s]", gc_reason_);
   6717     }
   6718 
   6719     if (collector_reason_ != NULL) {
   6720       PrintF(" [%s]", collector_reason_);
   6721     }
   6722 
   6723     PrintF(".\n");
   6724   } else {
   6725     PrintF("pause=%d ", time);
   6726     PrintF("mutator=%d ",
   6727            static_cast<int>(spent_in_mutator_));
   6728 
   6729     PrintF("gc=");
   6730     switch (collector_) {
   6731       case SCAVENGER:
   6732         PrintF("s");
   6733         break;
   6734       case MARK_COMPACTOR:
   6735         PrintF("ms");
   6736         break;
   6737       default:
   6738         UNREACHABLE();
   6739     }
   6740     PrintF(" ");
   6741 
   6742     PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
   6743     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
   6744     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
   6745     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
   6746     PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
   6747     PrintF("new_new=%d ",
   6748            static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
   6749     PrintF("root_new=%d ",
   6750            static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
   6751     PrintF("old_new=%d ",
   6752            static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
   6753     PrintF("compaction_ptrs=%d ",
   6754            static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
   6755     PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
   6756         Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
   6757     PrintF("misc_compaction=%d ",
   6758            static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
   6759 
   6760     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
   6761     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
   6762     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
   6763            in_free_list_or_wasted_before_gc_);
   6764     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
   6765 
   6766     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
   6767     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
   6768 
   6769     if (collector_ == SCAVENGER) {
   6770       PrintF("stepscount=%d ", steps_count_since_last_gc_);
   6771       PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
   6772     } else {
   6773       PrintF("stepscount=%d ", steps_count_);
   6774       PrintF("stepstook=%d ", static_cast<int>(steps_took_));
   6775     }
   6776 
   6777     PrintF("\n");
   6778   }
   6779 
   6780   heap_->PrintShortHeapStatistics();
   6781 }


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
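
// Map pointers are aligned heap addresses, so their low bits carry no
// information; kMapHashShift (defined in heap.h) discards them before the
// address bits are XOR-ed with the string's hash. Illustrative only, with a
// made-up shift and hash value:
//
//   map          = 0x2a71c0, shift = 5  ->  addr bits 0x1538e
//   name->Hash() = 0x5b3                ->  0x1538e ^ 0x5b3 = 0x1563d
//   index        = 0x1563d & kCapacityMask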


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}
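
// The cache is set-associative: Hash() selects a bucket of kEntriesPerBucket
// consecutive slots and Lookup() scans the bucket linearly. A minimal
// standalone sketch of the same pattern; the names and sizes here are
// hypothetical, not the real constants from heap.h:
#if 0
struct CacheEntry { void* key; int value; };

static const int kBuckets = 64;
static const int kWays = 2;  // entries per bucket
static CacheEntry cache[kBuckets * kWays];

int CacheLookup(void* key, unsigned hash) {
  int bucket = (hash % kBuckets) * kWays;  // first slot of the bucket
  for (int way = 0; way < kWays; way++) {
    if (cache[bucket + way].key == key) return cache[bucket + way].value;
  }
  return -1;  // not found
}
#endif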


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = (Hash(map, symbol) & kHashMask);
    // After a GC there will be free slots, so we use them in order (this may
    // help to get the most frequently used one in position 0).
    for (int i = 0; i < kEntriesPerBucket; i++) {
      // Index each slot of the bucket in turn; using keys_[index] here would
      // re-test slot 0 on every iteration while writing field_offsets_ at
      // index + i, leaving the cache inconsistent.
      Key& key = keys_[index + i];
      Object* free_entry_indicator = NULL;
      if (key.map == free_entry_indicator) {
        key.map = map;
        key.name = symbol;
        field_offsets_[index + i] = field_offset;
        return;
      }
    }
    // No free entry found in this bucket, so we move them all down one and
    // put the new entry at position zero.
    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
      Key& key = keys_[index + i];
      Key& key2 = keys_[index + i - 1];
      key = key2;
      field_offsets_[index + i] = field_offsets_[index + i - 1];
    }

    // Write the new first entry.
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}
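
// Update() is a cheap within-bucket LRU approximation: free slots are filled
// in order, and when the bucket is full every entry slides down one position
// so the new entry can take slot 0 (evicting the last slot). A standalone
// sketch of the slide-down insert, reusing the hypothetical CacheEntry type
// from the sketch above:
#if 0
void CacheInsertFront(CacheEntry* bucket, int ways, void* key, int value) {
  for (int way = ways - 1; way > 0; way--) {
    bucket[way] = bucket[way - 1];  // the entry in the last slot is evicted
  }
  bucket[0].key = key;
  bucket[0].value = value;
}
#endif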


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
  : type_(t),
    isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
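
// The cache keys each slot on the raw bit pattern of the double input rather
// than on floating-point equality, so filling empty slots with the word pair
// 0xffffffff/0xffffffff (a NaN encoding the FPU never produces) guarantees
// no genuine input can match an uninitialized slot. A sketch of bit-pattern
// keying in standard C++; SlotMatches is a hypothetical helper, not V8 code:
#if 0
#include <stdint.h>
#include <string.h>

struct CacheSlot { uint32_t in[2]; double output; };

bool SlotMatches(const CacheSlot& slot, double input) {
  uint32_t bits[2];
  memcpy(bits, &input, sizeof(bits));  // view the double as two raw words
  return slot.in[0] == bits[0] && slot.in[1] == bits[1];
}
#endif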


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
      continue;
    }
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
      continue;
    }
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  if (FLAG_verify_heap) {
    Verify();
  }
}
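
// CleanUp() uses the classic in-place filter: each pass copies survivors
// toward the front and then truncates with Rewind(last); strings promoted
// out of new space migrate to the old-space list along the way. The same
// pattern on a plain vector (std::vector standing in for the internal List
// type; FilterInPlace is a hypothetical helper):
#if 0
#include <vector>

template <typename T, typename Pred>
void FilterInPlace(std::vector<T>* list, Pred keep) {
  size_t last = 0;
  for (size_t i = 0; i < list->size(); ++i) {
    if (keep((*list)[i])) (*list)[last++] = (*list)[i];
  }
  list->resize(last);  // analogous to Rewind(last)
}
#endif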


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}
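
// Chunks awaiting release form an intrusive singly-linked stack threaded
// through the chunks themselves via set_next_chunk(), so queuing a chunk
// allocates nothing. The generic shape of the pattern (hypothetical Node
// type, not V8 code):
#if 0
struct Node { Node* next; };
static Node* queue_head = NULL;

void Push(Node* node) {
  node->next = queue_head;  // link field lives inside the object itself
  queue_head = node;
}
#endif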


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk, because
      // it searches the list of pages owned by the large object space, and
      // queued chunks have been detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags fields of
      // every piece.
      // If FromAnyPointerAddress encounters a slot that belongs to one of
      // these smaller pieces it will treat it as a slot on a normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}
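
// Note that FreeQueuedChunks() is deliberately two-pass: every queued chunk
// is flagged (and large chunks are carved into page-sized fake headers)
// before the store buffer filters out stale slots, and only then is any
// memory released. A sketch of the two-pass shape, with hypothetical helper
// names and the Node type from the sketch above:
#if 0
void DrainFreeQueue(Node* head) {
  for (Node* n = head; n != NULL; n = n->next) MarkAboutToBeFreed(n);
  FilterStaleReferences();  // must observe the flags before anything is freed
  Node* next;
  for (Node* n = head; n != NULL; n = next) {
    next = n->next;  // read the link before Free() invalidates the node
    Free(n);
  }
}
#endif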


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
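
// The XOR only touches bits below Page::kPageSize, so a tagged entry in a
// crash dump still identifies the page (mask the low bits back off) while
// the low bits record why it went away: 0xc1ead for compacted pages,
// 0x1d1ed ("I died") otherwise. Worked example assuming 1 MB pages
// (illustrative address):
//
//   page      = 0x3fe00000
//   compacted : 0x3fe00000 ^ (0xc1ead & 0xfffff) = 0x3fec1ead
//   recover   : 0x3fec1ead & ~0xfffff            = 0x3fe00000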

} }  // namespace v8::internal