// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>

#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"
#include "src/splay-tree-inl.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap.
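// Each entry is V(type, name, CamelName). ROOT_ACCESSOR further down uses the
// list to generate getters; e.g. (illustrative expansion) the undefined_value
// entry yields:
//   Oddball* undefined_value() {
//     return Oddball::cast(roots_[kUndefinedValueRootIndex]);
//   }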
#define STRONG_ROOT_LIST(V) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, store_buffer_top, StoreBufferTop) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Oddball, exception, Exception) \
  V(Map, cell_map, CellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, meta_map, MetaMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, code_map, CodeMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Oddball, termination_exception, TerminationException) \
  V(Smi, hash_seed, HashSeed) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, string_map, StringMap) \
  V(Map, one_byte_string_map, OneByteStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_one_byte_internalized_string_map, \
    ExternalOneByteInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_one_byte_internalized_string_map, \
    ShortExternalOneByteInternalizedStringMap) \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
  V(Map, undetectable_string_map, UndetectableStringMap) \
  V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap) \
  V(Map, external_int8_array_map, ExternalInt8ArrayMap) \
  V(Map, external_uint8_array_map, ExternalUint8ArrayMap) \
  V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
  V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
  V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
  V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
  V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
  V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array) \
  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
  V(ExternalArray, empty_external_uint8_clamped_array, \
    EmptyExternalUint8ClampedArray) \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, global_context_map, GlobalContextMap) \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, foreign_map, ForeignMap) \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Script, empty_script, EmptyScript) \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
  V(Cell, undefined_cell, UndefineCell) \
  V(JSObject, observation_state, ObservationState) \
  V(Map, external_map, ExternalMap) \
  V(Object, symbol_registry, SymbolRegistry) \
  V(Symbol, frozen_symbol, FrozenSymbol) \
  V(Symbol, nonexistent_symbol, NonExistentSymbol) \
  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(Symbol, observed_symbol, ObservedSymbol) \
  V(Symbol, uninitialized_symbol, UninitializedSymbol) \
  V(Symbol, megamorphic_symbol, MegamorphicSymbol) \
  V(Symbol, premonomorphic_symbol, PremonomorphicSymbol) \
  V(Symbol, generic_symbol, GenericSymbol) \
  V(Symbol, stack_trace_symbol, StackTraceSymbol) \
  V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol) \
  V(Symbol, normal_ic_symbol, NormalICSymbol) \
  V(Symbol, home_object_symbol, HomeObjectSymbol) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
  V(FixedArray, microtask_queue, MicrotaskQueue)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V) \
  V(StringTable, string_table, StringTable)

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(byte_array_map) \
  V(free_space_map) \
  V(one_pointer_filler_map) \
  V(two_pointer_filler_map) \
  V(undefined_value) \
  V(the_hole_value) \
  V(null_value) \
  V(true_value) \
  V(false_value) \
  V(uninitialized_value) \
  V(cell_map) \
  V(global_property_cell_map) \
  V(shared_function_info_map) \
  V(meta_map) \
  V(heap_number_map) \
  V(mutable_heap_number_map) \
  V(native_context_map) \
  V(fixed_array_map) \
  V(code_map) \
  V(scope_info_map) \
  V(fixed_cow_array_map) \
  V(fixed_double_array_map) \
  V(constant_pool_array_map) \
  V(no_interceptor_result_sentinel) \
  V(hash_table_map) \
  V(ordered_hash_table_map) \
  V(empty_fixed_array) \
  V(empty_byte_array) \
  V(empty_descriptor_array) \
  V(empty_constant_pool_array) \
  V(arguments_marker) \
  V(symbol_map) \
  V(sloppy_arguments_elements_map) \
  V(function_context_map) \
  V(catch_context_map) \
  V(with_context_map) \
  V(block_context_map) \
  V(module_context_map) \
  V(global_context_map) \
  V(undefined_map) \
  V(the_hole_map) \
  V(null_map) \
  V(boolean_map) \
  V(uninitialized_map) \
  V(message_object_map) \
  V(foreign_map) \
  V(neander_map)
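// Illustrative consequence of the list above: these roots never move and are
// never allocated in new space, so a store of one of them into another
// object's field can never create an old-to-new pointer, which is why the
// write barrier may safely be skipped for such stores.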
"sticky") \ 291 V(harmony_regexps_string, "harmony_regexps") \ 292 V(input_string, "input") \ 293 V(index_string, "index") \ 294 V(last_index_string, "lastIndex") \ 295 V(object_string, "object") \ 296 V(prototype_string, "prototype") \ 297 V(string_string, "string") \ 298 V(String_string, "String") \ 299 V(symbol_string, "symbol") \ 300 V(Symbol_string, "Symbol") \ 301 V(Map_string, "Map") \ 302 V(Set_string, "Set") \ 303 V(WeakMap_string, "WeakMap") \ 304 V(WeakSet_string, "WeakSet") \ 305 V(for_string, "for") \ 306 V(for_api_string, "for_api") \ 307 V(for_intern_string, "for_intern") \ 308 V(private_api_string, "private_api") \ 309 V(private_intern_string, "private_intern") \ 310 V(Date_string, "Date") \ 311 V(char_at_string, "CharAt") \ 312 V(undefined_string, "undefined") \ 313 V(value_of_string, "valueOf") \ 314 V(stack_string, "stack") \ 315 V(toJSON_string, "toJSON") \ 316 V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \ 317 V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \ 318 V(stack_overflow_string, "kStackOverflowBoilerplate") \ 319 V(illegal_access_string, "illegal access") \ 320 V(cell_value_string, "%cell_value") \ 321 V(illegal_argument_string, "illegal argument") \ 322 V(identity_hash_string, "v8::IdentityHash") \ 323 V(closure_string, "(closure)") \ 324 V(dot_string, ".") \ 325 V(compare_ic_string, "==") \ 326 V(strict_compare_ic_string, "===") \ 327 V(infinity_string, "Infinity") \ 328 V(minus_infinity_string, "-Infinity") \ 329 V(query_colon_string, "(?:)") \ 330 V(Generator_string, "Generator") \ 331 V(throw_string, "throw") \ 332 V(done_string, "done") \ 333 V(value_string, "value") \ 334 V(next_string, "next") \ 335 V(byte_length_string, "byteLength") \ 336 V(byte_offset_string, "byteOffset") \ 337 V(intl_initialized_marker_string, "v8::intl_initialized_marker") \ 338 V(intl_impl_object_string, "v8::intl_object") 339 340 // Forward declarations. 341 class HeapStats; 342 class Isolate; 343 class WeakObjectRetainer; 344 345 346 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, 347 Object** pointer); 348 349 class StoreBufferRebuilder { 350 public: 351 explicit StoreBufferRebuilder(StoreBuffer* store_buffer) 352 : store_buffer_(store_buffer) {} 353 354 void Callback(MemoryChunk* page, StoreBufferEvent event); 355 356 private: 357 StoreBuffer* store_buffer_; 358 359 // We record in this variable how full the store buffer was when we started 360 // iterating over the current page, finding pointers to new space. If the 361 // store buffer overflows again we can exempt the page from the store buffer 362 // by rewinding to this point instead of having to search the store buffer. 363 Object*** start_of_current_page_; 364 // The current page we are scanning in the store buffer iterator. 365 MemoryChunk* current_page_; 366 }; 367 368 369 // A queue of objects promoted during scavenge. Each object is accompanied 370 // by it's size to avoid dereferencing a map pointer for scanning. 
// A queue of objects promoted during scavenge. Each object is accompanied by
// its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is smaller than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
      NewSpacePage* front_page =
          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
      DCHECK(!front_page->prev_page()->is_anchor());
      front_ =
          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
    }
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}

    HeapObject* obj_;
    int size_;
  };
  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
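// Usage sketch (illustrative): during a scavenge, each promoted object is
// pushed together with its size and later popped for rescanning:
//   heap->promotion_queue()->insert(promoted_object, object_size);
//   ...
//   HeapObject* obj;
//   int size;
//   heap->promotion_queue()->remove(&obj, &size);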
typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                   HeapObject* object);


// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

  friend class Heap;

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept separate
  // from old space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }
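  // Illustrative use of the mask described above (heap is a Heap*, addr an
  // arbitrary Address):
  //   bool in_new_space =
  //       (reinterpret_cast<uintptr_t>(addr) & heap->NewSpaceMask()) ==
  //       reinterpret_cast<uintptr_t>(heap->NewSpaceStart());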
  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_POINTER_SPACE:
        return old_pointer_space();
      case OLD_DATA_SPACE:
        return old_data_space();
      case MAP_SPACE:
        return map_space();
      case CELL_SPACE:
        return cell_space();
      case PROPERTY_CELL_SPACE:
        return property_cell_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldPointerSpaceAllocationTopAddress() {
    return old_pointer_space_->allocation_top_address();
  }
  Address* OldPointerSpaceAllocationLimitAddress() {
    return old_pointer_space_->allocation_limit_address();
  }

  Address* OldDataSpaceAllocationTopAddress() {
    return old_data_space_->allocation_top_address();
  }
  Address* OldDataSpaceAllocationLimitAddress() {
    return old_data_space_->allocation_limit_address();
  }

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult
      CopyJSObject(JSObject* source, AllocationSite* site = NULL);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Iterates the whole code space to clear all ICs of the given kind.
  void ClearAllICsByKind(Code::Kind kind);

  // For use during bootup.
  void RepairFreeListsAfterBoot();

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  // Move len elements within a given array from src_index index to dst_index
  // index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Sloppy mode arguments object size.
  static const int kSloppyArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // Strict mode arguments have no callee, so the object is smaller.
  static const int kStrictArgumentsObjectSize =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in sloppy mode.
  static const int kArgumentsCalleeIndex = 1;
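  // Usage sketch (illustrative, assuming JSObject::InObjectPropertyAt): the
  // indices above address the in-object fields of an arguments object:
  //   Object* length =
  //       arguments->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
  //   Object* callee =
  //       arguments->InObjectPropertyAt(Heap::kArgumentsCalleeIndex);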
  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  // Indicates whether live bytes adjustment is triggered from within the GC
  // code or from mutator code.
  enum InvocationMode { FROM_GC, FROM_MUTATOR };

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(Address address, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template <Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed();

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
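  // Usage sketch (illustrative): a prologue callback filtered to full GCs:
  //   static void OnMarkCompact(v8::Isolate* isolate, v8::GCType type,
  //                             v8::GCCallbackFlags flags) { /* ... */ }
  //   heap->AddGCPrologueCallback(OnMarkCompact, v8::kGCTypeMarkSweepCompact);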
  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
  type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* name() { return String::cast(roots_[k##name##RootIndex]); }
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

  // The hidden_string is special because it is the empty string, but does
  // not match the empty string.
  String* hidden_string() { return hidden_string_; }

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
  Object* array_buffers_list() const { return array_buffers_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  // Number of mark-sweeps.
  unsigned int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers into the from-semispace of new space found in the
  // memory interval from start to end, and marks them.
  void IterateAndMarkPointersToFromSpace(Address start, Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address address);
  inline bool InNewSpacePage(Address address);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old pointer space.
  inline bool InOldPointerSpace(Address address);
  inline bool InOldPointerSpace(Object* object);

  // Returns whether the object resides in old data space.
  inline bool InOldDataSpace(Address address);
  inline bool InOldDataSpace(Object* object);
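  // Usage sketch (illustrative): the predicates above gate write-barrier
  // work; only stores that may create old-to-new pointers need recording:
  //   if (heap->InNewSpace(value) && !heap->InNewSpace(host)) {
  //     // The slot in 'host' must be recorded in the store buffer.
  //   }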
  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  void public_set_materialized_objects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();

  bool weak_embedded_objects_verification_enabled() {
    return no_weak_object_verification_scope_depth_ == 0;
  }
#endif

#ifdef DEBUG
  void Print();
  void PrintHandles();

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Returns a deterministic "time" value in ms. Works only with
  // FLAG_verify_predictable.
  double synthetic_time() { return allocations_count_ / 2.0; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Write barrier support for address[offset] = o.
  INLINE(void RecordWrite(Address address, int offset));

  // Write barrier support for address[start : start + len[ = o.
  INLINE(void RecordWrites(Address address, int start, int len));
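  // Usage sketch (illustrative; Foo::kBarOffset is a hypothetical field
  // offset): after a raw store into an old-space object, record the slot so
  // the store buffer can later find old-to-new pointers:
  //   heap->RecordWrite(object->address(), Foo::kBarOffset);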
  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

#ifdef DEBUG
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();
#endif

  // Callback functions passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };

  // If an object has an AllocationMemento trailing it, return it; otherwise
  // return NULL.
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
                                                  ScratchpadSlotMode mode);

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  void ReserveSpace(int* sizes, Address* addresses);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kPointerMultiplier = i::kPointerSize / 4;

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
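  // Illustrative arithmetic: kPointerMultiplier is 1 on 32-bit targets
  // (kPointerSize == 4) and 2 on 64-bit targets, so e.g.
  // kMaxSemiSpaceSizeHighMemoryDevice is 8 MB on 32-bit and 16 MB on 64-bit.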
  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
                                        int freed_global_handles);

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // Implements the corresponding V8 API function.
  bool IdleNotification(int idle_time_in_ms);

  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
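  // Illustrative consequences of the ordering above: kByteArrayMapRootIndex
  // is 0 (the first STRONG_ROOT_LIST entry), every index below
  // kStringTableRootIndex is a strong root (hence kStrongRootListLength), and
  // the smi roots occupy [kSmiRootsStart, kRootListLength).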
  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  ExternalArray* EmptyExternalArrayForMap(Map* map);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copy a block of memory from src to dst. The size of the block must be a
  // multiple of the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Check the new space expansion criteria and expand semispaces if they
  // were hit.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementPromotedObjectsSize(int object_size) {
    DCHECK(object_size > 0);
    promoted_objects_size_ += object_size;
  }

  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
    DCHECK(object_size > 0);
    semi_space_copied_object_size_ += object_size;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(int survived) {
    DCHECK(survived >= 0);
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull() {
    if (FLAG_gc_global) return true;

    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit =
        old_generation_allocation_limit_ - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessWeakReferences(WeakObjectRetainer* retainer);

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearJSFunctionResultCaches();

  void ClearNormalizedMapCaches();

  GCTracer* tracer() { return &tracer_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // Update GC statistics that are tracked on the Heap.
  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
                                    double marking_time);

  // Returns the maximum GC pause.
  double get_max_gc_pause() { return max_gc_pause_; }

  // Returns the maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns the minimal interval between two subsequent collections.
  double get_min_in_mutator() { return min_in_mutator_; }

  MarkCompactCollector* mark_compact_collector() {
    return &mark_compact_collector_;
  }

  StoreBuffer* store_buffer() { return &store_buffer_; }

  Marking* marking() { return &marking_; }

  IncrementalMarking* incremental_marking() { return &incremental_marking_; }

  ExternalStringTable* external_string_table() {
    return &external_string_table_;
  }
  // Returns the current sweep generation.
  int sweep_generation() { return sweep_generation_; }

  inline Isolate* isolate();

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);

  inline bool OldGenerationAllocationLimitReached();

  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
  }

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks();

  int gc_count() const { return gc_count_; }

  // Completely clear the Instanceof cache (to stop it from keeping objects
  // alive around a GC).
  inline void CompletelyClearInstanceofCache();

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  uint32_t HashSeed() {
    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
    DCHECK(FLAG_randomize_hashes || seed == 0);
    return seed;
  }
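  // Usage sketch (illustrative, assuming StringHasher::HashSequentialString):
  // the seed feeds string hashing so hash values differ between runs:
  //   uint32_t hash =
  //       StringHasher::HashSequentialString(chars, length, heap->HashSeed());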
  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetConstructStubDeoptPCOffset(int pc_offset) {
    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetGetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetSetterStubDeoptPCOffset(int pc_offset) {
    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void DeoptMarkedAllocationSites();

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
  // stored in a contiguous linear buffer. Stats groups are stored one after
  // another.
  enum {
    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
    FIRST_FIXED_ARRAY_SUB_TYPE =
        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
    FIRST_CODE_AGE_SUB_TYPE =
        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
  };

  void RecordObjectStats(InstanceType type, size_t size) {
    DCHECK(type <= LAST_TYPE);
    object_counts_[type]++;
    object_sizes_[type] += size;
  }

  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
    int code_age_index =
        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
           code_age_index < OBJECT_STATS_COUNT);
    object_counts_[code_sub_type_index]++;
    object_sizes_[code_sub_type_index] += size;
    object_counts_[code_age_index]++;
    object_sizes_[code_age_index] += size;
  }

  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
  }

  void CheckpointObjectStats();
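  // Illustrative layout of object_counts_ / object_sizes_ given the enum
  // above:
  //   [0 .. LAST_TYPE]                                 per-InstanceType stats
  //   [FIRST_CODE_KIND_SUB_TYPE .. FIRST_FIXED_ARRAY_SUB_TYPE)  per Code::Kind
  //   [FIRST_FIXED_ARRAY_SUB_TYPE .. FIRST_CODE_AGE_SUB_TYPE)   per FixedArray
  //                                                             sub type
  //   [FIRST_CODE_AGE_SUB_TYPE .. OBJECT_STATS_COUNT)           per code age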
  // We don't use a LockGuard here since we want to lock the heap
  // only when FLAG_concurrent_recompilation is true.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };

  void AddWeakObjectToCodeDependency(Handle<Object> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);

  void InitializeWeakObjectToCodeTable() {
    set_weak_object_to_code_table(undefined_value());
  }

  void EnsureWeakObjectToCodeTable();

  static void FatalProcessOutOfMemory(const char* location,
                                      bool take_snapshot = false);

  // This event is triggered after a successful allocation of a new object
  // made by the runtime. Allocations of target space for object evacuation do
  // not trigger the event. In order to track ALL allocations, one must turn
  // off FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

 protected:
  // Methods made available to tests.

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
      AllocateMap(InstanceType instance_type, int instance_size,
                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult
      AllocateJSObject(JSFunction* constructor,
                       PretenureFlag pretenure = NOT_TENURED,
                       AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                              bool alloc_props = true,
                              AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult
      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
                         PretenureFlag pretenure = NOT_TENURED);

  // Allocates a byte array of the specified length.
  MUST_USE_RESULT AllocationResult
      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Copy the code and scope info part of the code object, but insert
  // the provided data as the relocation information.
  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
                                            Vector<byte> reloc_info);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  // Allocates a fixed array initialized with undefined values.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
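  // Usage sketch (illustrative, assuming AllocationResult::To as used by
  // V8's allocation paths): callers check for a retry before using the
  // object:
  //   AllocationResult allocation = AllocateByteArray(length);
  //   HeapObject* result;
  //   if (!allocation.To(&result)) {
  //     // Allocation failed; the caller triggers a GC and retries.
  //   }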

 private:
  Heap();

  // The amount of external memory registered through the API kept alive
  // by global handles.
  int64_t amount_of_external_allocated_memory_;

  // Caches the amount of external memory registered at the last global gc.
  int64_t amount_of_external_allocated_memory_at_last_global_gc_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  int reserved_semispace_size_;
  int max_semi_space_size_;
  int initial_semispace_size_;
  intptr_t max_old_generation_size_;
  intptr_t max_executable_size_;
  intptr_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since the last new space expansion.
  int survived_since_last_expansion_;

  // For keeping track of when to flush RegExp code.
  int sweep_generation_;

  int always_allocate_scope_depth_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  int global_ic_age_;

  bool flush_monomorphic_ics_;

  int scan_on_scavenge_pages_;

  NewSpace new_space_;
  OldSpace* old_pointer_space_;
  OldSpace* old_data_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  CellSpace* cell_space_;
  PropertyCellSpace* property_cell_space_;
  LargeObjectSpace* lo_space_;
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since the last global gc.
  int64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // Countdown counter, dumps allocation hash when 0.
  uint32_t dump_allocations_hash_countdown_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  static const int kRememberedUnmappedPages = 128;
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Total length of the strings we failed to flatten since the last GC.
  int unflattened_strings_length_;

#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  inline void set_##name(type* value) {                                      \
    /* The deserializer makes use of the fact that these common roots are */ \
    /* never in new space and never on a page that is being compacted.    */ \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
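
  // For illustration, the ROOT_ACCESSOR expansion for one entry of the root
  // list (undefined_value) is equivalent to:
  //
  //   inline void set_undefined_value(Oddball* value) {
  //     DCHECK(kUndefinedValueRootIndex >= kOldSpaceRoots ||
  //            !InNewSpace(value));
  //     roots_[kUndefinedValueRootIndex] = value;
  //   }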

#ifdef DEBUG
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
  int allocation_timeout_;
#endif  // DEBUG

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  intptr_t old_generation_allocation_limit_;

  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  bool old_gen_exhausted_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at
  // start.
  Object* native_contexts_list_;
  Object* array_buffers_list_;
  Object* allocation_sites_list_;

  // WeakHashTable that maps objects embedded in optimized code to a dependent
  // code list. It is initialized lazily and contains the undefined_value at
  // start.
  Object* weak_object_to_code_table_;

  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
  // marking. It is initialized during marking, destroyed after marking and
  // contains Smi(0) while marking is not active.
  Object* encountered_weak_collections_;

  StoreBufferRebuilder store_buffer_rebuilder_;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  // The special hidden string which is an empty string, but does not match
  // any string when looked up in properties.
  String* hidden_string_;

  // GC callback function, called before and after mark-compact GC.
  // Allocations in the callback function are disallowed.
  struct GCPrologueCallbackPair {
    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCPrologueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCPrologueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable.
    bool pass_isolate_;
  };
  List<GCPrologueCallbackPair> gc_prologue_callbacks_;

  struct GCEpilogueCallbackPair {
    GCEpilogueCallbackPair(v8::Isolate::GCEpilogueCallback callback,
                           GCType gc_type, bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
    bool operator==(const GCEpilogueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    v8::Isolate::GCEpilogueCallback callback;
    GCType gc_type;
    // TODO(dcarney): remove variable.
    bool pass_isolate_;
  };
  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
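
  // The operator== on the callback pair structs above exists so a pair can
  // be removed from its list by value, matching on the callback alone. A
  // minimal sketch of the removal path (assumed to live in heap.cc):
  //
  //   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
  //   gc_epilogue_callbacks_.RemoveElement(pair);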

  // Support for computing object sizes during GC.
  HeapObjectCallback gc_safe_size_of_old_object_;
  static int GcSafeSizeOfOldObject(HeapObject* object);

  // Update the GC state. Called from the mark-compact collector.
  void MarkMapPointersAsEncoded(bool encoded) {
    DCHECK(!encoded);
    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
  }

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method, objects in old space must not move.
  // Right now we only process pretenuring feedback in high promotion mode.
  void ProcessPretenuringFeedback();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void MakeHeapIterable();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, const char* gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  // Selects the proper allocation space depending on the given object
  // size, pretenuring decision, and preferred old-space.
  static AllocationSpace SelectSpace(int object_size,
                                     AllocationSpace preferred_old_space,
                                     PretenureFlag pretenure) {
    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
           preferred_old_space == OLD_DATA_SPACE);
    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
  }
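
  // A few concrete outcomes of SelectSpace, derived directly from the body
  // above:
  //
  //   SelectSpace(16, OLD_DATA_SPACE, NOT_TENURED)  // -> NEW_SPACE
  //   SelectSpace(16, OLD_DATA_SPACE, TENURED)      // -> OLD_DATA_SPACE
  //   SelectSpace(Page::kMaxRegularHeapObjectSize + kPointerSize,
  //               OLD_POINTER_SPACE, NOT_TENURED)   // -> LO_SPACE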

  // Allocate an uninitialized object. The memory is non-executable if the
  // hardware and OS allow. This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);

  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
      Allocate(Map* map, AllocationSpace space,
               AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
      AllocatePartialMap(InstanceType instance_type, int instance_size);

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);
  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
      AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                                   Object* filler);

  // Allocates and partially initializes a String. There are two String
  // encodings: one-byte and two-byte. These functions allocate a string of
  // the given length and set its map and length fields. The characters of
  // the string are uninitialized.
  MUST_USE_RESULT AllocationResult
      AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
      AllocateRawTwoByteString(int length, PretenureFlag pretenure);

  bool CreateInitialMaps();
  void CreateInitialObjects();

  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns a retry AllocationResult if
  // the copy could not be allocated.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy. Returns a retry
  // AllocationResult if the copy could not be allocated.
  MUST_USE_RESULT AllocationResult
      CopyFixedArrayWithMap(FixedArray* src, Map* map);
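
  // Illustrative sketch (cow_array is an assumed example variable):
  // copy-on-write arrays are plain FixedArrays carrying the
  // fixed_cow_array_map root, so "un-sharing" one is a map-substituting
  // copy via the root accessors declared earlier in this header:
  //
  //   AllocationResult result =
  //       heap->CopyFixedArrayWithMap(cow_array, heap->fixed_array_map());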

  // Make a copy of src and return it. Returns a retry AllocationResult if
  // the copy could not be allocated.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Make a copy of src and return it. Returns a retry AllocationResult if
  // the copy could not be allocated.
  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
      ConstantPoolArray* src);

  // Computes a single character string where the character has the given
  // code. A cache is used for one-byte (Latin1) codes.
  MUST_USE_RESULT AllocationResult
      LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);

  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small);

  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
      const ConstantPoolArray::NumberOfEntries& small,
      const ConstantPoolArray::NumberOfEntries& extended);

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateExternalArray(int length, ExternalArrayType array_type,
                            void* external_pointer, PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                              PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);

  // These two Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();

  // Allocate the empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate the empty external array of the given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyExternalArray(ExternalArrayType array_type);

  // Allocate the empty fixed typed array of the given type.
  MUST_USE_RESULT AllocationResult
      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate the empty constant pool array.
  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
      AllocateCode(int object_size, bool immovable);

  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);

  MUST_USE_RESULT AllocationResult InternalizeString(String* str);

  // Performs a minor collection in the new generation.
  void Scavenge();

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit an unused semi space.
  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

  // Fills in bogus values in from space.
  void ZapFromSpace();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                          StoreBufferEvent event);

  // Performs a major collection in the whole heap.
  void MarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();

  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);

  // Deoptimizes all code that contains allocation instructions which are
  // tenured or not tenured, depending on the given flag. Moreover, it clears
  // the pretenuring allocation site statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllAllocationSitesDependentCode if too many objects died in the old
  // space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Called on heap tear-down.
  void TearDownArrayBuffers();

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Slow part of scavenge object.
  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

  // Total RegExp code ever generated.
  double total_regexp_code_generated_;

  GCTracer tracer_;

  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  // Sets used allocation sites entries to undefined.
  void FlushAllocationSitesScratchpad();

  // Initializes the allocation sites scratchpad with undefined values.
  void InitializeAllocationSitesScratchpad();

  // Adds an allocation site to the scratchpad if there is space left.
  void AddAllocationSiteToScratchpad(AllocationSite* site,
                                     ScratchpadSlotMode mode);

  void UpdateSurvivalStatistics(int start_new_space_size);

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  static const int kOldSurvivalRateLowThreshold = 10;

  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_rate_;
  intptr_t semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size, we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
  // Re-visit incremental marking heuristics.
  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
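
  // Worked example of the thresholds above (a sketch; the bookkeeping that
  // updates high_survival_rate_period_length_ is assumed to live in
  // UpdateSurvivalStatistics): a scavenge in which 93% of new space survives
  // clears kYoungSurvivalRateHighThreshold (90) and extends the period, so
  // IsHighSurvivalRate() holds; a scavenge well below the threshold resets
  // the period length to zero.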

  void SelectScavengingVisitorsTable();

  void IdleMarkCompact(const char* message);

  void AdvanceIdleIncrementalMarking(intptr_t step_size);

  bool WorthActivatingIncrementalMarking();

  void ClearObjectStats(bool clear_last_time_stats = false);

  void set_weak_object_to_code_table(Object* value) {
    DCHECK(!InNewSpace(value));
    weak_object_to_code_table_ = value;
  }

  Object** weak_object_to_code_table_address() {
    return &weak_object_to_code_table_;
  }

  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  inline void PrintAlloctionsHash();

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  // Object counts and used memory by InstanceType.
  size_t object_counts_[OBJECT_STATS_COUNT];
  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
  size_t object_sizes_[OBJECT_STATS_COUNT];
  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];

  // Maximum GC pause.
  double max_gc_pause_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  double min_in_mutator_;

  // Cumulative GC time spent in marking.
  double marking_time_;

  // Cumulative GC time spent in sweeping.
  double sweeping_time_;

  MarkCompactCollector mark_compact_collector_;

  StoreBuffer store_buffer_;

  Marking marking_;

  IncrementalMarking incremental_marking_;

  GCIdleTimeHandler gc_idle_time_handler_;
  unsigned int gc_count_at_last_idle_gc_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive
  // value, this variable holds the number of garbage collections since the
  // last deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

#ifdef VERIFY_HEAP
  int no_weak_object_verification_scope_depth_;
#endif

  static const int kAllocationSiteScratchpadSize = 256;
  int allocation_sites_scratchpad_length_;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  ExternalStringTable external_string_table_;

  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;

  MemoryChunk* chunks_queued_for_free_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  friend class AlwaysAllocateScope;
  friend class Factory;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class Isolate;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class MapCompact;
#ifdef VERIFY_HEAP
  friend class NoWeakObjectVerificationScope;
#endif
  friend class Page;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                       // 0
  int* new_space_size;                     // 1
  int* new_space_capacity;                 // 2
  intptr_t* old_pointer_space_size;        // 3
  intptr_t* old_pointer_space_capacity;    // 4
  intptr_t* old_data_space_size;           // 5
  intptr_t* old_data_space_capacity;       // 6
  intptr_t* code_space_size;               // 7
  intptr_t* code_space_capacity;           // 8
  intptr_t* map_space_size;                // 9
  intptr_t* map_space_capacity;            // 10
  intptr_t* cell_space_size;               // 11
  intptr_t* cell_space_capacity;           // 12
  intptr_t* lo_space_size;                 // 13
  int* global_handle_count;                // 14
  int* weak_global_handle_count;           // 15
  int* pending_global_handle_count;        // 16
  int* near_death_global_handle_count;     // 17
  int* free_global_handle_count;           // 18
  intptr_t* memory_allocator_size;         // 19
  intptr_t* memory_allocator_capacity;     // 20
  int* objects_per_type;                   // 21
  int* size_per_type;                      // 22
  int* os_error;                           // 23
  int* end_marker;                         // 24
  intptr_t* property_cell_space_size;      // 25
  intptr_t* property_cell_space_capacity;  // 26
};


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  // Implicitly disable artificial allocation failures.
  Heap* heap_;
  DisallowAllocationFailure daf_;
};
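
// Illustrative usage sketch (assumed caller): allocations inside the scope
// may not fail artificially, e.g. under --gc-interval induced failures.
//
//   {
//     AlwaysAllocateScope always_allocate(isolate);
//     // ... allocations here bypass artificial failure points ...
//   }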


#ifdef VERIFY_HEAP
class NoWeakObjectVerificationScope {
 public:
  inline NoWeakObjectVerificationScope();
  inline ~NoWeakObjectVerificationScope();
};
#endif


class GCCallbacksScope {
 public:
  explicit inline GCCallbacksScope(Heap* heap);
  inline ~GCCallbacksScope();

  inline bool CheckReenter();

 private:
  Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};


// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space. Returns each space in turn, and null
// when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap: Map
// space, old pointer space, old data space, code space and cell space. Returns
// each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};
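
// All three space iterators above share the same protocol; a minimal usage
// sketch (assumed Heap* heap in scope):
//
//   AllSpaces spaces(heap);
//   for (Space* space = spaces.next(); space != NULL;
//        space = spaces.next()) {
//     // ... inspect the space ...
//   }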


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
  HeapObjectCallback size_func_;
};


// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// these can each iterate over one space only.
//
// HeapIterator ensures there is no allocation during its lifetime
// (using an embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// node filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapObjectsFilter;

class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap);
  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
  ~HeapIterator();

  HeapObject* next();
  void reset();

 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  // Perform the initialization.
  void Init();
  // Perform all necessary shutdown (destruction) work.
  void Shutdown();
  HeapObject* NextObject();

  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;
  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
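
// Illustrative usage sketch (assumed caller; next() is taken to return NULL
// once the heap is exhausted): enumerate every heap object while allocation
// is disallowed for the iterator's lifetime.
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect obj; do not allocate here ...
//   }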


// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};


// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kAbsent means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  int Lookup(Map* source, Name* name) {
    if (!name->IsUniqueName()) return kAbsent;
    int index = Hash(source, name);
    Key& key = keys_[index];
    if ((key.source == source) && (key.name == name)) return results_[index];
    return kAbsent;
  }

  // Update an element in the cache.
  void Update(Map* source, Name* name, int result) {
    DCHECK(result != kAbsent);
    if (name->IsUniqueName()) {
      int index = Hash(source, name);
      Key& key = keys_[index];
      key.source = source;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static int Hash(Object* source, Name* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t source_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
        kPointerSizeLog2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
        kPointerSizeLog2;
    return (source_hash ^ name_hash) % kLength;
  }

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};


class RegExpResultsCache {
 public:
  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };

  // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
  // On success, the returned result is guaranteed to be a COW-array.
  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
                        ResultsCacheType type);
  // Attempt to add value_array to the cache specified by type. On success,
  // value_array is turned into a COW-array.
  static void Enter(Isolate* isolate, Handle<String> key_string,
                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
                    ResultsCacheType type);
  static void Clear(FixedArray* cache);
  static const int kRegExpResultsCacheSize = 0x100;

 private:
  static const int kArrayEntriesPerCacheEntry = 4;
  static const int kStringOffset = 0;
  static const int kPatternOffset = 1;
  static const int kArrayOffset = 2;
};


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned as in some GC situations the object has been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
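
// Illustrative sketch (an assumed example, not part of V8): a trivial
// retainer that keeps every object alive, returning its current address.
//
//   class KeepAliveRetainer : public WeakObjectRetainer {
//    public:
//     virtual Object* RetainAs(Object* object) { return object; }
//   };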


// Intrusive object marking uses the least significant bit of a
// heap object's map word to mark objects.
// Normally all map words have the least significant bit set
// because they contain a tagged map pointer.
// If the bit is not set, the object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
 public:
  static bool IsMarked(HeapObject* object) {
    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
  }

  static void ClearMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
    DCHECK(!IsMarked(object));
  }

  static void SetMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
    DCHECK(IsMarked(object));
  }

  static Map* MapOfMarkedObject(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
  }

  static int SizeOfMarkedObject(HeapObject* object) {
    return object->SizeFromMap(MapOfMarkedObject(object));
  }

 private:
  static const uintptr_t kNotMarkedBit = 0x1;
  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
};


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after the first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  virtual void VisitPointers(Object** start, Object** end);

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif  // DEBUG
}
}  // namespace v8::internal

#endif  // V8_HEAP_HEAP_H_