// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_

#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"

namespace v8 {

// Forward declarations.
class ApiFunction;

namespace internal {

// Forward declarations.
class SourcePosition;
class StatsCounter;

// -----------------------------------------------------------------------------
// Platform independent assembler base class.
enum class CodeObjectRequired { kNo, kYes };

class AssemblerBase: public Malloced {
 public:
  AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
  virtual ~AssemblerBase();

  Isolate* isolate() const { return isolate_; }
  int jit_cookie() const { return jit_cookie_; }

  bool emit_debug_code() const { return emit_debug_code_; }
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  bool serializer_enabled() const { return serializer_enabled_; }
  void enable_serializer() { serializer_enabled_ = true; }

  bool predictable_code_size() const { return predictable_code_size_; }
  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
  void set_enabled_cpu_features(uint64_t features) {
    enabled_cpu_features_ = features;
  }
  bool IsEnabled(CpuFeature f) {
    return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
  }

  bool is_constant_pool_available() const {
    if (FLAG_enable_embedded_constant_pool) {
      return constant_pool_available_;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
      return false;
    }
  }

  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
  // cross-snapshotting.
  static void QuietNaN(HeapObject* nan) { }

  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }

  // This function is called when code generation is aborted, so that
  // the assembler can clean up internal data structures.
  virtual void AbortedCodeGeneration() { }

  // Debugging
  void Print();

  static const int kMinimalBufferSize = 4*KB;

  static void FlushICache(Isolate* isolate, void* start, size_t size);

 protected:
  // The buffer into which code and relocation info are generated. It could
  // either be owned by the assembler or be provided externally.
  byte* buffer_;
  int buffer_size_;
  bool own_buffer_;

  void set_constant_pool_available(bool available) {
    if (FLAG_enable_embedded_constant_pool) {
      constant_pool_available_ = available;
    } else {
      // Embedded constant pool not supported on this architecture.
      UNREACHABLE();
    }
  }

  // The program counter, which points into the buffer above and moves forward.
  byte* pc_;

 private:
  Isolate* isolate_;
  int jit_cookie_;
  uint64_t enabled_cpu_features_;
  bool emit_debug_code_;
  bool predictable_code_size_;
  bool serializer_enabled_;

  // Indicates whether the constant pool can be accessed, which is only
  // possible if the pp register points to the current code object's
  // constant pool.
  bool constant_pool_available_;

  // Constant pool.
  friend class FrameAndConstantPoolScope;
  friend class ConstantPoolUnavailableScope;
};

// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
 public:
  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
      : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
    assembler_->set_emit_debug_code(false);
  }
  ~DontEmitDebugCodeScope() {
    assembler_->set_emit_debug_code(old_value_);
  }
 private:
  AssemblerBase* assembler_;
  bool old_value_;
};
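// Illustrative use of the scope above (a sketch, not code from this header;
// what gets emitted inside the scope is a placeholder):
//
//   {
//     DontEmitDebugCodeScope no_debug_code(assembler);
//     // Code emitted here skips the extra debug checks; the previous
//     // emit_debug_code() setting is restored when the scope exits.
//   }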

// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM.
class PredictableCodeSizeScope {
 public:
  explicit PredictableCodeSizeScope(AssemblerBase* assembler);
  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
  ~PredictableCodeSizeScope();
  void ExpectSize(int expected_size) { expected_size_ = expected_size; }

 private:
  AssemblerBase* assembler_;
  int expected_size_;
  int start_offset_;
  bool old_value_;
};

// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
 public:
#ifdef DEBUG
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f);
  ~CpuFeatureScope();

 private:
  AssemblerBase* assembler_;
  uint64_t old_enabled_;
#else
  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {}
#endif
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
//   if (assembler->IsSupported(SSE3)) {
//     CpuFeatureScope fscope(assembler, SSE3);
//     // Generate code containing SSE3 instructions.
//   } else {
//     // Generate alternative code.
//   }
class CpuFeatures : public AllStatic {
 public:
  static void Probe(bool cross_compile) {
    STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
    if (initialized_) return;
    initialized_ = true;
    ProbeImpl(cross_compile);
  }

  static unsigned SupportedFeatures() {
    Probe(false);
    return supported_;
  }

  static bool IsSupported(CpuFeature f) {
    return (supported_ & (1u << f)) != 0;
  }

  static inline bool SupportsCrankshaft();

  static inline unsigned cache_line_size() {
    DCHECK(cache_line_size_ != 0);
    return cache_line_size_;
  }

  static void PrintTarget();
  static void PrintFeatures();

 private:
  friend class ExternalReference;
  friend class AssemblerBase;
  // Flush instruction cache.
  static void FlushICache(void* start, size_t size);

  // Platform-dependent implementation.
  static void ProbeImpl(bool cross_compile);

  static unsigned supported_;
  static unsigned cache_line_size_;
  static bool initialized_;
  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
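// Worked example of the feature bit layout used above (the numeric value of
// a CpuFeature enumerator is architecture specific; SSE3 == 2 is assumed
// here purely for illustration):
//
//   CpuFeatures::IsSupported(SSE3)  // tests supported_ & (1u << 2)
//   assembler->IsEnabled(SSE3)      // tests enabled_cpu_features_ &
//                                   //       (static_cast<uint64_t>(1) << 2)
//
// The STATIC_ASSERT in Probe() above guarantees that every feature fits in
// the unsigned supported_ mask.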

// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote a known or (yet)
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.

class Label {
 public:
  enum Distance {
    kNear, kFar
  };

  INLINE(Label()) {
    Unuse();
    UnuseNear();
  }

  INLINE(~Label()) {
    DCHECK(!is_linked());
    DCHECK(!is_near_linked());
  }

  INLINE(void Unuse()) { pos_ = 0; }
  INLINE(void UnuseNear()) { near_link_pos_ = 0; }

  INLINE(bool is_bound() const) { return pos_ < 0; }
  INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
  INLINE(bool is_linked() const) { return pos_ > 0; }
  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }

  // Returns the position of bound or linked labels. Cannot be used
  // for unused labels.
  int pos() const;
  int near_link_pos() const { return near_link_pos_ - 1; }

 private:
  // pos_ encodes both the binding state (via its sign)
  // and the binding position (via its value) of a label.
  //
  // pos_ <  0  bound label, pos() returns the jump target position
  // pos_ == 0  unused label
  // pos_ >  0  linked label, pos() returns the last reference position
  int pos_;

  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
  int near_link_pos_;

  void bind_to(int pos) {
    pos_ = -pos - 1;
    DCHECK(is_bound());
  }
  void link_to(int pos, Distance distance = kFar) {
    if (distance == kNear) {
      near_link_pos_ = pos + 1;
      DCHECK(is_near_linked());
    } else {
      pos_ = pos + 1;
      DCHECK(is_linked());
    }
  }

  friend class Assembler;
  friend class Displacement;
  friend class RegExpMacroAssemblerIrregexp;

#if V8_TARGET_ARCH_ARM64
  // On ARM64, the Assembler keeps track of pointers to Labels to resolve
  // branches to distant targets. Copying labels would confuse the Assembler.
  DISALLOW_COPY_AND_ASSIGN(Label);  // NOLINT
#endif
};
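// Worked example of the pos_ encoding documented above (pc offsets are
// hypothetical):
//
//   Label l;       // pos_ == 0: l.is_unused()
//   // A far use at pc offset 4 links the label:
//   //   link_to(4)   ->  pos_ ==  5,  l.is_linked(),  l.pos() == 4
//   // Binding at pc offset 12 then fixes the jump target:
//   //   bind_to(12)  ->  pos_ == -13, l.is_bound(),   l.pos() == 12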

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

enum ArgvMode { kArgvOnStack, kArgvInRegister };

// Specifies whether to perform icache flush operations on RelocInfo updates.
// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
// skipped (only use this if you will flush the icache manually before it is
// executed).
enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };

// -----------------------------------------------------------------------------
// Relocation information

// Relocation information consists of the address (pc) of the datum
// to which the relocation information applies, the relocation mode
// (rmode), and an optional data field. The relocation mode may be
// "descriptive" and not indicate a need for relocation, but simply
// describe a property of the datum. Such rmodes are useful for GC
// and nice disassembly output.

class RelocInfo {
 public:
  // The constant kNoPosition is used with the collecting of source positions
  // in the relocation information. Two types of source positions are
  // collected: "position" (RelocMode position) and "statement position"
  // (RelocMode statement_position). The "position" is collected at places in
  // the source code which are of interest when making stack traces to
  // pin-point the source location of a stack frame as close as possible. The
  // "statement position" is collected at the beginning of each statement, and
  // is used to indicate possible break locations. kNoPosition is used to
  // indicate an invalid/uninitialized position value.
  static const int kNoPosition = -1;

  // This string is used to add padding comments to the reloc info in cases
  // where we are not sure to have enough space for patching in during
  // lazy deoptimization. This is the case if we have indirect calls for which
  // we do not normally record relocation info.
  static const char* const kFillerCommentString;

  // The minimum size of a comment is equal to two bytes for the extra tagged
  // pc and kPointerSize for the actual pointer to the comment.
  static const int kMinRelocCommentSize = 2 + kPointerSize;

  // The maximum size for a call instruction including pc-jump.
  static const int kMaxCallSize = 6;

  // The maximum pc delta that will use the short encoding.
  static const int kMaxSmallPCDelta;

  enum Mode {
    // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
    CODE_TARGET,  // Code target which is not any of the above.
    CODE_TARGET_WITH_ID,
    DEBUGGER_STATEMENT,  // Code target for the debugger statement.
    EMBEDDED_OBJECT,
    CELL,

    // Everything after runtime_entry (inclusive) is not GC'ed.
    RUNTIME_ENTRY,
    COMMENT,
    POSITION,            // See comment for kNoPosition above.
    STATEMENT_POSITION,  // See comment for kNoPosition above.

    // Additional code inserted for debug break slot.
    DEBUG_BREAK_SLOT_AT_POSITION,
    DEBUG_BREAK_SLOT_AT_RETURN,
    DEBUG_BREAK_SLOT_AT_CALL,

    EXTERNAL_REFERENCE,  // The address of an external C++ function.
    INTERNAL_REFERENCE,  // An address inside the same function.

    // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
    INTERNAL_REFERENCE_ENCODED,

    // Continuation points for a generator yield.
    GENERATOR_CONTINUATION,

    // Marks constant and veneer pools. Only used on ARM and ARM64.
    // They use a custom noncompact encoding.
    CONST_POOL,
    VENEER_POOL,

    DEOPT_REASON,  // Deoptimization reason index.

    // This is not an actual reloc mode, but used to encode a long pc jump that
    // cannot be encoded as part of another record.
    PC_JUMP,

    // Pseudo-types
    NUMBER_OF_MODES,
    NONE32,             // never recorded 32-bit value
    NONE64,             // never recorded 64-bit value
    CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explicitly by
                        // code aging.

    FIRST_REAL_RELOC_MODE = CODE_TARGET,
    LAST_REAL_RELOC_MODE = VENEER_POOL,
    LAST_CODE_ENUM = DEBUGGER_STATEMENT,
    LAST_GCED_ENUM = CELL,
  };

  STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);

  explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
    DCHECK_NOT_NULL(isolate);
  }

  RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
      : isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
    DCHECK_NOT_NULL(isolate);
  }

  static inline bool IsRealRelocMode(Mode mode) {
    return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
  }
  static inline bool IsCodeTarget(Mode mode) {
    return mode <= LAST_CODE_ENUM;
  }
  static inline bool IsEmbeddedObject(Mode mode) {
    return mode == EMBEDDED_OBJECT;
  }
  static inline bool IsCell(Mode mode) { return mode == CELL; }
  static inline bool IsRuntimeEntry(Mode mode) {
    return mode == RUNTIME_ENTRY;
  }
  // Is the relocation mode affected by GC?
  static inline bool IsGCRelocMode(Mode mode) {
    return mode <= LAST_GCED_ENUM;
  }
  static inline bool IsComment(Mode mode) {
    return mode == COMMENT;
  }
  static inline bool IsConstPool(Mode mode) {
    return mode == CONST_POOL;
  }
  static inline bool IsVeneerPool(Mode mode) {
    return mode == VENEER_POOL;
  }
  static inline bool IsDeoptReason(Mode mode) {
    return mode == DEOPT_REASON;
  }
  static inline bool IsPosition(Mode mode) {
    return mode == POSITION || mode == STATEMENT_POSITION;
  }
  static inline bool IsStatementPosition(Mode mode) {
    return mode == STATEMENT_POSITION;
  }
  static inline bool IsExternalReference(Mode mode) {
    return mode == EXTERNAL_REFERENCE;
  }
  static inline bool IsInternalReference(Mode mode) {
    return mode == INTERNAL_REFERENCE;
  }
  static inline bool IsInternalReferenceEncoded(Mode mode) {
    return mode == INTERNAL_REFERENCE_ENCODED;
  }
  static inline bool IsDebugBreakSlot(Mode mode) {
    return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
           IsDebugBreakSlotAtCall(mode);
  }
  static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
    return mode == DEBUG_BREAK_SLOT_AT_POSITION;
  }
  static inline bool IsDebugBreakSlotAtReturn(Mode mode) {
    return mode == DEBUG_BREAK_SLOT_AT_RETURN;
  }
  static inline bool IsDebugBreakSlotAtCall(Mode mode) {
    return mode == DEBUG_BREAK_SLOT_AT_CALL;
  }
  static inline bool IsDebuggerStatement(Mode mode) {
    return mode == DEBUGGER_STATEMENT;
  }
  static inline bool IsNone(Mode mode) {
    return mode == NONE32 || mode == NONE64;
  }
  static inline bool IsCodeAgeSequence(Mode mode) {
    return mode == CODE_AGE_SEQUENCE;
  }
  static inline bool IsGeneratorContinuation(Mode mode) {
    return mode == GENERATOR_CONTINUATION;
  }
  static inline int ModeMask(Mode mode) { return 1 << mode; }

  // Accessors
  Isolate* isolate() const { return isolate_; }
  byte* pc() const { return pc_; }
  void set_pc(byte* pc) { pc_ = pc; }
  Mode rmode() const { return rmode_; }
  intptr_t data() const { return data_; }
  Code* host() const { return host_; }
  void set_host(Code* host) { host_ = host; }

  // Apply a relocation by delta bytes. When the code object is moved, PC
  // relative addresses have to be updated as well as absolute addresses
  // inside the code (internal references).
  // Do not forget to flush the icache afterwards!
  INLINE(void apply(intptr_t delta));

  // Is the pointer this relocation info refers to coded like a plain pointer
  // or is it strange in some way (e.g. relative or patched into a series of
  // instructions).
  bool IsCodedSpecially();

  // If true, the pointer this relocation info refers to is an entry in the
  // constant pool, otherwise the pointer is embedded in the instruction
  // stream.
  bool IsInConstantPool();

  // Read/modify the target address of the branch/call instruction
  // this relocation applies to;
  // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
  INLINE(Address target_address());
  INLINE(void set_target_address(Address target,
                                 WriteBarrierMode write_barrier_mode =
                                     UPDATE_WRITE_BARRIER,
                                 ICacheFlushMode icache_flush_mode =
                                     FLUSH_ICACHE_IF_NEEDED));
  INLINE(Object* target_object());
  INLINE(Handle<Object> target_object_handle(Assembler* origin));
  INLINE(void set_target_object(Object* target,
                                WriteBarrierMode write_barrier_mode =
                                    UPDATE_WRITE_BARRIER,
                                ICacheFlushMode icache_flush_mode =
                                    FLUSH_ICACHE_IF_NEEDED));
  INLINE(Address target_runtime_entry(Assembler* origin));
  INLINE(void set_target_runtime_entry(Address target,
                                       WriteBarrierMode write_barrier_mode =
                                           UPDATE_WRITE_BARRIER,
                                       ICacheFlushMode icache_flush_mode =
                                           FLUSH_ICACHE_IF_NEEDED));
  INLINE(Cell* target_cell());
  INLINE(Handle<Cell> target_cell_handle());
  INLINE(void set_target_cell(Cell* cell,
                              WriteBarrierMode write_barrier_mode =
                                  UPDATE_WRITE_BARRIER,
                              ICacheFlushMode icache_flush_mode =
                                  FLUSH_ICACHE_IF_NEEDED));
  INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
  INLINE(Code* code_age_stub());
  INLINE(void set_code_age_stub(Code* stub,
                                ICacheFlushMode icache_flush_mode =
                                    FLUSH_ICACHE_IF_NEEDED));

  // Returns the address of the constant pool entry where the target address
  // is held. This should only be called if IsInConstantPool returns true.
  INLINE(Address constant_pool_entry_address());

  // Read the address of the word containing the target_address in an
  // instruction stream. What this means exactly is architecture-independent.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target. Architecture-independent code shouldn't
  // dereference the pointer it gets back from this.
  INLINE(Address target_address_address());

  // This indicates how much space a target takes up when deserializing a code
  // stream. For most architectures this is just the size of a pointer. For
  // an instruction like movw/movt where the target bits are mixed into the
  // instruction bits the size of the target will be zero, indicating that the
  // serializer should not step forwards in memory after a target is resolved
  // and written. In this case the target_address_address function above
  // should return the end of the instructions to be patched, allowing the
  // deserializer to deserialize the instructions as raw bytes and put them in
  // place, ready to be patched with the target.
  INLINE(int target_address_size());

  // Read the reference in the instruction this relocation
  // applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
  INLINE(Address target_external_reference());

  // Read the reference in the instruction this relocation
  // applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
  INLINE(Address target_internal_reference());

  // Return the reference address this relocation applies to;
  // can only be called if rmode_ is INTERNAL_REFERENCE.
  INLINE(Address target_internal_reference_address());

  // Read/modify the address of a call instruction. This is used to relocate
  // the break points where straight-line code is patched with a call
  // instruction.
  INLINE(Address debug_call_address());
  INLINE(void set_debug_call_address(Address target));

  // Wipe out a relocation to a fixed value, used for making snapshots
  // reproducible.
  INLINE(void WipeOut());

  template<typename StaticVisitor> inline void Visit(Heap* heap);
  inline void Visit(Isolate* isolate, ObjectVisitor* v);

  // Check whether this return sequence has been patched
  // with a call to the debugger.
  INLINE(bool IsPatchedReturnSequence());

  // Check whether this debug break slot has been patched with a call to the
  // debugger.
  INLINE(bool IsPatchedDebugBreakSlotSequence());

#ifdef DEBUG
  // Check whether the given code contains relocation information that
  // either is position-relative or movable by the garbage collector.
  static bool RequiresRelocation(const CodeDesc& desc);
#endif

#ifdef ENABLE_DISASSEMBLER
  // Printing
  static const char* RelocModeName(Mode rmode);
  void Print(Isolate* isolate, std::ostream& os);  // NOLINT
#endif  // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
  void Verify(Isolate* isolate);
#endif

  static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
  static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
  static const int kDataMask =
      (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
  static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
                                         1 << DEBUG_BREAK_SLOT_AT_RETURN |
                                         1 << DEBUG_BREAK_SLOT_AT_CALL;
  static const int kApplyMask;  // Modes affected by apply. Depends on arch.

 private:
  Isolate* isolate_;
  // On ARM, note that pc_ is the address of the constant pool entry
  // to be relocated and not the address of the instruction
  // referencing the constant pool entry (except when rmode_ ==
  // comment).
  byte* pc_;
  Mode rmode_;
  intptr_t data_;
  Code* host_;
  friend class RelocIterator;
};
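// Worked example of the mask constants above: with the Mode ordering in this
// header, LAST_CODE_ENUM == DEBUGGER_STATEMENT == 2, so
// kCodeTargetMask == (1 << 3) - 1 == 0x7, i.e. it selects exactly the
// CODE_TARGET, CODE_TARGET_WITH_ID and DEBUGGER_STATEMENT modes.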

// RelocInfoWriter serializes a stream of relocation info. It writes towards
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
 public:
  RelocInfoWriter()
      : pos_(NULL),
        last_pc_(NULL),
        last_id_(0),
        last_position_(0),
        last_mode_(RelocInfo::NUMBER_OF_MODES),
        next_position_candidate_pos_delta_(0),
        next_position_candidate_pc_delta_(0),
        next_position_candidate_flushed_(true) {}
  RelocInfoWriter(byte* pos, byte* pc)
      : pos_(pos),
        last_pc_(pc),
        last_id_(0),
        last_position_(0),
        last_mode_(RelocInfo::NUMBER_OF_MODES),
        next_position_candidate_pos_delta_(0),
        next_position_candidate_pc_delta_(0),
        next_position_candidate_flushed_(true) {}

  byte* pos() const { return pos_; }
  byte* last_pc() const { return last_pc_; }

  void Write(const RelocInfo* rinfo);

  // Update the state of the stream after reloc info buffer
  // and/or code is moved while the stream is active.
  void Reposition(byte* pos, byte* pc) {
    pos_ = pos;
    last_pc_ = pc;
  }

  void Finish() { FlushPosition(); }

  // Max size (bytes) of a written RelocInfo. Longest encoding is
  // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
  // On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
  // On x64 this is 1 + 4 + 1 + 1 + 8 = 15.
  // Here we use the maximum of the two.
  static const int kMaxSize = 15;

 private:
  inline uint32_t WriteLongPCJump(uint32_t pc_delta);

  inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
  inline void WriteShortTaggedData(intptr_t data_delta, int tag);

  inline void WriteMode(RelocInfo::Mode rmode);
  inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
  inline void WriteIntData(int data_delta);
  inline void WriteData(intptr_t data_delta);
  inline void WritePosition(int pc_delta, int pos_delta, RelocInfo::Mode rmode);

  void FlushPosition();

  byte* pos_;
  byte* last_pc_;
  int last_id_;
  int last_position_;
  RelocInfo::Mode last_mode_;
  int next_position_candidate_pos_delta_;
  uint32_t next_position_candidate_pc_delta_;
  bool next_position_candidate_flushed_;

  DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};

// A RelocIterator iterates over relocation information.
// Typical use:
//
//   for (RelocIterator it(code); !it.done(); it.next()) {
//     // do something with it.rinfo() here
//   }
//
// A mask can be specified to skip unwanted modes.
class RelocIterator: public Malloced {
 public:
  // Create a new iterator positioned at
  // the beginning of the reloc info.
  // Relocation information with mode k is included in the
  // iteration iff bit k of mode_mask is set.
  explicit RelocIterator(Code* code, int mode_mask = -1);
  explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);

  // Iteration
  bool done() const { return done_; }
  void next();

  // Return pointer valid until next next().
  RelocInfo* rinfo() {
    DCHECK(!done());
    return &rinfo_;
  }

 private:
  // Advance* moves the position before/after reading.
  // *Read* reads from current byte(s) into rinfo_.
  // *Get* just reads and returns info on current byte.
  void Advance(int bytes = 1) { pos_ -= bytes; }
  int AdvanceGetTag();
  RelocInfo::Mode GetMode();

  void AdvanceReadLongPCJump();

  int GetShortDataTypeTag();
  void ReadShortTaggedPC();
  void ReadShortTaggedId();
  void ReadShortTaggedPosition();
  void ReadShortTaggedData();

  void AdvanceReadPC();
  void AdvanceReadId();
  void AdvanceReadInt();
  void AdvanceReadPosition();
  void AdvanceReadData();

  // If the given mode is wanted, set it in rinfo_ and return true.
  // Else return false. Used for efficiently skipping unwanted modes.
  bool SetMode(RelocInfo::Mode mode) {
    return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
  }

  byte* pos_;
  byte* end_;
  byte* code_age_sequence_;
  RelocInfo rinfo_;
  bool done_;
  int mode_mask_;
  int last_id_;
  int last_position_;
  DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
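// A sketch of restricting the iteration to particular modes (the Code object
// and the per-entry work are placeholders):
//
//   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
//              RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
//   for (RelocIterator it(code, mask); !it.done(); it.next()) {
//     // Only CODE_TARGET and EMBEDDED_OBJECT records are visited here.
//   }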

//------------------------------------------------------------------------------
// External function

//----------------------------------------------------------------------------
class SCTableReference;
class Debug_Address;

// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
// an ExternalReference instance. This is done in order to track the origin of
// all external references in the code so that they can be bound to the correct
// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED {
 public:
  // Used in the simulator to support different native api calls.
  enum Type {
    // Builtin call.
    // Object* f(v8::internal::Arguments).
    BUILTIN_CALL,  // default

    // Builtin that takes float arguments and returns an int.
    // int f(double, double).
    BUILTIN_COMPARE_CALL,

    // Builtin call that returns floating point.
    // double f(double, double).
    BUILTIN_FP_FP_CALL,

    // Builtin call that returns floating point.
    // double f(double).
    BUILTIN_FP_CALL,

    // Builtin call that returns floating point.
    // double f(double, int).
    BUILTIN_FP_INT_CALL,

    // Direct call to API function callback.
    // void f(v8::FunctionCallbackInfo&)
    DIRECT_API_CALL,

    // Call to function callback via InvokeFunctionCallback.
    // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
    PROFILING_API_CALL,

    // Direct call to accessor getter callback.
    // void f(Local<Name> property, PropertyCallbackInfo& info)
    DIRECT_GETTER_CALL,

    // Call to accessor getter callback via InvokeAccessorGetterCallback.
    // void f(Local<Name> property, PropertyCallbackInfo& info,
    //        AccessorNameGetterCallback callback)
    PROFILING_GETTER_CALL
  };

  static void SetUp();
  static void InitializeMathExpData();
  static void TearDownMathExpData();

  typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
                                            Type type);

  ExternalReference() : address_(NULL) {}

  ExternalReference(Builtins::CFunctionId id, Isolate* isolate);

  ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);

  ExternalReference(Builtins::Name name, Isolate* isolate);

  ExternalReference(Runtime::FunctionId id, Isolate* isolate);

  ExternalReference(const Runtime::Function* f, Isolate* isolate);

  explicit ExternalReference(StatsCounter* counter);

  ExternalReference(Isolate::AddressId id, Isolate* isolate);

  explicit ExternalReference(const SCTableReference& table_ref);

  // Isolate as an external reference.
  static ExternalReference isolate_address(Isolate* isolate);

  // One-of-a-kind references. These references are not part of a general
  // pattern. This means that they have to be added to the
  // ExternalReferenceTable in serialize.cc manually.

  static ExternalReference incremental_marking_record_write_function(
      Isolate* isolate);
  static ExternalReference store_buffer_overflow_function(
      Isolate* isolate);
  static ExternalReference delete_handle_scope_extensions(Isolate* isolate);

  static ExternalReference get_date_field_function(Isolate* isolate);
  static ExternalReference date_cache_stamp(Isolate* isolate);

  static ExternalReference get_make_code_young_function(Isolate* isolate);
  static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);

  // Deoptimization support.
  static ExternalReference new_deoptimizer_function(Isolate* isolate);
  static ExternalReference compute_output_frames_function(Isolate* isolate);

  // Log support.
  static ExternalReference log_enter_external_function(Isolate* isolate);
  static ExternalReference log_leave_external_function(Isolate* isolate);

  // Static data in the keyed lookup cache.
  static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
  static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);

  // Static variable Heap::roots_array_start()
  static ExternalReference roots_array_start(Isolate* isolate);

  // Static variable Heap::allocation_sites_list_address()
  static ExternalReference allocation_sites_list_address(Isolate* isolate);

  // Static variable StackGuard::address_of_jslimit()
  static ExternalReference address_of_stack_limit(Isolate* isolate);

  // Static variable StackGuard::address_of_real_jslimit()
  static ExternalReference address_of_real_stack_limit(Isolate* isolate);

  // Static variable RegExpStack::limit_address()
  static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);

  // Static variables for RegExp.
  static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
  static ExternalReference address_of_regexp_stack_memory_address(
      Isolate* isolate);
  static ExternalReference address_of_regexp_stack_memory_size(
      Isolate* isolate);

  // Static variable Heap::NewSpaceStart()
  static ExternalReference new_space_start(Isolate* isolate);
  static ExternalReference new_space_mask(Isolate* isolate);

  // Write barrier.
  static ExternalReference store_buffer_top(Isolate* isolate);

  // Used for fast allocation in generated code.
  static ExternalReference new_space_allocation_top_address(Isolate* isolate);
  static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
  static ExternalReference old_space_allocation_top_address(Isolate* isolate);
  static ExternalReference old_space_allocation_limit_address(Isolate* isolate);

  static ExternalReference mod_two_doubles_operation(Isolate* isolate);
  static ExternalReference power_double_double_function(Isolate* isolate);
  static ExternalReference power_double_int_function(Isolate* isolate);

  static ExternalReference handle_scope_next_address(Isolate* isolate);
  static ExternalReference handle_scope_limit_address(Isolate* isolate);
  static ExternalReference handle_scope_level_address(Isolate* isolate);

  static ExternalReference scheduled_exception_address(Isolate* isolate);
  static ExternalReference address_of_pending_message_obj(Isolate* isolate);

  // Static variables containing common double constants.
  static ExternalReference address_of_min_int();
  static ExternalReference address_of_one_half();
  static ExternalReference address_of_minus_one_half();
  static ExternalReference address_of_negative_infinity();
  static ExternalReference address_of_the_hole_nan();
  static ExternalReference address_of_uint32_bias();

  static ExternalReference math_log_double_function(Isolate* isolate);

  static ExternalReference math_exp_constants(int constant_index);
  static ExternalReference math_exp_log_table();

  static ExternalReference page_flags(Page* page);

  static ExternalReference ForDeoptEntry(Address entry);

  static ExternalReference cpu_features();

  static ExternalReference debug_is_active_address(Isolate* isolate);
  static ExternalReference debug_after_break_target_address(Isolate* isolate);

  static ExternalReference is_profiling_address(Isolate* isolate);
  static ExternalReference invoke_function_callback(Isolate* isolate);
  static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);

  static ExternalReference virtual_handler_register(Isolate* isolate);
  static ExternalReference virtual_slot_register(Isolate* isolate);

  static ExternalReference runtime_function_table_address(Isolate* isolate);

  Address address() const { return reinterpret_cast<Address>(address_); }

  // Used to check if single stepping is enabled in generated code.
  static ExternalReference debug_step_in_enabled_address(Isolate* isolate);

#ifndef V8_INTERPRETED_REGEXP
  // C functions called from RegExp generated code.

  // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
  static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);

  // Function RegExpMacroAssembler*::CheckStackGuardState()
  static ExternalReference re_check_stack_guard_state(Isolate* isolate);

  // Function NativeRegExpMacroAssembler::GrowStack()
  static ExternalReference re_grow_stack(Isolate* isolate);

  // byte NativeRegExpMacroAssembler::word_character_bitmap
  static ExternalReference re_word_character_map();

#endif

  // This lets you register a function that rewrites all external references.
  // Used by the ARM simulator to catch calls to external references.
  static void set_redirector(Isolate* isolate,
                             ExternalReferenceRedirector* redirector) {
    // We can't stack them.
    DCHECK(isolate->external_reference_redirector() == NULL);
    isolate->set_external_reference_redirector(
        reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
  }

  static ExternalReference stress_deopt_count(Isolate* isolate);

  static ExternalReference fixed_typed_array_base_data_offset();

 private:
  explicit ExternalReference(void* address)
      : address_(address) {}

  static void* Redirect(Isolate* isolate,
                        Address address_arg,
                        Type type = ExternalReference::BUILTIN_CALL) {
    ExternalReferenceRedirector* redirector =
        reinterpret_cast<ExternalReferenceRedirector*>(
            isolate->external_reference_redirector());
    void* address = reinterpret_cast<void*>(address_arg);
    void* answer = (redirector == NULL)
                       ? address
                       : (*redirector)(isolate, address, type);
    return answer;
  }

  void* address_;
};

bool operator==(ExternalReference, ExternalReference);
bool operator!=(ExternalReference, ExternalReference);

size_t hash_value(ExternalReference);

std::ostream& operator<<(std::ostream&, ExternalReference);
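// Illustrative use of ExternalReference (a sketch; CallCFunction stands in
// for whatever macro-assembler helper actually consumes the reference):
//
//   ExternalReference power =
//       ExternalReference::power_double_double_function(isolate);
//   // power.address() is the raw C++ entry point; in a simulator build a
//   // redirector installed via set_redirector() may have replaced it with a
//   // trampoline (see Redirect() above).
//   __ CallCFunction(power, 2);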

// -----------------------------------------------------------------------------
// Position recording support

struct PositionState {
  PositionState() : current_position(RelocInfo::kNoPosition),
                    written_position(RelocInfo::kNoPosition),
                    current_statement_position(RelocInfo::kNoPosition),
                    written_statement_position(RelocInfo::kNoPosition) {}

  int current_position;
  int written_position;

  int current_statement_position;
  int written_statement_position;
};

class PositionsRecorder BASE_EMBEDDED {
 public:
  explicit PositionsRecorder(Assembler* assembler)
      : assembler_(assembler) {
    jit_handler_data_ = NULL;
  }

  void AttachJITHandlerData(void* user_data) {
    jit_handler_data_ = user_data;
  }

  void* DetachJITHandlerData() {
    void* old_data = jit_handler_data_;
    jit_handler_data_ = NULL;
    return old_data;
  }

  // Set current position to pos.
  void RecordPosition(int pos);

  // Set current statement position to pos.
  void RecordStatementPosition(int pos);

  // Write recorded positions to relocation information.
  bool WriteRecordedPositions();

  int current_position() const { return state_.current_position; }

  int current_statement_position() const {
    return state_.current_statement_position;
  }

 private:
  Assembler* assembler_;
  PositionState state_;

  // Currently jit_handler_data_ is used to store JITHandler-specific data
  // over the lifetime of a PositionsRecorder.
  void* jit_handler_data_;

  DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
};

// -----------------------------------------------------------------------------
// Utility functions

inline int NumberOfBitsSet(uint32_t x) {
  unsigned int num_bits_set;
  for (num_bits_set = 0; x; x >>= 1) {
    num_bits_set += x & 1;
  }
  return num_bits_set;
}

bool EvalComparison(Token::Value op, double op1, double op2);

// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);

// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class CallWrapper {
 public:
  CallWrapper() { }
  virtual ~CallWrapper() { }
  // Called just before emitting a call. Argument is the size of the generated
  // call code.
  virtual void BeforeCall(int call_size) const = 0;
  // Called just after emitting a call, i.e., at the return site for the call.
  virtual void AfterCall() const = 0;
  // Return whether call needs to check for debug stepping.
  virtual bool NeedsDebugStepCheck() const { return false; }
};

class NullCallWrapper : public CallWrapper {
 public:
  NullCallWrapper() { }
  virtual ~NullCallWrapper() { }
  virtual void BeforeCall(int call_size) const { }
  virtual void AfterCall() const { }
};

class CheckDebugStepCallWrapper : public CallWrapper {
 public:
  CheckDebugStepCallWrapper() {}
  virtual ~CheckDebugStepCallWrapper() {}
  virtual void BeforeCall(int call_size) const {}
  virtual void AfterCall() const {}
  virtual bool NeedsDebugStepCheck() const { return true; }
};

// -----------------------------------------------------------------------------
// Constant pool support

class ConstantPoolEntry {
 public:
  ConstantPoolEntry() {}
  ConstantPoolEntry(int position, intptr_t value, bool sharing_ok)
      : position_(position),
        merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
        value_(value) {}
  ConstantPoolEntry(int position, double value)
      : position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {}

  int position() const { return position_; }
  bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
  bool is_merged() const { return merged_index_ >= 0; }
  int merged_index(void) const {
    DCHECK(is_merged());
    return merged_index_;
  }
  void set_merged_index(int index) {
    merged_index_ = index;
    DCHECK(is_merged());
  }
  int offset(void) const {
    DCHECK(merged_index_ >= 0);
    return merged_index_;
  }
  void set_offset(int offset) {
    DCHECK(offset >= 0);
    merged_index_ = offset;
  }
  intptr_t value() const { return value_; }
  uint64_t value64() const { return bit_cast<uint64_t>(value64_); }

  enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };

  static int size(Type type) {
    return (type == INTPTR) ? kPointerSize : kDoubleSize;
  }

  enum Access { REGULAR, OVERFLOWED };

 private:
  int position_;
  int merged_index_;
  union {
    intptr_t value_;
    double value64_;
  };
  enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
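// Worked example of the merged_index_ encoding above (positions and values
// are hypothetical):
//
//   ConstantPoolEntry e(/* position */ 8, /* value */ 42, /* sharing_ok */ true);
//   // merged_index_ == SHARING_ALLOWED (-1): e.sharing_ok(), !e.is_merged().
//   // An entry that duplicates an already recorded shareable value can be
//   // marked with set_merged_index(i), i >= 0; is_merged() then holds and
//   // merged_index() returns i. Once the pool is laid out, set_offset()
//   // reuses the same field to store the entry's byte offset in the pool.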

// -----------------------------------------------------------------------------
// Embedded constant pool support

class ConstantPoolBuilder BASE_EMBEDDED {
 public:
  ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

  // Add pointer-sized constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, intptr_t value,
                                     bool sharing_ok) {
    ConstantPoolEntry entry(position, value, sharing_ok);
    return AddEntry(entry, ConstantPoolEntry::INTPTR);
  }

  // Add double constant to the embedded constant pool
  ConstantPoolEntry::Access AddEntry(int position, double value) {
    ConstantPoolEntry entry(position, value);
    return AddEntry(entry, ConstantPoolEntry::DOUBLE);
  }

  // Previews the access type required for the next new entry to be added.
  ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;

  bool IsEmpty() {
    return info_[ConstantPoolEntry::INTPTR].entries.empty() &&
           info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
           info_[ConstantPoolEntry::DOUBLE].shared_entries.empty();
  }

  // Emit the constant pool. Invoke only after all entries have been
  // added and all instructions have been emitted.
  // Returns position of the emitted pool (zero implies no constant pool).
  int Emit(Assembler* assm);

  // Returns the label associated with the start of the constant pool.
  // Linking to this label in the function prologue may provide an
  // efficient means of constant pool pointer register initialization
  // on some architectures.
  inline Label* EmittedPosition() { return &emitted_label_; }

 private:
  ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
                                     ConstantPoolEntry::Type type);
  void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
  void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
                 ConstantPoolEntry::Type type);

  struct PerTypeEntryInfo {
    PerTypeEntryInfo() : regular_count(0), overflow_start(-1) {}
    bool overflow() const {
      return (overflow_start >= 0 &&
              overflow_start < static_cast<int>(entries.size()));
    }
    int regular_reach_bits;
    int regular_count;
    int overflow_start;
    std::vector<ConstantPoolEntry> entries;
    std::vector<ConstantPoolEntry> shared_entries;
  };

  Label emitted_label_;  // Records pc_offset of emitted pool
  PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};

}  // namespace internal
}  // namespace v8
#endif  // V8_ASSEMBLER_H_