// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.

// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5

#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_

#include <stdio.h>

#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"

namespace v8 {
namespace internal {

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
//    compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
//    the register initialization to depend on the particular initialization
//    order (which appears to be different on OS X, Linux, and Windows for
//    the installed versions of C++ we tried). Using a struct permits C-style
//    "initialization". Also, the Register objects cannot be const as this
//    forces initialization stubs in MSVC, making us dependent on
//    initialization order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
//    doing certain constant folds, which may significantly reduce the
//    code generated for some assembly instructions (because they boil down
//    to a few constants). If this is a problem, we could change the code
//    such that we use an enum in optimized mode, and the struct in debug
//    mode. This way we get the compile-time error checking in debug mode
//    and best performance in optimized code.
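//
// (As an illustration of the C-style "initialization" mentioned in 2), the
//  register constants defined below are simply brace-initialized structs,
//  e.g.: const Register r5 = { 5 };)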

// Core register
struct Register {
  static const int kNumRegisters = 16;
  static const int kNumAllocatableRegisters = 8;

  static int ToAllocationIndex(Register reg) {
    ASSERT(reg.code() < kNumAllocatableRegisters);
    return reg.code();
  }

  static Register FromAllocationIndex(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    return from_code(index);
  }

  static const char* AllocationIndexToString(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    const char* const names[] = {
      "r0",
      "r1",
      "r2",
      "r3",
      "r4",
      "r5",
      "r6",
      "r7",
    };
    return names[index];
  }

  static Register from_code(int code) {
    Register r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
  bool is(Register reg) const { return code_ == reg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  void set_code(int code) {
    code_ = code;
    ASSERT(is_valid());
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};

const Register no_reg = { -1 };

const Register r0  = {  0 };
const Register r1  = {  1 };
const Register r2  = {  2 };
const Register r3  = {  3 };
const Register r4  = {  4 };
const Register r5  = {  5 };
const Register r6  = {  6 };
const Register r7  = {  7 };
const Register r8  = {  8 };  // Used as context register.
const Register r9  = {  9 };  // Used as lithium codegen scratch register.
const Register r10 = { 10 };  // Used as roots register.
const Register fp  = { 11 };
const Register ip  = { 12 };
const Register sp  = { 13 };
const Register lr  = { 14 };
const Register pc  = { 15 };


// Single word VFP register.
struct SwVfpRegister {
  bool is_valid() const { return 0 <= code_ && code_ < 32; }
  bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }
  void split_code(int* vm, int* m) const {
    ASSERT(is_valid());
    *m = code_ & 0x1;
    *vm = code_ >> 1;
  }

  int code_;
};


// Double word VFP register.
struct DwVfpRegister {
  // d0 has been excluded from allocation. This is following ia32
  // where xmm0 is excluded. This should be revisited.
  // Currently d0 is used as a scratch register.
  // d1 has also been excluded from allocation to be used as a scratch
  // register as well.
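  // As a consequence, allocation indices below are shifted by one relative
  // to register codes: index i corresponds to d(i + 1), e.g.
  // ToAllocationIndex(d1) == 0 and FromAllocationIndex(14) yields d15.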
  static const int kNumRegisters = 16;
  static const int kNumAllocatableRegisters = 15;

  static int ToAllocationIndex(DwVfpRegister reg) {
    ASSERT(reg.code() != 0);
    return reg.code() - 1;
  }

  static DwVfpRegister FromAllocationIndex(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    return from_code(index + 1);
  }

  static const char* AllocationIndexToString(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    const char* const names[] = {
      "d1",
      "d2",
      "d3",
      "d4",
      "d5",
      "d6",
      "d7",
      "d8",
      "d9",
      "d10",
      "d11",
      "d12",
      "d13",
      "d14",
      "d15"
    };
    return names[index];
  }

  static DwVfpRegister from_code(int code) {
    DwVfpRegister r = { code };
    return r;
  }

  // Supporting d0 to d15, can be later extended to d31.
  bool is_valid() const { return 0 <= code_ && code_ < 16; }
  bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
  SwVfpRegister low() const {
    SwVfpRegister reg;
    reg.code_ = code_ * 2;

    ASSERT(reg.is_valid());
    return reg;
  }
  SwVfpRegister high() const {
    SwVfpRegister reg;
    reg.code_ = (code_ * 2) + 1;

    ASSERT(reg.is_valid());
    return reg;
  }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }
  void split_code(int* vm, int* m) const {
    ASSERT(is_valid());
    *m = (code_ & 0x10) >> 4;
    *vm = code_ & 0x0F;
  }

  int code_;
};


typedef DwVfpRegister DoubleRegister;


// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0  = {  0 };
const SwVfpRegister s1  = {  1 };
const SwVfpRegister s2  = {  2 };
const SwVfpRegister s3  = {  3 };
const SwVfpRegister s4  = {  4 };
const SwVfpRegister s5  = {  5 };
const SwVfpRegister s6  = {  6 };
const SwVfpRegister s7  = {  7 };
const SwVfpRegister s8  = {  8 };
const SwVfpRegister s9  = {  9 };
const SwVfpRegister s10 = { 10 };
const SwVfpRegister s11 = { 11 };
const SwVfpRegister s12 = { 12 };
const SwVfpRegister s13 = { 13 };
const SwVfpRegister s14 = { 14 };
const SwVfpRegister s15 = { 15 };
const SwVfpRegister s16 = { 16 };
const SwVfpRegister s17 = { 17 };
const SwVfpRegister s18 = { 18 };
const SwVfpRegister s19 = { 19 };
const SwVfpRegister s20 = { 20 };
const SwVfpRegister s21 = { 21 };
const SwVfpRegister s22 = { 22 };
const SwVfpRegister s23 = { 23 };
const SwVfpRegister s24 = { 24 };
const SwVfpRegister s25 = { 25 };
const SwVfpRegister s26 = { 26 };
const SwVfpRegister s27 = { 27 };
const SwVfpRegister s28 = { 28 };
const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };

const DwVfpRegister no_dreg = { -1 };
const DwVfpRegister d0  = {  0 };
const DwVfpRegister d1  = {  1 };
const DwVfpRegister d2  = {  2 };
const DwVfpRegister d3  = {  3 };
const DwVfpRegister d4  = {  4 };
const DwVfpRegister d5  = {  5 };
const DwVfpRegister d6  = {  6 };
const DwVfpRegister d7  = {  7 };
const DwVfpRegister d8  = {  8 };
const DwVfpRegister d9  = {  9 };
const DwVfpRegister d10 = { 10 };
const DwVfpRegister d11 = { 11 };
const DwVfpRegister d12 = { 12 };
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
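
// (Illustrating the s/d overlap noted above: d7.low() is s14 and
//  d7.high() is s15; see DwVfpRegister::low()/high().)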

// Aliases for double registers.
const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
const DwVfpRegister kLastCalleeSavedDoubleReg = d15;


// Coprocessor register
struct CRegister {
  bool is_valid() const { return 0 <= code_ && code_ < 16; }
  bool is(CRegister creg) const { return code_ == creg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};


const CRegister no_creg = { -1 };

const CRegister cr0  = {  0 };
const CRegister cr1  = {  1 };
const CRegister cr2  = {  2 };
const CRegister cr3  = {  3 };
const CRegister cr4  = {  4 };
const CRegister cr5  = {  5 };
const CRegister cr6  = {  6 };
const CRegister cr7  = {  7 };
const CRegister cr8  = {  8 };
const CRegister cr9  = {  9 };
const CRegister cr10 = { 10 };
const CRegister cr11 = { 11 };
const CRegister cr12 = { 12 };
const CRegister cr13 = { 13 };
const CRegister cr14 = { 14 };
const CRegister cr15 = { 15 };


// Coprocessor number
enum Coprocessor {
  p0  = 0,
  p1  = 1,
  p2  = 2,
  p3  = 3,
  p4  = 4,
  p5  = 5,
  p6  = 6,
  p7  = 7,
  p8  = 8,
  p9  = 9,
  p10 = 10,
  p11 = 11,
  p12 = 12,
  p13 = 13,
  p14 = 14,
  p15 = 15
};


// -----------------------------------------------------------------------------
// Machine instruction Operands

// Class Operand represents a shifter operand in data processing instructions
class Operand BASE_EMBEDDED {
 public:
  // immediate
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // rm
  INLINE(explicit Operand(Register rm));

  // rm <shift_op> shift_imm
  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);

  // rm <shift_op> rs
  explicit Operand(Register rm, ShiftOp shift_op, Register rs);

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  // Return true if this operand fits in one instruction so that no
  // 2-instruction solution with a load into the ip register is necessary. If
  // the instruction this operand is used for is a MOV or MVN instruction the
  // actual instruction to use is required for this calculation. For other
  // instructions instr is ignored.
  bool is_single_instruction(Instr instr = 0) const;
  bool must_use_constant_pool() const;

  inline int32_t immediate() const {
    ASSERT(!rm_.is_valid());
    return imm32_;
  }

  Register rm() const { return rm_; }
  Register rs() const { return rs_; }
  ShiftOp shift_op() const { return shift_op_; }

 private:
  Register rm_;
  Register rs_;
  ShiftOp shift_op_;
  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
  int32_t imm32_;  // valid if rm_ == no_reg
  RelocInfo::Mode rmode_;

  friend class Assembler;
};
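
// (Illustrative shifter operands, with arbitrary register choices; the forms
//  correspond to the constructors above:
//    Operand(r1)              // plain register
//    Operand(r1, LSL, 2)      // register shifted by an immediate
//    Operand(r1, LSR, r2)     // register shifted by a register
//    Operand(42)              // immediate
//  ShiftOp values such as LSL/LSR come from constants-arm.h.)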


// Class MemOperand represents a memory operand in load and store instructions
class MemOperand BASE_EMBEDDED {
 public:
  // [rn +/- offset]      Offset/NegOffset
  // [rn +/- offset]!     PreIndex/NegPreIndex
  // [rn], +/- offset     PostIndex/NegPostIndex
  // offset is any signed 32-bit value; offset is first loaded to register ip
  // if it does not fit the addressing mode (12-bit unsigned and sign bit)
  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);

  // [rn +/- rm]          Offset/NegOffset
  // [rn +/- rm]!         PreIndex/NegPreIndex
  // [rn], +/- rm         PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);

  // [rn +/- rm <shift_op> shift_imm]     Offset/NegOffset
  // [rn +/- rm <shift_op> shift_imm]!    PreIndex/NegPreIndex
  // [rn], +/- rm <shift_op> shift_imm    PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm,
                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);

  void set_offset(int32_t offset) {
    ASSERT(rm_.is(no_reg));
    offset_ = offset;
  }

  uint32_t offset() const {
    ASSERT(rm_.is(no_reg));
    return offset_;
  }

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }

  bool OffsetIsUint12Encodable() const {
    return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
  }

 private:
  Register rn_;     // base
  Register rm_;     // register offset
  int32_t offset_;  // valid if rm_ == no_reg
  ShiftOp shift_op_;
  int shift_imm_;   // valid if rm_ != no_reg && rs_ == no_reg
  AddrMode am_;     // bits P, U, and W

  friend class Assembler;
};

// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures : public AllStatic {
 public:
  // Detect features of the target CPU. Set safe defaults if the serializer
  // is enabled (snapshots must be portable).
  static void Probe();

  // Check whether a feature is supported by the target CPU.
  static bool IsSupported(CpuFeature f) {
    ASSERT(initialized_);
    if (f == VFP3 && !FLAG_enable_vfp3) return false;
    return (supported_ & (1u << f)) != 0;
  }

#ifdef DEBUG
  // Check whether a feature is currently enabled.
  static bool IsEnabled(CpuFeature f) {
    ASSERT(initialized_);
    Isolate* isolate = Isolate::UncheckedCurrent();
    if (isolate == NULL) {
      // When no isolate is available, work as if we're running in
      // release mode.
      return IsSupported(f);
    }
    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
    return (enabled & (1u << f)) != 0;
  }
#endif

  // Enable a specified feature within a scope.
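  // A typical usage sketch (VFP3 is one of the CpuFeature values checked
  // above):
  //
  //   if (CpuFeatures::IsSupported(VFP3)) {
  //     CpuFeatures::Scope scope(VFP3);
  //     // Generate VFP3 instructions here.
  //   } else {
  //     // Generate alternative code.
  //   }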
  class Scope BASE_EMBEDDED {
#ifdef DEBUG
   public:
    explicit Scope(CpuFeature f) {
      unsigned mask = 1u << f;
      ASSERT(CpuFeatures::IsSupported(f));
      ASSERT(!Serializer::enabled() ||
             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
      isolate_ = Isolate::UncheckedCurrent();
      old_enabled_ = 0;
      if (isolate_ != NULL) {
        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
      }
    }
    ~Scope() {
      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
      if (isolate_ != NULL) {
        isolate_->set_enabled_cpu_features(old_enabled_);
      }
    }

   private:
    Isolate* isolate_;
    unsigned old_enabled_;
#else
   public:
    explicit Scope(CpuFeature f) {}
#endif
  };

  class TryForceFeatureScope BASE_EMBEDDED {
   public:
    explicit TryForceFeatureScope(CpuFeature f)
        : old_supported_(CpuFeatures::supported_) {
      if (CanForce()) {
        CpuFeatures::supported_ |= (1u << f);
      }
    }

    ~TryForceFeatureScope() {
      if (CanForce()) {
        CpuFeatures::supported_ = old_supported_;
      }
    }

   private:
    static bool CanForce() {
      // It's only safe to temporarily force support of CPU features
      // when there's only a single isolate, which is guaranteed when
      // the serializer is enabled.
      return Serializer::enabled();
    }

    const unsigned old_supported_;
  };

 private:
#ifdef DEBUG
  static bool initialized_;
#endif
  static unsigned supported_;
  static unsigned found_by_runtime_probing_;

  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};


extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;

extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;

extern const Instr kMovLeaveCCMask;
extern const Instr kMovLeaveCCPattern;
extern const Instr kMovwMask;
extern const Instr kMovwPattern;
extern const Instr kMovwLeaveCCFlip;

extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;


class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
  ~Assembler();
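
  // (A minimal usage sketch with an assembler-owned buffer; the 256-byte
  //  initial size and the register choices are arbitrary illustrations:
  //
  //    Assembler assm(isolate, NULL, 256);
  //    assm.add(r0, r0, Operand(1));
  //    assm.bx(lr);
  //    CodeDesc desc;
  //    assm.GetCode(&desc);)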

  // Overrides the default provided by FLAG_debug_code.
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D)
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void bind(Label* L);  // binds an unbound label L to the current code position

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  int branch_offset(Label* L, bool jump_elimination_allowed);

  // Puts a label's target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc.
  INLINE(static Address target_address_address_at(Address pc));

  // Read/Modify the code target address in the branch/call instruction at pc.
  INLINE(static Address target_address_at(Address pc));
  INLINE(static void set_target_address_at(Address pc, Address target));

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void set_target_at(Address constant_pool_entry, Address target);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches to runtime code.
  inline static void set_external_target_at(Address constant_pool_entry,
                                            Address target) {
    set_target_at(constant_pool_entry, target);
  }

  // Here we are patching the address in the constant pool, not the actual call
  // instruction. The address in the constant pool is the same size as a
  // pointer.
  static const int kCallTargetSize = kPointerSize;
  static const int kExternalTargetSize = kPointerSize;

  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Distance between the instruction referring to the address of the call
  // target and the return address.
#ifdef USE_BLX
  // Call sequence is:
  //  ldr  ip, [pc, #...] @ call address
  //  blx  ip
  //                      @ return address
  static const int kCallTargetAddressOffset = 2 * kInstrSize;
#else
  // Call sequence is:
  //  mov  lr, pc
  //  ldr  pc, [pc, #...] @ call address
  //                      @ return address
  static const int kCallTargetAddressOffset = kInstrSize;
#endif

  // Distance between start of patched return sequence and the emitted address
  // to jump to.
#ifdef USE_BLX
  // Patched return sequence is:
  //  ldr  ip, [pc, #0]   @ emitted address and start
  //  blx  ip
  static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
#else
  // Patched return sequence is:
  //  mov  lr, pc         @ start of sequence
  //  ldr  pc, [pc, #-4]  @ emitted address
  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
#endif

  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
#ifdef USE_BLX
  // Patched debug break slot code is:
  //  ldr  ip, [pc, #0]   @ emitted address and start
  //  blx  ip
  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
#else
  // Patched debug break slot code is:
  //  mov  lr, pc         @ start of sequence
  //  ldr  pc, [pc, #-4]  @ emitted address
  static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
#endif

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 8;

  static const int kJSReturnSequenceInstructions = 4;
  static const int kDebugBreakSlotInstructions = 3;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;

  // ---------------------------------------------------------------------------
  // Code generation

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Aligns code to something that's optimal for a jump target for the
  // platform.
  void CodeTargetAlign();

  // Branch instructions
  void b(int branch_offset, Condition cond = al);
  void bl(int branch_offset, Condition cond = al);
  void blx(int branch_offset);  // v5 and above
  void blx(Register target, Condition cond = al);  // v5 and above
  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t

  // Convenience branch instructions using labels
  void b(Label* L, Condition cond = al) {
    b(branch_offset(L, cond == al), cond);
  }
  void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
  void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
  void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
  void blx(Label* L) { blx(branch_offset(L, false)); }  // v5 and above

  // Data-processing instructions
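  // (Illustrative calls, with arbitrary registers; each instruction takes an
  //  optional SBit to set the condition flags and an optional Condition:
  //    add(r0, r1, Operand(r2), SetCC);     // adds r0, r1, r2
  //    mov(r0, Operand(0), LeaveCC, eq);    // moveq r0, #0
  //  SetCC/LeaveCC and eq come from constants-arm.h.)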

  void and_(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);

  void eor(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void sub(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void sub(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al) {
    sub(dst, src1, Operand(src2), s, cond);
  }

  void rsb(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void add(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void add(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al) {
    add(dst, src1, Operand(src2), s, cond);
  }

  void adc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void sbc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void rsc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void tst(Register src1, const Operand& src2, Condition cond = al);
  void tst(Register src1, Register src2, Condition cond = al) {
    tst(src1, Operand(src2), cond);
  }

  void teq(Register src1, const Operand& src2, Condition cond = al);

  void cmp(Register src1, const Operand& src2, Condition cond = al);
  void cmp(Register src1, Register src2, Condition cond = al) {
    cmp(src1, Operand(src2), cond);
  }
  void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);

  void cmn(Register src1, const Operand& src2, Condition cond = al);

  void orr(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void orr(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al) {
    orr(dst, src1, Operand(src2), s, cond);
  }

  void mov(Register dst, const Operand& src,
           SBit s = LeaveCC, Condition cond = al);
  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
    mov(dst, Operand(src), s, cond);
  }

  // ARMv7 instructions for loading a 32 bit immediate in two instructions.
  // This may actually emit a different mov instruction, but on an ARMv7 it
  // is guaranteed to only emit one instruction.
  void movw(Register reg, uint32_t immediate, Condition cond = al);
  // The constant for movt should be in the range 0-0xffff.
  void movt(Register reg, uint32_t immediate, Condition cond = al);

  void bic(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);

  void mvn(Register dst, const Operand& src,
           SBit s = LeaveCC, Condition cond = al);

  // Multiply instructions

  void mla(Register dst, Register src1, Register src2, Register srcA,
           SBit s = LeaveCC, Condition cond = al);

  void mul(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al);

  void smlal(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void smull(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void umlal(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  void umull(Register dstL, Register dstH, Register src1, Register src2,
             SBit s = LeaveCC, Condition cond = al);

  // Miscellaneous arithmetic instructions

  void clz(Register dst, Register src, Condition cond = al);  // v5 and above

  // Saturating instructions. v6 and above.

  // Unsigned saturate.
  //
  // Saturate an optionally shifted signed value to an unsigned range.
  //
  //   usat dst, #satpos, src
  //   usat dst, #satpos, src, lsl #sh
  //   usat dst, #satpos, src, asr #sh
  //
  // Register dst will contain:
  //
  //   0,                 if s < 0
  //   (1 << satpos) - 1, if s > ((1 << satpos) - 1)
  //   s,                 otherwise
  //
  // where s is the contents of src after shifting (if used).
  void usat(Register dst, int satpos, const Operand& src, Condition cond = al);

  // Bitfield manipulation instructions. v7 and above.
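  // (For reference: ubfx/sbfx extract a width-bit field starting at bit lsb
  //  of src into dst, zero- or sign-extended respectively; bfc clears such a
  //  field in dst; bfi inserts the low width bits of src into dst at lsb.)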

  void ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void bfc(Register dst, int lsb, int width, Condition cond = al);

  void bfi(Register dst, Register src, int lsb, int width,
           Condition cond = al);

  // Status register access instructions

  void mrs(Register dst, SRegister s, Condition cond = al);
  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);

  // Load/Store instructions
  void ldr(Register dst, const MemOperand& src, Condition cond = al);
  void str(Register src, const MemOperand& dst, Condition cond = al);
  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
  void strb(Register src, const MemOperand& dst, Condition cond = al);
  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
  void strh(Register src, const MemOperand& dst, Condition cond = al);
  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
  void ldrd(Register dst1,
            Register dst2,
            const MemOperand& src, Condition cond = al);
  void strd(Register src1,
            Register src2,
            const MemOperand& dst, Condition cond = al);

  // Load/Store multiple instructions
  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);

  // Exception-generating instructions and debugging support
  void stop(const char* msg,
            Condition cond = al,
            int32_t code = kDefaultStopCode);

  void bkpt(uint32_t imm16);  // v5 and above
  void svc(uint32_t imm24, Condition cond = al);

  // Coprocessor instructions

  void cdp(Coprocessor coproc, int opcode_1,
           CRegister crd, CRegister crn, CRegister crm,
           int opcode_2, Condition cond = al);

  void cdp2(Coprocessor coproc, int opcode_1,
            CRegister crd, CRegister crn, CRegister crm,
            int opcode_2);  // v5 and above

  void mcr(Coprocessor coproc, int opcode_1,
           Register rd, CRegister crn, CRegister crm,
           int opcode_2 = 0, Condition cond = al);

  void mcr2(Coprocessor coproc, int opcode_1,
            Register rd, CRegister crn, CRegister crm,
            int opcode_2 = 0);  // v5 and above

  void mrc(Coprocessor coproc, int opcode_1,
           Register rd, CRegister crn, CRegister crm,
           int opcode_2 = 0, Condition cond = al);

  void mrc2(Coprocessor coproc, int opcode_1,
            Register rd, CRegister crn, CRegister crm,
            int opcode_2 = 0);  // v5 and above

  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
           LFlag l = Short, Condition cond = al);
  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
           LFlag l = Short, Condition cond = al);

  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
            LFlag l = Short);  // v5 and above
  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
            LFlag l = Short);  // v5 and above

  // Support for VFP.
  // All these APIs support S0 to S31 and D0 to D15.
  // Currently these APIs do not support extended D registers, i.e., D16 to
  // D31. However, some simple modifications can allow these APIs to support
  // D16 to D31.
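  //
  // (A short illustrative sequence, with arbitrary registers and offsets,
  //  emitted inside a CpuFeatures::Scope for VFP3:
  //
  //    vldr(d0, r0, 0);     // load a double from [r0]
  //    vldr(d1, r0, 8);     // load a double from [r0 + 8]
  //    vadd(d2, d0, d1);    // d2 = d0 + d1
  //    vstr(d2, r0, 16);    // store the result to [r0 + 16])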

  void vldr(const DwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const DwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  void vldr(const SwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const SwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  void vstr(const DwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const DwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  void vstr(const SwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const SwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  void vldm(BlockAddrMode am,
            Register base,
            DwVfpRegister first,
            DwVfpRegister last,
            Condition cond = al);

  void vstm(BlockAddrMode am,
            Register base,
            DwVfpRegister first,
            DwVfpRegister last,
            Condition cond = al);

  void vldm(BlockAddrMode am,
            Register base,
            SwVfpRegister first,
            SwVfpRegister last,
            Condition cond = al);

  void vstm(BlockAddrMode am,
            Register base,
            SwVfpRegister first,
            SwVfpRegister last,
            Condition cond = al);

  void vmov(const DwVfpRegister dst,
            double imm,
            const Condition cond = al);
  void vmov(const SwVfpRegister dst,
            const SwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst,
            const Register src1,
            const Register src2,
            const Condition cond = al);
  void vmov(const Register dst1,
            const Register dst2,
            const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const SwVfpRegister dst,
            const Register src,
            const Condition cond = al);
  void vmov(const Register dst,
            const SwVfpRegister src,
            const Condition cond = al);
  void vcvt_f64_s32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_s32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_u32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_f32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);

  void vneg(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vabs(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vadd(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vsub(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vmul(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vdiv(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const DwVfpRegister src1,
            const double src2,
            const Condition cond = al);
  void vmrs(const Register dst,
            const Condition cond = al);
  void vmsr(const Register dst,
            const Condition cond = al);
  void vsqrt(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);

  // Pseudo instructions

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  void nop(int type = 0);  // 0 is the default non-marking type.

  void push(Register src, Condition cond = al) {
    str(src, MemOperand(sp, 4, NegPreIndex), cond);
  }

  void pop(Register dst, Condition cond = al) {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }

  void pop() {
    add(sp, sp, Operand(kPointerSize));
  }

  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L, al); }

  // Check the code size generated from label to here.
  int InstructionsGeneratedSince(Label* l) {
    return (pc_offset() - l->pos()) / kInstrSize;
  }

  // Check whether an immediate fits an addressing mode 1 instruction.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);

  // Class for scoping postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Debugging

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Writes a single byte or word of data in the code stream. Used
  // for inline tables, e.g., jump-tables. The constant pool should be
  // emitted before any use of db and dd to ensure that constant pools
  // are not emitted as part of the tables generated.
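  // (As a sketch, not an API requirement: a caller might force the pending
  //  pool out with CheckConstPool(true, false) and then emit the table while
  //  a BlockConstPoolScope is active.)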
  void db(uint8_t data);
  void dd(uint32_t data);

  int pc_offset() const { return pc_ - buffer_; }

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  bool can_peephole_optimize(int instructions) {
    if (!allow_peephole_optimization_) return false;
    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
  }

  // Read/patch instructions
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  static Condition GetCondition(Instr instr);
  static bool IsBranch(Instr instr);
  static int GetBranchOffset(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
  static bool IsTstImmediate(Instr instr);
  static bool IsCmpRegister(Instr instr);
  static bool IsCmpImmediate(Instr instr);
  static Register GetCmpImmediateRegister(Instr instr);
  static int GetCmpImmediateRawImmediate(Instr instr);
  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);

  // Check whether it is time to emit a constant pool for pending reloc info
  // entries.
  void CheckConstPool(bool force_emit, bool require_jump);

 protected:
  bool emit_debug_code() const { return emit_debug_code_; }

  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

  // Patch branch instruction at pos to branch to given branch target pos
  void target_at_put(int pos, int target_pos);

  // Block the emission of the constant pool before pc_offset
  void BlockConstPoolBefore(int pc_offset) {
    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
  }

  void StartBlockConstPool() {
    const_pool_blocked_nesting_++;
  }
  void EndBlockConstPool() {
    const_pool_blocked_nesting_--;
  }
  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }

 private:
  // Code buffer:
  // The buffer into which code and relocation info are generated.
  byte* buffer_;
  int buffer_size_;
  // True if the assembler owns the buffer, false if buffer is external.
  bool own_buffer_;

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes
  static const int kBufferCheckInterval = 1*KB/2;
  int next_buffer_check_;  // pc offset of next buffer check

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;
  byte* pc_;  // the program counter; moves forward

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after
  // unconditional jumps or after returns from functions (in dead code
  // locations). If a long code sequence does not contain unconditional jumps,
  // it is necessary to emit the constant pool before the pool gets too far
  // from the location it is accessed from. In this case, we emit a jump over
  // the emitted constant pool.
  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;


  // Pools are emitted after function return and in dead code at (more or less)
  // regular intervals of kDistBetweenPools bytes
  static const int kDistBetweenPools = 1*KB;

  // Constants in pools are accessed via pc relative addressing, which can
  // reach +/-4KB thereby defining a maximum distance between the instruction
  // and the accessed constant. We satisfy this constraint by limiting the
  // distance between pools.
  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance
  int last_const_pool_end_;  // pc offset following the last constant pool

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are
  // temporarily stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.
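  // (With the 4-byte ARM kInstrSize, this works out to
  //  (4 * KB - 2 * (1 * KB / 2)) / 4 = 768 pending entries.)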
  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
  int num_prinfo_;  // number of pending reloc info entries in the buffer

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);

  // Instruction generation
  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
  void addrmod2(Instr instr, Register rd, const MemOperand& x);
  void addrmod3(Instr instr, Register rd, const MemOperand& x);
  void addrmod4(Instr instr, Register rn, RegList rl);
  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void link_to(Label* L, Label* appendix);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  friend class RegExpMacroAssemblerARM;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockConstPoolScope;

  PositionsRecorder positions_recorder_;
  bool allow_peephole_optimization_;
  bool emit_debug_code_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
};


class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};


} }  // namespace v8::internal

#endif  // V8_ARM_ASSEMBLER_ARM_H_