1 /* 2 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. 3 * Copyright (C) 2010 University of Szeged 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #ifndef MacroAssemblerARMv7_h 28 #define MacroAssemblerARMv7_h 29 30 #if ENABLE(ASSEMBLER) 31 32 #include "ARMv7Assembler.h" 33 #include "AbstractMacroAssembler.h" 34 35 namespace JSC { 36 37 class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { 38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7? 39 // - dTR is likely used more than aTR, and we'll get better instruction 40 // encoding if it's in the low 8 registers. 
    // Scratch registers claimed by the macro assembler itself. Operations below
    // clobber these freely (typically to materialize immediates or compute
    // addresses), so client code must never rely on their contents surviving
    // a macro operation.
    static const RegisterID dataTempRegister = ARMRegisters::ip;
    static const RegisterID addressTempRegister = ARMRegisters::r3;

    // Double-precision scratch register, and a view of its low half as a
    // single-precision register (used for int<->double transfers).
    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }

public:
    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;

    MacroAssemblerARMv7()
        : m_inUninterruptedSequence(false)
    {
    }

    // An uninterrupted sequence is a run of instructions that must not be
    // altered (e.g. by branch compaction) once emitted.
    void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
    void endUninterruptedSequence() { m_inUninterruptedSequence = false; }

    // Branch-compaction/link-record plumbing; these all forward directly to
    // the underlying ARMv7Assembler.
    Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }

    // Internal representation of an ARM addressing mode: either
    // base + signed 32-bit offset, or base + (index << scale).
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };

public:
    typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;

    // Pointers are 4 bytes wide on ARMv7.
    static const Scale ScalePtr = TimesFour;

    enum Condition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE,
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be an TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        // Use the immediate encoding where possible; otherwise materialize
        // the constant in the scratch register first.
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add(dest, src, dataTempRegister);
        }
    }

    // Read-modify-write add of an immediate to a memory location.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // Read-modify-write add of an immediate to an absolute memory location.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.ARM_and(dest, dest, src);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.ARM_and(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.ARM_and(dest, dest, dataTempRegister);
        }
    }

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.lsl(dest, dest, dataTempRegister);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        // smull produces a 64-bit product; the high word lands in the scratch
        // register and is discarded here - dest holds the 32-bit result.
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.mvn(srcDest, srcDest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.orr(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.orr(dest, dest, dataTempRegister);
        }
    }

    // Arithmetic (sign-preserving) right shift.
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.asr(dest, dest, dataTempRegister);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, dest, imm.m_value & 0x1f);
    }

    // Logical (zero-filling) right shift.
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);

        m_assembler.lsr(dest, dest, dataTempRegister);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub(dest, dest, dataTempRegister);
        }
    }

    // Read-modify-write subtract of an immediate from a memory location.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    // Read-modify-write subtract of an immediate from an absolute memory location.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // Hrrrm, since dataTempRegister holds the data loaded,
            // use addressTempRegister to hold the immediate.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eor(dest, dest, src);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.eor(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.eor(dest, dest, dataTempRegister);
        }
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be an TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.

private:
    // Core load dispatcher: picks the register-offset, 12-bit positive
    // immediate, or 8-bit negative immediate form of LDR.
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            // Negative offsets only fit the 8-bit immediate encoding.
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }

    // As load32, but loads a 16-bit halfword (LDRH).
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // As load32, but loads a single byte (LDRB).
    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    // Core store dispatcher, mirroring load32's encoding selection (STR).
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }

public:
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // ARMv7 LDR tolerates unaligned addresses, so this is a plain load.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    // Emits a load whose 32-bit offset is materialized by a patchable
    // movw/movt pair; returns the label used to repatch the offset later.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }

    // Store counterpart of load32WithAddressOffsetPatch.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }


    // Floating-point operations:

    bool supportsFloatingPoint() const { return true; }
    // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
    // If a value is not representable as an integer, and possibly for some values that are,
    // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
    // a branch will be taken. It is not clear whether this interface will be well suited to
    // other platforms.
    // On ARMv7 the hardware truncation operation produces multiple possible
    // failure values (saturates to INT_MIN & INT_MAX, NaN results in a value of 0). This is a
    // temporary solution while we work out what this interface should be. Either we need to
    // decide to make this interface work on all platforms, rework the interface to make it more
    // generic, or decide that the MacroAssembler cannot practically be used to abstract these
    // operations, and make clients go directly to the m_assembler to plant truncation instructions.
    // In short, FIXME:.
    bool supportsFloatingPointTruncate() const { return false; }

    bool supportsFloatingPointSqrt() const
    {
        return false;
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // If the offset doesn't fit (or isn't 4-byte aligned), fold it into the base first.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // If the offset doesn't fit (or isn't 4-byte aligned), fold it into the base first.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vstr(src, base, offset);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_F64(dest, dest, src);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_F64(dest, dest, src);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_F64(dest, dest, src);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_F64(dest, dest, src);
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    void sqrtDouble(FPRegisterID, FPRegisterID)
    {
        // Unimplemented on this port; see supportsFloatingPointSqrt().
        ASSERT_NOT_REACHED();
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        // Transfer via the single-precision view of the FP scratch register,
        // then convert signed int32 -> double.
        m_assembler.vmov(fpTempRegisterAsSingle(), src);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
        m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
    }

    // Compare two doubles and branch on 'cond'. The VFP flags (copied to APSR
    // by vmrs) don't map directly onto DoubleNotEqual/DoubleEqualOrUnordered,
    // so those two cases are synthesized from multiple branches.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_F64(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = makeJump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
    {
        // Unimplemented on this port; see supportsFloatingPointTruncate().
        ASSERT_NOT_REACHED();
        return jump();
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
    {
        m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));
    }

    // Branch if 'reg' compares non-zero and the comparison is ordered
    // (i.e. the value is neither zero nor NaN).
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz_F64(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if 'reg' is zero or NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz_F64(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = makeJump();
        notEqual.link(this);
        return result;
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    void pop(RegisterID dest)
    {
        // load postindexed with writeback
        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
    }

    void push(RegisterID src)
    {
        // store preindexed with writeback
        m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
    }

    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }

    // Register move operations:
    //
    // Move values in registers.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        uint32_t value = imm.m_value;

        if (imm.m_isPointer)
            moveFixedWidthEncoding(imm, dest);
        else {
            // Prefer the shortest encoding: mov with an encoded immediate,
            // mvn of the complement, else movw (and movt for the high half).
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);

            if (armImm.isValid())
                m_assembler.mov(dest, armImm);
            else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
                m_assembler.mvn(dest, armImm);
            else {
                m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
                if (value & 0xffff0000)
                    m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
            }
        }
    }

    void move(RegisterID src, RegisterID dest)
    {
        m_assembler.mov(dest, src);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    // Exchange two registers via the data scratch register.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Registers and pointers are both 32-bit, so extension is just a move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may linked at a later point, allow forwards jump,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
private:

    // Should we be using TEQ for equal/not-equal?
    // Sets the flags for a compare of 'left' against the immediate, choosing
    // CMP, CMN (negated immediate), or a scratch-register compare.
    void compare32(RegisterID left, TrustedImm32 right)
    {
        int32_t imm = right.m_value;
        if (!imm)
            m_assembler.tst(left, left);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.cmp(left, armImm);
            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
                m_assembler.cmn(left, armImm);
            else {
                move(TrustedImm32(imm), dataTempRegister);
                m_assembler.cmp(left, dataTempRegister);
            }
        }
    }

    // Sets the flags for (reg & mask); a mask of -1 tests the register itself.
    void test32(RegisterID reg, TrustedImm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.tst(reg, armImm);
            else {
                move(mask, dataTempRegister);
                m_assembler.tst(reg, dataTempRegister);
            }
        }
    }

public:
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(Condition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(Condition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(Condition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        load16(left, dataTempRegister);
        // Shift both operands into the top halfword so the 32-bit compare
        // behaves like a 16-bit compare.
        m_assembler.lsl(addressTempRegister, right, 16);
        m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
        return branch32(cond, dataTempRegister, addressTempRegister);
    }

    Jump branch16(Condition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load16(left, addressTempRegister);
        m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
        return branch32(cond, addressTempRegister, TrustedImm32(right.m_value << 16));
    }

    Jump branch8(Condition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch8(Condition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch8(cond, addressTempRegister, right);
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(Condition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(Condition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest8(Condition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest8(cond, addressTempRegister, mask);
    }

    Jump jump()
    {
        return Jump(makeJump());
    }

    void jump(RegisterID target)
    {
        m_assembler.bx(target, ARMv7Assembler::JumpFixed);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        // add_S sets the flags; the branch condition consumes them.
        m_assembler.add_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchAdd32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, dest, armImm);
        else {
            // Immediate not encodable in a Thumb-2 modified immediate; go via a register.
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT_UNUSED(cond, cond == Overflow);
        // Signed 32x32->64 multiply; low word goes to dest, high word to dataTempRegister.
        m_assembler.smull(dest, dataTempRegister, dest, src);
        // Overflow occurred iff the high word differs from the sign-extension
        // of the low word (asr #31 reproduces that sign-extension).
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }

    Jump branchMul32(Condition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT_UNUSED(cond, cond == Overflow);
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
        // Same sign-extension overflow check as the register form above.
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }

    Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }

    Jump branchSub32(Condition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, dest, armImm);
        else {
            // Immediate not encodable; materialize it in the temp register first.
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, dest, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Jump into a jump table: target = pc-relative base + (index << scale).
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        move(ARMRegisters::pc, dataTempRegister);
        // NOTE(review): the +9 compensates for the pc read-ahead of the
        // instructions emitted below (Thumb pc reads as address + 4) — confirm
        // against the exact encodings emitted here before changing.
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }

    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    // Emit a linkable call; the zero placeholder is patched by the linker.
    // moveFixedWidthEncoding guarantees a constant-size, patchable sequence.
    Call nearCall()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::LinkableNear);
    }

    Call call()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target, ARMv7Assembler::JumpFixed), Call::None);
    }

    // Call through a function pointer loaded from memory.
    Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::None);
    }

    void ret()
    {
        m_assembler.bx(linkRegister, ARMv7Assembler::JumpFixed);
    }

    // dest = (left cond right) ? 1 : 0, via an IT block selecting between
    // the two mov instructions below.
    void set32Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    void set32Compare32(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        set32Compare32(cond, dataTempRegister, right, dest);
    }

    void set32Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // The 8-bit variants delegate to the 32-bit forms: the 0/1 result
    // already fits in a byte.
    void set8Compare32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        set32Compare32(cond, left, right, dest);
    }

    void set8Compare32(Condition cond, Address left, RegisterID right, RegisterID dest)
    {
        set32Compare32(cond, left, right, dest);
    }

    void set8Compare32(Condition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        set32Compare32(cond, left, right, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    // dest = (*address & mask) tests true for cond ? 1 : 0 (IT-block select).
    void set32Test32(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // Same as set32Test32, but tests a byte loaded from memory.
    void set32Test8(Condition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // Load an immediate using a fixed-size (movT3 + movt) sequence so the
    // value can later be repatched; the label marks the patch location.
    DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }

    DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }

    // Compare against a patchable pointer constant; dataLabel locates the
    // constant so it can be rewritten after code generation.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        // addressTempRegister holds the loaded value; moveWithPatch uses dataTempRegister.
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }

    // Store a patchable pointer constant to memory; the returned label
    // locates the constant for later repatching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }


    Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFixed), Call::Linkable);
    }

    // Convert an existing jump into a tail call by linking it to here.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }


    int executableOffsetFor(int location)
    {
        return m_assembler.executableOffsetFor(location);
    }

protected:
    bool inUninterruptedSequence()
    {
        return m_inUninterruptedSequence;
    }

    // Emit an unconditional jump with a patchable fixed-width target load.
    // Inside an uninterrupted sequence the jump must keep a fixed size so it
    // cannot be shrunk by branch compaction.
    ARMv7Assembler::JmpSrc makeJump()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }

    // Emit a conditional jump: the IT block makes the following
    // mov/movt/bx sequence conditional on 'cond'.
    ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
    ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }

    // Fold a BaseIndex's constant offset into addressTempRegister so the
    // resulting ArmAddress only needs base + (index << scale).
    ArmAddress setupArmAddress(BaseIndex address)
    {
        if (address.offset) {
            ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
            if (imm.isValid())
                m_assembler.add(addressTempRegister, address.base, imm);
            else {
                // Offset not encodable as an immediate; materialize then add.
                move(TrustedImm32(address.offset), addressTempRegister);
                m_assembler.add(addressTempRegister, addressTempRegister, address.base);
            }

            return ArmAddress(addressTempRegister, address.index, address.scale);
        } else
            return ArmAddress(address.base, address.index, address.scale);
    }

    // Offsets in [-0xff, 0xfff] fit the Thumb-2 load/store immediate forms;
    // anything else is materialized into addressTempRegister as an index.
    ArmAddress setupArmAddress(Address address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    ArmAddress setupArmAddress(ImplicitAddress address)
    {
        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
            return ArmAddress(address.base, address.offset);

        move(TrustedImm32(address.offset), addressTempRegister);
        return ArmAddress(address.base, addressTempRegister);
    }

    // Return a register holding base + offset for a BaseIndex (offset folded
    // away); returns the base register unchanged when there is no offset.
    RegisterID makeBaseIndexBase(BaseIndex address)
    {
        if (!address.offset)
            return address.base;

        ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
        if (imm.isValid())
            m_assembler.add(addressTempRegister, address.base, imm);
        else {
            move(TrustedImm32(address.offset), addressTempRegister);
            m_assembler.add(addressTempRegister, addressTempRegister, address.base);
        }

        return addressTempRegister;
    }

    // Always emit the two-instruction movT3/movt pair, regardless of the
    // value, so the 32-bit immediate sits at a known, repatchable location.
    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }

    // Condition enums are defined to share the assembler's encoding, so a
    // static_cast suffices.
    ARMv7Assembler::Condition armV7Condition(Condition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Set while inside begin/endUninterruptedSequence(); forces fixed-size
    // jump encodings (see makeJump/makeBranch).
    bool m_inUninterruptedSequence;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerARMv7_h