// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"

// Simulator specific helpers.
#if USE_SIMULATOR
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__,            \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#endif


namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
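
// A minimal usage sketch (hypothetical registers and offset, assuming the
// usual "__" shorthand for a MacroAssembler*): FieldMemOperand adjusts the
// offset for the heap object tag, so a tagged pointer can be used directly.
//
//   __ Ldr(x2, FieldMemOperand(x1, HeapObject::kMapOffset));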

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
                 CodeObjectRequired create_code_object);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
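
  // Illustrative sketch (not part of the original interface documentation):
  // the logical macros accept arbitrary immediates. If an immediate cannot be
  // encoded as an architectural bitmask immediate, LogicalMacro is expected to
  // materialize it in a scratch register first.
  //
  //   __ And(x0, x1, 0x00000000ffffffffL);  // Encodable: a single AND.
  //   __ Tst(w2, 0x12345678);               // Likely needs a scratch register.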

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst.
  // Returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand,
                   StatusFlags nzcv, Condition cond);
  inline void Ccmn(const Register& rn, const Operand& operand,
                   StatusFlags nzcv, Condition cond);
  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);
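
  // Illustrative sketch (hypothetical registers): a chained comparison
  // followed by a conditional select, following the architectural CCMP/CSEL
  // semantics.
  //
  //   __ Cmp(x0, 0);                 // Set flags from x0 - 0.
  //   __ Ccmp(x1, 42, NoFlag, eq);   // If eq, compare x1 with 42.
  //   __ Csel(x2, x3, x4, eq);       // x2 = eq ? x3 : x4.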

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  // Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn, const FPRegister& fm,
                    StatusFlags nzcv, Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd, const FPRegister& fn,
                    const FPRegister& fm, Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd, const FPRegister& fn,
                    const FPRegister& fm, const FPRegister& fa);
  inline void Fmax(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd, const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd, const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves,
  // rather than relying on implicit C++ casts. This allows signalling NaNs to
  // be preserved when the immediate matches the format of fd. Most systems
  // convert signalling NaNs to quiet NaNs when converting between float and
  // double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd, const FPRegister& fn,
                    const FPRegister& fm, const FPRegister& fa);
  inline void Fmul(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd, const FPRegister& fn,
                     const FPRegister& fm, const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd, const FPRegister& fn,
                     const FPRegister& fm, const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintp(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd, const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra);
  inline void Smsubl(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra);
  inline void Smull(const Register& rd, const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd, const Register& rn,
                    const Register& rm);
  inline void Umull(const Register& rd, const Register& rn,
                    const Register& rm);
  inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra);
  inline void Umsubl(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
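
  // Illustrative sketch (hypothetical registers): pushing and popping a small
  // register set. Note the reversed order on Pop, since "Push(a, b)" is
  // equivalent to "Push(a); Push(b)".
  //
  //   __ Push(x1, x2, x3);
  //   // ... code that clobbers x1-x3 ...
  //   __ Pop(x3, x2, x1);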

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets
  // of register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}

    ~PushPopQueue() { DCHECK(queued_.empty()); }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
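
  // A minimal usage sketch (hypothetical registers), queuing mixed register
  // types and flushing them in one batched sequence:
  //
  //   PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);
  //   queue.Queue(w1);
  //   queue.PushQueued();  // Emits the pushes as a single efficient sequence.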

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be
  // adjacent, with 'src2' at a higher address than 'src1'. The offset is in
  // bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                               Condition cond, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two character
  // string; it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);
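
  // Illustrative sketch (hypothetical labels): branching on the tag bit of a
  // value using the bit-test helpers declared above. With V8's smi tagging,
  // the tag bit is clear for smis.
  //
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
  //   __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);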

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp), then csp will be
  // dereferenced to cause the processor (or simulator) to abort if it is not
  // properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Emits a runtime assert that the CSP is aligned.
  void AssertCspAligned();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const { return sp_; }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);
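
  // A minimal sketch of switching the macro assembler away from csp (assuming
  // the jssp alias used elsewhere in the port; purely illustrative):
  //
  //   __ SetStackPointer(jssp);  // Subsequent pushes use jssp.
  //   __ Push(x0, x1);           // csp is still kept at or below jssp.
  //   __ SetStackPointer(csp);   // Switch back when done.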

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()).
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) { CanonicalizeNaN(reg, reg); }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination, Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source, Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }
  inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
  inline void Move(Register dst, Smi* src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst, Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst, Register src,
                              UntagMode mode = kNotSpeculativeUntag);
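
  // Illustrative sketch (hypothetical registers): on arm64, a smi keeps its
  // 32-bit payload in the upper half of the register, so tagging and untagging
  // reduce to plain shifts.
  //
  //   __ SmiTag(x0, x1);    // x0 = x1 << kSmiShift
  //   __ SmiUntag(x2, x0);  // x2 = x0 >> kSmiShift (arithmetic shift)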

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1, Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1, Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1, Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1, Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

  // Abort execution if argument is not a number (heap number or smi).
  void AssertNumber(Register value);
  void AssertNotNumber(Register value);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output, DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int, FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int, FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // ---- Object Utilities ----

  // Initialize fields with filler values. Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|. At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where
  // the last byte was read or written and length will be zero. Hint may be
  // used to determine which is the most efficient algorithm to use for
  // copying.
  void CopyBytes(Register dst, Register src, Register length, Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----


  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L) { B(L); }

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  void TailCallRuntime(Runtime::FunctionId fid);

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a garbage collection, since
  // that might move the code and invalidate the return address (unless this is
  // somehow accounted for by the called function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext, int num_arguments);


  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      InvokeFlag flag, bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before
  // invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);


  // ---- Floating point helpers ----

  // Perform a conversion from a double to a signed int64. If the input fits in
  // the range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);
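
  // Illustrative sketch (hypothetical registers): truncating an untagged
  // double for a JS bitwise operation.
  //
  //   __ TruncateDoubleToI(x0, d0);  // x0 holds ToInt32(d0), truncated as per
  //                                  // the ECMA-262 rules referenced above.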

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must
  // be different registers.
  void TruncateNumberToI(Register object, Register result,
                         Register heap_number_map, Label* not_int32);

  // ---- Code generation helpers ----

  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
  bool use_real_aborts() const { return use_real_aborts_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  bool AllowThisStubCall(CodeStub* stub);

  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm)
        : saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() { masm_->use_real_aborts_ = saved_; }

   private:
    bool saved_;
    MacroAssembler* masm_;
  };

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. The allocated object is returned in result.
  //
  // If the new space is exhausted control continues at the gc_required label.
  // In this case, the result and scratch registers may still be clobbered.
  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  void Allocate(int object_size, Register result, Register scratch1,
                Register scratch2, Label* gc_required, AllocationFlags flags);
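
  // Illustrative sketch (hypothetical registers, label and flags): allocating
  // a fixed-size object and bailing out to a slow path when space is
  // exhausted.
  //
  //   __ Allocate(JSObject::kHeaderSize, x0, x1, x2, &gc_required,
  //               NO_ALLOCATION_FLAGS);
  //   // x0 now holds the allocated object; x1 and x2 are used as scratch.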

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against limit. This can only
  // be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(Register object_size, Register result, Register result_end,
                    Register scratch, AllocationFlags flags);

  void FastAllocate(int object_size, Register result, Register scratch1,
                    Register scratch2, AllocationFlags flags);

  void AllocateTwoByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed.
  // All registers are clobbered.
  // If no heap_number_map register is provided, the function will take care of
  // loading it.
  void AllocateHeapNumber(Register result, Label* gc_required,
                          Register scratch1, Register scratch2,
                          CPURegister value = NoFPReg,
                          CPURegister heap_number_map = NoReg,
                          MutableMode mode = IMMUTABLE);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of
  // the other registers.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);
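
  // Illustrative sketch (hypothetical registers and label): checking that a
  // heap object is a JSFunction.
  //
  //   __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
  //   __ B(ne, &not_a_function);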
1436 void JumpIfObjectType(Register object, 1437 Register map, 1438 Register type_reg, 1439 InstanceType type, 1440 Label* if_cond_pass, 1441 Condition cond = eq); 1442 1443 void JumpIfNotObjectType(Register object, 1444 Register map, 1445 Register type_reg, 1446 InstanceType type, 1447 Label* if_not_object); 1448 1449 // Compare instance type in a map. map contains a valid map object whose 1450 // object type should be compared with the given type. This both 1451 // sets the flags and leaves the object type in the type_reg register. 1452 void CompareInstanceType(Register map, 1453 Register type_reg, 1454 InstanceType type); 1455 1456 // Compare an object's map with the specified map. Condition flags are set 1457 // with result of map compare. 1458 void CompareObjectMap(Register obj, Heap::RootListIndex index); 1459 1460 // Compare an object's map with the specified map. Condition flags are set 1461 // with result of map compare. 1462 void CompareObjectMap(Register obj, Register scratch, Handle<Map> map); 1463 1464 // As above, but the map of the object is already loaded into the register 1465 // which is preserved by the code generated. 1466 void CompareMap(Register obj_map, 1467 Handle<Map> map); 1468 1469 // Check if the map of an object is equal to a specified map and branch to 1470 // label if not. Skip the smi check if not required (object is known to be a 1471 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 1472 // against maps that are ElementsKind transition maps of the specified map. 1473 void CheckMap(Register obj, 1474 Register scratch, 1475 Handle<Map> map, 1476 Label* fail, 1477 SmiCheckType smi_check_type); 1478 1479 1480 void CheckMap(Register obj, 1481 Register scratch, 1482 Heap::RootListIndex index, 1483 Label* fail, 1484 SmiCheckType smi_check_type); 1485 1486 // As above, but the map of the object is already loaded into obj_map, and is 1487 // preserved. 1488 void CheckMap(Register obj_map, 1489 Handle<Map> map, 1490 Label* fail, 1491 SmiCheckType smi_check_type); 1492 1493 // Check if the map of an object is equal to a specified weak map and branch 1494 // to a specified target if equal. Skip the smi check if not required 1495 // (object is known to be a heap object) 1496 void DispatchWeakMap(Register obj, Register scratch1, Register scratch2, 1497 Handle<WeakCell> cell, Handle<Code> success, 1498 SmiCheckType smi_check_type); 1499 1500 // Compare the given value and the value of weak cell. 1501 void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch); 1502 1503 void GetWeakValue(Register value, Handle<WeakCell> cell); 1504 1505 // Load the value of the weak cell in the value register. Branch to the given 1506 // miss label if the weak cell was cleared. 1507 void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss); 1508 1509 // Test the bitfield of the heap object map with mask and set the condition 1510 // flags. The object register is preserved. 1511 void TestMapBitfield(Register object, uint64_t mask); 1512 1513 // Load the elements kind field from a map, and return it in the result 1514 // register. 1515 void LoadElementsKindFromMap(Register result, Register map); 1516 1517 // Load the value from the root list and push it onto the stack. 1518 void PushRoot(Heap::RootListIndex index); 1519 1520 // Compare the object in a register to a value from the root list. 
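// For example (hypothetical register and label), a root comparison might be
// written as:
//   __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
//   __ B(eq, &is_undefined);
// or, equivalently, by using JumpIfRoot below.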
1521 void CompareRoot(const Register& obj, Heap::RootListIndex index);
1522
1523 // Compare the object in a register to a value and jump if they are equal.
1524 void JumpIfRoot(const Register& obj,
1525 Heap::RootListIndex index,
1526 Label* if_equal);
1527
1528 // Compare the object in a register to a value and jump if they are not equal.
1529 void JumpIfNotRoot(const Register& obj,
1530 Heap::RootListIndex index,
1531 Label* if_not_equal);
1532
1533 // Load and check the instance type of an object for being a unique name.
1534 // Loads the type into the second argument register.
1535 // The object and type arguments can be the same register; in that case it
1536 // will be overwritten with the type.
1537 // Falls through if the object is a string and jumps to fail otherwise.
1538 inline void IsObjectNameType(Register object, Register type, Label* fail);
1539
1540 // Load and check the instance type of an object for being a string.
1541 // Loads the type into the second argument register.
1542 // The object and type arguments can be the same register; in that case it
1543 // will be overwritten with the type.
1544 // Jumps to not_string or string, as appropriate. If the appropriate label is
1545 // NULL, fall through.
1546 inline void IsObjectJSStringType(Register object, Register type,
1547 Label* not_string, Label* string = NULL);
1548
1549 // Compare the contents of a register with an operand, and branch to true,
1550 // false or fall through, depending on condition.
1551 void CompareAndSplit(const Register& lhs,
1552 const Operand& rhs,
1553 Condition cond,
1554 Label* if_true,
1555 Label* if_false,
1556 Label* fall_through);
1557
1558 // Test the bits of the register defined by bit_pattern, and branch to
1559 // if_any_set, if_all_clear or fall_through accordingly.
1560 void TestAndSplit(const Register& reg,
1561 uint64_t bit_pattern,
1562 Label* if_all_clear,
1563 Label* if_any_set,
1564 Label* fall_through);
1565
1566 // Check if a map for a JSObject indicates that the object has fast elements.
1567 // Jump to the specified label if it does not.
1568 void CheckFastElements(Register map, Register scratch, Label* fail);
1569
1570 // Check if a map for a JSObject indicates that the object can have both smi
1571 // and HeapObject elements. Jump to the specified label if it does not.
1572 void CheckFastObjectElements(Register map, Register scratch, Label* fail);
1573
1574 // Check to see if number can be stored as a double in FastDoubleElements.
1575 // If it can, store it at the index specified by key_reg in the array,
1576 // otherwise jump to fail.
1577 void StoreNumberToDoubleElements(Register value_reg,
1578 Register key_reg,
1579 Register elements_reg,
1580 Register scratch1,
1581 FPRegister fpscratch1,
1582 Label* fail,
1583 int elements_offset = 0);
1584
1585 // Picks out an array index from the hash field.
1586 // Register use:
1587 // hash - holds the index's hash. Clobbered.
1588 // index - holds the overwritten index on exit.
1589 void IndexFromHash(Register hash, Register index);
1590
1591 // ---------------------------------------------------------------------------
1592 // Inline caching support.
1593
1594 void EmitSeqStringSetCharCheck(Register string,
1595 Register index,
1596 SeqStringSetCharCheckIndexType index_type,
1597 Register scratch,
1598 uint32_t encoding_mask);
1599
1600 // Generate code for checking access rights - used for security checks
1601 // on access to global objects across environments. The holder register
1602 // is left untouched, whereas both scratch registers are clobbered.
1603 void CheckAccessGlobalProxy(Register holder_reg,
1604 Register scratch1,
1605 Register scratch2,
1606 Label* miss);
1607
1608 // Hash the integer value in the 'key' register.
1609 // It uses the same algorithm as ComputeIntegerHash in utils.h.
1610 void GetNumberHash(Register key, Register scratch);
1611
1612 // Load value from the dictionary.
1613 //
1614 // elements - holds the slow-case elements of the receiver on entry.
1615 // Unchanged unless 'result' is the same register.
1616 //
1617 // key - holds the smi key on entry.
1618 // Unchanged unless 'result' is the same register.
1619 //
1620 // result - holds the result on exit if the load succeeded.
1621 // Allowed to be the same as 'elements' or 'key'.
1622 // Unchanged on bailout so 'elements' or 'key' can be used
1623 // in further computation.
1624 void LoadFromNumberDictionary(Label* miss,
1625 Register elements,
1626 Register key,
1627 Register result,
1628 Register scratch0,
1629 Register scratch1,
1630 Register scratch2,
1631 Register scratch3);
1632
1633 // ---------------------------------------------------------------------------
1634 // Frames.
1635
1636 // Load the type feedback vector from a JavaScript frame.
1637 void EmitLoadTypeFeedbackVector(Register vector);
1638
1639 // Activation support.
1640 void EnterFrame(StackFrame::Type type);
1641 void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
1642 void LeaveFrame(StackFrame::Type type);
1643
1644 // Returns map with validated enum cache in object register.
1645 void CheckEnumCache(Register object, Register scratch0, Register scratch1,
1646 Register scratch2, Register scratch3, Register scratch4,
1647 Label* call_runtime);
1648
1649 // AllocationMemento support. Arrays may have an associated
1650 // AllocationMemento object that can be checked for in order to pretransition
1651 // to another type.
1652 // On entry, receiver should point to the array object.
1653 // If allocation info is present, the Z flag is set (so that the eq
1654 // condition will pass).
1655 void TestJSArrayForAllocationMemento(Register receiver,
1656 Register scratch1,
1657 Register scratch2,
1658 Label* no_memento_found);
1659
1660 void JumpIfJSArrayHasAllocationMemento(Register receiver,
1661 Register scratch1,
1662 Register scratch2,
1663 Label* memento_found) {
1664 Label no_memento_found;
1665 TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
1666 &no_memento_found);
1667 B(eq, memento_found);
1668 Bind(&no_memento_found);
1669 }
1670
1671 // The stack pointer has to switch between csp and jssp when setting up and
1672 // destroying the exit frame. Hence preserving/restoring the registers is
1673 // slightly more complicated than simple push/pop operations.
1674 void ExitFramePreserveFPRegs();
1675 void ExitFrameRestoreFPRegs();
1676
1677 // Generates function and stub prologue code.
1678 void StubPrologue(StackFrame::Type type, int frame_slots);
1679 void Prologue(bool code_pre_aging);
1680
1681 // Enter exit frame. Exit frames are used when calling C code from generated
1682 // (JavaScript) code.
1683 //
1684 // The stack pointer must be jssp on entry, and will be set to csp by this
1685 // function. The frame pointer is also configured, but the only other
1686 // registers modified by this function are the provided scratch register, and
1687 // jssp.
1688 // 1689 // The 'extra_space' argument can be used to allocate some space in the exit 1690 // frame that will be ignored by the GC. This space will be reserved in the 1691 // bottom of the frame immediately above the return address slot. 1692 // 1693 // Set up a stack frame and registers as follows: 1694 // fp[8]: CallerPC (lr) 1695 // fp -> fp[0]: CallerFP (old fp) 1696 // fp[-8]: SPOffset (new csp) 1697 // fp[-16]: CodeObject() 1698 // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true. 1699 // csp[8]: Memory reserved for the caller if extra_space != 0. 1700 // Alignment padding, if necessary. 1701 // csp -> csp[0]: Space reserved for the return address. 1702 // 1703 // This function also stores the new frame information in the top frame, so 1704 // that the new frame becomes the current frame. 1705 void EnterExitFrame(bool save_doubles, 1706 const Register& scratch, 1707 int extra_space = 0); 1708 1709 // Leave the current exit frame, after a C function has returned to generated 1710 // (JavaScript) code. 1711 // 1712 // This effectively unwinds the operation of EnterExitFrame: 1713 // * Preserved doubles are restored (if restore_doubles is true). 1714 // * The frame information is removed from the top frame. 1715 // * The exit frame is dropped. 1716 // * The stack pointer is reset to jssp. 1717 // 1718 // The stack pointer must be csp on entry. 1719 void LeaveExitFrame(bool save_doubles, 1720 const Register& scratch, 1721 bool restore_context); 1722 1723 void LoadContext(Register dst, int context_chain_length); 1724 1725 // Load the global object from the current context. 1726 void LoadGlobalObject(Register dst) { 1727 LoadNativeContextSlot(Context::EXTENSION_INDEX, dst); 1728 } 1729 1730 // Load the global proxy from the current context. 1731 void LoadGlobalProxy(Register dst) { 1732 LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); 1733 } 1734 1735 // Emit code for a truncating division by a constant. The dividend register is 1736 // unchanged. Dividend and result must be different. 1737 void TruncatingDiv(Register result, Register dividend, int32_t divisor); 1738 1739 // --------------------------------------------------------------------------- 1740 // StatsCounter support 1741 1742 void SetCounter(StatsCounter* counter, int value, Register scratch1, 1743 Register scratch2); 1744 void IncrementCounter(StatsCounter* counter, int value, Register scratch1, 1745 Register scratch2); 1746 void DecrementCounter(StatsCounter* counter, int value, Register scratch1, 1747 Register scratch2); 1748 1749 // --------------------------------------------------------------------------- 1750 // Garbage collector support (GC). 1751 1752 enum RememberedSetFinalAction { 1753 kReturnAtEnd, 1754 kFallThroughAtEnd 1755 }; 1756 1757 // Record in the remembered set the fact that we have a pointer to new space 1758 // at the address pointed to by the addr register. Only works if addr is not 1759 // in new space. 1760 void RememberedSetHelper(Register object, // Used for debug code. 1761 Register addr, 1762 Register scratch1, 1763 SaveFPRegsMode save_fp, 1764 RememberedSetFinalAction and_then); 1765 1766 // Push and pop the registers that can hold pointers, as defined by the 1767 // RegList constant kSafepointSavedRegisters. 1768 void PushSafepointRegisters(); 1769 void PopSafepointRegisters(); 1770 1771 void PushSafepointRegistersAndDoubles(); 1772 void PopSafepointRegistersAndDoubles(); 1773 1774 // Store value in register src in the safepoint stack slot for register dst. 
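// For example, a hypothetical call such as
//   __ StoreToSafepointRegisterSlot(x0, x19);
// spills the current value of x0 into the stack slot that
// PushSafepointRegisters() reserved for x19.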
1775 void StoreToSafepointRegisterSlot(Register src, Register dst) {
1776 Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
1777 }
1778
1779 // Load the value of the src register from its safepoint stack slot
1780 // into register dst.
1781 void LoadFromSafepointRegisterSlot(Register dst, Register src) {
1782 Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
1783 }
1784
1785 void CheckPageFlag(const Register& object, const Register& scratch, int mask,
1786 Condition cc, Label* condition_met);
1787
1788 void CheckPageFlagSet(const Register& object,
1789 const Register& scratch,
1790 int mask,
1791 Label* if_any_set);
1792
1793 void CheckPageFlagClear(const Register& object,
1794 const Register& scratch,
1795 int mask,
1796 Label* if_all_clear);
1797
1798 // Check if object is in new space and jump accordingly.
1799 // Register 'object' is preserved.
1800 void JumpIfNotInNewSpace(Register object,
1801 Label* branch) {
1802 InNewSpace(object, ne, branch);
1803 }
1804
1805 void JumpIfInNewSpace(Register object,
1806 Label* branch) {
1807 InNewSpace(object, eq, branch);
1808 }
1809
1810 // Notify the garbage collector that we wrote a pointer into an object.
1811 // |object| is the object being stored into, |value| is the object being
1812 // stored. value and scratch registers are clobbered by the operation.
1813 // The offset is the offset from the start of the object, not the offset from
1814 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1815 void RecordWriteField(
1816 Register object,
1817 int offset,
1818 Register value,
1819 Register scratch,
1820 LinkRegisterStatus lr_status,
1821 SaveFPRegsMode save_fp,
1822 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1823 SmiCheck smi_check = INLINE_SMI_CHECK,
1824 PointersToHereCheck pointers_to_here_check_for_value =
1825 kPointersToHereMaybeInteresting);
1826
1827 // As above, but the offset has the tag presubtracted. For use with
1828 // MemOperand(reg, off).
1829 inline void RecordWriteContextSlot(
1830 Register context,
1831 int offset,
1832 Register value,
1833 Register scratch,
1834 LinkRegisterStatus lr_status,
1835 SaveFPRegsMode save_fp,
1836 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1837 SmiCheck smi_check = INLINE_SMI_CHECK,
1838 PointersToHereCheck pointers_to_here_check_for_value =
1839 kPointersToHereMaybeInteresting) {
1840 RecordWriteField(context,
1841 offset + kHeapObjectTag,
1842 value,
1843 scratch,
1844 lr_status,
1845 save_fp,
1846 remembered_set_action,
1847 smi_check,
1848 pointers_to_here_check_for_value);
1849 }
1850
1851 // Notify the garbage collector that we wrote a code entry into a
1852 // JSFunction. Only scratch is clobbered by the operation.
1853 void RecordWriteCodeEntryField(Register js_function, Register code_entry,
1854 Register scratch);
1855
1856 void RecordWriteForMap(
1857 Register object,
1858 Register map,
1859 Register dst,
1860 LinkRegisterStatus lr_status,
1861 SaveFPRegsMode save_fp);
1862
1863 // For a given |object| notify the garbage collector that the slot |address|
1864 // has been written. |value| is the object being stored. The value and
1865 // address registers are clobbered by the operation.
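// A minimal write-barrier sketch (the registers and the field offset are
// purely illustrative, and lr is assumed to have been saved already):
//   __ Add(x11, x1, JSObject::kPropertiesOffset - kHeapObjectTag);
//   __ Str(x0, MemOperand(x11));
//   __ RecordWrite(x1, x11, x0, kLRHasBeenSaved, kDontSaveFPRegs);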
1866 void RecordWrite( 1867 Register object, 1868 Register address, 1869 Register value, 1870 LinkRegisterStatus lr_status, 1871 SaveFPRegsMode save_fp, 1872 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 1873 SmiCheck smi_check = INLINE_SMI_CHECK, 1874 PointersToHereCheck pointers_to_here_check_for_value = 1875 kPointersToHereMaybeInteresting); 1876 1877 // Checks the color of an object. If the object is white we jump to the 1878 // incremental marker. 1879 void JumpIfWhite(Register value, Register scratch1, Register scratch2, 1880 Register scratch3, Register scratch4, Label* value_is_white); 1881 1882 // Helper for finding the mark bits for an address. 1883 // Note that the behaviour slightly differs from other architectures. 1884 // On exit: 1885 // - addr_reg is unchanged. 1886 // - The bitmap register points at the word with the mark bits. 1887 // - The shift register contains the index of the first color bit for this 1888 // object in the bitmap. 1889 inline void GetMarkBits(Register addr_reg, 1890 Register bitmap_reg, 1891 Register shift_reg); 1892 1893 // Check if an object has a given incremental marking color. 1894 void HasColor(Register object, 1895 Register scratch0, 1896 Register scratch1, 1897 Label* has_color, 1898 int first_bit, 1899 int second_bit); 1900 1901 void JumpIfBlack(Register object, 1902 Register scratch0, 1903 Register scratch1, 1904 Label* on_black); 1905 1906 1907 // --------------------------------------------------------------------------- 1908 // Debugging. 1909 1910 // Calls Abort(msg) if the condition cond is not satisfied. 1911 // Use --debug_code to enable. 1912 void Assert(Condition cond, BailoutReason reason); 1913 void AssertRegisterIsClear(Register reg, BailoutReason reason); 1914 void AssertRegisterIsRoot( 1915 Register reg, 1916 Heap::RootListIndex index, 1917 BailoutReason reason = kRegisterDidNotMatchExpectedRoot); 1918 void AssertFastElements(Register elements); 1919 1920 // Abort if the specified register contains the invalid color bit pattern. 1921 // The pattern must be in bits [1:0] of 'reg' register. 1922 // 1923 // If emit_debug_code() is false, this emits no code. 1924 void AssertHasValidColor(const Register& reg); 1925 1926 // Abort if 'object' register doesn't point to a string object. 1927 // 1928 // If emit_debug_code() is false, this emits no code. 1929 void AssertIsString(const Register& object); 1930 1931 // Like Assert(), but always enabled. 1932 void Check(Condition cond, BailoutReason reason); 1933 void CheckRegisterIsClear(Register reg, BailoutReason reason); 1934 1935 // Print a message to stderr and abort execution. 1936 void Abort(BailoutReason reason); 1937 1938 // Conditionally load the cached Array transitioned map of type 1939 // transitioned_kind from the native context if the map in register 1940 // map_in_out is the cached Array map in the native context of 1941 // expected_kind. 1942 void LoadTransitionedArrayMapConditional( 1943 ElementsKind expected_kind, 1944 ElementsKind transitioned_kind, 1945 Register map_in_out, 1946 Register scratch1, 1947 Register scratch2, 1948 Label* no_map_match); 1949 1950 void LoadNativeContextSlot(int index, Register dst); 1951 1952 // Load the initial map from the global function. The registers function and 1953 // map can be the same, function is then overwritten. 
1954 void LoadGlobalFunctionInitialMap(Register function,
1955 Register map,
1956 Register scratch);
1957
1958 CPURegList* TmpList() { return &tmp_list_; }
1959 CPURegList* FPTmpList() { return &fptmp_list_; }
1960
1961 static CPURegList DefaultTmpList();
1962 static CPURegList DefaultFPTmpList();
1963
1964 // Like printf, but print at run-time from generated code.
1965 //
1966 // The caller must ensure that arguments for floating-point placeholders
1967 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
1968 // placeholders are Registers.
1969 //
1970 // At the moment it is only possible to print the value of csp if it is the
1971 // current stack pointer. Otherwise, the MacroAssembler will automatically
1972 // update csp on every push (using BumpSystemStackPointer), so determining its
1973 // value is difficult.
1974 //
1975 // Format placeholders that refer to more than one argument, or to a specific
1976 // argument, are not supported. This includes formats like "%1$d" or "%.*d".
1977 //
1978 // This function automatically preserves caller-saved registers so that
1979 // calling code can use Printf at any point without having to worry about
1980 // corruption. The preservation mechanism generates a lot of code. If this is
1981 // a problem, preserve the important registers manually and then call
1982 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
1983 // implicitly preserved.
1984 void Printf(const char * format,
1985 CPURegister arg0 = NoCPUReg,
1986 CPURegister arg1 = NoCPUReg,
1987 CPURegister arg2 = NoCPUReg,
1988 CPURegister arg3 = NoCPUReg);
1989
1990 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
1991 //
1992 // The return code from the system printf call will be returned in x0.
1993 void PrintfNoPreserve(const char * format,
1994 const CPURegister& arg0 = NoCPUReg,
1995 const CPURegister& arg1 = NoCPUReg,
1996 const CPURegister& arg2 = NoCPUReg,
1997 const CPURegister& arg3 = NoCPUReg);
1998
1999 // Code ageing support functions.
2000
2001 // Code ageing on ARM64 works as it does on ARM. When V8 wants to mark a
2002 // function as old, it replaces some of the function prologue (generated by
2003 // FullCodeGenerator::Generate) with a call to a special stub (ultimately
2004 // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
2005 // function prologue to its initial young state (indicating that it has been
2006 // recently run) and continues. A young function is therefore one which has a
2007 // normal frame setup sequence, and an old function has a code age sequence
2008 // which calls a code ageing stub.
2009
2010 // Set up a basic stack frame for young code (or code exempt from ageing) with
2011 // type FUNCTION. It may be patched later for code ageing support. This is
2012 // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
2013 //
2014 // This function takes an Assembler so it can be called from either a
2015 // MacroAssembler or a PatchingAssembler context.
2016 static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
2017
2018 // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
2019 void EmitFrameSetupForCodeAgePatching();
2020
2021 // Emit a code age sequence that calls the relevant code age stub. The code
2022 // generated by this sequence is expected to replace the code generated by
2023 // EmitFrameSetupForCodeAgePatching, and represents an old function.
2024 //
2025 // If stub is NULL, this function generates the code age sequence but omits
2026 // the stub address that is normally embedded in the instruction stream. This
2027 // can be used by debug code to verify code age sequences.
2028 static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
2029
2030 // Call EmitCodeAgeSequence from a MacroAssembler context.
2031 void EmitCodeAgeSequence(Code* stub);
2032
2033 // Return true if the sequence is a young sequence generated by
2034 // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
2035 // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
2036 static bool IsYoungSequence(Isolate* isolate, byte* sequence);
2037
2038 // Jumps to the found label if a prototype map has dictionary elements.
2039 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
2040 Register scratch1, Label* found);
2041
2042 // Perform necessary maintenance operations before a push or after a pop.
2043 //
2044 // Note that size is specified in bytes.
2045 void PushPreamble(Operand total_size);
2046 void PopPostamble(Operand total_size);
2047
2048 void PushPreamble(int count, int size) { PushPreamble(count * size); }
2049 void PopPostamble(int count, int size) { PopPostamble(count * size); }
2050
2051 private:
2052 // The actual Push and Pop implementations. These don't generate any code
2053 // other than that required for the push or pop. This allows
2054 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
2055 // block of registers.
2056 //
2057 // Note that size is per register, and is specified in bytes.
2058 void PushHelper(int count, int size,
2059 const CPURegister& src0, const CPURegister& src1,
2060 const CPURegister& src2, const CPURegister& src3);
2061 void PopHelper(int count, int size,
2062 const CPURegister& dst0, const CPURegister& dst1,
2063 const CPURegister& dst2, const CPURegister& dst3);
2064
2065 // Call Printf. On a native build, a simple call will be generated, but if the
2066 // simulator is being used then a suitable pseudo-instruction is used. The
2067 // arguments and stack (csp) must be prepared by the caller as for a normal
2068 // AAPCS64 call to 'printf'.
2069 //
2070 // The 'args' argument should point to an array of variable arguments in their
2071 // proper PCS registers (and in calling order). The argument registers can
2072 // have mixed types. The format string (x0) should not be included.
2073 void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
2074
2075 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
2076 void InNewSpace(Register object,
2077 Condition cond, // eq for new space, ne otherwise.
2078 Label* branch);
2079
2080 // Try to represent a double as an int so that integer fast-paths may be
2081 // used. Not every valid integer value is guaranteed to be caught.
2082 // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
2083 // is a W or X register.
2084 //
2085 // This does not distinguish between +0 and -0, so if this distinction is
2086 // important it must be checked separately.
2087 //
2088 // On output the Z flag is set if the operation was successful.
2089 void TryRepresentDoubleAsInt(Register as_int,
2090 FPRegister value,
2091 FPRegister scratch_d,
2092 Label* on_successful_conversion = NULL,
2093 Label* on_failed_conversion = NULL);
2094
2095 bool generating_stub_;
2096 #if DEBUG
2097 // Tell whether any of the macro instructions can be used. When false the
2098 // MacroAssembler will assert if a method which can emit a variable number
2099 // of instructions is called.
2100 bool allow_macro_instructions_;
2101 #endif
2102 bool has_frame_;
2103
2104 // The Abort method should call a V8 runtime function, but the CallRuntime
2105 // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
2106 // use a simpler abort mechanism that doesn't depend on CEntryStub.
2107 //
2108 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
2109 // being generated.
2110 bool use_real_aborts_;
2111
2112 // This handle will be patched with the code object on installation.
2113 Handle<Object> code_object_;
2114
2115 // The register to use as a stack pointer for stack operations.
2116 Register sp_;
2117
2118 // Scratch registers available for use by the MacroAssembler.
2119 CPURegList tmp_list_;
2120 CPURegList fptmp_list_;
2121
2122 void InitializeNewString(Register string,
2123 Register length,
2124 Heap::RootListIndex map_index,
2125 Register scratch1,
2126 Register scratch2);
2127
2128 public:
2129 // Far branch resolution.
2130 //
2131 // The various classes of branch instructions with immediate offsets have
2132 // different ranges. While the Assembler will fail to assemble a branch
2133 // exceeding its range, the MacroAssembler offers a mechanism to resolve
2134 // branches to too distant targets, either by tweaking the generated code to
2135 // use branch instructions with wider ranges or generating veneers.
2136 //
2137 // Currently branches to distant targets are resolved using unconditional
2138 // branch instructions with a range of +-128MB. If that becomes too little
2139 // (!), the mechanism can be extended to generate special veneers for really
2140 // far targets.
2141
2142 // Helps resolve branching to labels potentially out of range.
2143 // If the label is not bound, it registers the information necessary to later
2144 // be able to emit a veneer for this branch if necessary.
2145 // If the label is bound, it returns true if the label (or the previous link
2146 // in the label chain) is out of range. In that case the caller is responsible
2147 // for generating appropriate code.
2148 // Otherwise it returns false.
2149 // This function also checks whether veneers need to be emitted.
2150 bool NeedExtraInstructionsOrRegisterBranch(Label *label,
2151 ImmBranchType branch_type);
2152 };
2153
2154
2155 // Use this scope when you need a one-to-one mapping between methods and
2156 // instructions. This scope prevents the MacroAssembler from being called and
2157 // literal pools from being emitted. It also asserts that the number of
2158 // instructions emitted is what you specified when creating the scope.
2159 class InstructionAccurateScope BASE_EMBEDDED {
2160 public:
2161 explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
2162 : masm_(masm)
2163 #ifdef DEBUG
2164 ,
2165 size_(count * kInstructionSize)
2166 #endif
2167 {
2168 // Before blocking the const pool, see if it needs to be emitted.
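// Pools cannot be emitted while they are blocked, and a pool emitted inside
// the scope would also throw off the instruction count that the destructor
// checks, so any pending pool is flushed up front.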
2169 masm_->CheckConstPool(false, true); 2170 masm_->CheckVeneerPool(false, true); 2171 2172 masm_->StartBlockPools(); 2173 #ifdef DEBUG 2174 if (count != 0) { 2175 masm_->bind(&start_); 2176 } 2177 previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); 2178 masm_->set_allow_macro_instructions(false); 2179 #endif 2180 } 2181 2182 ~InstructionAccurateScope() { 2183 masm_->EndBlockPools(); 2184 #ifdef DEBUG 2185 if (start_.is_bound()) { 2186 DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_); 2187 } 2188 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); 2189 #endif 2190 } 2191 2192 private: 2193 MacroAssembler* masm_; 2194 #ifdef DEBUG 2195 size_t size_; 2196 Label start_; 2197 bool previous_allow_macro_instructions_; 2198 #endif 2199 }; 2200 2201 2202 // This scope utility allows scratch registers to be managed safely. The 2203 // MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch 2204 // registers. These registers can be allocated on demand, and will be returned 2205 // at the end of the scope. 2206 // 2207 // When the scope ends, the MacroAssembler's lists will be restored to their 2208 // original state, even if the lists were modified by some other means. 2209 class UseScratchRegisterScope { 2210 public: 2211 explicit UseScratchRegisterScope(MacroAssembler* masm) 2212 : available_(masm->TmpList()), 2213 availablefp_(masm->FPTmpList()), 2214 old_available_(available_->list()), 2215 old_availablefp_(availablefp_->list()) { 2216 DCHECK(available_->type() == CPURegister::kRegister); 2217 DCHECK(availablefp_->type() == CPURegister::kFPRegister); 2218 } 2219 2220 ~UseScratchRegisterScope(); 2221 2222 // Take a register from the appropriate temps list. It will be returned 2223 // automatically when the scope ends. 2224 Register AcquireW() { return AcquireNextAvailable(available_).W(); } 2225 Register AcquireX() { return AcquireNextAvailable(available_).X(); } 2226 FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); } 2227 FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); } 2228 2229 Register UnsafeAcquire(const Register& reg) { 2230 return Register(UnsafeAcquire(available_, reg)); 2231 } 2232 2233 Register AcquireSameSizeAs(const Register& reg); 2234 FPRegister AcquireSameSizeAs(const FPRegister& reg); 2235 2236 private: 2237 static CPURegister AcquireNextAvailable(CPURegList* available); 2238 static CPURegister UnsafeAcquire(CPURegList* available, 2239 const CPURegister& reg); 2240 2241 // Available scratch registers. 2242 CPURegList* available_; // kRegister 2243 CPURegList* availablefp_; // kFPRegister 2244 2245 // The state of the available lists at the start of this scope. 2246 RegList old_available_; // kRegister 2247 RegList old_availablefp_; // kFPRegister 2248 }; 2249 2250 2251 inline MemOperand ContextMemOperand(Register context, int index = 0) { 2252 return MemOperand(context, Context::SlotOffset(index)); 2253 } 2254 2255 inline MemOperand NativeContextMemOperand() { 2256 return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX); 2257 } 2258 2259 2260 // Encode and decode information about patchable inline SMI checks. 
2261 class InlineSmiCheckInfo {
2262 public:
2263 explicit InlineSmiCheckInfo(Address info);
2264
2265 bool HasSmiCheck() const {
2266 return smi_check_ != NULL;
2267 }
2268
2269 const Register& SmiRegister() const {
2270 return reg_;
2271 }
2272
2273 Instruction* SmiCheck() const {
2274 return smi_check_;
2275 }
2276
2277 // Use MacroAssembler::InlineData to emit information about patchable inline
2278 // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'smi_check' to
2279 // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
2280 //
2281 // The generated patch information can be read using the InlineSmiCheckInfo
2282 // class.
2283 static void Emit(MacroAssembler* masm, const Register& reg,
2284 const Label* smi_check);
2285
2286 // Emit information to indicate that there is no inline SMI check.
2287 static void EmitNotInlined(MacroAssembler* masm) {
2288 Label unbound;
2289 Emit(masm, NoReg, &unbound);
2290 }
2291
2292 private:
2293 Register reg_;
2294 Instruction* smi_check_;
2295
2296 // Fields in the data encoded by InlineData.
2297
2298 // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
2299 // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
2300 // used in a patchable check. The Emit() method checks this.
2301 //
2302 // Note that the total size of the fields is restricted by the underlying
2303 // storage size handled by the BitField class, which is a uint32_t.
2304 class RegisterBits : public BitField<unsigned, 0, 5> {};
2305 class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
2306 };
2307
2308 } // namespace internal
2309 } // namespace v8
2310
2311 #ifdef GENERATED_CODE_COVERAGE
2312 #error "Unsupported option"
2313 #define CODE_COVERAGE_STRINGIFY(x) #x
2314 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2315 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2316 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2317 #else
2318 #define ACCESS_MASM(masm) masm->
2319 #endif
2320
2321 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
2322