// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"

namespace v8 {
namespace internal {

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __


MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
                               unsigned buffer_size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
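      // For instance (an illustrative value, not from the original comments):
      // an AND with #0x1234 has no valid bitmask-immediate encoding, so the
      // value is first materialized in a scratch register (here a single
      // movz), possibly as a pre-shifted operand, and the logical operation
      // is then emitted on that register.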
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // Generic immediate case. Imm will be represented by
    // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
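    // Illustrative example (values chosen here, not part of the original
    // comments): for imm == 0x0000cafe00001234 the halfwords are
    // [0x0000, 0xcafe, 0x0000, 0x1234], so this loop emits
    //   movz temp, #0x1234
    //   movk temp, #0xcafe, lsl #32
    // and the zero halfwords are skipped. With mostly-0xffff constants,
    // invert_move selects movn for the first halfword instead.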
    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    DCHECK(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same
    // W registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
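    // For example (illustrative operand): with something like (w1, UXTB) the
    // extend is emitted into rd first, and rd is then inverted in place with
    // a register-register mvn.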
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());
  DCHECK((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, imm);
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
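    // E.g. (illustrative) a shifted register such as Operand(x2, LSL, 1) or
    // an extended register: csel only takes plain registers, so the operand
    // is materialized into a scratch register first.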
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't
    // write to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.SizeInBits();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;

    // Pre-shift the immediate to the most-significant bits of the register. We
    // insert set bits in the least-significant bits, as this creates a
    // different immediate that may be encodable using movn or orr-immediate.
    // If this new immediate is encodable, the set bits will be eliminated by
    // the post shift on the following instruction.
    int shift_high = CountLeadingZeros(imm, reg_size);
    // Use a 64-bit one here: a plain (1 << shift_high) would overflow int for
    // shift amounts of 32 or more.
    int64_t imm_high = (imm << shift_high) | ((1ULL << shift_high) - 1);

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      // Use the generic move operation to set up the immediate.
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    DCHECK(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
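    // For reference: the scaled (unsigned-offset) form encodes a 12-bit
    // unsigned immediate scaled by the access size (e.g. 0..32760 for 8-byte
    // accesses), and the unscaled (ldur/stur) form encodes a signed 9-bit
    // offset in the range [-256, 255]. Anything else falls through to here.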
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}

void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    if (r.IsHeapObject()) {
      AssertNotSmi(rt);
    } else if (r.IsSmi()) {
      AssertSmi(rt);
    }
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
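  // For reference, the architectural immediate branch ranges are roughly
  // +/-1MB for conditional and compare-and-branch forms (b.cond, cbz/cbnz),
  // +/-32KB for test-and-branch (tbz/tbnz), and +/-128MB for unconditional b.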
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}


void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  DCHECK(hint == kAdrFar);
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
  }
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
    b(&done, NegateCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  DCHECK(allow_macro_instructions_);
  DCHECK(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3,
                         const CPURegister& dst4, const CPURegister& dst5,
                         const CPURegister& dst6, const CPURegister& dst7) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(dst0.IsValid());

  int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(4, size, dst0, dst1, dst2, dst3);
  PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
  PopPostamble(count, size);
}


void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
  int size = src0.SizeInBytes() + src1.SizeInBytes();

  PushPreamble(size);
  // Reserve room for src0 and push src1.
  str(src1, MemOperand(StackPointer(), -size, PreIndex));
  // Fill the gap with src0.
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}


void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  masm_->PopPostamble(size_);
  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer
  // is csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
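  // For example (illustrative): pushing four W registers writes exactly
  // 16 bytes per iteration, so csp stays 16-byte aligned throughout.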
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
  PopPostamble(registers.Count(), size);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  DCHECK(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
  DCHECK(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      DCHECK(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      DCHECK(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      DCHECK(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      DCHECK(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    SyncSystemStackPointer();
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  DCHECK(AreSameSizeAndType(src1, src2));
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  DCHECK(AreSameSizeAndType(dst1, dst2));
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);    // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
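  // For reference: AAPCS64 treats x19-x28 and the low 64 bits of d8-d15 as
  // callee-saved; x29 (fp) and x30 (lr) are restored here as well, and x28 is
  // used by V8 as jssp.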
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);    // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer())) {
      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
      // can't check the alignment of csp without using a scratch register (or
      // clobbering the flags), but the processor (or simulator) will abort if
      // it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);  // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);  // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        // Restore StackPointer().
        sub(StackPointer(), csp, StackPointer());
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}


void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings overridden by ConfigureFPCR():
    // - Assert that default-NaN mode is set.
    Tbz(fpcr, DN_offset, &unexpected_mode);

    // Settings left to their default values:
    // - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    // - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}


void MacroAssembler::ConfigureFPCR() {
  UseScratchRegisterScope temps(this);
  Register fpcr = temps.AcquireX();
  Mrs(fpcr, FPCR);

  // If necessary, enable default-NaN mode. The default values of the other
  // FPCR options should be suitable, and AssertFPCRState will verify that.
  Label no_write_required;
  Tbnz(fpcr, DN_offset, &no_write_required);

  Orr(fpcr, fpcr, DN_mask);
  Msr(FPCR, fpcr);

  Bind(&no_write_required);
  AssertFPCRState(fpcr);
}


void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
  // for NaNs, which become the default NaN. We use fsub rather than fadd
  // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  Ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  Ldr(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
  Cmp(scratch1,
      Operand(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
  B(cond, branch);
}


void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
    Check(eq, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
    Check(eq, kOperandIsNotABoundFunction);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertPositiveOrZero(Register value) {
  if (emit_debug_code()) {
    Label done;
    int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
    Tbz(value, sign_bit, &done);
    Abort(kUnexpectedNegativeValue);
    Bind(&done);
  }
}


void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Fake a parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  LoadNativeContextSlot(native_context_index, x1);
  InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): Most runtime routines don't need the number of
    // arguments passed in because it is constant. At some point we
    // should remove this need and make the runtime routine entry code
    // smarter.
    Mov(x0, function->nargs);
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  DCHECK(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  DCHECK(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
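  // For reference: AAPCS64 passes the first eight integer arguments in x0-x7
  // and the first eight floating-point arguments in d0-d7.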
1812 DCHECK(num_of_reg_args <= 8); 1813 1814 // If we're passing doubles, we're limited to the following prototypes 1815 // (defined by ExternalReference::Type): 1816 // BUILTIN_COMPARE_CALL: int f(double, double) 1817 // BUILTIN_FP_FP_CALL: double f(double, double) 1818 // BUILTIN_FP_CALL: double f(double) 1819 // BUILTIN_FP_INT_CALL: double f(double, int) 1820 if (num_of_double_args > 0) { 1821 DCHECK(num_of_reg_args <= 1); 1822 DCHECK((num_of_double_args + num_of_reg_args) <= 2); 1823 } 1824 1825 1826 // If the stack pointer is not csp, we need to derive an aligned csp from the 1827 // current stack pointer. 1828 const Register old_stack_pointer = StackPointer(); 1829 if (!csp.Is(old_stack_pointer)) { 1830 AssertStackConsistency(); 1831 1832 int sp_alignment = ActivationFrameAlignment(); 1833 // The ABI mandates at least 16-byte alignment. 1834 DCHECK(sp_alignment >= 16); 1835 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment)); 1836 1837 // The current stack pointer is a callee saved register, and is preserved 1838 // across the call. 1839 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); 1840 1841 // Align and synchronize the system stack pointer with jssp. 1842 Bic(csp, old_stack_pointer, sp_alignment - 1); 1843 SetStackPointer(csp); 1844 } 1845 1846 // Call directly. The function called cannot cause a GC, or allow preemption, 1847 // so the return address in the link register stays correct. 1848 Call(function); 1849 1850 if (!csp.Is(old_stack_pointer)) { 1851 if (emit_debug_code()) { 1852 // Because the stack pointer must be aligned on a 16-byte boundary, the 1853 // aligned csp can be up to 12 bytes below the jssp. This is the case 1854 // where we only pushed one W register on top of an aligned jssp. 1855 UseScratchRegisterScope temps(this); 1856 Register temp = temps.AcquireX(); 1857 DCHECK(ActivationFrameAlignment() == 16); 1858 Sub(temp, csp, old_stack_pointer); 1859 // We want temp <= 0 && temp >= -12. 
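    // The Cmp/Ccmp pair below implements this range check: the second compare
    // only happens if the first one found temp <= 0, otherwise the flags are
    // forced to NFlag so that the final 'ge' check fails. Roughly:
    //   if ((csp - old_stack_pointer) > 0 || (csp - old_stack_pointer) < -12) {
    //     Abort(kTheStackWasCorruptedByMacroAssemblerCall);
    //   }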
1860 Cmp(temp, 0); 1861 Ccmp(temp, -12, NFlag, le); 1862 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); 1863 } 1864 SetStackPointer(old_stack_pointer); 1865 } 1866 } 1867 1868 1869 void MacroAssembler::Jump(Register target) { 1870 Br(target); 1871 } 1872 1873 1874 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, 1875 Condition cond) { 1876 if (cond == nv) return; 1877 UseScratchRegisterScope temps(this); 1878 Register temp = temps.AcquireX(); 1879 Label done; 1880 if (cond != al) B(NegateCondition(cond), &done); 1881 Mov(temp, Operand(target, rmode)); 1882 Br(temp); 1883 Bind(&done); 1884 } 1885 1886 1887 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, 1888 Condition cond) { 1889 DCHECK(!RelocInfo::IsCodeTarget(rmode)); 1890 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); 1891 } 1892 1893 1894 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, 1895 Condition cond) { 1896 DCHECK(RelocInfo::IsCodeTarget(rmode)); 1897 AllowDeferredHandleDereference embedding_raw_address; 1898 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); 1899 } 1900 1901 1902 void MacroAssembler::Call(Register target) { 1903 BlockPoolsScope scope(this); 1904 #ifdef DEBUG 1905 Label start_call; 1906 Bind(&start_call); 1907 #endif 1908 1909 Blr(target); 1910 1911 #ifdef DEBUG 1912 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); 1913 #endif 1914 } 1915 1916 1917 void MacroAssembler::Call(Label* target) { 1918 BlockPoolsScope scope(this); 1919 #ifdef DEBUG 1920 Label start_call; 1921 Bind(&start_call); 1922 #endif 1923 1924 Bl(target); 1925 1926 #ifdef DEBUG 1927 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); 1928 #endif 1929 } 1930 1931 1932 // MacroAssembler::CallSize is sensitive to changes in this function, as it 1933 // requires to know how many instructions are used to branch to the target. 1934 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { 1935 BlockPoolsScope scope(this); 1936 #ifdef DEBUG 1937 Label start_call; 1938 Bind(&start_call); 1939 #endif 1940 // Statement positions are expected to be recorded when the target 1941 // address is loaded. 1942 positions_recorder()->WriteRecordedPositions(); 1943 1944 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 1945 DCHECK(rmode != RelocInfo::NONE32); 1946 1947 UseScratchRegisterScope temps(this); 1948 Register temp = temps.AcquireX(); 1949 1950 if (rmode == RelocInfo::NONE64) { 1951 // Addresses are 48 bits so we never need to load the upper 16 bits. 1952 uint64_t imm = reinterpret_cast<uint64_t>(target); 1953 // If we don't use ARM tagged addresses, the 16 higher bits must be 0. 
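  // The fixed-length movz/movk sequence below builds the 48-bit address
  // 16 bits at a time; keeping the sequence length constant is what makes
  // CallSize (kCallSizeWithoutRelocation) exact for this path.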
1954 DCHECK(((imm >> 48) & 0xffff) == 0); 1955 movz(temp, (imm >> 0) & 0xffff, 0); 1956 movk(temp, (imm >> 16) & 0xffff, 16); 1957 movk(temp, (imm >> 32) & 0xffff, 32); 1958 } else { 1959 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode)); 1960 } 1961 Blr(temp); 1962 #ifdef DEBUG 1963 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); 1964 #endif 1965 } 1966 1967 1968 void MacroAssembler::Call(Handle<Code> code, 1969 RelocInfo::Mode rmode, 1970 TypeFeedbackId ast_id) { 1971 #ifdef DEBUG 1972 Label start_call; 1973 Bind(&start_call); 1974 #endif 1975 1976 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) { 1977 SetRecordedAstId(ast_id); 1978 rmode = RelocInfo::CODE_TARGET_WITH_ID; 1979 } 1980 1981 AllowDeferredHandleDereference embedding_raw_address; 1982 Call(reinterpret_cast<Address>(code.location()), rmode); 1983 1984 #ifdef DEBUG 1985 // Check the size of the code generated. 1986 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id)); 1987 #endif 1988 } 1989 1990 1991 int MacroAssembler::CallSize(Register target) { 1992 USE(target); 1993 return kInstructionSize; 1994 } 1995 1996 1997 int MacroAssembler::CallSize(Label* target) { 1998 USE(target); 1999 return kInstructionSize; 2000 } 2001 2002 2003 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { 2004 USE(target); 2005 2006 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 2007 DCHECK(rmode != RelocInfo::NONE32); 2008 2009 if (rmode == RelocInfo::NONE64) { 2010 return kCallSizeWithoutRelocation; 2011 } else { 2012 return kCallSizeWithRelocation; 2013 } 2014 } 2015 2016 2017 int MacroAssembler::CallSize(Handle<Code> code, 2018 RelocInfo::Mode rmode, 2019 TypeFeedbackId ast_id) { 2020 USE(code); 2021 USE(ast_id); 2022 2023 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 2024 DCHECK(rmode != RelocInfo::NONE32); 2025 2026 if (rmode == RelocInfo::NONE64) { 2027 return kCallSizeWithoutRelocation; 2028 } else { 2029 return kCallSizeWithRelocation; 2030 } 2031 } 2032 2033 2034 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number, 2035 SmiCheckType smi_check_type) { 2036 Label on_not_heap_number; 2037 2038 if (smi_check_type == DO_SMI_CHECK) { 2039 JumpIfSmi(object, &on_not_heap_number); 2040 } 2041 2042 AssertNotSmi(object); 2043 2044 UseScratchRegisterScope temps(this); 2045 Register temp = temps.AcquireX(); 2046 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 2047 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number); 2048 2049 Bind(&on_not_heap_number); 2050 } 2051 2052 2053 void MacroAssembler::JumpIfNotHeapNumber(Register object, 2054 Label* on_not_heap_number, 2055 SmiCheckType smi_check_type) { 2056 if (smi_check_type == DO_SMI_CHECK) { 2057 JumpIfSmi(object, on_not_heap_number); 2058 } 2059 2060 AssertNotSmi(object); 2061 2062 UseScratchRegisterScope temps(this); 2063 Register temp = temps.AcquireX(); 2064 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 2065 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number); 2066 } 2067 2068 2069 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, 2070 FPRegister value, 2071 FPRegister scratch_d, 2072 Label* on_successful_conversion, 2073 Label* on_failed_conversion) { 2074 // Convert to an int and back again, then compare with the original value. 
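  // If the conversion is lossy (fractional part, out-of-range value, or NaN)
  // the round-tripped value compares not-equal (or unordered) to the input,
  // so 'eq' identifies doubles that convert exactly.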
2075 Fcvtzs(as_int, value); 2076 Scvtf(scratch_d, as_int); 2077 Fcmp(value, scratch_d); 2078 2079 if (on_successful_conversion) { 2080 B(on_successful_conversion, eq); 2081 } 2082 if (on_failed_conversion) { 2083 B(on_failed_conversion, ne); 2084 } 2085 } 2086 2087 2088 void MacroAssembler::TestForMinusZero(DoubleRegister input) { 2089 UseScratchRegisterScope temps(this); 2090 Register temp = temps.AcquireX(); 2091 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will 2092 // cause overflow. 2093 Fmov(temp, input); 2094 Cmp(temp, 1); 2095 } 2096 2097 2098 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, 2099 Label* on_negative_zero) { 2100 TestForMinusZero(input); 2101 B(vs, on_negative_zero); 2102 } 2103 2104 2105 void MacroAssembler::JumpIfMinusZero(Register input, 2106 Label* on_negative_zero) { 2107 DCHECK(input.Is64Bits()); 2108 // Floating point value is in an integer register. Detect -0.0 by subtracting 2109 // 1 (cmp), which will cause overflow. 2110 Cmp(input, 1); 2111 B(vs, on_negative_zero); 2112 } 2113 2114 2115 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { 2116 // Clamp the value to [0..255]. 2117 Cmp(input.W(), Operand(input.W(), UXTB)); 2118 // If input < input & 0xff, it must be < 0, so saturate to 0. 2119 Csel(output.W(), wzr, input.W(), lt); 2120 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255. 2121 Csel(output.W(), output.W(), 255, le); 2122 } 2123 2124 2125 void MacroAssembler::ClampInt32ToUint8(Register in_out) { 2126 ClampInt32ToUint8(in_out, in_out); 2127 } 2128 2129 2130 void MacroAssembler::ClampDoubleToUint8(Register output, 2131 DoubleRegister input, 2132 DoubleRegister dbl_scratch) { 2133 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types: 2134 // - Inputs lower than 0 (including -infinity) produce 0. 2135 // - Inputs higher than 255 (including +infinity) produce 255. 2136 // Also, it seems that PIXEL types use round-to-nearest rather than 2137 // round-towards-zero. 2138 2139 // Squash +infinity before the conversion, since Fcvtnu will normally 2140 // convert it to 0. 2141 Fmov(dbl_scratch, 255); 2142 Fmin(dbl_scratch, dbl_scratch, input); 2143 2144 // Convert double to unsigned integer. Values less than zero become zero. 2145 // Values greater than 255 have already been clamped to 255. 2146 Fcvtnu(output, dbl_scratch); 2147 } 2148 2149 2150 void MacroAssembler::CopyBytes(Register dst, 2151 Register src, 2152 Register length, 2153 Register scratch, 2154 CopyHint hint) { 2155 UseScratchRegisterScope temps(this); 2156 Register tmp1 = temps.AcquireX(); 2157 Register tmp2 = temps.AcquireX(); 2158 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2)); 2159 DCHECK(!AreAliased(src, dst, csp)); 2160 2161 if (emit_debug_code()) { 2162 // Check copy length. 2163 Cmp(length, 0); 2164 Assert(ge, kUnexpectedNegativeValue); 2165 2166 // Check src and dst buffers don't overlap. 2167 Add(scratch, src, length); // Calculate end of src buffer. 2168 Cmp(scratch, dst); 2169 Add(scratch, dst, length); // Calculate end of dst buffer. 
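    // In effect this asserts (src + length <= dst) || (dst + length <= src):
    // the Ccmp below only performs its comparison when the first check failed
    // (src_end > dst); otherwise it forces the Z flag so the 'le' assertion
    // passes.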
    Ccmp(scratch, src, ZFlag, gt);
    Assert(le, kCopyBuffersOverlap);
  }

  Label short_copy, short_loop, bulk_loop, done;

  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
    Register bulk_length = scratch;
    int pair_size = 2 * kXRegSize;
    int pair_mask = pair_size - 1;

    Bic(bulk_length, length, pair_mask);
    Cbz(bulk_length, &short_copy);
    Bind(&bulk_loop);
    Sub(bulk_length, bulk_length, pair_size);
    Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
    Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
    Cbnz(bulk_length, &bulk_loop);

    And(length, length, pair_mask);
  }

  Bind(&short_copy);
  Cbz(length, &done);
  Bind(&short_loop);
  Sub(length, length, 1);
  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
  Strb(tmp1, MemOperand(dst, 1, PostIndex));
  Cbnz(length, &short_loop);


  Bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
                                                Register end_address,
                                                Register filler) {
  DCHECK(!current_address.Is(csp));
  UseScratchRegisterScope temps(this);
  Register distance_in_words = temps.AcquireX();
  Label done;

  // Calculate the distance. If it's <= zero then there's nothing to do.
  Subs(distance_in_words, end_address, current_address);
  B(le, &done);

  // There's at least one field to fill, so do this unconditionally.
  Str(filler, MemOperand(current_address));

  // If distance_in_words consists of an odd number of words we advance
  // current_address by one word, otherwise the pairs loop will overwrite the
  // field that was stored above.
  And(distance_in_words, distance_in_words, kPointerSize);
  Add(current_address, current_address, distance_in_words);

  // Store filler to memory in pairs.
  Label loop, entry;
  B(&entry);
  Bind(&loop);
  Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
  Bind(&entry);
  Cmp(current_address, end_address);
  B(lo, &loop);

  Bind(&done);
}


void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure, SmiCheckType smi_check) {
  if (smi_check == DO_SMI_CHECK) {
    JumpIfEitherSmi(first, second, failure);
  } else if (emit_debug_code()) {
    DCHECK(smi_check == DONT_DO_SMI_CHECK);
    Label not_smi;
    JumpIfEitherSmi(first, second, NULL, &not_smi);

    // At least one input is a smi, but the flags indicated a smi check wasn't
    // needed.
    Abort(kUnexpectedSmi);

    Bind(&not_smi);
  }

  // Test that both first and second are sequential one-byte strings.
2257 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); 2258 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); 2259 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); 2260 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); 2261 2262 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1, 2263 scratch2, failure); 2264 } 2265 2266 2267 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte( 2268 Register first, Register second, Register scratch1, Register scratch2, 2269 Label* failure) { 2270 DCHECK(!AreAliased(scratch1, second)); 2271 DCHECK(!AreAliased(scratch1, scratch2)); 2272 const int kFlatOneByteStringMask = 2273 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 2274 const int kFlatOneByteStringTag = 2275 kStringTag | kOneByteStringTag | kSeqStringTag; 2276 And(scratch1, first, kFlatOneByteStringMask); 2277 And(scratch2, second, kFlatOneByteStringMask); 2278 Cmp(scratch1, kFlatOneByteStringTag); 2279 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq); 2280 B(ne, failure); 2281 } 2282 2283 2284 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type, 2285 Register scratch, 2286 Label* failure) { 2287 const int kFlatOneByteStringMask = 2288 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 2289 const int kFlatOneByteStringTag = 2290 kStringTag | kOneByteStringTag | kSeqStringTag; 2291 And(scratch, type, kFlatOneByteStringMask); 2292 Cmp(scratch, kFlatOneByteStringTag); 2293 B(ne, failure); 2294 } 2295 2296 2297 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( 2298 Register first, Register second, Register scratch1, Register scratch2, 2299 Label* failure) { 2300 DCHECK(!AreAliased(first, second, scratch1, scratch2)); 2301 const int kFlatOneByteStringMask = 2302 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 2303 const int kFlatOneByteStringTag = 2304 kStringTag | kOneByteStringTag | kSeqStringTag; 2305 And(scratch1, first, kFlatOneByteStringMask); 2306 And(scratch2, second, kFlatOneByteStringMask); 2307 Cmp(scratch1, kFlatOneByteStringTag); 2308 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq); 2309 B(ne, failure); 2310 } 2311 2312 2313 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type, 2314 Label* not_unique_name) { 2315 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); 2316 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) { 2317 // continue 2318 // } else { 2319 // goto not_unique_name 2320 // } 2321 Tst(type, kIsNotStringMask | kIsNotInternalizedMask); 2322 Ccmp(type, SYMBOL_TYPE, ZFlag, ne); 2323 B(ne, not_unique_name); 2324 } 2325 2326 2327 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 2328 const ParameterCount& actual, 2329 Label* done, 2330 InvokeFlag flag, 2331 bool* definitely_mismatches, 2332 const CallWrapper& call_wrapper) { 2333 bool definitely_matches = false; 2334 *definitely_mismatches = false; 2335 Label regular_invoke; 2336 2337 // Check whether the expected and actual arguments count match. If not, 2338 // setup registers according to contract with ArgumentsAdaptorTrampoline: 2339 // x0: actual arguments count. 2340 // x1: function (passed through to callee). 2341 // x2: expected arguments count. 2342 2343 // The code below is made a lot easier because the calling code already sets 2344 // up actual and expected registers according to the contract if values are 2345 // passed in registers. 
  DCHECK(actual.is_immediate() || actual.reg().is(x0));
  DCHECK(expected.is_immediate() || expected.reg().is(x2));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    Mov(x0, actual.immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;

    } else {
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        // Set up x2 for the argument adaptor.
        Mov(x2, expected.immediate());
      }
    }

  } else {  // expected is a register.
    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    Mov(x0, actual_op);
    // If actual == expected perform a regular invocation.
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);
  }

  // If the argument counts may mismatch, generate a call to the argument
  // adaptor.
  if (!definitely_matches) {
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        // If the arg counts don't match, no extra code is emitted by
        // MAsm::InvokeFunctionCode and we can just fall through.
        B(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
  }
  Bind(&regular_invoke);
}


void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
                                             const ParameterCount& expected,
                                             const ParameterCount& actual) {
  Label skip_flooding;
  ExternalReference step_in_enabled =
      ExternalReference::debug_step_in_enabled_address(isolate());
  Mov(x4, Operand(step_in_enabled));
  ldrb(x4, MemOperand(x4));
  CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
  {
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_flooding);
}


void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag,
                                        const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function.is(x1));
  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));

  FloodFunctionIfStepping(function, new_target, expected, actual);

  // Clear the new.target register if not given.
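  // Callees read new.target from x3; undefined there marks an ordinary
  // (non-construct) call.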
2457 if (!new_target.is_valid()) { 2458 LoadRoot(x3, Heap::kUndefinedValueRootIndex); 2459 } 2460 2461 Label done; 2462 bool definitely_mismatches = false; 2463 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches, 2464 call_wrapper); 2465 2466 // If we are certain that actual != expected, then we know InvokePrologue will 2467 // have handled the call through the argument adaptor mechanism. 2468 // The called function expects the call kind in x5. 2469 if (!definitely_mismatches) { 2470 // We call indirectly through the code field in the function to 2471 // allow recompilation to take effect without changing any of the 2472 // call sites. 2473 Register code = x4; 2474 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 2475 if (flag == CALL_FUNCTION) { 2476 call_wrapper.BeforeCall(CallSize(code)); 2477 Call(code); 2478 call_wrapper.AfterCall(); 2479 } else { 2480 DCHECK(flag == JUMP_FUNCTION); 2481 Jump(code); 2482 } 2483 } 2484 2485 // Continue here if InvokePrologue does handle the invocation due to 2486 // mismatched parameter counts. 2487 Bind(&done); 2488 } 2489 2490 2491 void MacroAssembler::InvokeFunction(Register function, 2492 Register new_target, 2493 const ParameterCount& actual, 2494 InvokeFlag flag, 2495 const CallWrapper& call_wrapper) { 2496 // You can't call a function without a valid frame. 2497 DCHECK(flag == JUMP_FUNCTION || has_frame()); 2498 2499 // Contract with called JS functions requires that function is passed in x1. 2500 // (See FullCodeGenerator::Generate().) 2501 DCHECK(function.is(x1)); 2502 2503 Register expected_reg = x2; 2504 2505 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); 2506 // The number of arguments is stored as an int32_t, and -1 is a marker 2507 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign 2508 // extension to correctly handle it. 2509 Ldr(expected_reg, FieldMemOperand(function, 2510 JSFunction::kSharedFunctionInfoOffset)); 2511 Ldrsw(expected_reg, 2512 FieldMemOperand(expected_reg, 2513 SharedFunctionInfo::kFormalParameterCountOffset)); 2514 2515 ParameterCount expected(expected_reg); 2516 InvokeFunctionCode(function, new_target, expected, actual, flag, 2517 call_wrapper); 2518 } 2519 2520 2521 void MacroAssembler::InvokeFunction(Register function, 2522 const ParameterCount& expected, 2523 const ParameterCount& actual, 2524 InvokeFlag flag, 2525 const CallWrapper& call_wrapper) { 2526 // You can't call a function without a valid frame. 2527 DCHECK(flag == JUMP_FUNCTION || has_frame()); 2528 2529 // Contract with called JS functions requires that function is passed in x1. 2530 // (See FullCodeGenerator::Generate().) 2531 DCHECK(function.Is(x1)); 2532 2533 // Set up the context. 2534 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); 2535 2536 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper); 2537 } 2538 2539 2540 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, 2541 const ParameterCount& expected, 2542 const ParameterCount& actual, 2543 InvokeFlag flag, 2544 const CallWrapper& call_wrapper) { 2545 // Contract with called JS functions requires that function is passed in x1. 2546 // (See FullCodeGenerator::Generate().) 2547 __ LoadObject(x1, function); 2548 InvokeFunction(x1, expected, actual, flag, call_wrapper); 2549 } 2550 2551 2552 void MacroAssembler::TryConvertDoubleToInt64(Register result, 2553 DoubleRegister double_input, 2554 Label* done) { 2555 // Try to convert with an FPU convert instruction. 
  // It's trivial to compute the modulo operation on an integer register so we
  // convert to a 64-bit integer.
  //
  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
  // when the double is out of range. NaNs and infinities will be converted to 0
  // (as ECMA-262 requires).
  Fcvtzs(result.X(), double_input);

  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
  // representable using a double, so if the result is one of those then we know
  // that saturation occurred, and we need to manually handle the conversion.
  //
  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
  // 1 will cause signed overflow.
  Cmp(result.X(), 1);
  Ccmp(result.X(), -1, VFlag, vc);

  B(vc, done);
}


void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, double_input, &done);

  const Register old_stack_pointer = StackPointer();
  if (csp.Is(old_stack_pointer)) {
    // This currently only happens during compiler-unittest. If it arises
    // during regular code generation the DoubleToI stub should be updated to
    // cope with csp and have an extra parameter indicating which stack pointer
    // it should use.
    Push(jssp, xzr);  // Push xzr to keep csp 16-byte aligned.
    Mov(jssp, csp);
    SetStackPointer(jssp);
  }

  // If we fell through then the inline version didn't succeed, so call the
  // stub instead.
  Push(lr, double_input);

  DoubleToIStub stub(isolate(),
                     jssp,
                     result,
                     0,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber.

  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
  Pop(xzr, lr);  // xzr to drop the double input on the stack.

  if (csp.Is(old_stack_pointer)) {
    Mov(csp, jssp);
    SetStackPointer(csp);
    AssertStackConsistency();
    Pop(xzr, jssp);
  }

  Bind(&done);
}


void MacroAssembler::TruncateHeapNumberToI(Register result,
                                           Register object) {
  Label done;
  DCHECK(!result.is(object));
  DCHECK(jssp.Is(StackPointer()));

  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, fp_scratch, &done);

  // If we fell through then the inline version didn't succeed, so call the
  // stub instead.
  Push(lr);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber.
  Pop(lr);

  Bind(&done);
}


void MacroAssembler::StubPrologue() {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  __ Mov(temp, Smi::FromInt(StackFrame::STUB));
  // Compiled stubs don't age, and so they don't need the predictable code
  // ageing sequence.
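  // After the Push below the stub frame is, from higher to lower addresses:
  //   jssp[3] : lr
  //   jssp[2] : fp (caller's)
  //   jssp[1] : cp
  //   jssp[0] : STUB marker (smi)
  // fp is then adjusted so it points at the saved fp slot, matching the
  // standard frame layout.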
2654 __ Push(lr, fp, cp, temp); 2655 __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp); 2656 } 2657 2658 2659 void MacroAssembler::Prologue(bool code_pre_aging) { 2660 if (code_pre_aging) { 2661 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); 2662 __ EmitCodeAgeSequence(stub); 2663 } else { 2664 __ EmitFrameSetupForCodeAgePatching(); 2665 } 2666 } 2667 2668 2669 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) { 2670 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 2671 Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset)); 2672 Ldr(vector, 2673 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset)); 2674 } 2675 2676 2677 void MacroAssembler::EnterFrame(StackFrame::Type type, 2678 bool load_constant_pool_pointer_reg) { 2679 // Out-of-line constant pool not implemented on arm64. 2680 UNREACHABLE(); 2681 } 2682 2683 2684 void MacroAssembler::EnterFrame(StackFrame::Type type) { 2685 DCHECK(jssp.Is(StackPointer())); 2686 UseScratchRegisterScope temps(this); 2687 Register type_reg = temps.AcquireX(); 2688 Register code_reg = temps.AcquireX(); 2689 2690 Push(lr, fp, cp); 2691 Mov(type_reg, Smi::FromInt(type)); 2692 Mov(code_reg, Operand(CodeObject())); 2693 Push(type_reg, code_reg); 2694 // jssp[4] : lr 2695 // jssp[3] : fp 2696 // jssp[2] : cp 2697 // jssp[1] : type 2698 // jssp[0] : code object 2699 2700 // Adjust FP to point to saved FP. 2701 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); 2702 } 2703 2704 2705 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 2706 DCHECK(jssp.Is(StackPointer())); 2707 // Drop the execution stack down to the frame pointer and restore 2708 // the caller frame pointer and return address. 2709 Mov(jssp, fp); 2710 AssertStackConsistency(); 2711 Pop(fp, lr); 2712 } 2713 2714 2715 void MacroAssembler::ExitFramePreserveFPRegs() { 2716 PushCPURegList(kCallerSavedFP); 2717 } 2718 2719 2720 void MacroAssembler::ExitFrameRestoreFPRegs() { 2721 // Read the registers from the stack without popping them. The stack pointer 2722 // will be reset as part of the unwinding process. 2723 CPURegList saved_fp_regs = kCallerSavedFP; 2724 DCHECK(saved_fp_regs.Count() % 2 == 0); 2725 2726 int offset = ExitFrameConstants::kLastExitFrameField; 2727 while (!saved_fp_regs.IsEmpty()) { 2728 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); 2729 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); 2730 offset -= 2 * kDRegSize; 2731 Ldp(dst1, dst0, MemOperand(fp, offset)); 2732 } 2733 } 2734 2735 2736 void MacroAssembler::EnterExitFrame(bool save_doubles, 2737 const Register& scratch, 2738 int extra_space) { 2739 DCHECK(jssp.Is(StackPointer())); 2740 2741 // Set up the new stack frame. 2742 Mov(scratch, Operand(CodeObject())); 2743 Push(lr, fp); 2744 Mov(fp, StackPointer()); 2745 Push(xzr, scratch); 2746 // fp[8]: CallerPC (lr) 2747 // fp -> fp[0]: CallerFP (old fp) 2748 // fp[-8]: Space reserved for SPOffset. 2749 // jssp -> fp[-16]: CodeObject() 2750 STATIC_ASSERT((2 * kPointerSize) == 2751 ExitFrameConstants::kCallerSPDisplacement); 2752 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset); 2753 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset); 2754 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset); 2755 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset); 2756 2757 // Save the frame pointer and context pointer in the top frame. 
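  // These per-isolate slots are how the C++ runtime locates the innermost
  // exit frame and its context again after the call.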
2758 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, 2759 isolate()))); 2760 Str(fp, MemOperand(scratch)); 2761 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, 2762 isolate()))); 2763 Str(cp, MemOperand(scratch)); 2764 2765 STATIC_ASSERT((-2 * kPointerSize) == 2766 ExitFrameConstants::kLastExitFrameField); 2767 if (save_doubles) { 2768 ExitFramePreserveFPRegs(); 2769 } 2770 2771 // Reserve space for the return address and for user requested memory. 2772 // We do this before aligning to make sure that we end up correctly 2773 // aligned with the minimum of wasted space. 2774 Claim(extra_space + 1, kXRegSize); 2775 // fp[8]: CallerPC (lr) 2776 // fp -> fp[0]: CallerFP (old fp) 2777 // fp[-8]: Space reserved for SPOffset. 2778 // fp[-16]: CodeObject() 2779 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). 2780 // jssp[8]: Extra space reserved for caller (if extra_space != 0). 2781 // jssp -> jssp[0]: Space reserved for the return address. 2782 2783 // Align and synchronize the system stack pointer with jssp. 2784 AlignAndSetCSPForFrame(); 2785 DCHECK(csp.Is(StackPointer())); 2786 2787 // fp[8]: CallerPC (lr) 2788 // fp -> fp[0]: CallerFP (old fp) 2789 // fp[-8]: Space reserved for SPOffset. 2790 // fp[-16]: CodeObject() 2791 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). 2792 // csp[8]: Memory reserved for the caller if extra_space != 0. 2793 // Alignment padding, if necessary. 2794 // csp -> csp[0]: Space reserved for the return address. 2795 2796 // ExitFrame::GetStateForFramePointer expects to find the return address at 2797 // the memory address immediately below the pointer stored in SPOffset. 2798 // It is not safe to derive much else from SPOffset, because the size of the 2799 // padding can vary. 2800 Add(scratch, csp, kXRegSize); 2801 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); 2802 } 2803 2804 2805 // Leave the current exit frame. 2806 void MacroAssembler::LeaveExitFrame(bool restore_doubles, 2807 const Register& scratch, 2808 bool restore_context) { 2809 DCHECK(csp.Is(StackPointer())); 2810 2811 if (restore_doubles) { 2812 ExitFrameRestoreFPRegs(); 2813 } 2814 2815 // Restore the context pointer from the top frame. 2816 if (restore_context) { 2817 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, 2818 isolate()))); 2819 Ldr(cp, MemOperand(scratch)); 2820 } 2821 2822 if (emit_debug_code()) { 2823 // Also emit debug code to clear the cp in the top frame. 2824 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, 2825 isolate()))); 2826 Str(xzr, MemOperand(scratch)); 2827 } 2828 // Clear the frame pointer from the top frame. 2829 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, 2830 isolate()))); 2831 Str(xzr, MemOperand(scratch)); 2832 2833 // Pop the exit frame. 2834 // fp[8]: CallerPC (lr) 2835 // fp -> fp[0]: CallerFP (old fp) 2836 // fp[...]: The rest of the frame. 
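  // Resetting jssp to fp drops everything the exit frame allocated (including
  // any alignment padding) in one step; only the caller's fp and lr remain to
  // be popped.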
2837 Mov(jssp, fp); 2838 SetStackPointer(jssp); 2839 AssertStackConsistency(); 2840 Pop(fp, lr); 2841 } 2842 2843 2844 void MacroAssembler::SetCounter(StatsCounter* counter, int value, 2845 Register scratch1, Register scratch2) { 2846 if (FLAG_native_code_counters && counter->Enabled()) { 2847 Mov(scratch1, value); 2848 Mov(scratch2, ExternalReference(counter)); 2849 Str(scratch1, MemOperand(scratch2)); 2850 } 2851 } 2852 2853 2854 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 2855 Register scratch1, Register scratch2) { 2856 DCHECK(value != 0); 2857 if (FLAG_native_code_counters && counter->Enabled()) { 2858 Mov(scratch2, ExternalReference(counter)); 2859 Ldr(scratch1, MemOperand(scratch2)); 2860 Add(scratch1, scratch1, value); 2861 Str(scratch1, MemOperand(scratch2)); 2862 } 2863 } 2864 2865 2866 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 2867 Register scratch1, Register scratch2) { 2868 IncrementCounter(counter, -value, scratch1, scratch2); 2869 } 2870 2871 2872 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 2873 if (context_chain_length > 0) { 2874 // Move up the chain of contexts to the context containing the slot. 2875 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); 2876 for (int i = 1; i < context_chain_length; i++) { 2877 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); 2878 } 2879 } else { 2880 // Slot is in the current function context. Move it into the 2881 // destination register in case we store into it (the write barrier 2882 // cannot be allowed to destroy the context in cp). 2883 Mov(dst, cp); 2884 } 2885 } 2886 2887 2888 void MacroAssembler::DebugBreak() { 2889 Mov(x0, 0); 2890 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate())); 2891 CEntryStub ces(isolate(), 1); 2892 DCHECK(AllowThisStubCall(&ces)); 2893 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT); 2894 } 2895 2896 2897 void MacroAssembler::PushStackHandler() { 2898 DCHECK(jssp.Is(StackPointer())); 2899 // Adjust this code if the asserts don't hold. 2900 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); 2901 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 2902 2903 // For the JSEntry handler, we must preserve the live registers x0-x4. 2904 // (See JSEntryStub::GenerateBody().) 2905 2906 // Link the current handler as the next handler. 2907 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); 2908 Ldr(x10, MemOperand(x11)); 2909 Push(x10); 2910 2911 // Set this new handler as the current one. 2912 Str(jssp, MemOperand(x11)); 2913 } 2914 2915 2916 void MacroAssembler::PopStackHandler() { 2917 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 2918 Pop(x10); 2919 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); 2920 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); 2921 Str(x10, MemOperand(x11)); 2922 } 2923 2924 2925 void MacroAssembler::Allocate(int object_size, 2926 Register result, 2927 Register scratch1, 2928 Register scratch2, 2929 Label* gc_required, 2930 AllocationFlags flags) { 2931 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); 2932 if (!FLAG_inline_new) { 2933 if (emit_debug_code()) { 2934 // Trash the registers to simulate an allocation failure. 2935 // We apply salt to the original zap value to easily spot the values. 
2936 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); 2937 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); 2938 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); 2939 } 2940 B(gc_required); 2941 return; 2942 } 2943 2944 UseScratchRegisterScope temps(this); 2945 Register scratch3 = temps.AcquireX(); 2946 2947 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3)); 2948 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); 2949 2950 // Make object size into bytes. 2951 if ((flags & SIZE_IN_WORDS) != 0) { 2952 object_size *= kPointerSize; 2953 } 2954 DCHECK(0 == (object_size & kObjectAlignmentMask)); 2955 2956 // Check relative positions of allocation top and limit addresses. 2957 // The values must be adjacent in memory to allow the use of LDP. 2958 ExternalReference heap_allocation_top = 2959 AllocationUtils::GetAllocationTopReference(isolate(), flags); 2960 ExternalReference heap_allocation_limit = 2961 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 2962 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); 2963 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); 2964 DCHECK((limit - top) == kPointerSize); 2965 2966 // Set up allocation top address and allocation limit registers. 2967 Register top_address = scratch1; 2968 Register alloc_limit = scratch2; 2969 Register result_end = scratch3; 2970 Mov(top_address, Operand(heap_allocation_top)); 2971 2972 if ((flags & RESULT_CONTAINS_TOP) == 0) { 2973 // Load allocation top into result and allocation limit into alloc_limit. 2974 Ldp(result, alloc_limit, MemOperand(top_address)); 2975 } else { 2976 if (emit_debug_code()) { 2977 // Assert that result actually contains top on entry. 2978 Ldr(alloc_limit, MemOperand(top_address)); 2979 Cmp(result, alloc_limit); 2980 Check(eq, kUnexpectedAllocationTop); 2981 } 2982 // Load allocation limit. Result already contains allocation top. 2983 Ldr(alloc_limit, MemOperand(top_address, limit - top)); 2984 } 2985 2986 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have 2987 // the same alignment on ARM64. 2988 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); 2989 2990 // Calculate new top and bail out if new space is exhausted. 2991 Adds(result_end, result, object_size); 2992 Ccmp(result_end, alloc_limit, CFlag, cc); 2993 B(hi, gc_required); 2994 Str(result_end, MemOperand(top_address)); 2995 2996 // Tag the object if requested. 2997 if ((flags & TAG_OBJECT) != 0) { 2998 ObjectTag(result, result); 2999 } 3000 } 3001 3002 3003 void MacroAssembler::Allocate(Register object_size, Register result, 3004 Register result_end, Register scratch, 3005 Label* gc_required, AllocationFlags flags) { 3006 if (!FLAG_inline_new) { 3007 if (emit_debug_code()) { 3008 // Trash the registers to simulate an allocation failure. 3009 // We apply salt to the original zap value to easily spot the values. 3010 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); 3011 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L); 3012 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L); 3013 } 3014 B(gc_required); 3015 return; 3016 } 3017 3018 UseScratchRegisterScope temps(this); 3019 Register scratch2 = temps.AcquireX(); 3020 3021 // |object_size| and |result_end| may overlap, other registers must not. 
3022 DCHECK(!AreAliased(object_size, result, scratch, scratch2)); 3023 DCHECK(!AreAliased(result_end, result, scratch, scratch2)); 3024 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() && 3025 result_end.Is64Bits()); 3026 3027 // Check relative positions of allocation top and limit addresses. 3028 // The values must be adjacent in memory to allow the use of LDP. 3029 ExternalReference heap_allocation_top = 3030 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3031 ExternalReference heap_allocation_limit = 3032 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3033 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); 3034 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); 3035 DCHECK((limit - top) == kPointerSize); 3036 3037 // Set up allocation top address and allocation limit registers. 3038 Register top_address = scratch; 3039 Register alloc_limit = scratch2; 3040 Mov(top_address, heap_allocation_top); 3041 3042 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3043 // Load allocation top into result and allocation limit into alloc_limit. 3044 Ldp(result, alloc_limit, MemOperand(top_address)); 3045 } else { 3046 if (emit_debug_code()) { 3047 // Assert that result actually contains top on entry. 3048 Ldr(alloc_limit, MemOperand(top_address)); 3049 Cmp(result, alloc_limit); 3050 Check(eq, kUnexpectedAllocationTop); 3051 } 3052 // Load allocation limit. Result already contains allocation top. 3053 Ldr(alloc_limit, MemOperand(top_address, limit - top)); 3054 } 3055 3056 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have 3057 // the same alignment on ARM64. 3058 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); 3059 3060 // Calculate new top and bail out if new space is exhausted 3061 if ((flags & SIZE_IN_WORDS) != 0) { 3062 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2)); 3063 } else { 3064 Adds(result_end, result, object_size); 3065 } 3066 3067 if (emit_debug_code()) { 3068 Tst(result_end, kObjectAlignmentMask); 3069 Check(eq, kUnalignedAllocationInNewSpace); 3070 } 3071 3072 Ccmp(result_end, alloc_limit, CFlag, cc); 3073 B(hi, gc_required); 3074 Str(result_end, MemOperand(top_address)); 3075 3076 // Tag the object if requested. 3077 if ((flags & TAG_OBJECT) != 0) { 3078 ObjectTag(result, result); 3079 } 3080 } 3081 3082 3083 void MacroAssembler::AllocateTwoByteString(Register result, 3084 Register length, 3085 Register scratch1, 3086 Register scratch2, 3087 Register scratch3, 3088 Label* gc_required) { 3089 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3)); 3090 // Calculate the number of bytes needed for the characters in the string while 3091 // observing object alignment. 3092 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3093 Add(scratch1, length, length); // Length in bytes, not chars. 3094 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); 3095 Bic(scratch1, scratch1, kObjectAlignmentMask); 3096 3097 // Allocate two-byte string in new space. 3098 Allocate(scratch1, 3099 result, 3100 scratch2, 3101 scratch3, 3102 gc_required, 3103 TAG_OBJECT); 3104 3105 // Set the map, length and hash field. 
3106 InitializeNewString(result, 3107 length, 3108 Heap::kStringMapRootIndex, 3109 scratch1, 3110 scratch2); 3111 } 3112 3113 3114 void MacroAssembler::AllocateOneByteString(Register result, Register length, 3115 Register scratch1, Register scratch2, 3116 Register scratch3, 3117 Label* gc_required) { 3118 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3)); 3119 // Calculate the number of bytes needed for the characters in the string while 3120 // observing object alignment. 3121 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3122 STATIC_ASSERT(kCharSize == 1); 3123 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); 3124 Bic(scratch1, scratch1, kObjectAlignmentMask); 3125 3126 // Allocate one-byte string in new space. 3127 Allocate(scratch1, 3128 result, 3129 scratch2, 3130 scratch3, 3131 gc_required, 3132 TAG_OBJECT); 3133 3134 // Set the map, length and hash field. 3135 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex, 3136 scratch1, scratch2); 3137 } 3138 3139 3140 void MacroAssembler::AllocateTwoByteConsString(Register result, 3141 Register length, 3142 Register scratch1, 3143 Register scratch2, 3144 Label* gc_required) { 3145 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, 3146 TAG_OBJECT); 3147 3148 InitializeNewString(result, 3149 length, 3150 Heap::kConsStringMapRootIndex, 3151 scratch1, 3152 scratch2); 3153 } 3154 3155 3156 void MacroAssembler::AllocateOneByteConsString(Register result, Register length, 3157 Register scratch1, 3158 Register scratch2, 3159 Label* gc_required) { 3160 Allocate(ConsString::kSize, 3161 result, 3162 scratch1, 3163 scratch2, 3164 gc_required, 3165 TAG_OBJECT); 3166 3167 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex, 3168 scratch1, scratch2); 3169 } 3170 3171 3172 void MacroAssembler::AllocateTwoByteSlicedString(Register result, 3173 Register length, 3174 Register scratch1, 3175 Register scratch2, 3176 Label* gc_required) { 3177 DCHECK(!AreAliased(result, length, scratch1, scratch2)); 3178 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, 3179 TAG_OBJECT); 3180 3181 InitializeNewString(result, 3182 length, 3183 Heap::kSlicedStringMapRootIndex, 3184 scratch1, 3185 scratch2); 3186 } 3187 3188 3189 void MacroAssembler::AllocateOneByteSlicedString(Register result, 3190 Register length, 3191 Register scratch1, 3192 Register scratch2, 3193 Label* gc_required) { 3194 DCHECK(!AreAliased(result, length, scratch1, scratch2)); 3195 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, 3196 TAG_OBJECT); 3197 3198 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex, 3199 scratch1, scratch2); 3200 } 3201 3202 3203 // Allocates a heap number or jumps to the need_gc label if the young space 3204 // is full and a scavenge is needed. 3205 void MacroAssembler::AllocateHeapNumber(Register result, 3206 Label* gc_required, 3207 Register scratch1, 3208 Register scratch2, 3209 CPURegister value, 3210 CPURegister heap_number_map, 3211 MutableMode mode) { 3212 DCHECK(!value.IsValid() || value.Is64Bits()); 3213 UseScratchRegisterScope temps(this); 3214 3215 // Allocate an object in the heap for the heap number and tag it as a heap 3216 // object. 3217 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, 3218 NO_ALLOCATION_FLAGS); 3219 3220 Heap::RootListIndex map_index = mode == MUTABLE 3221 ? 
Heap::kMutableHeapNumberMapRootIndex 3222 : Heap::kHeapNumberMapRootIndex; 3223 3224 // Prepare the heap number map. 3225 if (!heap_number_map.IsValid()) { 3226 // If we have a valid value register, use the same type of register to store 3227 // the map so we can use STP to store both in one instruction. 3228 if (value.IsValid() && value.IsFPRegister()) { 3229 heap_number_map = temps.AcquireD(); 3230 } else { 3231 heap_number_map = scratch1; 3232 } 3233 LoadRoot(heap_number_map, map_index); 3234 } 3235 if (emit_debug_code()) { 3236 Register map; 3237 if (heap_number_map.IsFPRegister()) { 3238 map = scratch1; 3239 Fmov(map, DoubleRegister(heap_number_map)); 3240 } else { 3241 map = Register(heap_number_map); 3242 } 3243 AssertRegisterIsRoot(map, map_index); 3244 } 3245 3246 // Store the heap number map and the value in the allocated object. 3247 if (value.IsSameSizeAndType(heap_number_map)) { 3248 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize == 3249 HeapNumber::kValueOffset); 3250 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset)); 3251 } else { 3252 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); 3253 if (value.IsValid()) { 3254 Str(value, MemOperand(result, HeapNumber::kValueOffset)); 3255 } 3256 } 3257 ObjectTag(result, result); 3258 } 3259 3260 3261 void MacroAssembler::JumpIfObjectType(Register object, 3262 Register map, 3263 Register type_reg, 3264 InstanceType type, 3265 Label* if_cond_pass, 3266 Condition cond) { 3267 CompareObjectType(object, map, type_reg, type); 3268 B(cond, if_cond_pass); 3269 } 3270 3271 3272 void MacroAssembler::AllocateJSValue(Register result, Register constructor, 3273 Register value, Register scratch1, 3274 Register scratch2, Label* gc_required) { 3275 DCHECK(!result.is(constructor)); 3276 DCHECK(!result.is(scratch1)); 3277 DCHECK(!result.is(scratch2)); 3278 DCHECK(!result.is(value)); 3279 3280 // Allocate JSValue in new space. 3281 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); 3282 3283 // Initialize the JSValue. 3284 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); 3285 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); 3286 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); 3287 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); 3288 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); 3289 Str(value, FieldMemOperand(result, JSValue::kValueOffset)); 3290 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); 3291 } 3292 3293 3294 void MacroAssembler::JumpIfNotObjectType(Register object, 3295 Register map, 3296 Register type_reg, 3297 InstanceType type, 3298 Label* if_not_object) { 3299 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne); 3300 } 3301 3302 3303 // Sets condition flags based on comparison, and returns type in type_reg. 3304 void MacroAssembler::CompareObjectType(Register object, 3305 Register map, 3306 Register type_reg, 3307 InstanceType type) { 3308 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 3309 CompareInstanceType(map, type_reg, type); 3310 } 3311 3312 3313 // Sets condition flags based on comparison, and returns type in type_reg. 
3314 void MacroAssembler::CompareInstanceType(Register map, 3315 Register type_reg, 3316 InstanceType type) { 3317 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 3318 Cmp(type_reg, type); 3319 } 3320 3321 3322 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) { 3323 UseScratchRegisterScope temps(this); 3324 Register obj_map = temps.AcquireX(); 3325 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset)); 3326 CompareRoot(obj_map, index); 3327 } 3328 3329 3330 void MacroAssembler::CompareObjectMap(Register obj, Register scratch, 3331 Handle<Map> map) { 3332 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 3333 CompareMap(scratch, map); 3334 } 3335 3336 3337 void MacroAssembler::CompareMap(Register obj_map, 3338 Handle<Map> map) { 3339 Cmp(obj_map, Operand(map)); 3340 } 3341 3342 3343 void MacroAssembler::CheckMap(Register obj, 3344 Register scratch, 3345 Handle<Map> map, 3346 Label* fail, 3347 SmiCheckType smi_check_type) { 3348 if (smi_check_type == DO_SMI_CHECK) { 3349 JumpIfSmi(obj, fail); 3350 } 3351 3352 CompareObjectMap(obj, scratch, map); 3353 B(ne, fail); 3354 } 3355 3356 3357 void MacroAssembler::CheckMap(Register obj, 3358 Register scratch, 3359 Heap::RootListIndex index, 3360 Label* fail, 3361 SmiCheckType smi_check_type) { 3362 if (smi_check_type == DO_SMI_CHECK) { 3363 JumpIfSmi(obj, fail); 3364 } 3365 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 3366 JumpIfNotRoot(scratch, index, fail); 3367 } 3368 3369 3370 void MacroAssembler::CheckMap(Register obj_map, 3371 Handle<Map> map, 3372 Label* fail, 3373 SmiCheckType smi_check_type) { 3374 if (smi_check_type == DO_SMI_CHECK) { 3375 JumpIfSmi(obj_map, fail); 3376 } 3377 3378 CompareMap(obj_map, map); 3379 B(ne, fail); 3380 } 3381 3382 3383 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1, 3384 Register scratch2, Handle<WeakCell> cell, 3385 Handle<Code> success, 3386 SmiCheckType smi_check_type) { 3387 Label fail; 3388 if (smi_check_type == DO_SMI_CHECK) { 3389 JumpIfSmi(obj, &fail); 3390 } 3391 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset)); 3392 CmpWeakValue(scratch1, cell, scratch2); 3393 B(ne, &fail); 3394 Jump(success, RelocInfo::CODE_TARGET); 3395 Bind(&fail); 3396 } 3397 3398 3399 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell, 3400 Register scratch) { 3401 Mov(scratch, Operand(cell)); 3402 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset)); 3403 Cmp(value, scratch); 3404 } 3405 3406 3407 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) { 3408 Mov(value, Operand(cell)); 3409 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset)); 3410 } 3411 3412 3413 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, 3414 Label* miss) { 3415 GetWeakValue(value, cell); 3416 JumpIfSmi(value, miss); 3417 } 3418 3419 3420 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { 3421 UseScratchRegisterScope temps(this); 3422 Register temp = temps.AcquireX(); 3423 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 3424 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 3425 Tst(temp, mask); 3426 } 3427 3428 3429 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) { 3430 // Load the map's "bit field 2". 3431 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset)); 3432 // Retrieve elements_kind from bit field 2. 
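  // DecodeField shifts and masks out the bits described by
  // Map::ElementsKindBits, leaving the elements kind as a small untagged
  // integer in |result|.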
3433 DecodeField<Map::ElementsKindBits>(result); 3434 } 3435 3436 3437 void MacroAssembler::GetMapConstructor(Register result, Register map, 3438 Register temp, Register temp2) { 3439 Label done, loop; 3440 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); 3441 Bind(&loop); 3442 JumpIfSmi(result, &done); 3443 CompareObjectType(result, temp, temp2, MAP_TYPE); 3444 B(ne, &done); 3445 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); 3446 B(&loop); 3447 Bind(&done); 3448 } 3449 3450 3451 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, 3452 Register scratch, Label* miss) { 3453 DCHECK(!AreAliased(function, result, scratch)); 3454 3455 // Get the prototype or initial map from the function. 3456 Ldr(result, 3457 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3458 3459 // If the prototype or initial map is the hole, don't return it and simply 3460 // miss the cache instead. This will allow us to allocate a prototype object 3461 // on-demand in the runtime system. 3462 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss); 3463 3464 // If the function does not have an initial map, we're done. 3465 Label done; 3466 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done); 3467 3468 // Get the prototype from the initial map. 3469 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3470 3471 // All done. 3472 Bind(&done); 3473 } 3474 3475 3476 void MacroAssembler::PushRoot(Heap::RootListIndex index) { 3477 UseScratchRegisterScope temps(this); 3478 Register temp = temps.AcquireX(); 3479 LoadRoot(temp, index); 3480 Push(temp); 3481 } 3482 3483 3484 void MacroAssembler::CompareRoot(const Register& obj, 3485 Heap::RootListIndex index) { 3486 UseScratchRegisterScope temps(this); 3487 Register temp = temps.AcquireX(); 3488 DCHECK(!AreAliased(obj, temp)); 3489 LoadRoot(temp, index); 3490 Cmp(obj, temp); 3491 } 3492 3493 3494 void MacroAssembler::JumpIfRoot(const Register& obj, 3495 Heap::RootListIndex index, 3496 Label* if_equal) { 3497 CompareRoot(obj, index); 3498 B(eq, if_equal); 3499 } 3500 3501 3502 void MacroAssembler::JumpIfNotRoot(const Register& obj, 3503 Heap::RootListIndex index, 3504 Label* if_not_equal) { 3505 CompareRoot(obj, index); 3506 B(ne, if_not_equal); 3507 } 3508 3509 3510 void MacroAssembler::CompareAndSplit(const Register& lhs, 3511 const Operand& rhs, 3512 Condition cond, 3513 Label* if_true, 3514 Label* if_false, 3515 Label* fall_through) { 3516 if ((if_true == if_false) && (if_false == fall_through)) { 3517 // Fall through. 3518 } else if (if_true == if_false) { 3519 B(if_true); 3520 } else if (if_false == fall_through) { 3521 CompareAndBranch(lhs, rhs, cond, if_true); 3522 } else if (if_true == fall_through) { 3523 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false); 3524 } else { 3525 CompareAndBranch(lhs, rhs, cond, if_true); 3526 B(if_false); 3527 } 3528 } 3529 3530 3531 void MacroAssembler::TestAndSplit(const Register& reg, 3532 uint64_t bit_pattern, 3533 Label* if_all_clear, 3534 Label* if_any_set, 3535 Label* fall_through) { 3536 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) { 3537 // Fall through. 
3538 } else if (if_all_clear == if_any_set) { 3539 B(if_all_clear); 3540 } else if (if_all_clear == fall_through) { 3541 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); 3542 } else if (if_any_set == fall_through) { 3543 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear); 3544 } else { 3545 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); 3546 B(if_all_clear); 3547 } 3548 } 3549 3550 3551 void MacroAssembler::CheckFastElements(Register map, 3552 Register scratch, 3553 Label* fail) { 3554 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); 3555 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); 3556 STATIC_ASSERT(FAST_ELEMENTS == 2); 3557 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); 3558 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 3559 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue); 3560 B(hi, fail); 3561 } 3562 3563 3564 void MacroAssembler::CheckFastObjectElements(Register map, 3565 Register scratch, 3566 Label* fail) { 3567 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); 3568 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); 3569 STATIC_ASSERT(FAST_ELEMENTS == 2); 3570 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); 3571 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 3572 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); 3573 // If cond==ls, set cond=hi, otherwise compare. 3574 Ccmp(scratch, 3575 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); 3576 B(hi, fail); 3577 } 3578 3579 3580 // Note: The ARM version of this clobbers elements_reg, but this version does 3581 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. 3582 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, 3583 Register key_reg, 3584 Register elements_reg, 3585 Register scratch1, 3586 FPRegister fpscratch1, 3587 Label* fail, 3588 int elements_offset) { 3589 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); 3590 Label store_num; 3591 3592 // Speculatively convert the smi to a double - all smis can be exactly 3593 // represented as a double. 3594 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); 3595 3596 // If value_reg is a smi, we're done. 3597 JumpIfSmi(value_reg, &store_num); 3598 3599 // Ensure that the object is a heap number. 3600 JumpIfNotHeapNumber(value_reg, fail); 3601 3602 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); 3603 3604 // Canonicalize NaNs. 3605 CanonicalizeNaN(fpscratch1); 3606 3607 // Store the result. 3608 Bind(&store_num); 3609 Add(scratch1, elements_reg, 3610 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); 3611 Str(fpscratch1, 3612 FieldMemOperand(scratch1, 3613 FixedDoubleArray::kHeaderSize - elements_offset)); 3614 } 3615 3616 3617 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 3618 return has_frame_ || !stub->SometimesSetsUpAFrame(); 3619 } 3620 3621 3622 void MacroAssembler::IndexFromHash(Register hash, Register index) { 3623 // If the hash field contains an array index pick it out. The assert checks 3624 // that the constants for the maximum number of digits for an array index 3625 // cached in the hash field and the number of bits reserved for it does not 3626 // conflict. 
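// For example, if kMaxCachedArrayIndexLength were 7 and kArrayIndexValueBits
// were 24, the check below would amount to 10^7 = 10000000 < 2^24 = 16777216.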
3627 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < 3628 (1 << String::kArrayIndexValueBits)); 3629 DecodeField<String::ArrayIndexValueBits>(index, hash); 3630 SmiTag(index, index); 3631 } 3632 3633 3634 void MacroAssembler::EmitSeqStringSetCharCheck( 3635 Register string, 3636 Register index, 3637 SeqStringSetCharCheckIndexType index_type, 3638 Register scratch, 3639 uint32_t encoding_mask) { 3640 DCHECK(!AreAliased(string, index, scratch)); 3641 3642 if (index_type == kIndexIsSmi) { 3643 AssertSmi(index); 3644 } 3645 3646 // Check that string is an object. 3647 AssertNotSmi(string, kNonObject); 3648 3649 // Check that string has an appropriate map. 3650 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 3651 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 3652 3653 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask); 3654 Cmp(scratch, encoding_mask); 3655 Check(eq, kUnexpectedStringType); 3656 3657 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset)); 3658 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); 3659 Check(lt, kIndexIsTooLarge); 3660 3661 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0)); 3662 Cmp(index, 0); 3663 Check(ge, kIndexIsNegative); 3664 } 3665 3666 3667 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 3668 Register scratch1, 3669 Register scratch2, 3670 Label* miss) { 3671 DCHECK(!AreAliased(holder_reg, scratch1, scratch2)); 3672 Label same_contexts; 3673 3674 // Load current lexical context from the stack frame. 3675 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3676 // In debug mode, make sure the lexical context is set. 3677 #ifdef DEBUG 3678 Cmp(scratch1, 0); 3679 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); 3680 #endif 3681 3682 // Load the native context of the current context. 3683 Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX)); 3684 3685 // Check the context is a native context. 3686 if (emit_debug_code()) { 3687 // Read the first word and compare to the native_context_map. 3688 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset)); 3689 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex); 3690 Check(eq, kExpectedNativeContext); 3691 } 3692 3693 // Check if both contexts are the same. 3694 Ldr(scratch2, FieldMemOperand(holder_reg, 3695 JSGlobalProxy::kNativeContextOffset)); 3696 Cmp(scratch1, scratch2); 3697 B(&same_contexts, eq); 3698 3699 // Check the context is a native context. 3700 if (emit_debug_code()) { 3701 // We're short on scratch registers here, so use holder_reg as a scratch. 3702 Push(holder_reg); 3703 Register scratch3 = holder_reg; 3704 3705 CompareRoot(scratch2, Heap::kNullValueRootIndex); 3706 Check(ne, kExpectedNonNullContext); 3707 3708 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); 3709 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex); 3710 Check(eq, kExpectedNativeContext); 3711 Pop(holder_reg); 3712 } 3713 3714 // Check that the security token in the calling global object is 3715 // compatible with the security token in the receiving global 3716 // object. 3717 int token_offset = Context::kHeaderSize + 3718 Context::SECURITY_TOKEN_INDEX * kPointerSize; 3719 3720 Ldr(scratch1, FieldMemOperand(scratch1, token_offset)); 3721 Ldr(scratch2, FieldMemOperand(scratch2, token_offset)); 3722 Cmp(scratch1, scratch2); 3723 B(miss, ne); 3724 3725 Bind(&same_contexts); 3726 } 3727 3728 3729 // Compute the hash code from the untagged key. 
This must be kept in sync with
3730 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3731 // code-stub-hydrogen.cc
3732 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3733 DCHECK(!AreAliased(key, scratch));
3734
3735 // Xor original key with a seed.
3736 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3737 Eor(key, key, Operand::UntagSmi(scratch));
3738
3739 // The algorithm uses 32-bit integer values.
3740 key = key.W();
3741 scratch = scratch.W();
3742
3743 // Compute the hash code from the untagged key. This must be kept in sync
3744 // with ComputeIntegerHash in utils.h.
3745 //
3746 // hash = ~hash + (hash << 15);
3747 Mvn(scratch, key);
3748 Add(key, scratch, Operand(key, LSL, 15));
3749 // hash = hash ^ (hash >> 12);
3750 Eor(key, key, Operand(key, LSR, 12));
3751 // hash = hash + (hash << 2);
3752 Add(key, key, Operand(key, LSL, 2));
3753 // hash = hash ^ (hash >> 4);
3754 Eor(key, key, Operand(key, LSR, 4));
3755 // hash = hash * 2057;
3756 Mov(scratch, Operand(key, LSL, 11));
3757 Add(key, key, Operand(key, LSL, 3));
3758 Add(key, key, scratch);
3759 // hash = hash ^ (hash >> 16);
3760 Eor(key, key, Operand(key, LSR, 16));
3761 Bic(key, key, Operand(0xc0000000u));
3762 }
3763
3764
3765 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3766 Register elements,
3767 Register key,
3768 Register result,
3769 Register scratch0,
3770 Register scratch1,
3771 Register scratch2,
3772 Register scratch3) {
3773 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3774
3775 Label done;
3776
3777 SmiUntag(scratch0, key);
3778 GetNumberHash(scratch0, scratch1);
3779
3780 // Compute the capacity mask.
3781 Ldrsw(scratch1,
3782 UntagSmiFieldMemOperand(elements,
3783 SeededNumberDictionary::kCapacityOffset));
3784 Sub(scratch1, scratch1, 1);
3785
3786 // Generate an unrolled loop that performs a few probes before giving up.
3787 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3788 // Compute the masked index: (hash + i + i * i) & mask.
3789 if (i > 0) {
3790 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3791 } else {
3792 Mov(scratch2, scratch0);
3793 }
3794 And(scratch2, scratch2, scratch1);
3795
3796 // Scale the index by multiplying by the element size.
3797 DCHECK(SeededNumberDictionary::kEntrySize == 3);
3798 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3799
3800 // Check if the key is identical to the name.
3801 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3802 Ldr(scratch3,
3803 FieldMemOperand(scratch2,
3804 SeededNumberDictionary::kElementsStartOffset));
3805 Cmp(key, scratch3);
3806 if (i != (kNumberDictionaryProbes - 1)) {
3807 B(eq, &done);
3808 } else {
3809 B(ne, miss);
3810 }
3811 }
3812
3813 Bind(&done);
3814 // Check that the value is a field property.
3815 const int kDetailsOffset =
3816 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3817 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
3818 DCHECK_EQ(DATA, 0);
3819 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
3820
3821 // Get the value at the masked, scaled index and return.
3822 const int kValueOffset =
3823 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3824 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
3825 }
3826
3827
3828 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
3829 Register address, 3830 Register scratch1, 3831 SaveFPRegsMode fp_mode, 3832 RememberedSetFinalAction and_then) { 3833 DCHECK(!AreAliased(object, address, scratch1)); 3834 Label done, store_buffer_overflow; 3835 if (emit_debug_code()) { 3836 Label ok; 3837 JumpIfNotInNewSpace(object, &ok); 3838 Abort(kRememberedSetPointerInNewSpace); 3839 bind(&ok); 3840 } 3841 UseScratchRegisterScope temps(this); 3842 Register scratch2 = temps.AcquireX(); 3843 3844 // Load store buffer top. 3845 Mov(scratch2, ExternalReference::store_buffer_top(isolate())); 3846 Ldr(scratch1, MemOperand(scratch2)); 3847 // Store pointer to buffer and increment buffer top. 3848 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); 3849 // Write back new top of buffer. 3850 Str(scratch1, MemOperand(scratch2)); 3851 // Call stub on end of buffer. 3852 // Check for end of buffer. 3853 DCHECK(StoreBuffer::kStoreBufferOverflowBit == 3854 (1 << (14 + kPointerSizeLog2))); 3855 if (and_then == kFallThroughAtEnd) { 3856 Tbz(scratch1, (14 + kPointerSizeLog2), &done); 3857 } else { 3858 DCHECK(and_then == kReturnAtEnd); 3859 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); 3860 Ret(); 3861 } 3862 3863 Bind(&store_buffer_overflow); 3864 Push(lr); 3865 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode); 3866 CallStub(&store_buffer_overflow_stub); 3867 Pop(lr); 3868 3869 Bind(&done); 3870 if (and_then == kReturnAtEnd) { 3871 Ret(); 3872 } 3873 } 3874 3875 3876 void MacroAssembler::PopSafepointRegisters() { 3877 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 3878 PopXRegList(kSafepointSavedRegisters); 3879 Drop(num_unsaved); 3880 } 3881 3882 3883 void MacroAssembler::PushSafepointRegisters() { 3884 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so 3885 // adjust the stack for unsaved registers. 3886 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 3887 DCHECK(num_unsaved >= 0); 3888 Claim(num_unsaved); 3889 PushXRegList(kSafepointSavedRegisters); 3890 } 3891 3892 3893 void MacroAssembler::PushSafepointRegistersAndDoubles() { 3894 PushSafepointRegisters(); 3895 PushCPURegList(CPURegList( 3896 CPURegister::kFPRegister, kDRegSizeInBits, 3897 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT) 3898 ->allocatable_double_codes_mask())); 3899 } 3900 3901 3902 void MacroAssembler::PopSafepointRegistersAndDoubles() { 3903 PopCPURegList(CPURegList( 3904 CPURegister::kFPRegister, kDRegSizeInBits, 3905 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT) 3906 ->allocatable_double_codes_mask())); 3907 PopSafepointRegisters(); 3908 } 3909 3910 3911 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 3912 // Make sure the safepoint registers list is what we expect. 3913 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); 3914 3915 // Safepoint registers are stored contiguously on the stack, but not all the 3916 // registers are saved. The following registers are excluded: 3917 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of 3918 // the macro assembler. 3919 // - x28 (jssp) because JS stack pointer doesn't need to be included in 3920 // safepoint registers. 3921 // - x31 (csp) because the system stack pointer doesn't need to be included 3922 // in safepoint registers. 3923 // 3924 // This function implements the mapping of register code to index into the 3925 // safepoint register slots. 
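// For example, under this mapping: x5 maps to slot 5, x20 maps to slot 18
// (two slots are skipped for ip0/ip1), and x29/x30 map to slots 26/27
// (jssp is skipped as well).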
3926 if ((reg_code >= 0) && (reg_code <= 15)) { 3927 return reg_code; 3928 } else if ((reg_code >= 18) && (reg_code <= 27)) { 3929 // Skip ip0 and ip1. 3930 return reg_code - 2; 3931 } else if ((reg_code == 29) || (reg_code == 30)) { 3932 // Also skip jssp. 3933 return reg_code - 3; 3934 } else { 3935 // This register has no safepoint register slot. 3936 UNREACHABLE(); 3937 return -1; 3938 } 3939 } 3940 3941 3942 void MacroAssembler::CheckPageFlagSet(const Register& object, 3943 const Register& scratch, 3944 int mask, 3945 Label* if_any_set) { 3946 And(scratch, object, ~Page::kPageAlignmentMask); 3947 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); 3948 TestAndBranchIfAnySet(scratch, mask, if_any_set); 3949 } 3950 3951 3952 void MacroAssembler::CheckPageFlagClear(const Register& object, 3953 const Register& scratch, 3954 int mask, 3955 Label* if_all_clear) { 3956 And(scratch, object, ~Page::kPageAlignmentMask); 3957 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); 3958 TestAndBranchIfAllClear(scratch, mask, if_all_clear); 3959 } 3960 3961 3962 void MacroAssembler::RecordWriteField( 3963 Register object, 3964 int offset, 3965 Register value, 3966 Register scratch, 3967 LinkRegisterStatus lr_status, 3968 SaveFPRegsMode save_fp, 3969 RememberedSetAction remembered_set_action, 3970 SmiCheck smi_check, 3971 PointersToHereCheck pointers_to_here_check_for_value) { 3972 // First, check if a write barrier is even needed. The tests below 3973 // catch stores of Smis. 3974 Label done; 3975 3976 // Skip the barrier if writing a smi. 3977 if (smi_check == INLINE_SMI_CHECK) { 3978 JumpIfSmi(value, &done); 3979 } 3980 3981 // Although the object register is tagged, the offset is relative to the start 3982 // of the object, so offset must be a multiple of kPointerSize. 3983 DCHECK(IsAligned(offset, kPointerSize)); 3984 3985 Add(scratch, object, offset - kHeapObjectTag); 3986 if (emit_debug_code()) { 3987 Label ok; 3988 Tst(scratch, (1 << kPointerSizeLog2) - 1); 3989 B(eq, &ok); 3990 Abort(kUnalignedCellInWriteBarrier); 3991 Bind(&ok); 3992 } 3993 3994 RecordWrite(object, 3995 scratch, 3996 value, 3997 lr_status, 3998 save_fp, 3999 remembered_set_action, 4000 OMIT_SMI_CHECK, 4001 pointers_to_here_check_for_value); 4002 4003 Bind(&done); 4004 4005 // Clobber clobbered input registers when running with the debug-code flag 4006 // turned on to provoke errors. 4007 if (emit_debug_code()) { 4008 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4))); 4009 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8))); 4010 } 4011 } 4012 4013 4014 // Will clobber: object, map, dst. 4015 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. 
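// A map write only needs the incremental-marking half of the write barrier:
// maps are never allocated in new space, so no remembered-set update is
// required (the stub below is created with OMIT_REMEMBERED_SET).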
4016 void MacroAssembler::RecordWriteForMap(Register object, 4017 Register map, 4018 Register dst, 4019 LinkRegisterStatus lr_status, 4020 SaveFPRegsMode fp_mode) { 4021 ASM_LOCATION("MacroAssembler::RecordWrite"); 4022 DCHECK(!AreAliased(object, map)); 4023 4024 if (emit_debug_code()) { 4025 UseScratchRegisterScope temps(this); 4026 Register temp = temps.AcquireX(); 4027 4028 CompareObjectMap(map, temp, isolate()->factory()->meta_map()); 4029 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 4030 } 4031 4032 if (!FLAG_incremental_marking) { 4033 return; 4034 } 4035 4036 if (emit_debug_code()) { 4037 UseScratchRegisterScope temps(this); 4038 Register temp = temps.AcquireX(); 4039 4040 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 4041 Cmp(temp, map); 4042 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 4043 } 4044 4045 // First, check if a write barrier is even needed. The tests below 4046 // catch stores of smis and stores into the young generation. 4047 Label done; 4048 4049 // A single check of the map's pages interesting flag suffices, since it is 4050 // only set during incremental collection, and then it's also guaranteed that 4051 // the from object's page's interesting flag is also set. This optimization 4052 // relies on the fact that maps can never be in new space. 4053 CheckPageFlagClear(map, 4054 map, // Used as scratch. 4055 MemoryChunk::kPointersToHereAreInterestingMask, 4056 &done); 4057 4058 // Record the actual write. 4059 if (lr_status == kLRHasNotBeenSaved) { 4060 Push(lr); 4061 } 4062 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag); 4063 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, 4064 fp_mode); 4065 CallStub(&stub); 4066 if (lr_status == kLRHasNotBeenSaved) { 4067 Pop(lr); 4068 } 4069 4070 Bind(&done); 4071 4072 // Count number of write barriers in generated code. 4073 isolate()->counters()->write_barriers_static()->Increment(); 4074 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map, 4075 dst); 4076 4077 // Clobber clobbered registers when running with the debug-code flag 4078 // turned on to provoke errors. 4079 if (emit_debug_code()) { 4080 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12))); 4081 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16))); 4082 } 4083 } 4084 4085 4086 // Will clobber: object, address, value. 4087 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. 4088 // 4089 // The register 'object' contains a heap object pointer. The heap object tag is 4090 // shifted away. 4091 void MacroAssembler::RecordWrite( 4092 Register object, 4093 Register address, 4094 Register value, 4095 LinkRegisterStatus lr_status, 4096 SaveFPRegsMode fp_mode, 4097 RememberedSetAction remembered_set_action, 4098 SmiCheck smi_check, 4099 PointersToHereCheck pointers_to_here_check_for_value) { 4100 ASM_LOCATION("MacroAssembler::RecordWrite"); 4101 DCHECK(!AreAliased(object, value)); 4102 4103 if (emit_debug_code()) { 4104 UseScratchRegisterScope temps(this); 4105 Register temp = temps.AcquireX(); 4106 4107 Ldr(temp, MemOperand(address)); 4108 Cmp(temp, value); 4109 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 4110 } 4111 4112 // First, check if a write barrier is even needed. The tests below 4113 // catch stores of smis and stores into the young generation. 
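// The checks below read the flag word from the page headers of 'value' and
// 'object'; if neither page is marked as interesting, the store cannot need
// a barrier and the code that follows is skipped.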
4114 Label done; 4115 4116 if (smi_check == INLINE_SMI_CHECK) { 4117 DCHECK_EQ(0, kSmiTag); 4118 JumpIfSmi(value, &done); 4119 } 4120 4121 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { 4122 CheckPageFlagClear(value, 4123 value, // Used as scratch. 4124 MemoryChunk::kPointersToHereAreInterestingMask, 4125 &done); 4126 } 4127 CheckPageFlagClear(object, 4128 value, // Used as scratch. 4129 MemoryChunk::kPointersFromHereAreInterestingMask, 4130 &done); 4131 4132 // Record the actual write. 4133 if (lr_status == kLRHasNotBeenSaved) { 4134 Push(lr); 4135 } 4136 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, 4137 fp_mode); 4138 CallStub(&stub); 4139 if (lr_status == kLRHasNotBeenSaved) { 4140 Pop(lr); 4141 } 4142 4143 Bind(&done); 4144 4145 // Count number of write barriers in generated code. 4146 isolate()->counters()->write_barriers_static()->Increment(); 4147 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address, 4148 value); 4149 4150 // Clobber clobbered registers when running with the debug-code flag 4151 // turned on to provoke errors. 4152 if (emit_debug_code()) { 4153 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12))); 4154 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16))); 4155 } 4156 } 4157 4158 4159 void MacroAssembler::AssertHasValidColor(const Register& reg) { 4160 if (emit_debug_code()) { 4161 // The bit sequence is backward. The first character in the string 4162 // represents the least significant bit. 4163 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 4164 4165 Label color_is_valid; 4166 Tbnz(reg, 0, &color_is_valid); 4167 Tbz(reg, 1, &color_is_valid); 4168 Abort(kUnexpectedColorFound); 4169 Bind(&color_is_valid); 4170 } 4171 } 4172 4173 4174 void MacroAssembler::GetMarkBits(Register addr_reg, 4175 Register bitmap_reg, 4176 Register shift_reg) { 4177 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg)); 4178 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); 4179 // addr_reg is divided into fields: 4180 // |63 page base 20|19 high 8|7 shift 3|2 0| 4181 // 'high' gives the index of the cell holding color bits for the object. 4182 // 'shift' gives the offset in the cell for this object's color. 4183 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; 4184 UseScratchRegisterScope temps(this); 4185 Register temp = temps.AcquireX(); 4186 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); 4187 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); 4188 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); 4189 // bitmap_reg: 4190 // |63 page base 20|19 zeros 15|14 high 3|2 0| 4191 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); 4192 } 4193 4194 4195 void MacroAssembler::HasColor(Register object, 4196 Register bitmap_scratch, 4197 Register shift_scratch, 4198 Label* has_color, 4199 int first_bit, 4200 int second_bit) { 4201 // See mark-compact.h for color definitions. 4202 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch)); 4203 4204 GetMarkBits(object, bitmap_scratch, shift_scratch); 4205 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 4206 // Shift the bitmap down to get the color of the object in bits [1:0]. 4207 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch); 4208 4209 AssertHasValidColor(bitmap_scratch); 4210 4211 // These bit sequences are backwards. The first character in the string 4212 // represents the least significant bit. 
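// So "10" (grey) means bit 0 is set and bit 1 is clear, while "00" (white)
// has both bits clear and "11" (black) has both bits set.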
4213 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); 4214 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); 4215 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); 4216 4217 // Check for the color. 4218 if (first_bit == 0) { 4219 // Checking for white. 4220 DCHECK(second_bit == 0); 4221 // We only need to test the first bit. 4222 Tbz(bitmap_scratch, 0, has_color); 4223 } else { 4224 Label other_color; 4225 // Checking for grey or black. 4226 Tbz(bitmap_scratch, 0, &other_color); 4227 if (second_bit == 0) { 4228 Tbz(bitmap_scratch, 1, has_color); 4229 } else { 4230 Tbnz(bitmap_scratch, 1, has_color); 4231 } 4232 Bind(&other_color); 4233 } 4234 4235 // Fall through if it does not have the right color. 4236 } 4237 4238 4239 void MacroAssembler::JumpIfBlack(Register object, 4240 Register scratch0, 4241 Register scratch1, 4242 Label* on_black) { 4243 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); 4244 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. 4245 } 4246 4247 4248 void MacroAssembler::JumpIfDictionaryInPrototypeChain( 4249 Register object, 4250 Register scratch0, 4251 Register scratch1, 4252 Label* found) { 4253 DCHECK(!AreAliased(object, scratch0, scratch1)); 4254 Register current = scratch0; 4255 Label loop_again, end; 4256 4257 // Scratch contains elements pointer. 4258 Mov(current, object); 4259 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); 4260 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); 4261 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end); 4262 4263 // Loop based on the map going up the prototype chain. 4264 Bind(&loop_again); 4265 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); 4266 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE); 4267 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE); 4268 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE); 4269 B(lo, found); 4270 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); 4271 DecodeField<Map::ElementsKindBits>(scratch1); 4272 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found); 4273 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); 4274 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again); 4275 4276 Bind(&end); 4277 } 4278 4279 4280 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, 4281 Register shift_scratch, Register load_scratch, 4282 Register length_scratch, 4283 Label* value_is_white) { 4284 DCHECK(!AreAliased( 4285 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch)); 4286 4287 // These bit sequences are backwards. The first character in the string 4288 // represents the least significant bit. 4289 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); 4290 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); 4291 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); 4292 4293 GetMarkBits(value, bitmap_scratch, shift_scratch); 4294 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 4295 Lsr(load_scratch, load_scratch, shift_scratch); 4296 4297 AssertHasValidColor(load_scratch); 4298 4299 // If the value is black or grey we don't need to do anything. 4300 // Since both black and grey have a 1 in the first position and white does 4301 // not have a 1 there we only need to check one bit. 
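// Tbz branches when bit 0 is clear, which is exactly the white case.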
4302 Tbz(load_scratch, 0, value_is_white); 4303 } 4304 4305 4306 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { 4307 if (emit_debug_code()) { 4308 Check(cond, reason); 4309 } 4310 } 4311 4312 4313 4314 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { 4315 if (emit_debug_code()) { 4316 CheckRegisterIsClear(reg, reason); 4317 } 4318 } 4319 4320 4321 void MacroAssembler::AssertRegisterIsRoot(Register reg, 4322 Heap::RootListIndex index, 4323 BailoutReason reason) { 4324 if (emit_debug_code()) { 4325 CompareRoot(reg, index); 4326 Check(eq, reason); 4327 } 4328 } 4329 4330 4331 void MacroAssembler::AssertFastElements(Register elements) { 4332 if (emit_debug_code()) { 4333 UseScratchRegisterScope temps(this); 4334 Register temp = temps.AcquireX(); 4335 Label ok; 4336 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset)); 4337 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok); 4338 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok); 4339 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok); 4340 Abort(kJSObjectWithFastElementsMapHasSlowElements); 4341 Bind(&ok); 4342 } 4343 } 4344 4345 4346 void MacroAssembler::AssertIsString(const Register& object) { 4347 if (emit_debug_code()) { 4348 UseScratchRegisterScope temps(this); 4349 Register temp = temps.AcquireX(); 4350 STATIC_ASSERT(kSmiTag == 0); 4351 Tst(object, kSmiTagMask); 4352 Check(ne, kOperandIsNotAString); 4353 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 4354 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); 4355 Check(lo, kOperandIsNotAString); 4356 } 4357 } 4358 4359 4360 void MacroAssembler::Check(Condition cond, BailoutReason reason) { 4361 Label ok; 4362 B(cond, &ok); 4363 Abort(reason); 4364 // Will not return here. 4365 Bind(&ok); 4366 } 4367 4368 4369 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) { 4370 Label ok; 4371 Cbz(reg, &ok); 4372 Abort(reason); 4373 // Will not return here. 4374 Bind(&ok); 4375 } 4376 4377 4378 void MacroAssembler::Abort(BailoutReason reason) { 4379 #ifdef DEBUG 4380 RecordComment("Abort message: "); 4381 RecordComment(GetBailoutReason(reason)); 4382 4383 if (FLAG_trap_on_abort) { 4384 Brk(0); 4385 return; 4386 } 4387 #endif 4388 4389 // Abort is used in some contexts where csp is the stack pointer. In order to 4390 // simplify the CallRuntime code, make sure that jssp is the stack pointer. 4391 // There is no risk of register corruption here because Abort doesn't return. 4392 Register old_stack_pointer = StackPointer(); 4393 SetStackPointer(jssp); 4394 Mov(jssp, old_stack_pointer); 4395 4396 // We need some scratch registers for the MacroAssembler, so make sure we have 4397 // some. This is safe here because Abort never returns. 4398 RegList old_tmp_list = TmpList()->list(); 4399 TmpList()->Combine(MacroAssembler::DefaultTmpList()); 4400 4401 if (use_real_aborts()) { 4402 // Avoid infinite recursion; Push contains some assertions that use Abort. 4403 NoUseRealAbortsScope no_real_aborts(this); 4404 4405 Mov(x0, Smi::FromInt(reason)); 4406 Push(x0); 4407 4408 if (!has_frame_) { 4409 // We don't actually want to generate a pile of code for this, so just 4410 // claim there is a stack frame, without generating one. 4411 FrameScope scope(this, StackFrame::NONE); 4412 CallRuntime(Runtime::kAbort, 1); 4413 } else { 4414 CallRuntime(Runtime::kAbort, 1); 4415 } 4416 } else { 4417 // Load the string to pass to Printf. 
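// Adr materialises a pc-relative address, so the message string can live
// directly in the instruction stream (it is emitted after the Unreachable()
// below) without needing any relocation.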
4418 Label msg_address; 4419 Adr(x0, &msg_address); 4420 4421 // Call Printf directly to report the error. 4422 CallPrintf(); 4423 4424 // We need a way to stop execution on both the simulator and real hardware, 4425 // and Unreachable() is the best option. 4426 Unreachable(); 4427 4428 // Emit the message string directly in the instruction stream. 4429 { 4430 BlockPoolsScope scope(this); 4431 Bind(&msg_address); 4432 EmitStringData(GetBailoutReason(reason)); 4433 } 4434 } 4435 4436 SetStackPointer(old_stack_pointer); 4437 TmpList()->set_list(old_tmp_list); 4438 } 4439 4440 4441 void MacroAssembler::LoadTransitionedArrayMapConditional( 4442 ElementsKind expected_kind, 4443 ElementsKind transitioned_kind, 4444 Register map_in_out, 4445 Register scratch1, 4446 Register scratch2, 4447 Label* no_map_match) { 4448 DCHECK(IsFastElementsKind(expected_kind)); 4449 DCHECK(IsFastElementsKind(transitioned_kind)); 4450 4451 // Check that the function's map is the same as the expected cached map. 4452 Ldr(scratch1, NativeContextMemOperand()); 4453 Ldr(scratch2, 4454 ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind))); 4455 Cmp(map_in_out, scratch2); 4456 B(ne, no_map_match); 4457 4458 // Use the transitioned cached map. 4459 Ldr(map_in_out, 4460 ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind))); 4461 } 4462 4463 4464 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { 4465 Ldr(dst, NativeContextMemOperand()); 4466 Ldr(dst, ContextMemOperand(dst, index)); 4467 } 4468 4469 4470 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, 4471 Register map, 4472 Register scratch) { 4473 // Load the initial map. The global functions all have initial maps. 4474 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 4475 if (emit_debug_code()) { 4476 Label ok, fail; 4477 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); 4478 B(&ok); 4479 Bind(&fail); 4480 Abort(kGlobalFunctionsMustHaveInitialMap); 4481 Bind(&ok); 4482 } 4483 } 4484 4485 4486 // This is the main Printf implementation. All other Printf variants call 4487 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. 4488 void MacroAssembler::PrintfNoPreserve(const char * format, 4489 const CPURegister& arg0, 4490 const CPURegister& arg1, 4491 const CPURegister& arg2, 4492 const CPURegister& arg3) { 4493 // We cannot handle a caller-saved stack pointer. It doesn't make much sense 4494 // in most cases anyway, so this restriction shouldn't be too serious. 4495 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer())); 4496 4497 // The provided arguments, and their proper procedure-call standard registers. 4498 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; 4499 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg}; 4500 4501 int arg_count = kPrintfMaxArgCount; 4502 4503 // The PCS varargs registers for printf. Note that x0 is used for the printf 4504 // format string. 4505 static const CPURegList kPCSVarargs = 4506 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count); 4507 static const CPURegList kPCSVarargsFP = 4508 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1); 4509 4510 // We can use caller-saved registers as scratch values, except for the 4511 // arguments and the PCS registers where they might need to go. 4512 CPURegList tmp_list = kCallerSaved; 4513 tmp_list.Remove(x0); // Used to pass the format string. 
4514 tmp_list.Remove(kPCSVarargs); 4515 tmp_list.Remove(arg0, arg1, arg2, arg3); 4516 4517 CPURegList fp_tmp_list = kCallerSavedFP; 4518 fp_tmp_list.Remove(kPCSVarargsFP); 4519 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); 4520 4521 // Override the MacroAssembler's scratch register list. The lists will be 4522 // reset automatically at the end of the UseScratchRegisterScope. 4523 UseScratchRegisterScope temps(this); 4524 TmpList()->set_list(tmp_list.list()); 4525 FPTmpList()->set_list(fp_tmp_list.list()); 4526 4527 // Copies of the printf vararg registers that we can pop from. 4528 CPURegList pcs_varargs = kPCSVarargs; 4529 CPURegList pcs_varargs_fp = kPCSVarargsFP; 4530 4531 // Place the arguments. There are lots of clever tricks and optimizations we 4532 // could use here, but Printf is a debug tool so instead we just try to keep 4533 // it simple: Move each input that isn't already in the right place to a 4534 // scratch register, then move everything back. 4535 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { 4536 // Work out the proper PCS register for this argument. 4537 if (args[i].IsRegister()) { 4538 pcs[i] = pcs_varargs.PopLowestIndex().X(); 4539 // We might only need a W register here. We need to know the size of the 4540 // argument so we can properly encode it for the simulator call. 4541 if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); 4542 } else if (args[i].IsFPRegister()) { 4543 // In C, floats are always cast to doubles for varargs calls. 4544 pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); 4545 } else { 4546 DCHECK(args[i].IsNone()); 4547 arg_count = i; 4548 break; 4549 } 4550 4551 // If the argument is already in the right place, leave it where it is. 4552 if (args[i].Aliases(pcs[i])) continue; 4553 4554 // Otherwise, if the argument is in a PCS argument register, allocate an 4555 // appropriate scratch register and then move it out of the way. 4556 if (kPCSVarargs.IncludesAliasOf(args[i]) || 4557 kPCSVarargsFP.IncludesAliasOf(args[i])) { 4558 if (args[i].IsRegister()) { 4559 Register old_arg = Register(args[i]); 4560 Register new_arg = temps.AcquireSameSizeAs(old_arg); 4561 Mov(new_arg, old_arg); 4562 args[i] = new_arg; 4563 } else { 4564 FPRegister old_arg = FPRegister(args[i]); 4565 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); 4566 Fmov(new_arg, old_arg); 4567 args[i] = new_arg; 4568 } 4569 } 4570 } 4571 4572 // Do a second pass to move values into their final positions and perform any 4573 // conversions that may be required. 4574 for (int i = 0; i < arg_count; i++) { 4575 DCHECK(pcs[i].type() == args[i].type()); 4576 if (pcs[i].IsRegister()) { 4577 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); 4578 } else { 4579 DCHECK(pcs[i].IsFPRegister()); 4580 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { 4581 Fmov(FPRegister(pcs[i]), FPRegister(args[i])); 4582 } else { 4583 Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); 4584 } 4585 } 4586 } 4587 4588 // Load the format string into x0, as per the procedure-call standard. 4589 // 4590 // To make the code as portable as possible, the format string is encoded 4591 // directly in the instruction stream. It might be cleaner to encode it in a 4592 // literal pool, but since Printf is usually used for debugging, it is 4593 // beneficial for it to be minimally dependent on other features. 4594 Label format_address; 4595 Adr(x0, &format_address); 4596 4597 // Emit the format string directly in the instruction stream. 
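// The branch below jumps over the string data so it is never executed, and
// the BlockPoolsScope prevents a constant pool or veneer from being emitted
// in the middle of the data.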
4598 { BlockPoolsScope scope(this); 4599 Label after_data; 4600 B(&after_data); 4601 Bind(&format_address); 4602 EmitStringData(format); 4603 Unreachable(); 4604 Bind(&after_data); 4605 } 4606 4607 // We don't pass any arguments on the stack, but we still need to align the C 4608 // stack pointer to a 16-byte boundary for PCS compliance. 4609 if (!csp.Is(StackPointer())) { 4610 Bic(csp, StackPointer(), 0xf); 4611 } 4612 4613 CallPrintf(arg_count, pcs); 4614 } 4615 4616 4617 void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) { 4618 // A call to printf needs special handling for the simulator, since the system 4619 // printf function will use a different instruction set and the procedure-call 4620 // standard will not be compatible. 4621 #ifdef USE_SIMULATOR 4622 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize); 4623 hlt(kImmExceptionIsPrintf); 4624 dc32(arg_count); // kPrintfArgCountOffset 4625 4626 // Determine the argument pattern. 4627 uint32_t arg_pattern_list = 0; 4628 for (int i = 0; i < arg_count; i++) { 4629 uint32_t arg_pattern; 4630 if (args[i].IsRegister()) { 4631 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX; 4632 } else { 4633 DCHECK(args[i].Is64Bits()); 4634 arg_pattern = kPrintfArgD; 4635 } 4636 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits)); 4637 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i)); 4638 } 4639 dc32(arg_pattern_list); // kPrintfArgPatternListOffset 4640 } 4641 #else 4642 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); 4643 #endif 4644 } 4645 4646 4647 void MacroAssembler::Printf(const char * format, 4648 CPURegister arg0, 4649 CPURegister arg1, 4650 CPURegister arg2, 4651 CPURegister arg3) { 4652 // We can only print sp if it is the current stack pointer. 4653 if (!csp.Is(StackPointer())) { 4654 DCHECK(!csp.Aliases(arg0)); 4655 DCHECK(!csp.Aliases(arg1)); 4656 DCHECK(!csp.Aliases(arg2)); 4657 DCHECK(!csp.Aliases(arg3)); 4658 } 4659 4660 // Printf is expected to preserve all registers, so make sure that none are 4661 // available as scratch registers until we've preserved them. 4662 RegList old_tmp_list = TmpList()->list(); 4663 RegList old_fp_tmp_list = FPTmpList()->list(); 4664 TmpList()->set_list(0); 4665 FPTmpList()->set_list(0); 4666 4667 // Preserve all caller-saved registers as well as NZCV. 4668 // If csp is the stack pointer, PushCPURegList asserts that the size of each 4669 // list is a multiple of 16 bytes. 4670 PushCPURegList(kCallerSaved); 4671 PushCPURegList(kCallerSavedFP); 4672 4673 // We can use caller-saved registers as scratch values (except for argN). 4674 CPURegList tmp_list = kCallerSaved; 4675 CPURegList fp_tmp_list = kCallerSavedFP; 4676 tmp_list.Remove(arg0, arg1, arg2, arg3); 4677 fp_tmp_list.Remove(arg0, arg1, arg2, arg3); 4678 TmpList()->set_list(tmp_list.list()); 4679 FPTmpList()->set_list(fp_tmp_list.list()); 4680 4681 { UseScratchRegisterScope temps(this); 4682 // If any of the arguments are the current stack pointer, allocate a new 4683 // register for them, and adjust the value to compensate for pushing the 4684 // caller-saved registers. 4685 bool arg0_sp = StackPointer().Aliases(arg0); 4686 bool arg1_sp = StackPointer().Aliases(arg1); 4687 bool arg2_sp = StackPointer().Aliases(arg2); 4688 bool arg3_sp = StackPointer().Aliases(arg3); 4689 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { 4690 // Allocate a register to hold the original stack pointer value, to pass 4691 // to PrintfNoPreserve as an argument. 
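// The caller-saved registers pushed above moved the stack down, so the
// original value is recovered by adding back the total size of both
// register lists.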
4692 Register arg_sp = temps.AcquireX(); 4693 Add(arg_sp, StackPointer(), 4694 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes()); 4695 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits()); 4696 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits()); 4697 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits()); 4698 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits()); 4699 } 4700 4701 // Preserve NZCV. 4702 { UseScratchRegisterScope temps(this); 4703 Register tmp = temps.AcquireX(); 4704 Mrs(tmp, NZCV); 4705 Push(tmp, xzr); 4706 } 4707 4708 PrintfNoPreserve(format, arg0, arg1, arg2, arg3); 4709 4710 // Restore NZCV. 4711 { UseScratchRegisterScope temps(this); 4712 Register tmp = temps.AcquireX(); 4713 Pop(xzr, tmp); 4714 Msr(NZCV, tmp); 4715 } 4716 } 4717 4718 PopCPURegList(kCallerSavedFP); 4719 PopCPURegList(kCallerSaved); 4720 4721 TmpList()->set_list(old_tmp_list); 4722 FPTmpList()->set_list(old_fp_tmp_list); 4723 } 4724 4725 4726 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { 4727 // TODO(jbramley): Other architectures use the internal memcpy to copy the 4728 // sequence. If this is a performance bottleneck, we should consider caching 4729 // the sequence and copying it in the same way. 4730 InstructionAccurateScope scope(this, 4731 kNoCodeAgeSequenceLength / kInstructionSize); 4732 DCHECK(jssp.Is(StackPointer())); 4733 EmitFrameSetupForCodeAgePatching(this); 4734 } 4735 4736 4737 4738 void MacroAssembler::EmitCodeAgeSequence(Code* stub) { 4739 InstructionAccurateScope scope(this, 4740 kNoCodeAgeSequenceLength / kInstructionSize); 4741 DCHECK(jssp.Is(StackPointer())); 4742 EmitCodeAgeSequence(this, stub); 4743 } 4744 4745 4746 #undef __ 4747 #define __ assm-> 4748 4749 4750 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { 4751 Label start; 4752 __ bind(&start); 4753 4754 // We can do this sequence using four instructions, but the code ageing 4755 // sequence that patches it needs five, so we use the extra space to try to 4756 // simplify some addressing modes and remove some dependencies (compared to 4757 // using two stp instructions with write-back). 4758 __ sub(jssp, jssp, 4 * kXRegSize); 4759 __ sub(csp, csp, 4 * kXRegSize); 4760 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize)); 4761 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize)); 4762 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); 4763 4764 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); 4765 } 4766 4767 4768 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, 4769 Code * stub) { 4770 Label start; 4771 __ bind(&start); 4772 // When the stub is called, the sequence is replaced with the young sequence 4773 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the 4774 // stub jumps to &start, stored in x0. The young sequence does not call the 4775 // stub so there is no infinite loop here. 4776 // 4777 // A branch (br) is used rather than a call (blr) because this code replaces 4778 // the frame setup code that would normally preserve lr. 4779 __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2); 4780 __ adr(x0, &start); 4781 __ br(ip0); 4782 // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up 4783 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences. 
4784 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); 4785 if (stub) { 4786 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); 4787 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); 4788 } 4789 } 4790 4791 4792 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) { 4793 bool is_young = isolate->code_aging_helper()->IsYoung(sequence); 4794 DCHECK(is_young || 4795 isolate->code_aging_helper()->IsOld(sequence)); 4796 return is_young; 4797 } 4798 4799 4800 void MacroAssembler::TruncatingDiv(Register result, 4801 Register dividend, 4802 int32_t divisor) { 4803 DCHECK(!AreAliased(result, dividend)); 4804 DCHECK(result.Is32Bits() && dividend.Is32Bits()); 4805 base::MagicNumbersForDivision<uint32_t> mag = 4806 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor)); 4807 Mov(result, mag.multiplier); 4808 Smull(result.X(), dividend, result); 4809 Asr(result.X(), result.X(), 32); 4810 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0; 4811 if (divisor > 0 && neg) Add(result, result, dividend); 4812 if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend); 4813 if (mag.shift > 0) Asr(result, result, mag.shift); 4814 Add(result, result, Operand(dividend, LSR, 31)); 4815 } 4816 4817 4818 #undef __ 4819 4820 4821 UseScratchRegisterScope::~UseScratchRegisterScope() { 4822 available_->set_list(old_available_); 4823 availablefp_->set_list(old_availablefp_); 4824 } 4825 4826 4827 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { 4828 int code = AcquireNextAvailable(available_).code(); 4829 return Register::Create(code, reg.SizeInBits()); 4830 } 4831 4832 4833 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { 4834 int code = AcquireNextAvailable(availablefp_).code(); 4835 return FPRegister::Create(code, reg.SizeInBits()); 4836 } 4837 4838 4839 CPURegister UseScratchRegisterScope::AcquireNextAvailable( 4840 CPURegList* available) { 4841 CHECK(!available->IsEmpty()); 4842 CPURegister result = available->PopLowestIndex(); 4843 DCHECK(!AreAliased(result, xzr, csp)); 4844 return result; 4845 } 4846 4847 4848 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available, 4849 const CPURegister& reg) { 4850 DCHECK(available->IncludesAliasOf(reg)); 4851 available->Remove(reg); 4852 return reg; 4853 } 4854 4855 4856 #define __ masm-> 4857 4858 4859 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, 4860 const Label* smi_check) { 4861 Assembler::BlockPoolsScope scope(masm); 4862 if (reg.IsValid()) { 4863 DCHECK(smi_check->is_bound()); 4864 DCHECK(reg.Is64Bits()); 4865 4866 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to 4867 // 'check' in the other bits. The possible offset is limited in that we 4868 // use BitField to pack the data, and the underlying data type is a 4869 // uint32_t. 4870 uint32_t delta = 4871 static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check)); 4872 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); 4873 } else { 4874 DCHECK(!smi_check->is_bound()); 4875 4876 // An offset of 0 indicates that there is no patch site. 
4877 __ InlineData(0); 4878 } 4879 } 4880 4881 4882 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info) 4883 : reg_(NoReg), smi_check_(NULL) { 4884 InstructionSequence* inline_data = InstructionSequence::At(info); 4885 DCHECK(inline_data->IsInlineData()); 4886 if (inline_data->IsInlineData()) { 4887 uint64_t payload = inline_data->InlineData(); 4888 // We use BitField to decode the payload, and BitField can only handle 4889 // 32-bit values. 4890 DCHECK(is_uint32(payload)); 4891 if (payload != 0) { 4892 uint32_t payload32 = static_cast<uint32_t>(payload); 4893 int reg_code = RegisterBits::decode(payload32); 4894 reg_ = Register::XRegFromCode(reg_code); 4895 int smi_check_delta = DeltaBits::decode(payload32); 4896 DCHECK(smi_check_delta != 0); 4897 smi_check_ = inline_data->preceding(smi_check_delta); 4898 } 4899 } 4900 } 4901 4902 4903 #undef __ 4904 4905 4906 } // namespace internal 4907 } // namespace v8 4908 4909 #endif // V8_TARGET_ARCH_ARM64 4910