Home | History | Annotate | Download | only in arm64
      1 // Copyright 2013 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/v8.h"
      6 
      7 #if V8_TARGET_ARCH_ARM64
      8 
      9 #include "src/base/bits.h"
     10 #include "src/base/division-by-constant.h"
     11 #include "src/bootstrapper.h"
     12 #include "src/codegen.h"
     13 #include "src/cpu-profiler.h"
     14 #include "src/debug.h"
     15 #include "src/isolate-inl.h"
     16 #include "src/runtime.h"
     17 
     18 namespace v8 {
     19 namespace internal {
     20 
     21 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
     22 #define __
     23 
     24 
// Construct a MacroAssembler emitting into the given code buffer.
// By default the macro assembler uses jssp as the stack pointer and the
// default scratch-register lists (see DefaultTmpList/DefaultFPTmpList).
MacroAssembler::MacroAssembler(Isolate* arg_isolate,
                               byte * buffer,
                               unsigned buffer_size)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      // In debug builds, macro instructions are permitted unless a scope
      // (e.g. InstructionAccurateScope) explicitly disables them.
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  // isolate() can be NULL for isolate-less assemblers; only create the
  // code object handle when an isolate (and thus a heap) is available.
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}
     43 
     44 
// Default integer scratch registers handed out by UseScratchRegisterScope:
// the two intra-procedure-call temporaries ip0 and ip1.
CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}
     48 
     49 
// Default floating-point scratch registers handed out by
// UseScratchRegisterScope.
CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}
     53 
     54 
// Emit a logical operation (op) of rn with a generic operand, writing the
// result to rd. Operand forms the instruction cannot encode directly are
// synthesized through scratch registers, so this may emit several
// instructions. Handles the csp destination specially, since logical
// instructions cannot write the stack pointer.
void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    // Relocatable immediates must be loaded via the literal pool rather
    // than encoded inline.
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          // Flag-setting forms must still execute to update NZCV.
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          // Flag-setting forms must still execute to update NZCV.
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    // Logical instructions have no extended-register form: perform the
    // extend separately, then operate on the temporary.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
    159 
    160 
// Materialize the immediate imm into rd using the shortest sequence this
// macro knows: a single movz/movn/orr-immediate when possible, otherwise a
// movz/movn followed by movk for each remaining halfword. Supports rd being
// the stack pointer (via a scratch register, since mov-immediate cannot
// target sp).
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on Aarch64 can be produced using an initial value, and zero to
  // three move keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            // movn sets all other halfwords to 0xffff, so encode the
            // inverted halfword value.
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    // At least one halfword always differs from the ignored pattern, since
    // an all-ignored value would have been handled by the one-instruction
    // path above.
    DCHECK(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}
    238 
    239 
// Move a generic operand (relocatable or plain immediate, shifted or
// extended register, or a plain register) into rd. discard_mode allows a
// W-register self-move to be elided when the caller does not require the
// upper 32 bits of the X register to be cleared.
void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    // Relocatable immediates are loaded from the literal pool.
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}
    294 
    295 
    296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
    297   DCHECK(allow_macro_instructions_);
    298 
    299   if (operand.NeedsRelocation(this)) {
    300     Ldr(rd, operand.immediate());
    301     mvn(rd, rd);
    302 
    303   } else if (operand.IsImmediate()) {
    304     // Call the macro assembler for generic immediates.
    305     Mov(rd, ~operand.ImmediateValue());
    306 
    307   } else if (operand.IsExtendedRegister()) {
    308     // Emit two instructions for the extend case. This differs from Mov, as
    309     // the extend and invert can't be achieved in one instruction.
    310     EmitExtendShift(rd, operand.reg(), operand.extend(),
    311                     operand.shift_amount());
    312     mvn(rd, rd);
    313 
    314   } else {
    315     mvn(rd, operand);
    316   }
    317 }
    318 
    319 
    320 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
    321   DCHECK((reg_size % 8) == 0);
    322   int count = 0;
    323   for (unsigned i = 0; i < (reg_size / 16); i++) {
    324     if ((imm & 0xffff) == 0) {
    325       count++;
    326     }
    327     imm >>= 16;
    328   }
    329   return count;
    330 }
    331 
    332 
    333 // The movz instruction can generate immediates containing an arbitrary 16-bit
    334 // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  // Encodable iff at most one 16-bit halfword of imm is non-zero.
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
    339 
    340 
    341 // The movn instruction can generate immediates containing an arbitrary 16-bit
    342 // half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  // movn materializes ~imm16 at some halfword position with all other bits
  // set, so imm is movn-encodable exactly when ~imm is movz-encodable.
  return IsImmMovz(~imm, reg_size);
}
    346 
    347 
// Emit a conditional compare (ccmp/ccmn) of rn against a generic operand,
// setting nzcv when cond does not hold. Operands the instruction cannot
// encode are first moved into a scratch register.
void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  // al and nv are not meaningful conditions for a conditional compare.
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate, then retry with the register form.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}
    376 
    377 
    378 void MacroAssembler::Csel(const Register& rd,
    379                           const Register& rn,
    380                           const Operand& operand,
    381                           Condition cond) {
    382   DCHECK(allow_macro_instructions_);
    383   DCHECK(!rd.IsZero());
    384   DCHECK((cond != al) && (cond != nv));
    385   if (operand.IsImmediate()) {
    386     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    387     // register.
    388     int64_t imm = operand.ImmediateValue();
    389     Register zr = AppropriateZeroRegFor(rn);
    390     if (imm == 0) {
    391       csel(rd, rn, zr, cond);
    392     } else if (imm == 1) {
    393       csinc(rd, rn, zr, cond);
    394     } else if (imm == -1) {
    395       csinv(rd, rn, zr, cond);
    396     } else {
    397       UseScratchRegisterScope temps(this);
    398       Register temp = temps.AcquireSameSizeAs(rn);
    399       Mov(temp, imm);
    400       csel(rd, rn, temp, cond);
    401     }
    402   } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    403     // Unshifted register argument.
    404     csel(rd, rn, operand.reg(), cond);
    405   } else {
    406     // All other arguments.
    407     UseScratchRegisterScope temps(this);
    408     Register temp = temps.AcquireSameSizeAs(rn);
    409     Mov(temp, operand);
    410     csel(rd, rn, temp, cond);
    411   }
    412 }
    413 
    414 
// Try to materialize imm into dst with a single instruction (movz, movn or
// orr-immediate, tried in that order). Returns true and emits the
// instruction on success; emits nothing and returns false otherwise.
bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't write
    // to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't write
    // to the stack pointer.
    // For a W register, mask the inverted value so only the low 32 bits of
    // the movn payload are used.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    // orr with the zero register can also write to the stack pointer.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}
    436 
    437 
    438 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
    439                                                   int64_t imm) {
    440   int reg_size = dst.SizeInBits();
    441 
    442   // Encode the immediate in a single move instruction, if possible.
    443   if (TryOneInstrMoveImmediate(dst, imm)) {
    444     // The move was successful; nothing to do here.
    445   } else {
    446     // Pre-shift the immediate to the least-significant bits of the register.
    447     int shift_low = CountTrailingZeros(imm, reg_size);
    448     int64_t imm_low = imm >> shift_low;
    449 
    450     // Pre-shift the immediate to the most-significant bits of the register. We
    451     // insert set bits in the least-significant bits, as this creates a
    452     // different immediate that may be encodable using movn or orr-immediate.
    453     // If this new immediate is encodable, the set bits will be eliminated by
    454     // the post shift on the following instruction.
    455     int shift_high = CountLeadingZeros(imm, reg_size);
    456     int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
    457 
    458     if (TryOneInstrMoveImmediate(dst, imm_low)) {
    459       // The new immediate has been moved into the destination's low bits:
    460       // return a new leftward-shifting operand.
    461       return Operand(dst, LSL, shift_low);
    462     } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
    463       // The new immediate has been moved into the destination's high bits:
    464       // return a new rightward-shifting operand.
    465       return Operand(dst, LSR, shift_high);
    466     } else {
    467       // Use the generic move operation to set up the immediate.
    468       Mov(dst, imm);
    469     }
    470   }
    471   return Operand(dst);
    472 }
    473 
    474 
// Emit an add/sub (op, optionally setting flags per S) of a generic
// operand to rn, writing rd. Legalizes immediates that are out of add/sub
// range, ROR-shifted register operands, and the zr/non-shifted-operand
// corner case by routing the operand through a scratch register. Elides
// 64-bit "add x, x, #0"-style no-ops when flags are not set.
void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate, then retry with the register form.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue()))      ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // The operand cannot be encoded directly: synthesize it in a scratch
    // register, using a pre-shifted form for immediates when possible.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    // The operand is directly encodable in the instruction.
    AddSub(rd, rn, operand, S, op);
  }
}
    509 
    510 
// Emit an add/sub-with-carry (adc/sbc family) of a generic operand to rn.
// The instruction itself only accepts a plain register, so immediates and
// shifted/extended operands are first synthesized in a scratch register.
void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate, then retry with the register form.
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    // Shift amounts must be encodable for the register size.
    DCHECK(is_uintn(operand.shift_amount(),
          rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                             : kWRegSizeInBitsLog2));
    // Apply the shift separately, then use the register form.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    // Apply the extend separately, then use the register form.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}
    560 
    561 
// Emit a single load or store of rt at addr, legalizing immediate offsets
// that do not fit the instruction's scaled or unscaled offset fields. For
// out-of-range pre/post-index modes the base-register update is performed
// with a separate add (note: this mutates addr.base()).
void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    // Use the register-offset addressing mode instead.
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
    592 
// Emit a load/store pair of rt and rt2 at addr, legalizing offsets that do
// not fit the pair instruction's immediate field. For out-of-range
// pre/post-index modes the base-register update is performed with a
// separate Add (note: this mutates addr.base()).
void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      // Compute the effective address into a scratch register and use a
      // zero offset.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}
    625 
    626 
    627 void MacroAssembler::Load(const Register& rt,
    628                           const MemOperand& addr,
    629                           Representation r) {
    630   DCHECK(!r.IsDouble());
    631 
    632   if (r.IsInteger8()) {
    633     Ldrsb(rt, addr);
    634   } else if (r.IsUInteger8()) {
    635     Ldrb(rt, addr);
    636   } else if (r.IsInteger16()) {
    637     Ldrsh(rt, addr);
    638   } else if (r.IsUInteger16()) {
    639     Ldrh(rt, addr);
    640   } else if (r.IsInteger32()) {
    641     Ldr(rt.W(), addr);
    642   } else {
    643     DCHECK(rt.Is64Bits());
    644     Ldr(rt, addr);
    645   }
    646 }
    647 
    648 
    649 void MacroAssembler::Store(const Register& rt,
    650                            const MemOperand& addr,
    651                            Representation r) {
    652   DCHECK(!r.IsDouble());
    653 
    654   if (r.IsInteger8() || r.IsUInteger8()) {
    655     Strb(rt, addr);
    656   } else if (r.IsInteger16() || r.IsUInteger16()) {
    657     Strh(rt, addr);
    658   } else if (r.IsInteger32()) {
    659     Str(rt.W(), addr);
    660   } else {
    661     DCHECK(rt.Is64Bits());
    662     if (r.IsHeapObject()) {
    663       AssertNotSmi(rt);
    664     } else if (r.IsSmi()) {
    665       AssertSmi(rt);
    666     }
    667     Str(rt, addr);
    668   }
    669 }
    670 
    671 
// Returns true when a branch of type b_type to label cannot be encoded in
// a single instruction because the offset is (or could become) out of
// range. As a side effect, unbound labels are registered in
// unresolved_branches_ and the veneer pool check point is advanced so a
// veneer can be emitted before the branch goes out of range.
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
      !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    // Record the last pc at which this branch can still reach the label.
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
      Min(next_veneer_pool_check_,
          max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}
    696 
    697 
// Compute the address of label into rd. With kAdrNear a single adr is
// emitted; with kAdrFar, targets beyond adr's PC-relative range are
// handled, either immediately (bound labels) or via a fixed-size
// patchable sequence (unbound labels) that is presumably rewritten by
// PatchingAssembler once the label is bound — see kAdrFarPatchableNInstrs.
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  DCHECK(hint == kAdrFar);
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      // Bound labels always lie behind the current pc, so only the
      // negative extreme needs handling: take the largest representable
      // backwards adr, then add the remainder.
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    // Emit a fixed-size placeholder: adr plus marker nops, exactly
    // kAdrFarPatchableNInstrs instructions long, so it can be patched
    // in place later if the label turns out to be out of range.
    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
  }
}
    731 
    732 
// Generic branch dispatcher: branch to label according to type. reg is
// only meaningful for the register-testing types (cbz/cbnz/tbz/tbnz), and
// bit only for the bit-testing types; the DCHECK enforces this.
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    // Condition codes map directly onto a conditional branch.
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;  // Deliberately emits nothing.
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}
    751 
    752 
// Conditional branch to |label|. When the label is (or may end up) out of
// range for a single conditional branch instruction, emit an inverted
// conditional branch that skips over an unconditional branch instead.
// |cond| must not be al or nv (use B(label) for an unconditional branch).
void MacroAssembler::B(Label* label, Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
    // Skip the far branch when the condition does not hold.
    b(&done, NegateCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}
    769 
    770 
// Test bit |bit_pos| of |rt| and branch to |label| if it is set. If the label
// may be out of range for a single test-and-branch instruction, emit the
// inverted test over an unconditional (far) branch instead.
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}
    786 
    787 
// Test bit |bit_pos| of |rt| and branch to |label| if it is clear. If the
// label may be out of range for a single test-and-branch instruction, emit
// the inverted test over an unconditional (far) branch instead.
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}
    803 
    804 
// Compare |rt| with zero and branch to |label| if it is non-zero. If the
// label may be out of range for a single compare-and-branch instruction,
// emit the inverted compare over an unconditional (far) branch instead.
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}
    820 
    821 
// Compare |rt| with zero and branch to |label| if it is zero. If the label
// may be out of range for a single compare-and-branch instruction, emit the
// inverted compare over an unconditional (far) branch instead.
void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}
    837 
    838 
    839 // Pseudo-instructions.
    840 
    841 
// Compute rd = abs(rm). Optionally branch to |is_not_representable| when rm
// is the minimum (most negative) two's-complement value, whose absolute value
// cannot be represented, and/or to |is_representable| otherwise. Either label
// may be NULL if the caller does not care about that outcome.
void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  DCHECK(allow_macro_instructions_);
  DCHECK(AreSameSizeAndType(rd, rm));

  // Cmp(rm, 1) sets lt for rm <= 0; Cneg then negates only non-positive
  // inputs, and also sets the overflow (v) flag for INT_MIN.
  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}
    863 
    864 
    865 // Abstracted stack operations.
    866 
    867 
// Push up to four registers onto the stack. src0 must be valid; src1-src3
// are optional (NoReg) trailing registers. All provided registers must have
// the same size and type.
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  // IsValid() is 0/1, so this counts how many registers were supplied.
  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}
    878 
    879 
    880 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
    881                           const CPURegister& src2, const CPURegister& src3,
    882                           const CPURegister& src4, const CPURegister& src5,
    883                           const CPURegister& src6, const CPURegister& src7) {
    884   DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
    885 
    886   int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
    887   int size = src0.SizeInBytes();
    888 
    889   PushPreamble(count, size);
    890   PushHelper(4, size, src0, src1, src2, src3);
    891   PushHelper(count - 4, size, src4, src5, src6, src7);
    892 }
    893 
    894 
// Pop up to four registers from the stack. dst0 must be valid; dst1-dst3 are
// optional (NoReg) trailing registers. All provided registers must have the
// same size and type, and must be pairwise distinct.
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(dst0.IsValid());

  // IsValid() is 0/1, so this counts how many registers were supplied.
  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);
}
    909 
    910 
// Push an integer register and an FP register together, with src0 ending up
// at the higher address (i.e. as if Push(src0) then Push(src1)). The two
// registers may have different sizes, so stp cannot be used; instead the
// stack is reserved in one step and the gap filled with a plain store.
void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
  int size = src0.SizeInBytes() + src1.SizeInBytes();

  PushPreamble(size);
  // Reserve room for src0 and push src1.
  str(src1, MemOperand(StackPointer(), -size, PreIndex));
  // Fill the gap with src0.
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}
    920 
    921 
    922 void MacroAssembler::PushPopQueue::PushQueued(
    923     PreambleDirective preamble_directive) {
    924   if (queued_.empty()) return;
    925 
    926   if (preamble_directive == WITH_PREAMBLE) {
    927     masm_->PushPreamble(size_);
    928   }
    929 
    930   int count = queued_.size();
    931   int index = 0;
    932   while (index < count) {
    933     // PushHelper can only handle registers with the same size and type, and it
    934     // can handle only four at a time. Batch them up accordingly.
    935     CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    936     int batch_index = 0;
    937     do {
    938       batch[batch_index++] = queued_[index++];
    939     } while ((batch_index < 4) && (index < count) &&
    940              batch[0].IsSameSizeAndType(queued_[index]));
    941 
    942     masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
    943                       batch[0], batch[1], batch[2], batch[3]);
    944   }
    945 
    946   queued_.clear();
    947 }
    948 
    949 
    950 void MacroAssembler::PushPopQueue::PopQueued() {
    951   if (queued_.empty()) return;
    952 
    953   int count = queued_.size();
    954   int index = 0;
    955   while (index < count) {
    956     // PopHelper can only handle registers with the same size and type, and it
    957     // can handle only four at a time. Batch them up accordingly.
    958     CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    959     int batch_index = 0;
    960     do {
    961       batch[batch_index++] = queued_[index++];
    962     } while ((batch_index < 4) && (index < count) &&
    963              batch[0].IsSameSizeAndType(queued_[index]));
    964 
    965     masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
    966                      batch[0], batch[1], batch[2], batch[3]);
    967   }
    968 
    969   masm_->PopPostamble(size_);
    970   queued_.clear();
    971 }
    972 
    973 
// Push every register in |registers| (highest-numbered first) onto the stack.
// |registers| is taken by value because it is consumed by the Pop*Index calls.
void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    // PopHighestIndex() returns NoReg once the list is exhausted, which
    // PushHelper tolerates for trailing arguments.
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}
    991 
    992 
// Pop every register in |registers| (lowest-numbered first) from the stack.
// |registers| is taken by value because it is consumed by the Pop*Index calls.
void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
  // NOTE(review): |registers| has been emptied by the loop, so Count() is 0
  // here. This appears to rely on PopPostamble ignoring the size except for
  // an alignment DCHECK -- confirm against the PopPostamble overloads.
  PopPostamble(registers.Count(), size);
}
   1010 
   1011 
// Push |src| onto the stack |count| times, where |count| is a compile-time
// constant. When optimizing for size and count > 8, most pushes are emitted
// as a small runtime loop instead of being fully unrolled.
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    // Loop count/2 times, pushing two copies per iteration; any odd
    // remainder falls through to the unrolled code below.
    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  DCHECK(count == 0);
}
   1048 
   1049 
// Push |src| onto the stack a runtime-variable number of times, given by the
// (unsigned 32-bit) value in |count|. |count| itself is not modified.
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    // Smallest code: one push per loop iteration.
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    // Faster code: push in groups of four, then handle the remainder by
    // testing the low two bits of |count|.
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}
   1094 
   1095 
// Emit the actual stores for a push of |count| (1-4) registers of |size|
// bytes each. Stack-pointer bookkeeping (the preamble) must already have
// been done by the caller.
void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
  DCHECK(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      DCHECK(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      DCHECK(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}
   1134 
   1135 
// Emit the actual loads for a pop of |count| (1-4) registers of |size| bytes
// each. Stack-pointer bookkeeping (the postamble) is the caller's job.
void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      DCHECK(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      DCHECK(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}
   1175 
   1176 
// Prepare the stack pointers before a push of |total_size| bytes. When jssp
// (not csp) is the active stack pointer, csp must still be moved down so that
// the pushed area is never below the system stack pointer (ABI requirement).
void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}
   1195 
   1196 
// Clean up the stack pointers after a pop of |total_size| bytes. No
// instructions are emitted unless debug code is enabled (or a DCHECK fails).
void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    SyncSystemStackPointer();
  }
}
   1215 
   1216 
// Store |src| at |offset| bytes above the current stack pointer, without
// moving the stack pointer. The offset must be non-negative (checked
// statically for immediates, dynamically under debug code for registers).
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}
   1227 
   1228 
// Load |dst| from |offset| bytes above the current stack pointer, without
// moving the stack pointer. The offset must be non-negative (checked
// statically for immediates, dynamically under debug code for registers).
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}
   1239 
   1240 
// Store the pair (src1, src2) at |offset| bytes above the current stack
// pointer using a single stp; the stack pointer is not moved. The offset
// must be non-negative and register-size aligned.
void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  DCHECK(AreSameSizeAndType(src1, src2));
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}
   1248 
   1249 
// Load the pair (dst1, dst2) from |offset| bytes above the current stack
// pointer using a single ldp; the stack pointer is not moved. The offset
// must be non-negative and register-size aligned.
void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  DCHECK(AreSameSizeAndType(dst1, dst2));
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}
   1257 
   1258 
// Save all callee-saved registers (x19-x29, lr, and d8-d15) onto the csp
// stack, two at a time with pre-indexed stp. PopCalleeSavedRegisters must
// restore them in exactly the reverse order.
void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  // Each stp re-applies this pre-indexed writeback, so every pair gets a
  // fresh 16-byte slot below the previous one.
  MemOperand tos(csp, -2 * kXRegSize, PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);    // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}
   1281 
   1282 
// Restore all callee-saved registers (x19-x29, lr, and d8-d15) from the csp
// stack, in exactly the reverse order of PushCalleeSavedRegisters.
void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  // Each ldp re-applies this post-indexed writeback, consuming one 16-byte
  // slot per pair.
  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);    // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}
   1305 
   1306 
// Debug-only check that the stack pointers are consistent: csp is suitably
// aligned, and (with slow asserts) csp <= StackPointer(). The check is
// carefully written to preserve all registers and the NZCV flags.
void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true.  We
      // can't check the alignment of csp without using a scratch register (or
      // clobbering the flags), but the processor (or simulator) will abort if
      // it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      // The trick: StackPointer() temporarily holds csp - StackPointer(),
      // which is undone by the identical sub below (the map is an involution).
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);                 // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);     // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}
   1336 
   1337 
// Debug-only check that FPCR holds the mode V8 requires: default-NaN on,
// flush-to-zero off, round-to-nearest (ties-to-even). |fpcr| may hold the
// FPCR value already; if NoReg, the register is read into a scratch here.
void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings overridden by ConfigureFPCR():
    //   - Assert that default-NaN mode is set.
    Tbz(fpcr, DN_offset, &unexpected_mode);

    // Settings left to their default values:
    //   - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    //   - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}
   1365 
   1366 
// Put FPCR into the state V8 requires: enable default-NaN mode (writing the
// register only when necessary, since an MSR may be expensive), then verify
// the whole register with AssertFPCRState under debug code.
void MacroAssembler::ConfigureFPCR() {
  UseScratchRegisterScope temps(this);
  Register fpcr = temps.AcquireX();
  Mrs(fpcr, FPCR);

  // If necessary, enable default-NaN mode. The default values of the other FPCR
  // options should be suitable, and AssertFPCRState will verify that.
  Label no_write_required;
  Tbnz(fpcr, DN_offset, &no_write_required);

  Orr(fpcr, fpcr, DN_mask);
  Msr(FPCR, fpcr);

  Bind(&no_write_required);
  AssertFPCRState(fpcr);
}
   1383 
   1384 
// Copy |src| to |dst|, replacing any NaN with the default (canonical) NaN.
// Relies on the FPCR state established by ConfigureFPCR (default-NaN mode,
// round-to-nearest), which AssertFPCRState re-checks under debug code.
void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
  // for NaNs, which become the default NaN. We use fsub rather than fadd
  // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}
   1394 
   1395 
// Load the root-list entry |index| into |destination| via the root register.
void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}
   1402 
   1403 
// Store |source| into the root-list entry |index| via the root register.
void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}
   1408 
   1409 
// Load the true and false root values with a single ldp, exploiting the fact
// that they occupy adjacent slots in the root list (checked statically).
void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}
   1416 
   1417 
// Materialize a handle to |object| in |result|. New-space objects can move,
// so they are referenced indirectly through a cell that the GC keeps up to
// date; old-space objects are embedded directly.
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}
   1429 
   1430 
// Load the descriptor array of |map| into |descriptors|.
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
   1435 
   1436 
// Extract the number-of-own-descriptors field of |map|'s bit field 3 into
// |dst| as an untagged integer.
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
   1441 
   1442 
// Extract the enum-length field of |map|'s bit field 3 into |dst| as an
// untagged integer. The field sits at bit 0 (checked statically), so a
// plain mask suffices with no shift.
void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}
   1448 
   1449 
// Extract the enum-length field of |map| into |dst| as a smi.
void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}
   1454 
   1455 
// Walk |object|'s prototype chain (terminated by |null_value|) and verify
// that for-in fast enumeration is possible: the receiver has a valid enum
// cache, every prototype has an empty enum cache, and no object on the chain
// has elements (other than the empty fixed array or the empty slow element
// dictionary). Branches to |call_runtime| if any check fails.
void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  // The receiver's enum cache may be non-empty; skip the emptiness check
  // applied to the prototypes.
  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  // Advance to the prototype and loop until we reach null.
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}
   1512 
   1513 
// Check whether an AllocationMemento immediately follows the JSArray in
// |receiver|. Branches to |no_memento_found| if the candidate memento slot is
// outside the active new-space region; otherwise falls through with the
// flags set by the final Cmp (eq iff the map matches the memento map), which
// the caller is expected to branch on.
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Compute the address just past where a trailing memento would end.
  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  // The memento must lie below the current new-space allocation top.
  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  // Load the candidate memento's map and compare; result left in the flags.
  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
  Cmp(scratch1,
      Operand(isolate()->factory()->allocation_memento_map()));
}
   1537 
   1538 
// Compute the address of the exception handler entry inside |object| (a Code
// object) selected by |state|, and jump to it. |exception| must already be
// in x0, where the handler expects it. |scratch1| and |scratch2| are
// clobbered.
void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register object,
                                        Register state,
                                        Register scratch1,
                                        Register scratch2) {
  // Handler expects argument in x0.
  DCHECK(exception.Is(x0));

  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
  // The upper bits of |state| index the handler table.
  Lsr(scratch2, state, StackHandler::kKindWidth);
  Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  // Entry address = code start + untagged handler offset.
  Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
  Br(scratch1);
}
   1558 
   1559 
// Branch to |branch| depending on whether |object| lies in new space:
// with cond == eq, branch when it does; with cond == ne, when it does not.
// The test masks the address and compares it against the new-space start.
void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
  B(cond, branch);
}
   1570 
   1571 
// Transfer control to the topmost stack handler with |value| (in x0) as
// the thrown exception: unlink the handler from the isolate's handler
// chain, restore context and frame pointer from the handler, then jump
// to the handler entry.  Never returns.
void MacroAssembler::Throw(Register value,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  DCHECK(value.Is(x0));

  // Drop the stack pointer to the top of the top handler.
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));
  // Restore the next handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state.  Restore the context and frame pointer.
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label not_js_frame;
  Cbz(cp, &not_js_frame);
  Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
   1612 
   1613 
// Throw |value| (in x0) past every JS handler: walk the handler chain
// until a JS_ENTRY handler is found, unlink it, and jump to its entry.
// Unlike Throw(), intermediate try/catch handlers are skipped.  Never
// returns.
void MacroAssembler::ThrowUncatchable(Register value,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  DCHECK(value.Is(x0));

  // Drop the stack pointer to the top of the top stack handler.
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  B(&check_kind);
  Bind(&fetch_next);
  // Follow the chain: jssp = current handler's "next" pointer.
  Peek(jssp, StackHandlerConstants::kNextOffset);

  Bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  // Any set kind bit means this is not a JS_ENTRY handler; keep walking.
  Peek(scratch2, StackHandlerConstants::kStateOffset);
  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state.  Clear the context and frame pointer (0 was
  // saved in the handler).
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
   1659 
   1660 
// Debug-mode check: abort with |reason| unless |object| is a smi.
// Emits nothing when debug code is disabled.
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    // A smi has its tag bits clear, so the masked value must be zero.
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}
   1668 
   1669 
// Debug-mode check: abort with |reason| if |object| is a smi.
// Emits nothing when debug code is disabled.
void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    // A non-smi must have at least one tag bit set.
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}
   1677 
   1678 
// Debug-mode check: abort unless |object| is a Name (symbol or string),
// i.e. its instance type is <= LAST_NAME_TYPE.  Emits nothing when debug
// code is disabled.
void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}
   1691 
   1692 
// Debug-mode check: abort unless |object| is the undefined value or an
// AllocationSite (identified by its map).  Emits nothing when debug code
// is disabled.
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    // Undefined is accepted directly; otherwise the map must be the
    // allocation-site map.
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}
   1705 
   1706 
   1707 void MacroAssembler::AssertString(Register object) {
   1708   if (emit_debug_code()) {
   1709     UseScratchRegisterScope temps(this);
   1710     Register temp = temps.AcquireX();
   1711     STATIC_ASSERT(kSmiTag == 0);
   1712     Tst(object, kSmiTagMask);
   1713     Check(ne, kOperandIsASmiAndNotAString);
   1714     Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   1715     CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
   1716     Check(lo, kOperandIsNotAString);
   1717   }
   1718 }
   1719 
   1720 
// Call |stub| through its code object, tagging the call site with
// |ast_id| for type feedback.
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
   1725 
   1726 
// Tail-call |stub|: jump to its code object without pushing a return
// address, so the stub returns directly to our caller.
void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
   1730 
   1731 
// Call the runtime function |f| through the CEntry stub.  The argument
// count goes in x0 and the function's external reference in x1, matching
// the CEntry calling convention.
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}
   1749 
   1750 
   1751 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   1752   return ref0.address() - ref1.address();
   1753 }
   1754 
   1755 
// Call an API function through the DirectCEntry stub and return from the
// enclosing exit frame, dropping |stack_space| slots.  Maintains the
// isolate's HandleScope fields (next/limit/level) around the call,
// deletes extension handle blocks if the scope limit changed, and
// promotes any scheduled exception.  The call target is taken from
// |function_address| (must be x1 or x2), or substituted with |thunk_ref|
// when the profiler is active so the call can be logged.
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    int spill_offset,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
  ASM_LOCATION("CallApiFunctionAndReturn");
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  // Offsets of the scope's limit and level fields relative to its next
  // field, so one base register can address all three.
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);

  DCHECK(function_address.is(x1) || function_address.is(x2));

  // x3 = actual call target: the profiling thunk if profiling is on,
  // otherwise the API function itself.
  Label profiler_disabled;
  Label end_profiler_check;
  Mov(x10, ExternalReference::is_profiling_address(isolate()));
  Ldrb(w10, MemOperand(x10));
  Cbz(w10, &profiler_disabled);
  Mov(x3, thunk_ref);
  B(&end_profiler_check);

  Bind(&profiler_disabled);
  Mov(x3, function_address);
  Bind(&end_profiler_check);

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  Poke(x19, (spill_offset + 0) * kXRegSize);
  Poke(x20, (spill_offset + 1) * kXRegSize);
  Poke(x21, (spill_offset + 2) * kXRegSize);
  Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // The HandleScope state must be restored after the call to the API
  // function; keeping it in callee-save registers means the C code
  // preserves it for us.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  Mov(handle_scope_base, next_address);
  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Add(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  Ldr(x0, return_value_operand);
  Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (emit_debug_code()) {
    // The scope level must match the value we saved before the call.
    Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    Cmp(w1, level_reg);
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  Sub(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  // If the scope limit moved, extension blocks were allocated and must be
  // deleted before leaving.
  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  Cmp(limit_reg, x1);
  B(ne, &delete_allocated_handles);

  Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  Peek(x19, (spill_offset + 0) * kXRegSize);
  Peek(x20, (spill_offset + 1) * kXRegSize);
  Peek(x21, (spill_offset + 2) * kXRegSize);
  Peek(x22, (spill_offset + 3) * kXRegSize);

  // Check if the function scheduled an exception.
  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
  Ldr(x5, MemOperand(x5));
  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
  Bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    Ldr(cp, *context_restore_operand);
  }

  LeaveExitFrame(false, x1, !restore_context);
  Drop(stack_space);
  Ret();

  Bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(
            Runtime::kPromoteScheduledException, isolate()), 0);
  }
  B(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  Bind(&delete_allocated_handles);
  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  Mov(saved_result, x0);
  Mov(x0, ExternalReference::isolate_address(isolate()));
  CallCFunction(
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  Mov(x0, saved_result);
  B(&leave_exit_frame);
}
   1899 
   1900 
// Call the runtime function referenced by |ext| through the CEntry stub.
// Argument count goes in x0 and the reference in x1, matching the CEntry
// calling convention.
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}
   1909 
   1910 
// Tail-call the runtime function referenced by |builtin| through the
// CEntry stub.  The caller must already have placed the argument count
// in x0 (see TailCallExternalReference).
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
   1916 
   1917 
   1918 void MacroAssembler::GetBuiltinFunction(Register target,
   1919                                         Builtins::JavaScript id) {
   1920   // Load the builtins object into target register.
   1921   Ldr(target, GlobalObjectMemOperand());
   1922   Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
   1923   // Load the JavaScript builtin function from the builtins object.
   1924   Ldr(target, FieldMemOperand(target,
   1925                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
   1926 }
   1927 
   1928 
// Load the code entry point of the JavaScript builtin |id| into |target|
// and its JSFunction into |function|.  The two registers must differ.
void MacroAssembler::GetBuiltinEntry(Register target,
                                     Register function,
                                     Builtins::JavaScript id) {
  DCHECK(!AreAliased(target, function));
  GetBuiltinFunction(function, id);
  // Load the code entry point from the builtins object.
  Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
   1937 
   1938 
// Invoke the JavaScript builtin |id| by call or jump, depending on
// |flag|.  Loads the entry into x2 and the function object into x1,
// notifying |call_wrapper| around an actual call.
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
  // You can't call a builtin without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Get the builtin entry in x2 and setup the function object in x1.
  GetBuiltinEntry(x2, x1, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(x2));
    Call(x2);
    call_wrapper.AfterCall();
  } else {
    DCHECK(flag == JUMP_FUNCTION);
    Jump(x2);
  }
}
   1957 
   1958 
// Tail-call the runtime function referenced by |ext| with |num_arguments|
// arguments already on the stack.  |result_size| is currently unused on
// this port.
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Mov(x0, num_arguments);
  JumpToExternalReference(ext);
}
   1969 
   1970 
   1971 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
   1972                                      int num_arguments,
   1973                                      int result_size) {
   1974   TailCallExternalReference(ExternalReference(fid, isolate()),
   1975                             num_arguments,
   1976                             result_size);
   1977 }
   1978 
   1979 
// Initialize the header of a freshly allocated string object: set its
// map (from |map_index|), its smi-tagged length, and an empty hash field.
// |scratch2| is deliberately reused: first for the map, then for the
// hash-field constant.
void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  DCHECK(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}
   1994 
   1995 
// Return the stack alignment (in bytes) required at C function call
// boundaries: the host OS value when generating on real ARM64 hardware,
// or a flag-controlled value under the simulator.
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}
   2011 
   2012 
// Call a C function with |num_of_reg_args| integer register arguments and
// no double arguments.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}
   2017 
   2018 
// Call a C function identified by an external reference: materialize its
// address in a scratch register and dispatch to the register overload.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
   2027 
   2028 
// Call the C function whose address is in |function|.  If the current
// stack pointer is not csp, an ABI-aligned csp is derived from it for the
// duration of the call and the original stack pointer is reinstated (and
// sanity-checked in debug code) afterwards.
void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  DCHECK(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
  DCHECK(num_of_reg_args <= 8);

  // If we're passing doubles, we're limited to the following prototypes
  // (defined by ExternalReference::Type):
  //  BUILTIN_COMPARE_CALL:  int f(double, double)
  //  BUILTIN_FP_FP_CALL:    double f(double, double)
  //  BUILTIN_FP_CALL:       double f(double)
  //  BUILTIN_FP_INT_CALL:   double f(double, int)
  if (num_of_double_args > 0) {
    DCHECK(num_of_reg_args <= 1);
    DCHECK((num_of_double_args + num_of_reg_args) <= 2);
  }


  // If the stack pointer is not csp, we need to derive an aligned csp from the
  // current stack pointer.
  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    // The ABI mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));

    // The current stack pointer is a callee saved register, and is preserved
    // across the call.
    DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

    // Align and synchronize the system stack pointer with jssp.
    // Clearing the low bits rounds the stack pointer DOWN to alignment.
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Call directly. The function called cannot cause a GC, or allow preemption,
  // so the return address in the link register stays correct.
  Call(function);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      // Because the stack pointer must be aligned on a 16-byte boundary, the
      // aligned csp can be up to 12 bytes below the jssp. This is the case
      // where we only pushed one W register on top of an aligned jssp.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      DCHECK(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      // We want temp <= 0 && temp >= -12.
      Cmp(temp, 0);
      Ccmp(temp, -12, NFlag, le);
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
    }
    SetStackPointer(old_stack_pointer);
  }
}
   2090 
   2091 
// Jump to the address held in |target|.
void MacroAssembler::Jump(Register target) {
  Br(target);
}
   2095 
   2096 
// Jump to the absolute address |target|, materialized in a scratch
// register with relocation mode |rmode|.
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, Operand(target, rmode));
  Br(temp);
}
   2103 
   2104 
// Jump to a non-code-target address.  Code targets must go through the
// Handle<Code> overload so the target can be relocated.
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode);
}
   2109 
   2110 
// Jump to a code object.  The raw handle location is embedded and fixed
// up by relocation.
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
   2116 
   2117 
// Call the address held in |target|.  Constant/veneer pools are blocked
// so the emitted size matches CallSize(Register), which debug code
// verifies.
void MacroAssembler::Call(Register target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Blr(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}
   2131 
   2132 
// Call the code at |target| with a direct branch-and-link.  Pools are
// blocked so the emitted size matches CallSize(Label*), which debug code
// verifies.
void MacroAssembler::Call(Label* target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Bl(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}
   2146 
   2147 
   2148 // MacroAssembler::CallSize is sensitive to changes in this function, as it
   2149 // requires to know how many instructions are used to branch to the target.
// Call the absolute address |target|.  For NONE64 (no relocation) the
// 48-bit address is synthesized with movz/movk; otherwise it is loaded
// from the constant pool so relocation can patch it.  Debug code checks
// the emitted size against CallSize(Address, rmode).
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif
  // Statement positions are expected to be recorded when the target
  // address is loaded.
  positions_recorder()->WriteRecordedPositions();

  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
  DCHECK(rmode != RelocInfo::NONE32);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (rmode == RelocInfo::NONE64) {
    // Addresses are 48 bits so we never need to load the upper 16 bits.
    uint64_t imm = reinterpret_cast<uint64_t>(target);
    // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
    DCHECK(((imm >> 48) & 0xffff) == 0);
    movz(temp, (imm >> 0) & 0xffff, 0);
    movk(temp, (imm >> 16) & 0xffff, 16);
    movk(temp, (imm >> 32) & 0xffff, 32);
  } else {
    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
  }
  Blr(temp);
#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
#endif
}
   2182 
   2183 
// Call a code object, optionally tagging the call site with |ast_id|
// (upgrading the reloc mode to CODE_TARGET_WITH_ID).  Debug code checks
// the emitted size against CallSize(code, rmode, ast_id).
void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }

  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode);

#ifdef DEBUG
  // Check the size of the code generated.
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
#endif
}
   2205 
   2206 
// Size in bytes of the code emitted by Call(Register): a single Blr.
int MacroAssembler::CallSize(Register target) {
  USE(target);
  return kInstructionSize;
}
   2211 
   2212 
// Size in bytes of the code emitted by Call(Label*): a single Bl.
int MacroAssembler::CallSize(Label* target) {
  USE(target);
  return kInstructionSize;
}
   2217 
   2218 
   2219 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
   2220   USE(target);
   2221 
   2222   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
   2223   DCHECK(rmode != RelocInfo::NONE32);
   2224 
   2225   if (rmode == RelocInfo::NONE64) {
   2226     return kCallSizeWithoutRelocation;
   2227   } else {
   2228     return kCallSizeWithRelocation;
   2229   }
   2230 }
   2231 
   2232 
   2233 int MacroAssembler::CallSize(Handle<Code> code,
   2234                              RelocInfo::Mode rmode,
   2235                              TypeFeedbackId ast_id) {
   2236   USE(code);
   2237   USE(ast_id);
   2238 
   2239   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
   2240   DCHECK(rmode != RelocInfo::NONE32);
   2241 
   2242   if (rmode == RelocInfo::NONE64) {
   2243     return kCallSizeWithoutRelocation;
   2244   } else {
   2245     return kCallSizeWithRelocation;
   2246   }
   2247 }
   2248 
   2249 
// Branch to |on_heap_number| if |object|'s map is the heap-number map;
// fall through otherwise.  With DO_SMI_CHECK, smis also fall through
// (via the local label) instead of being dereferenced.
void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
                                      SmiCheckType smi_check_type) {
  Label on_not_heap_number;

  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(object, &on_not_heap_number);
  }

  // With DONT_DO_SMI_CHECK the caller guarantees a non-smi.
  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);

  Bind(&on_not_heap_number);
}
   2267 
   2268 
// Branch to |on_not_heap_number| unless |object|'s map is the heap-number
// map.  With DO_SMI_CHECK, smis branch there too; with DONT_DO_SMI_CHECK
// the caller guarantees a non-smi.
void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Label* on_not_heap_number,
                                         SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(object, on_not_heap_number);
  }

  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
   2283 
   2284 
// Look up the cached string representation of the number in |object|
// (a smi or heap number) in the number-string cache.  On a hit the
// string is left in |result| and the number_to_string_native counter is
// bumped; on a miss control branches to |not_found| with |result|
// possibly clobbered (it doubles as a temporary).
void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));

  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
                                      FixedArray::kLengthOffset));
  Asr(mask, mask, 1);  // Divide length by two.
  Sub(mask, mask, 1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;

  JumpIfSmi(object, &is_smi);
  JumpIfNotHeapNumber(object, not_found);

  // Heap-number path: hash = high word ^ low word of the double.
  STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
  Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
  Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
  Eor(scratch1, scratch1, scratch2);
  And(scratch1, scratch1, mask);

  // Calculate address of entry in string cache: each entry consists of two
  // pointer sized fields.
  Add(scratch1, number_string_cache,
      Operand(scratch1, LSL, kPointerSizeLog2 + 1));

  // |mask| is no longer needed, so reuse it for the cached key (probe).
  Register probe = mask;
  Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  // Keys are compared as doubles so +0/-0 and NaN behave per Fcmp.
  Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
  Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
  Fcmp(d0, d1);
  B(ne, not_found);
  B(&load_result_from_cache);

  Bind(&is_smi);
  // Smi path: hash = untagged smi value, masked.
  Register scratch = scratch1;
  And(scratch, mask, Operand::UntagSmi(object));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  Add(scratch, number_string_cache,
      Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Cmp(object, probe);
  B(ne, not_found);

  // Get the result from the cache.
  // NOTE(review): this load uses scratch1, which on the heap-number path
  // holds the same entry address as the smi path's |scratch| alias.
  Bind(&load_result_from_cache);
  Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
                   scratch1, scratch2);
}
   2356 
   2357 
   2358 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
   2359                                              FPRegister value,
   2360                                              FPRegister scratch_d,
   2361                                              Label* on_successful_conversion,
   2362                                              Label* on_failed_conversion) {
   2363   // Convert to an int and back again, then compare with the original value.
   2364   Fcvtzs(as_int, value);
   2365   Scvtf(scratch_d, as_int);
   2366   Fcmp(value, scratch_d);
   2367 
   2368   if (on_successful_conversion) {
   2369     B(on_successful_conversion, eq);
   2370   }
   2371   if (on_failed_conversion) {
   2372     B(on_failed_conversion, ne);
   2373   }
   2374 }
   2375 
   2376 
   2377 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
   2378   UseScratchRegisterScope temps(this);
   2379   Register temp = temps.AcquireX();
   2380   // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
   2381   // cause overflow.
   2382   Fmov(temp, input);
   2383   Cmp(temp, 1);
   2384 }
   2385 
   2386 
void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
                                     Label* on_negative_zero) {
  // Branch to on_negative_zero iff input is exactly -0.0. TestForMinusZero
  // leaves the V (overflow) flag set only for the -0.0 bit pattern.
  TestForMinusZero(input);
  B(vs, on_negative_zero);
}
   2392 
   2393 
void MacroAssembler::JumpIfMinusZero(Register input,
                                     Label* on_negative_zero) {
  // Branch to on_negative_zero iff 'input' holds the raw bit pattern of -0.0.
  DCHECK(input.Is64Bits());
  // Floating point value is in an integer register. Detect -0.0 by subtracting
  // 1 (cmp), which will cause overflow.
  Cmp(input, 1);
  B(vs, on_negative_zero);
}
   2402 
   2403 
void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
  // Saturate a signed 32-bit value into [0..255] and write it to 'output',
  // using two conditional selects so no branches are emitted.
  // Clamp the value to [0..255].
  Cmp(input.W(), Operand(input.W(), UXTB));
  // If input < input & 0xff, it must be < 0, so saturate to 0.
  Csel(output.W(), wzr, input.W(), lt);
  // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
  Csel(output.W(), output.W(), 255, le);
}
   2412 
   2413 
void MacroAssembler::ClampInt32ToUint8(Register in_out) {
  // In-place variant: clamp 'in_out' to [0..255].
  ClampInt32ToUint8(in_out, in_out);
}
   2417 
   2418 
void MacroAssembler::ClampDoubleToUint8(Register output,
                                        DoubleRegister input,
                                        DoubleRegister dbl_scratch) {
  // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
  //   - Inputs lower than 0 (including -infinity) produce 0.
  //   - Inputs higher than 255 (including +infinity) produce 255.
  // Also, it seems that PIXEL types use round-to-nearest rather than
  // round-towards-zero.

  // Squash +infinity before the conversion, since Fcvtnu will normally
  // convert it to 0.
  Fmov(dbl_scratch, 255);
  Fmin(dbl_scratch, dbl_scratch, input);

  // Convert double to unsigned integer. Values less than zero become zero.
  // Values greater than 255 have already been clamped to 255. Fcvtnu rounds
  // to nearest, as required by the PIXEL semantics noted above.
  Fcvtnu(output, dbl_scratch);
}
   2437 
   2438 
   2439 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
   2440                                                Register src,
   2441                                                unsigned count,
   2442                                                Register scratch1,
   2443                                                Register scratch2,
   2444                                                Register scratch3,
   2445                                                Register scratch4,
   2446                                                Register scratch5) {
   2447   // Untag src and dst into scratch registers.
   2448   // Copy src->dst in a tight loop.
   2449   DCHECK(!AreAliased(dst, src,
   2450                      scratch1, scratch2, scratch3, scratch4, scratch5));
   2451   DCHECK(count >= 2);
   2452 
   2453   const Register& remaining = scratch3;
   2454   Mov(remaining, count / 2);
   2455 
   2456   const Register& dst_untagged = scratch1;
   2457   const Register& src_untagged = scratch2;
   2458   Sub(dst_untagged, dst, kHeapObjectTag);
   2459   Sub(src_untagged, src, kHeapObjectTag);
   2460 
   2461   // Copy fields in pairs.
   2462   Label loop;
   2463   Bind(&loop);
   2464   Ldp(scratch4, scratch5,
   2465       MemOperand(src_untagged, kXRegSize* 2, PostIndex));
   2466   Stp(scratch4, scratch5,
   2467       MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
   2468   Sub(remaining, remaining, 1);
   2469   Cbnz(remaining, &loop);
   2470 
   2471   // Handle the leftovers.
   2472   if (count & 1) {
   2473     Ldr(scratch4, MemOperand(src_untagged));
   2474     Str(scratch4, MemOperand(dst_untagged));
   2475   }
   2476 }
   2477 
   2478 
   2479 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
   2480                                                    Register src,
   2481                                                    unsigned count,
   2482                                                    Register scratch1,
   2483                                                    Register scratch2,
   2484                                                    Register scratch3,
   2485                                                    Register scratch4) {
   2486   // Untag src and dst into scratch registers.
   2487   // Copy src->dst in an unrolled loop.
   2488   DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
   2489 
   2490   const Register& dst_untagged = scratch1;
   2491   const Register& src_untagged = scratch2;
   2492   sub(dst_untagged, dst, kHeapObjectTag);
   2493   sub(src_untagged, src, kHeapObjectTag);
   2494 
   2495   // Copy fields in pairs.
   2496   for (unsigned i = 0; i < count / 2; i++) {
   2497     Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
   2498     Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
   2499   }
   2500 
   2501   // Handle the leftovers.
   2502   if (count & 1) {
   2503     Ldr(scratch3, MemOperand(src_untagged));
   2504     Str(scratch3, MemOperand(dst_untagged));
   2505   }
   2506 }
   2507 
   2508 
   2509 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
   2510                                               Register src,
   2511                                               unsigned count,
   2512                                               Register scratch1,
   2513                                               Register scratch2,
   2514                                               Register scratch3) {
   2515   // Untag src and dst into scratch registers.
   2516   // Copy src->dst in an unrolled loop.
   2517   DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
   2518 
   2519   const Register& dst_untagged = scratch1;
   2520   const Register& src_untagged = scratch2;
   2521   Sub(dst_untagged, dst, kHeapObjectTag);
   2522   Sub(src_untagged, src, kHeapObjectTag);
   2523 
   2524   // Copy fields one by one.
   2525   for (unsigned i = 0; i < count; i++) {
   2526     Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
   2527     Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
   2528   }
   2529 }
   2530 
   2531 
void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
                                unsigned count) {
  // One of two methods is used:
  //
  // For high 'count' values where many scratch registers are available:
  //    Untag src and dst into scratch registers.
  //    Copy src->dst in a tight loop.
  //
  // For low 'count' values or where few scratch registers are available:
  //    Untag src and dst into scratch registers.
  //    Copy src->dst in an unrolled loop.
  //
  // In both cases, fields are copied in pairs if possible, and left-overs are
  // handled separately.
  DCHECK(!AreAliased(dst, src));
  DCHECK(!temps.IncludesAliasOf(dst));
  DCHECK(!temps.IncludesAliasOf(src));
  // xzr must never be handed out as a scratch register.
  DCHECK(!temps.IncludesAliasOf(xzr));

  if (emit_debug_code()) {
    Cmp(dst, src);
    Check(ne, kTheSourceAndDestinationAreTheSame);
  }

  // The value of 'count' at which a loop will be generated (if there are
  // enough scratch registers).
  static const unsigned kLoopThreshold = 8;

  // Caller-provided 'temps' are consumed first; any remaining scratch needs
  // are satisfied from the assembler's own scratch pool via masm_temps.
  UseScratchRegisterScope masm_temps(this);
  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
    CopyFieldsLoopPairsHelper(dst, src, count,
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              masm_temps.AcquireX(),
                              masm_temps.AcquireX());
  } else if (temps.Count() >= 2) {
    CopyFieldsUnrolledPairsHelper(dst, src, count,
                                  Register(temps.PopLowestIndex()),
                                  Register(temps.PopLowestIndex()),
                                  masm_temps.AcquireX(),
                                  masm_temps.AcquireX());
  } else if (temps.Count() == 1) {
    CopyFieldsUnrolledHelper(dst, src, count,
                             Register(temps.PopLowestIndex()),
                             masm_temps.AcquireX(),
                             masm_temps.AcquireX());
  } else {
    // At least one caller-provided scratch register is required.
    UNREACHABLE();
  }
}
   2583 
   2584 
void MacroAssembler::CopyBytes(Register dst,
                               Register src,
                               Register length,
                               Register scratch,
                               CopyHint hint) {
  // Copy 'length' bytes from src to dst. The src, dst and length registers
  // are clobbered (advanced / decremented by the post-indexed accesses below).
  // 'hint' selects whether a bulk 16-bytes-per-iteration loop is emitted
  // ahead of the final byte-at-a-time loop.
  UseScratchRegisterScope temps(this);
  Register tmp1 = temps.AcquireX();
  Register tmp2 = temps.AcquireX();
  DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
  DCHECK(!AreAliased(src, dst, csp));

  if (emit_debug_code()) {
    // Check copy length.
    Cmp(length, 0);
    Assert(ge, kUnexpectedNegativeValue);

    // Check src and dst buffers don't overlap.
    Add(scratch, src, length);  // Calculate end of src buffer.
    Cmp(scratch, dst);
    Add(scratch, dst, length);  // Calculate end of dst buffer.
    Ccmp(scratch, src, ZFlag, gt);
    Assert(le, kCopyBuffersOverlap);
  }

  Label short_copy, short_loop, bulk_loop, done;

  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
    Register bulk_length = scratch;
    int pair_size = 2 * kXRegSize;
    int pair_mask = pair_size - 1;

    // Copy whole 16-byte chunks while at least one remains.
    Bic(bulk_length, length, pair_mask);
    Cbz(bulk_length, &short_copy);
    Bind(&bulk_loop);
    Sub(bulk_length, bulk_length, pair_size);
    Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
    Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
    Cbnz(bulk_length, &bulk_loop);

    // Leave only the sub-chunk remainder for the byte loop.
    And(length, length, pair_mask);
  }

  // Copy the remaining bytes one at a time.
  Bind(&short_copy);
  Cbz(length, &done);
  Bind(&short_loop);
  Sub(length, length, 1);
  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
  Strb(tmp1, MemOperand(dst, 1, PostIndex));
  Cbnz(length, &short_loop);


  Bind(&done);
}
   2638 
   2639 
void MacroAssembler::FillFields(Register dst,
                                Register field_count,
                                Register filler) {
  // Fill 'field_count' pointer-sized fields starting at dst with 'filler'.
  // Note: dst is clobbered — the first post-indexed store below advances it
  // past the first field.
  DCHECK(!dst.Is(csp));
  UseScratchRegisterScope temps(this);
  Register field_ptr = temps.AcquireX();
  Register counter = temps.AcquireX();
  Label done;

  // Decrement count. If the result < zero, count was zero, and there's nothing
  // to do. If count was one, flags are set to fail the gt condition at the end
  // of the pairs loop.
  Subs(counter, field_count, 1);
  B(lt, &done);

  // There's at least one field to fill, so do this unconditionally.
  Str(filler, MemOperand(dst, kPointerSize, PostIndex));

  // If the bottom bit of counter is set, there are an even number of fields to
  // fill, so pull the start pointer back by one field, allowing the pairs loop
  // to overwrite the field that was stored above.
  And(field_ptr, counter, 1);
  Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));

  // Store filler to memory in pairs.
  Label entry, loop;
  B(&entry);
  Bind(&loop);
  Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
  Subs(counter, counter, 2);
  Bind(&entry);
  B(gt, &loop);

  Bind(&done);
}
   2675 
   2676 
void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    Label* failure, SmiCheckType smi_check) {
  // Branch to 'failure' unless both 'first' and 'second' are sequential
  // one-byte strings. 'smi_check' controls whether a smi check is emitted
  // first; with DONT_DO_SMI_CHECK a debug build still verifies the claim.
  if (smi_check == DO_SMI_CHECK) {
    JumpIfEitherSmi(first, second, failure);
  } else if (emit_debug_code()) {
    DCHECK(smi_check == DONT_DO_SMI_CHECK);
    Label not_smi;
    JumpIfEitherSmi(first, second, NULL, &not_smi);

    // At least one input is a smi, but the flags indicated a smi check wasn't
    // needed.
    Abort(kUnexpectedSmi);

    Bind(&not_smi);
  }

  // Test that both first and second are sequential one-byte strings.
  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
                                                 scratch2, failure);
}
   2703 
   2704 
   2705 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
   2706     Register first, Register second, Register scratch1, Register scratch2,
   2707     Label* failure) {
   2708   DCHECK(!AreAliased(scratch1, second));
   2709   DCHECK(!AreAliased(scratch1, scratch2));
   2710   static const int kFlatOneByteStringMask =
   2711       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   2712   static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
   2713   And(scratch1, first, kFlatOneByteStringMask);
   2714   And(scratch2, second, kFlatOneByteStringMask);
   2715   Cmp(scratch1, kFlatOneByteStringTag);
   2716   Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
   2717   B(ne, failure);
   2718 }
   2719 
   2720 
   2721 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
   2722                                                               Register scratch,
   2723                                                               Label* failure) {
   2724   const int kFlatOneByteStringMask =
   2725       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   2726   const int kFlatOneByteStringTag =
   2727       kStringTag | kOneByteStringTag | kSeqStringTag;
   2728   And(scratch, type, kFlatOneByteStringMask);
   2729   Cmp(scratch, kFlatOneByteStringTag);
   2730   B(ne, failure);
   2731 }
   2732 
   2733 
   2734 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
   2735     Register first, Register second, Register scratch1, Register scratch2,
   2736     Label* failure) {
   2737   DCHECK(!AreAliased(first, second, scratch1, scratch2));
   2738   const int kFlatOneByteStringMask =
   2739       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
   2740   const int kFlatOneByteStringTag =
   2741       kStringTag | kOneByteStringTag | kSeqStringTag;
   2742   And(scratch1, first, kFlatOneByteStringMask);
   2743   And(scratch2, second, kFlatOneByteStringMask);
   2744   Cmp(scratch1, kFlatOneByteStringTag);
   2745   Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
   2746   B(ne, failure);
   2747 }
   2748 
   2749 
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
                                                     Label* not_unique_name) {
  // Branch to 'not_unique_name' unless 'type' is the instance type of an
  // internalized string or of a symbol (the "unique name" types).
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
  //   continue
  // } else {
  //   goto not_unique_name
  // }
  Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
  Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
  B(ne, not_unique_name);
}
   2762 
   2763 
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  // Compare the expected and actual argument counts and, when they can
  // mismatch, emit a call/jump to the ArgumentsAdaptorTrampoline. On a
  // definite compile-time mismatch, *definitely_mismatches is set and the
  // adaptor performs the whole invocation (branching to 'done' afterwards for
  // CALL_FUNCTION); otherwise control continues at 'regular_invoke' so the
  // caller can emit a direct call.
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  x0: actual arguments count.
  //  x1: function (passed through to callee).
  //  x2: expected arguments count.

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  DCHECK(actual.is_immediate() || actual.reg().is(x0));
  DCHECK(expected.is_immediate() || expected.reg().is(x2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;

    } else {
      Mov(x0, actual.immediate());
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        // Set up x2 for the argument adaptor.
        Mov(x2, expected.immediate());
      }
    }

  } else {  // expected is a register.
    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    // If actual == expected perform a regular invocation.
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);
    // Otherwise set up x0 for the argument adaptor.
    Mov(x0, actual_op);
  }

  // If the argument counts may mismatch, generate a call to the argument
  // adaptor.
  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      // Materialize the code entry point from the constant handle.
      Mov(x3, Operand(code_constant));
      Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        // If the arg counts don't match, no extra code is emitted by
        // MAsm::InvokeCode and we can just fall through.
        B(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
  }
  Bind(&regular_invoke);
}
   2845 
   2846 
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // Invoke 'code' directly when the argument counts may match; when they
  // definitely mismatch, InvokePrologue has already routed the invocation
  // through the arguments adaptor and branched to 'done'.
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;

  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 &definitely_mismatches, call_wrapper);

  // If we are certain that actual != expected, then we know InvokePrologue will
  // have handled the call through the argument adaptor mechanism.
  // The called function expects the call kind in x5.
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  Bind(&done);
}
   2879 
   2880 
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Invoke a JSFunction whose expected argument count is loaded at runtime
  // from its SharedFunctionInfo. Also loads the function's context into cp
  // and its code entry into x3.
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  DCHECK(function.is(x1));

  Register expected_reg = x2;
  Register code_reg = x3;

  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
  // The number of arguments is stored as an int32_t, and -1 is a marker
  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
  // extension to correctly handle it.
  Ldr(expected_reg, FieldMemOperand(function,
                                    JSFunction::kSharedFunctionInfoOffset));
  Ldrsw(expected_reg,
        FieldMemOperand(expected_reg,
                        SharedFunctionInfo::kFormalParameterCountOffset));
  Ldr(code_reg,
      FieldMemOperand(function, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
   2910 
   2911 
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Variant used when the caller already knows the expected argument count,
  // so only the context and code entry need to be loaded from the function.
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  DCHECK(function.Is(x1));

  Register code_reg = x3;

  // Set up the context.
  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
   2935 
   2936 
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Materialize the function handle into x1 and delegate to the register
  // variant. (The '__' prefix is a no-op macro defined at the top of this
  // file.)
  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  __ LoadObject(x1, function);
  InvokeFunction(x1, expected, actual, flag, call_wrapper);
}
   2947 
   2948 
void MacroAssembler::TryConvertDoubleToInt64(Register result,
                                             DoubleRegister double_input,
                                             Label* done) {
  // Try to convert with an FPU convert instruction. It's trivial to compute
  // the modulo operation on an integer register so we convert to a 64-bit
  // integer.
  //
  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
  // when the double is out of range. NaNs and infinities will be converted to 0
  // (as ECMA-262 requires).
  Fcvtzs(result.X(), double_input);

  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
  // representable using a double, so if the result is one of those then we know
  // that saturation occurred, and we need to manually handle the conversion.
  //
  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
  // 1 will cause signed overflow.
  Cmp(result.X(), 1);
  // Only if the first check did not overflow (vc) is the second comparison
  // (result - (-1), i.e. result + 1) performed; otherwise V stays set.
  Ccmp(result.X(), -1, VFlag, vc);

  // V clear means neither saturation value was produced: conversion succeeded.
  B(vc, done);
}
   2972 
   2973 
void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  // Truncate 'double_input' to an int32 in 'result', first trying the inline
  // FPU conversion and falling back to the DoubleToIStub for values the
  // hardware saturates on.
  Label done;

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, double_input, &done);

  const Register old_stack_pointer = StackPointer();
  if (csp.Is(old_stack_pointer)) {
    // This currently only happens during compiler-unittest. If it arises
    // during regular code generation the DoubleToI stub should be updated to
    // cope with csp and have an extra parameter indicating which stack pointer
    // it should use.
    Push(jssp, xzr);  // Push xzr to maintain csp required 16-bytes alignment.
    Mov(jssp, csp);
    SetStackPointer(jssp);
  }

  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr, double_input);

  DoubleToIStub stub(isolate(),
                     jssp,
                     result,
                     0,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber

  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
  Pop(xzr, lr);  // xzr to drop the double input on the stack.

  if (csp.Is(old_stack_pointer)) {
    // Restore csp as the active stack pointer and discard the jssp copy that
    // was pushed above.
    Mov(csp, jssp);
    SetStackPointer(csp);
    AssertStackConsistency();
    Pop(xzr, jssp);
  }

  Bind(&done);
}
   3016 
   3017 
void MacroAssembler::TruncateHeapNumberToI(Register result,
                                           Register object) {
  // Truncate the value of the HeapNumber 'object' to an int32 in 'result',
  // trying the inline FPU conversion first and falling back to DoubleToIStub.
  Label done;
  DCHECK(!result.is(object));
  DCHECK(jssp.Is(StackPointer()));

  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, fp_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
  Pop(lr);

  Bind(&done);
}
   3043 
   3044 
void MacroAssembler::StubPrologue() {
  // Set up a STUB frame: push lr, fp, cp and the frame-type marker, then
  // point fp at the saved fp slot.
  DCHECK(StackPointer().Is(jssp));
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  __ Mov(temp, Smi::FromInt(StackFrame::STUB));
  // Compiled stubs don't age, and so they don't need the predictable code
  // ageing sequence.
  __ Push(lr, fp, cp, temp);
  __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
   3055 
   3056 
void MacroAssembler::Prologue(bool code_pre_aging) {
  // Emit the function prologue, either as a pre-aged code age stub call or as
  // the patchable frame-setup sequence used by code ageing.
  if (code_pre_aging) {
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    __ EmitCodeAgeSequence(stub);
  } else {
    __ EmitFrameSetupForCodeAgePatching();
  }
}
   3065 
   3066 
// Builds a standard stack frame of the given type: saves lr, fp and cp, then
// pushes the frame-type marker (as a Smi) and the code object, and finally
// repoints fp at the saved-fp slot.
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  DCHECK(jssp.Is(StackPointer()));
  UseScratchRegisterScope temps(this);
  Register type_reg = temps.AcquireX();
  Register code_reg = temps.AcquireX();

  Push(lr, fp, cp);
  Mov(type_reg, Smi::FromInt(type));
  Mov(code_reg, Operand(CodeObject()));
  Push(type_reg, code_reg);
  // jssp[4] : lr
  // jssp[3] : fp
  // jssp[2] : cp
  // jssp[1] : type
  // jssp[0] : code object

  // Adjust FP to point to saved FP.
  // The extra kPointerSize accounts for the code-object slot below the
  // standard fixed frame.
  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
}
   3086 
   3087 
// Tears down a standard stack frame built by EnterFrame: unwinds jssp to fp
// and restores the caller's fp and lr. 'type' is unused here; the frame
// layout is the same for all standard frame types.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  DCHECK(jssp.Is(StackPointer()));
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  Mov(jssp, fp);
  AssertStackConsistency();
  Pop(fp, lr);
}
   3096 
   3097 
// Pushes all caller-saved FP registers onto the stack; used while building an
// exit frame when save_doubles is requested. Restored (without popping) by
// ExitFrameRestoreFPRegs().
void MacroAssembler::ExitFramePreserveFPRegs() {
  PushCPURegList(kCallerSavedFP);
}
   3101 
   3102 
// Reloads the caller-saved FP registers that ExitFramePreserveFPRegs() saved
// in the exit frame, addressing them relative to fp.
void MacroAssembler::ExitFrameRestoreFPRegs() {
  // Read the registers from the stack without popping them. The stack pointer
  // will be reset as part of the unwinding process.
  CPURegList saved_fp_regs = kCallerSavedFP;
  // Registers are loaded in pairs with LDP, so the count must be even.
  DCHECK(saved_fp_regs.Count() % 2 == 0);

  // Walk downwards from the last exit-frame field, consuming the list from
  // the highest register index so the order mirrors how they were pushed.
  int offset = ExitFrameConstants::kLastExitFrameField;
  while (!saved_fp_regs.IsEmpty()) {
    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
    offset -= 2 * kDRegSize;
    Ldp(dst1, dst0, MemOperand(fp, offset));
  }
}
   3117 
   3118 
// Builds an exit frame (a frame for calling out of JS into C++): saves
// lr/fp/code object, records fp and cp in the isolate's top-frame slots,
// optionally preserves the caller-saved FP registers, reserves 'extra_space'
// pointer-sized slots for the caller, and switches the stack pointer to csp.
void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    const Register& scratch,
                                    int extra_space) {
  DCHECK(jssp.Is(StackPointer()));

  // Set up the new stack frame.
  Mov(scratch, Operand(CodeObject()));
  Push(lr, fp);
  Mov(fp, StackPointer());
  // The xzr push reserves the SPOffset slot, filled in at the end.
  Push(xzr, scratch);
  //          fp[8]: CallerPC (lr)
  //    fp -> fp[0]: CallerFP (old fp)
  //          fp[-8]: Space reserved for SPOffset.
  //  jssp -> fp[-16]: CodeObject()
  STATIC_ASSERT((2 * kPointerSize) ==
                ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
  STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);

  // Save the frame pointer and context pointer in the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(fp, MemOperand(scratch));
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                         isolate())));
  Str(cp, MemOperand(scratch));

  STATIC_ASSERT((-2 * kPointerSize) ==
                ExitFrameConstants::kLastExitFrameField);
  if (save_doubles) {
    ExitFramePreserveFPRegs();
  }

  // Reserve space for the return address and for user requested memory.
  // We do this before aligning to make sure that we end up correctly
  // aligned with the minimum of wasted space.
  Claim(extra_space + 1, kXRegSize);
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: Space reserved for SPOffset.
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
  //         jssp[8]: Extra space reserved for caller (if extra_space != 0).
  // jssp -> jssp[0]: Space reserved for the return address.

  // Align and synchronize the system stack pointer with jssp.
  AlignAndSetCSPForFrame();
  DCHECK(csp.Is(StackPointer()));

  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: Space reserved for SPOffset.
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //  csp -> csp[0]: Space reserved for the return address.

  // ExitFrame::GetStateForFramePointer expects to find the return address at
  // the memory address immediately below the pointer stored in SPOffset.
  // It is not safe to derive much else from SPOffset, because the size of the
  // padding can vary.
  Add(scratch, csp, kXRegSize);
  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
   3186 
   3187 
// Leave the current exit frame: optionally restore the saved FP registers and
// the context, clear the isolate's top-frame fp slot, switch the stack
// pointer back to jssp and restore the caller's fp and lr.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
                                    const Register& scratch,
                                    bool restore_context) {
  DCHECK(csp.Is(StackPointer()));

  if (restore_doubles) {
    ExitFrameRestoreFPRegs();
  }

  // Restore the context pointer from the top frame.
  if (restore_context) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Ldr(cp, MemOperand(scratch));
  }

  if (emit_debug_code()) {
    // Also emit debug code to clear the cp in the top frame.
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Str(xzr, MemOperand(scratch));
  }
  // Clear the frame pointer from the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(xzr, MemOperand(scratch));

  // Pop the exit frame.
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[...]: The rest of the frame.
  Mov(jssp, fp);
  SetStackPointer(jssp);
  AssertStackConsistency();
  Pop(fp, lr);
}
   3225 
   3226 
// Stores 'value' into the stats counter, if native code counters are enabled
// and the counter itself is enabled. Clobbers both scratch registers.
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch1, value);
    Mov(scratch2, ExternalReference(counter));
    Str(scratch1, MemOperand(scratch2));
  }
}
   3235 
   3236 
// Adds 'value' (which may be negative; must be non-zero) to the stats
// counter, if native code counters are enabled and the counter itself is
// enabled. Clobbers both scratch registers.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value != 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    // Load-modify-store on the counter's memory cell.
    Mov(scratch2, ExternalReference(counter));
    Ldr(scratch1, MemOperand(scratch2));
    Add(scratch1, scratch1, value);
    Str(scratch1, MemOperand(scratch2));
  }
}
   3247 
   3248 
// Subtracts 'value' from the stats counter by delegating to IncrementCounter
// with the negated amount.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  IncrementCounter(counter, -value, scratch1, scratch2);
}
   3253 
   3254 
   3255 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   3256   if (context_chain_length > 0) {
   3257     // Move up the chain of contexts to the context containing the slot.
   3258     Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   3259     for (int i = 1; i < context_chain_length; i++) {
   3260       Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   3261     }
   3262   } else {
   3263     // Slot is in the current function context.  Move it into the
   3264     // destination register in case we store into it (the write barrier
   3265     // cannot be allowed to destroy the context in cp).
   3266     Mov(dst, cp);
   3267   }
   3268 }
   3269 
   3270 
// Emits a call into the runtime's debug-break handler via CEntryStub, with
// zero arguments (x0 = argc, x1 = runtime entry address).
void MacroAssembler::DebugBreak() {
  Mov(x0, 0);
  Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
   3278 
   3279 
// Pushes a new try-handler frame onto the stack and links it into the
// isolate's handler chain. The handler records fp, cp, an encoded
// state word (kind + index) and the code object; JS_ENTRY handlers store
// zeros for fp and cp instead. Clobbers x10 and x11.
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  DCHECK(jssp.Is(StackPointer()));
  // Adjust this code if the asserts don't hold.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve the live registers x0-x4.
  // (See JSEntryStub::GenerateBody().)

  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);

  // Set up the code object and the state for pushing.
  Mov(x10, Operand(CodeObject()));
  Mov(x11, state);

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    // A JS entry frame has no JS frame pointer or context to record; push
    // Smi zero (which is the untagged value 0) for both.
    DCHECK(Smi::FromInt(0) == 0);
    Push(xzr, xzr, x11, x10);
  } else {
    Push(fp, cp, x11, x10);
  }

  // Link the current handler as the next handler.
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
  Ldr(x10, MemOperand(x11));
  Push(x10);
  // Set this new handler as the current one.
  Str(jssp, MemOperand(x11));
}
   3317 
   3318 
// Unlinks the topmost try-handler: pops the saved next-handler address,
// drops the rest of the handler frame, and restores the next handler as the
// isolate's current handler. Clobbers x10 and x11.
void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  Pop(x10);
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
  // One kXRegSize was already consumed by the Pop above.
  Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
  Str(x10, MemOperand(x11));
}
   3326 
   3327 
   3328 void MacroAssembler::Allocate(int object_size,
   3329                               Register result,
   3330                               Register scratch1,
   3331                               Register scratch2,
   3332                               Label* gc_required,
   3333                               AllocationFlags flags) {
   3334   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   3335   if (!FLAG_inline_new) {
   3336     if (emit_debug_code()) {
   3337       // Trash the registers to simulate an allocation failure.
   3338       // We apply salt to the original zap value to easily spot the values.
   3339       Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
   3340       Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
   3341       Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
   3342     }
   3343     B(gc_required);
   3344     return;
   3345   }
   3346 
   3347   UseScratchRegisterScope temps(this);
   3348   Register scratch3 = temps.AcquireX();
   3349 
   3350   DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
   3351   DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
   3352 
   3353   // Make object size into bytes.
   3354   if ((flags & SIZE_IN_WORDS) != 0) {
   3355     object_size *= kPointerSize;
   3356   }
   3357   DCHECK(0 == (object_size & kObjectAlignmentMask));
   3358 
   3359   // Check relative positions of allocation top and limit addresses.
   3360   // The values must be adjacent in memory to allow the use of LDP.
   3361   ExternalReference heap_allocation_top =
   3362       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   3363   ExternalReference heap_allocation_limit =
   3364       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   3365   intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
   3366   intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
   3367   DCHECK((limit - top) == kPointerSize);
   3368 
   3369   // Set up allocation top address and object size registers.
   3370   Register top_address = scratch1;
   3371   Register allocation_limit = scratch2;
   3372   Mov(top_address, Operand(heap_allocation_top));
   3373 
   3374   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   3375     // Load allocation top into result and the allocation limit.
   3376     Ldp(result, allocation_limit, MemOperand(top_address));
   3377   } else {
   3378     if (emit_debug_code()) {
   3379       // Assert that result actually contains top on entry.
   3380       Ldr(scratch3, MemOperand(top_address));
   3381       Cmp(result, scratch3);
   3382       Check(eq, kUnexpectedAllocationTop);
   3383     }
   3384     // Load the allocation limit. 'result' already contains the allocation top.
   3385     Ldr(allocation_limit, MemOperand(top_address, limit - top));
   3386   }
   3387 
   3388   // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
   3389   // the same alignment on ARM64.
   3390   STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   3391 
   3392   // Calculate new top and bail out if new space is exhausted.
   3393   Adds(scratch3, result, object_size);
   3394   Ccmp(scratch3, allocation_limit, CFlag, cc);
   3395   B(hi, gc_required);
   3396   Str(scratch3, MemOperand(top_address));
   3397 
   3398   // Tag the object if requested.
   3399   if ((flags & TAG_OBJECT) != 0) {
   3400     ObjectTag(result, result);
   3401   }
   3402 }
   3403 
   3404 
   3405 void MacroAssembler::Allocate(Register object_size,
   3406                               Register result,
   3407                               Register scratch1,
   3408                               Register scratch2,
   3409                               Label* gc_required,
   3410                               AllocationFlags flags) {
   3411   if (!FLAG_inline_new) {
   3412     if (emit_debug_code()) {
   3413       // Trash the registers to simulate an allocation failure.
   3414       // We apply salt to the original zap value to easily spot the values.
   3415       Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
   3416       Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
   3417       Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
   3418     }
   3419     B(gc_required);
   3420     return;
   3421   }
   3422 
   3423   UseScratchRegisterScope temps(this);
   3424   Register scratch3 = temps.AcquireX();
   3425 
   3426   DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
   3427   DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
   3428          scratch1.Is64Bits() && scratch2.Is64Bits());
   3429 
   3430   // Check relative positions of allocation top and limit addresses.
   3431   // The values must be adjacent in memory to allow the use of LDP.
   3432   ExternalReference heap_allocation_top =
   3433       AllocationUtils::GetAllocationTopReference(isolate(), flags);
   3434   ExternalReference heap_allocation_limit =
   3435       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   3436   intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
   3437   intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
   3438   DCHECK((limit - top) == kPointerSize);
   3439 
   3440   // Set up allocation top address and object size registers.
   3441   Register top_address = scratch1;
   3442   Register allocation_limit = scratch2;
   3443   Mov(top_address, heap_allocation_top);
   3444 
   3445   if ((flags & RESULT_CONTAINS_TOP) == 0) {
   3446     // Load allocation top into result and the allocation limit.
   3447     Ldp(result, allocation_limit, MemOperand(top_address));
   3448   } else {
   3449     if (emit_debug_code()) {
   3450       // Assert that result actually contains top on entry.
   3451       Ldr(scratch3, MemOperand(top_address));
   3452       Cmp(result, scratch3);
   3453       Check(eq, kUnexpectedAllocationTop);
   3454     }
   3455     // Load the allocation limit. 'result' already contains the allocation top.
   3456     Ldr(allocation_limit, MemOperand(top_address, limit - top));
   3457   }
   3458 
   3459   // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
   3460   // the same alignment on ARM64.
   3461   STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
   3462 
   3463   // Calculate new top and bail out if new space is exhausted
   3464   if ((flags & SIZE_IN_WORDS) != 0) {
   3465     Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
   3466   } else {
   3467     Adds(scratch3, result, object_size);
   3468   }
   3469 
   3470   if (emit_debug_code()) {
   3471     Tst(scratch3, kObjectAlignmentMask);
   3472     Check(eq, kUnalignedAllocationInNewSpace);
   3473   }
   3474 
   3475   Ccmp(scratch3, allocation_limit, CFlag, cc);
   3476   B(hi, gc_required);
   3477   Str(scratch3, MemOperand(top_address));
   3478 
   3479   // Tag the object if requested.
   3480   if ((flags & TAG_OBJECT) != 0) {
   3481     ObjectTag(result, result);
   3482   }
   3483 }
   3484 
   3485 
// Reverses the most recent new-space allocation of 'object' by resetting the
// new-space allocation top to the object's (untagged) address. The object
// must be the most recently allocated one. Clobbers 'scratch' and untags
// 'object' in place.
void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  Bic(object, object, kHeapObjectTagMask);
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  Mov(scratch, new_space_allocation_top);
  Ldr(scratch, MemOperand(scratch));
  Cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
  // Write the address of the object to un-allocate as the current top.
  Mov(scratch, new_space_allocation_top);
  Str(object, MemOperand(scratch));
}
   3504 
   3505 
// Allocates a sequential two-byte string of 'length' characters in new space
// and initializes its map, length and hash fields. Jumps to 'gc_required'
// if allocation fails.
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  Add(scratch1, length, length);  // Length in bytes, not chars.
  // Add the header size plus the alignment mask, then round down to object
  // alignment with the Bic below.
  Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);

  // Allocate two-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}
   3535 
   3536 
// Allocates a sequential one-byte string of 'length' characters in new space
// and initializes its map, length and hash fields. Jumps to 'gc_required'
// if allocation fails.
void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  STATIC_ASSERT(kCharSize == 1);
  // Add the header size plus the alignment mask, then round down to object
  // alignment with the Bic below.
  Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);

  // Allocate one-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}
   3561 
   3562 
   3563 void MacroAssembler::AllocateTwoByteConsString(Register result,
   3564                                                Register length,
   3565                                                Register scratch1,
   3566                                                Register scratch2,
   3567                                                Label* gc_required) {
   3568   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
   3569            TAG_OBJECT);
   3570 
   3571   InitializeNewString(result,
   3572                       length,
   3573                       Heap::kConsStringMapRootIndex,
   3574                       scratch1,
   3575                       scratch2);
   3576 }
   3577 
   3578 
   3579 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
   3580                                                Register scratch1,
   3581                                                Register scratch2,
   3582                                                Label* gc_required) {
   3583   Allocate(ConsString::kSize,
   3584            result,
   3585            scratch1,
   3586            scratch2,
   3587            gc_required,
   3588            TAG_OBJECT);
   3589 
   3590   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
   3591                       scratch1, scratch2);
   3592 }
   3593 
   3594 
// Allocates a two-byte sliced string in new space and initializes its map,
// length and hash fields. Jumps to 'gc_required' if allocation fails.
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}
   3610 
   3611 
// Allocates a one-byte sliced string in new space and initializes its map,
// length and hash fields. Jumps to 'gc_required' if allocation fails.
void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  DCHECK(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}
   3624 
   3625 
   3626 // Allocates a heap number or jumps to the need_gc label if the young space
   3627 // is full and a scavenge is needed.
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed. If 'value' is valid, it is stored as the
// number's payload; if 'heap_number_map' is invalid, the map is loaded from
// the root list. 'mode' selects a mutable or immutable heap-number map.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Label* gc_required,
                                        Register scratch1,
                                        Register scratch2,
                                        CPURegister value,
                                        CPURegister heap_number_map,
                                        MutableMode mode) {
  DCHECK(!value.IsValid() || value.Is64Bits());
  UseScratchRegisterScope temps(this);

  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;

  // Prepare the heap number map.
  if (!heap_number_map.IsValid()) {
    // If we have a valid value register, use the same type of register to store
    // the map so we can use STP to store both in one instruction.
    if (value.IsValid() && value.IsFPRegister()) {
      heap_number_map = temps.AcquireD();
    } else {
      heap_number_map = scratch1;
    }
    LoadRoot(heap_number_map, map_index);
  }
  if (emit_debug_code()) {
    Register map;
    if (heap_number_map.IsFPRegister()) {
      // The root check needs an integer register; copy the map bits over.
      map = scratch1;
      Fmov(map, DoubleRegister(heap_number_map));
    } else {
      map = Register(heap_number_map);
    }
    AssertRegisterIsRoot(map, map_index);
  }

  // Store the heap number map and the value in the allocated object.
  if (value.IsSameSizeAndType(heap_number_map)) {
    // Map and value are adjacent fields, so one STP covers both.
    STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
                  HeapNumber::kValueOffset);
    Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
  } else {
    Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
    if (value.IsValid()) {
      Str(value, MemOperand(result, HeapNumber::kValueOffset));
    }
  }
  ObjectTag(result, result);
}
   3682 
   3683 
// Compares the instance type of 'object' against 'type' (loading its map
// into 'map' and the type into 'type_reg') and branches to 'if_cond_pass'
// when the comparison satisfies 'cond'.
void MacroAssembler::JumpIfObjectType(Register object,
                                      Register map,
                                      Register type_reg,
                                      InstanceType type,
                                      Label* if_cond_pass,
                                      Condition cond) {
  CompareObjectType(object, map, type_reg, type);
  B(cond, if_cond_pass);
}
   3693 
   3694 
// Branches to 'if_not_object' when 'object' is not of instance type 'type'.
// Loads the object's map into 'map' and its instance type into 'type_reg'.
void MacroAssembler::JumpIfNotObjectType(Register object,
                                         Register map,
                                         Register type_reg,
                                         InstanceType type,
                                         Label* if_not_object) {
  JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
}
   3702 
   3703 
   3704 // Sets condition flags based on comparison, and returns type in type_reg.
// Sets condition flags based on comparison, and returns type in type_reg.
// Loads the object's map into 'map' as a side effect.
void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}
   3712 
   3713 
   3714 // Sets condition flags based on comparison, and returns type in type_reg.
// Sets condition flags based on comparison, and returns type in type_reg.
// Reads the instance type byte from the given map.
void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Cmp(type_reg, type);
}
   3721 
   3722 
// Compares the map of 'obj' against the root-list entry at 'index', setting
// the condition flags. Uses an internal scratch register for the map.
void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register obj_map = temps.AcquireX();
  Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareRoot(obj_map, index);
}
   3729 
   3730 
// Compares the map of 'obj' against the given map handle, setting the
// condition flags. The object's map is loaded into 'scratch'.
void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
                                      Handle<Map> map) {
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map);
}
   3736 
   3737 
// Compares an already-loaded map in 'obj_map' against the given map handle,
// setting the condition flags.
void MacroAssembler::CompareMap(Register obj_map,
                                Handle<Map> map) {
  Cmp(obj_map, Operand(map));
}
   3742 
   3743 
// Branches to 'fail' unless the map of 'obj' equals the given map handle.
// Optionally bails out first if 'obj' is a Smi. Loads the object's map into
// 'scratch'.
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareObjectMap(obj, scratch, map);
  B(ne, fail);
}
   3756 
   3757 
// Branches to 'fail' unless the map of 'obj' equals the root-list entry at
// 'index'. Optionally bails out first if 'obj' is a Smi. Loads the object's
// map into 'scratch'.
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  JumpIfNotRoot(scratch, index, fail);
}
   3769 
   3770 
// Branches to 'fail' unless the already-loaded map in 'obj_map' equals the
// given map handle. Optionally bails out first if 'obj_map' is a Smi.
void MacroAssembler::CheckMap(Register obj_map,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj_map, fail);
  }

  CompareMap(obj_map, map);
  B(ne, fail);
}
   3782 
   3783 
   3784 // Jump to the 'success' code object if obj's map equals 'map'; otherwise
   3785 // fall through. A smi (if checked) also falls through. Clobbers scratch.
   3786 void MacroAssembler::DispatchMap(Register obj,
   3787                                  Register scratch,
   3788                                  Handle<Map> map,
   3789                                  Handle<Code> success,
   3790                                  SmiCheckType smi_check_type) {
   3791   Label fail;
   3792   if (smi_check_type == DO_SMI_CHECK) {
   3793     JumpIfSmi(obj, &fail);
   3794   }
   3795   // Use the shared helper (same Ldr + Cmp sequence) for consistency with
   3796   // CheckMap above.
   3797   CompareObjectMap(obj, scratch, map);
   3798   B(ne, &fail);
   3799   Jump(success, RelocInfo::CODE_TARGET);
   3800   Bind(&fail);
   3801 }
   3799 
   3800 
   3801 // Test 'mask' against the bit field byte of object's map; sets the condition
   3802 // flags (Tst) for a subsequent conditional branch. Uses a scratch register.
   3803 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
   3804   UseScratchRegisterScope temps(this);
   3805   Register temp = temps.AcquireX();
   3806   Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   3807   Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   3808   Tst(temp, mask);
   3809 }
   3808 
   3809 
   3810 // Extract the elements kind from a map into 'result'.
   3811 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
   3812   // Load the map's "bit field 2".
   3813   // Note: the stray "__" prefix was dropped; that macro is defined empty and
   3814   // is only meant for the ASM_UNIMPLEMENTED macros (see top of file).
   3815   Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
   3816   // Retrieve elements_kind from bit field 2.
   3817   DecodeField<Map::ElementsKindBits>(result);
   3818 }
   3816 
   3817 
   3818 // Load the prototype of 'function' into 'result', branching to 'miss' when
   3819 // the prototype is unavailable (hole) or, with kMissOnBoundFunction, when the
   3820 // receiver is not a plain JSFunction. Clobbers scratch.
   3821 void MacroAssembler::TryGetFunctionPrototype(Register function,
   3822                                              Register result,
   3823                                              Register scratch,
   3824                                              Label* miss,
   3825                                              BoundFunctionAction action) {
   3826   DCHECK(!AreAliased(function, result, scratch));
   3827 
   3828   Label non_instance;
   3829   if (action == kMissOnBoundFunction) {
   3830     // Check that the receiver isn't a smi.
   3831     JumpIfSmi(function, miss);
   3832 
   3833     // Check that the function really is a function. Load map into result reg.
   3834     JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
   3835 
   3836     Register scratch_w = scratch.W();
   3837     Ldr(scratch,
   3838         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3839     // On 64-bit platforms, compiler hints field is not a smi. See definition of
   3840     // kCompilerHintsOffset in src/objects.h.
   3841     Ldr(scratch_w,
   3842         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3843     // Bound functions have no usable prototype here; miss.
   3844     Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
   3845 
   3846     // Make sure that the function has an instance prototype.
   3847     Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
   3848     Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
   3849   }
   3850 
   3851   // Get the prototype or initial map from the function.
   3852   Ldr(result,
   3853       FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3854 
   3855   // If the prototype or initial map is the hole, don't return it and simply
   3856   // miss the cache instead. This will allow us to allocate a prototype object
   3857   // on-demand in the runtime system.
   3858   JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
   3859 
   3860   // If the function does not have an initial map, we're done.
   3861   Label done;
   3862   JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
   3863 
   3864   // Get the prototype from the initial map.
   3865   Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3866 
   3867   if (action == kMissOnBoundFunction) {
   3868     B(&done);
   3869 
   3870     // Non-instance prototype: fetch prototype from constructor field in initial
   3871     // map.
   3872     Bind(&non_instance);
   3873     Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
   3874   }
   3875 
   3876   // All done.
   3877   Bind(&done);
   3878 }
   3875 
   3876 
   3877 // Compare obj against the root-list value at 'index'; sets condition flags.
   3878 // Acquires a scratch register to hold the loaded root.
   3879 void MacroAssembler::CompareRoot(const Register& obj,
   3880                                  Heap::RootListIndex index) {
   3881   UseScratchRegisterScope temps(this);
   3882   Register temp = temps.AcquireX();
   3883   DCHECK(!AreAliased(obj, temp));
   3884   LoadRoot(temp, index);
   3885   Cmp(obj, temp);
   3886 }
   3885 
   3886 
   3887 // Branch to if_equal when obj equals the root-list value at 'index'.
   3888 void MacroAssembler::JumpIfRoot(const Register& obj,
   3889                                 Heap::RootListIndex index,
   3890                                 Label* if_equal) {
   3891   CompareRoot(obj, index);
   3892   B(eq, if_equal);
   3893 }
   3893 
   3894 
   3895 // Branch to if_not_equal when obj differs from the root-list value at 'index'.
   3896 void MacroAssembler::JumpIfNotRoot(const Register& obj,
   3897                                    Heap::RootListIndex index,
   3898                                    Label* if_not_equal) {
   3899   CompareRoot(obj, index);
   3900   B(ne, if_not_equal);
   3901 }
   3901 
   3902 
   3903 // Compare lhs against rhs and split control flow between if_true and
   3904 // if_false, emitting the minimum number of branches given which of the two
   3905 // targets (if any) coincides with the fall-through label.
   3906 void MacroAssembler::CompareAndSplit(const Register& lhs,
   3907                                      const Operand& rhs,
   3908                                      Condition cond,
   3909                                      Label* if_true,
   3910                                      Label* if_false,
   3911                                      Label* fall_through) {
   3912   if (if_true == if_false) {
   3913     // Both outcomes share a target, so no comparison is needed. If that
   3914     // target is also the fall-through, nothing at all has to be emitted.
   3915     if (if_false != fall_through) B(if_true);
   3916     return;
   3917   }
   3918   if (if_false == fall_through) {
   3919     // Only the true case needs an explicit branch.
   3920     CompareAndBranch(lhs, rhs, cond, if_true);
   3921   } else if (if_true == fall_through) {
   3922     // Branch on the negated condition so the true case falls through.
   3923     CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
   3924   } else {
   3925     // Neither target falls through: conditional branch plus absolute branch.
   3926     CompareAndBranch(lhs, rhs, cond, if_true);
   3927     B(if_false);
   3928   }
   3929 }
   3922 
   3923 
   3924 // Test reg against bit_pattern and split control flow between if_all_clear
   3925 // and if_any_set, emitting the minimum number of branches given which target
   3926 // (if any) coincides with the fall-through label.
   3927 void MacroAssembler::TestAndSplit(const Register& reg,
   3928                                   uint64_t bit_pattern,
   3929                                   Label* if_all_clear,
   3930                                   Label* if_any_set,
   3931                                   Label* fall_through) {
   3932   if (if_all_clear == if_any_set) {
   3933     // Both outcomes share a target, so no test is needed. If that target is
   3934     // also the fall-through, nothing at all has to be emitted.
   3935     if (if_any_set != fall_through) B(if_all_clear);
   3936     return;
   3937   }
   3938   if (if_all_clear == fall_through) {
   3939     // Only the any-set case needs an explicit branch.
   3940     TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
   3941   } else if (if_any_set == fall_through) {
   3942     // Only the all-clear case needs an explicit branch.
   3943     TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
   3944   } else {
   3945     // Neither target falls through: conditional branch plus absolute branch.
   3946     TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
   3947     B(if_all_clear);
   3948   }
   3949 }
   3942 
   3943 
   3944 // Branch to 'fail' unless the map has fast (smi-or-object) elements. Relies
   3945 // on the fast elements kinds occupying the low, contiguous range 0..3 of the
   3946 // bit field 2 encoding, so a single unsigned compare suffices.
   3947 void MacroAssembler::CheckFastElements(Register map,
   3948                                        Register scratch,
   3949                                        Label* fail) {
   3950   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   3951   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   3952   STATIC_ASSERT(FAST_ELEMENTS == 2);
   3953   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   3954   Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   3955   Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
   3956   B(hi, fail);
   3957 }
   3955 
   3956 
   3957 // Branch to 'fail' unless the map has fast *object* elements, i.e. its
   3958 // elements kind is strictly above the smi kinds (0..1) and at most
   3959 // FAST_HOLEY_ELEMENTS (3). The Ccmp folds the range check into one branch.
   3960 void MacroAssembler::CheckFastObjectElements(Register map,
   3961                                              Register scratch,
   3962                                              Label* fail) {
   3963   STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
   3964   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
   3965   STATIC_ASSERT(FAST_ELEMENTS == 2);
   3966   STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
   3967   Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   3968   Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
   3969   // If cond==ls, set cond=hi, otherwise compare.
   3970   Ccmp(scratch,
   3971        Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
   3972   B(hi, fail);
   3973 }
   3971 
   3972 
   3973 // Note: The ARM version of this clobbers elements_reg, but this version does
   3974 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
   3975 // Store value_reg (a smi or heap number) as a double into the
   3976 // FixedDoubleArray at elements_reg, indexed by the smi key_reg. Non-numbers
   3977 // branch to 'fail'. NaNs are canonicalized before the store.
   3978 //
   3979 // Note: The ARM version of this clobbers elements_reg, but this version does
   3980 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
   3981 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
   3982                                                  Register key_reg,
   3983                                                  Register elements_reg,
   3984                                                  Register scratch1,
   3985                                                  FPRegister fpscratch1,
   3986                                                  Label* fail,
   3987                                                  int elements_offset) {
   3988   DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
   3989   Label store_num;
   3990 
   3991   // Speculatively convert the smi to a double - all smis can be exactly
   3992   // represented as a double.
   3993   SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
   3994 
   3995   // If value_reg is a smi, we're done.
   3996   JumpIfSmi(value_reg, &store_num);
   3997 
   3998   // Ensure that the object is a heap number.
   3999   JumpIfNotHeapNumber(value_reg, fail);
   4000 
   4001   Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
   4002 
   4003   // Canonicalize NaNs.
   4004   CanonicalizeNaN(fpscratch1);
   4005 
   4006   // Store the result.
   4007   Bind(&store_num);
   4008   // Address of element: elements + untagged(key) * kDoubleSize.
   4009   Add(scratch1, elements_reg,
   4010       Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
   4011   Str(fpscratch1,
   4012       FieldMemOperand(scratch1,
   4013                       FixedDoubleArray::kHeaderSize - elements_offset));
   4014 }
   4008 
   4009 
   4010 // A stub call is allowed once a frame has been set up, or whenever the stub
   4011 // is guaranteed never to set up a frame of its own.
   4012 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
   4013   if (has_frame_) return true;
   4014   return !stub->SometimesSetsUpAFrame();
   4015 }
   4013 
   4014 
   4015 // Extract the cached array index from a string hash field into 'index' and
   4016 // tag it as a smi.
   4017 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   4018   // If the hash field contains an array index pick it out. The assert checks
   4019   // that the constants for the maximum number of digits for an array index
   4020   // cached in the hash field and the number of bits reserved for it does not
   4021   // conflict.
   4022   DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
   4023          (1 << String::kArrayIndexValueBits));
   4024   DecodeField<String::ArrayIndexValueBits>(index, hash);
   4025   SmiTag(index, index);
   4026 }
   4025 
   4026 
   4027 // Debug-mode checks for SeqString::SetChar-style stores: verifies that
   4028 // 'string' is a sequential string with the expected encoding and that
   4029 // 'index' is a non-negative in-bounds index. Aborts (via Check) on failure.
   4030 void MacroAssembler::EmitSeqStringSetCharCheck(
   4031     Register string,
   4032     Register index,
   4033     SeqStringSetCharCheckIndexType index_type,
   4034     Register scratch,
   4035     uint32_t encoding_mask) {
   4036   DCHECK(!AreAliased(string, index, scratch));
   4037 
   4038   if (index_type == kIndexIsSmi) {
   4039     AssertSmi(index);
   4040   }
   4041 
   4042   // Check that string is an object.
   4043   AssertNotSmi(string, kNonObject);
   4044 
   4045   // Check that string has an appropriate map.
   4046   Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   4047   Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   4048 
   4049   And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
   4050   Cmp(scratch, encoding_mask);
   4051   Check(eq, kUnexpectedStringType);
   4052 
   4053   // Compare index against the (smi) length, untagging the length when the
   4054   // index is already untagged so both sides use the same representation.
   4055   Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
   4056   Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
   4057   Check(lt, kIndexIsTooLarge);
   4058 
   4059   // Comparing against 0 works for both smi and untagged indices.
   4060   DCHECK_EQ(0, Smi::FromInt(0));
   4061   Cmp(index, 0);
   4062   Check(ge, kIndexIsNegative);
   4063 }
   4058 
   4059 
   4060 // Security check for accessing a global proxy: branch to 'miss' unless the
   4061 // current native context either is the holder's native context or carries
   4062 // the same security token. Clobbers scratch1 and scratch2.
   4063 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   4064                                             Register scratch1,
   4065                                             Register scratch2,
   4066                                             Label* miss) {
   4067   DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
   4068   Label same_contexts;
   4069 
   4070   // Load current lexical context from the stack frame.
   4071   Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4072   // In debug mode, make sure the lexical context is set.
   4073 #ifdef DEBUG
   4074   Cmp(scratch1, 0);
   4075   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
   4076 #endif
   4077 
   4078   // Load the native context of the current context.
   4079   int offset =
   4080       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
   4081   Ldr(scratch1, FieldMemOperand(scratch1, offset));
   4082   Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
   4083 
   4084   // Check the context is a native context.
   4085   if (emit_debug_code()) {
   4086     // Read the first word and compare to the global_context_map.
   4086     Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
   4087     CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
   4088     Check(eq, kExpectedNativeContext);
   4089   }
   4090 
   4091   // Check if both contexts are the same.
   4092   Ldr(scratch2, FieldMemOperand(holder_reg,
   4093                                 JSGlobalProxy::kNativeContextOffset));
   4094   Cmp(scratch1, scratch2);
   4095   B(&same_contexts, eq);
   4096 
   4097   // Check the context is a native context.
   4098   if (emit_debug_code()) {
   4099     // We're short on scratch registers here, so use holder_reg as a scratch.
   4100     Push(holder_reg);
   4101     Register scratch3 = holder_reg;
   4102 
   4103     CompareRoot(scratch2, Heap::kNullValueRootIndex);
   4104     Check(ne, kExpectedNonNullContext);
   4105 
   4106     Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
   4107     CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
   4108     Check(eq, kExpectedNativeContext);
   4109     Pop(holder_reg);
   4110   }
   4111 
   4112   // Check that the security token in the calling global object is
   4113   // compatible with the security token in the receiving global
   4114   // object.
   4115   int token_offset = Context::kHeaderSize +
   4116                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
   4117 
   4118   Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
   4119   Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
   4120   Cmp(scratch1, scratch2);
   4121   B(miss, ne);
   4122 
   4123   Bind(&same_contexts);
   4124 }
   4123 
   4124 
   4125 // Compute the hash code from the untagged key. This must be kept in sync with
   4126 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
   4127 // code-stub-hydrogen.cc
   4128 // Compute the hash code from the untagged key. This must be kept in sync with
   4129 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
   4130 // code-stub-hydrogen.cc
   4131 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
   4132   DCHECK(!AreAliased(key, scratch));
   4133 
   4134   // Xor original key with a seed.
   4135   LoadRoot(scratch, Heap::kHashSeedRootIndex);
   4136   Eor(key, key, Operand::UntagSmi(scratch));
   4137 
   4138   // The algorithm uses 32-bit integer values.
   4139   key = key.W();
   4140   scratch = scratch.W();
   4141 
   4142   // Compute the hash code from the untagged key.  This must be kept in sync
   4143   // with ComputeIntegerHash in utils.h.
   4144   //
   4145   // hash = ~hash + (hash << 15);
   4146   Mvn(scratch, key);
   4147   Add(key, scratch, Operand(key, LSL, 15));
   4148   // hash = hash ^ (hash >> 12);
   4149   Eor(key, key, Operand(key, LSR, 12));
   4150   // hash = hash + (hash << 2);
   4151   Add(key, key, Operand(key, LSL, 2));
   4152   // hash = hash ^ (hash >> 4);
   4153   Eor(key, key, Operand(key, LSR, 4));
   4154   // hash = hash * 2057; computed as hash * (1 + 8) + hash * 2048.
   4155   Mov(scratch, Operand(key, LSL, 11));
   4156   Add(key, key, Operand(key, LSL, 3));
   4157   Add(key, key, scratch);
   4158   // hash = hash ^ (hash >> 16);
   4159   Eor(key, key, Operand(key, LSR, 16));
   4160 }
   4158 
   4159 
   4160 // Look up the (smi) 'key' in the SeededNumberDictionary at 'elements' and
   4161 // load the associated value into 'result', branching to 'miss' if the key is
   4162 // absent or the property is not a normal data property. The probe loop is
   4163 // unrolled kNumberDictionaryProbes times.
   4164 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   4165                                               Register elements,
   4166                                               Register key,
   4167                                               Register result,
   4168                                               Register scratch0,
   4169                                               Register scratch1,
   4170                                               Register scratch2,
   4171                                               Register scratch3) {
   4172   DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
   4173 
   4174   Label done;
   4175 
   4176   SmiUntag(scratch0, key);
   4177   GetNumberHash(scratch0, scratch1);
   4178 
   4179   // Compute the capacity mask.
   4180   Ldrsw(scratch1,
   4181         UntagSmiFieldMemOperand(elements,
   4182                                 SeededNumberDictionary::kCapacityOffset));
   4183   Sub(scratch1, scratch1, 1);
   4184 
   4185   // Generate an unrolled loop that performs a few probes before giving up.
   4186   for (int i = 0; i < kNumberDictionaryProbes; i++) {
   4187     // Compute the masked index: (hash + i + i * i) & mask.
   4188     if (i > 0) {
   4189       Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
   4190     } else {
   4191       Mov(scratch2, scratch0);
   4192     }
   4193     And(scratch2, scratch2, scratch1);
   4194 
   4195     // Scale the index by multiplying by the element size.
   4196     DCHECK(SeededNumberDictionary::kEntrySize == 3);
   4197     Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
   4198 
   4199     // Check if the key is identical to the name.
   4200     Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
   4201     Ldr(scratch3,
   4202         FieldMemOperand(scratch2,
   4203                         SeededNumberDictionary::kElementsStartOffset));
   4204     Cmp(key, scratch3);
   4205     if (i != (kNumberDictionaryProbes - 1)) {
   4206       B(eq, &done);
   4207     } else {
   4208       // Last probe: a mismatch means the key is not present.
   4209       B(ne, miss);
   4210     }
   4211   }
   4212 
   4213   Bind(&done);
   4214   // Check that the value is a normal property.
   4215   const int kDetailsOffset =
   4216       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   4217   Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
   4218   TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
   4219 
   4220   // Get the value at the masked, scaled index and return.
   4221   const int kValueOffset =
   4222       SeededNumberDictionary::kElementsStartOffset + kPointerSize;
   4223   Ldr(result, FieldMemOperand(scratch2, kValueOffset));
   4224 }
   4220 
   4221 
   4222 // Append 'address' to the store buffer, then either fall through or return
   4223 // (per and_then). If the buffer becomes full, call the overflow stub.
   4224 // Clobbers scratch1 and an acquired scratch register.
   4225 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
   4226                                          Register address,
   4227                                          Register scratch1,
   4228                                          SaveFPRegsMode fp_mode,
   4229                                          RememberedSetFinalAction and_then) {
   4230   DCHECK(!AreAliased(object, address, scratch1));
   4231   Label done, store_buffer_overflow;
   4232   if (emit_debug_code()) {
   4233     Label ok;
   4234     JumpIfNotInNewSpace(object, &ok);
   4235     Abort(kRememberedSetPointerInNewSpace);
   4236     bind(&ok);
   4237   }
   4238   UseScratchRegisterScope temps(this);
   4239   Register scratch2 = temps.AcquireX();
   4240 
   4241   // Load store buffer top.
   4242   Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
   4243   Ldr(scratch1, MemOperand(scratch2));
   4244   // Store pointer to buffer and increment buffer top.
   4245   Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
   4246   // Write back new top of buffer.
   4247   Str(scratch1, MemOperand(scratch2));
   4248   // Call stub on end of buffer.
   4249   // Check for end of buffer.
   4250   // The overflow bit position lets a single Tbz/Tbnz test buffer fullness.
   4251   DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
   4252          (1 << (14 + kPointerSizeLog2)));
   4253   if (and_then == kFallThroughAtEnd) {
   4254     Tbz(scratch1, (14 + kPointerSizeLog2), &done);
   4255   } else {
   4256     DCHECK(and_then == kReturnAtEnd);
   4257     Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
   4258     Ret();
   4259   }
   4260 
   4261   Bind(&store_buffer_overflow);
   4262   Push(lr);
   4263   StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
   4264   CallStub(&store_buffer_overflow_stub);
   4265   Pop(lr);
   4266 
   4267   Bind(&done);
   4268   if (and_then == kReturnAtEnd) {
   4269     Ret();
   4270   }
   4271 }
   4268 
   4269 
   4270 // Restore the safepoint-saved registers and drop the padding slots pushed by
   4271 // PushSafepointRegisters.
   4272 void MacroAssembler::PopSafepointRegisters() {
   4273   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   4274   PopXRegList(kSafepointSavedRegisters);
   4275   Drop(num_unsaved);
   4276 }
   4275 
   4276 
   4277 // Push the safepoint-saved registers, padding the stack so that a full block
   4278 // of kNumSafepointRegisters slots is reserved (mirrored by
   4279 // PopSafepointRegisters).
   4280 void MacroAssembler::PushSafepointRegisters() {
   4281   // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
   4282   // adjust the stack for unsaved registers.
   4283   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   4284   DCHECK(num_unsaved >= 0);
   4285   Claim(num_unsaved);
   4286   PushXRegList(kSafepointSavedRegisters);
   4287 }
   4285 
   4286 
   4287 // Push the safepoint registers followed by all allocatable FP (D) registers.
   4288 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   4289   PushSafepointRegisters();
   4290   PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
   4291                             FPRegister::kAllocatableFPRegisters));
   4292 }
   4292 
   4293 
   4294 // Reverse of PushSafepointRegistersAndDoubles: pop the FP registers first,
   4295 // then the safepoint registers.
   4296 void MacroAssembler::PopSafepointRegistersAndDoubles() {
   4297   PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
   4298                            FPRegister::kAllocatableFPRegisters));
   4299   PopSafepointRegisters();
   4300 }
   4299 
   4300 
   4301 // Map a register code to its slot index in the safepoint register block.
   4302 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   4303   // Make sure the safepoint registers list is what we expect.
   4304   DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
   4305 
   4306   // Safepoint registers are stored contiguously on the stack, but not all the
   4307   // registers are saved. The following registers are excluded:
   4308   //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
   4309   //    the macro assembler.
   4310   //  - x28 (jssp) because JS stack pointer doesn't need to be included in
   4311   //    safepoint registers.
   4312   //  - x31 (csp) because the system stack pointer doesn't need to be included
   4313   //    in safepoint registers.
   4314   //
   4315   // This function implements the mapping of register code to index into the
   4316   // safepoint register slots.
   4317   if ((reg_code >= 0) && (reg_code <= 15)) return reg_code;
   4318   // Codes 18-27 follow the gap left by ip0/ip1 (x16, x17).
   4319   if ((reg_code >= 18) && (reg_code <= 27)) return reg_code - 2;
   4320   // Codes 29-30 additionally skip jssp (x28).
   4321   if ((reg_code == 29) || (reg_code == 30)) return reg_code - 3;
   4322   // Any other register has no safepoint slot.
   4323   UNREACHABLE();
   4324   return -1;
   4325 }
   4330 
   4331 
   4332 // Branch to if_any_set when any of the 'mask' bits is set in the flags word
   4333 // of the memory page containing 'object'. Clobbers scratch.
   4334 void MacroAssembler::CheckPageFlagSet(const Register& object,
   4335                                       const Register& scratch,
   4336                                       int mask,
   4337                                       Label* if_any_set) {
   4338   // Mask off the low bits to get the page (MemoryChunk) base address.
   4339   And(scratch, object, ~Page::kPageAlignmentMask);
   4340   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   4341   TestAndBranchIfAnySet(scratch, mask, if_any_set);
   4342 }
   4340 
   4341 
   4342 // Branch to if_all_clear when none of the 'mask' bits is set in the flags
   4343 // word of the memory page containing 'object'. Clobbers scratch.
   4344 void MacroAssembler::CheckPageFlagClear(const Register& object,
   4345                                         const Register& scratch,
   4346                                         int mask,
   4347                                         Label* if_all_clear) {
   4348   // Mask off the low bits to get the page (MemoryChunk) base address.
   4349   And(scratch, object, ~Page::kPageAlignmentMask);
   4350   Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   4351   TestAndBranchIfAllClear(scratch, mask, if_all_clear);
   4352 }
   4350 
   4351 
   4352 // Emit a write barrier for a store of 'value' into the field at 'offset'
   4353 // within 'object'. Computes the field address into 'scratch' and delegates
   4354 // to RecordWrite. Clobbers scratch (and value, in debug code).
   4355 void MacroAssembler::RecordWriteField(
   4356     Register object,
   4357     int offset,
   4358     Register value,
   4359     Register scratch,
   4360     LinkRegisterStatus lr_status,
   4361     SaveFPRegsMode save_fp,
   4362     RememberedSetAction remembered_set_action,
   4363     SmiCheck smi_check,
   4364     PointersToHereCheck pointers_to_here_check_for_value) {
   4365   // First, check if a write barrier is even needed. The tests below
   4366   // catch stores of Smis.
   4367   Label done;
   4368 
   4369   // Skip the barrier if writing a smi.
   4370   if (smi_check == INLINE_SMI_CHECK) {
   4371     JumpIfSmi(value, &done);
   4372   }
   4373 
   4374   // Although the object register is tagged, the offset is relative to the start
   4375   // of the object, so offset must be a multiple of kPointerSize.
   4376   DCHECK(IsAligned(offset, kPointerSize));
   4377 
   4378   Add(scratch, object, offset - kHeapObjectTag);
   4379   if (emit_debug_code()) {
   4380     Label ok;
   4381     Tst(scratch, (1 << kPointerSizeLog2) - 1);
   4382     B(eq, &ok);
   4383     Abort(kUnalignedCellInWriteBarrier);
   4384     Bind(&ok);
   4385   }
   4386 
   4387   // The smi check was already done above, so tell RecordWrite to omit it.
   4388   RecordWrite(object,
   4389               scratch,
   4390               value,
   4391               lr_status,
   4392               save_fp,
   4393               remembered_set_action,
   4394               OMIT_SMI_CHECK,
   4395               pointers_to_here_check_for_value);
   4396 
   4397   Bind(&done);
   4398 
   4399   // Clobber clobbered input registers when running with the debug-code flag
   4400   // turned on to provoke errors.
   4401   if (emit_debug_code()) {
   4402     Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
   4403     Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
   4404   }
   4405 }
   4402 
   4403 
   4404 // Will clobber: object, map, dst.
   4405 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
   4406 // Emit a write barrier for a map store into 'object'. Only needed for
   4407 // incremental marking, and only when the map's page has the
   4408 // pointers-to-here-interesting flag set.
   4409 //
   4410 // Will clobber: object, map, dst.
   4411 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
   4412 void MacroAssembler::RecordWriteForMap(Register object,
   4413                                        Register map,
   4414                                        Register dst,
   4415                                        LinkRegisterStatus lr_status,
   4416                                        SaveFPRegsMode fp_mode) {
   4417   ASM_LOCATION("MacroAssembler::RecordWrite");
   4418   DCHECK(!AreAliased(object, map));
   4419 
   4420   if (emit_debug_code()) {
   4421     UseScratchRegisterScope temps(this);
   4422     Register temp = temps.AcquireX();
   4423 
   4424     // 'map' must actually be a map, i.e. its map is the meta map.
   4425     CompareObjectMap(map, temp, isolate()->factory()->meta_map());
   4426     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   4427   }
   4428 
   4429   if (!FLAG_incremental_marking) {
   4430     return;
   4431   }
   4432 
   4433   if (emit_debug_code()) {
   4434     UseScratchRegisterScope temps(this);
   4435     Register temp = temps.AcquireX();
   4436 
   4437     // The store being recorded must already have happened.
   4438     Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   4439     Cmp(temp, map);
   4440     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   4441   }
   4442 
   4443   // First, check if a write barrier is even needed. The tests below
   4444   // catch stores of smis and stores into the young generation.
   4445   Label done;
   4446 
   4447   // A single check of the map's pages interesting flag suffices, since it is
   4448   // only set during incremental collection, and then it's also guaranteed that
   4449   // the from object's page's interesting flag is also set.  This optimization
   4450   // relies on the fact that maps can never be in new space.
   4451   CheckPageFlagClear(map,
   4452                      map,  // Used as scratch.
   4453                      MemoryChunk::kPointersToHereAreInterestingMask,
   4454                      &done);
   4455 
   4456   // Record the actual write.
   4457   if (lr_status == kLRHasNotBeenSaved) {
   4458     Push(lr);
   4459   }
   4460   Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
   4461   RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
   4462                        fp_mode);
   4463   CallStub(&stub);
   4464   if (lr_status == kLRHasNotBeenSaved) {
   4465     Pop(lr);
   4466   }
   4467 
   4468   Bind(&done);
   4469 
   4470   // Count number of write barriers in generated code.
   4471   isolate()->counters()->write_barriers_static()->Increment();
   4472   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
   4473                    dst);
   4474 
   4475   // Clobber clobbered registers when running with the debug-code flag
   4476   // turned on to provoke errors.
   4477   if (emit_debug_code()) {
   4478     Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
   4479     Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
   4480   }
   4481 }
   4474 
   4475 
   4476 // Will clobber: object, address, value.
   4477 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
   4478 //
   4479 // The register 'object' contains a heap object pointer. The heap object tag is
   4480 // shifted away.
   4481 // Emit the generic write barrier for a store of 'value' to 'address' inside
   4482 // 'object'. Skips the barrier for smis and for pages whose interesting flags
   4483 // are clear; otherwise calls RecordWriteStub.
   4484 //
   4485 // Will clobber: object, address, value.
   4486 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
   4487 //
   4488 // The register 'object' contains a heap object pointer. The heap object tag is
   4489 // shifted away.
   4490 void MacroAssembler::RecordWrite(
   4491     Register object,
   4492     Register address,
   4493     Register value,
   4494     LinkRegisterStatus lr_status,
   4495     SaveFPRegsMode fp_mode,
   4496     RememberedSetAction remembered_set_action,
   4497     SmiCheck smi_check,
   4498     PointersToHereCheck pointers_to_here_check_for_value) {
   4499   ASM_LOCATION("MacroAssembler::RecordWrite");
   4500   DCHECK(!AreAliased(object, value));
   4501 
   4502   if (emit_debug_code()) {
   4503     UseScratchRegisterScope temps(this);
   4504     Register temp = temps.AcquireX();
   4505 
   4506     // The store being recorded must already have happened.
   4507     Ldr(temp, MemOperand(address));
   4508     Cmp(temp, value);
   4509     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   4510   }
   4511 
   4512   // First, check if a write barrier is even needed. The tests below
   4513   // catch stores of smis and stores into the young generation.
   4514   Label done;
   4515 
   4516   if (smi_check == INLINE_SMI_CHECK) {
   4517     DCHECK_EQ(0, kSmiTag);
   4518     JumpIfSmi(value, &done);
   4519   }
   4520 
   4521   if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
   4522     CheckPageFlagClear(value,
   4523                        value,  // Used as scratch.
   4524                        MemoryChunk::kPointersToHereAreInterestingMask,
   4525                        &done);
   4526   }
   4527   CheckPageFlagClear(object,
   4528                      value,  // Used as scratch.
   4529                      MemoryChunk::kPointersFromHereAreInterestingMask,
   4530                      &done);
   4531 
   4532   // Record the actual write.
   4533   if (lr_status == kLRHasNotBeenSaved) {
   4534     Push(lr);
   4535   }
   4536   RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
   4537                        fp_mode);
   4538   CallStub(&stub);
   4539   if (lr_status == kLRHasNotBeenSaved) {
   4540     Pop(lr);
   4541   }
   4542 
   4543   Bind(&done);
   4544 
   4545   // Count number of write barriers in generated code.
   4546   isolate()->counters()->write_barriers_static()->Increment();
   4547   IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
   4548                    value);
   4549 
   4550   // Clobber clobbered registers when running with the debug-code flag
   4551   // turned on to provoke errors.
   4552   if (emit_debug_code()) {
   4553     Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
   4554     Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
   4555   }
   4556 }
   4547 
   4548 
   4549 void MacroAssembler::AssertHasValidColor(const Register& reg) {
   4550   if (emit_debug_code()) {
   4551     // The bit sequence is backward. The first character in the string
   4552     // represents the least significant bit.
   4553     DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
   4554 
   4555     Label color_is_valid;
   4556     Tbnz(reg, 0, &color_is_valid);
   4557     Tbz(reg, 1, &color_is_valid);
   4558     Abort(kUnexpectedColorFound);
   4559     Bind(&color_is_valid);
   4560   }
   4561 }
   4562 
   4563 
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  // Compute, for the object at addr_reg, the address of the mark-bitmap cell
  // holding its color bits (-> bitmap_reg) and the bit offset of those bits
  // within the cell (-> shift_reg). addr_reg is left unchanged.
  DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63        page base        20|19    high      8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  // temp = the 'high' field, i.e. the cell index within the page's bitmap.
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  // bitmap_reg = page base (clear the in-page offset bits).
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  // bitmap_reg = page base + cell index scaled to a byte offset.
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
  // bitmap_reg:
  // |63        page base        20|19 zeros 15|14      high      3|2  0|
  // shift_reg = bit index of the object's color bits within the cell.
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}
   4583 
   4584 
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // Jump to |has_color| if the mark bits of |object| match the pattern
  // (first_bit, second_bit), where first_bit is the least significant one;
  // fall through otherwise. Both scratch registers are clobbered.
  // See mark-compact.h for color definitions.
  DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    DCHECK(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      // Black: bit 0 set, bit 1 clear.
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      // Grey: both bits set.
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}
   4627 
   4628 
   4629 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
   4630                                         Register scratch,
   4631                                         Label* if_deprecated) {
   4632   if (map->CanBeDeprecated()) {
   4633     Mov(scratch, Operand(map));
   4634     Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
   4635     TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
   4636   }
   4637 }
   4638 
   4639 
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  // Jump to |on_black| if |object| is marked black. Black is encoded as
  // "10" (bit patterns read least-significant-bit first), so we look for
  // first_bit == 1 and second_bit == 0.
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}
   4647 
   4648 
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  // Walk the prototype chain of |object| and jump to |found| if any map on
  // the chain has dictionary (slow) elements. Falls through when the chain
  // ends at null without finding one. Both scratch registers are clobbered.
  DCHECK(!AreAliased(object, scratch0, scratch1));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contains elements pointer.
  Mov(current, object);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  // Extract the elements kind from bit field 2 of the map.
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  // Advance to the prototype; keep looping until we reach null.
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
}
   4671 
   4672 
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  // Given the address of a pc-relative LDR (literal) instruction in
  // |ldr_location|, compute into |result| the address of the literal that
  // instruction loads.
  DCHECK(!result.Is(ldr_location));
  // Position and width of the signed literal-offset field in the
  // LDR (literal) instruction encoding.
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Verify that the instruction really is an LDR (literal).
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  // Sign-extend the word offset, scale it to bytes, and add it to the
  // instruction's own address to obtain the literal's address.
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}
   4689 
   4690 
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  // If |value| is already black or grey, do nothing. If it is white and is a
  // data object (heap number, external string, or sequential string — i.e.
  // contains no GC pointers), mark it black and add its size to the page's
  // live-bytes counter. If it is white and may contain pointers, jump to
  // |value_is_white_and_not_data|. All scratch registers are clobbered.
  DCHECK(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Load the mark bits for |value| into bits [1:0] of load_scratch.
  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white.  We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // Non-strings and indirect (cons/sliced) strings may contain pointers.
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either Latin1 or UC16.
  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  Tst(instance_type, kStringEncodingMask);
  // eq <=> the encoding bit is clear, i.e. a two-byte string; the shift below
  // then doubles the character count to get the byte size.
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  // Add the header size and round up to the object alignment.
  Add(length_scratch,
      length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);

  Bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  // Bump the owning page's live-bytes counter by the object's size.
  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}
   4782 
   4783 
   4784 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
   4785   if (emit_debug_code()) {
   4786     Check(cond, reason);
   4787   }
   4788 }
   4789 
   4790 
   4791 
   4792 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
   4793   if (emit_debug_code()) {
   4794     CheckRegisterIsClear(reg, reason);
   4795   }
   4796 }
   4797 
   4798 
   4799 void MacroAssembler::AssertRegisterIsRoot(Register reg,
   4800                                           Heap::RootListIndex index,
   4801                                           BailoutReason reason) {
   4802   if (emit_debug_code()) {
   4803     CompareRoot(reg, index);
   4804     Check(eq, reason);
   4805   }
   4806 }
   4807 
   4808 
   4809 void MacroAssembler::AssertFastElements(Register elements) {
   4810   if (emit_debug_code()) {
   4811     UseScratchRegisterScope temps(this);
   4812     Register temp = temps.AcquireX();
   4813     Label ok;
   4814     Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
   4815     JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
   4816     JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
   4817     JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
   4818     Abort(kJSObjectWithFastElementsMapHasSlowElements);
   4819     Bind(&ok);
   4820   }
   4821 }
   4822 
   4823 
   4824 void MacroAssembler::AssertIsString(const Register& object) {
   4825   if (emit_debug_code()) {
   4826     UseScratchRegisterScope temps(this);
   4827     Register temp = temps.AcquireX();
   4828     STATIC_ASSERT(kSmiTag == 0);
   4829     Tst(object, kSmiTagMask);
   4830     Check(ne, kOperandIsNotAString);
   4831     Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   4832     CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
   4833     Check(lo, kOperandIsNotAString);
   4834   }
   4835 }
   4836 
   4837 
   4838 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
   4839   Label ok;
   4840   B(cond, &ok);
   4841   Abort(reason);
   4842   // Will not return here.
   4843   Bind(&ok);
   4844 }
   4845 
   4846 
   4847 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
   4848   Label ok;
   4849   Cbz(reg, &ok);
   4850   Abort(reason);
   4851   // Will not return here.
   4852   Bind(&ok);
   4853 }
   4854 
   4855 
void MacroAssembler::Abort(BailoutReason reason) {
  // Emit code that aborts execution with |reason|: normally by calling
  // Runtime::kAbort, or — when real aborts are disabled — by printing the
  // reason string and halting. The generated code never returns.
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    // Trap immediately instead of generating the full abort sequence.
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    // Pass the bailout reason to the runtime as a smi argument.
    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  // Restore assembler state altered above; this matters only for code
  // emitted after this point, since the abort itself never returns.
  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}
   4917 
   4918 
void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // If |map_in_out| equals the native context's cached JSArray map for
  // |expected_kind|, replace it with the cached map for |transitioned_kind|;
  // otherwise jump to |no_map_match|. Both scratch registers are clobbered.
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}
   4941 
   4942 
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global function at context slot |index| of the native context
  // into |function|.
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}
   4952 
   4953 
   4954 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   4955                                                   Register map,
   4956                                                   Register scratch) {
   4957   // Load the initial map. The global functions all have initial maps.
   4958   Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   4959   if (emit_debug_code()) {
   4960     Label ok, fail;
   4961     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
   4962     B(&ok);
   4963     Bind(&fail);
   4964     Abort(kGlobalFunctionsMustHaveInitialMap);
   4965     Bind(&ok);
   4966   }
   4967 }
   4968 
   4969 
   4970 // This is the main Printf implementation. All other Printf variants call
   4971 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // Emit a call to printf with up to four register arguments, without
  // preserving any caller-saved registers. The argument list is terminated by
  // the first NoReg argument.
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);      // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      // The first NoReg argument marks the end of the argument list.
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        // Sizes differ: convert (e.g. a float argument promoted to double).
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}
   5099 
   5100 
void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  // Under the simulator, emit a special hlt that the simulator recognizes as
  // a printf request, followed by two data words describing the arguments.
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);          // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        // W vs X matters for how the simulator reads the argument.
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      // Pack each argument's pattern into its own bit field.
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);   // kPrintfArgPatternListOffset
  }
#else
  // On real hardware, call the C library's printf directly.
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}
   5129 
   5130 
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // Register-preserving Printf: saves all caller-saved registers and NZCV
  // around a call to PrintfNoPreserve, which does the actual work.
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      // xzr is pushed as a second slot (presumably to keep the stack
      // 16-byte aligned — confirm against Push's alignment requirements).
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  // Restore the original scratch register lists.
  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}
   5208 
   5209 
void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // Emit the "young" code age sequence with an instruction-accurate size of
  // kNoCodeAgeSequenceLength, delegating to the static helper below.
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}
   5219 
   5220 
   5221 
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  // Emit the code age sequence that branches to |stub|, sized exactly like
  // the young sequence so that either can be patched over the other.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}
   5228 
   5229 
   5230 #undef __
   5231 #define __ assm->
   5232 
   5233 
void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  // Static helper: emit the young code age sequence — a standard frame setup
  // of exactly kNoCodeAgeSequenceLength bytes — so that the code ageing
  // mechanism can later overwrite it with the sequence emitted by
  // EmitCodeAgeSequence.
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  // Verify the emitted sequence has exactly the patchable length.
  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
   5250 
   5251 
void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  // Static helper: emit the "old" code age sequence, which loads the stub's
  // entry address from an inline literal and branches to it.
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    // Emit the stub entry address as the literal the ldr_pcrel above loads.
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}
   5274 
   5275 
   5276 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
   5277   bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
   5278   DCHECK(is_young ||
   5279          isolate->code_aging_helper()->IsOld(sequence));
   5280   return is_young;
   5281 }
   5282 
   5283 
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  // Emits code computing result = dividend / divisor (signed, truncated
  // towards zero) without a division instruction, using multiplication by a
  // precomputed "magic number" (Granlund & Montgomery, "Division by
  // Invariant Integers using Multiplication"). Both registers must be
  // 32-bit and must not alias.
  // NOTE(review): presumably callers never pass divisor 0, 1 or -1, for
  // which no magic number exists -- confirm against call sites.
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  // result = high 32 bits of (dividend * multiplier).
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  // If the magic multiplier overflowed into the sign bit, a corrective
  // add/sub of the dividend is required, depending on the divisor's sign.
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  // Adding the dividend's sign bit fixes up the rounding direction for
  // negative dividends (arithmetic shift rounds towards -infinity).
  Add(result, result, Operand(dividend, LSR, 31));
}
   5300 
   5301 
   5302 #undef __
   5303 
   5304 
   5305 UseScratchRegisterScope::~UseScratchRegisterScope() {
   5306   available_->set_list(old_available_);
   5307   availablefp_->set_list(old_availablefp_);
   5308 }
   5309 
   5310 
   5311 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
   5312   int code = AcquireNextAvailable(available_).code();
   5313   return Register::Create(code, reg.SizeInBits());
   5314 }
   5315 
   5316 
   5317 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
   5318   int code = AcquireNextAvailable(availablefp_).code();
   5319   return FPRegister::Create(code, reg.SizeInBits());
   5320 }
   5321 
   5322 
   5323 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
   5324     CPURegList* available) {
   5325   CHECK(!available->IsEmpty());
   5326   CPURegister result = available->PopLowestIndex();
   5327   DCHECK(!AreAliased(result, xzr, csp));
   5328   return result;
   5329 }
   5330 
   5331 
CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  // Claim a specific register from |available| rather than the next free
  // one. "Unsafe" because the caller must know |reg| is currently in the
  // list; this is only checked in debug builds.
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}
   5338 
   5339 
   5340 #define __ masm->
   5341 
   5342 
   5343 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
   5344                               const Label* smi_check) {
   5345   Assembler::BlockPoolsScope scope(masm);
   5346   if (reg.IsValid()) {
   5347     DCHECK(smi_check->is_bound());
   5348     DCHECK(reg.Is64Bits());
   5349 
   5350     // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
   5351     // 'check' in the other bits. The possible offset is limited in that we
   5352     // use BitField to pack the data, and the underlying data type is a
   5353     // uint32_t.
   5354     uint32_t delta = __ InstructionsGeneratedSince(smi_check);
   5355     __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
   5356   } else {
   5357     DCHECK(!smi_check->is_bound());
   5358 
   5359     // An offset of 0 indicates that there is no patch site.
   5360     __ InlineData(0);
   5361   }
   5362 }
   5363 
   5364 
   5365 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
   5366     : reg_(NoReg), smi_check_(NULL) {
   5367   InstructionSequence* inline_data = InstructionSequence::At(info);
   5368   DCHECK(inline_data->IsInlineData());
   5369   if (inline_data->IsInlineData()) {
   5370     uint64_t payload = inline_data->InlineData();
   5371     // We use BitField to decode the payload, and BitField can only handle
   5372     // 32-bit values.
   5373     DCHECK(is_uint32(payload));
   5374     if (payload != 0) {
   5375       int reg_code = RegisterBits::decode(payload);
   5376       reg_ = Register::XRegFromCode(reg_code);
   5377       uint64_t smi_check_delta = DeltaBits::decode(payload);
   5378       DCHECK(smi_check_delta != 0);
   5379       smi_check_ = inline_data->preceding(smi_check_delta);
   5380     }
   5381   }
   5382 }
   5383 
   5384 
   5385 #undef __
   5386 
   5387 
   5388 } }  // namespace v8::internal
   5389 
   5390 #endif  // V8_TARGET_ARCH_ARM64
   5391