// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/globals.h"

#include "src/arm64/assembler-arm64-inl.h"

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                      \
  V(Ldrb, Register&, rt, LDRB_w)                              \
  V(Strb, Register&, rt, STRB_w)                              \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w)  \
  V(Ldrh, Register&, rt, LDRH_w)                              \
  V(Strh, Register&, rt, STRH_w)                              \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w)  \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                     \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
  V(Ldrsw, Register&, rt, LDRSW_x)


// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
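
// Illustrative uses of the helpers above. FieldMemOperand compensates for the
// kHeapObjectTag bias on tagged pointers, so a field load looks like:
//
//   __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
//
// The UntagSmi variants address only the payload word of a SMI slot, so a
// narrower load yields the untagged integer value directly.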


// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of these; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
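
// The enum is laid out so that each register- or bit-based branch type is
// adjacent to its inverse, making inversion a simple XOR with 1 (see the
// STATIC_ASSERT in MacroAssembler). For example:
//
//   InvertBranchType(reg_zero)      == reg_not_zero   // cbz  <-> cbnz
//   InvertBranchType(reg_bit_clear) == reg_bit_set    // tbz  <-> tbnz
//   InvertBranchType(integer_eq)    == integer_ne     // via NegateCondition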

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
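
  // For example, the Ldrb entry in LS_MACRO_LIST expands here to:
  //
  //   inline void Ldrb(const Register& rt, const MemOperand& addr);
  //
  // with the corresponding definition dispatching to LoadStoreMacro (below)
  // using the LDRB_w op.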

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero      == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always        == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    ASSERT(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  inline void Ldp(const CPURegister& rt,
                  const CPURegister& rt2,
                  const MemOperand& src);
  inline void Ldpsw(const Register& rt,
                    const Register& rt2,
                    const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Stp(const CPURegister& rt,
                  const CPURegister& rt2,
                  const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
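
  // Example (illustrative): preserve x0 and x1 across some code. Pops mirror
  // pushes, so the register order is reversed on the way out:
  //
  //   __ Push(x0, x1);   // Equivalent to Push(x0); Push(x1); x1 ends on top.
  //   ...
  //   __ Pop(x1, x0);    // Equivalent to Pop(x1); Pop(x0); restores both.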

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      ASSERT(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
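
  // Sketch of typical usage (illustrative):
  //
  //   PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);       // Mixed sizes and types may be queued together.
  //   queue.PushQueued();    // Flush the queue as one efficient push sequence.
  //   ...
  //   queue.Queue(x0);
  //   queue.Queue(d0);
  //   queue.PopQueued();     // Emit the matching pop sequence.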

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);
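
  // The space claimed or dropped is count * unit_size bytes. For example
  // (illustrative):
  //
  //   __ Claim(2);              // Reserve 2 * kXRegSize = 16 bytes.
  //   __ Drop(x10, kXRegSize);  // Release x10 * 8 bytes; for this register
  //                             // form the unit size must be a power of two.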

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
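
  // For example (illustrative), branch if either of the low two bits of x0 is
  // set, or if x0 is 8-byte aligned:
  //
  //   __ TestAndBranchIfAnySet(x0, 0x3, &low_bit_set);
  //   __ TestAndBranchIfAllClear(x0, 0x7, &aligned);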

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two-character
  // string; it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
  // enabled, then csp will be dereferenced to cause the processor
  // (or simulator) to abort if it is not properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    ASSERT(sp_alignment >= 16);
    ASSERT(IsPowerOf2(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()). This function will ensure that the
  // new value of the system stack pointer remains aligned to 16 bytes, and
  // is lower than or equal to the value of the current stack pointer.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      ASSERT(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture-independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const uint64_t shift = Field::kShift;
    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
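
  // For example (illustrative), a field with kShift == 2 and kMask == 0x3c
  // makes DecodeField emit Ubfx(dst, src, 2, 4), placing the four-bit field
  // in the low bits of dst.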

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);
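
  // On this port a SMI carries its 32-bit payload in the upper word of the
  // register, so tagging is a left shift by kSmiShift and untagging is an
  // arithmetic right shift (illustrative):
  //
  //   __ SmiTag(x1, x0);     // x1 = x0 << kSmiShift
  //   __ SmiUntag(x0, x1);   // x0 = x1 >> kSmiShift (sign-extending)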

  // Compute the absolute value of 'smi' and leave the result in 'smi'
  // register. If 'smi' is the most negative SMI, the absolute value cannot
  // be represented as a SMI and a jump to 'slow' is done.
  void SmiAbs(const Register& smi, Label* slow);

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpForHeapNumber(Register object,
                         Register heap_number_map,
                         Label* on_heap_number,
                         Label* on_not_heap_number = NULL);
  void JumpIfHeapNumber(Register object,
                        Label* on_heap_number,
                        Register heap_number_map = NoReg);
  void JumpIfNotHeapNumber(Register object,
                           Label* on_not_heap_number,
                           Register heap_number_map = NoReg);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found, leaving the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }
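
  // Illustrative use, testing the Z flag after the attempt:
  //
  //   __ TryRepresentDoubleAsInt32(w0, d0, d1);
  //   __ B(ne, &not_an_int32);  // Z clear: d0 was not exactly a 32-bit int.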
    990 
    991   // ---- Object Utilities ----
    992 
    993   // Copy fields from 'src' to 'dst', where both are tagged objects.
    994   // The 'temps' list is a list of X registers which can be used for scratch
    995   // values. The temps list must include at least one register.
    996   //
    997   // Currently, CopyFields cannot make use of more than three registers from
    998   // the 'temps' list.
    999   //
   1000   // CopyFields expects to be able to take at least two registers from
   1001   // MacroAssembler::TmpList().
   1002   void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
   1003 
   1004   // Starting at address in dst, initialize field_count 64-bit fields with
   1005   // 64-bit value in register filler. Register dst is corrupted.
   1006   void FillFields(Register dst,
   1007                   Register field_count,
   1008                   Register filler);
   1009 
   1010   // Copies a number of bytes from src to dst. All passed registers are
   1011   // clobbered. On exit src and dst will point to the place just after where the
   1012   // last byte was read or written and length will be zero. Hint may be used to
   1013   // determine which is the most efficient algorithm to use for copying.
   1014   void CopyBytes(Register dst,
   1015                  Register src,
   1016                  Register length,
   1017                  Register scratch,
   1018                  CopyHint hint = kCopyUnknown);
   1019 
   1020   // ---- String Utilities ----
   1021 
   1022 
   1023   // Jump to label if either object is not a sequential ASCII string.
   1024   // Optionally perform a smi check on the objects first.
   1025   void JumpIfEitherIsNotSequentialAsciiStrings(
   1026       Register first,
   1027       Register second,
   1028       Register scratch1,
   1029       Register scratch2,
   1030       Label* failure,
   1031       SmiCheckType smi_check = DO_SMI_CHECK);
   1032 
   1033   // Check if instance type is sequential ASCII string and jump to label if
   1034   // it is not.
   1035   void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
   1036                                               Register scratch,
   1037                                               Label* failure);
   1038 
   1039   // Checks if both instance types are sequential ASCII strings and jumps to
   1040   // label if either is not.
   1041   void JumpIfEitherInstanceTypeIsNotSequentialAscii(
   1042       Register first_object_instance_type,
   1043       Register second_object_instance_type,
   1044       Register scratch1,
   1045       Register scratch2,
   1046       Label* failure);
   1047 
   1048   // Checks if both instance types are sequential ASCII strings and jumps to
   1049   // label if either is not.
   1050   void JumpIfBothInstanceTypesAreNotSequentialAscii(
   1051       Register first_object_instance_type,
   1052       Register second_object_instance_type,
   1053       Register scratch1,
   1054       Register scratch2,
   1055       Label* failure);
   1056 
   1057   void JumpIfNotUniqueName(Register type, Label* not_unique_name);
   1058 
   1059   // ---- Calling / Jumping helpers ----
   1060 
   1061   // This is required for compatibility in architecture indepenedant code.
   1062   inline void jmp(Label* L) { B(L); }
   1063 
   1064   // Passes thrown value to the handler of top of the try handler chain.
   1065   // Register value must be x0.
   1066   void Throw(Register value,
   1067              Register scratch1,
   1068              Register scratch2,
   1069              Register scratch3,
   1070              Register scratch4);
   1071 
   1072   // Propagates an uncatchable exception to the top of the current JS stack's
   1073   // handler chain. Register value must be x0.
   1074   void ThrowUncatchable(Register value,
   1075                         Register scratch1,
   1076                         Register scratch2,
   1077                         Register scratch3,
   1078                         Register scratch4);
   1079 
   1080   void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
   1081   void TailCallStub(CodeStub* stub);
   1082 
   1083   void CallRuntime(const Runtime::Function* f,
   1084                    int num_arguments,
   1085                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
   1086 
   1087   void CallRuntime(Runtime::FunctionId id,
   1088                    int num_arguments,
   1089                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
   1090     CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   1091   }
   1092 
   1093   void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   1094     const Runtime::Function* function = Runtime::FunctionForId(id);
   1095     CallRuntime(function, function->nargs, kSaveFPRegs);
   1096   }
   1097 
   1098   void TailCallRuntime(Runtime::FunctionId fid,
   1099                        int num_arguments,
   1100                        int result_size);
   1101 
   1102   int ActivationFrameAlignment();
   1103 
   1104   // Calls a C function.
   1105   // The called function is not allowed to trigger a
   1106   // garbage collection, since that might move the code and invalidate the
   1107   // return address (unless this is somehow accounted for by the called
   1108   // function).
   1109   void CallCFunction(ExternalReference function,
   1110                      int num_reg_arguments);
   1111   void CallCFunction(ExternalReference function,
   1112                      int num_reg_arguments,
   1113                      int num_double_arguments);
   1114   void CallCFunction(Register function,
   1115                      int num_reg_arguments,
   1116                      int num_double_arguments);
   1117 
   1118   // Calls an API function. Allocates HandleScope, extracts returned value
   1119   // from handle and propagates exceptions.
   1120   // 'stack_space' is the space to be unwound on exit (includes the call JS
   1121   // arguments space and the additional space allocated for the fast call).
   1122   // 'spill_offset' is the offset from the stack pointer where
   1123   // CallApiFunctionAndReturn can spill registers.
   1124   void CallApiFunctionAndReturn(Register function_address,
   1125                                 ExternalReference thunk_ref,
   1126                                 int stack_space,
   1127                                 int spill_offset,
   1128                                 MemOperand return_value_operand,
   1129                                 MemOperand* context_restore_operand);
   1130 
   1131   // The number of register that CallApiFunctionAndReturn will need to save on
   1132   // the stack. The space for these registers need to be allocated in the
   1133   // ExitFrame before calling CallApiFunctionAndReturn.
   1134   static const int kCallApiFunctionSpillSpace = 4;
   1135 
   1136   // Jump to a runtime routine.
   1137   void JumpToExternalReference(const ExternalReference& builtin);
   1138   // Tail call of a runtime routine (jump).
   1139   // Like JumpToExternalReference, but also takes care of passing the number
   1140   // of parameters.
   1141   void TailCallExternalReference(const ExternalReference& ext,
   1142                                  int num_arguments,
   1143                                  int result_size);
   1144   void CallExternalReference(const ExternalReference& ext,
   1145                              int num_arguments);
   1146 
   1147 
   1148   // Invoke specified builtin JavaScript function. Adds an entry to
   1149   // the unresolved list if the name does not resolve.
   1150   void InvokeBuiltin(Builtins::JavaScript id,
   1151                      InvokeFlag flag,
   1152                      const CallWrapper& call_wrapper = NullCallWrapper());
   1153 
   1154   // Store the code object for the given builtin in the target register and
   1155   // set up the function in the function register.
   1156   void GetBuiltinEntry(Register target,
   1157                        Register function,
   1158                        Builtins::JavaScript id);
   1159 
   1160   // Store the function for the given builtin in the target register.
   1161   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
   1162 
   1163   void Jump(Register target);
   1164   void Jump(Address target, RelocInfo::Mode rmode);
   1165   void Jump(Handle<Code> code, RelocInfo::Mode rmode);
   1166   void Jump(intptr_t target, RelocInfo::Mode rmode);
   1167 
   1168   void Call(Register target);
   1169   void Call(Label* target);
   1170   void Call(Address target, RelocInfo::Mode rmode);
   1171   void Call(Handle<Code> code,
   1172             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
   1173             TypeFeedbackId ast_id = TypeFeedbackId::None());
   1174 
   1175   // For every Call variant, there is a matching CallSize function that returns
   1176   // the size (in bytes) of the call sequence.
   1177   static int CallSize(Register target);
   1178   static int CallSize(Label* target);
   1179   static int CallSize(Address target, RelocInfo::Mode rmode);
   1180   static int CallSize(Handle<Code> code,
   1181                       RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
   1182                       TypeFeedbackId ast_id = TypeFeedbackId::None());
   1183 
   1184   // Registers used through the invocation chain are hard-coded.
   1185   // We force passing the parameters to ensure the contracts are correctly
   1186   // honoured by the caller.
   1187   // 'function' must be x1.
   1188   // 'actual' must use an immediate or x0.
   1189   // 'expected' must use an immediate or x2.
   1190   // 'call_kind' must be x5.
   1191   void InvokePrologue(const ParameterCount& expected,
   1192                       const ParameterCount& actual,
   1193                       Handle<Code> code_constant,
   1194                       Register code_reg,
   1195                       Label* done,
   1196                       InvokeFlag flag,
   1197                       bool* definitely_mismatches,
   1198                       const CallWrapper& call_wrapper);
   1199   void InvokeCode(Register code,
   1200                   const ParameterCount& expected,
   1201                   const ParameterCount& actual,
   1202                   InvokeFlag flag,
   1203                   const CallWrapper& call_wrapper);
   1204   // Invoke the JavaScript function in the given register.
   1205   // Changes the current context to the context in the function before invoking.
   1206   void InvokeFunction(Register function,
   1207                       const ParameterCount& actual,
   1208                       InvokeFlag flag,
   1209                       const CallWrapper& call_wrapper);
   1210   void InvokeFunction(Register function,
   1211                       const ParameterCount& expected,
   1212                       const ParameterCount& actual,
   1213                       InvokeFlag flag,
   1214                       const CallWrapper& call_wrapper);
   1215   void InvokeFunction(Handle<JSFunction> function,
   1216                       const ParameterCount& expected,
   1217                       const ParameterCount& actual,
   1218                       InvokeFlag flag,
   1219                       const CallWrapper& call_wrapper);
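
          // Illustrative sketch of InvokeFunction: invoke the JSFunction in x1 (as
          // the contract above requires), with the actual argument count in x0.
          //   ParameterCount actual(x0);
          //   __ InvokeFunction(x1, actual, CALL_FUNCTION, NullCallWrapper());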
   1220 
   1221 
   1222   // ---- Floating point helpers ----
   1223 
   1224   // Perform a conversion from a double to a signed int64. If the input fits in
   1225   // the range of the 64-bit result, execution branches to done. Otherwise,
   1226   // execution falls through, and the sign of the result can be used to
   1227   // determine if overflow was towards positive or negative infinity.
   1228   //
   1229   // On successful conversion, the least significant 32 bits of the result are
   1230   // equivalent to the ECMA-262 operation "ToInt32".
   1231   //
   1232   // Only public for the test code in test-code-stubs-arm64.cc.
   1233   void TryConvertDoubleToInt64(Register result,
   1234                                DoubleRegister input,
   1235                                Label* done);
   1236 
   1237   // Performs a truncating conversion of a floating point number as used by
   1238   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
   1239   // Exits with 'result' holding the answer.
   1240   void TruncateDoubleToI(Register result, DoubleRegister double_input);
   1241 
   1242   // Performs a truncating conversion of a heap number as used by
   1243   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
   1244   // must be different registers.  Exits with 'result' holding the answer.
   1245   void TruncateHeapNumberToI(Register result, Register object);
   1246 
   1247   // Converts the smi or heap number in object to an int32 using the rules
   1248   // for ToInt32 as described in ECMAScript 9.5: the value is truncated
   1249   // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
   1250   // different registers.
   1251   void TruncateNumberToI(Register object,
   1252                          Register result,
   1253                          Register heap_number_map,
   1254                          Label* not_int32);
   1255 
   1256   // ---- Code generation helpers ----
   1257 
   1258   void set_generating_stub(bool value) { generating_stub_ = value; }
   1259   bool generating_stub() const { return generating_stub_; }
   1260 #if DEBUG
   1261   void set_allow_macro_instructions(bool value) {
   1262     allow_macro_instructions_ = value;
   1263   }
   1264   bool allow_macro_instructions() const { return allow_macro_instructions_; }
   1265 #endif
   1266   bool use_real_aborts() const { return use_real_aborts_; }
   1267   void set_has_frame(bool value) { has_frame_ = value; }
   1268   bool has_frame() const { return has_frame_; }
   1269   bool AllowThisStubCall(CodeStub* stub);
   1270 
   1271   class NoUseRealAbortsScope {
   1272    public:
   1273     explicit NoUseRealAbortsScope(MacroAssembler* masm) :
   1274         saved_(masm->use_real_aborts_), masm_(masm) {
   1275       masm_->use_real_aborts_ = false;
   1276     }
   1277     ~NoUseRealAbortsScope() {
   1278       masm_->use_real_aborts_ = saved_;
   1279     }
   1280    private:
   1281     bool saved_;
   1282     MacroAssembler* masm_;
   1283   };
   1284 
   1285   // ---------------------------------------------------------------------------
   1286   // Debugger Support
   1287 
   1288   void DebugBreak();
   1289 
   1290   // ---------------------------------------------------------------------------
   1291   // Exception handling
   1292 
   1293   // Push a new try handler and link into try handler chain.
   1294   void PushTryHandler(StackHandler::Kind kind, int handler_index);
   1295 
   1296   // Unlink the stack handler on top of the stack from the try handler chain.
   1297   // Must preserve the result register.
   1298   void PopTryHandler();
   1299 
   1300 
   1301   // ---------------------------------------------------------------------------
   1302   // Allocation support
   1303 
   1304   // Allocate an object in new space or old pointer space. The object_size is
   1305   // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
   1306   // is passed. The allocated object is returned in result.
   1307   //
   1308   // If the new space is exhausted control continues at the gc_required label.
   1309   // In this case, the result and scratch registers may still be clobbered.
   1310   // If flags includes TAG_OBJECT, the result is tagged as a heap object.
   1311   void Allocate(Register object_size,
   1312                 Register result,
   1313                 Register scratch1,
   1314                 Register scratch2,
   1315                 Label* gc_required,
   1316                 AllocationFlags flags);
   1317 
   1318   void Allocate(int object_size,
   1319                 Register result,
   1320                 Register scratch1,
   1321                 Register scratch2,
   1322                 Label* gc_required,
   1323                 AllocationFlags flags);
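
          // Illustrative sketch of Allocate ('gc_required' is an assumed label in
          // scope): allocate a fixed-size object, receiving a tagged pointer in x0
          // and clobbering x1 and x2 as scratch registers.
          //   __ Allocate(JSObject::kHeaderSize, x0, x1, x2, &gc_required,
          //               TAG_OBJECT);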
   1324 
   1325   // Undo allocation in new space. The object passed and objects allocated after
   1326   // it will no longer be allocated. The caller must make sure that no pointers
   1327   // are left to the object(s) no longer allocated as they would be invalid when
   1328   // allocation is undone.
   1329   void UndoAllocationInNewSpace(Register object, Register scratch);
   1330 
   1331   void AllocateTwoByteString(Register result,
   1332                              Register length,
   1333                              Register scratch1,
   1334                              Register scratch2,
   1335                              Register scratch3,
   1336                              Label* gc_required);
   1337   void AllocateAsciiString(Register result,
   1338                            Register length,
   1339                            Register scratch1,
   1340                            Register scratch2,
   1341                            Register scratch3,
   1342                            Label* gc_required);
   1343   void AllocateTwoByteConsString(Register result,
   1344                                  Register length,
   1345                                  Register scratch1,
   1346                                  Register scratch2,
   1347                                  Label* gc_required);
   1348   void AllocateAsciiConsString(Register result,
   1349                                Register length,
   1350                                Register scratch1,
   1351                                Register scratch2,
   1352                                Label* gc_required);
   1353   void AllocateTwoByteSlicedString(Register result,
   1354                                    Register length,
   1355                                    Register scratch1,
   1356                                    Register scratch2,
   1357                                    Label* gc_required);
   1358   void AllocateAsciiSlicedString(Register result,
   1359                                  Register length,
   1360                                  Register scratch1,
   1361                                  Register scratch2,
   1362                                  Label* gc_required);
   1363 
   1364   // Allocates a heap number or jumps to the gc_required label if the young
   1365   // space is full and a scavenge is needed.
   1366   // All registers are clobbered.
   1367   // If no heap_number_map register is provided, the function will take care of
   1368   // loading it.
   1369   void AllocateHeapNumber(Register result,
   1370                           Label* gc_required,
   1371                           Register scratch1,
   1372                           Register scratch2,
   1373                           CPURegister value = NoFPReg,
   1374                           CPURegister heap_number_map = NoReg);
   1375 
   1376   // ---------------------------------------------------------------------------
   1377   // Support functions.
   1378 
   1379   // Try to get the function prototype of a function and put the value in the
   1380   // result register. Checks that the function really is a function and jumps
   1381   // to the miss label if the fast checks fail. The function register will be
   1382   // untouched; the other registers may be clobbered.
   1383   enum BoundFunctionAction {
   1384     kMissOnBoundFunction,
   1385     kDontMissOnBoundFunction
   1386   };
   1387 
   1388   void TryGetFunctionPrototype(Register function,
   1389                                Register result,
   1390                                Register scratch,
   1391                                Label* miss,
   1392                                BoundFunctionAction action =
   1393                                  kDontMissOnBoundFunction);
   1394 
   1395   // Compare object type for heap object.  heap_object contains a non-Smi
   1396   // whose object type should be compared with the given type.  This both
   1397   // sets the flags and leaves the object type in the type_reg register.
   1398   // It leaves the map in the map register (unless the type_reg and map register
   1399   // are the same register).  It leaves the heap object in the heap_object
   1400   // register unless the heap_object register is the same register as one of the
   1401   // other registers.
   1402   void CompareObjectType(Register heap_object,
   1403                          Register map,
   1404                          Register type_reg,
   1405                          InstanceType type);
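
          // Illustrative sketch of CompareObjectType: check whether x0 holds a
          // JSFunction, using x10 and x11 as the map and type registers.
          //   Label not_function;
          //   __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
          //   __ B(ne, &not_function);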
   1406 
   1407 
   1408   // Compare object type for heap object, and branch if equal (or not).
   1409   // heap_object contains a non-Smi whose object type should be compared with
   1410   // the given type.  This both sets the flags and leaves the object type in
   1411   // the type_reg register. It leaves the map in the map register (unless the
   1412   // type_reg and map register are the same register).  It leaves the heap
   1413   // object in the heap_object register unless the heap_object register is the
   1414   // same register as one of the other registers.
   1415   void JumpIfObjectType(Register object,
   1416                         Register map,
   1417                         Register type_reg,
   1418                         InstanceType type,
   1419                         Label* if_cond_pass,
   1420                         Condition cond = eq);
   1421 
   1422   void JumpIfNotObjectType(Register object,
   1423                            Register map,
   1424                            Register type_reg,
   1425                            InstanceType type,
   1426                            Label* if_not_object);
   1427 
   1428   // Compare instance type in a map.  map contains a valid map object whose
   1429   // object type should be compared with the given type.  This both
   1430   // sets the flags and leaves the object type in the type_reg register.
   1431   void CompareInstanceType(Register map,
   1432                            Register type_reg,
   1433                            InstanceType type);
   1434 
   1435   // Compare an object's map with the specified map. Condition flags are set
   1436   // with result of map compare.
   1437   void CompareMap(Register obj,
   1438                   Register scratch,
   1439                   Handle<Map> map);
   1440 
   1441   // As above, but the map of the object is already loaded into the register
   1442   // which is preserved by the code generated.
   1443   void CompareMap(Register obj_map,
   1444                   Handle<Map> map);
   1445 
   1446   // Check if the map of an object is equal to a specified map and branch to
   1447   // label if not. Skip the smi check if not required (object is known to be a
   1448   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
   1449   // against maps that are ElementsKind transition maps of the specified map.
   1450   void CheckMap(Register obj,
   1451                 Register scratch,
   1452                 Handle<Map> map,
   1453                 Label* fail,
   1454                 SmiCheckType smi_check_type);
   1455 
   1456 
   1457   void CheckMap(Register obj,
   1458                 Register scratch,
   1459                 Heap::RootListIndex index,
   1460                 Label* fail,
   1461                 SmiCheckType smi_check_type);
   1462 
   1463   // As above, but the map of the object is already loaded into obj_map, and is
   1464   // preserved.
   1465   void CheckMap(Register obj_map,
   1466                 Handle<Map> map,
   1467                 Label* fail,
   1468                 SmiCheckType smi_check_type);
   1469 
   1470   // Check if the map of an object is equal to a specified map and branch to a
   1471   // specified target if equal. Skip the smi check if not required (object is
   1472   // known to be a heap object)
   1473   void DispatchMap(Register obj,
   1474                    Register scratch,
   1475                    Handle<Map> map,
   1476                    Handle<Code> success,
   1477                    SmiCheckType smi_check_type);
   1478 
   1479   // Test the bitfield of the heap object map with mask and set the condition
   1480   // flags. The object register is preserved.
   1481   void TestMapBitfield(Register object, uint64_t mask);
   1482 
   1483   // Load the elements kind field from a map, and return it in the result
   1484   // register.
   1485   void LoadElementsKindFromMap(Register result, Register map);
   1486 
   1487   // Compare the object in a register to a value from the root list.
   1488   void CompareRoot(const Register& obj, Heap::RootListIndex index);
   1489 
   1490   // Compare the object in a register to a value and jump if they are equal.
   1491   void JumpIfRoot(const Register& obj,
   1492                   Heap::RootListIndex index,
   1493                   Label* if_equal);
   1494 
   1495   // Compare the object in a register to a value and jump if they are not equal.
   1496   void JumpIfNotRoot(const Register& obj,
   1497                      Heap::RootListIndex index,
   1498                      Label* if_not_equal);
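
          // Illustrative sketch of JumpIfRoot ('if_undefined' is an assumed label in
          // scope): branch when x0 holds the undefined value.
          //   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &if_undefined);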
   1499 
   1500   // Load and check the instance type of an object for being a unique name.
   1501   // Loads the type into the second argument register.
   1502   // The object and type arguments can be the same register; in that case it
   1503   // will be overwritten with the type.
   1504   // Falls through if the object is a unique name; jumps to fail otherwise.
   1505   inline void IsObjectNameType(Register object, Register type, Label* fail);
   1506 
   1507   inline void IsObjectJSObjectType(Register heap_object,
   1508                                    Register map,
   1509                                    Register scratch,
   1510                                    Label* fail);
   1511 
   1512   // Check the instance type in the given map to see if it corresponds to a
   1513   // JS object type. Jump to the fail label if this is not the case and fall
   1514   // through otherwise. However, if the fail label is NULL, no branch will be
   1515   // performed and the flags will be updated. You can then test the flags for
   1516   // the "le" condition to check for a valid JS object type.
   1517   inline void IsInstanceJSObjectType(Register map,
   1518                                      Register scratch,
   1519                                      Label* fail);
   1520 
   1521   // Load and check the instance type of an object for being a string.
   1522   // Loads the type into the second argument register.
   1523   // The object and type arguments can be the same register; in that case it
   1524   // will be overwritten with the type.
   1525   // Jumps to not_string or string as appropriate. If the appropriate label is
   1526   // NULL, fall through.
   1527   inline void IsObjectJSStringType(Register object, Register type,
   1528                                    Label* not_string, Label* string = NULL);
   1529 
   1530   // Compare the contents of a register with an operand, and branch to true,
   1531   // false or fall through, depending on condition.
   1532   void CompareAndSplit(const Register& lhs,
   1533                        const Operand& rhs,
   1534                        Condition cond,
   1535                        Label* if_true,
   1536                        Label* if_false,
   1537                        Label* fall_through);
   1538 
   1539   // Test the bits of register defined by bit_pattern, and branch to
   1540   // if_any_set, if_all_clear or fall_through accordingly.
   1541   void TestAndSplit(const Register& reg,
   1542                     uint64_t bit_pattern,
   1543                     Label* if_all_clear,
   1544                     Label* if_any_set,
   1545                     Label* fall_through);
   1546 
   1547   // Check if a map for a JSObject indicates that the object has fast elements.
   1548   // Jump to the specified label if it does not.
   1549   void CheckFastElements(Register map, Register scratch, Label* fail);
   1550 
   1551   // Check if a map for a JSObject indicates that the object can have both smi
   1552   // and HeapObject elements.  Jump to the specified label if it does not.
   1553   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
   1554 
   1555   // Check to see if number can be stored as a double in FastDoubleElements.
   1556   // If it can, store it at the index specified by key_reg in the array,
   1557   // otherwise jump to fail.
   1558   void StoreNumberToDoubleElements(Register value_reg,
   1559                                    Register key_reg,
   1560                                    Register elements_reg,
   1561                                    Register scratch1,
   1562                                    FPRegister fpscratch1,
   1563                                    Label* fail,
   1564                                    int elements_offset = 0);
   1565 
   1566   // Picks out an array index from the hash field.
   1567   // Register use:
   1568   //   hash - holds the index's hash. Clobbered.
   1569   //   index - holds the overwritten index on exit.
   1570   void IndexFromHash(Register hash, Register index);
   1571 
   1572   // ---------------------------------------------------------------------------
   1573   // Inline caching support.
   1574 
   1575   void EmitSeqStringSetCharCheck(Register string,
   1576                                  Register index,
   1577                                  SeqStringSetCharCheckIndexType index_type,
   1578                                  Register scratch,
   1579                                  uint32_t encoding_mask);
   1580 
   1581   // Generate code for checking access rights - used for security checks
   1582   // on access to global objects across environments. The holder register
   1583   // is left untouched, whereas both scratch registers are clobbered.
   1584   void CheckAccessGlobalProxy(Register holder_reg,
   1585                               Register scratch1,
   1586                               Register scratch2,
   1587                               Label* miss);
   1588 
   1589   // Hash the integer value in the 'key' register.
   1590   // It uses the same algorithm as ComputeIntegerHash in utils.h.
   1591   void GetNumberHash(Register key, Register scratch);
   1592 
   1593   // Load value from the dictionary.
   1594   //
   1595   // elements - holds the slow-case elements of the receiver on entry.
   1596   //            Unchanged unless 'result' is the same register.
   1597   //
   1598   // key      - holds the smi key on entry.
   1599   //            Unchanged unless 'result' is the same register.
   1600   //
   1601   // result   - holds the result on exit if the load succeeded.
   1602   //            Allowed to be the same as 'elements' or 'key'.
   1603   //            Unchanged on bailout so 'elements' or 'key' can be used
   1604   //            in further computation.
   1605   void LoadFromNumberDictionary(Label* miss,
   1606                                 Register elements,
   1607                                 Register key,
   1608                                 Register result,
   1609                                 Register scratch0,
   1610                                 Register scratch1,
   1611                                 Register scratch2,
   1612                                 Register scratch3);
   1613 
   1614   // ---------------------------------------------------------------------------
   1615   // Frames.
   1616 
   1617   // Activation support.
   1618   void EnterFrame(StackFrame::Type type);
   1619   void LeaveFrame(StackFrame::Type type);
   1620 
   1621   // Returns map with validated enum cache in object register.
   1622   void CheckEnumCache(Register object,
   1623                       Register null_value,
   1624                       Register scratch0,
   1625                       Register scratch1,
   1626                       Register scratch2,
   1627                       Register scratch3,
   1628                       Label* call_runtime);
   1629 
   1630   // AllocationMemento support. Arrays may have an associated
   1631   // AllocationMemento object that can be checked for in order to pretransition
   1632   // to another type.
   1633   // On entry, receiver should point to the array object.
   1634   // If allocation info is present, the Z flag is set (so that the eq
   1635   // condition will pass).
   1636   void TestJSArrayForAllocationMemento(Register receiver,
   1637                                        Register scratch1,
   1638                                        Register scratch2,
   1639                                        Label* no_memento_found);
   1640 
   1641   void JumpIfJSArrayHasAllocationMemento(Register receiver,
   1642                                          Register scratch1,
   1643                                          Register scratch2,
   1644                                          Label* memento_found) {
   1645     Label no_memento_found;
   1646     TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
   1647                                     &no_memento_found);
   1648     B(eq, memento_found);
   1649     Bind(&no_memento_found);
   1650   }
   1651 
   1652   // The stack pointer has to switch between csp and jssp when setting up and
   1653   // destroying the exit frame. Hence preserving/restoring the registers is
   1654   // slightly more complicated than simple push/pop operations.
   1655   void ExitFramePreserveFPRegs();
   1656   void ExitFrameRestoreFPRegs();
   1657 
   1658   // Generates function and stub prologue code.
   1659   void StubPrologue();
   1660   void Prologue(bool code_pre_aging);
   1661 
   1662   // Enter exit frame. Exit frames are used when calling C code from generated
   1663   // (JavaScript) code.
   1664   //
   1665   // The stack pointer must be jssp on entry, and will be set to csp by this
   1666   // function. The frame pointer is also configured, but the only other
   1667   // registers modified by this function are the provided scratch register, and
   1668   // jssp.
   1669   //
   1670   // The 'extra_space' argument can be used to allocate some space in the exit
   1671   // frame that will be ignored by the GC. This space will be reserved in the
   1672   // bottom of the frame immediately above the return address slot.
   1673   //
   1674   // Set up a stack frame and registers as follows:
   1675   //         fp[8]: CallerPC (lr)
   1676   //   fp -> fp[0]: CallerFP (old fp)
   1677   //         fp[-8]: SPOffset (new csp)
   1678   //         fp[-16]: CodeObject()
   1679   //         fp[-16 - fp_size]: Saved doubles, if save_doubles is true.
   1680   //         csp[8]: Memory reserved for the caller if extra_space != 0.
   1681   //                 Alignment padding, if necessary.
   1682   //  csp -> csp[0]: Space reserved for the return address.
   1683   //
   1684   // This function also stores the new frame information in the top frame, so
   1685   // that the new frame becomes the current frame.
   1686   void EnterExitFrame(bool save_doubles,
   1687                       const Register& scratch,
   1688                       int extra_space = 0);
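
          // Illustrative sketch (the scratch register and the 'save_doubles' flag
          // are arbitrary here): bracketing a call into C code with an exit frame.
          //   __ EnterExitFrame(save_doubles, x10);
          //   // ... call the C function with csp as the stack pointer ...
          //   __ LeaveExitFrame(save_doubles, x10, true);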
   1689 
   1690   // Leave the current exit frame, after a C function has returned to generated
   1691   // (JavaScript) code.
   1692   //
   1693   // This effectively unwinds the operation of EnterExitFrame:
   1694   //  * Preserved doubles are restored (if save_doubles is true).
   1695   //  * The frame information is removed from the top frame.
   1696   //  * The exit frame is dropped.
   1697   //  * The stack pointer is reset to jssp.
   1698   //
   1699   // The stack pointer must be csp on entry.
   1700   void LeaveExitFrame(bool save_doubles,
   1701                       const Register& scratch,
   1702                       bool restore_context);
   1703 
   1704   void LoadContext(Register dst, int context_chain_length);
   1705 
   1706   // Emit code for a truncating division by a constant. The dividend register is
   1707   // unchanged. Dividend and result must be different.
   1708   void TruncatingDiv(Register result, Register dividend, int32_t divisor);
   1709 
   1710   // ---------------------------------------------------------------------------
   1711   // StatsCounter support
   1712 
   1713   void SetCounter(StatsCounter* counter, int value, Register scratch1,
   1714                   Register scratch2);
   1715   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
   1716                         Register scratch2);
   1717   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
   1718                         Register scratch2);
   1719 
   1720   // ---------------------------------------------------------------------------
   1721   // Garbage collector support (GC).
   1722 
   1723   enum RememberedSetFinalAction {
   1724     kReturnAtEnd,
   1725     kFallThroughAtEnd
   1726   };
   1727 
   1728   // Record in the remembered set the fact that we have a pointer to new space
   1729   // at the address pointed to by the addr register. Only works if addr is not
   1730   // in new space.
   1731   void RememberedSetHelper(Register object,  // Used for debug code.
   1732                            Register addr,
   1733                            Register scratch1,
   1734                            SaveFPRegsMode save_fp,
   1735                            RememberedSetFinalAction and_then);
   1736 
   1737   // Push and pop the registers that can hold pointers, as defined by the
   1738   // RegList constant kSafepointSavedRegisters.
   1739   void PushSafepointRegisters();
   1740   void PopSafepointRegisters();
   1741 
   1742   void PushSafepointRegistersAndDoubles();
   1743   void PopSafepointRegistersAndDoubles();
   1744 
   1745   // Store value in register src in the safepoint stack slot for register dst.
   1746   void StoreToSafepointRegisterSlot(Register src, Register dst) {
   1747     Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
   1748   }
   1749 
   1750   // Load the value of the src register from its safepoint stack slot
   1751   // into register dst.
   1752   void LoadFromSafepointRegisterSlot(Register dst, Register src) {
   1753     Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
   1754   }
   1755 
   1756   void CheckPageFlagSet(const Register& object,
   1757                         const Register& scratch,
   1758                         int mask,
   1759                         Label* if_any_set);
   1760 
   1761   void CheckPageFlagClear(const Register& object,
   1762                           const Register& scratch,
   1763                           int mask,
   1764                           Label* if_all_clear);
   1765 
   1766   void CheckMapDeprecated(Handle<Map> map,
   1767                           Register scratch,
   1768                           Label* if_deprecated);
   1769 
   1770   // Check if object is in new space and jump accordingly.
   1771   // Register 'object' is preserved.
   1772   void JumpIfNotInNewSpace(Register object,
   1773                            Label* branch) {
   1774     InNewSpace(object, ne, branch);
   1775   }
   1776 
   1777   void JumpIfInNewSpace(Register object,
   1778                         Label* branch) {
   1779     InNewSpace(object, eq, branch);
   1780   }
   1781 
   1782   // Notify the garbage collector that we wrote a pointer into an object.
   1783   // |object| is the object being stored into, |value| is the object being
   1784   // stored.  value and scratch registers are clobbered by the operation.
   1785   // The offset is the offset from the start of the object, not the offset from
   1786   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
   1787   void RecordWriteField(
   1788       Register object,
   1789       int offset,
   1790       Register value,
   1791       Register scratch,
   1792       LinkRegisterStatus lr_status,
   1793       SaveFPRegsMode save_fp,
   1794       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1795       SmiCheck smi_check = INLINE_SMI_CHECK,
   1796       PointersToHereCheck pointers_to_here_check_for_value =
   1797           kPointersToHereMaybeInteresting);
   1798 
   1799   // As above, but the offset has the tag presubtracted. For use with
   1800   // MemOperand(reg, off).
   1801   inline void RecordWriteContextSlot(
   1802       Register context,
   1803       int offset,
   1804       Register value,
   1805       Register scratch,
   1806       LinkRegisterStatus lr_status,
   1807       SaveFPRegsMode save_fp,
   1808       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1809       SmiCheck smi_check = INLINE_SMI_CHECK,
   1810       PointersToHereCheck pointers_to_here_check_for_value =
   1811           kPointersToHereMaybeInteresting) {
   1812     RecordWriteField(context,
   1813                      offset + kHeapObjectTag,
   1814                      value,
   1815                      scratch,
   1816                      lr_status,
   1817                      save_fp,
   1818                      remembered_set_action,
   1819                      smi_check,
   1820                      pointers_to_here_check_for_value);
   1821   }
   1822 
   1823   void RecordWriteForMap(
   1824       Register object,
   1825       Register map,
   1826       Register dst,
   1827       LinkRegisterStatus lr_status,
   1828       SaveFPRegsMode save_fp);
   1829 
   1830   // For a given |object| notify the garbage collector that the slot |address|
   1831   // has been written.  |value| is the object being stored. The value and
   1832   // address registers are clobbered by the operation.
   1833   void RecordWrite(
   1834       Register object,
   1835       Register address,
   1836       Register value,
   1837       LinkRegisterStatus lr_status,
   1838       SaveFPRegsMode save_fp,
   1839       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1840       SmiCheck smi_check = INLINE_SMI_CHECK,
   1841       PointersToHereCheck pointers_to_here_check_for_value =
   1842           kPointersToHereMaybeInteresting);
   1843 
   1844   // Checks the color of an object. If the object is already grey or black
   1845   // then we just fall through, since it is already live. If it is white and
   1846   // we can determine that it doesn't need to be scanned, then we just mark it
   1847   // black and fall through. For the rest we jump to the label so the
   1848   // incremental marker can fix its assumptions.
   1849   void EnsureNotWhite(Register object,
   1850                       Register scratch1,
   1851                       Register scratch2,
   1852                       Register scratch3,
   1853                       Register scratch4,
   1854                       Label* object_is_white_and_not_data);
   1855 
   1856   // Detects conservatively whether an object is data-only, i.e. it does not
   1857   // need to be scanned by the garbage collector.
   1858   void JumpIfDataObject(Register value,
   1859                         Register scratch,
   1860                         Label* not_data_object);
   1861 
   1862   // Helper for finding the mark bits for an address.
   1863   // Note that the behaviour slightly differs from other architectures.
   1864   // On exit:
   1865   //  - addr_reg is unchanged.
   1866   //  - The bitmap register points at the word with the mark bits.
   1867   //  - The shift register contains the index of the first color bit for this
   1868   //    object in the bitmap.
   1869   inline void GetMarkBits(Register addr_reg,
   1870                           Register bitmap_reg,
   1871                           Register shift_reg);
   1872 
   1873   // Check if an object has a given incremental marking color.
   1874   void HasColor(Register object,
   1875                 Register scratch0,
   1876                 Register scratch1,
   1877                 Label* has_color,
   1878                 int first_bit,
   1879                 int second_bit);
   1880 
   1881   void JumpIfBlack(Register object,
   1882                    Register scratch0,
   1883                    Register scratch1,
   1884                    Label* on_black);
   1885 
   1886 
   1887   // Get the location of a relocated constant (its address in the constant pool)
   1888   // from its load site.
   1889   void GetRelocatedValueLocation(Register ldr_location,
   1890                                  Register result);
   1891 
   1892 
   1893   // ---------------------------------------------------------------------------
   1894   // Debugging.
   1895 
   1896   // Calls Abort(msg) if the condition cond is not satisfied.
   1897   // Use --debug_code to enable.
   1898   void Assert(Condition cond, BailoutReason reason);
   1899   void AssertRegisterIsClear(Register reg, BailoutReason reason);
   1900   void AssertRegisterIsRoot(
   1901       Register reg,
   1902       Heap::RootListIndex index,
   1903       BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
   1904   void AssertFastElements(Register elements);
   1905 
   1906   // Abort if the specified register contains the invalid color bit pattern.
   1907   // The pattern must be in bits [1:0] of 'reg' register.
   1908   //
   1909   // If emit_debug_code() is false, this emits no code.
   1910   void AssertHasValidColor(const Register& reg);
   1911 
   1912   // Abort if 'object' register doesn't point to a string object.
   1913   //
   1914   // If emit_debug_code() is false, this emits no code.
   1915   void AssertIsString(const Register& object);
   1916 
   1917   // Like Assert(), but always enabled.
   1918   void Check(Condition cond, BailoutReason reason);
   1919   void CheckRegisterIsClear(Register reg, BailoutReason reason);
   1920 
   1921   // Print a message to stderr and abort execution.
   1922   void Abort(BailoutReason reason);
   1923 
   1924   // Conditionally load the cached Array transitioned map of type
   1925   // transitioned_kind from the native context if the map in register
   1926   // map_in_out is the cached Array map in the native context of
   1927   // expected_kind.
   1928   void LoadTransitionedArrayMapConditional(
   1929       ElementsKind expected_kind,
   1930       ElementsKind transitioned_kind,
   1931       Register map_in_out,
   1932       Register scratch1,
   1933       Register scratch2,
   1934       Label* no_map_match);
   1935 
   1936   void LoadGlobalFunction(int index, Register function);
   1937 
   1938   // Load the initial map from the global function. The registers function and
   1939   // map can be the same, function is then overwritten.
   1940   void LoadGlobalFunctionInitialMap(Register function,
   1941                                     Register map,
   1942                                     Register scratch);
   1943 
   1944   CPURegList* TmpList() { return &tmp_list_; }
   1945   CPURegList* FPTmpList() { return &fptmp_list_; }
   1946 
   1947   static CPURegList DefaultTmpList();
   1948   static CPURegList DefaultFPTmpList();
   1949 
   1950   // Like printf, but print at run-time from generated code.
   1951   //
   1952   // The caller must ensure that arguments for floating-point placeholders
   1953   // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
   1954   // placeholders are Registers.
   1955   //
   1956   // At the moment it is only possible to print the value of csp if it is the
   1957   // current stack pointer. Otherwise, the MacroAssembler will automatically
   1958   // update csp on every push (using BumpSystemStackPointer), so determining its
   1959   // value is difficult.
   1960   //
   1961   // Format placeholders that refer to more than one argument, or to a specific
   1962   // argument, are not supported. This includes formats like "%1$d" or "%.*d".
   1963   //
   1964   // This function automatically preserves caller-saved registers so that
   1965   // calling code can use Printf at any point without having to worry about
   1966   // corruption. The preservation mechanism generates a lot of code. If this is
   1967   // a problem, preserve the important registers manually and then call
   1968   // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
   1969   // implicitly preserved.
   1970   void Printf(const char * format,
   1971               CPURegister arg0 = NoCPUReg,
   1972               CPURegister arg1 = NoCPUReg,
   1973               CPURegister arg2 = NoCPUReg,
   1974               CPURegister arg3 = NoCPUReg);
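
          // Illustrative sketch of Printf: print an integer register and a double
          // register from generated code (debugging only).
          //   __ Printf("x0: %" PRId64 ", d0: %g\n", x0, d0);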
   1975 
   1976   // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
   1977   //
   1978   // The return code from the system printf call will be returned in x0.
   1979   void PrintfNoPreserve(const char * format,
   1980                         const CPURegister& arg0 = NoCPUReg,
   1981                         const CPURegister& arg1 = NoCPUReg,
   1982                         const CPURegister& arg2 = NoCPUReg,
   1983                         const CPURegister& arg3 = NoCPUReg);
   1984 
   1985   // Code ageing support functions.
   1986 
   1987   // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
   1988   // function as old, it replaces some of the function prologue (generated by
   1989   // FullCodeGenerator::Generate) with a call to a special stub (ultimately
   1990   // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
   1991   // function prologue to its initial young state (indicating that it has been
   1992   // recently run) and continues. A young function is therefore one which has a
   1993   // normal frame setup sequence, and an old function has a code age sequence
   1994   // which calls a code ageing stub.
   1995 
   1996   // Set up a basic stack frame for young code (or code exempt from ageing) with
   1997   // type FUNCTION. It may be patched later for code ageing support. This is
   1998   // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
   1999   //
   2000   // This function takes an Assembler so it can be called from either a
   2001   // MacroAssembler or a PatchingAssembler context.
   2002   static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
   2003 
   2004   // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
   2005   void EmitFrameSetupForCodeAgePatching();
   2006 
   2007   // Emit a code age sequence that calls the relevant code age stub. The code
   2008   // generated by this sequence is expected to replace the code generated by
   2009   // EmitFrameSetupForCodeAgePatching, and represents an old function.
   2010   //
   2011   // If stub is NULL, this function generates the code age sequence but omits
   2012   // the stub address that is normally embedded in the instruction stream. This
   2013   // can be used by debug code to verify code age sequences.
   2014   static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
   2015 
   2016   // Call EmitCodeAgeSequence from a MacroAssembler context.
   2017   void EmitCodeAgeSequence(Code* stub);
   2018 
   2019   // Return true if the sequence is a young sequence generated by
   2020   // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
   2021   // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
   2022   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
   2023 
   2024   // Jumps to found label if a prototype map has dictionary elements.
   2025   void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
   2026                                         Register scratch1, Label* found);
   2027 
   2028   // Perform necessary maintenance operations before a push or after a pop.
   2029   //
   2030   // Note that size is specified in bytes.
   2031   void PushPreamble(Operand total_size);
   2032   void PopPostamble(Operand total_size);
   2033 
   2034   void PushPreamble(int count, int size) { PushPreamble(count * size); }
   2035   void PopPostamble(int count, int size) { PopPostamble(count * size); }
   2036 
   2037  private:
   2038   // Helpers for CopyFields.
   2039   // These each implement CopyFields in a different way.
   2040   void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
   2041                                  Register scratch1, Register scratch2,
   2042                                  Register scratch3, Register scratch4,
   2043                                  Register scratch5);
   2044   void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
   2045                                      Register scratch1, Register scratch2,
   2046                                      Register scratch3, Register scratch4);
   2047   void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
   2048                                 Register scratch1, Register scratch2,
   2049                                 Register scratch3);
   2050 
   2051   // The actual Push and Pop implementations. These don't generate any code
   2052   // other than that required for the push or pop. This allows
   2053   // (Push|Pop)CPURegList to bundle together run-time assertions for a large
   2054   // block of registers.
   2055   //
   2056   // Note that size is per register, and is specified in bytes.
   2057   void PushHelper(int count, int size,
   2058                   const CPURegister& src0, const CPURegister& src1,
   2059                   const CPURegister& src2, const CPURegister& src3);
   2060   void PopHelper(int count, int size,
   2061                  const CPURegister& dst0, const CPURegister& dst1,
   2062                  const CPURegister& dst2, const CPURegister& dst3);
   2063 
   2064   // Call Printf. On a native build, a simple call will be generated, but if the
   2065   // simulator is being used then a suitable pseudo-instruction is used. The
   2066   // arguments and stack (csp) must be prepared by the caller as for a normal
   2067   // AAPCS64 call to 'printf'.
   2068   //
   2069   // The 'args' argument should point to an array of variable arguments in their
   2070   // proper PCS registers (and in calling order). The argument registers can
   2071   // have mixed types. The format string (x0) should not be included.
   2072   void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
   2073 
   2074   // Helper for throwing exceptions.  Compute a handler address and jump to
   2075   // it.  See the implementation for register usage.
   2076   void JumpToHandlerEntry(Register exception,
   2077                           Register object,
   2078                           Register state,
   2079                           Register scratch1,
   2080                           Register scratch2);
   2081 
   2082   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   2083   void InNewSpace(Register object,
   2084                   Condition cond,  // eq for new space, ne otherwise.
   2085                   Label* branch);
   2086 
   2087   // Try to represent a double as an int so that integer fast-paths may be
   2088   // used. Not every valid integer value is guaranteed to be caught.
   2089   // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
   2090   // is a W or X register.
   2091   //
   2092   // This does not distinguish between +0 and -0, so if this distinction is
   2093   // important it must be checked separately.
   2094   //
   2095   // On output the Z flag is set if the operation was successful.
   2096   void TryRepresentDoubleAsInt(Register as_int,
   2097                                FPRegister value,
   2098                                FPRegister scratch_d,
   2099                                Label* on_successful_conversion = NULL,
   2100                                Label* on_failed_conversion = NULL);
   2101 
   2102   bool generating_stub_;
   2103 #if DEBUG
   2104   // Tells whether any of the macro instructions can be used. When false, the
   2105   // MacroAssembler will assert if a method which can emit a variable number
   2106   // of instructions is called.
   2107   bool allow_macro_instructions_;
   2108 #endif
   2109   bool has_frame_;
   2110 
   2111   // The Abort method should call a V8 runtime function, but the CallRuntime
   2112   // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
   2113   // use a simpler abort mechanism that doesn't depend on CEntryStub.
   2114   //
   2115   // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
   2116   // being generated.
   2117   bool use_real_aborts_;
   2118 
   2119   // This handle will be patched with the code object on installation.
   2120   Handle<Object> code_object_;
   2121 
   2122   // The register to use as a stack pointer for stack operations.
   2123   Register sp_;
   2124 
   2125   // Scratch registers available for use by the MacroAssembler.
   2126   CPURegList tmp_list_;
   2127   CPURegList fptmp_list_;
   2128 
   2129   void InitializeNewString(Register string,
   2130                            Register length,
   2131                            Heap::RootListIndex map_index,
   2132                            Register scratch1,
   2133                            Register scratch2);
   2134 
   2135  public:
   2136   // Far branches resolving.
   2137   //
   2138   // The various classes of branch instructions with immediate offsets have
   2139   // different ranges. While the Assembler will fail to assemble a branch
   2140   // exceeding its range, the MacroAssembler offers a mechanism to resolve
   2141   // branches to too distant targets, either by tweaking the generated code to
   2142   // use branch instructions with wider ranges or generating veneers.
   2143   //
   2144   // Currently branches to distant targets are resolved using unconditional
   2145   // branch instructions with a range of +-128MB. If that becomes too little
   2146   // (!), the mechanism can be extended to generate special veneers for really
   2147   // far targets.
   2148 
   2149   // Helps resolve branching to labels potentially out of range.
   2150   // If the label is not bound, it registers the information necessary to later
   2151   // be able to emit a veneer for this branch if necessary.
   2152   // If the label is bound, it returns true if the label (or the previous link
   2153   // in the label chain) is out of range. In that case the caller is responsible
   2154   // for generating appropriate code.
   2155   // Otherwise it returns false.
   2156   // This function also checks whether veneers need to be emitted.
   2157   bool NeedExtraInstructionsOrRegisterBranch(Label *label,
   2158                                              ImmBranchType branch_type);
   2159 };
   2160 
   2161 
   2162 // Use this scope when you need a one-to-one mapping between methods and
   2163 // instructions. This scope prevents the MacroAssembler from being called and
   2164 // literal pools from being emitted. It also asserts the number of instructions
   2165 // emitted is what you specified when creating the scope.
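        //
        // Illustrative sketch (the instruction choice is arbitrary): emit exactly
        // two raw instructions with pools blocked.
        //   {
        //     InstructionAccurateScope scope(&masm, 2);
        //     masm.add(x0, x0, x1);
        //     masm.sub(x2, x2, x3);
        //   }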
   2166 class InstructionAccurateScope BASE_EMBEDDED {
   2167  public:
   2168   InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
   2169       : masm_(masm)
   2170 #ifdef DEBUG
   2171         ,
   2172         size_(count * kInstructionSize)
   2173 #endif
   2174   {
   2175     // Before blocking the const pool, see if it needs to be emitted.
   2176     masm_->CheckConstPool(false, true);
   2177     masm_->CheckVeneerPool(false, true);
   2178 
   2179     masm_->StartBlockPools();
   2180 #ifdef DEBUG
   2181     if (count != 0) {
   2182       masm_->bind(&start_);
   2183     }
   2184     previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
   2185     masm_->set_allow_macro_instructions(false);
   2186 #endif
   2187   }
   2188 
   2189   ~InstructionAccurateScope() {
   2190     masm_->EndBlockPools();
   2191 #ifdef DEBUG
   2192     if (start_.is_bound()) {
   2193       ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
   2194     }
   2195     masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
   2196 #endif
   2197   }
   2198 
   2199  private:
   2200   MacroAssembler* masm_;
   2201 #ifdef DEBUG
   2202   size_t size_;
   2203   Label start_;
   2204   bool previous_allow_macro_instructions_;
   2205 #endif
   2206 };
   2207 
   2208 
   2209 // This scope utility allows scratch registers to be managed safely. The
   2210 // MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
   2211 // registers. These registers can be allocated on demand, and will be returned
   2212 // at the end of the scope.
   2213 //
   2214 // When the scope ends, the MacroAssembler's lists will be restored to their
   2215 // original state, even if the lists were modified by some other means.
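        //
        // Illustrative sketch: acquire a temporary X register inside a helper.
        //   {
        //     UseScratchRegisterScope temps(masm);
        //     Register scratch = temps.AcquireX();
        //     // ... use scratch ...
        //   }  // 'scratch' is returned to the MacroAssembler's TmpList() here.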
   2216 class UseScratchRegisterScope {
   2217  public:
   2218   explicit UseScratchRegisterScope(MacroAssembler* masm)
   2219       : available_(masm->TmpList()),
   2220         availablefp_(masm->FPTmpList()),
   2221         old_available_(available_->list()),
   2222         old_availablefp_(availablefp_->list()) {
   2223     ASSERT(available_->type() == CPURegister::kRegister);
   2224     ASSERT(availablefp_->type() == CPURegister::kFPRegister);
   2225   }
   2226 
   2227   ~UseScratchRegisterScope();
   2228 
   2229   // Take a register from the appropriate temps list. It will be returned
   2230   // automatically when the scope ends.
   2231   Register AcquireW() { return AcquireNextAvailable(available_).W(); }
   2232   Register AcquireX() { return AcquireNextAvailable(available_).X(); }
   2233   FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
   2234   FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
   2235 
   2236   Register UnsafeAcquire(const Register& reg) {
   2237     return Register(UnsafeAcquire(available_, reg));
   2238   }
   2239 
   2240   Register AcquireSameSizeAs(const Register& reg);
   2241   FPRegister AcquireSameSizeAs(const FPRegister& reg);
   2242 
   2243  private:
   2244   static CPURegister AcquireNextAvailable(CPURegList* available);
   2245   static CPURegister UnsafeAcquire(CPURegList* available,
   2246                                    const CPURegister& reg);
   2247 
   2248   // Available scratch registers.
   2249   CPURegList* available_;     // kRegister
   2250   CPURegList* availablefp_;   // kFPRegister
   2251 
   2252   // The state of the available lists at the start of this scope.
   2253   RegList old_available_;     // kRegister
   2254   RegList old_availablefp_;   // kFPRegister
   2255 };
   2256 
   2257 
   2258 inline MemOperand ContextMemOperand(Register context, int index) {
   2259   return MemOperand(context, Context::SlotOffset(index));
   2260 }
   2261 
   2262 inline MemOperand GlobalObjectMemOperand() {
   2263   return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
   2264 }
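
        // Illustrative sketch (assumes cp holds the current context, as these
        // helpers expect):
        //   __ Ldr(x0, GlobalObjectMemOperand());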
   2265 
   2266 
   2267 // Encode and decode information about patchable inline SMI checks.
   2268 class InlineSmiCheckInfo {
   2269  public:
   2270   explicit InlineSmiCheckInfo(Address info);
   2271 
   2272   bool HasSmiCheck() const {
   2273     return smi_check_ != NULL;
   2274   }
   2275 
   2276   const Register& SmiRegister() const {
   2277     return reg_;
   2278   }
   2279 
   2280   Instruction* SmiCheck() const {
   2281     return smi_check_;
   2282   }
   2283 
   2284   // Use MacroAssembler::InlineData to emit information about patchable inline
   2285   // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'smi_check'
   2286   // to indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
   2287   //
   2288   // The generated patch information can be read using the InlineSmiCheckInfo
   2289   // class.
   2290   static void Emit(MacroAssembler* masm, const Register& reg,
   2291                    const Label* smi_check);
   2292 
   2293   // Emit information to indicate that there is no inline SMI check.
   2294   static void EmitNotInlined(MacroAssembler* masm) {
   2295     Label unbound;
   2296     Emit(masm, NoReg, &unbound);
   2297   }
   2298 
   2299  private:
   2300   Register reg_;
   2301   Instruction* smi_check_;
   2302 
   2303   // Fields in the data encoded by InlineData.
   2304 
   2305   // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
   2306   // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
   2307   // used in a patchable check. The Emit() method checks this.
   2308   //
   2309   // Note that the total size of the fields is restricted by the underlying
   2310   // storage size handled by the BitField class, which is a uint32_t.
   2311   class RegisterBits : public BitField<unsigned, 0, 5> {};
   2312   class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
   2313 };
   2314 
   2315 } }  // namespace v8::internal
   2316 
   2317 #ifdef GENERATED_CODE_COVERAGE
   2318 #error "Unsupported option"
   2319 #define CODE_COVERAGE_STRINGIFY(x) #x
   2320 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
   2321 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
   2322 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
   2323 #else
   2324 #define ACCESS_MASM(masm) masm->
   2325 #endif
   2326 
   2327 #endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
   2328