// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/bailout-reason.h"
#include "src/globals.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"

// Simulator specific helpers.
#if USE_SIMULATOR
  // TODO(all): If possible automatically prepend an indicator like
  // UNIMPLEMENTED or LOCATION.
  #define ASM_UNIMPLEMENTED(message)                                         \
  __ Debug(message, __LINE__, NO_PARAM)
  #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
  __ Debug(message, __LINE__,                                                \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
  #define ASM_LOCATION(message)                                              \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
  #define ASM_UNIMPLEMENTED(message)
  #define ASM_UNIMPLEMENTED_BREAK(message)
  #define ASM_LOCATION(message)
#endif


namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                      \
  V(Ldrb, Register&, rt, LDRB_w)                              \
  V(Strb, Register&, rt, STRB_w)                              \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w)  \
  V(Ldrh, Register&, rt, LDRH_w)                              \
  V(Strh, Register&, rt, STRH_w)                              \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w)  \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                     \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
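
// As an illustrative note (not in the original source): each V(...) entry
// above is expanded by a caller-supplied macro into a declaration. For
// example, with the DECLARE_FUNCTION macro defined later in this file,
//   LS_MACRO_LIST(DECLARE_FUNCTION)
// declares, for the Ldrb entry:
//   inline void Ldrb(const Register& rt, const MemOperand& addr);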


// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);


// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of these; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
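
// For illustration (not in the original source): architectural conditions
// invert via NegateCondition, while the pseudo branch types toggle with their
// pair via the XOR-with-1 trick relied upon by a STATIC_ASSERT later in this
// file:
//   InvertBranchType(integer_eq) == integer_ne
//   InvertBranchType(reg_zero)   == reg_not_zero
//   InvertBranchType(always)     == never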

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);
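
  // Illustrative usage sketch (not in the original source; assumes the usual
  // '__' shorthand for the enclosing MacroAssembler):
  //   __ And(x0, x1, Operand(0xffff));  // x0 = x1 & 0xffff
  //   __ Tst(w2, Operand(1 << 3));      // set flags for w2 & (1 << 3)
  // Immediates with no direct encoding are expected to be synthesized into a
  // scratch register by LogicalMacro.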

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);
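
  // Illustrative usage sketch (not in the original source):
  //   __ Add(x0, x1, Operand(x2, LSL, 2));  // x0 = x1 + (x2 << 2)
  //   __ Subs(x3, x3, Operand(1));          // x3 -= 1, updating flags
  //   __ Cmp(w4, Operand(42));              // set flags for w4 - 42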

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true on success, updating the contents of dst;
  // returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
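
  // Illustrative sketch (not in the original source): for immediates, Mov may
  // pick a MOVZ/MOVN/MOVK sequence or a single ORR with a logical immediate:
  //   __ Mov(x0, 0xffffffffffffffffUL);  // one MOVN
  //   __ Mov(x0, 0x1234567812345678UL);  // multi-instruction sequence
  // TryOneInstrMoveImmediate reports whether a single-instruction encoding
  // exists, so callers can fall back to a scratch register otherwise.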

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);
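
  // Illustrative sketch (not in the original source): conditional compare and
  // select compose into branchless range checks, e.g. w1 = (10 <= w0 <= 20):
  //   __ Cmp(w0, Operand(10));
  //   __ Ccmp(w0, Operand(20), CFlag, hs);  // if w0 >= 10, compare with 20;
  //                                         // otherwise force C=1 so that
  //                                         // the 'ls' test below fails
  //   __ Csel(w1, w2, w3, ls);              // w1 = in_range ? w2 : w3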

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);
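
  // Illustrative usage sketch (not in the original source):
  //   __ Ldr(x0, MemOperand(x1, 8));             // load from [x1 + 8]
  //   __ Str(w2, MemOperand(x1, 4, PostIndex));  // store, then x1 += 4
  //   __ Ldp(x3, x4, MemOperand(x5, 16));        // load a pair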

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero      == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always        == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
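
  // Illustrative usage sketch (not in the original source):
  //   __ Push(x0, x1);  // equivalent to Push(x0); Push(x1);
  //   ...
  //   __ Pop(x1, x0);   // pops in the reverse order, restoring both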

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
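
  // Illustrative usage sketch (not in the original source):
  //   RegList saved = x0.Bit() | x1.Bit() | lr.Bit();
  //   __ PushXRegList(saved);
  //   ...
  //   __ PopXRegList(saved);
  // Higher-numbered registers end up at higher addresses, as described above.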

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
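
  // Illustrative usage sketch (not in the original source):
  //   MacroAssembler::PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);     // mixed sizes and types are allowed
  //   queue.PushQueued();  // flushed as one efficient push sequence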

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);
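
  // Illustrative usage sketch (not in the original source):
  //   __ Claim(2);             // reserve 2 * kXRegSize bytes
  //   __ Poke(x0, 0);          // store x0 at [StackPointer()]
  //   __ Poke(x1, kXRegSize);  // store x1 just above it
  //   ...
  //   __ Peek(x0, 0);
  //   __ Drop(2);              // release the space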

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
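
  // Illustrative usage sketch (not in the original source; 'not_smi' is a
  // caller-defined Label):
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);
  // Single-bit patterns can be emitted as a tbnz/tbz; wider patterns are
  // expected to fall back to a tst + b.cond sequence.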

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two character string;
  // it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
  // enabled, then csp will be dereferenced to cause the processor
  // (or simulator) to abort if it is not properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()). This function will ensure that the
  // new value of the system stack pointer remains aligned to 16 bytes, and
  // is lower than or equal to the value of the current stack pointer.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const uint64_t shift = Field::kShift;
    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
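
  // Illustrative usage sketch (not in the original source), assuming a
  // BitField-style 'Field' providing kShift and kMask:
  //   __ DecodeField<Map::ElementsKindBits>(x0, x1);
  // extracts the field from x1 into x0 with a single Ubfx.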

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);
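
  // Illustrative usage sketch (not in the original source; 'slow' is a
  // caller-defined Label):
  //   __ JumpIfNotSmi(x0, &slow);
  //   __ SmiUntag(x1, x0);  // x1 = untagged integer value of x0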

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Generate code to do a lookup in the number string cache. If the number in
  // the 'object' register is found in the cache, the generated code falls
  // through with the result in the 'result' register. The 'object' and
  // 'result' registers can be the same. If the number is not found in the
  // cache, the code jumps to the label 'not_found' with the contents of the
  // 'object' register unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // ---- Object Utilities ----

  // Copy fields from 'src' to 'dst', where both are tagged objects.
  // The 'temps' list is a list of X registers which can be used for scratch
  // values. The temps list must include at least one register.
  //
  // Currently, CopyFields cannot make use of more than three registers from
  // the 'temps' list.
  //
  // CopyFields expects to be able to take at least two registers from
  // MacroAssembler::TmpList().
  void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);

  // Starting at address in dst, initialize field_count 64-bit fields with
  // 64-bit value in register filler. Register dst is corrupted.
  void FillFields(Register dst,
                  Register field_count,
                  Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where the
  // last byte was read or written and length will be zero. Hint may be used to
  // determine which is the most efficient algorithm to use for copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----


  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility with architecture independent code.
  inline void jmp(Label* L) { B(L); }

  // Passes the thrown value to the handler at the top of the try handler
  // chain. Register value must be x0.
  void Throw(Register value,
             Register scratch1,
             Register scratch2,
             Register scratch3,
             Register scratch4);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain. Register value must be x0.
  void ThrowUncatchable(Register value,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Register scratch4);

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }
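
  // Illustrative usage sketch (not in the original source; assumes
  // Runtime::kStackGuard takes no arguments):
  //   __ CallRuntime(Runtime::kStackGuard, 0);          // no FP save
  //   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);  // saves FP registers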
   1112 
   1113   void TailCallRuntime(Runtime::FunctionId fid,
   1114                        int num_arguments,
   1115                        int result_size);
   1116 
   1117   int ActivationFrameAlignment();
   1118 
   1119   // Calls a C function.
   1120   // The called function is not allowed to trigger a
   1121   // garbage collection, since that might move the code and invalidate the
   1122   // return address (unless this is somehow accounted for by the called
   1123   // function).
   1124   void CallCFunction(ExternalReference function,
   1125                      int num_reg_arguments);
   1126   void CallCFunction(ExternalReference function,
   1127                      int num_reg_arguments,
   1128                      int num_double_arguments);
   1129   void CallCFunction(Register function,
   1130                      int num_reg_arguments,
   1131                      int num_double_arguments);
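          // A sketch of a typical C call (assumes the power_double_double
          // external reference is available in this V8 version; arguments
          // follow AAPCS64, integers in x0-x7 and doubles in d0-d7):
          //
          //   __ Fmov(d0, 2.0);
          //   __ Fmov(d1, 10.0);
          //   __ CallCFunction(
          //       ExternalReference::power_double_double_function(isolate()),
          //       0, 2);  // No integer arguments, two double arguments.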
   1132 
   1133   // Calls an API function. Allocates HandleScope, extracts returned value
   1134   // from handle and propagates exceptions.
   1135   // 'stack_space' is the space to be unwound on exit (includes the call JS
   1136   // arguments space and the additional space allocated for the fast call).
   1137   // 'spill_offset' is the offset from the stack pointer where
   1138   // CallApiFunctionAndReturn can spill registers.
   1139   void CallApiFunctionAndReturn(Register function_address,
   1140                                 ExternalReference thunk_ref,
   1141                                 int stack_space,
   1142                                 int spill_offset,
   1143                                 MemOperand return_value_operand,
   1144                                 MemOperand* context_restore_operand);
   1145 
   1146   // The number of registers that CallApiFunctionAndReturn will need to save on
   1147   // the stack. The space for these registers needs to be allocated in the
   1148   // ExitFrame before calling CallApiFunctionAndReturn.
   1149   static const int kCallApiFunctionSpillSpace = 4;
   1150 
   1151   // Jump to a runtime routine.
   1152   void JumpToExternalReference(const ExternalReference& builtin);
   1153   // Tail call of a runtime routine (jump).
   1154   // Like JumpToExternalReference, but also takes care of passing the number
   1155   // of parameters.
   1156   void TailCallExternalReference(const ExternalReference& ext,
   1157                                  int num_arguments,
   1158                                  int result_size);
   1159   void CallExternalReference(const ExternalReference& ext,
   1160                              int num_arguments);
   1161 
   1162 
   1163   // Invoke specified builtin JavaScript function. Adds an entry to
   1164   // the unresolved list if the name does not resolve.
   1165   void InvokeBuiltin(Builtins::JavaScript id,
   1166                      InvokeFlag flag,
   1167                      const CallWrapper& call_wrapper = NullCallWrapper());
   1168 
   1169   // Store the code object for the given builtin in the target register and
   1170   // setup the function in the function register.
   1171   void GetBuiltinEntry(Register target,
   1172                        Register function,
   1173                        Builtins::JavaScript id);
   1174 
   1175   // Store the function for the given builtin in the target register.
   1176   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
   1177 
   1178   void Jump(Register target);
   1179   void Jump(Address target, RelocInfo::Mode rmode);
   1180   void Jump(Handle<Code> code, RelocInfo::Mode rmode);
   1181   void Jump(intptr_t target, RelocInfo::Mode rmode);
   1182 
   1183   void Call(Register target);
   1184   void Call(Label* target);
   1185   void Call(Address target, RelocInfo::Mode rmode);
   1186   void Call(Handle<Code> code,
   1187             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
   1188             TypeFeedbackId ast_id = TypeFeedbackId::None());
   1189 
   1190   // For every Call variant, there is a matching CallSize function that returns
   1191   // the size (in bytes) of the call sequence.
   1192   static int CallSize(Register target);
   1193   static int CallSize(Label* target);
   1194   static int CallSize(Address target, RelocInfo::Mode rmode);
   1195   static int CallSize(Handle<Code> code,
   1196                       RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
   1197                       TypeFeedbackId ast_id = TypeFeedbackId::None());
   1198 
   1199   // Registers used through the invocation chain are hard-coded.
   1200   // We force passing the parameters to ensure the contracts are correctly
   1201   // honoured by the caller.
   1202   // 'function' must be x1.
   1203   // 'actual' must use an immediate or x0.
   1204   // 'expected' must use an immediate or x2.
   1205   // 'call_kind' must be x5.
   1206   void InvokePrologue(const ParameterCount& expected,
   1207                       const ParameterCount& actual,
   1208                       Handle<Code> code_constant,
   1209                       Register code_reg,
   1210                       Label* done,
   1211                       InvokeFlag flag,
   1212                       bool* definitely_mismatches,
   1213                       const CallWrapper& call_wrapper);
   1214   void InvokeCode(Register code,
   1215                   const ParameterCount& expected,
   1216                   const ParameterCount& actual,
   1217                   InvokeFlag flag,
   1218                   const CallWrapper& call_wrapper);
   1219   // Invoke the JavaScript function in the given register.
   1220   // Changes the current context to the context in the function before invoking.
   1221   void InvokeFunction(Register function,
   1222                       const ParameterCount& actual,
   1223                       InvokeFlag flag,
   1224                       const CallWrapper& call_wrapper);
   1225   void InvokeFunction(Register function,
   1226                       const ParameterCount& expected,
   1227                       const ParameterCount& actual,
   1228                       InvokeFlag flag,
   1229                       const CallWrapper& call_wrapper);
   1230   void InvokeFunction(Handle<JSFunction> function,
   1231                       const ParameterCount& expected,
   1232                       const ParameterCount& actual,
   1233                       InvokeFlag flag,
   1234                       const CallWrapper& call_wrapper);
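          // A usage sketch honouring the register contracts listed above (the
          // flag and wrapper choices are illustrative):
          //
          //   ParameterCount actual(x0);  // 'actual' must use an immediate or x0.
          //   __ InvokeFunction(x1,       // 'function' must be x1.
          //                     actual, CALL_FUNCTION, NullCallWrapper());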
   1235 
   1236 
   1237   // ---- Floating point helpers ----
   1238 
   1239   // Perform a conversion from a double to a signed int64. If the input fits in
   1240   // range of the 64-bit result, execution branches to done. Otherwise,
   1241   // execution falls through, and the sign of the result can be used to
   1242   // determine if overflow was towards positive or negative infinity.
   1243   //
   1244   // On successful conversion, the least significant 32 bits of the result are
   1245   // equivalent to the ECMA-262 operation "ToInt32".
   1246   //
   1247   // Only public for the test code in test-code-stubs-arm64.cc.
   1248   void TryConvertDoubleToInt64(Register result,
   1249                                DoubleRegister input,
   1250                                Label* done);
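          // A usage sketch (the register and label names are illustrative):
          //
          //   Label done;
          //   __ TryConvertDoubleToInt64(x0, d0, &done);
          //   // Fell through: d0 was out of range, and the sign of x0 tells us
          //   // whether the overflow was towards positive or negative infinity.
          //   __ Bind(&done);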
   1251 
   1252   // Performs a truncating conversion of a floating point number as used by
   1253   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
   1254   // Exits with 'result' holding the answer.
   1255   void TruncateDoubleToI(Register result, DoubleRegister double_input);
   1256 
   1257   // Performs a truncating conversion of a heap number as used by
   1258   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
   1259   // must be different registers.  Exits with 'result' holding the answer.
   1260   void TruncateHeapNumberToI(Register result, Register object);
   1261 
   1262   // Converts the smi or heap number in object to an int32 using the rules
   1263   // for ToInt32 as described in ECMA-262 9.5: the value is truncated
   1264   // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
   1265   // different registers.
   1266   void TruncateNumberToI(Register object,
   1267                          Register result,
   1268                          Register heap_number_map,
   1269                          Label* not_int32);
   1270 
   1271   // ---- Code generation helpers ----
   1272 
   1273   void set_generating_stub(bool value) { generating_stub_ = value; }
   1274   bool generating_stub() const { return generating_stub_; }
   1275 #if DEBUG
   1276   void set_allow_macro_instructions(bool value) {
   1277     allow_macro_instructions_ = value;
   1278   }
   1279   bool allow_macro_instructions() const { return allow_macro_instructions_; }
   1280 #endif
   1281   bool use_real_aborts() const { return use_real_aborts_; }
   1282   void set_has_frame(bool value) { has_frame_ = value; }
   1283   bool has_frame() const { return has_frame_; }
   1284   bool AllowThisStubCall(CodeStub* stub);
   1285 
   1286   class NoUseRealAbortsScope {
   1287    public:
   1288     explicit NoUseRealAbortsScope(MacroAssembler* masm) :
   1289         saved_(masm->use_real_aborts_), masm_(masm) {
   1290       masm_->use_real_aborts_ = false;
   1291     }
   1292     ~NoUseRealAbortsScope() {
   1293       masm_->use_real_aborts_ = saved_;
   1294     }
   1295    private:
   1296     bool saved_;
   1297     MacroAssembler* masm_;
   1298   };
   1299 
   1300   // ---------------------------------------------------------------------------
   1301   // Debugger Support
   1302 
   1303   void DebugBreak();
   1304 
   1305   // ---------------------------------------------------------------------------
   1306   // Exception handling
   1307 
   1308   // Push a new try handler and link into try handler chain.
   1309   void PushTryHandler(StackHandler::Kind kind, int handler_index);
   1310 
   1311   // Unlink the stack handler on top of the stack from the try handler chain.
   1312   // Must preserve the result register.
   1313   void PopTryHandler();
   1314 
   1315 
   1316   // ---------------------------------------------------------------------------
   1317   // Allocation support
   1318 
   1319   // Allocate an object in new space or old pointer space. The object_size is
   1320   // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
   1321   // is passed. The allocated object is returned in result.
   1322   //
   1323   // If the new space is exhausted control continues at the gc_required label.
   1324   // In this case, the result and scratch registers may still be clobbered.
   1325   // If flags includes TAG_OBJECT, the result is tagged as a heap object.
   1326   void Allocate(Register object_size,
   1327                 Register result,
   1328                 Register scratch1,
   1329                 Register scratch2,
   1330                 Label* gc_required,
   1331                 AllocationFlags flags);
   1332 
   1333   void Allocate(int object_size,
   1334                 Register result,
   1335                 Register scratch1,
   1336                 Register scratch2,
   1337                 Label* gc_required,
   1338                 AllocationFlags flags);
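          // A sketch of a fixed-size allocation (the registers and the use of
          // HeapNumber::kSize are illustrative):
          //
          //   Label gc_required;
          //   __ Allocate(HeapNumber::kSize, x0, x1, x2, &gc_required, TAG_OBJECT);
          //   // x0 now holds a tagged pointer to the new object.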
   1339 
   1340   // Undo allocation in new space. The object passed and objects allocated after
   1341   // it will no longer be allocated. The caller must make sure that no pointers
   1342   // are left to the object(s) no longer allocated as they would be invalid when
   1343   // allocation is undone.
   1344   void UndoAllocationInNewSpace(Register object, Register scratch);
   1345 
   1346   void AllocateTwoByteString(Register result,
   1347                              Register length,
   1348                              Register scratch1,
   1349                              Register scratch2,
   1350                              Register scratch3,
   1351                              Label* gc_required);
   1352   void AllocateOneByteString(Register result, Register length,
   1353                              Register scratch1, Register scratch2,
   1354                              Register scratch3, Label* gc_required);
   1355   void AllocateTwoByteConsString(Register result,
   1356                                  Register length,
   1357                                  Register scratch1,
   1358                                  Register scratch2,
   1359                                  Label* gc_required);
   1360   void AllocateOneByteConsString(Register result, Register length,
   1361                                  Register scratch1, Register scratch2,
   1362                                  Label* gc_required);
   1363   void AllocateTwoByteSlicedString(Register result,
   1364                                    Register length,
   1365                                    Register scratch1,
   1366                                    Register scratch2,
   1367                                    Label* gc_required);
   1368   void AllocateOneByteSlicedString(Register result, Register length,
   1369                                    Register scratch1, Register scratch2,
   1370                                    Label* gc_required);
   1371 
   1372   // Allocates a heap number or jumps to the gc_required label if the young
   1373   // space is full and a scavenge is needed.
   1374   // All registers are clobbered.
   1375   // If no heap_number_map register is provided, the function will take care of
   1376   // loading it.
   1377   void AllocateHeapNumber(Register result,
   1378                           Label* gc_required,
   1379                           Register scratch1,
   1380                           Register scratch2,
   1381                           CPURegister value = NoFPReg,
   1382                           CPURegister heap_number_map = NoReg,
   1383                           MutableMode mode = IMMUTABLE);
   1384 
   1385   // ---------------------------------------------------------------------------
   1386   // Support functions.
   1387 
   1388   // Tries to get the function prototype of a function and puts the value in the
   1389   // result register. Checks that the function really is a function and jumps
   1390   // to the miss label if the fast checks fail. The function register will be
   1391   // untouched; the other registers may be clobbered.
   1392   enum BoundFunctionAction {
   1393     kMissOnBoundFunction,
   1394     kDontMissOnBoundFunction
   1395   };
   1396 
   1397   void TryGetFunctionPrototype(Register function,
   1398                                Register result,
   1399                                Register scratch,
   1400                                Label* miss,
   1401                                BoundFunctionAction action =
   1402                                  kDontMissOnBoundFunction);
   1403 
   1404   // Compare object type for heap object.  heap_object contains a non-Smi
   1405   // whose object type should be compared with the given type.  This both
   1406   // sets the flags and leaves the object type in the type_reg register.
   1407   // It leaves the map in the map register (unless the type_reg and map register
   1408   // are the same register).  It leaves the heap object in the heap_object
   1409   // register unless the heap_object register is the same register as one of the
   1410   // other registers.
   1411   void CompareObjectType(Register heap_object,
   1412                          Register map,
   1413                          Register type_reg,
   1414                          InstanceType type);
   1415 
   1416 
   1417   // Compare object type for heap object, and branch if equal (or not).
   1418   // heap_object contains a non-Smi whose object type should be compared with
   1419   // the given type.  This both sets the flags and leaves the object type in
   1420   // the type_reg register. It leaves the map in the map register (unless the
   1421   // type_reg and map register are the same register).  It leaves the heap
   1422   // object in the heap_object register unless the heap_object register is the
   1423   // same register as one of the other registers.
   1424   void JumpIfObjectType(Register object,
   1425                         Register map,
   1426                         Register type_reg,
   1427                         InstanceType type,
   1428                         Label* if_cond_pass,
   1429                         Condition cond = eq);
   1430 
   1431   void JumpIfNotObjectType(Register object,
   1432                            Register map,
   1433                            Register type_reg,
   1434                            InstanceType type,
   1435                            Label* if_not_object);
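          // A usage sketch (the registers and instance type are illustrative):
          //
          //   Label not_function;
          //   __ JumpIfNotObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &not_function);
          //   // Fell through: x0 is a JSFunction; x1 holds its map and x2 its type.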
   1436 
   1437   // Compare instance type in a map.  map contains a valid map object whose
   1438   // object type should be compared with the given type.  This both
   1439   // sets the flags and leaves the object type in the type_reg register.
   1440   void CompareInstanceType(Register map,
   1441                            Register type_reg,
   1442                            InstanceType type);
   1443 
   1444   // Compare an object's map with the specified map. Condition flags are set
   1445   // with result of map compare.
   1446   void CompareObjectMap(Register obj, Heap::RootListIndex index);
   1447 
   1448   // Compare an object's map with the specified map. Condition flags are set
   1449   // with result of map compare.
   1450   void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
   1451 
   1452   // As above, but the map of the object is already loaded into the register,
   1453   // and that register is preserved by the generated code.
   1454   void CompareMap(Register obj_map,
   1455                   Handle<Map> map);
   1456 
   1457   // Check if the map of an object is equal to a specified map and branch to
   1458   // label if not. Skip the smi check if not required (object is known to be a
   1459   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
   1460   // against maps that are ElementsKind transition maps of the specified map.
   1461   void CheckMap(Register obj,
   1462                 Register scratch,
   1463                 Handle<Map> map,
   1464                 Label* fail,
   1465                 SmiCheckType smi_check_type);
   1466 
   1467 
   1468   void CheckMap(Register obj,
   1469                 Register scratch,
   1470                 Heap::RootListIndex index,
   1471                 Label* fail,
   1472                 SmiCheckType smi_check_type);
   1473 
   1474   // As above, but the map of the object is already loaded into obj_map, and is
   1475   // preserved.
   1476   void CheckMap(Register obj_map,
   1477                 Handle<Map> map,
   1478                 Label* fail,
   1479                 SmiCheckType smi_check_type);
   1480 
   1481   // Check if the map of an object is equal to a specified map and branch to a
   1482   // specified target if equal. Skip the smi check if not required (object is
   1483   // known to be a heap object)
   1484   void DispatchMap(Register obj,
   1485                    Register scratch,
   1486                    Handle<Map> map,
   1487                    Handle<Code> success,
   1488                    SmiCheckType smi_check_type);
   1489 
   1490   // Test the bitfield of the heap object map with mask and set the condition
   1491   // flags. The object register is preserved.
   1492   void TestMapBitfield(Register object, uint64_t mask);
   1493 
   1494   // Load the elements kind field from a map, and return it in the result
   1495   // register.
   1496   void LoadElementsKindFromMap(Register result, Register map);
   1497 
   1498   // Compare the object in a register to a value from the root list.
   1499   void CompareRoot(const Register& obj, Heap::RootListIndex index);
   1500 
   1501   // Compare the object in a register to a value and jump if they are equal.
   1502   void JumpIfRoot(const Register& obj,
   1503                   Heap::RootListIndex index,
   1504                   Label* if_equal);
   1505 
   1506   // Compare the object in a register to a value and jump if they are not equal.
   1507   void JumpIfNotRoot(const Register& obj,
   1508                      Heap::RootListIndex index,
   1509                      Label* if_not_equal);
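          // A usage sketch (the register and label are illustrative):
          //
          //   Label is_undefined;
          //   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);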
   1510 
   1511   // Load and check the instance type of an object for being a unique name.
   1512   // Loads the type into the second argument register.
   1513   // The object and type arguments can be the same register; in that case it
   1514   // will be overwritten with the type.
   1515   // Falls through if the object is a unique name and jumps to fail otherwise.
   1516   inline void IsObjectNameType(Register object, Register type, Label* fail);
   1517 
   1518   inline void IsObjectJSObjectType(Register heap_object,
   1519                                    Register map,
   1520                                    Register scratch,
   1521                                    Label* fail);
   1522 
   1523   // Check the instance type in the given map to see if it corresponds to a
   1524   // JS object type. Jump to the fail label if this is not the case and fall
   1525   // through otherwise. However, if the fail label is NULL, no branch will be
   1526   // performed and the flags will be updated. You can test the flags for the
   1527   // "le" condition to check whether it is a valid JS object type.
   1528   inline void IsInstanceJSObjectType(Register map,
   1529                                      Register scratch,
   1530                                      Label* fail);
   1531 
   1532   // Load and check the instance type of an object for being a string.
   1533   // Loads the type into the second argument register.
   1534   // The object and type arguments can be the same register; in that case it
   1535   // will be overwritten with the type.
   1536   // Jumps to not_string or string as appropriate. If the appropriate label is
   1537   // NULL, falls through.
   1538   inline void IsObjectJSStringType(Register object, Register type,
   1539                                    Label* not_string, Label* string = NULL);
   1540 
   1541   // Compare the contents of a register with an operand, and branch to true,
   1542   // false or fall through, depending on condition.
   1543   void CompareAndSplit(const Register& lhs,
   1544                        const Operand& rhs,
   1545                        Condition cond,
   1546                        Label* if_true,
   1547                        Label* if_false,
   1548                        Label* fall_through);
   1549 
   1550   // Test the bits of register defined by bit_pattern, and branch to
   1551   // if_any_set, if_all_clear or fall_through accordingly.
   1552   void TestAndSplit(const Register& reg,
   1553                     uint64_t bit_pattern,
   1554                     Label* if_all_clear,
   1555                     Label* if_any_set,
   1556                     Label* fall_through);
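          // A usage sketch testing the Smi tag bit (registers and labels are
          // illustrative):
          //
          //   Label is_smi, not_smi;
          //   __ TestAndSplit(x0, kSmiTagMask, &is_smi, &not_smi, &fall_through);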
   1557 
   1558   // Check if a map for a JSObject indicates that the object has fast elements.
   1559   // Jump to the specified label if it does not.
   1560   void CheckFastElements(Register map, Register scratch, Label* fail);
   1561 
   1562   // Check if a map for a JSObject indicates that the object can have both smi
   1563   // and HeapObject elements.  Jump to the specified label if it does not.
   1564   void CheckFastObjectElements(Register map, Register scratch, Label* fail);
   1565 
   1566   // Check to see if number can be stored as a double in FastDoubleElements.
   1567   // If it can, store it at the index specified by key_reg in the array,
   1568   // otherwise jump to fail.
   1569   void StoreNumberToDoubleElements(Register value_reg,
   1570                                    Register key_reg,
   1571                                    Register elements_reg,
   1572                                    Register scratch1,
   1573                                    FPRegister fpscratch1,
   1574                                    Label* fail,
   1575                                    int elements_offset = 0);
   1576 
   1577   // Picks out an array index from the hash field.
   1578   // Register use:
   1579   //   hash - holds the index's hash. Clobbered.
   1580   //   index - holds the overwritten index on exit.
   1581   void IndexFromHash(Register hash, Register index);
   1582 
   1583   // ---------------------------------------------------------------------------
   1584   // Inline caching support.
   1585 
   1586   void EmitSeqStringSetCharCheck(Register string,
   1587                                  Register index,
   1588                                  SeqStringSetCharCheckIndexType index_type,
   1589                                  Register scratch,
   1590                                  uint32_t encoding_mask);
   1591 
   1592   // Generate code for checking access rights - used for security checks
   1593   // on access to global objects across environments. The holder register
   1594   // is left untouched, whereas both scratch registers are clobbered.
   1595   void CheckAccessGlobalProxy(Register holder_reg,
   1596                               Register scratch1,
   1597                               Register scratch2,
   1598                               Label* miss);
   1599 
   1600   // Hash the integer value in 'key' register.
   1601   // It uses the same algorithm as ComputeIntegerHash in utils.h.
   1602   void GetNumberHash(Register key, Register scratch);
   1603 
   1604   // Load value from the dictionary.
   1605   //
   1606   // elements - holds the slow-case elements of the receiver on entry.
   1607   //            Unchanged unless 'result' is the same register.
   1608   //
   1609   // key      - holds the smi key on entry.
   1610   //            Unchanged unless 'result' is the same register.
   1611   //
   1612   // result   - holds the result on exit if the load succeeded.
   1613   //            Allowed to be the same as 'elements' or 'key'.
   1614   //            Unchanged on bailout, so 'elements' or 'key' can be used
   1615   //            in further computation.
   1616   void LoadFromNumberDictionary(Label* miss,
   1617                                 Register elements,
   1618                                 Register key,
   1619                                 Register result,
   1620                                 Register scratch0,
   1621                                 Register scratch1,
   1622                                 Register scratch2,
   1623                                 Register scratch3);
   1624 
   1625   // ---------------------------------------------------------------------------
   1626   // Frames.
   1627 
   1628   // Activation support.
   1629   void EnterFrame(StackFrame::Type type);
   1630   void LeaveFrame(StackFrame::Type type);
   1631 
   1632   // Returns map with validated enum cache in object register.
   1633   void CheckEnumCache(Register object,
   1634                       Register null_value,
   1635                       Register scratch0,
   1636                       Register scratch1,
   1637                       Register scratch2,
   1638                       Register scratch3,
   1639                       Label* call_runtime);
   1640 
   1641   // AllocationMemento support. Arrays may have an associated
   1642   // AllocationMemento object that can be checked for in order to pretransition
   1643   // to another type.
   1644   // On entry, receiver should point to the array object.
   1645   // If allocation info is present, the Z flag is set (so that the eq
   1646   // condition will pass).
   1647   void TestJSArrayForAllocationMemento(Register receiver,
   1648                                        Register scratch1,
   1649                                        Register scratch2,
   1650                                        Label* no_memento_found);
   1651 
   1652   void JumpIfJSArrayHasAllocationMemento(Register receiver,
   1653                                          Register scratch1,
   1654                                          Register scratch2,
   1655                                          Label* memento_found) {
   1656     Label no_memento_found;
   1657     TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
   1658                                     &no_memento_found);
   1659     B(eq, memento_found);
   1660     Bind(&no_memento_found);
   1661   }
   1662 
   1663   // The stack pointer has to switch between csp and jssp when setting up and
   1664   // destroying the exit frame. Hence preserving/restoring the registers is
   1665   // slightly more complicated than simple push/pop operations.
   1666   void ExitFramePreserveFPRegs();
   1667   void ExitFrameRestoreFPRegs();
   1668 
   1669   // Generates function and stub prologue code.
   1670   void StubPrologue();
   1671   void Prologue(bool code_pre_aging);
   1672 
   1673   // Enter exit frame. Exit frames are used when calling C code from generated
   1674   // (JavaScript) code.
   1675   //
   1676   // The stack pointer must be jssp on entry, and will be set to csp by this
   1677   // function. The frame pointer is also configured, but the only other
   1678   // registers modified by this function are the provided scratch register, and
   1679   // jssp.
   1680   //
   1681   // The 'extra_space' argument can be used to allocate some space in the exit
   1682   // frame that will be ignored by the GC. This space will be reserved in the
   1683   // bottom of the frame immediately above the return address slot.
   1684   //
   1685   // Set up a stack frame and registers as follows:
   1686   //         fp[8]: CallerPC (lr)
   1687   //   fp -> fp[0]: CallerFP (old fp)
   1688   //         fp[-8]: SPOffset (new csp)
   1689   //         fp[-16]: CodeObject()
   1690   //         fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
   1691   //         csp[8]: Memory reserved for the caller if extra_space != 0.
   1692   //                 Alignment padding, if necessary.
   1693   //  csp -> csp[0]: Space reserved for the return address.
   1694   //
   1695   // This function also stores the new frame information in the top frame, so
   1696   // that the new frame becomes the current frame.
   1697   void EnterExitFrame(bool save_doubles,
   1698                       const Register& scratch,
   1699                       int extra_space = 0);
   1700 
   1701   // Leave the current exit frame, after a C function has returned to generated
   1702   // (JavaScript) code.
   1703   //
   1704   // This effectively unwinds the operation of EnterExitFrame:
   1705   //  * Preserved doubles are restored (if restore_doubles is true).
   1706   //  * The frame information is removed from the top frame.
   1707   //  * The exit frame is dropped.
   1708   //  * The stack pointer is reset to jssp.
   1709   //
   1710   // The stack pointer must be csp on entry.
   1711   void LeaveExitFrame(bool save_doubles,
   1712                       const Register& scratch,
   1713                       bool restore_context);
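          // A sketch of the expected bracketing (the scratch register choice is
          // illustrative):
          //
          //   __ EnterExitFrame(false, x10);        // jssp -> csp, frame set up.
          //   // ... call C code, e.g. via CallCFunction ...
          //   __ LeaveExitFrame(false, x10, true);  // Frame dropped, csp -> jssp.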
   1714 
   1715   void LoadContext(Register dst, int context_chain_length);
   1716 
   1717   // Emit code for a truncating division by a constant. The dividend register is
   1718   // unchanged. Dividend and result must be different.
   1719   void TruncatingDiv(Register result, Register dividend, int32_t divisor);
   1720 
   1721   // ---------------------------------------------------------------------------
   1722   // StatsCounter support
   1723 
   1724   void SetCounter(StatsCounter* counter, int value, Register scratch1,
   1725                   Register scratch2);
   1726   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
   1727                         Register scratch2);
   1728   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
   1729                         Register scratch2);
   1730 
   1731   // ---------------------------------------------------------------------------
   1732   // Garbage collector support (GC).
   1733 
   1734   enum RememberedSetFinalAction {
   1735     kReturnAtEnd,
   1736     kFallThroughAtEnd
   1737   };
   1738 
   1739   // Record in the remembered set the fact that we have a pointer to new space
   1740   // at the address pointed to by the addr register. Only works if addr is not
   1741   // in new space.
   1742   void RememberedSetHelper(Register object,  // Used for debug code.
   1743                            Register addr,
   1744                            Register scratch1,
   1745                            SaveFPRegsMode save_fp,
   1746                            RememberedSetFinalAction and_then);
   1747 
   1748   // Push and pop the registers that can hold pointers, as defined by the
   1749   // RegList constant kSafepointSavedRegisters.
   1750   void PushSafepointRegisters();
   1751   void PopSafepointRegisters();
   1752 
   1753   void PushSafepointRegistersAndDoubles();
   1754   void PopSafepointRegistersAndDoubles();
   1755 
   1756   // Store value in register src in the safepoint stack slot for register dst.
   1757   void StoreToSafepointRegisterSlot(Register src, Register dst) {
   1758     Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
   1759   }
   1760 
   1761   // Load the value of the src register from its safepoint stack slot
   1762   // into register dst.
   1763   void LoadFromSafepointRegisterSlot(Register dst, Register src) {
   1764     Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
   1765   }
   1766 
   1767   void CheckPageFlagSet(const Register& object,
   1768                         const Register& scratch,
   1769                         int mask,
   1770                         Label* if_any_set);
   1771 
   1772   void CheckPageFlagClear(const Register& object,
   1773                           const Register& scratch,
   1774                           int mask,
   1775                           Label* if_all_clear);
   1776 
   1777   void CheckMapDeprecated(Handle<Map> map,
   1778                           Register scratch,
   1779                           Label* if_deprecated);
   1780 
   1781   // Check if object is in new space and jump accordingly.
   1782   // Register 'object' is preserved.
   1783   void JumpIfNotInNewSpace(Register object,
   1784                            Label* branch) {
   1785     InNewSpace(object, ne, branch);
   1786   }
   1787 
   1788   void JumpIfInNewSpace(Register object,
   1789                         Label* branch) {
   1790     InNewSpace(object, eq, branch);
   1791   }
   1792 
   1793   // Notify the garbage collector that we wrote a pointer into an object.
   1794   // |object| is the object being stored into, |value| is the object being
   1795   // stored.  value and scratch registers are clobbered by the operation.
   1796   // The offset is the offset from the start of the object, not the offset from
   1797   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
   1798   void RecordWriteField(
   1799       Register object,
   1800       int offset,
   1801       Register value,
   1802       Register scratch,
   1803       LinkRegisterStatus lr_status,
   1804       SaveFPRegsMode save_fp,
   1805       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1806       SmiCheck smi_check = INLINE_SMI_CHECK,
   1807       PointersToHereCheck pointers_to_here_check_for_value =
   1808           kPointersToHereMaybeInteresting);
   1809 
   1810   // As above, but the offset has the tag presubtracted. For use with
   1811   // MemOperand(reg, off).
   1812   inline void RecordWriteContextSlot(
   1813       Register context,
   1814       int offset,
   1815       Register value,
   1816       Register scratch,
   1817       LinkRegisterStatus lr_status,
   1818       SaveFPRegsMode save_fp,
   1819       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1820       SmiCheck smi_check = INLINE_SMI_CHECK,
   1821       PointersToHereCheck pointers_to_here_check_for_value =
   1822           kPointersToHereMaybeInteresting) {
   1823     RecordWriteField(context,
   1824                      offset + kHeapObjectTag,
   1825                      value,
   1826                      scratch,
   1827                      lr_status,
   1828                      save_fp,
   1829                      remembered_set_action,
   1830                      smi_check,
   1831                      pointers_to_here_check_for_value);
   1832   }
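          // A typical store paired with its write barrier (the registers and
          // field offset are illustrative):
          //
          //   __ Str(x1, FieldMemOperand(x0, JSObject::kPropertiesOffset));
          //   __ RecordWriteField(x0, JSObject::kPropertiesOffset, x1, x2,
          //                       kLRHasNotBeenSaved, kDontSaveFPRegs);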
   1833 
   1834   void RecordWriteForMap(
   1835       Register object,
   1836       Register map,
   1837       Register dst,
   1838       LinkRegisterStatus lr_status,
   1839       SaveFPRegsMode save_fp);
   1840 
   1841   // For a given |object| notify the garbage collector that the slot |address|
   1842   // has been written.  |value| is the object being stored. The value and
   1843   // address registers are clobbered by the operation.
   1844   void RecordWrite(
   1845       Register object,
   1846       Register address,
   1847       Register value,
   1848       LinkRegisterStatus lr_status,
   1849       SaveFPRegsMode save_fp,
   1850       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
   1851       SmiCheck smi_check = INLINE_SMI_CHECK,
   1852       PointersToHereCheck pointers_to_here_check_for_value =
   1853           kPointersToHereMaybeInteresting);
   1854 
   1855   // Checks the color of an object. If the object is already grey or black
   1856   // then we just fall through, since it is already live. If it is white and
   1857   // we can determine that it doesn't need to be scanned, then we just mark it
   1858   // black and fall through. For the rest we jump to the label so the
   1859   // incremental marker can fix its assumptions.
   1860   void EnsureNotWhite(Register object,
   1861                       Register scratch1,
   1862                       Register scratch2,
   1863                       Register scratch3,
   1864                       Register scratch4,
   1865                       Label* object_is_white_and_not_data);
   1866 
   1867   // Detects conservatively whether an object is data-only, i.e. it does not
   1868   // need to be scanned by the garbage collector.
   1869   void JumpIfDataObject(Register value,
   1870                         Register scratch,
   1871                         Label* not_data_object);
   1872 
   1873   // Helper for finding the mark bits for an address.
   1874   // Note that the behaviour differs slightly from that of other architectures.
   1875   // On exit:
   1876   //  - addr_reg is unchanged.
   1877   //  - The bitmap register points at the word with the mark bits.
   1878   //  - The shift register contains the index of the first color bit for this
   1879   //    object in the bitmap.
   1880   inline void GetMarkBits(Register addr_reg,
   1881                           Register bitmap_reg,
   1882                           Register shift_reg);
   1883 
   1884   // Check if an object has a given incremental marking color.
   1885   void HasColor(Register object,
   1886                 Register scratch0,
   1887                 Register scratch1,
   1888                 Label* has_color,
   1889                 int first_bit,
   1890                 int second_bit);
   1891 
   1892   void JumpIfBlack(Register object,
   1893                    Register scratch0,
   1894                    Register scratch1,
   1895                    Label* on_black);
   1896 
   1897 
   1898   // Get the location of a relocated constant (its address in the constant pool)
   1899   // from its load site.
   1900   void GetRelocatedValueLocation(Register ldr_location,
   1901                                  Register result);
   1902 
   1903 
   1904   // ---------------------------------------------------------------------------
   1905   // Debugging.
   1906 
   1907   // Calls Abort(msg) if the condition cond is not satisfied.
   1908   // Use --debug_code to enable.
   1909   void Assert(Condition cond, BailoutReason reason);
   1910   void AssertRegisterIsClear(Register reg, BailoutReason reason);
   1911   void AssertRegisterIsRoot(
   1912       Register reg,
   1913       Heap::RootListIndex index,
   1914       BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
   1915   void AssertFastElements(Register elements);
   1916 
   1917   // Abort if the specified register contains the invalid color bit pattern.
   1918   // The pattern must be in bits [1:0] of 'reg' register.
   1919   //
   1920   // If emit_debug_code() is false, this emits no code.
   1921   void AssertHasValidColor(const Register& reg);
   1922 
   1923   // Abort if 'object' register doesn't point to a string object.
   1924   //
   1925   // If emit_debug_code() is false, this emits no code.
   1926   void AssertIsString(const Register& object);
   1927 
   1928   // Like Assert(), but always enabled.
   1929   void Check(Condition cond, BailoutReason reason);
   1930   void CheckRegisterIsClear(Register reg, BailoutReason reason);
   1931 
   1932   // Print a message to stderr and abort execution.
   1933   void Abort(BailoutReason reason);
   1934 
   1935   // Conditionally load the cached Array transitioned map of type
   1936   // transitioned_kind from the native context if the map in register
   1937   // map_in_out is the cached Array map in the native context of
   1938   // expected_kind.
   1939   void LoadTransitionedArrayMapConditional(
   1940       ElementsKind expected_kind,
   1941       ElementsKind transitioned_kind,
   1942       Register map_in_out,
   1943       Register scratch1,
   1944       Register scratch2,
   1945       Label* no_map_match);
   1946 
   1947   void LoadGlobalFunction(int index, Register function);
   1948 
   1949   // Load the initial map from the global function. The registers function and
   1950   // map can be the same, function is then overwritten.
   1951   void LoadGlobalFunctionInitialMap(Register function,
   1952                                     Register map,
   1953                                     Register scratch);
   1954 
   1955   CPURegList* TmpList() { return &tmp_list_; }
   1956   CPURegList* FPTmpList() { return &fptmp_list_; }
   1957 
   1958   static CPURegList DefaultTmpList();
   1959   static CPURegList DefaultFPTmpList();
   1960 
   1961   // Like printf, but print at run-time from generated code.
   1962   //
   1963   // The caller must ensure that arguments for floating-point placeholders
   1964   // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
   1965   // placeholders are Registers.
   1966   //
   1967   // At the moment it is only possible to print the value of csp if it is the
   1968   // current stack pointer. Otherwise, the MacroAssembler will automatically
   1969   // update csp on every push (using BumpSystemStackPointer), so determining its
   1970   // value is difficult.
   1971   //
   1972   // Format placeholders that refer to more than one argument, or to a specific
   1973   // argument, are not supported. This includes formats like "%1$d" or "%.*d".
   1974   //
   1975   // This function automatically preserves caller-saved registers so that
   1976   // calling code can use Printf at any point without having to worry about
   1977   // corruption. The preservation mechanism generates a lot of code. If this is
   1978   // a problem, preserve the important registers manually and then call
   1979   // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
   1980   // implicitly preserved.
   1981   void Printf(const char * format,
   1982               CPURegister arg0 = NoCPUReg,
   1983               CPURegister arg1 = NoCPUReg,
   1984               CPURegister arg2 = NoCPUReg,
   1985               CPURegister arg3 = NoCPUReg);
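          // A usage sketch (the register choices are illustrative; note that %d
          // takes a Register and %f takes an FPRegister):
          //
          //   __ Printf("count: %d, value: %f\n", w0, d1);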
   1986 
   1987   // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
   1988   //
   1989   // The return code from the system printf call will be returned in x0.
   1990   void PrintfNoPreserve(const char * format,
   1991                         const CPURegister& arg0 = NoCPUReg,
   1992                         const CPURegister& arg1 = NoCPUReg,
   1993                         const CPURegister& arg2 = NoCPUReg,
   1994                         const CPURegister& arg3 = NoCPUReg);
   1995 
   1996   // Code ageing support functions.
   1997 
   1998   // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a
   1999   // function as old, it replaces some of the function prologue (generated by
   2000   // FullCodeGenerator::Generate) with a call to a special stub (ultimately
   2001   // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
   2002   // function prologue to its initial young state (indicating that it has been
   2003   // recently run) and continues. A young function is therefore one which has a
   2004   // normal frame setup sequence, and an old function has a code age sequence
   2005   // which calls a code ageing stub.
   2006 
   2007   // Set up a basic stack frame for young code (or code exempt from ageing) with
   2008   // type FUNCTION. It may be patched later for code ageing support. This is
   2009   // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
   2010   //
   2011   // This function takes an Assembler so it can be called from either a
   2012   // MacroAssembler or a PatchingAssembler context.
   2013   static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
   2014 
   2015   // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
   2016   void EmitFrameSetupForCodeAgePatching();
   2017 
   2018   // Emit a code age sequence that calls the relevant code age stub. The code
   2019   // generated by this sequence is expected to replace the code generated by
   2020   // EmitFrameSetupForCodeAgePatching, and represents an old function.
   2021   //
   2022   // If stub is NULL, this function generates the code age sequence but omits
   2023   // the stub address that is normally embedded in the instruction stream. This
   2024   // can be used by debug code to verify code age sequences.
   2025   static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
   2026 
   2027   // Call EmitCodeAgeSequence from a MacroAssembler context.
   2028   void EmitCodeAgeSequence(Code* stub);
   2029 
   2030   // Return true if the sequence is a young sequence generated by
   2031   // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
   2032   // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
   2033   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
   2034 
   2035   // Jumps to found label if a prototype map has dictionary elements.
   2036   void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
   2037                                         Register scratch1, Label* found);
   2038 
   2039   // Perform necessary maintenance operations before a push or after a pop.
   2040   //
   2041   // Note that size is specified in bytes.
   2042   void PushPreamble(Operand total_size);
   2043   void PopPostamble(Operand total_size);
   2044 
   2045   void PushPreamble(int count, int size) { PushPreamble(count * size); }
   2046   void PopPostamble(int count, int size) { PopPostamble(count * size); }
   2047 
   2048  private:
   2049   // Helpers for CopyFields.
   2050   // These each implement CopyFields in a different way.
   2051   void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
   2052                                  Register scratch1, Register scratch2,
   2053                                  Register scratch3, Register scratch4,
   2054                                  Register scratch5);
   2055   void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
   2056                                      Register scratch1, Register scratch2,
   2057                                      Register scratch3, Register scratch4);
   2058   void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
   2059                                 Register scratch1, Register scratch2,
   2060                                 Register scratch3);
   2061 
   2062   // The actual Push and Pop implementations. These don't generate any code
   2063   // other than that required for the push or pop. This allows
   2064   // (Push|Pop)CPURegList to bundle together run-time assertions for a large
   2065   // block of registers.
   2066   //
   2067   // Note that size is per register, and is specified in bytes.
   2068   void PushHelper(int count, int size,
   2069                   const CPURegister& src0, const CPURegister& src1,
   2070                   const CPURegister& src2, const CPURegister& src3);
   2071   void PopHelper(int count, int size,
   2072                  const CPURegister& dst0, const CPURegister& dst1,
   2073                  const CPURegister& dst2, const CPURegister& dst3);
   2074 
   2075   // Call Printf. On a native build, a simple call will be generated, but if the
   2076   // simulator is being used then a suitable pseudo-instruction is used. The
   2077   // arguments and stack (csp) must be prepared by the caller as for a normal
   2078   // AAPCS64 call to 'printf'.
   2079   //
   2080   // The 'args' argument should point to an array of variable arguments in their
   2081   // proper PCS registers (and in calling order). The argument registers can
   2082   // have mixed types. The format string (x0) should not be included.
   2083   void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
   2084 
   2085   // Helper for throwing exceptions.  Compute a handler address and jump to
   2086   // it.  See the implementation for register usage.
   2087   void JumpToHandlerEntry(Register exception,
   2088                           Register object,
   2089                           Register state,
   2090                           Register scratch1,
   2091                           Register scratch2);
   2092 
   2093   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   2094   void InNewSpace(Register object,
   2095                   Condition cond,  // eq for new space, ne otherwise.
   2096                   Label* branch);
   2097 
   2098   // Try to represent a double as an int so that integer fast-paths may be
   2099   // used. Not every valid integer value is guaranteed to be caught.
   2100   // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
   2101   // is a W or X register.
   2102   //
   2103   // This does not distinguish between +0 and -0, so if this distinction is
   2104   // important it must be checked separately.
   2105   //
   2106   // On output the Z flag is set if the operation was successful.
   2107   void TryRepresentDoubleAsInt(Register as_int,
   2108                                FPRegister value,
   2109                                FPRegister scratch_d,
   2110                                Label* on_successful_conversion = NULL,
   2111                                Label* on_failed_conversion = NULL);
   2112 
   2113   bool generating_stub_;
   2114 #if DEBUG
   2115   // Tells whether any of the macro instructions can be used. When false, the
   2116   // MacroAssembler will assert if a method which can emit a variable number
   2117   // of instructions is called.
   2118   bool allow_macro_instructions_;
   2119 #endif
   2120   bool has_frame_;
   2121 
   2122   // The Abort method should call a V8 runtime function, but the CallRuntime
   2123   // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
   2124   // use a simpler abort mechanism that doesn't depend on CEntryStub.
   2125   //
   2126   // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
   2127   // being generated.
   2128   bool use_real_aborts_;
   2129 
   2130   // This handle will be patched with the code object on installation.
   2131   Handle<Object> code_object_;
   2132 
   2133   // The register to use as a stack pointer for stack operations.
   2134   Register sp_;
   2135 
   2136   // Scratch registers available for use by the MacroAssembler.
   2137   CPURegList tmp_list_;
   2138   CPURegList fptmp_list_;
   2139 
   2140   void InitializeNewString(Register string,
   2141                            Register length,
   2142                            Heap::RootListIndex map_index,
   2143                            Register scratch1,
   2144                            Register scratch2);
   2145 
   2146  public:
   2147   // Resolving far branches.
   2148   //
   2149   // The various classes of branch instructions with immediate offsets have
   2150   // different ranges. While the Assembler will fail to assemble a branch
   2151   // exceeding its range, the MacroAssembler offers a mechanism to resolve
   2152   // branches to too distant targets, either by tweaking the generated code to
   2153   // use branch instructions with wider ranges or generating veneers.
   2154   //
   2155   // Currently branches to distant targets are resolved using unconditional
   2156   // branch instructions with a range of +-128MB. If that becomes too little
   2157   // (!), the mechanism can be extended to generate special veneers for really
   2158   // far targets.
   2159 
   2160   // Helps resolve branching to labels potentially out of range.
   2161   // If the label is not bound, it registers the information necessary to later
   2162   // be able to emit a veneer for this branch if necessary.
   2163   // If the label is bound, it returns true if the label (or the previous link
   2164   // in the label chain) is out of range. In that case the caller is responsible
   2165   // for generating appropriate code.
   2166   // Otherwise it returns false.
   2167   // This function also checks whether veneers need to be emitted.
   2168   bool NeedExtraInstructionsOrRegisterBranch(Label *label,
   2169                                              ImmBranchType branch_type);
   2170 };
   2171 
   2172 
   2173   // Use this scope when you need a one-to-one mapping between methods and
   2174 // instructions. This scope prevents the MacroAssembler from being called and
   2175 // literal pools from being emitted. It also asserts the number of instructions
   2176 // emitted is what you specified when creating the scope.
   2177 class InstructionAccurateScope BASE_EMBEDDED {
   2178  public:
   2179   explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
   2180       : masm_(masm)
   2181 #ifdef DEBUG
   2182         ,
   2183         size_(count * kInstructionSize)
   2184 #endif
   2185   {
   2186     // Before blocking the const pool, see if it needs to be emitted.
   2187     masm_->CheckConstPool(false, true);
   2188     masm_->CheckVeneerPool(false, true);
   2189 
   2190     masm_->StartBlockPools();
   2191 #ifdef DEBUG
   2192     if (count != 0) {
   2193       masm_->bind(&start_);
   2194     }
   2195     previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
   2196     masm_->set_allow_macro_instructions(false);
   2197 #endif
   2198   }
   2199 
   2200   ~InstructionAccurateScope() {
   2201     masm_->EndBlockPools();
   2202 #ifdef DEBUG
   2203     if (start_.is_bound()) {
   2204       DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
   2205     }
   2206     masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
   2207 #endif
   2208   }
   2209 
   2210  private:
   2211   MacroAssembler* masm_;
   2212 #ifdef DEBUG
   2213   size_t size_;
   2214   Label start_;
   2215   bool previous_allow_macro_instructions_;
   2216 #endif
   2217 };
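        // A usage sketch (the instruction count and the instructions themselves
        // are illustrative; only raw, lower-case assembler methods may be used
        // inside the scope, since macro instructions are disabled):
        //
        //   {
        //     InstructionAccurateScope scope(masm, 2);
        //     __ add(x0, x0, x1);
        //     __ sub(x2, x2, x3);
        //   }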


// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK(available_->type() == CPURegister::kRegister);
    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
  }

  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }

  Register UnsafeAcquire(const Register& reg) {
    return Register(UnsafeAcquire(available_, reg));
  }

  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);

 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);
  static CPURegister UnsafeAcquire(CPURegList* available,
                                   const CPURegister& reg);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kFPRegister
};
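
// A minimal usage sketch (hypothetical call site):
//
//   {
//     UseScratchRegisterScope temps(masm);
//     Register scratch = temps.AcquireX();  // Taken from masm->TmpList().
//     __ Mov(scratch, 0x123);
//     __ Add(x0, x0, scratch);
//   }  // 'scratch' is returned to the temps list here.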


inline MemOperand ContextMemOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand GlobalObjectMemOperand() {
  return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
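
// For example, loading the global object could look like this (illustrative
// only):
//
//   __ Ldr(x10, GlobalObjectMemOperand());
//
// which loads from cp at Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX),
// accounting for the heap object tag.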


// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  const Register& SmiRegister() const {
    return reg_;
  }

  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.

  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold an SMI or
  // be used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32 - 5> {};
};
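
// A minimal sketch of the encode/decode round trip (hypothetical call sites;
// 'info_address' is assumed to be known to the patching code):
//
//   // Code generation side: record the patch site for an SMI check on x1.
//   Label smi_check, if_smi;
//   __ Bind(&smi_check);
//   __ Tbz(x1, 0, &if_smi);   // The patchable SMI check itself.
//   ...
//   InlineSmiCheckInfo::Emit(masm, x1, &smi_check);
//
//   // Patching side: recover the register and the check instruction.
//   InlineSmiCheckInfo info(info_address);
//   if (info.HasSmiCheck()) {
//     const Register& reg = info.SmiRegister();   // x1 in this example.
//     Instruction* check = info.SmiCheck();
//   }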

} }  // namespace v8::internal

#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_