/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/reg_location.h"
#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "driver/compiler_driver.h"
#include "instruction_set.h"
#include "leb128.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
#include "utils/growable_array.h"
#include "utils/stack_checks.h"

namespace art {

/*
 * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
 * add type safety (see runtime/offsets.h).
 */
typedef uint32_t DexOffset;          // Dex offset in code units.
typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
typedef uint32_t CodeOffset;         // Native code offset in bytes.

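/*
 * Illustrative only (not part of the original header): a minimal sketch of the
 * kind of type-safe wrapper the TODO above alludes to, loosely modeled on
 * runtime/offsets.h.  The name TypedDexOffset is hypothetical.
 *
 *   class TypedDexOffset {
 *    public:
 *     explicit TypedDexOffset(uint32_t value) : value_(value) {}
 *     uint32_t Value() const { return value_; }
 *    private:
 *     uint32_t value_;  // Dex offset in code units.
 *   };
 */
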
// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
#define IS_BRANCH            (1ULL << kIsBranch)
#define IS_IT                (1ULL << kIsIT)
#define IS_MOVE              (1ULL << kIsMoveOp)
#define IS_LOAD              (1ULL << kMemLoad)
#define IS_QUAD_OP           (1ULL << kIsQuadOp)
#define IS_QUIN_OP           (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
#define IS_STORE             (1ULL << kMemStore)
#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
#define IS_VOLATILE          (1ULL << kMemVolatile)
#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
#define NO_OPERAND           (1ULL << kNoOperand)
#define REG_DEF0             (1ULL << kRegDef0)
#define REG_DEF1             (1ULL << kRegDef1)
#define REG_DEF2             (1ULL << kRegDef2)
#define REG_DEFA             (1ULL << kRegDefA)
#define REG_DEFD             (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0        (1ULL << kRegDefList0)
#define REG_DEF_LIST1        (1ULL << kRegDefList1)
#define REG_DEF_LR           (1ULL << kRegDefLR)
#define REG_DEF_SP           (1ULL << kRegDefSP)
#define REG_USE0             (1ULL << kRegUse0)
#define REG_USE1             (1ULL << kRegUse1)
#define REG_USE2             (1ULL << kRegUse2)
#define REG_USE3             (1ULL << kRegUse3)
#define REG_USE4             (1ULL << kRegUse4)
#define REG_USEA             (1ULL << kRegUseA)
#define REG_USEB             (1ULL << kRegUseB)
#define REG_USEC             (1ULL << kRegUseC)
#define REG_USED             (1ULL << kRegUseD)
#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0        (1ULL << kRegUseList0)
#define REG_USE_LIST1        (1ULL << kRegUseList1)
#define REG_USE_LR           (1ULL << kRegUseLR)
#define REG_USE_PC           (1ULL << kRegUsePC)
#define REG_USE_SP           (1ULL << kRegUseSP)
#define SETS_CCODES          (1ULL << kSetsCCodes)
#define USES_CCODES          (1ULL << kUsesCCodes)
#define USE_FP_STACK         (1ULL << kUseFpStack)
#define REG_USE_LO           (1ULL << kUseLo)
#define REG_USE_HI           (1ULL << kUseHi)
#define REG_DEF_LO           (1ULL << kDefLo)
#define REG_DEF_HI           (1ULL << kDefHi)
#define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
#define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
#define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)

// Special load/stores.
#define IS_LOADX             (IS_LOAD | IS_VOLATILE)
#define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
#define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
#define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)

#define IS_STOREX            (IS_STORE | IS_VOLATILE)
#define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
#define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
#define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)

// Common combo register usage patterns.
#define REG_DEF01            (REG_DEF0 | REG_DEF1)
#define REG_DEF012           (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
#define REG_USE012           (REG_USE01 | REG_USE2)
#define REG_USE014           (REG_USE01 | REG_USE4)
#define REG_USE01            (REG_USE0 | REG_USE1)
#define REG_USE02            (REG_USE0 | REG_USE2)
#define REG_USE12            (REG_USE1 | REG_USE2)
#define REG_USE23            (REG_USE2 | REG_USE3)
#define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)

// TODO: #includes need a cleanup
#ifndef INVALID_SREG
#define INVALID_SREG (-1)
#endif

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
struct MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;

struct UseDefMasks {
  const ResourceMask* use_mask;        // Resource mask for use.
  const ResourceMask* def_mask;        // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;           // Chain of LIR nodes needing PC-relative fixups.
};

struct LIR {
  CodeOffset offset;               // Offset of this instruction.
  NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;  // For Dalvik register disambiguation.
    bool is_nop:1;               // LIR is optimized away.
    unsigned int size:4;         // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;      // If true, masks should not be used.
    unsigned int generation:1;   // Used to track visitation state during fixup pass.
    unsigned int fixup:8;        // Fixup kind.
  } flags;
  union {
    UseDefMasks m;               // Use & Def masks used during optimization.
    AssemblyInfo a;              // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)

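// Illustrative only (not part of the original header): how the alias_info and
// register-pair macros above compose; the values are made up for the example.
//
//   int info = ENCODE_ALIAS_INFO(5, true);    // reg 5, wide: 0x10005.
//   int reg  = DECODE_ALIAS_INFO_REG(info);   // == 5
//   int wide = DECODE_ALIAS_INFO_WIDE(info);  // == 1
//
//   int both = ENCODE_REG_PAIR(2, 3);         // Low in bits [7:0], high in [15:8].
//   int lo, hi;
//   DECODE_REG_PAIR(both, lo, hi);            // lo == 2, hi == 3
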
// Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000

// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))

class Mir2Lir : public Backend {
  public:
    static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
    static constexpr bool kReportSizeError = true && kIsDebugBuild;

    // TODO: If necessary, this could be made target-dependent.
    static constexpr uint16_t kSmallSwitchThreshold = 5;

    /*
     * Auxiliary information describing the location of data embedded in the Dalvik
     * byte code stream.
     */
    struct EmbeddedData {
      CodeOffset offset;          // Code offset of data block.
      const uint16_t* table;      // Original dex data.
      DexOffset vaddr;            // Dalvik offset of parent opcode.
    };

    struct FillArrayData : EmbeddedData {
      int32_t size;
    };

    struct SwitchTable : EmbeddedData {
      LIR* anchor;                // Reference instruction for relative offsets.
      LIR** targets;              // Array of case targets.
    };

    /* Static register use counts. */
    struct RefCounts {
      int count;
      int s_reg;
    };

    /*
     * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
     * and native register storage.  The primary purpose is to reuse previously
     * loaded values, if possible, and otherwise to keep the value in register
     * storage as long as possible.
     *
     * NOTE 1: wide_value refers to the width of the Dalvik value contained in
     * this register (or pair).  For example, a 64-bit register containing a 32-bit
     * Dalvik value would have wide_value==false even though the storage container itself
     * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
     * would have wide_value==true (and additionally would have its partner field set to the
     * other half, whose wide_value field would also be true).
     *
     * NOTE 2: In the case of a register pair, you can determine which of the partners
     * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
     *
     * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
     * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
     * value, and the s_reg of the high word is implied (s_reg + 1).
     *
     * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false,
     * no other fields have meaning. [perhaps not true, wide should work for promoted regs?]
     * If is_temp==true and live==false, no other fields have meaning.  If is_temp==true
     * and live==true, wide_value, partner, dirty, s_reg, def_start and def_end describe
     * the relationship between the temp register/register pair and the Dalvik value[s]
     * described by s_reg/s_reg+1.
     *
     * The fields used_storage, master_storage and storage_mask are used to track allocation
     * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
     * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
     * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should
     * not change once initialized.  The "used_storage" field tracks current allocation status.
     * Although each record contains this field, only the field from the largest member of
     * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
     * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in used_storage
     * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
     * Then, if we wanted to determine whether s4 could be allocated, we would "and"
     * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free.
     * To allocate: *master_storage |= storage_mask.  To free: *master_storage &= ~storage_mask.
     * (A short code sketch of this protocol follows the class definition below.)
     *
     * For an X86 vector register example, storage_mask would be:
     *    0x00000001 for 32-bit view of xmm1
     *    0x00000003 for 64-bit view of xmm1
     *    0x0000000f for 128-bit view of xmm1
     *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
     *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
     *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
     *
     * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
     * held in the widest member of an aliased set.  Note, though, that for a temp register
     * to be reused as live, it must both be marked live and the associated SReg() must match
     * the desired s_reg.  This gets a little complicated when dealing with aliased registers.
     * All members of an aliased set will share the same liveness flags, but each will
     * individually maintain s_reg_.  In this way we can know that at least one member of an
     * aliased set is live, but will only fully match on the appropriate alias view.  For
     * example, if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also
     * implies v9 because it is wide), its aliases s2 and s3 will show as live, but will have
     * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
     * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
     * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
     * report that v9 is currently not live as a single (which is what we want).
     *
     * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
     * to treat xmm registers:
     *     1. Treat them all as 128-bits wide, but denote how much data is used via the
     *        bytes field.
     *         o This more closely matches reality, but means you'd need to be able to get
     *           to the associated RegisterInfo struct to figure out how it's being used.
     *         o This is how 64-bit core registers will be used - always 64 bits, but the
     *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
     *     2. View the xmm registers based on contents.
     *         o A single in an xmm2 register would be k32BitVector, while a double in xmm2
     *           would be a k64BitVector.
     *         o Note that the two uses above would be considered distinct registers (but with
     *           the aliasing mechanism, we could detect interference).
     *         o This is how aliased double and single float registers will be handled on
     *           Arm and MIPS.
     * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
     * mechanism 2 for aliased float registers and x86 vector registers.
     */
    class RegisterInfo {
     public:
      RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
      ~RegisterInfo() {}
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocRegAlloc);
      }

      static const uint32_t k32SoloStorageMask     = 0x00000001;
      static const uint32_t kLowSingleStorageMask  = 0x00000001;
      static const uint32_t kHighSingleStorageMask = 0x00000002;
      static const uint32_t k64SoloStorageMask     = 0x00000003;
      static const uint32_t k128SoloStorageMask    = 0x0000000f;
      static const uint32_t k256SoloStorageMask    = 0x000000ff;
      static const uint32_t k512SoloStorageMask    = 0x0000ffff;
      static const uint32_t k1024SoloStorageMask   = 0xffffffff;

      bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
      void MarkInUse() { master_->used_storage_ |= storage_mask_; }
      void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
      // No part of the containing storage is live in this view.
      bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
      // Liveness of this view matches.  Note: not equivalent to !IsDead().
      bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
      void MarkLive(int s_reg) {
        // TODO: Anything useful to assert here?
        s_reg_ = s_reg;
        master_->liveness_ |= storage_mask_;
      }
      void MarkDead() {
        if (SReg() != INVALID_SREG) {
          s_reg_ = INVALID_SREG;
          master_->liveness_ &= ~storage_mask_;
          ResetDefBody();
        }
      }
      RegStorage GetReg() { return reg_; }
      void SetReg(RegStorage reg) { reg_ = reg; }
      bool IsTemp() { return is_temp_; }
      void SetIsTemp(bool val) { is_temp_ = val; }
      bool IsWide() { return wide_value_; }
      void SetIsWide(bool val) {
        wide_value_ = val;
        if (!val) {
          // If not wide, reset partner to self.
          SetPartner(GetReg());
        }
      }
      bool IsDirty() { return dirty_; }
      void SetIsDirty(bool val) { dirty_ = val; }
      RegStorage Partner() { return partner_; }
      void SetPartner(RegStorage partner) { partner_ = partner; }
      int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
      const ResourceMask& DefUseMask() { return def_use_mask_; }
      void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
      RegisterInfo* Master() { return master_; }
      void SetMaster(RegisterInfo* master) {
        master_ = master;
        if (master != this) {
          master_->aliased_ = true;
          DCHECK(alias_chain_ == nullptr);
          alias_chain_ = master_->alias_chain_;
          master_->alias_chain_ = this;
        }
      }
      bool IsAliased() { return aliased_; }
      RegisterInfo* GetAliasChain() { return alias_chain_; }
      uint32_t StorageMask() { return storage_mask_; }
      void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
      LIR* DefStart() { return def_start_; }
      void SetDefStart(LIR* def_start) { def_start_ = def_start; }
      LIR* DefEnd() { return def_end_; }
      void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
      void ResetDefBody() { def_start_ = def_end_ = nullptr; }
      // Find member of aliased set matching storage_used; return nullptr if none.
      RegisterInfo* FindMatchingView(uint32_t storage_used) {
        RegisterInfo* res = Master();
        for (; res != nullptr; res = res->GetAliasChain()) {
          if (res->StorageMask() == storage_used) {
            break;
          }
        }
        return res;
      }

     private:
      RegStorage reg_;
      bool is_temp_;               // Can allocate as temp?
      bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
      bool dirty_;                 // If live, is it dirty?
      bool aliased_;               // Is this the master for other aliased RegisterInfo's?
      RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
      int s_reg_;                  // Name of live value.
      ResourceMask def_use_mask_;  // Resources for this element.
      uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
      uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
      RegisterInfo* master_;       // Pointer to controlling storage mask.
      uint32_t storage_mask_;      // Track allocation of sub-units.
      LIR* def_start_;             // Starting inst in last def sequence.
      LIR* def_end_;               // Ending inst in last def sequence.
      RegisterInfo* alias_chain_;  // Chain of aliased registers.
    };

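    // Illustrative only (not part of the original header): the aliased-storage
    // protocol from the comment above, written out against the RegisterInfo
    // methods.  "info" is a hypothetical RegisterInfo* for, e.g., Arm s4, whose
    // master is d2.
    //
    //   if (!info->InUse()) {    // (storage_mask_ & master_->used_storage_) == 0.
    //     info->MarkInUse();     // master_->used_storage_ |= storage_mask_.
    //     ...                    // Use the register.
    //     info->MarkFree();      // master_->used_storage_ &= ~storage_mask_.
    //   }
    //
    // Liveness works the same way, except that IsLive() requires *all* bits of
    // this view's storage_mask_ to be set in master_->liveness_, while IsDead()
    // requires none; a view can be neither live nor dead when an alias of a
    // different width holds the value.
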
    class RegisterPool {
     public:
      RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                   const ArrayRef<const RegStorage>& core_regs,
                   const ArrayRef<const RegStorage>& core64_regs,
                   const ArrayRef<const RegStorage>& sp_regs,
                   const ArrayRef<const RegStorage>& dp_regs,
                   const ArrayRef<const RegStorage>& reserved_regs,
                   const ArrayRef<const RegStorage>& reserved64_regs,
                   const ArrayRef<const RegStorage>& core_temps,
                   const ArrayRef<const RegStorage>& core64_temps,
                   const ArrayRef<const RegStorage>& sp_temps,
                   const ArrayRef<const RegStorage>& dp_temps);
      ~RegisterPool() {}
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocRegAlloc);
      }
      void ResetNextTemp() {
        next_core_reg_ = 0;
        next_sp_reg_ = 0;
        next_dp_reg_ = 0;
      }
      GrowableArray<RegisterInfo*> core_regs_;
      int next_core_reg_;
      GrowableArray<RegisterInfo*> core64_regs_;
      int next_core64_reg_;
      GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
      int next_sp_reg_;
      GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
      int next_dp_reg_;
      GrowableArray<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_.
      int* next_ref_reg_;

     private:
      Mir2Lir* const m2l_;
    };

    struct PromotionMap {
      RegLocationType core_location:3;
      uint8_t core_reg;
      RegLocationType fp_location:3;
      uint8_t fp_reg;
      bool first_in_pair;
    };

    //
    // Slow paths.  This object is used to generate a sequence of code that is executed on the
    // slow path.  For example, resolving a string or class is slow, but it only needs to be
    // executed once (after that, the result is resolved and doesn't need to be computed again).
    // We want slow paths to be placed out-of-line, and not require a (probably mispredicted)
    // conditional forward branch over them.
    //
    // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
    // the Compile() function that will be called near the end of the code generated by the
    // method.
    //
    // The basic flow for a slow path is:
    //
    //     CMP reg, #value
    //     BEQ fromfast
    //   cont:
    //     ...
    //     fast path code
    //     ...
    //     more code
    //     ...
    //     RETURN
    //
    //   fromfast:
    //     ...
    //     slow path code
    //     ...
    //     B cont
    //
    // So we need two labels and two branches.  The first label (fromfast) is the target of the
    // conditional branch into the slow path code.  The second label (cont) is the target of the
    // unconditional branch back to the code after the slow path has completed.  (A sketch of a
    // derived slow path follows the LIRSlowPath class below.)
    //

    class LIRSlowPath {
     public:
      LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
                  LIR* cont = nullptr)
          : m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
        m2l->StartSlowPath(this);
      }
      virtual ~LIRSlowPath() {}
      virtual void Compile() = 0;

      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->Alloc(size, kArenaAllocData);
      }

      LIR* GetContinuationLabel() {
        return cont_;
      }

      LIR* GetFromFast() {
        return fromfast_;
      }

     protected:
      LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

      Mir2Lir* const m2l_;
      CompilationUnit* const cu_;
      const DexOffset current_dex_pc_;
      LIR* const fromfast_;
      LIR* const cont_;
    };

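    // A minimal sketch (not part of the original header) of a LIRSlowPath
    // subclass following the flow described above.  The class name is
    // hypothetical; a throwing path needs no "cont" label.
    //
    //   class DivZeroSlowPath : public LIRSlowPath {
    //    public:
    //     DivZeroSlowPath(Mir2Lir* m2l, DexOffset dexpc, LIR* fromfast)
    //         : LIRSlowPath(m2l, dexpc, fromfast, nullptr) {}
    //     void Compile() {
    //       m2l_->ResetRegPool();
    //       GenerateTargetLabel(kPseudoThrowTarget);  // Binds the "fromfast" target.
    //       m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
    //     }
    //   };
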
    // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
    class ScopedMemRefType {
     public:
      ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
          : m2l_(m2l),
            old_mem_ref_type_(m2l->mem_ref_type_) {
        m2l_->mem_ref_type_ = new_mem_ref_type;
      }

      ~ScopedMemRefType() {
        m2l_->mem_ref_type_ = old_mem_ref_type_;
      }

     private:
      Mir2Lir* const m2l_;
      ResourceMask::ResourceBit old_mem_ref_type_;

      DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
    };

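    // Illustrative only (not part of the original header): typical use of
    // ScopedMemRefType to tag the loads/stores generated within a scope as,
    // say, literal accesses; the previous mem_ref_type_ is restored when the
    // scope exits.
    //
    //   {
    //     ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
    //     Load32Disp(r_base, displacement, r_dest);  // Annotated as a literal access.
    //   }  // mem_ref_type_ reverts here.
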
    virtual ~Mir2Lir() {}

    /**
     * @brief Decodes the LIR offset.
     * @return Returns the scaled offset of the LIR.
     */
    virtual size_t GetInstructionOffset(LIR* lir);

    int32_t s4FromSwitchData(const void* switch_data) {
      return *reinterpret_cast<const int32_t*>(switch_data);
    }

    /*
     * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
     * it was introduced, it was intended to be a quick best guess of type without having to
     * take the time to do type analysis.  Currently, though, we have a much better idea of
     * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
     * just use our knowledge of type to select the most appropriate register class?
     */
    RegisterClass RegClassBySize(OpSize size) {
      if (size == kReference) {
        return kRefReg;
      } else {
        return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
                size == kSignedByte) ? kCoreReg : kAnyReg;
      }
    }

    size_t CodeBufferSizeInBytes() {
      return code_buffer_.size() * sizeof(code_buffer_[0]);
    }

    static bool IsPseudoLirOp(int opcode) {
      return (opcode < 0);
    }

    /*
     * LIR operands are 32-bit integers.  Sometimes (especially when managing instructions
     * which require PC-relative fixups) we need the operands to carry pointers.  To do this,
     * we assign these pointers an index in pointer_storage_, and hold that index in the
     * operand array.
     * TUNING: If use of these utilities becomes more common on 32-bit builds, it
     * may be worth conditionally compiling a set of identity functions here.
     */
    uint32_t WrapPointer(void* pointer) {
      uint32_t res = pointer_storage_.Size();
      pointer_storage_.Insert(pointer);
      return res;
    }

    void* UnwrapPointer(size_t index) {
      return pointer_storage_.Get(index);
    }

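    // Illustrative only (not part of the original header): round-tripping a
    // pointer through a 32-bit LIR operand.  tab_rec and the opcode are
    // hypothetical.
    //
    //   uint32_t idx = WrapPointer(tab_rec);   // Store an index, not the pointer.
    //   LIR* inst = NewLIR1(kSomePseudoOpcode, idx);
    //   ...
    //   SwitchTable* tab =
    //       static_cast<SwitchTable*>(UnwrapPointer(inst->operands[0]));
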
    // strdup(), but allocates from the arena.
    char* ArenaStrdup(const char* str) {
      size_t len = strlen(str) + 1;
      char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
      if (res != NULL) {
        strncpy(res, str, len);
      }
      return res;
    }

    // Shared by all targets - implemented in codegen_util.cc
    void AppendLIR(LIR* lir);
    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

    /**
     * @brief Provides the maximum number of compiler temporaries that the backend can/wants
     * to place in a frame.
     * @return Returns the maximum number of compiler temporaries.
     */
    size_t GetMaxPossibleCompilerTemps() const;

    /**
     * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
     * @return Returns the size in bytes for space needed for compiler temporary spill region.
     */
    size_t GetNumBytesForCompilerTempSpillRegion();

    DexOffset GetCurrentDexPc() const {
      return current_dalvik_offset_;
    }

    RegisterClass ShortyToRegClass(char shorty_type);
    RegisterClass LocToRegClass(RegLocation loc);
    int ComputeFrameSize();
    virtual void Materialize();
    virtual CompiledMethod* GetCompiledMethod();
    void MarkSafepointPC(LIR* inst);
    void MarkSafepointPCAfter(LIR* after);
    void SetupResourceMasks(LIR* lir);
    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
    void SetupRegMask(ResourceMask* mask, int reg);
    void ClearRegMask(ResourceMask* mask, int reg);
    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
    void EliminateLoad(LIR* lir, int reg_id);
    void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
    void DumpPromotionMap();
    void CodegenDump();
    LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
    LIR* NewLIR0(int opcode);
    LIR* NewLIR1(int opcode, int dest);
    LIR* NewLIR2(int opcode, int dest, int src1);
    LIR* NewLIR2NoDest(int opcode, int src, int info);
    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
    LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
    LIR* AddWordData(LIR** constant_list_p, int value);
    LIR* AddWideData(LIR** constant_list_p, int val_lo, int val_hi);
    void ProcessSwitchTables();
    void DumpSparseSwitchTable(const uint16_t* table);
    void DumpPackedSwitchTable(const uint16_t* table);
    void MarkBoundary(DexOffset offset, const char* inst_str);
    void NopLIR(LIR* lir);
    void UnlinkLIR(LIR* lir);
    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
    bool IsInexpensiveConstant(RegLocation rl_src);
    ConditionCode FlipComparisonOrder(ConditionCode before);
    ConditionCode NegateComparison(ConditionCode before);
    virtual void InstallLiteralPools();
    void InstallSwitchTables();
    void InstallFillArrayData();
    bool VerifyCatchEntries();
    void CreateMappingTables();
    void CreateNativeGcMap();
    int AssignLiteralOffset(CodeOffset offset);
    int AssignSwitchTablesOffset(CodeOffset offset);
    int AssignFillArrayDataOffset(CodeOffset offset);
    virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
    void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
    void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);

    virtual void StartSlowPath(LIRSlowPath* slowpath) {}
    virtual void BeginInvoke(CallInfo* info) {}
    virtual void EndInvoke(CallInfo* info) {}

    // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
    virtual RegLocation NarrowRegLoc(RegLocation loc);

    // Shared by all targets - implemented in local_optimizations.cc
    void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
    virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

    // Shared by all targets - implemented in ralloc_util.cc
    int GetSRegHi(int lowSreg);
    bool LiveOut(int s_reg);
    void SimpleRegAlloc();
    void ResetRegPool();
    void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
    void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
    void DumpCoreRegPool();
    void DumpFpRegPool();
    void DumpRegPools();
    /* Mark a temp register as dead.  Does not affect allocation state. */
    void Clobber(RegStorage reg);
    void ClobberSReg(int s_reg);
    void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
    int SRegToPMap(int s_reg);
    void RecordCorePromotion(RegStorage reg, int s_reg);
    RegStorage AllocPreservedCoreReg(int s_reg);
    void RecordFpPromotion(RegStorage reg, int s_reg);
    RegStorage AllocPreservedFpReg(int s_reg);
    virtual RegStorage AllocPreservedSingle(int s_reg);
    virtual RegStorage AllocPreservedDouble(int s_reg);
    RegStorage AllocTempBody(GrowableArray<RegisterInfo*>& regs, int* next_temp, bool required);
    virtual RegStorage AllocTemp(bool required = true);
    virtual RegStorage AllocTempWide(bool required = true);
    virtual RegStorage AllocTempRef(bool required = true);
    virtual RegStorage AllocTempSingle(bool required = true);
    virtual RegStorage AllocTempDouble(bool required = true);
    virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
    virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
    void FlushReg(RegStorage reg);
    void FlushRegWide(RegStorage reg);
    RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
    RegStorage FindLiveReg(GrowableArray<RegisterInfo*>& regs, int s_reg);
    virtual void FreeTemp(RegStorage reg);
    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
    virtual bool IsLive(RegStorage reg);
    virtual bool IsTemp(RegStorage reg);
    bool IsPromoted(RegStorage reg);
    bool IsDirty(RegStorage reg);
    virtual void LockTemp(RegStorage reg);
    void ResetDef(RegStorage reg);
    void NullifyRange(RegStorage reg, int s_reg);
    void MarkDef(RegLocation rl, LIR* start, LIR* finish);
    void MarkDefWide(RegLocation rl, LIR* start, LIR* finish);
    void ResetDefLoc(RegLocation rl);
    void ResetDefLocWide(RegLocation rl);
    void ResetDefTracking();
    void ClobberAllTemps();
    void FlushSpecificReg(RegisterInfo* info);
    void FlushAllRegs();
    bool RegClassMatches(int reg_class, RegStorage reg);
    void MarkLive(RegLocation loc);
    void MarkTemp(RegStorage reg);
    void UnmarkTemp(RegStorage reg);
    void MarkWide(RegStorage reg);
    void MarkNarrow(RegStorage reg);
    void MarkClean(RegLocation loc);
    void MarkDirty(RegLocation loc);
    void MarkInUse(RegStorage reg);
    bool CheckCorePoolSanity();
    virtual RegLocation UpdateLoc(RegLocation loc);
    virtual RegLocation UpdateLocWide(RegLocation loc);
    RegLocation UpdateRawLoc(RegLocation loc);

    /**
     * @brief Used to prepare a register location to receive a wide value.
     * @see EvalLoc
     * @param loc The location where the value will be stored.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary in physical register pairs.
     */
    virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

    /**
     * @brief Used to prepare a register location to receive a value.
     * @param loc The location where the value will be stored.
     * @param reg_class Type of register needed.
     * @param update Whether the liveness information should be updated.
     * @return Returns the properly typed temporary in a physical register.
     */
    virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);

    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
    void DumpCounts(const RefCounts* arr, int size, const char* msg);
    void DoPromotion();
    int VRegOffset(int v_reg);
    int SRegOffset(int s_reg);
    RegLocation GetReturnWide(RegisterClass reg_class);
    RegLocation GetReturn(RegisterClass reg_class);
    RegisterInfo* GetRegInfo(RegStorage reg);

    // Shared by all targets - implemented in gen_common.cc.
    void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
    virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                  RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    virtual void HandleSlowPaths();
    void GenBarrier();
    void GenDivZeroException();
    // c_code holds condition code that's generated from testing divisor against 0.
    void GenDivZeroCheck(ConditionCode c_code);
    // reg holds divisor.
    void GenDivZeroCheck(RegStorage reg);
    void GenArrayBoundsCheck(RegStorage index, RegStorage length);
    void GenArrayBoundsCheck(int32_t index, RegStorage length);
    LIR* GenNullCheck(RegStorage reg);
    void MarkPossibleNullPointerException(int opt_flags);
    void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
    void MarkPossibleStackOverflowException();
    void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
    LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
    LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
    virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                 LIR* taken, LIR* fall_through);
    virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenSput(MIR* mir, RegLocation rl_src,
                 bool is_long_or_double, bool is_object);
    void GenSget(MIR* mir, RegLocation rl_dest,
                 bool is_long_or_double, bool is_object);
    void GenIGet(MIR* mir, int opt_flags, OpSize size,
                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenIPut(MIR* mir, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                        RegLocation rl_src);

    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src1, RegLocation rl_src2);
    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
    virtual void GenSuspendTest(int opt_flags);
    virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);

    // This will be overridden by x86 implementation.
    virtual void GenConstWide(RegLocation rl_dest, int64_t value);
    virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src1, RegLocation rl_src2);

    // Shared by all targets - implemented in gen_invoke.cc.
    LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                    bool use_link = true);
    RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);

    void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
    void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
    void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
    void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                      bool safepoint_pc);
    void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
                                         bool safepoint_pc);
    void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
                                         bool safepoint_pc);
    void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                    bool safepoint_pc);
    void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                                 RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                    RegStorage arg1, int arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
                                       bool safepoint_pc);
    void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                    RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                            RegLocation arg0, RegLocation arg1,
                                                            RegLocation arg2,
                                                            bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void GenInvokeNoInline(CallInfo* info);
    virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                     NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx,
                                     uintptr_t direct_code, uintptr_t direct_method,
                                     InvokeType type, bool skip_this);
    virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                                   NextCallInsn next_call_insn,
                                   const MethodReference& target_method,
                                   uint32_t vtable_idx,
                                   uintptr_t direct_code, uintptr_t direct_method,
                                   InvokeType type, bool skip_this);

    /**
     * @brief Used to determine the register location of the destination.
     * @details This is needed during generation of inline intrinsics because it finds the
     * destination of the return value: either the physical register or the target of
     * move-result.
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTarget(CallInfo* info);

    /**
     * @brief Used to determine the wide register location of the destination.
     * @see InlineTarget
     * @param info Information about the invoke.
     * @return Returns the destination location.
     */
    RegLocation InlineTargetWide(CallInfo* info);

    bool GenInlinedReferenceGetReferent(CallInfo* info);
    virtual bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
    bool GenInlinedAbsInt(CallInfo* info);
    virtual bool GenInlinedAbsLong(CallInfo* info);
    virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
    virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    virtual bool GenInlinedCeil(CallInfo* info);
    virtual bool GenInlinedFloor(CallInfo* info);
    virtual bool GenInlinedRint(CallInfo* info);
    virtual bool GenInlinedRound(CallInfo* info, bool is_double);
    virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
    virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    virtual bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    virtual int LoadArgRegs(CallInfo* info, int call_state,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                            bool skip_this);

    990     // Shared by all targets - implemented in gen_loadstore.cc.
    991     RegLocation LoadCurrMethod();
    992     void LoadCurrMethodDirect(RegStorage r_tgt);
    993     virtual LIR* LoadConstant(RegStorage r_dest, int value);
    994     // Natural word size.
    995     virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
    996       return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
    997     }
    998     // Load 32 bits, regardless of target.
    999     virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest)  {
   1000       return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
   1001     }
   1002     // Load a reference at base + displacement and decompress into register.
   1003     virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
   1004                              VolatileKind is_volatile) {
   1005       return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
   1006     }
   1007     // Load a reference at base + index and decompress into register.
   1008     virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
   1009                                 int scale) {
   1010       return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
   1011     }
   1012     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
   1013     virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
   1014     // Same as above, but derive the target register class from the location record.
   1015     virtual RegLocation LoadValue(RegLocation rl_src);
   1016     // Load Dalvik value with 64-bit memory storage.
   1017     virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
   1018     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
   1019     virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
   1020     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
   1021     virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
   1022     // Load Dalvik value with 64-bit memory storage.
   1023     virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
   1024     // Load Dalvik value with 64-bit memory storage.
   1025     virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
   1026     // Store an item of natural word size.
   1027     virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
   1028       return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
   1029     }
   1030     // Store an uncompressed reference into a compressed 32-bit container.
   1031     virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
   1032                               VolatileKind is_volatile) {
   1033       return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
   1034     }
   1035     // Store an uncompressed reference into a compressed 32-bit container by index.
   1036     virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
   1037                                  int scale) {
   1038       return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
   1039     }
   1040     // Store 32 bits, regardless of target.
   1041     virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
   1042       return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
   1043     }
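
    // Illustrative usage of the wrappers above (a sketch only; rs_obj, rs_val and
    // 'field_offset' are hypothetical names, not part of the interface):
    //
    //   LoadRefDisp(rs_obj, field_offset, rs_val, kNotVolatile);   // decompressing ref load
    //   Load32Disp(rs_obj, field_offset, rs_val);                  // plain 32-bit scalar load
    //   StoreRefDisp(rs_obj, field_offset, rs_val, kNotVolatile);  // compressing ref store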

    /**
     * @brief Used to do the final store in the destination as per bytecode semantics.
     * @param rl_dest The destination Dalvik register location.
     * @param rl_src The source register location. It can be either a physical register or
     *  a Dalvik register.
     */
    virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination Dalvik register location.
     * @param rl_src The source register location. It can be either a physical register or
     *  a Dalvik register.
     */
    virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store to a destination as per bytecode semantics.
     * @see StoreValue
     * @param rl_dest The destination Dalvik register location.
     * @param rl_src The source register location. It must be kLocPhysReg.
     *
     * This is used for x86 two-operand computations, where we have already computed the
     * correct value in a register and now need to associate it with the destination,
     * avoiding the extra register copy that a call to StoreValue would produce.
     */
    virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

    /**
     * @brief Used to do the final store in a wide destination as per bytecode semantics.
     * @see StoreValueWide
     * @param rl_dest The destination Dalvik register location.
     * @param rl_src The source register location. It must be kLocPhysReg.
     *
     * This is used for x86 two-operand computations, where we have already computed the
     * correct values in registers and now need to associate them with the destination,
     * avoiding the extra pair of register copies that a call to StoreValueWide would produce.
     */
    virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);
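
    // Sketch of the intended two-operand pattern (illustrative only; kOpAdd and the
    // locations are example choices, not prescribed code):
    //
    //   RegLocation rl_result = LoadValue(rl_dest, kCoreReg);  // dest doubles as an operand
    //   OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);           // two-operand op updates it in place
    //   StoreFinalValue(rl_dest, rl_result);                   // register the result, no extra copy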

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    bool SpecialMIR2LIR(const InlineMethod& special);
    virtual void MethodMIR2LIR();
    // Update LIR for verbose listings.
    void UpdateLIROffsets();

    /*
     * @brief Load the code address of a dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param symbolic_reg Symbolic register that will contain the code address.
     * @note The register will be passed to TargetReg to get the physical register.
     */
    void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                         SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Method* of a dex method into the register.
     * @param target_method The MethodReference of the method to be invoked.
     * @param type How the method will be invoked.
     * @param symbolic_reg Symbolic register that will contain the method address.
     * @note The register will be passed to TargetReg to get the physical register.
     */
    virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg);

    /*
     * @brief Load the Class* of a Dex class type into the register.
     * @param type_idx The type index of the class to be loaded.
     * @param symbolic_reg Symbolic register that will contain the class address.
     * @note The register will be passed to TargetReg to get the physical register.
     */
    virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);

    // Routines that work for the generic case, but may be overridden by target.
    /*
     * @brief Compare memory to immediate, and branch if the condition holds.
     * @param cond The condition code that when true will branch to the target.
     * @param temp_reg A temporary register that can be used if the architecture does not
     * support a compare directly against memory.
     * @param base_reg The register holding the base address.
     * @param offset The offset from the base.
     * @param check_value The immediate to compare to.
     * @param target Branch target (or nullptr).
     * @param compare Output for getting the LIR for the comparison (or nullptr).
     * @returns The branch instruction that was generated.
     */
    virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target, LIR** compare);
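
    // A generic fallback strategy (an illustrative sketch, not the required
    // implementation): a target lacking a memory-immediate compare can materialize
    // the memory operand into temp_reg and compare against the register instead:
    //
    //   LIR* load = Load32Disp(base_reg, offset, temp_reg);
    //   if (compare != nullptr) {
    //     *compare = load;
    //   }
    //   return OpCmpImmBranch(cond, temp_reg, check_value, target);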

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual LIR* CheckSuspendUsingLoad() = 0;

    virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;

    virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) = 0;
    virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) = 0;
    virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
    virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                               OpSize size, VolatileKind is_volatile) = 0;
    virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) = 0;
    virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;

    // Required for target - register utilities.

    bool IsSameReg(RegStorage reg1, RegStorage reg2) {
      RegisterInfo* info1 = GetRegInfo(reg1);
      RegisterInfo* info2 = GetRegInfo(reg2);
      return (info1->Master() == info2->Master() &&
             (info1->StorageMask() & info2->StorageMask()) != 0);
    }
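
    // Note on the check above (explanatory, not normative): two RegStorage values
    // count as the "same" register when they are views of one physical register.
    // For example, on a target where a 64-bit register and its low 32-bit half
    // share a Master, their storage masks overlap and IsSameReg returns true;
    // two distinct solo registers have disjoint masks and return false.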

    /**
     * @brief Portable way of getting special registers from the backend.
     * @param reg Enumeration describing the purpose of the register.
     * @return Return the #RegStorage corresponding to the given purpose @p reg.
     * @note This function is currently allowed to return any suitable view of the registers
     *   (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
     */
    virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;

    /**
     * @brief Portable way of getting special registers from the backend.
     * @param reg Enumeration describing the purpose of the register.
     * @param wide_kind What kind of view of the special register is required.
     * @return Return the #RegStorage corresponding to the given purpose @p reg.
     *
     * @note For a 32-bit system, wide (kWide) views only make sense for the argument registers
     *       and the return. In that case, this function should return a pair where the first
     *       component of the result will be the indicated special register.
     */
    virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
      if (wide_kind == kWide) {
        DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
        COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
                       (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
                       (kArg7 == kArg6 + 1), kargs_range_unexpected);
        COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
                       (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
                       (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
        COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
        return RegStorage::MakeRegPair(TargetReg(reg),
                                       TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
      } else {
        return TargetReg(reg);
      }
    }
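
    // Example of the contract above: with this default implementation,
    // TargetReg(kArg0, kWide) yields MakeRegPair(TargetReg(kArg0), TargetReg(kArg1)),
    // i.e. the (kArg0, kArg1) pair, while TargetReg(kArg0, kNotWide) yields the
    // single kArg0 register.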

    /**
     * @brief Portable way of getting a special register for storing a pointer.
     * @see TargetReg()
     */
    virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
      return TargetReg(reg);
    }

    // Get a reg storage corresponding to the wide & ref flags of the reg location.
    virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
      if (loc.ref) {
        return TargetReg(reg, kRef);
      } else {
        return TargetReg(reg, loc.wide ? kWide : kNotWide);
      }
    }

    virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnRef() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCallerSave() = 0;
    virtual void FreeCallTemps() = 0;
    virtual void LockCallTemps() = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual void AssembleLIR() = 0;
    virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                          ResourceMask* use_mask, ResourceMask* def_mask) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;

    // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
    //       take care of this.
    virtual ResourceMask GetPCUseDefEncoding() const = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual size_t GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;

    // Get the register class for load/store of a field.
    virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

    // Required for target - Dalvik-level generators.
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;

    /**
     * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
     * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
     * that applies to integers. The generated code will write the smallest or largest value
     * directly into the destination register as specified by the invoke information.
     * @param info Information about the invoke.
     * @param is_min If true, generates code that computes the minimum. Otherwise computes the
     * maximum.
     * @param is_long If true, the value is a long. Otherwise the value is an int.
     * @return Returns true if successfully generated.
     */
    virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
    virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);

    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                     bool is_div) = 0;
    /*
     * @brief Generate an integer div or rem operation.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param rl_src2 Divisor Location.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     * @param check_zero 'true' if an exception should be generated if the divisor is 0.
     */
    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) = 0;
    /*
     * @brief Generate an integer div or rem operation by a literal.
     * @param rl_dest Destination Location.
     * @param rl_src1 Numerator Location.
     * @param lit Divisor.
     * @param is_div 'true' if this is a division, 'false' for a remainder.
     */
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                     bool is_div) = 0;
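
    // Illustrative strength reduction (a sketch in pseudo-LIR; t0/num/dst are
    // hypothetical RegStorage values, not part of the interface): a backend would
    // typically lower a divide by a power-of-two literal such as 4 into a
    // bias-and-shift sequence instead of a hardware divide:
    //
    //   OpRegRegImm(kOpAsr, t0, num, 31);  // t0 = sign mask (all ones if num < 0)
    //   OpRegRegImm(kOpLsr, t0, t0, 30);   // t0 = (num < 0) ? 3 : 0 (the rounding bias)
    //   OpRegReg(kOpAdd, t0, num);         // t0 = num + bias
    //   OpRegRegImm(kOpAsr, dst, t0, 2);   // dst = (num + bias) >> log2(4)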
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

    /**
     * @brief Used for generating code that throws ArithmeticException if the wide divisor is zero.
     * @details This is used for generating DivideByZero checks when the divisor is held in two
     *  separate registers; the exception is thrown only when both halves are zero.
     * @param reg The register holding the pair of 32-bit values.
     */
    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

    /*
     * @brief Handle machine-specific extended MIR opcodes.
     * @param bb The basic block the MIR comes from.
     * @param mir The MIR whose opcode is not a standard extended MIR opcode.
     * @note The base class implementation will abort for unknown opcodes.
     */
    virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

    /**
     * @brief Lowers the kMirOpSelect MIR into LIR.
     * @param bb The basic block the MIR comes from.
     * @param mir The MIR whose opcode is kMirOpSelect.
     */
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

    /**
     * @brief Generates code to select one of the given constants depending on the given opcode.
     */
    virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                  int dest_reg_class) = 0;

    /**
     * @brief Used to generate a memory barrier in an architecture-specific way.
     * @details The last generated LIR is considered for reuse as the barrier: if it can be
     * updated so that it also provides the barrier semantics, it is used as such. Otherwise,
     * a new LIR that provides those semantics is generated.
     * @param barrier_kind The kind of memory barrier to generate.
     * @return whether a new instruction was generated.
     */
    virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;

    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;

    // Create code for switch statements. Will decide between short and long versions below.
    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);

    // Potentially backend-specific versions of switch instructions for shorter switch statements.
    // The default implementation will create a chained compare-and-branch.
    virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
    // Backend-specific versions of switch instructions for longer switch statements.
    virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
    virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;

    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale,
                             bool card_mark) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
    virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual void OpEndIT(LIR* it) = 0;
    virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
    virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
    virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
    virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

    /**
     * @brief Used to generate an LIR that does a load from mem to reg.
     * @param r_dest The destination physical register.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param move_type Specification of the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                             MoveType move_type) = 0;

    /**
     * @brief Used to generate an LIR that does a store from reg to mem.
     * @param r_base The base physical register for the memory operand.
     * @param offset The displacement for the memory operand.
     * @param r_src The source physical register.
     * @param move_type Specification of the move desired (size, alignment, register kind).
     * @return Returns the generated move LIR.
     */
    virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                             MoveType move_type) = 0;

    /**
     * @brief Used for generating a conditional register-to-register operation.
     * @param op The opcode kind.
     * @param cc The condition code that when true will perform the opcode.
     * @param r_dest The destination physical register.
     * @param r_src The source physical register.
     * @return Returns the newly created LIR or null in case of creation failure.
     */
    virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

    virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
    virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
    virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;
    virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
      return InexpensiveConstantInt(value);
    }
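
    // Sketch of intent (illustrative only; the 16-bit width and the IsInt helper
    // are assumptions, not prescribed): a backend whose instruction encodings
    // carry a 16-bit immediate might implement the base query roughly as
    //
    //   bool InexpensiveConstantInt(int32_t value) OVERRIDE {
    //     return IsInt(16, value);  // cheap to rematerialize, so don't spill it
    //   }
    //
    // letting the register allocator rematerialize such constants on demand.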

    // May be optimized by targets.
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

    // Temp workaround.
    void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);

    virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }
    /*
     * @brief Returns the index of the lowest set bit in 'x'.
     * @param x Value to be examined.
     * @returns The bit number of the lowest bit set in the value.
     */
    int32_t LowestSetBit(uint64_t x);
    /*
     * @brief Is this value a power of two?
     * @param x Value to be examined.
     * @returns 'true' if only 1 bit is set in the value.
     */
    bool IsPowerOfTwo(uint64_t x);
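    // For example (following the contracts above): LowestSetBit(0x18) == 3, since
    // bit 3 is the lowest bit set in 0b11000; IsPowerOfTwo(64) is true (exactly one
    // bit set), while IsPowerOfTwo(0) and IsPowerOfTwo(12) are false.
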
    /*
     * @brief Do these SRs overlap?
     * @param rl_op1 One RegLocation.
     * @param rl_op2 The other RegLocation.
     * @return 'true' if the VR pairs overlap.
     *
     * Check to see if a result pair has a misaligned overlap with an operand pair.  This
     * is not usual for dx to generate, but it is legal (for now).  In a future rev of
     * dex, we'll want to make this case illegal.
     */
    bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);

    /*
     * @brief Force a location (in a register) into a temporary register.
     * @param loc Location of the result.
     * @returns The updated location.
     */
    virtual RegLocation ForceTemp(RegLocation loc);

    /*
     * @brief Force a wide location (in registers) into temporary registers.
     * @param loc Location of the result.
     * @returns The updated location.
     */
    virtual RegLocation ForceTempWide(RegLocation loc);

    static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
      return wide ? k64 : ref ? kReference : k32;
    }
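    // E.g. LoadStoreOpSize(true, false) == k64, LoadStoreOpSize(false, true) == kReference,
    // and LoadStoreOpSize(false, false) == k32; per the expression above, 'wide' takes
    // precedence if both flags are set.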

    virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                    RegLocation rl_dest, RegLocation rl_src);

    void AddSlowPath(LIRSlowPath* slowpath);

    /*
     * @brief Generate the code to implement instanceof against a class.
     * @param needs_access_check 'true' if we must check the access.
     * @param type_known_final 'true' if the type is known to be a final class.
     * @param type_known_abstract 'true' if the type is known to be an abstract class.
     * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
     * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
     * @param type_idx Type index to use if use_declaring_class is 'false'.
     * @param rl_dest Result to be set to 0 or 1.
     * @param rl_src Object to be tested.
     */
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest,
                                    RegLocation rl_src);
    /*
     * @brief Generate the debug_frame FDE information if possible.
     * @returns Pointer to a vector containing the CFI information, or NULL.
     */
    virtual std::vector<uint8_t>* ReturnCallFrameInformation();

    /**
     * @brief Used to insert a marker that can be used to associate MIR with LIR.
     * @details Only inserts the marker if verbosity is enabled.
     * @param mir The mir that is currently being generated.
     */
    void GenPrintLabel(MIR* mir);

    /**
     * @brief Used to generate the return sequence when there is no frame.
     * @details Assumes that the return registers have already been populated.
     */
    virtual void GenSpecialExitSequence() = 0;

    /**
     * @brief Used to generate code for special methods that are known to be
     * small enough to work in frameless mode.
     * @param bb The basic block of the first MIR.
     * @param mir The first MIR of the special method.
     * @param special Information about the special method.
     * @return Returns whether or not this was handled successfully. Returns false
     * if the caller should punt to normal MIR2LIR conversion.
     */
    virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

  protected:
    void ClobberBody(RegisterInfo* p);
    void SetCurrentDexPc(DexOffset dexpc) {
      current_dalvik_offset_ = dexpc;
    }

    /**
     * @brief Used to lock a register if the argument at in_position was passed that way.
     * @details Does nothing if the argument is passed via the stack.
     * @param in_position The argument number whose register to lock.
     * @param wide Whether the argument is wide.
     */
    void LockArg(int in_position, bool wide = false);

    /**
     * @brief Used to load a VR argument to a physical register.
     * @details The load is only done if the argument is not already in a physical register.
     * LockArg must have been previously called.
     * @param in_position The argument number to load.
     * @param wide Whether the argument is 64-bit or not.
     * @return Returns the register (or register pair) for the loaded argument.
     */
    RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);

    /**
     * @brief Used to load a VR argument directly to a specified register location.
     * @param in_position The argument number to place in the register.
     * @param rl_dest The register location where to place the argument.
     */
    void LoadArgDirect(int in_position, RegLocation rl_dest);

    /**
     * @brief Used to generate LIR for a special getter method.
     * @param mir The mir that represents the iget.
     * @param special Information about the special getter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIGet(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special setter method.
     * @param mir The mir that represents the iput.
     * @param special Information about the special setter method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

    /**
     * @brief Used to generate LIR for a special return-args method.
     * @param mir The mir that represents the return of an argument.
     * @param special Information about the special return-args method.
     * @return Returns whether LIR was successfully generated.
     */
    bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

    void AddDivZeroCheckSlowPath(LIR* branch);

    // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
    // kArg2 as a temp.
    virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

    /**
     * @brief Load a constant into a RegLocation.
     * @param rl_dest Destination RegLocation.
     * @param value Constant value.
     */
    virtual void GenConst(RegLocation rl_dest, int value);

    /**
     * Returns true iff wide GPRs are just different views on the same physical register.
     */
    virtual bool WideGPRsAreAliases() = 0;

    /**
     * Returns true iff wide FPRs are just different views on the same physical register.
     */
    virtual bool WideFPRsAreAliases() = 0;
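
    // For intuition (illustrative, not normative): on a 64-bit target where a wide
    // value lives in a single register whose 32-bit name is just a narrower view of
    // it (e.g. x0/w0 on arm64), these return true; on a target where a wide value
    // occupies a pair of distinct registers, they return false.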


    enum class WidenessCheck {  // private
      kIgnoreWide,
      kCheckWide,
      kCheckNotWide
    };

    enum class RefCheck {  // private
      kIgnoreRef,
      kCheckRef,
      kCheckNotRef
    };

    enum class FPCheck {  // private
      kIgnoreFP,
      kCheckFP,
      kCheckNotFP
    };

    /**
     * Check whether a reg storage seems well-formed, that is, if a reg storage is valid,
     * that it has the expected form for the flags.
     * The kIgnore* values skip the corresponding check, the kCheck* values require the
     * property to hold, and the kCheckNot* values require it not to hold.
     */
    void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp, bool fail,
                             bool report)
        const;

    /**
     * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
     * that it has the expected size.
     */
    void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;

    // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
    // kReportSizeError.
    void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
    // See CheckRegLocationImpl.
    void CheckRegLocation(RegLocation rl) const;

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;                        // Constants.
    LIR* method_literal_list_;                 // Method literals requiring patching.
    LIR* class_literal_list_;                  // Class literals requiring patching.
    LIR* code_literal_list_;                   // Code literals requiring patching.
    LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<RegisterInfo*> tempreg_info_;
    GrowableArray<RegisterInfo*> reginfo_map_;
    GrowableArray<void*> pointer_storage_;
    CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
    CodeOffset data_offset_;            // Starting offset of the literal pool.
    size_t total_size_;                 // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions.  For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests.  For now we'll handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here.  Rework this.
     */
    DexOffset current_dalvik_offset_;
    size_t estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking.  The same ssa
     * name should never be associated with more than one temp register
     * during the compilation of a single instruction.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
    std::vector<uint8_t> encoded_mapping_table_;
    ArenaVector<uint32_t> core_vmap_table_;
    ArenaVector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;

    GrowableArray<LIRSlowPath*> slow_paths_;

    // The memory reference type for new LIRs.
    // NOTE: Passing this as an explicit parameter to all functions that directly or indirectly
    // invoke RawLIR() would clutter the code and reduce readability.
    ResourceMask::ResourceBit mem_ref_type_;

    // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
    // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
    // (i.e. 8 bytes on a 32-bit arch, 16 bytes on a 64-bit arch) and we use ResourceMaskCache
    // to deduplicate the masks.
    ResourceMaskCache mask_cache_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_