      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
     18 #define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
     19 
     20 #include "dex/compiler_internals.h"
     21 #include "x86_lir.h"
     22 
     23 #include <map>
     24 
     25 namespace art {
     26 
     27 class X86Mir2Lir : public Mir2Lir {
     28  protected:
     29   class InToRegStorageMapper {
     30    public:
     31     virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) = 0;
     32     virtual ~InToRegStorageMapper() {}
     33   };
     34 
     35   class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
     36    public:
     37     explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
     38     virtual ~InToRegStorageX86_64Mapper() {}
     39     virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref);
     40    protected:
     41     Mir2Lir* ml_;
     42    private:
     43     int cur_core_reg_;
     44     int cur_fp_reg_;
     45   };
     46 
     47   class InToRegStorageMapping {
     48    public:
     49     InToRegStorageMapping() : max_mapped_in_(0), is_there_stack_mapped_(false),
     50     initialized_(false) {}
     51     void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
     52     int GetMaxMappedIn() { return max_mapped_in_; }
     53     bool IsThereStackMapped() { return is_there_stack_mapped_; }
     54     RegStorage Get(int in_position);
     55     bool IsInitialized() { return initialized_; }
     56    private:
     57     std::map<int, RegStorage> mapping_;
     58     int max_mapped_in_;
     59     bool is_there_stack_mapped_;
     60     bool initialized_;
     61   };
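           /*
            * A minimal sketch (not the actual implementation) of how the two classes above are
            * meant to cooperate: Initialize() walks the incoming argument locations, asks the
            * mapper for the next free register of the matching kind, and records either that
            * register or the fact that the argument is stack mapped. Identifiers other than the
            * declared interface are illustrative.
            *
            *   InToRegStorageX86_64Mapper mapper(this);
            *   InToRegStorageMapping mapping;
            *   mapping.Initialize(arg_locs, count, &mapper);
            *   //   conceptually, per argument: reg = mapper->GetNextReg(is_fp, is_wide, is_ref);
            *   //   a valid reg is recorded in mapping_, otherwise the arg is stack mapped.
            *   RegStorage r = mapping.Get(0);  // physical register for in-position 0, if mapped.
            */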
     62 
     63  public:
     64   X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
     65 
     66   // Required for target - codegen helpers.
     67   bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
     68                           RegLocation rl_dest, int lit) OVERRIDE;
     69   bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
     70   LIR* CheckSuspendUsingLoad() OVERRIDE;
     71   RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
     72   LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
     73                     OpSize size, VolatileKind is_volatile) OVERRIDE;
     74   LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
     75                        OpSize size) OVERRIDE;
     76   LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
     77   LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
     78   LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
     79                      OpSize size, VolatileKind is_volatile) OVERRIDE;
     80   LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
     81                         OpSize size) OVERRIDE;
     82   void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
     83   void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
     84 
     85   // Required for target - register utilities.
     86   RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
     87   RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
     88     if (wide_kind == kWide) {
     89       if (cu_->target64) {
     90         return As64BitReg(TargetReg32(symbolic_reg));
     91       } else {
     92         // x86: construct a pair.
     93         DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
     94                (kFArg0 <= symbolic_reg && symbolic_reg < kFArg3) ||
     95                (kRet0 == symbolic_reg));
     96         return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
     97                                  TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
     98       }
     99     } else if (wide_kind == kRef && cu_->target64) {
    100       return As64BitReg(TargetReg32(symbolic_reg));
    101     } else {
    102       return TargetReg32(symbolic_reg);
    103     }
    104   }
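           /*
            * Illustrative use of the overload above (a sketch, not taken from a call site): on
            * 32-bit x86 a wide request is satisfied by pairing the symbolic register with its
            * successor, while on x86-64 the same request widens the single register view.
            *
            *   RegStorage ret = TargetReg(kRet0, kWide);
            *   // 32-bit: RegStorage::MakeRegPair(TargetReg32(kRet0), TargetReg32(kRet0 + 1))
            *   // 64-bit: As64BitReg(TargetReg32(kRet0))
            */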
    105   RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
    106     return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
    107   }
    108 
    109   RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
    110 
    111   RegLocation GetReturnAlt() OVERRIDE;
    112   RegLocation GetReturnWideAlt() OVERRIDE;
    113   RegLocation LocCReturn() OVERRIDE;
    114   RegLocation LocCReturnRef() OVERRIDE;
    115   RegLocation LocCReturnDouble() OVERRIDE;
    116   RegLocation LocCReturnFloat() OVERRIDE;
    117   RegLocation LocCReturnWide() OVERRIDE;
    118 
    119   ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
    120   void AdjustSpillMask() OVERRIDE;
    121   void ClobberCallerSave() OVERRIDE;
    122   void FreeCallTemps() OVERRIDE;
    123   void LockCallTemps() OVERRIDE;
    124 
    125   void CompilerInitializeRegAlloc() OVERRIDE;
    126   int VectorRegisterSize() OVERRIDE;
    127   int NumReservableVectorRegisters(bool fp_used) OVERRIDE;
    128 
    129   // Required for target - miscellaneous.
    130   void AssembleLIR() OVERRIDE;
    131   void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
    132   void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
    133                                 ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
    134   const char* GetTargetInstFmt(int opcode) OVERRIDE;
    135   const char* GetTargetInstName(int opcode) OVERRIDE;
    136   std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
    137   ResourceMask GetPCUseDefEncoding() const OVERRIDE;
    138   uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
    139   size_t GetInsnSize(LIR* lir) OVERRIDE;
    140   bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
    141 
    142   // Get the register class for load/store of a field.
    143   RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
    144 
    145   // Required for target - Dalvik-level generators.
    146   void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
    147                    RegLocation rl_dest, int scale) OVERRIDE;
    148   void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
    149                    RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
    150 
    151   void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    152                         RegLocation rl_src2) OVERRIDE;
    153   void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    154                        RegLocation rl_src2) OVERRIDE;
    155   void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    156                 RegLocation rl_src2) OVERRIDE;
    157   void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
    158 
    159   bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
    160   bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
    161   bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
    162   bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
    163   bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
    164   bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
    165   bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
    166   bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
    167   bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
    168 
    169   // Long instructions.
    170   void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    171                       RegLocation rl_src2) OVERRIDE;
    172   void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    173                          RegLocation rl_src2) OVERRIDE;
    174   void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
    175                          RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
    176   void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
    177   void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
    178   void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
    179                       RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
    180 
    181   /*
     182    * @brief Generate a two-address long operation with a constant value
    183    * @param rl_dest location of result
    184    * @param rl_src constant source operand
    185    * @param op Opcode to be generated
    186    * @return success or not
    187    */
    188   bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
    189 
    190   /*
     191    * @brief Generate a three-address long operation with a constant value
    192    * @param rl_dest location of result
    193    * @param rl_src1 source operand
    194    * @param rl_src2 constant source operand
    195    * @param op Opcode to be generated
    196    * @return success or not
    197    */
    198   bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
    199                       Instruction::Code op);
    200   /**
    201    * @brief Generate a long arithmetic operation.
    202    * @param rl_dest The destination.
    203    * @param rl_src1 First operand.
    204    * @param rl_src2 Second operand.
    205    * @param op The DEX opcode for the operation.
    206    * @param is_commutative The sources can be swapped if needed.
    207    */
    208   virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
    209                             Instruction::Code op, bool is_commutative);
    210 
    211   /**
    212    * @brief Generate a two operand long arithmetic operation.
    213    * @param rl_dest The destination.
    214    * @param rl_src Second operand.
    215    * @param op The DEX opcode for the operation.
    216    */
    217   void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
    218 
    219   /**
    220    * @brief Generate a long operation.
    221    * @param rl_dest The destination.  Must be in a register
    222    * @param rl_src The other operand.  May be in a register or in memory.
    223    * @param op The DEX opcode for the operation.
    224    */
    225   virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
    226 
    227 
    228   // TODO: collapse reg_lo, reg_hi
    229   RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
    230       OVERRIDE;
    231   RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
    232   void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
    233   void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
    234   void GenExitSequence() OVERRIDE;
    235   void GenSpecialExitSequence() OVERRIDE;
    236   void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
    237   void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
    238   void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
    239   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
    240   void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
    241                         int32_t true_val, int32_t false_val, RegStorage rs_dest,
    242                         int dest_reg_class) OVERRIDE;
    243   bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
    244   void GenMoveException(RegLocation rl_dest) OVERRIDE;
    245   void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
    246                                      int first_bit, int second_bit) OVERRIDE;
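           /*
            * The "two bit multiplier" is a literal with exactly two bits set; the underlying
            * arithmetic (shown as a sketch, not the emitted code) is
            *
            *   src * lit == (src << first_bit) + (src << second_bit)
            *     when lit == (1 << first_bit) + (1 << second_bit)
            *
            * e.g. lit = 34 = 32 + 2 gives src * 34 == (src << 5) + (src << 1), avoiding a multiply.
            */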
    247   void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
    248   void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
    249   void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
    250   void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
    251 
    252   /**
     253    * @brief Implement instanceof for a final class with x86-specific code.
    254    * @param use_declaring_class 'true' if we can use the class itself.
    255    * @param type_idx Type index to use if use_declaring_class is 'false'.
    256    * @param rl_dest Result to be set to 0 or 1.
    257    * @param rl_src Object to be tested.
    258    */
    259   void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
    260                           RegLocation rl_src) OVERRIDE;
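           /*
            * Conceptually (a sketch, not the emitted sequence): because the class is final it has
            * no subclasses, so instanceof collapses to an exact class-pointer comparison:
            *
            *   result = (obj != nullptr) && (obj->klass_ == resolved_class) ? 1 : 0;
            *
            * where resolved_class comes either from the declaring class or from type_idx, per the
            * parameters above.
            */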
    261 
    262   // Single operation generators.
    263   LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
    264   LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
    265   LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
    266   LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
    267   LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
    268   LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
    269   LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
    270   void OpEndIT(LIR* it) OVERRIDE;
    271   LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
    272   LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
    273   LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
    274   void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
    275   LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
    276   LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
    277   LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
    278   LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
    279   LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
    280   LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
    281   LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
    282   LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
    283   LIR* OpTestSuspend(LIR* target) OVERRIDE;
    284   LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
    285   LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
    286   void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
    287   bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
    288 
    289   bool InexpensiveConstantInt(int32_t value) OVERRIDE;
    290   bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
    291   bool InexpensiveConstantLong(int64_t value) OVERRIDE;
    292   bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
    293 
    294   /*
     295    * @brief Should we try to optimize for two-address instructions?
     296    * @return true if we try to avoid generating three-operand instructions.
    297    */
    298   virtual bool GenerateTwoOperandInstructions() const { return true; }
    299 
    300   /*
    301    * @brief x86 specific codegen for int operations.
    302    * @param opcode Operation to perform.
    303    * @param rl_dest Destination for the result.
    304    * @param rl_lhs Left hand operand.
    305    * @param rl_rhs Right hand operand.
    306    */
    307   void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
    308                      RegLocation rl_rhs) OVERRIDE;
    309 
    310   /*
    311    * @brief Load the Method* of a dex method into the register.
    312    * @param target_method The MethodReference of the method to be invoked.
    313    * @param type How the method will be invoked.
     314    * @param symbolic_reg Symbolic register that will contain the method address.
     315    * @note symbolic_reg will be passed to TargetReg to get the physical register.
    316    */
    317   void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
    318                          SpecialTargetRegister symbolic_reg) OVERRIDE;
    319 
    320   /*
    321    * @brief Load the Class* of a Dex Class type into the register.
     322    * @param type_idx The index of the class type to be loaded.
     323    * @param symbolic_reg Symbolic register that will contain the class address.
     324    * @note symbolic_reg will be passed to TargetReg to get the physical register.
    325    */
    326   void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) OVERRIDE;
    327 
    328   void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
    329 
    330   int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
    331                            NextCallInsn next_call_insn,
    332                            const MethodReference& target_method,
    333                            uint32_t vtable_idx,
    334                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
    335                            bool skip_this) OVERRIDE;
    336 
    337   int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
    338                          NextCallInsn next_call_insn,
    339                          const MethodReference& target_method,
    340                          uint32_t vtable_idx,
    341                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
    342                          bool skip_this) OVERRIDE;
    343 
    344   /*
    345    * @brief Generate a relative call to the method that will be patched at link time.
    346    * @param target_method The MethodReference of the method to be invoked.
    347    * @param type How the method will be invoked.
    348    * @returns Call instruction
    349    */
    350   virtual LIR * CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
    351 
    352   /*
    353    * @brief Handle x86 specific literals
    354    */
    355   void InstallLiteralPools() OVERRIDE;
    356 
    357   /*
    358    * @brief Generate the debug_frame CFI information.
     359    * @returns pointer to vector containing CFI information
    360    */
    361   static std::vector<uint8_t>* ReturnCommonCallFrameInformation(bool is_x86_64);
    362 
    363   /*
    364    * @brief Generate the debug_frame FDE information.
     365    * @returns pointer to vector containing FDE information
    366    */
    367   std::vector<uint8_t>* ReturnCallFrameInformation() OVERRIDE;
    368 
    369   LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
    370 
    371  protected:
    372   RegStorage TargetReg32(SpecialTargetRegister reg);
    373   // Casting of RegStorage
    374   RegStorage As32BitReg(RegStorage reg) {
    375     DCHECK(!reg.IsPair());
    376     if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
    377       if (kFailOnSizeError) {
    378         LOG(FATAL) << "Expected 64b register " << reg.GetReg();
    379       } else {
    380         LOG(WARNING) << "Expected 64b register " << reg.GetReg();
    381         return reg;
    382       }
    383     }
    384     RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
    385                                     reg.GetRawBits() & RegStorage::kRegTypeMask);
    386     DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
    387                              ->GetReg().GetReg(),
    388               ret_val.GetReg());
    389     return ret_val;
    390   }
    391 
    392   RegStorage As64BitReg(RegStorage reg) {
    393     DCHECK(!reg.IsPair());
    394     if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
    395       if (kFailOnSizeError) {
    396         LOG(FATAL) << "Expected 32b register " << reg.GetReg();
    397       } else {
    398         LOG(WARNING) << "Expected 32b register " << reg.GetReg();
    399         return reg;
    400       }
    401     }
    402     RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
    403                                     reg.GetRawBits() & RegStorage::kRegTypeMask);
    404     DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
    405                              ->GetReg().GetReg(),
    406               ret_val.GetReg());
    407     return ret_val;
    408   }
    409 
    410   LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
    411                            RegStorage r_dest, OpSize size);
    412   LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
    413                             RegStorage r_src, OpSize size);
    414 
    415   RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
    416 
    417   int AssignInsnOffsets();
    418   void AssignOffsets();
    419   AssemblerStatus AssembleInstructions(CodeOffset start_addr);
    420 
    421   size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
    422                      int32_t raw_base, int32_t displacement);
    423   void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
    424   void EmitPrefix(const X86EncodingMap* entry,
    425                   int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
    426   void EmitOpcode(const X86EncodingMap* entry);
    427   void EmitPrefixAndOpcode(const X86EncodingMap* entry,
    428                            int32_t reg_r, int32_t reg_x, int32_t reg_b);
    429   void EmitDisp(uint8_t base, int32_t disp);
    430   void EmitModrmThread(uint8_t reg_or_opcode);
    431   void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
    432   void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
    433                         int32_t disp);
    434   void EmitImm(const X86EncodingMap* entry, int64_t imm);
    435   void EmitNullary(const X86EncodingMap* entry);
    436   void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
    437   void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
    438   void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
    439   void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
    440                    int32_t disp);
    441   void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
    442   void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
    443   void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
    444                     int32_t raw_index, int scale, int32_t disp);
    445   void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
    446                     int32_t disp, int32_t raw_reg);
    447   void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
    448   void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
    449                     int32_t raw_disp, int32_t imm);
    450   void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
    451   void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
    452   void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
    453   void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
    454                      int32_t imm);
    455   void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
    456                      int32_t imm);
    457   void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
    458   void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
    459   void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
    460   void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
    461   void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
    462   void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
    463   void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
    464   void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
    465   void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
    466   void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
    467   void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
    468                       int32_t cc);
    469 
    470   void EmitJmp(const X86EncodingMap* entry, int32_t rel);
    471   void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
    472   void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
    473   void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
    474   void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
    475   void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
    476                  int32_t raw_index, int scale, int32_t table_or_disp);
    477   void EmitMacro(const X86EncodingMap* entry, int32_t raw_reg, int32_t offset);
    478   void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
    479   void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
    480                                 int64_t val, ConditionCode ccode);
    481   void GenConstWide(RegLocation rl_dest, int64_t value);
    482   void GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir);
    483   void GenShiftByteVector(BasicBlock *bb, MIR *mir);
    484   void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
    485   void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
    486   void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
    487 
    488   static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
    489 
    490   /*
    491    * @brief Ensure that a temporary register is byte addressable.
     492    * @returns a temporary guaranteed to be byte addressable.
    493    */
    494   virtual RegStorage AllocateByteRegister();
    495 
    496   /*
    497    * @brief Use a wide temporary as a 128-bit register
    498    * @returns a 128-bit temporary register.
    499    */
    500   virtual RegStorage Get128BitRegister(RegStorage reg);
    501 
    502   /*
    503    * @brief Check if a register is byte addressable.
    504    * @returns true if a register is byte addressable.
    505    */
    506   bool IsByteRegister(RegStorage reg);
    507 
    508   void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
    509 
    510   bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
    511 
    512   /*
     513    * @brief Generate inline code for the fast case of String.indexOf.
    514    * @param info Call parameters
     515    * @param zero_based 'true' if the search starts at index 0 of the string.
    516    * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
    517    * generated.
    518    */
    519   bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
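           /*
            * A classic x86 shape for this fast path (shown only as a sketch of the idea) scans the
            * 16-bit character array with a repeated string compare:
            *
            *   mov  edi, <address of first char to scan>   ; illustrative setup
            *   mov  eax, <char being searched for>
            *   mov  ecx, <number of chars remaining>
            *   repne scasw                                 ; advance until AX matches or ECX is 0
            *
            * zero_based selects the variant whose search starts at index 0.
            */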
    520 
    521   /**
     522    * @brief Reserve a fixed number of vector registers from the register pool
    523    * @details The mir->dalvikInsn.vA specifies an N such that vector registers
    524    * [0..N-1] are removed from the temporary pool. The caller must call
    525    * ReturnVectorRegisters before calling ReserveVectorRegisters again.
    526    * Also sets the num_reserved_vector_regs_ to the specified value
    527    * @param mir whose vA specifies the number of registers to reserve
    528    */
    529   void ReserveVectorRegisters(MIR* mir);
    530 
    531   /**
    532    * @brief Return all the reserved vector registers to the temp pool
     533    * @details Returns vector registers [0..num_reserved_vector_regs_-1] to the temp pool.
    534    */
    535   void ReturnVectorRegisters();
    536 
    537   /*
     538    * @brief Load a 128-bit constant into a vector register.
    539    * @param bb The basic block in which the MIR is from.
    540    * @param mir The MIR whose opcode is kMirConstVector
    541    * @note vA is the TypeSize for the register.
     542    * @note vB is the destination XMM register. arg[0..3] are 32-bit constant values.
    543    */
    544   void GenConst128(BasicBlock* bb, MIR* mir);
    545 
    546   /*
    547    * @brief MIR to move a vectorized register to another.
    548    * @param bb The basic block in which the MIR is from.
    549    * @param mir The MIR whose opcode is kMirConstVector.
    550    * @note vA: TypeSize
    551    * @note vB: destination
    552    * @note vC: source
    553    */
    554   void GenMoveVector(BasicBlock *bb, MIR *mir);
    555 
    556   /*
     557    * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
    558    * @param bb The basic block in which the MIR is from.
    559    * @param mir The MIR whose opcode is kMirConstVector.
    560    * @note vA: TypeSize
    561    * @note vB: destination and source
    562    * @note vC: source
    563    */
    564   void GenMultiplyVector(BasicBlock *bb, MIR *mir);
    565 
    566   /*
    567    * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
    568    * @param bb The basic block in which the MIR is from.
    569    * @param mir The MIR whose opcode is kMirConstVector.
    570    * @note vA: TypeSize
    571    * @note vB: destination and source
    572    * @note vC: source
    573    */
    574   void GenAddVector(BasicBlock *bb, MIR *mir);
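           /*
            * Illustrative mapping from the TypeSize in vA to the packed add that would conceptually
            * be selected (a sketch, not the dispatch used here):
            *
            *   1-byte lanes -> paddb      2-byte lanes -> paddw
            *   4-byte lanes -> paddd      8-byte lanes -> paddq
            *   float lanes  -> addps      double lanes -> addpd
            */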
    575 
    576   /*
    577    * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
    578    * @param bb The basic block in which the MIR is from.
    579    * @param mir The MIR whose opcode is kMirConstVector.
    580    * @note vA: TypeSize
    581    * @note vB: destination and source
    582    * @note vC: source
    583    */
    584   void GenSubtractVector(BasicBlock *bb, MIR *mir);
    585 
    586   /*
    587    * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
    588    * @param bb The basic block in which the MIR is from.
    589    * @param mir The MIR whose opcode is kMirConstVector.
    590    * @note vA: TypeSize
    591    * @note vB: destination and source
    592    * @note vC: immediate
    593    */
    594   void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
    595 
    596   /*
    597    * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
    598    * @param bb The basic block in which the MIR is from.
    599    * @param mir The MIR whose opcode is kMirConstVector.
    600    * @note vA: TypeSize
    601    * @note vB: destination and source
    602    * @note vC: immediate
    603    */
    604   void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
    605 
    606   /*
    607    * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
     608    * @param bb The basic block in which the MIR is from.
    609    * @param mir The MIR whose opcode is kMirConstVector.
    610    * @note vA: TypeSize
    611    * @note vB: destination and source
    612    * @note vC: immediate
    613    */
    614   void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
    615 
    616   /*
    617    * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
    618    * @note vA: TypeSize
    619    * @note vB: destination and source
    620    * @note vC: source
    621    */
    622   void GenAndVector(BasicBlock *bb, MIR *mir);
    623 
    624   /*
    625    * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
    626    * @param bb The basic block in which the MIR is from.
    627    * @param mir The MIR whose opcode is kMirConstVector.
    628    * @note vA: TypeSize
    629    * @note vB: destination and source
    630    * @note vC: source
    631    */
    632   void GenOrVector(BasicBlock *bb, MIR *mir);
    633 
    634   /*
    635    * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
    636    * @param bb The basic block in which the MIR is from.
    637    * @param mir The MIR whose opcode is kMirConstVector.
    638    * @note vA: TypeSize
    639    * @note vB: destination and source
    640    * @note vC: source
    641    */
    642   void GenXorVector(BasicBlock *bb, MIR *mir);
    643 
    644   /*
    645    * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
    646    * @param bb The basic block in which the MIR is from.
    647    * @param mir The MIR whose opcode is kMirConstVector.
     648    * @details The instruction does a horizontal addition of the packed elements and then adds the result to the VR.
    649    * @note vA: TypeSize
    650    * @note vB: destination and source VR (not vector register)
    651    * @note vC: source (vector register)
    652    */
    653   void GenAddReduceVector(BasicBlock *bb, MIR *mir);
    654 
    655   /*
    656    * @brief Extract a packed element into a single VR.
    657    * @param bb The basic block in which the MIR is from.
    658    * @param mir The MIR whose opcode is kMirConstVector.
    659    * @note vA: TypeSize
    660    * @note vB: destination VR (not vector register)
    661    * @note vC: source (vector register)
    662    * @note arg[0]: The index to use for extraction from vector register (which packed element).
    663    */
    664   void GenReduceVector(BasicBlock *bb, MIR *mir);
    665 
    666   /*
    667    * @brief Create a vector value, with all TypeSize values equal to vC
    668    * @param bb The basic block in which the MIR is from.
    669    * @param mir The MIR whose opcode is kMirConstVector.
    670    * @note vA: TypeSize.
    671    * @note vB: destination vector register.
    672    * @note vC: source VR (not vector register).
    673    */
    674   void GenSetVector(BasicBlock *bb, MIR *mir);
    675 
    676   /*
    677    * @brief Generate code for a vector opcode.
    678    * @param bb The basic block in which the MIR is from.
    679    * @param mir The MIR whose opcode is a non-standard opcode.
    680    */
    681   void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    682 
    683   /*
    684    * @brief Return the correct x86 opcode for the Dex operation
    685    * @param op Dex opcode for the operation
    686    * @param loc Register location of the operand
    687    * @param is_high_op 'true' if this is an operation on the high word
     688    * @param value Immediate value for the operation. Used for byte variants.
    689    * @returns the correct x86 opcode to perform the operation
    690    */
    691   X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
    692 
    693   /*
    694    * @brief Return the correct x86 opcode for the Dex operation
    695    * @param op Dex opcode for the operation
    696    * @param dest location of the destination.  May be register or memory.
    697    * @param rhs Location for the rhs of the operation.  May be in register or memory.
    698    * @param is_high_op 'true' if this is an operation on the high word
    699    * @returns the correct x86 opcode to perform the operation
    700    * @note at most one location may refer to memory
    701    */
    702   X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
    703                       bool is_high_op);
    704 
    705   /*
    706    * @brief Is this operation a no-op for this opcode and value
    707    * @param op Dex opcode for the operation
    708    * @param value Immediate value for the operation.
    709    * @returns 'true' if the operation will have no effect
    710    */
    711   bool IsNoOp(Instruction::Code op, int32_t value);
    712 
    713   /**
    714    * @brief Calculate magic number and shift for a given divisor
     715    * @param divisor Divisor for the calculation
     716    * @param magic Holds the calculated magic number
     717    * @param shift Holds the calculated shift
    718    * @param is_long 'true' if divisor is jlong, 'false' for jint.
    719    */
    720   void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
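           /*
            * A worked example of the classic Hacker's Delight construction this helper performs
            * (the constants below are the well-known values for a signed 32-bit divide by 7 and
            * are shown purely for illustration): with magic = 0x92492493 and shift = 2, the
            * quotient of a 32-bit int n divided by 7 becomes
            *
            *   int32_t magic = static_cast<int32_t>(0x92492493);           // negative as signed
            *   int32_t hi = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);
            *   hi += n;                                                    // add back: magic < 0
            *   int32_t q = (hi >> 2) + (static_cast<uint32_t>(n) >> 31);   // >> shift, +1 if n < 0
            *
            * e.g. n = 14 gives hi = -6 + 14 = 8 and q = 2.
            */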
    721 
    722   /*
    723    * @brief Generate an integer div or rem operation.
    724    * @param rl_dest Destination Location.
    725    * @param rl_src1 Numerator Location.
    726    * @param rl_src2 Divisor Location.
    727    * @param is_div 'true' if this is a division, 'false' for a remainder.
    728    * @param check_zero 'true' if an exception should be generated if the divisor is 0.
    729    */
    730   RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
    731                         bool is_div, bool check_zero);
    732 
    733   /*
    734    * @brief Generate an integer div or rem operation by a literal.
    735    * @param rl_dest Destination Location.
    736    * @param rl_src Numerator Location.
    737    * @param lit Divisor.
    738    * @param is_div 'true' if this is a division, 'false' for a remainder.
    739    */
    740   RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
    741 
    742   /*
    743    * Generate code to implement long shift operations.
    744    * @param opcode The DEX opcode to specify the shift type.
    745    * @param rl_dest The destination.
    746    * @param rl_src The value to be shifted.
    747    * @param shift_amount How much to shift.
    748    * @returns the RegLocation of the result.
    749    */
    750   RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
    751                                 RegLocation rl_src, int shift_amount);
    752   /*
    753    * Generate an imul of a register by a constant or a better sequence.
    754    * @param dest Destination Register.
    755    * @param src Source Register.
    756    * @param val Constant multiplier.
    757    */
    758   void GenImulRegImm(RegStorage dest, RegStorage src, int val);
    759 
    760   /*
    761    * Generate an imul of a memory location by a constant or a better sequence.
    762    * @param dest Destination Register.
    763    * @param sreg Symbolic register.
    764    * @param displacement Displacement on stack of Symbolic Register.
    765    * @param val Constant multiplier.
    766    */
    767   void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
    768 
    769   /*
    770    * @brief Compare memory to immediate, and branch if condition true.
    771    * @param cond The condition code that when true will branch to the target.
     772    * @param temp_reg A temporary register that can be used if comparing memory against an
     773    * immediate is not supported by the architecture.
    774    * @param base_reg The register holding the base address.
    775    * @param offset The offset from the base.
    776    * @param check_value The immediate to compare to.
    777    * @param target branch target (or nullptr)
    778    * @param compare output for getting LIR for comparison (or nullptr)
    779    */
    780   LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
    781                          int offset, int check_value, LIR* target, LIR** compare);
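           /*
            * On x86 the comparison can take a memory operand directly, so the emitted shape is
            * conceptually (a sketch, not the exact encoding):
            *
            *   cmp  [base_reg + offset], check_value
            *   jcc  target                      ; condition derived from 'cond'
            *
            * temp_reg exists for targets that lack a compare-with-memory form and must load first.
            */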
    782 
    783   void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
    784 
    785   /*
     786    * Can this operation use core registers without temporaries?
    787    * @param rl_lhs Left hand operand.
    788    * @param rl_rhs Right hand operand.
    789    * @returns 'true' if the operation can proceed without needing temporary regs.
    790    */
    791   bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
    792 
    793   /**
     794    * @brief Generates inline code for conversion of long to FP by using x87.
    795    * @param rl_dest The destination of the FP.
    796    * @param rl_src The source of the long.
    797    * @param is_double 'true' if dealing with double, 'false' for float.
    798    */
    799   virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
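           /*
            * x87 is used because fild can load a 64-bit integer directly even in 32-bit mode; the
            * conversion conceptually reduces to (illustrative only):
            *
            *   fild  qword ptr [addr_of_long]     ; push the 64-bit integer onto the x87 stack
            *   fstp  qword ptr [addr_of_result]   ; pop and store as double (dword ptr for float)
            */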
    800 
    801   void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
    802   void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
    803 
    804   LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
    805   LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
    806   LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
    807   LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
    808   LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
    809   void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
    810   void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
    811   void OpTlsCmp(ThreadOffset<4> offset, int val);
    812   void OpTlsCmp(ThreadOffset<8> offset, int val);
    813 
    814   void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
    815 
    816   // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
    817   // in which case false will be returned.
    818   bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val);
    819   void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
    820                   RegLocation rl_src2);
    821   void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
    822   void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
    823   void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
    824                      RegLocation rl_src2, bool is_div);
    825 
    826   void SpillCoreRegs();
    827   void UnSpillCoreRegs();
    828   void UnSpillFPRegs();
    829   void SpillFPRegs();
    830 
    831   /*
     832    * @brief Perform MIR analysis before compiling the method.
     833    * @note Invokes Mir2Lir::Materialize after analysis.
    834    */
    835   void Materialize();
    836 
    837   /*
    838    * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
    839    * without regard to data type.  In practice, this can result in UpdateLoc returning a
     840    * location record for a Dalvik float value in a core register, and vice versa.  For targets
    841    * which can inexpensively move data between core and float registers, this can often be a win.
    842    * However, for x86 this is generally not a win.  These variants of UpdateLoc()
    843    * take a register class argument - and will return an in-register location record only if
    844    * the value is live in a temp register of the correct class.  Additionally, if the value is in
    845    * a temp register of the wrong register class, it will be clobbered.
    846    */
    847   RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
    848   RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
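           /*
            * Typical consumption pattern (a sketch; kCoreReg and kLocPhysReg are the usual Mir2Lir
            * names for the register class and location kind):
            *
            *   RegLocation rl = UpdateLocTyped(rl_src, kCoreReg);
            *   if (rl.location == kLocPhysReg) {
            *     // Already live in a core register: use it directly.
            *   } else {
            *     // In memory (a wrong-class temp would have been clobbered): use a memory
            *     // operand or load into a core temp first.
            *   }
            */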
    849 
    850   /*
    851    * @brief Analyze MIR before generating code, to prepare for the code generation.
    852    */
    853   void AnalyzeMIR();
    854 
    855   /*
    856    * @brief Analyze one basic block.
    857    * @param bb Basic block to analyze.
    858    */
    859   void AnalyzeBB(BasicBlock * bb);
    860 
    861   /*
    862    * @brief Analyze one extended MIR instruction
    863    * @param opcode MIR instruction opcode.
    864    * @param bb Basic block containing instruction.
    865    * @param mir Extended instruction to analyze.
    866    */
    867   void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
    868 
    869   /*
    870    * @brief Analyze one MIR instruction
    871    * @param opcode MIR instruction opcode.
    872    * @param bb Basic block containing instruction.
    873    * @param mir Instruction to analyze.
    874    */
    875   virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
    876 
    877   /*
    878    * @brief Analyze one MIR float/double instruction
    879    * @param opcode MIR instruction opcode.
    880    * @param bb Basic block containing instruction.
    881    * @param mir Instruction to analyze.
    882    */
    883   void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
    884 
    885   /*
    886    * @brief Analyze one use of a double operand.
    887    * @param rl_use Double RegLocation for the operand.
    888    */
    889   void AnalyzeDoubleUse(RegLocation rl_use);
    890 
    891   /*
    892    * @brief Analyze one invoke-static MIR instruction
    893    * @param opcode MIR instruction opcode.
    894    * @param bb Basic block containing instruction.
    895    * @param mir Instruction to analyze.
    896    */
    897   void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
    898 
    899   // Information derived from analysis of MIR
    900 
    901   // The compiler temporary for the code address of the method.
    902   CompilerTemp *base_of_code_;
    903 
     904   // Have we decided to compute a ptr to code and store it in a temporary VR?
    905   bool store_method_addr_;
    906 
    907   // Have we used the stored method address?
    908   bool store_method_addr_used_;
    909 
    910   // Instructions to remove if we didn't use the stored method address.
    911   LIR* setup_method_address_[2];
    912 
    913   // Instructions needing patching with Method* values.
    914   GrowableArray<LIR*> method_address_insns_;
    915 
    916   // Instructions needing patching with Class Type* values.
    917   GrowableArray<LIR*> class_type_address_insns_;
    918 
    919   // Instructions needing patching with PC relative code addresses.
    920   GrowableArray<LIR*> call_method_insns_;
    921 
    922   // Prologue decrement of stack pointer.
    923   LIR* stack_decrement_;
    924 
    925   // Epilogue increment of stack pointer.
    926   LIR* stack_increment_;
    927 
    928   // The list of const vector literals.
    929   LIR *const_vectors_;
    930 
    931   /*
    932    * @brief Search for a matching vector literal
    933    * @param mir A kMirOpConst128b MIR instruction to match.
    934    * @returns pointer to matching LIR constant, or nullptr if not found.
    935    */
    936   LIR *ScanVectorLiteral(MIR *mir);
    937 
    938   /*
    939    * @brief Add a constant vector literal
    940    * @param mir A kMirOpConst128b MIR instruction to match.
    941    */
    942   LIR *AddVectorLiteral(MIR *mir);
    943 
    944   InToRegStorageMapping in_to_reg_storage_mapping_;
    945 
    946   bool WideGPRsAreAliases() OVERRIDE {
    947     return cu_->target64;  // On 64b, we have 64b GPRs.
    948   }
    949   bool WideFPRsAreAliases() OVERRIDE {
    950     return true;  // xmm registers have 64b views even on x86.
    951   }
    952 
    953   /*
    954    * @brief Dump a RegLocation using printf
    955    * @param loc Register location to dump
    956    */
    957   static void DumpRegLocation(RegLocation loc);
    958 
    959   static const X86EncodingMap EncodingMap[kX86Last];
    960 
    961  private:
     962   // The number of vector registers [0..N-1] reserved by a call to ReserveVectorRegisters
    963   int num_reserved_vector_regs_;
    964 };
    965 
    966 }  // namespace art
    967 
    968 #endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
    969