      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include <string>
     18 #include <inttypes.h>
     19 
     20 #include "codegen_x86.h"
     21 #include "dex/compiler_internals.h"
     22 #include "dex/quick/mir_to_lir-inl.h"
     23 #include "dex/reg_storage_eq.h"
     24 #include "mirror/array.h"
     25 #include "mirror/string.h"
     26 #include "x86_lir.h"
     27 
     28 namespace art {
     29 
     30 static constexpr RegStorage core_regs_arr_32[] = {
     31     rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
     32 };
     33 static constexpr RegStorage core_regs_arr_64[] = {
     34     rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
     35     rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
     36 };
     37 static constexpr RegStorage core_regs_arr_64q[] = {
     38     rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
     39     rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
     40 };
     41 static constexpr RegStorage sp_regs_arr_32[] = {
     42     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     43 };
     44 static constexpr RegStorage sp_regs_arr_64[] = {
     45     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     46     rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
     47 };
     48 static constexpr RegStorage dp_regs_arr_32[] = {
     49     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
     50 };
     51 static constexpr RegStorage dp_regs_arr_64[] = {
     52     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
     53     rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
     54 };
     55 static constexpr RegStorage xp_regs_arr_32[] = {
     56     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
     57 };
     58 static constexpr RegStorage xp_regs_arr_64[] = {
     59     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
     60     rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
     61 };
     62 static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
     63 static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
     64 static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
     65 static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
     66 static constexpr RegStorage core_temps_arr_64[] = {
     67     rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
     68     rs_r8, rs_r9, rs_r10, rs_r11
     69 };
     70 
      71 // How to make a register available for promotion:
      72 // 1) Remove the register from the array defining the temps
     73 // 2) Update ClobberCallerSave
     74 // 3) Update JNI compiler ABI:
     75 // 3.1) add reg in JniCallingConvention method
     76 // 3.2) update CoreSpillMask/FpSpillMask
     77 // 4) Update entrypoints
     78 // 4.1) Update constants in asm_support_x86_64.h for new frame size
     79 // 4.2) Remove entry in SmashCallerSaves
     80 // 4.3) Update jni_entrypoints to spill/unspill new callee save reg
     81 // 4.4) Update quick_entrypoints to spill/unspill new callee save reg
     82 // 5) Update runtime ABI
     83 // 5.1) Update quick_method_frame_info with new required spills
     84 // 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
      85 // Note that you cannot use registers corresponding to incoming args
      86 // according to the ABI, and the QCG needs one additional XMM temp for the
      87 // bulk copy in preparation for the call.
     88 static constexpr RegStorage core_temps_arr_64q[] = {
     89     rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
     90     rs_r8q, rs_r9q, rs_r10q, rs_r11q
     91 };
     92 static constexpr RegStorage sp_temps_arr_32[] = {
     93     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     94 };
     95 static constexpr RegStorage sp_temps_arr_64[] = {
     96     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     97     rs_fr8, rs_fr9, rs_fr10, rs_fr11
     98 };
     99 static constexpr RegStorage dp_temps_arr_32[] = {
    100     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    101 };
    102 static constexpr RegStorage dp_temps_arr_64[] = {
    103     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    104     rs_dr8, rs_dr9, rs_dr10, rs_dr11
    105 };
    106 
    107 static constexpr RegStorage xp_temps_arr_32[] = {
    108     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    109 };
    110 static constexpr RegStorage xp_temps_arr_64[] = {
    111     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    112     rs_xr8, rs_xr9, rs_xr10, rs_xr11
    113 };
    114 
    115 static constexpr ArrayRef<const RegStorage> empty_pool;
    116 static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
    117 static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
    118 static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
    119 static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
    120 static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
    121 static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
    122 static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
    123 static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
    124 static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
    125 static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
    126 static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
    127 static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
    128 static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
    129 static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
    130 static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
    131 static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
    132 static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
    133 static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
    134 static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
    135 
    136 static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
    137 static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
    138 
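         // The target-dependent aliases below (argument, return, invoke-target and count registers)
         // are assigned in the X86Mir2Lir constructor, based on whether cu_->target64 is set.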
    139 RegStorage rs_rX86_SP;
    140 
    141 X86NativeRegisterPool rX86_ARG0;
    142 X86NativeRegisterPool rX86_ARG1;
    143 X86NativeRegisterPool rX86_ARG2;
    144 X86NativeRegisterPool rX86_ARG3;
    145 X86NativeRegisterPool rX86_ARG4;
    146 X86NativeRegisterPool rX86_ARG5;
    147 X86NativeRegisterPool rX86_FARG0;
    148 X86NativeRegisterPool rX86_FARG1;
    149 X86NativeRegisterPool rX86_FARG2;
    150 X86NativeRegisterPool rX86_FARG3;
    151 X86NativeRegisterPool rX86_FARG4;
    152 X86NativeRegisterPool rX86_FARG5;
    153 X86NativeRegisterPool rX86_FARG6;
    154 X86NativeRegisterPool rX86_FARG7;
    155 X86NativeRegisterPool rX86_RET0;
    156 X86NativeRegisterPool rX86_RET1;
    157 X86NativeRegisterPool rX86_INVOKE_TGT;
    158 X86NativeRegisterPool rX86_COUNT;
    159 
    160 RegStorage rs_rX86_ARG0;
    161 RegStorage rs_rX86_ARG1;
    162 RegStorage rs_rX86_ARG2;
    163 RegStorage rs_rX86_ARG3;
    164 RegStorage rs_rX86_ARG4;
    165 RegStorage rs_rX86_ARG5;
    166 RegStorage rs_rX86_FARG0;
    167 RegStorage rs_rX86_FARG1;
    168 RegStorage rs_rX86_FARG2;
    169 RegStorage rs_rX86_FARG3;
    170 RegStorage rs_rX86_FARG4;
    171 RegStorage rs_rX86_FARG5;
    172 RegStorage rs_rX86_FARG6;
    173 RegStorage rs_rX86_FARG7;
    174 RegStorage rs_rX86_RET0;
    175 RegStorage rs_rX86_RET1;
    176 RegStorage rs_rX86_INVOKE_TGT;
    177 RegStorage rs_rX86_COUNT;
    178 
    179 RegLocation X86Mir2Lir::LocCReturn() {
    180   return x86_loc_c_return;
    181 }
    182 
    183 RegLocation X86Mir2Lir::LocCReturnRef() {
    184   return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
    185 }
    186 
    187 RegLocation X86Mir2Lir::LocCReturnWide() {
    188   return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
    189 }
    190 
    191 RegLocation X86Mir2Lir::LocCReturnFloat() {
    192   return x86_loc_c_return_float;
    193 }
    194 
    195 RegLocation X86Mir2Lir::LocCReturnDouble() {
    196   return x86_loc_c_return_double;
    197 }
    198 
    199 // Return a target-dependent special register for 32-bit.
    200 RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
    201   RegStorage res_reg = RegStorage::InvalidReg();
    202   switch (reg) {
    203     case kSelf: res_reg = RegStorage::InvalidReg(); break;
    204     case kSuspend: res_reg =  RegStorage::InvalidReg(); break;
    205     case kLr: res_reg =  RegStorage::InvalidReg(); break;
    206     case kPc: res_reg =  RegStorage::InvalidReg(); break;
     207     case kSp: res_reg =  rs_rX86_SP_32; break;  // This must be the concrete register, as
     208                                                 // rs_rX86_SP has a target-specific size.
    209     case kArg0: res_reg = rs_rX86_ARG0; break;
    210     case kArg1: res_reg = rs_rX86_ARG1; break;
    211     case kArg2: res_reg = rs_rX86_ARG2; break;
    212     case kArg3: res_reg = rs_rX86_ARG3; break;
    213     case kArg4: res_reg = rs_rX86_ARG4; break;
    214     case kArg5: res_reg = rs_rX86_ARG5; break;
    215     case kFArg0: res_reg = rs_rX86_FARG0; break;
    216     case kFArg1: res_reg = rs_rX86_FARG1; break;
    217     case kFArg2: res_reg = rs_rX86_FARG2; break;
    218     case kFArg3: res_reg = rs_rX86_FARG3; break;
    219     case kFArg4: res_reg = rs_rX86_FARG4; break;
    220     case kFArg5: res_reg = rs_rX86_FARG5; break;
    221     case kFArg6: res_reg = rs_rX86_FARG6; break;
    222     case kFArg7: res_reg = rs_rX86_FARG7; break;
    223     case kRet0: res_reg = rs_rX86_RET0; break;
    224     case kRet1: res_reg = rs_rX86_RET1; break;
    225     case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    226     case kHiddenArg: res_reg = rs_rAX; break;
    227     case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    228     case kCount: res_reg = rs_rX86_COUNT; break;
    229     default: res_reg = RegStorage::InvalidReg();
    230   }
    231   return res_reg;
    232 }
    233 
    234 RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
    235   LOG(FATAL) << "Do not use this function!!!";
    236   return RegStorage::InvalidReg();
    237 }
    238 
    239 /*
    240  * Decode the register id.
    241  */
    242 ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
    243   /* Double registers in x86 are just a single FP register. This is always just a single bit. */
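           // For example, a core register such as rDX (reg num 2) maps to bit 2, while xmm2 maps to
           // bit kX86FPReg0 + 2.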
    244   return ResourceMask::Bit(
    245       /* FP register starts at bit position 16 */
    246       ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
    247 }
    248 
    249 ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
    250   return kEncodeNone;
    251 }
    252 
    253 void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
    254                                           ResourceMask* use_mask, ResourceMask* def_mask) {
    255   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
    256   DCHECK(!lir->flags.use_def_invalid);
    257 
    258   // X86-specific resource map setup here.
    259   if (flags & REG_USE_SP) {
    260     use_mask->SetBit(kX86RegSP);
    261   }
    262 
    263   if (flags & REG_DEF_SP) {
    264     def_mask->SetBit(kX86RegSP);
    265   }
    266 
    267   if (flags & REG_DEFA) {
    268     SetupRegMask(def_mask, rs_rAX.GetReg());
    269   }
    270 
    271   if (flags & REG_DEFD) {
    272     SetupRegMask(def_mask, rs_rDX.GetReg());
    273   }
    274   if (flags & REG_USEA) {
    275     SetupRegMask(use_mask, rs_rAX.GetReg());
    276   }
    277 
    278   if (flags & REG_USEC) {
    279     SetupRegMask(use_mask, rs_rCX.GetReg());
    280   }
    281 
    282   if (flags & REG_USED) {
    283     SetupRegMask(use_mask, rs_rDX.GetReg());
    284   }
    285 
    286   if (flags & REG_USEB) {
    287     SetupRegMask(use_mask, rs_rBX.GetReg());
    288   }
    289 
     290   // Fix up a hard-to-describe instruction: kX86RepneScasw uses rAX, rCX and rDI, and sets rDI.
    291   if (lir->opcode == kX86RepneScasw) {
    292     SetupRegMask(use_mask, rs_rAX.GetReg());
    293     SetupRegMask(use_mask, rs_rCX.GetReg());
    294     SetupRegMask(use_mask, rs_rDI.GetReg());
    295     SetupRegMask(def_mask, rs_rDI.GetReg());
    296   }
    297 
    298   if (flags & USE_FP_STACK) {
    299     use_mask->SetBit(kX86FPStack);
    300     def_mask->SetBit(kX86FPStack);
    301   }
    302 }
    303 
    304 /* For dumping instructions */
    305 static const char* x86RegName[] = {
    306   "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
    307   "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
    308 };
    309 
    310 static const char* x86CondName[] = {
    311   "O",
    312   "NO",
    313   "B/NAE/C",
    314   "NB/AE/NC",
    315   "Z/EQ",
    316   "NZ/NE",
    317   "BE/NA",
    318   "NBE/A",
    319   "S",
    320   "NS",
    321   "P/PE",
    322   "NP/PO",
    323   "L/NGE",
    324   "NL/GE",
    325   "LE/NG",
    326   "NLE/G"
    327 };
    328 
    329 /*
     330  * Interpret a format string and build a human-readable string for the instruction.
     331  * See the format key in Assemble.cc.
    332  */
    333 std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
    334   std::string buf;
    335   size_t i = 0;
    336   size_t fmt_len = strlen(fmt);
    337   while (i < fmt_len) {
    338     if (fmt[i] != '!') {
    339       buf += fmt[i];
    340       i++;
    341     } else {
    342       i++;
    343       DCHECK_LT(i, fmt_len);
    344       char operand_number_ch = fmt[i];
    345       i++;
    346       if (operand_number_ch == '!') {
    347         buf += "!";
    348       } else {
    349         int operand_number = operand_number_ch - '0';
     350         DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
    351         DCHECK_LT(i, fmt_len);
    352         int operand = lir->operands[operand_number];
    353         switch (fmt[i]) {
    354           case 'c':
     355             DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
    356             buf += x86CondName[operand];
    357             break;
    358           case 'd':
    359             buf += StringPrintf("%d", operand);
    360             break;
    361           case 'q': {
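                        // The 64-bit value is split across two adjacent 32-bit LIR operands: this operand
                        // holds the high word and operands[operand_number + 1] holds the low word.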
    362              int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
    363                              static_cast<uint32_t>(lir->operands[operand_number+1]));
     364              buf += StringPrintf("%" PRId64, value);
                      break;
     365           }
    366           case 'p': {
    367             EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
    368             buf += StringPrintf("0x%08x", tab_rec->offset);
    369             break;
    370           }
    371           case 'r':
    372             if (RegStorage::IsFloat(operand)) {
    373               int fp_reg = RegStorage::RegNum(operand);
    374               buf += StringPrintf("xmm%d", fp_reg);
    375             } else {
    376               int reg_num = RegStorage::RegNum(operand);
     377               DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
    378               buf += x86RegName[reg_num];
    379             }
    380             break;
    381           case 't':
    382             buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
    383                                 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
    384                                 lir->target);
    385             break;
    386           default:
    387             buf += StringPrintf("DecodeError '%c'", fmt[i]);
    388             break;
    389         }
    390         i++;
    391       }
    392     }
    393   }
    394   return buf;
    395 }
    396 
    397 void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
    398   char buf[256];
    399   buf[0] = 0;
    400 
    401   if (mask.Equals(kEncodeAll)) {
    402     strcpy(buf, "all");
    403   } else {
    404     char num[8];
    405     int i;
    406 
    407     for (i = 0; i < kX86RegEnd; i++) {
    408       if (mask.HasBit(i)) {
    409         snprintf(num, arraysize(num), "%d ", i);
    410         strcat(buf, num);
    411       }
    412     }
    413 
    414     if (mask.HasBit(ResourceMask::kCCode)) {
    415       strcat(buf, "cc ");
    416     }
    417     /* Memory bits */
    418     if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
    419       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
    420                DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
    421                (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    422     }
    423     if (mask.HasBit(ResourceMask::kLiteral)) {
    424       strcat(buf, "lit ");
    425     }
    426 
    427     if (mask.HasBit(ResourceMask::kHeapRef)) {
    428       strcat(buf, "heap ");
    429     }
    430     if (mask.HasBit(ResourceMask::kMustNotAlias)) {
    431       strcat(buf, "noalias ");
    432     }
    433   }
    434   if (buf[0]) {
    435     LOG(INFO) << prefix << ": " <<  buf;
    436   }
    437 }
    438 
    439 void X86Mir2Lir::AdjustSpillMask() {
     440   // x86 has no LR, so there is no LR spill to adjust for. We do, however, account for the
           // fake return-address register in the spill mask and spill count.
    441   core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
    442   num_core_spills_++;
    443 }
    444 
    445 RegStorage X86Mir2Lir::AllocateByteRegister() {
    446   RegStorage reg = AllocTypedTemp(false, kCoreReg);
    447   if (!cu_->target64) {
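             // Only EAX, ECX, EDX and EBX have 8-bit views on 32-bit x86; their register numbers are
             // all below ESP's, so this check guarantees the temp is byte-addressable.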
    448     DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
    449   }
    450   return reg;
    451 }
    452 
    453 RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
    454   return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
    455 }
    456 
    457 bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
    458   return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
    459 }
    460 
    461 /* Clobber all regs that might be used by an external C call */
    462 void X86Mir2Lir::ClobberCallerSave() {
    463   if (cu_->target64) {
    464     Clobber(rs_rAX);
    465     Clobber(rs_rCX);
    466     Clobber(rs_rDX);
    467     Clobber(rs_rSI);
    468     Clobber(rs_rDI);
    469 
    470     Clobber(rs_r8);
    471     Clobber(rs_r9);
    472     Clobber(rs_r10);
    473     Clobber(rs_r11);
    474 
    475     Clobber(rs_fr8);
    476     Clobber(rs_fr9);
    477     Clobber(rs_fr10);
    478     Clobber(rs_fr11);
    479   } else {
    480     Clobber(rs_rAX);
    481     Clobber(rs_rCX);
    482     Clobber(rs_rDX);
    483     Clobber(rs_rBX);
    484   }
    485 
    486   Clobber(rs_fr0);
    487   Clobber(rs_fr1);
    488   Clobber(rs_fr2);
    489   Clobber(rs_fr3);
    490   Clobber(rs_fr4);
    491   Clobber(rs_fr5);
    492   Clobber(rs_fr6);
    493   Clobber(rs_fr7);
    494 }
    495 
    496 RegLocation X86Mir2Lir::GetReturnWideAlt() {
    497   RegLocation res = LocCReturnWide();
    498   DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
    499   DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
    500   Clobber(rs_rAX);
    501   Clobber(rs_rDX);
    502   MarkInUse(rs_rAX);
    503   MarkInUse(rs_rDX);
    504   MarkWide(res.reg);
    505   return res;
    506 }
    507 
    508 RegLocation X86Mir2Lir::GetReturnAlt() {
    509   RegLocation res = LocCReturn();
    510   res.reg.SetReg(rs_rDX.GetReg());
    511   Clobber(rs_rDX);
    512   MarkInUse(rs_rDX);
    513   return res;
    514 }
    515 
    516 /* To be used when explicitly managing register use */
    517 void X86Mir2Lir::LockCallTemps() {
    518   LockTemp(rs_rX86_ARG0);
    519   LockTemp(rs_rX86_ARG1);
    520   LockTemp(rs_rX86_ARG2);
    521   LockTemp(rs_rX86_ARG3);
    522   if (cu_->target64) {
    523     LockTemp(rs_rX86_ARG4);
    524     LockTemp(rs_rX86_ARG5);
    525     LockTemp(rs_rX86_FARG0);
    526     LockTemp(rs_rX86_FARG1);
    527     LockTemp(rs_rX86_FARG2);
    528     LockTemp(rs_rX86_FARG3);
    529     LockTemp(rs_rX86_FARG4);
    530     LockTemp(rs_rX86_FARG5);
    531     LockTemp(rs_rX86_FARG6);
    532     LockTemp(rs_rX86_FARG7);
    533   }
    534 }
    535 
    536 /* To be used when explicitly managing register use */
    537 void X86Mir2Lir::FreeCallTemps() {
    538   FreeTemp(rs_rX86_ARG0);
    539   FreeTemp(rs_rX86_ARG1);
    540   FreeTemp(rs_rX86_ARG2);
    541   FreeTemp(rs_rX86_ARG3);
    542   FreeTemp(TargetReg32(kHiddenArg));
    543   if (cu_->target64) {
    544     FreeTemp(rs_rX86_ARG4);
    545     FreeTemp(rs_rX86_ARG5);
    546     FreeTemp(rs_rX86_FARG0);
    547     FreeTemp(rs_rX86_FARG1);
    548     FreeTemp(rs_rX86_FARG2);
    549     FreeTemp(rs_rX86_FARG3);
    550     FreeTemp(rs_rX86_FARG4);
    551     FreeTemp(rs_rX86_FARG5);
    552     FreeTemp(rs_rX86_FARG6);
    553     FreeTemp(rs_rX86_FARG7);
    554   }
    555 }
    556 
    557 bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
    558     switch (opcode) {
    559       case kX86LockCmpxchgMR:
    560       case kX86LockCmpxchgAR:
    561       case kX86LockCmpxchg64M:
    562       case kX86LockCmpxchg64A:
    563       case kX86XchgMR:
    564       case kX86Mfence:
    565         // Atomic memory instructions provide full barrier.
    566         return true;
    567       default:
    568         break;
    569     }
    570 
     571     // Be conservative if we cannot prove that the opcode provides a full barrier.
    572     return false;
    573 }
    574 
    575 bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
    576 #if ANDROID_SMP != 0
    577   // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
    578   LIR* mem_barrier = last_lir_insn_;
    579 
    580   bool ret = false;
    581   /*
    582    * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
    583    * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
    584    * For those cases, all we need to ensure is that there is a scheduling barrier in place.
    585    */
    586   if (barrier_kind == kAnyAny) {
     587     // If no LIR exists already that can be used as a barrier, then generate an mfence.
    588     if (mem_barrier == nullptr) {
    589       mem_barrier = NewLIR0(kX86Mfence);
    590       ret = true;
    591     }
    592 
    593     // If last instruction does not provide full barrier, then insert an mfence.
    594     if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
    595       mem_barrier = NewLIR0(kX86Mfence);
    596       ret = true;
    597     }
    598   }
    599 
    600   // Now ensure that a scheduling barrier is in place.
    601   if (mem_barrier == nullptr) {
    602     GenBarrier();
    603   } else {
    604     // Mark as a scheduling barrier.
    605     DCHECK(!mem_barrier->flags.use_def_invalid);
    606     mem_barrier->u.m.def_mask = &kEncodeAll;
    607   }
    608   return ret;
    609 #else
    610   return false;
    611 #endif
    612 }
    613 
    614 void X86Mir2Lir::CompilerInitializeRegAlloc() {
    615   if (cu_->target64) {
    616     reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
    617                                           dp_regs_64, reserved_regs_64, reserved_regs_64q,
    618                                           core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
    619   } else {
    620     reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
    621                                           dp_regs_32, reserved_regs_32, empty_pool,
    622                                           core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
    623   }
    624 
    625   // Target-specific adjustments.
    626 
    627   // Add in XMM registers.
    628   const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
    629   for (RegStorage reg : *xp_regs) {
    630     RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    631     reginfo_map_.Put(reg.GetReg(), info);
    632   }
    633   const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
    634   for (RegStorage reg : *xp_temps) {
    635     RegisterInfo* xp_reg_info = GetRegInfo(reg);
    636     xp_reg_info->SetIsTemp(true);
    637   }
    638 
    639   // Alias single precision xmm to double xmms.
    640   // TODO: as needed, add larger vector sizes - alias all to the largest.
    641   GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
    642   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    643     int sp_reg_num = info->GetReg().GetRegNum();
    644     RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    645     RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    646     // 128-bit xmm vector register's master storage should refer to itself.
    647     DCHECK_EQ(xp_reg_info, xp_reg_info->Master());
    648 
    649     // Redirect 32-bit vector's master storage to 128-bit vector.
    650     info->SetMaster(xp_reg_info);
    651 
    652     RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    653     RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    654     // Redirect 64-bit vector's master storage to 128-bit vector.
    655     dp_reg_info->SetMaster(xp_reg_info);
    656     // Singles should show a single 32-bit mask bit, at first referring to the low half.
    657     DCHECK_EQ(info->StorageMask(), 0x1U);
    658   }
    659 
    660   if (cu_->target64) {
    661     // Alias 32bit W registers to corresponding 64bit X registers.
    662     GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    663     for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
    664       int x_reg_num = info->GetReg().GetRegNum();
    665       RegStorage x_reg = RegStorage::Solo64(x_reg_num);
    666       RegisterInfo* x_reg_info = GetRegInfo(x_reg);
    667       // 64bit X register's master storage should refer to itself.
    668       DCHECK_EQ(x_reg_info, x_reg_info->Master());
    669       // Redirect 32bit W master storage to 64bit X.
    670       info->SetMaster(x_reg_info);
    671       // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
    672       DCHECK_EQ(info->StorageMask(), 0x1U);
    673     }
    674   }
    675 
    676   // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
    677   // TODO: adjust for x86/hard float calling convention.
    678   reg_pool_->next_core_reg_ = 2;
    679   reg_pool_->next_sp_reg_ = 2;
    680   reg_pool_->next_dp_reg_ = 1;
    681 }
    682 
    683 int X86Mir2Lir::VectorRegisterSize() {
    684   return 128;
    685 }
    686 
    687 int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
    688   return fp_used ? 5 : 7;
    689 }
    690 
    691 void X86Mir2Lir::SpillCoreRegs() {
    692   if (num_core_spills_ == 0) {
    693     return;
    694   }
    695   // Spill mask not including fake return address register
    696   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
    697   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
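           // Spill slots are contiguous and pointer-sized, starting num_core_spills_ pointer-widths below
           // the end of the frame; rRET is counted in num_core_spills_ but, per the mask above, not stored here.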
    698   OpSize size = cu_->target64 ? k64 : k32;
    699   for (int reg = 0; mask; mask >>= 1, reg++) {
    700     if (mask & 0x1) {
    701       StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) :  RegStorage::Solo32(reg),
    702                    size, kNotVolatile);
    703       offset += GetInstructionSetPointerSize(cu_->instruction_set);
    704     }
    705   }
    706 }
    707 
    708 void X86Mir2Lir::UnSpillCoreRegs() {
    709   if (num_core_spills_ == 0) {
    710     return;
    711   }
    712   // Spill mask not including fake return address register
    713   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
    714   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
    715   OpSize size = cu_->target64 ? k64 : k32;
    716   for (int reg = 0; mask; mask >>= 1, reg++) {
    717     if (mask & 0x1) {
    718       LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) :  RegStorage::Solo32(reg),
    719                    size, kNotVolatile);
    720       offset += GetInstructionSetPointerSize(cu_->instruction_set);
    721     }
    722   }
    723 }
    724 
    725 void X86Mir2Lir::SpillFPRegs() {
    726   if (num_fp_spills_ == 0) {
    727     return;
    728   }
    729   uint32_t mask = fp_spill_mask_;
    730   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
    731   for (int reg = 0; mask; mask >>= 1, reg++) {
    732     if (mask & 0x1) {
    733       StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
    734                    k64, kNotVolatile);
    735       offset += sizeof(double);
    736     }
    737   }
    738 }
    739 void X86Mir2Lir::UnSpillFPRegs() {
    740   if (num_fp_spills_ == 0) {
    741     return;
    742   }
    743   uint32_t mask = fp_spill_mask_;
    744   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
    745   for (int reg = 0; mask; mask >>= 1, reg++) {
    746     if (mask & 0x1) {
    747       LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
    748                    k64, kNotVolatile);
    749       offset += sizeof(double);
    750     }
    751   }
    752 }
    753 
    754 
    755 bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
    756   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
    757 }
    758 
    759 RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
    760   // X86_64 can handle any size.
    761   if (cu_->target64) {
    762     if (size == kReference) {
    763       return kRefReg;
    764     }
    765     return kCoreReg;
    766   }
    767 
    768   if (UNLIKELY(is_volatile)) {
    769     // On x86, atomic 64-bit load/store requires an fp register.
    770     // Smaller aligned load/store is atomic for both core and fp registers.
    771     if (size == k64 || size == kDouble) {
    772       return kFPReg;
    773     }
    774   }
    775   return RegClassBySize(size);
    776 }
    777 
    778 X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    779     : Mir2Lir(cu, mir_graph, arena),
    780       base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
    781       method_address_insns_(arena, 100, kGrowableArrayMisc),
    782       class_type_address_insns_(arena, 100, kGrowableArrayMisc),
    783       call_method_insns_(arena, 100, kGrowableArrayMisc),
    784       stack_decrement_(nullptr), stack_increment_(nullptr),
    785       const_vectors_(nullptr) {
    786   store_method_addr_used_ = false;
    787   if (kIsDebugBuild) {
    788     for (int i = 0; i < kX86Last; i++) {
    789       if (X86Mir2Lir::EncodingMap[i].opcode != i) {
    790         LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
    791                    << " is wrong: expecting " << i << ", seeing "
    792                    << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
    793       }
    794     }
    795   }
    796   if (cu_->target64) {
    797     rs_rX86_SP = rs_rX86_SP_64;
    798 
    799     rs_rX86_ARG0 = rs_rDI;
    800     rs_rX86_ARG1 = rs_rSI;
    801     rs_rX86_ARG2 = rs_rDX;
    802     rs_rX86_ARG3 = rs_rCX;
    803     rs_rX86_ARG4 = rs_r8;
    804     rs_rX86_ARG5 = rs_r9;
    805     rs_rX86_FARG0 = rs_fr0;
    806     rs_rX86_FARG1 = rs_fr1;
    807     rs_rX86_FARG2 = rs_fr2;
    808     rs_rX86_FARG3 = rs_fr3;
    809     rs_rX86_FARG4 = rs_fr4;
    810     rs_rX86_FARG5 = rs_fr5;
    811     rs_rX86_FARG6 = rs_fr6;
    812     rs_rX86_FARG7 = rs_fr7;
    813     rX86_ARG0 = rDI;
    814     rX86_ARG1 = rSI;
    815     rX86_ARG2 = rDX;
    816     rX86_ARG3 = rCX;
    817     rX86_ARG4 = r8;
    818     rX86_ARG5 = r9;
    819     rX86_FARG0 = fr0;
    820     rX86_FARG1 = fr1;
    821     rX86_FARG2 = fr2;
    822     rX86_FARG3 = fr3;
    823     rX86_FARG4 = fr4;
    824     rX86_FARG5 = fr5;
    825     rX86_FARG6 = fr6;
    826     rX86_FARG7 = fr7;
    827     rs_rX86_INVOKE_TGT = rs_rDI;
    828   } else {
    829     rs_rX86_SP = rs_rX86_SP_32;
    830 
    831     rs_rX86_ARG0 = rs_rAX;
    832     rs_rX86_ARG1 = rs_rCX;
    833     rs_rX86_ARG2 = rs_rDX;
    834     rs_rX86_ARG3 = rs_rBX;
    835     rs_rX86_ARG4 = RegStorage::InvalidReg();
    836     rs_rX86_ARG5 = RegStorage::InvalidReg();
    837     rs_rX86_FARG0 = rs_rAX;
    838     rs_rX86_FARG1 = rs_rCX;
    839     rs_rX86_FARG2 = rs_rDX;
    840     rs_rX86_FARG3 = rs_rBX;
    841     rs_rX86_FARG4 = RegStorage::InvalidReg();
    842     rs_rX86_FARG5 = RegStorage::InvalidReg();
    843     rs_rX86_FARG6 = RegStorage::InvalidReg();
    844     rs_rX86_FARG7 = RegStorage::InvalidReg();
    845     rX86_ARG0 = rAX;
    846     rX86_ARG1 = rCX;
    847     rX86_ARG2 = rDX;
    848     rX86_ARG3 = rBX;
    849     rX86_FARG0 = rAX;
    850     rX86_FARG1 = rCX;
    851     rX86_FARG2 = rDX;
    852     rX86_FARG3 = rBX;
    853     rs_rX86_INVOKE_TGT = rs_rAX;
    854     // TODO(64): Initialize with invalid reg
    855 //    rX86_ARG4 = RegStorage::InvalidReg();
    856 //    rX86_ARG5 = RegStorage::InvalidReg();
    857   }
    858   rs_rX86_RET0 = rs_rAX;
    859   rs_rX86_RET1 = rs_rDX;
    860   rs_rX86_COUNT = rs_rCX;
    861   rX86_RET0 = rAX;
    862   rX86_RET1 = rDX;
    863   rX86_INVOKE_TGT = rAX;
    864   rX86_COUNT = rCX;
    865 
    866   // Initialize the number of reserved vector registers
    867   num_reserved_vector_regs_ = -1;
    868 }
    869 
    870 Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
    871                           ArenaAllocator* const arena) {
    872   return new X86Mir2Lir(cu, mir_graph, arena);
    873 }
    874 
    875 // Not used in x86(-64)
    876 RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
    877   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
    878   return RegStorage::InvalidReg();
    879 }
    880 
    881 LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
    882   // First load the pointer in fs:[suspend-trigger] into eax
    883   // Then use a test instruction to indirect via that address.
    884   if (cu_->target64) {
    885     NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
    886         Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
    887   } else {
    888     NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
    889         Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
    890   }
    891   return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
    892 }
    893 
    894 uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
    895   DCHECK(!IsPseudoLirOp(opcode));
    896   return X86Mir2Lir::EncodingMap[opcode].flags;
    897 }
    898 
    899 const char* X86Mir2Lir::GetTargetInstName(int opcode) {
    900   DCHECK(!IsPseudoLirOp(opcode));
    901   return X86Mir2Lir::EncodingMap[opcode].name;
    902 }
    903 
    904 const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
    905   DCHECK(!IsPseudoLirOp(opcode));
    906   return X86Mir2Lir::EncodingMap[opcode].fmt;
    907 }
    908 
    909 void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
    910   // Can we do this directly to memory?
    911   rl_dest = UpdateLocWide(rl_dest);
    912   if ((rl_dest.location == kLocDalvikFrame) ||
    913       (rl_dest.location == kLocCompilerTemp)) {
    914     int32_t val_lo = Low32Bits(value);
    915     int32_t val_hi = High32Bits(value);
    916     int r_base = rs_rX86_SP.GetReg();
    917     int displacement = SRegOffset(rl_dest.s_reg_low);
    918 
    919     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    920     LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    921     AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
    922                               false /* is_load */, true /* is64bit */);
    923     store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    924     AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
    925                               false /* is_load */, true /* is64bit */);
    926     return;
    927   }
    928 
    929   // Just use the standard code to do the generation.
    930   Mir2Lir::GenConstWide(rl_dest, value);
    931 }
    932 
    933 // TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
    934 void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
    935   LOG(INFO)  << "location: " << loc.location << ','
    936              << (loc.wide ? " w" : "  ")
    937              << (loc.defined ? " D" : "  ")
    938              << (loc.is_const ? " c" : "  ")
    939              << (loc.fp ? " F" : "  ")
    940              << (loc.core ? " C" : "  ")
    941              << (loc.ref ? " r" : "  ")
    942              << (loc.high_word ? " h" : "  ")
    943              << (loc.home ? " H" : "  ")
    944              << ", low: " << static_cast<int>(loc.reg.GetLowReg())
    945              << ", high: " << static_cast<int>(loc.reg.GetHighReg())
    946              << ", s_reg: " << loc.s_reg_low
    947              << ", orig: " << loc.orig_sreg;
    948 }
    949 
    950 void X86Mir2Lir::Materialize() {
    951   // A good place to put the analysis before starting.
    952   AnalyzeMIR();
    953 
    954   // Now continue with regular code generation.
    955   Mir2Lir::Materialize();
    956 }
    957 
    958 void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
    959                                    SpecialTargetRegister symbolic_reg) {
    960   /*
     961    * For x86, just generate a 32 bit move immediate instruction that will be filled
    962    * in at 'link time'.  For now, put a unique value based on target to ensure that
    963    * code deduplication works.
    964    */
    965   int target_method_idx = target_method.dex_method_index;
    966   const DexFile* target_dex_file = target_method.dex_file;
    967   const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    968   uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
    969 
    970   // Generate the move instruction with the unique pointer and save index, dex_file, and type.
    971   LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
    972                      TargetReg(symbolic_reg, kNotWide).GetReg(),
    973                      static_cast<int>(target_method_id_ptr), target_method_idx,
    974                      WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
    975   AppendLIR(move);
    976   method_address_insns_.Insert(move);
    977 }
    978 
    979 void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
    980   /*
     981    * For x86, just generate a 32 bit move immediate instruction that will be filled
    982    * in at 'link time'.  For now, put a unique value based on target to ensure that
    983    * code deduplication works.
    984    */
    985   const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
    986   uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
    987 
    988   // Generate the move instruction with the unique pointer and save index and type.
    989   LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
    990                      TargetReg(symbolic_reg, kNotWide).GetReg(),
    991                      static_cast<int>(ptr), type_idx);
    992   AppendLIR(move);
    993   class_type_address_insns_.Insert(move);
    994 }
    995 
    996 LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
    997   /*
     998    * For x86, just generate a 32 bit call relative instruction that will be filled
    999    * in at 'link time'.  For now, put a unique value based on target to ensure that
   1000    * code deduplication works.
   1001    */
   1002   int target_method_idx = target_method.dex_method_index;
   1003   const DexFile* target_dex_file = target_method.dex_file;
   1004   const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
   1005   uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
   1006 
   1007   // Generate the call instruction with the unique pointer and save index, dex_file, and type.
   1008   LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
   1009                      target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
   1010   AppendLIR(call);
   1011   call_method_insns_.Insert(call);
   1012   return call;
   1013 }
   1014 
   1015 /*
   1016  * @brief Enter a 32 bit quantity into a buffer
   1017  * @param buf buffer.
   1018  * @param data Data value.
   1019  */
   1020 
   1021 static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
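           // Emit the four bytes in little-endian order, as x86 expects.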
   1022   buf.push_back(data & 0xff);
   1023   buf.push_back((data >> 8) & 0xff);
   1024   buf.push_back((data >> 16) & 0xff);
   1025   buf.push_back((data >> 24) & 0xff);
   1026 }
   1027 
   1028 void X86Mir2Lir::InstallLiteralPools() {
   1029   // These are handled differently for x86.
   1030   DCHECK(code_literal_list_ == nullptr);
   1031   DCHECK(method_literal_list_ == nullptr);
   1032   DCHECK(class_literal_list_ == nullptr);
   1033 
    1034   // Align to a 16 byte boundary.  We have implicit knowledge that the start of the method is
    1035   // on a 4 byte boundary.  How could we check this if it changes (other than that aligned loads
    1036   // would fail at runtime)?
   1037   if (const_vectors_ != nullptr) {
   1038     int align_size = (16-4) - (code_buffer_.size() & 0xF);
   1039     if (align_size < 0) {
   1040       align_size += 16;
   1041     }
   1042 
   1043     while (align_size > 0) {
   1044       code_buffer_.push_back(0);
   1045       align_size--;
   1046     }
   1047     for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
   1048       PushWord(code_buffer_, p->operands[0]);
   1049       PushWord(code_buffer_, p->operands[1]);
   1050       PushWord(code_buffer_, p->operands[2]);
   1051       PushWord(code_buffer_, p->operands[3]);
   1052     }
   1053   }
   1054 
   1055   // Handle the fixups for methods.
   1056   for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
   1057       LIR* p = method_address_insns_.Get(i);
   1058       DCHECK_EQ(p->opcode, kX86Mov32RI);
   1059       uint32_t target_method_idx = p->operands[2];
   1060       const DexFile* target_dex_file =
   1061           reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
   1062 
   1063       // The offset to patch is the last 4 bytes of the instruction.
   1064       int patch_offset = p->offset + p->flags.size - 4;
   1065       cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
   1066                                            cu_->method_idx, cu_->invoke_type,
   1067                                            target_method_idx, target_dex_file,
   1068                                            static_cast<InvokeType>(p->operands[4]),
   1069                                            patch_offset);
   1070   }
   1071 
   1072   // Handle the fixups for class types.
   1073   for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
   1074       LIR* p = class_type_address_insns_.Get(i);
   1075       DCHECK_EQ(p->opcode, kX86Mov32RI);
   1076       uint32_t target_method_idx = p->operands[2];
   1077 
   1078       // The offset to patch is the last 4 bytes of the instruction.
   1079       int patch_offset = p->offset + p->flags.size - 4;
   1080       cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
   1081                                           cu_->method_idx, target_method_idx, patch_offset);
   1082   }
   1083 
   1084   // And now the PC-relative calls to methods.
   1085   for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
   1086       LIR* p = call_method_insns_.Get(i);
   1087       DCHECK_EQ(p->opcode, kX86CallI);
   1088       uint32_t target_method_idx = p->operands[1];
   1089       const DexFile* target_dex_file =
   1090           reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
   1091 
   1092       // The offset to patch is the last 4 bytes of the instruction.
   1093       int patch_offset = p->offset + p->flags.size - 4;
   1094       cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
   1095                                                  cu_->method_idx, cu_->invoke_type,
   1096                                                  target_method_idx, target_dex_file,
   1097                                                  static_cast<InvokeType>(p->operands[3]),
   1098                                                  patch_offset, -4 /* offset */);
   1099   }
   1100 
   1101   // And do the normal processing.
   1102   Mir2Lir::InstallLiteralPools();
   1103 }
   1104 
   1105 bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
   1106   RegLocation rl_src = info->args[0];
   1107   RegLocation rl_srcPos = info->args[1];
   1108   RegLocation rl_dst = info->args[2];
   1109   RegLocation rl_dstPos = info->args[3];
   1110   RegLocation rl_length = info->args[4];
   1111   if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
   1112     return false;
   1113   }
   1114   if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
   1115     return false;
   1116   }
   1117   ClobberCallerSave();
   1118   LockCallTemps();  // Using fixed registers.
   1119   RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
   1120   LoadValueDirectFixed(rl_src, rs_rAX);
   1121   LoadValueDirectFixed(rl_dst, rs_rCX);
   1122   LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
   1123   LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
   1124   LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
   1125   LoadValueDirectFixed(rl_length, rs_rDX);
    1126   // If the length of the copy is > 128 characters (256 bytes) or negative, then take the slow path.
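           // (kCondHi is an unsigned compare, so a negative length, viewed as a large unsigned value, is
           // also caught here.)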
   1127   LIR* len_too_big  = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
   1128   LoadValueDirectFixed(rl_src, rs_rAX);
   1129   LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   1130   LIR* src_bad_len  = nullptr;
   1131   LIR* src_bad_off = nullptr;
   1132   LIR* srcPos_negative  = nullptr;
   1133   if (!rl_srcPos.is_const) {
   1134     LoadValueDirectFixed(rl_srcPos, tmp_reg);
   1135     srcPos_negative  = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
   1136     // src_pos < src_len
   1137     src_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   1138     // src_len - src_pos < copy_len
   1139     OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
   1140     src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1141   } else {
   1142     int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
   1143     if (pos_val == 0) {
   1144       src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
   1145     } else {
   1146       // src_pos < src_len
   1147       src_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
   1148       // src_len - src_pos < copy_len
   1149       OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
   1150       src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1151     }
   1152   }
   1153   LIR* dstPos_negative = nullptr;
   1154   LIR* dst_bad_len = nullptr;
   1155   LIR* dst_bad_off = nullptr;
   1156   LoadValueDirectFixed(rl_dst, rs_rAX);
   1157   LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   1158   if (!rl_dstPos.is_const) {
   1159     LoadValueDirectFixed(rl_dstPos, tmp_reg);
   1160     dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
   1161     // dst_pos < dst_len
   1162     dst_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   1163     // dst_len - dst_pos < copy_len
   1164     OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
   1165     dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1166   } else {
   1167     int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
   1168     if (pos_val == 0) {
   1169       dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
   1170     } else {
   1171       // dst_pos < dst_len
   1172       dst_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
   1173       // dst_len - dst_pos < copy_len
   1174       OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
   1175       dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1176     }
   1177   }
   1178   // Everything is checked now.
   1179   LoadValueDirectFixed(rl_src, rs_rAX);
   1180   LoadValueDirectFixed(rl_dst, tmp_reg);
   1181   LoadValueDirectFixed(rl_srcPos, rs_rCX);
   1182   NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
   1183        rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
   1184   // RAX now holds the address of the first src element to be copied.
   1185 
   1186   LoadValueDirectFixed(rl_dstPos, rs_rCX);
   1187   NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
    1188        rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
    1189   // tmp_reg (RBX on x86, R11 on x86-64) now holds the address of the first dst element to be copied.
   1190 
   1191   // Check if the number of elements to be copied is odd or even. If odd
   1192   // then copy the first element (so that the remaining number of elements
   1193   // is even).
   1194   LoadValueDirectFixed(rl_length, rs_rCX);
   1195   OpRegImm(kOpAnd, rs_rCX, 1);
   1196   LIR* jmp_to_begin_loop  = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
   1197   OpRegImm(kOpSub, rs_rDX, 1);
   1198   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
   1199   StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
   1200 
   1201   // Since the remaining number of elements is even, we will copy by
   1202   // two elements at a time.
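           // (Each kSingle access below is 32 bits wide, i.e. it moves two 16-bit chars per iteration.)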
   1203   LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
   1204   LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
   1205   OpRegImm(kOpSub, rs_rDX, 2);
   1206   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
   1207   StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
   1208   OpUnconditionalBranch(beginLoop);
   1209   LIR *check_failed = NewLIR0(kPseudoTargetLabel);
   1210   LIR* launchpad_branch  = OpUnconditionalBranch(nullptr);
   1211   LIR *return_point = NewLIR0(kPseudoTargetLabel);
   1212   jmp_to_ret->target = return_point;
   1213   jmp_to_begin_loop->target = beginLoop;
   1214   src_dst_same->target = check_failed;
   1215   len_too_big->target = check_failed;
   1216   src_null_branch->target = check_failed;
   1217   if (srcPos_negative != nullptr)
    1218     srcPos_negative->target = check_failed;
   1219   if (src_bad_off != nullptr)
   1220     src_bad_off->target = check_failed;
   1221   if (src_bad_len != nullptr)
   1222     src_bad_len->target = check_failed;
   1223   dst_null_branch->target = check_failed;
   1224   if (dstPos_negative != nullptr)
   1225     dstPos_negative->target = check_failed;
   1226   if (dst_bad_off != nullptr)
   1227     dst_bad_off->target = check_failed;
   1228   if (dst_bad_len != nullptr)
   1229     dst_bad_len->target = check_failed;
   1230   AddIntrinsicSlowPath(info, launchpad_branch, return_point);
   1231   ClobberCallerSave();  // We must clobber everything because slow path will return here
   1232   return true;
   1233 }
   1234 
   1235 
   1236 /*
    1237  * Fast String.indexOf(I) & (II).  Inline check for the simple case of char <= 0xffff,
   1238  * otherwise bails to standard library code.
   1239  */
   1240 bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   1241   RegLocation rl_obj = info->args[0];
   1242   RegLocation rl_char = info->args[1];
    1243   RegLocation rl_start;  // Note: only present in the III flavor of indexOf.
   1244   // RBX is callee-save register in 64-bit mode.
   1245   RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
   1246   int start_value = -1;
   1247 
   1248   uint32_t char_value =
   1249     rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;
   1250 
   1251   if (char_value > 0xFFFF) {
   1252     // We have to punt to the real String.indexOf.
   1253     return false;
   1254   }
   1255 
    1256   // Okay, we are committed to inlining this.
   1257   // EAX: 16 bit character being searched.
   1258   // ECX: count: number of words to be searched.
   1259   // EDI: String being searched.
   1260   // EDX: temporary during execution.
   1261   // EBX or R11: temporary during execution (depending on mode).
   1262   // REP SCASW: search instruction.
   1263 
   1264   FlushReg(rs_rAX);
   1265   Clobber(rs_rAX);
   1266   LockTemp(rs_rAX);
   1267   FlushReg(rs_rCX);
   1268   Clobber(rs_rCX);
   1269   LockTemp(rs_rCX);
   1270   FlushReg(rs_rDX);
   1271   Clobber(rs_rDX);
   1272   LockTemp(rs_rDX);
   1273   FlushReg(rs_tmp);
   1274   Clobber(rs_tmp);
   1275   LockTemp(rs_tmp);
   1276   if (cu_->target64) {
   1277     FlushReg(rs_rDI);
   1278     Clobber(rs_rDI);
   1279     LockTemp(rs_rDI);
   1280   }
   1281 
   1282   RegLocation rl_return = GetReturn(kCoreReg);
   1283   RegLocation rl_dest = InlineTarget(info);
   1284 
   1285   // Is the string non-NULL?
   1286   LoadValueDirectFixed(rl_obj, rs_rDX);
   1287   GenNullCheck(rs_rDX, info->opt_flags);
   1288   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
   1289 
   1290   LIR *slowpath_branch = nullptr, *length_compare = nullptr;
   1291 
   1292   // We need the value in EAX.
   1293   if (rl_char.is_const) {
   1294     LoadConstantNoClobber(rs_rAX, char_value);
   1295   } else {
   1296     // Does the character fit in 16 bits? Compare it at runtime.
   1297     LoadValueDirectFixed(rl_char, rs_rAX);
   1298     slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
   1299   }
   1300 
   1301   // From here down, we know that we are looking for a char that fits in 16 bits.
   1302   // Location of reference to data array within the String object.
   1303   int value_offset = mirror::String::ValueOffset().Int32Value();
   1304   // Location of count within the String object.
   1305   int count_offset = mirror::String::CountOffset().Int32Value();
   1306   // Starting offset within data array.
   1307   int offset_offset = mirror::String::OffsetOffset().Int32Value();
    1308   // Start of char data within array_.
   1309   int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
   1310 
    1311   // Load the number of words to search into rCX.
   1312   Load32Disp(rs_rDX, count_offset, rs_rCX);
   1313 
   1314   // Possible signal here due to null pointer dereference.
   1315   // Note that the signal handler will expect the top word of
   1316   // the stack to be the ArtMethod*.  If the PUSH edi instruction
   1317   // below is ahead of the load above then this will not be true
   1318   // and the signal handler will not work.
   1319   MarkPossibleNullPointerException(0);
   1320 
   1321   if (!cu_->target64) {
   1322     // EDI is callee-save register in 32-bit mode.
   1323     NewLIR1(kX86Push32R, rs_rDI.GetReg());
   1324   }
   1325 
   1326   if (zero_based) {
   1327     // Start index is not present.
   1328     // We have to handle an empty string.  Use special instruction JECXZ.
   1329     length_compare = NewLIR0(kX86Jecxz8);
   1330 
    1331     // Copy the number of words to search into a temporary register.
    1332     // We will use the register at the end to calculate the result.
   1333     OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1334   } else {
   1335     // Start index is present.
   1336     rl_start = info->args[2];
   1337 
   1338     // We have to offset by the start index.
   1339     if (rl_start.is_const) {
   1340       start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
   1341       start_value = std::max(start_value, 0);
   1342 
   1343       // Is the start > count?
   1344       length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
   1345       OpRegImm(kOpMov, rs_rDI, start_value);
   1346 
    1347       // Copy the number of words to search into a temporary register.
    1348       // We will use the register at the end to calculate the result.
   1349       OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1350 
   1351       if (start_value != 0) {
   1352         // Decrease the number of words to search by the start index.
   1353         OpRegImm(kOpSub, rs_rCX, start_value);
   1354       }
   1355     } else {
   1356       // Handle "start index < 0" case.
   1357       if (!cu_->target64 && rl_start.location != kLocPhysReg) {
   1358         // Load the start index from stack, remembering that we pushed EDI.
   1359         int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
   1360         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   1361         Load32Disp(rs_rX86_SP, displacement, rs_rDI);
   1362         // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
   1363         DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
   1364         int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
   1365         AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
   1366       } else {
   1367         LoadValueDirectFixed(rl_start, rs_rDI);
   1368       }
   1369       OpRegReg(kOpXor, rs_tmp, rs_tmp);
   1370       OpRegReg(kOpCmp, rs_rDI, rs_tmp);
   1371       OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp);
   1372 
   1373       // The length of the string should be greater than the start index.
   1374       length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr);
   1375 
    1376       // Copy the number of words to search into a temporary register.
    1377       // We will use the register at the end to calculate the result.
   1378       OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1379 
   1380       // Decrease the number of words to search by the start index.
   1381       OpRegReg(kOpSub, rs_rCX, rs_rDI);
   1382     }
   1383   }
   1384 
   1385   // Load the address of the string into EDI.
    1386   // In the case of a start index, we have to add the address to the existing value in EDI.
   1387   // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
   1388   if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
   1389     Load32Disp(rs_rDX, offset_offset, rs_rDI);
   1390   } else {
   1391     OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset);
   1392   }
   1393   OpRegImm(kOpLsl, rs_rDI, 1);
   1394   OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset);
   1395   OpRegImm(kOpAdd, rs_rDI, data_offset);
   1396 
   1397   // EDI now contains the start of the string to be searched.
   1398   // We are all prepared to do the search for the character.
   1399   NewLIR0(kX86RepneScasw);
   1400 
   1401   // Did we find a match?
   1402   LIR* failed_branch = OpCondBranch(kCondNe, nullptr);
   1403 
    1404   // Yes, we matched.  Compute the index of the result.
   1405   OpRegReg(kOpSub, rs_tmp, rs_rCX);
   1406   NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1);
   1407 
   1408   LIR *all_done = NewLIR1(kX86Jmp8, 0);
   1409 
   1410   // Failed to match; return -1.
   1411   LIR *not_found = NewLIR0(kPseudoTargetLabel);
   1412   length_compare->target = not_found;
   1413   failed_branch->target = not_found;
   1414   LoadConstantNoClobber(rl_return.reg, -1);
   1415 
   1416   // And join up at the end.
   1417   all_done->target = NewLIR0(kPseudoTargetLabel);
   1418 
   1419   if (!cu_->target64)
   1420     NewLIR1(kX86Pop32R, rs_rDI.GetReg());
   1421 
   1422   // Out of line code returns here.
   1423   if (slowpath_branch != nullptr) {
   1424     LIR *return_point = NewLIR0(kPseudoTargetLabel);
   1425     AddIntrinsicSlowPath(info, slowpath_branch, return_point);
    1426     ClobberCallerSave();  // We must clobber everything because the slow path will return here.
   1427   }
   1428 
   1429   StoreValue(rl_dest, rl_return);
   1430 
   1431   FreeTemp(rs_rAX);
   1432   FreeTemp(rs_rCX);
   1433   FreeTemp(rs_rDX);
   1434   FreeTemp(rs_tmp);
   1435   if (cu_->target64) {
   1436     FreeTemp(rs_rDI);
   1437   }
   1438 
   1439   return true;
   1440 }
   1441 
   1442 /*
   1443  * @brief Enter an 'advance LOC' into the FDE buffer
   1444  * @param buf FDE buffer.
   1445  * @param increment Amount by which to increase the current location.
   1446  */
    1447 static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
   1448   if (increment < 64) {
   1449     // Encoding in opcode.
   1450     buf.push_back(0x1 << 6 | increment);
   1451   } else if (increment < 256) {
   1452     // Single byte delta.
   1453     buf.push_back(0x02);
   1454     buf.push_back(increment);
   1455   } else if (increment < 256 * 256) {
   1456     // Two byte delta.
   1457     buf.push_back(0x03);
   1458     buf.push_back(increment & 0xff);
   1459     buf.push_back((increment >> 8) & 0xff);
   1460   } else {
   1461     // Four byte delta.
   1462     buf.push_back(0x04);
   1463     PushWord(buf, increment);
   1464   }
   1465 }
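         // For reference, a few encodings this helper produces (per the DWARF spec):
         //   increment 20     -> 0x54                 (DW_CFA_advance_loc, delta in the low 6 bits)
         //   increment 100    -> 0x02 0x64            (DW_CFA_advance_loc1)
         //   increment 300    -> 0x03 0x2c 0x01       (DW_CFA_advance_loc2, little-endian delta)
         //   increment 100000 -> 0x04 + 4-byte delta  (DW_CFA_advance_loc4)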
   1466 
   1467 
   1468 std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64) {
   1469   return X86Mir2Lir::ReturnCommonCallFrameInformation(is_x86_64);
   1470 }
   1471 
   1472 static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
   1473   uint8_t buffer[12];
   1474   uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
   1475   for (uint8_t *p = buffer; p < ptr; p++) {
   1476     buf.push_back(*p);
   1477   }
   1478 }
   1479 
   1480 static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) {
   1481   uint8_t buffer[12];
   1482   uint8_t *ptr = EncodeSignedLeb128(buffer, value);
   1483   for (uint8_t *p = buffer; p < ptr; p++) {
   1484     buf.push_back(*p);
   1485   }
   1486 }
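         // A couple of worked encodings for reference: EncodeUnsignedLeb128(buf, 300) appends
         // 0xac 0x02 (300 = 0b10'0101100), while EncodeSignedLeb128(buf, -4) and
         // EncodeSignedLeb128(buf, -8) append the single bytes 0x7c and 0x78 respectively.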
   1487 
   1488 std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation(bool is_x86_64) {
    1489   std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
   1490 
   1491   // Length (will be filled in later in this routine).
   1492   PushWord(*cfi_info, 0);
   1493 
   1494   // CIE id: always 0.
   1495   PushWord(*cfi_info, 0);
   1496 
   1497   // Version: always 1.
   1498   cfi_info->push_back(0x01);
   1499 
   1500   // Augmentation: 'zR\0'
   1501   cfi_info->push_back(0x7a);
   1502   cfi_info->push_back(0x52);
   1503   cfi_info->push_back(0x0);
   1504 
   1505   // Code alignment: 1.
   1506   EncodeUnsignedLeb128(*cfi_info, 1);
   1507 
   1508   // Data alignment.
   1509   if (is_x86_64) {
   1510     EncodeSignedLeb128(*cfi_info, -8);
   1511   } else {
   1512     EncodeSignedLeb128(*cfi_info, -4);
   1513   }
   1514 
   1515   // Return address register.
   1516   if (is_x86_64) {
   1517     // R16(RIP)
   1518     cfi_info->push_back(0x10);
   1519   } else {
   1520     // R8(EIP)
   1521     cfi_info->push_back(0x08);
   1522   }
   1523 
   1524   // Augmentation length: 1.
   1525   cfi_info->push_back(1);
   1526 
   1527   // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
   1528   cfi_info->push_back(0x03);
   1529 
   1530   // Initial instructions.
   1531   if (is_x86_64) {
   1532     // DW_CFA_def_cfa R7(RSP) 8.
   1533     cfi_info->push_back(0x0c);
   1534     cfi_info->push_back(0x07);
   1535     cfi_info->push_back(0x08);
   1536 
   1537     // DW_CFA_offset R16(RIP) 1 (* -8).
   1538     cfi_info->push_back(0x90);
   1539     cfi_info->push_back(0x01);
   1540   } else {
   1541     // DW_CFA_def_cfa R4(ESP) 4.
   1542     cfi_info->push_back(0x0c);
   1543     cfi_info->push_back(0x04);
   1544     cfi_info->push_back(0x04);
   1545 
   1546     // DW_CFA_offset R8(EIP) 1 (* -4).
   1547     cfi_info->push_back(0x88);
   1548     cfi_info->push_back(0x01);
   1549   }
   1550 
   1551   // Padding to a multiple of 4
   1552   while ((cfi_info->size() & 3) != 0) {
   1553     // DW_CFA_nop is encoded as 0.
   1554     cfi_info->push_back(0);
   1555   }
   1556 
   1557   // Set the length of the CIE inside the generated bytes.
   1558   uint32_t length = cfi_info->size() - 4;
   1559   (*cfi_info)[0] = length;
   1560   (*cfi_info)[1] = length >> 8;
   1561   (*cfi_info)[2] = length >> 16;
   1562   (*cfi_info)[3] = length >> 24;
   1563   return cfi_info;
   1564 }
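         // For a 32-bit target the CIE built above should come out as (length patched last):
         //   14 00 00 00  00 00 00 00  01 7a 52 00  01 7c 08 01  03 0c 04 04  88 01 00 00
         // i.e. length 0x14, CIE id 0, version 1, augmentation "zR", code alignment 1,
         // data alignment -4, return address register 8 (EIP), augmentation data
         // DW_EH_PE_udata4, DW_CFA_def_cfa(esp, 4), DW_CFA_offset(eip, 1), two DW_CFA_nops.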
   1565 
   1566 static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
   1567   if (is_x86_64) {
   1568     switch (art_reg_id) {
   1569     case 3 : *dwarf_reg_id =  3; return true;  // %rbx
   1570     // This is the only discrepancy between ART & DWARF register numbering.
   1571     case 5 : *dwarf_reg_id =  6; return true;  // %rbp
   1572     case 12: *dwarf_reg_id = 12; return true;  // %r12
   1573     case 13: *dwarf_reg_id = 13; return true;  // %r13
   1574     case 14: *dwarf_reg_id = 14; return true;  // %r14
   1575     case 15: *dwarf_reg_id = 15; return true;  // %r15
   1576     default: return false;  // Should not get here
   1577     }
   1578   } else {
   1579     switch (art_reg_id) {
   1580     case 5: *dwarf_reg_id = 5; return true;  // %ebp
   1581     case 6: *dwarf_reg_id = 6; return true;  // %esi
   1582     case 7: *dwarf_reg_id = 7; return true;  // %edi
   1583     default: return false;  // Should not get here
   1584     }
   1585   }
   1586 }
   1587 
   1588 std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
    1589   std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
   1590 
   1591   // Generate the FDE for the method.
   1592   DCHECK_NE(data_offset_, 0U);
   1593 
   1594   // Length (will be filled in later in this routine).
   1595   PushWord(*cfi_info, 0);
   1596 
   1597   // 'CIE_pointer' (filled in by linker).
   1598   PushWord(*cfi_info, 0);
   1599 
   1600   // 'initial_location' (filled in by linker).
   1601   PushWord(*cfi_info, 0);
   1602 
   1603   // 'address_range' (number of bytes in the method).
   1604   PushWord(*cfi_info, data_offset_);
   1605 
   1606   // Augmentation length: 0
   1607   cfi_info->push_back(0);
   1608 
   1609   // The instructions in the FDE.
   1610   if (stack_decrement_ != nullptr) {
   1611     // Advance LOC to just past the stack decrement.
   1612     uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
   1613     AdvanceLoc(*cfi_info, pc);
   1614 
   1615     // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
   1616     cfi_info->push_back(0x0e);
   1617     EncodeUnsignedLeb128(*cfi_info, frame_size_);
   1618 
   1619     // Handle register spills
   1620     const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
   1621     const int kDataAlignmentFactor = (cu_->target64) ? -8 : -4;
   1622     uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   1623     int offset = -(GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
   1624     for (int reg = 0; mask; mask >>= 1, reg++) {
   1625       if (mask & 0x1) {
   1626         pc += kSpillInstLen;
   1627 
   1628         // Advance LOC to pass this instruction
   1629         AdvanceLoc(*cfi_info, kSpillInstLen);
   1630 
   1631         int dwarf_reg_id;
   1632         if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
   1633           // DW_CFA_offset_extended_sf reg_no offset
   1634           cfi_info->push_back(0x11);
   1635           EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id);
   1636           EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor);
   1637         }
   1638 
   1639         offset += GetInstructionSetPointerSize(cu_->instruction_set);
   1640       }
   1641     }
   1642 
    1643     // We continue with this stack layout until the epilogue.
   1644     if (stack_increment_ != nullptr) {
   1645       uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
   1646       AdvanceLoc(*cfi_info, new_pc - pc);
   1647 
   1648       // We probably have code snippets after the epilogue, so save the
   1649       // current state: DW_CFA_remember_state.
   1650       cfi_info->push_back(0x0a);
   1651 
   1652       // We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
   1653       // There is only the return PC on the stack now.
   1654       cfi_info->push_back(0x0e);
   1655       EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
   1656 
   1657       // Everything after that is the same as before the epilogue.
    1658       // The stack bump was followed by the RET instruction.
   1659       LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
   1660       if (post_ret_insn != nullptr) {
   1661         pc = new_pc;
   1662         new_pc = post_ret_insn->offset;
   1663         AdvanceLoc(*cfi_info, new_pc - pc);
   1664         // Restore the state: DW_CFA_restore_state.
   1665         cfi_info->push_back(0x0b);
   1666       }
   1667     }
   1668   }
   1669 
   1670   // Padding to a multiple of 4
   1671   while ((cfi_info->size() & 3) != 0) {
   1672     // DW_CFA_nop is encoded as 0.
   1673     cfi_info->push_back(0);
   1674   }
   1675 
   1676   // Set the length of the FDE inside the generated bytes.
   1677   uint32_t length = cfi_info->size() - 4;
   1678   (*cfi_info)[0] = length;
   1679   (*cfi_info)[1] = length >> 8;
   1680   (*cfi_info)[2] = length >> 16;
   1681   (*cfi_info)[3] = length >> 24;
   1682   return cfi_info;
   1683 }
   1684 
   1685 void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
   1686   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
   1687     case kMirOpReserveVectorRegisters:
   1688       ReserveVectorRegisters(mir);
   1689       break;
   1690     case kMirOpReturnVectorRegisters:
   1691       ReturnVectorRegisters();
   1692       break;
   1693     case kMirOpConstVector:
   1694       GenConst128(bb, mir);
   1695       break;
   1696     case kMirOpMoveVector:
   1697       GenMoveVector(bb, mir);
   1698       break;
   1699     case kMirOpPackedMultiply:
   1700       GenMultiplyVector(bb, mir);
   1701       break;
   1702     case kMirOpPackedAddition:
   1703       GenAddVector(bb, mir);
   1704       break;
   1705     case kMirOpPackedSubtract:
   1706       GenSubtractVector(bb, mir);
   1707       break;
   1708     case kMirOpPackedShiftLeft:
   1709       GenShiftLeftVector(bb, mir);
   1710       break;
   1711     case kMirOpPackedSignedShiftRight:
   1712       GenSignedShiftRightVector(bb, mir);
   1713       break;
   1714     case kMirOpPackedUnsignedShiftRight:
   1715       GenUnsignedShiftRightVector(bb, mir);
   1716       break;
   1717     case kMirOpPackedAnd:
   1718       GenAndVector(bb, mir);
   1719       break;
   1720     case kMirOpPackedOr:
   1721       GenOrVector(bb, mir);
   1722       break;
   1723     case kMirOpPackedXor:
   1724       GenXorVector(bb, mir);
   1725       break;
   1726     case kMirOpPackedAddReduce:
   1727       GenAddReduceVector(bb, mir);
   1728       break;
   1729     case kMirOpPackedReduce:
   1730       GenReduceVector(bb, mir);
   1731       break;
   1732     case kMirOpPackedSet:
   1733       GenSetVector(bb, mir);
   1734       break;
   1735     default:
   1736       break;
   1737   }
   1738 }
   1739 
   1740 void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
    1741   // We should not try to reserve twice without returning the registers.
    1742   DCHECK_EQ(num_reserved_vector_regs_, -1);
   1743 
   1744   int num_vector_reg = mir->dalvikInsn.vA;
   1745   for (int i = 0; i < num_vector_reg; i++) {
   1746     RegStorage xp_reg = RegStorage::Solo128(i);
   1747     RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
   1748     Clobber(xp_reg);
   1749 
   1750     for (RegisterInfo *info = xp_reg_info->GetAliasChain();
   1751                        info != nullptr;
   1752                        info = info->GetAliasChain()) {
   1753       if (info->GetReg().IsSingle()) {
   1754         reg_pool_->sp_regs_.Delete(info);
   1755       } else {
   1756         reg_pool_->dp_regs_.Delete(info);
   1757       }
   1758     }
   1759   }
   1760 
   1761   num_reserved_vector_regs_ = num_vector_reg;
   1762 }
   1763 
   1764 void X86Mir2Lir::ReturnVectorRegisters() {
   1765   // Return all the reserved registers
   1766   for (int i = 0; i < num_reserved_vector_regs_; i++) {
   1767     RegStorage xp_reg = RegStorage::Solo128(i);
   1768     RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
   1769 
   1770     for (RegisterInfo *info = xp_reg_info->GetAliasChain();
   1771                        info != nullptr;
   1772                        info = info->GetAliasChain()) {
   1773       if (info->GetReg().IsSingle()) {
   1774         reg_pool_->sp_regs_.Insert(info);
   1775       } else {
   1776         reg_pool_->dp_regs_.Insert(info);
   1777       }
   1778     }
   1779   }
   1780 
    1781   // We don't have any more reserved vector registers.
   1782   num_reserved_vector_regs_ = -1;
   1783 }
   1784 
   1785 void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
   1786   store_method_addr_used_ = true;
   1787   int type_size = mir->dalvikInsn.vB;
   1788   // We support 128 bit vectors.
   1789   DCHECK_EQ(type_size & 0xFFFF, 128);
   1790   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   1791   uint32_t *args = mir->dalvikInsn.arg;
   1792   int reg = rs_dest.GetReg();
   1793   // Check for all 0 case.
   1794   if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
   1795     NewLIR2(kX86XorpsRR, reg, reg);
   1796     return;
   1797   }
   1798 
   1799   // Append the mov const vector to reg opcode.
   1800   AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
   1801 }
   1802 
   1803 void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
   1804   // Okay, load it from the constant vector area.
   1805   LIR *data_target = ScanVectorLiteral(mir);
   1806   if (data_target == nullptr) {
   1807     data_target = AddVectorLiteral(mir);
   1808   }
   1809 
   1810   // Address the start of the method.
   1811   RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
   1812   if (rl_method.wide) {
   1813     rl_method = LoadValueWide(rl_method, kCoreReg);
   1814   } else {
   1815     rl_method = LoadValue(rl_method, kCoreReg);
   1816   }
   1817 
   1818   // Load the proper value from the literal area.
   1819   // We don't know the proper offset for the value, so pick one that will force
    1820   // a 4-byte offset.  We will fix this up in the assembler later to have the right
   1821   // value.
   1822   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   1823   LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
   1824   load->flags.fixup = kFixupLoad;
   1825   load->target = data_target;
   1826 }
   1827 
   1828 void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
   1829   // We only support 128 bit registers.
   1830   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1831   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   1832   RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
   1833   NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
   1834 }
   1835 
   1836 void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) {
   1837   const int BYTE_SIZE = 8;
   1838   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1839   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1840   RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());
   1841 
   1842   /*
    1843    * Emulate the behavior of a kSignedByte multiply by separating out the 16 values in the two XMM
    1844    * registers and multiplying 8 at a time before recombining back into one XMM register.
   1845    *
   1846    *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
   1847    *       xmm3 is tmp             (operate on high bits of 16bit lanes)
   1848    *
   1849    *    xmm3 = xmm1
   1850    *    xmm1 = xmm1 .* xmm2
   1851    *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
   1852    *    xmm3 = xmm3 .>> 8
   1853    *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
   1854    *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
   1855    *    xmm1 = xmm1 | xmm2                                // combine results
   1856    */
   1857 
   1858   // Copy xmm1.
   1859   NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());
   1860 
   1861   // Multiply low bits.
   1862   NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1863 
   1864   // xmm1 now has low bits.
   1865   AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
   1866 
   1867   // Prepare high bits for multiplication.
   1868   NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
   1869   AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
   1870 
   1871   // Multiply high bits and xmm2 now has high bits.
   1872   NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());
   1873 
   1874   // Combine back into dest XMM register.
   1875   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1876 }
   1877 
   1878 void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
   1879   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1880   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1881   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1882   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1883   int opcode = 0;
   1884   switch (opsize) {
   1885     case k32:
   1886       opcode = kX86PmulldRR;
   1887       break;
   1888     case kSignedHalf:
   1889       opcode = kX86PmullwRR;
   1890       break;
   1891     case kSingle:
   1892       opcode = kX86MulpsRR;
   1893       break;
   1894     case kDouble:
   1895       opcode = kX86MulpdRR;
   1896       break;
   1897     case kSignedByte:
   1898       // HW doesn't support 16x16 byte multiplication so emulate it.
   1899       GenMultiplyVectorSignedByte(bb, mir);
   1900       return;
   1901     default:
   1902       LOG(FATAL) << "Unsupported vector multiply " << opsize;
   1903       break;
   1904   }
   1905   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1906 }
   1907 
   1908 void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
   1909   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1910   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1911   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1912   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1913   int opcode = 0;
   1914   switch (opsize) {
   1915     case k32:
   1916       opcode = kX86PadddRR;
   1917       break;
   1918     case kSignedHalf:
   1919     case kUnsignedHalf:
   1920       opcode = kX86PaddwRR;
   1921       break;
   1922     case kUnsignedByte:
   1923     case kSignedByte:
   1924       opcode = kX86PaddbRR;
   1925       break;
   1926     case kSingle:
   1927       opcode = kX86AddpsRR;
   1928       break;
   1929     case kDouble:
   1930       opcode = kX86AddpdRR;
   1931       break;
   1932     default:
   1933       LOG(FATAL) << "Unsupported vector addition " << opsize;
   1934       break;
   1935   }
   1936   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1937 }
   1938 
   1939 void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
   1940   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1941   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1942   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1943   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1944   int opcode = 0;
   1945   switch (opsize) {
   1946     case k32:
   1947       opcode = kX86PsubdRR;
   1948       break;
   1949     case kSignedHalf:
   1950     case kUnsignedHalf:
   1951       opcode = kX86PsubwRR;
   1952       break;
   1953     case kUnsignedByte:
   1954     case kSignedByte:
   1955       opcode = kX86PsubbRR;
   1956       break;
   1957     case kSingle:
   1958       opcode = kX86SubpsRR;
   1959       break;
   1960     case kDouble:
   1961       opcode = kX86SubpdRR;
   1962       break;
   1963     default:
   1964       LOG(FATAL) << "Unsupported vector subtraction " << opsize;
   1965       break;
   1966   }
   1967   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1968 }
   1969 
   1970 void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
   1971   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1972   RegStorage rs_tmp = Get128BitRegister(AllocTempWide());
   1973 
   1974   int opcode = 0;
   1975   int imm = mir->dalvikInsn.vB;
   1976 
   1977   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
   1978     case kMirOpPackedShiftLeft:
   1979       opcode = kX86PsllwRI;
   1980       break;
   1981     case kMirOpPackedSignedShiftRight:
   1982       opcode = kX86PsrawRI;
   1983       break;
   1984     case kMirOpPackedUnsignedShiftRight:
   1985       opcode = kX86PsrlwRI;
   1986       break;
   1987     default:
   1988       LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode;
   1989       break;
   1990   }
   1991 
   1992   /*
   1993    * xmm1 will have low bits
   1994    * xmm2 will have high bits
   1995    *
   1996    * xmm2 = xmm1
   1997    * xmm1 = xmm1 .<< N
   1998    * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00
   1999    * xmm2 = xmm2 .<< N
   2000    * xmm1 = xmm1 | xmm2
   2001    */
   2002 
   2003   // Copy xmm1.
   2004   NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());
   2005 
   2006   // Shift lower values.
   2007   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2008 
   2009   // Mask bottom bits.
   2010   AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
   2011 
   2012   // Shift higher values.
   2013   NewLIR2(opcode, rs_tmp.GetReg(), imm);
   2014 
   2015   // Combine back into dest XMM register.
   2016   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
   2017 }
   2018 
   2019 void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
   2020   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2021   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2022   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2023   int imm = mir->dalvikInsn.vB;
   2024   int opcode = 0;
   2025   switch (opsize) {
   2026     case k32:
   2027       opcode = kX86PslldRI;
   2028       break;
   2029     case k64:
   2030       opcode = kX86PsllqRI;
   2031       break;
   2032     case kSignedHalf:
   2033     case kUnsignedHalf:
   2034       opcode = kX86PsllwRI;
   2035       break;
   2036     case kSignedByte:
   2037     case kUnsignedByte:
   2038       GenShiftByteVector(bb, mir);
   2039       return;
   2040     default:
   2041       LOG(FATAL) << "Unsupported vector shift left " << opsize;
   2042       break;
   2043   }
   2044   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2045 }
   2046 
   2047 void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
   2048   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2049   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2050   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2051   int imm = mir->dalvikInsn.vB;
   2052   int opcode = 0;
   2053   switch (opsize) {
   2054     case k32:
   2055       opcode = kX86PsradRI;
   2056       break;
   2057     case kSignedHalf:
   2058     case kUnsignedHalf:
   2059       opcode = kX86PsrawRI;
   2060       break;
   2061     case kSignedByte:
   2062     case kUnsignedByte:
   2063       GenShiftByteVector(bb, mir);
   2064       return;
   2065     default:
   2066       LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
   2067       break;
   2068   }
   2069   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2070 }
   2071 
   2072 void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
   2073   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2074   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2075   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2076   int imm = mir->dalvikInsn.vB;
   2077   int opcode = 0;
   2078   switch (opsize) {
   2079     case k32:
   2080       opcode = kX86PsrldRI;
   2081       break;
   2082     case k64:
   2083       opcode = kX86PsrlqRI;
   2084       break;
   2085     case kSignedHalf:
   2086     case kUnsignedHalf:
   2087       opcode = kX86PsrlwRI;
   2088       break;
   2089     case kSignedByte:
   2090     case kUnsignedByte:
   2091       GenShiftByteVector(bb, mir);
   2092       return;
   2093     default:
   2094       LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
   2095       break;
   2096   }
   2097   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2098 }
   2099 
   2100 void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
   2101   // We only support 128 bit registers.
   2102   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2103   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2104   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2105   NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2106 }
   2107 
   2108 void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
   2109   // We only support 128 bit registers.
   2110   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2111   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2112   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2113   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2114 }
   2115 
   2116 void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
   2117   // We only support 128 bit registers.
   2118   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2119   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2120   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2121   NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2122 }
   2123 
   2124 void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) {
   2125   MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
   2126 }
   2127 
   2128 void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
   2129   // Create temporary MIR as container for 128-bit binary mask.
   2130   MIR const_mir;
   2131   MIR* const_mirp = &const_mir;
   2132   const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
   2133   const_mirp->dalvikInsn.arg[0] = m0;
   2134   const_mirp->dalvikInsn.arg[1] = m1;
   2135   const_mirp->dalvikInsn.arg[2] = m2;
   2136   const_mirp->dalvikInsn.arg[3] = m3;
   2137 
   2138   // Mask vector with const from literal pool.
   2139   AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
   2140 }
   2141 
   2142 void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
   2143   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2144   RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2145   RegLocation rl_dest = mir_graph_->GetDest(mir);
   2146   RegStorage rs_tmp;
   2147 
   2148   int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
   2149   int vec_unit_size = 0;
   2150   int opcode = 0;
   2151   int extr_opcode = 0;
   2152   RegLocation rl_result;
   2153 
   2154   switch (opsize) {
   2155     case k32:
   2156       extr_opcode = kX86PextrdRRI;
   2157       opcode = kX86PhadddRR;
   2158       vec_unit_size = 4;
   2159       break;
   2160     case kSignedByte:
   2161     case kUnsignedByte:
   2162       extr_opcode = kX86PextrbRRI;
   2163       opcode = kX86PhaddwRR;
   2164       vec_unit_size = 2;
   2165       break;
   2166     case kSignedHalf:
   2167     case kUnsignedHalf:
   2168       extr_opcode = kX86PextrwRRI;
   2169       opcode = kX86PhaddwRR;
   2170       vec_unit_size = 2;
   2171       break;
   2172     case kSingle:
   2173       rl_result = EvalLoc(rl_dest, kFPReg, true);
   2174       vec_unit_size = 4;
   2175       for (int i = 0; i < 3; i++) {
   2176         NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
   2177         NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
   2178       }
   2179       NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
   2180       StoreValue(rl_dest, rl_result);
   2181 
   2182       // For single-precision floats, we are done here
   2183       return;
   2184     default:
   2185       LOG(FATAL) << "Unsupported vector add reduce " << opsize;
   2186       break;
   2187   }
   2188 
   2189   int elems = vec_bytes / vec_unit_size;
   2190 
    2191   // Emulate the horizontal add instruction by splitting into 2 vectors of 8 values each,
    2191   // reducing both, and then adding them together again.
    2192   // TODO: is overflow handled correctly?
   2193   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2194     rs_tmp = Get128BitRegister(AllocTempWide());
   2195 
   2196     // tmp = xmm1 .>> 8.
   2197     NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
   2198     NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);
   2199 
   2200     // Zero extend low bits in xmm1.
   2201     AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
   2202   }
   2203 
   2204   while (elems > 1) {
   2205     if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2206       NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
   2207     }
   2208     NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
   2209     elems >>= 1;
   2210   }
   2211 
   2212   // Combine the results if we separated them.
   2213   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2214     NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
   2215   }
   2216 
   2217   // We need to extract to a GPR.
   2218   RegStorage temp = AllocTemp();
   2219   NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);
   2220 
   2221   // Can we do this directly into memory?
   2222   rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2223   if (rl_result.location == kLocPhysReg) {
    2224     // Ensure the result is in a core reg.
   2225     rl_result = EvalLoc(rl_dest, kCoreReg, true);
   2226     OpRegReg(kOpAdd, rl_result.reg, temp);
   2227     StoreFinalValue(rl_dest, rl_result);
   2228   } else {
   2229     OpMemReg(kOpAdd, rl_result, temp.GetReg());
   2230   }
   2231 
   2232   FreeTemp(temp);
   2233 }
   2234 
   2235 void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
   2236   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2237   RegLocation rl_dest = mir_graph_->GetDest(mir);
   2238   RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2239   int extract_index = mir->dalvikInsn.arg[0];
   2240   int extr_opcode = 0;
   2241   RegLocation rl_result;
   2242   bool is_wide = false;
   2243 
   2244   switch (opsize) {
   2245     case k32:
   2246       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2247       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
   2248       break;
   2249     case kSignedHalf:
   2250     case kUnsignedHalf:
    2251       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2252       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
   2253       break;
   2254     default:
    2255       LOG(FATAL) << "Unsupported vector reduce " << opsize;
   2256       return;
   2257       break;
   2258   }
   2259 
   2260   if (rl_result.location == kLocPhysReg) {
   2261     NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
    2262     if (is_wide) {
    2263       StoreFinalValueWide(rl_dest, rl_result);
    2264     } else {
    2265       StoreFinalValue(rl_dest, rl_result);
    2266     }
   2267   } else {
   2268     int displacement = SRegOffset(rl_result.s_reg_low);
   2269     LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
   2270     AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
   2271     AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
   2272   }
   2273 }
   2274 
   2275 void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
   2276   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2277   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2278   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   2279   int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
   2280   RegisterClass reg_type = kCoreReg;
   2281 
   2282   switch (opsize) {
   2283     case k32:
   2284       op_low = kX86PshufdRRI;
   2285       break;
   2286     case kSingle:
   2287       op_low = kX86PshufdRRI;
   2288       op_mov = kX86Mova128RR;
   2289       reg_type = kFPReg;
   2290       break;
   2291     case k64:
   2292       op_low = kX86PshufdRRI;
   2293       imm = 0x44;
   2294       break;
   2295     case kDouble:
   2296       op_low = kX86PshufdRRI;
   2297       op_mov = kX86Mova128RR;
   2298       reg_type = kFPReg;
   2299       imm = 0x44;
   2300       break;
   2301     case kSignedByte:
   2302     case kUnsignedByte:
   2303       // Shuffle 8 bit value into 16 bit word.
   2304       // We set val = val + (val << 8) below and use 16 bit shuffle.
   2305     case kSignedHalf:
   2306     case kUnsignedHalf:
   2307       // Handles low quadword.
   2308       op_low = kX86PshuflwRRI;
   2309       // Handles upper quadword.
   2310       op_high = kX86PshufdRRI;
   2311       break;
   2312     default:
   2313       LOG(FATAL) << "Unsupported vector set " << opsize;
   2314       break;
   2315   }
   2316 
   2317   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   2318 
   2319   // Load the value from the VR into the reg.
   2320   if (rl_src.wide == 0) {
   2321     rl_src = LoadValue(rl_src, reg_type);
   2322   } else {
   2323     rl_src = LoadValueWide(rl_src, reg_type);
   2324   }
   2325 
   2326   // If opsize is 8 bits wide then double value and use 16 bit shuffle instead.
   2327   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2328     RegStorage temp = AllocTemp();
   2329     // val = val + (val << 8).
   2330     NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
   2331     NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
   2332     NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
   2333     FreeTemp(temp);
   2334   }
   2335 
   2336   // Load the value into the XMM register.
   2337   NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());
   2338 
   2339   // Now shuffle the value across the destination.
   2340   NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);
   2341 
   2342   // And then repeat as needed.
   2343   if (op_high != 0) {
   2344     NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
   2345   }
   2346 }
   2347 
   2348 LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
   2349   int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
   2350   for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
   2351     if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
   2352         args[2] == p->operands[2] && args[3] == p->operands[3]) {
   2353       return p;
   2354     }
   2355   }
   2356   return nullptr;
   2357 }
   2358 
   2359 LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
   2360   LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
   2361   int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
   2362   new_value->operands[0] = args[0];
   2363   new_value->operands[1] = args[1];
   2364   new_value->operands[2] = args[2];
   2365   new_value->operands[3] = args[3];
   2366   new_value->next = const_vectors_;
   2367   if (const_vectors_ == nullptr) {
   2368     estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
   2369   }
   2370   estimated_native_code_size_ += 16;  // Space for one vector.
   2371   const_vectors_ = new_value;
   2372   return new_value;
   2373 }
   2374 
   2375 // ------------ ABI support: mapping of args to physical registers -------------
   2376 RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide,
   2377                                                               bool is_ref) {
   2378   const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
   2379   const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) /
   2380       sizeof(SpecialTargetRegister);
   2381   const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
   2382                                                              kFArg4, kFArg5, kFArg6, kFArg7};
   2383   const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) /
   2384       sizeof(SpecialTargetRegister);
   2385 
   2386   if (is_double_or_float) {
   2387     if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
   2388       return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide);
   2389     }
   2390   } else {
   2391     if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
   2392       return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
   2393                             is_ref ? kRef : (is_wide ? kWide : kNotWide));
   2394     }
   2395   }
   2396   return RegStorage::InvalidReg();
   2397 }
   2398 
   2399 RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
   2400   DCHECK(IsInitialized());
   2401   auto res = mapping_.find(in_position);
   2402   return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
   2403 }
   2404 
   2405 void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
   2406                                                    InToRegStorageMapper* mapper) {
   2407   DCHECK(mapper != nullptr);
   2408   max_mapped_in_ = -1;
   2409   is_there_stack_mapped_ = false;
   2410   for (int in_position = 0; in_position < count; in_position++) {
   2411      RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
   2412              arg_locs[in_position].wide, arg_locs[in_position].ref);
   2413      if (reg.Valid()) {
   2414        mapping_[in_position] = reg;
   2415        max_mapped_in_ = std::max(max_mapped_in_, in_position);
   2416        if (arg_locs[in_position].wide) {
   2417          // We covered 2 args, so skip the next one
   2418          in_position++;
   2419        }
   2420      } else {
   2421        is_there_stack_mapped_ = true;
   2422      }
   2423   }
   2424   initialized_ = true;
   2425 }
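         // As an illustration (hypothetical ins list), for (Object this, long, float) the mapper
         // above yields: position 0 -> kArg1 (ref), position 1 -> kArg2 (wide; position 2 is
         // skipped as its high half), position 3 -> kFArg0.  Anything left unmapped marks the
         // mapping as having stack-passed arguments.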
   2426 
   2427 RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   2428   if (!cu_->target64) {
   2429     return GetCoreArgMappingToPhysicalReg(arg_num);
   2430   }
   2431 
   2432   if (!in_to_reg_storage_mapping_.IsInitialized()) {
   2433     int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   2434     RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
   2435 
   2436     InToRegStorageX86_64Mapper mapper(this);
   2437     in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
   2438   }
   2439   return in_to_reg_storage_mapping_.Get(arg_num);
   2440 }
   2441 
   2442 RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
   2443   // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
    2444   // Not used for 64-bit.  TODO: Move X86_32 to the same framework.
   2445   switch (core_arg_num) {
   2446     case 0:
   2447       return rs_rX86_ARG1;
   2448     case 1:
   2449       return rs_rX86_ARG2;
   2450     case 2:
   2451       return rs_rX86_ARG3;
   2452     default:
   2453       return RegStorage::InvalidReg();
   2454   }
   2455 }
   2456 
   2457 // ---------End of ABI support: mapping of args to physical registers -------------
   2458 
   2459 /*
   2460  * If there are any ins passed in registers that have not been promoted
   2461  * to a callee-save register, flush them to the frame.  Perform initial
   2462  * assignment of promoted arguments.
   2463  *
   2464  * ArgLocs is an array of location records describing the incoming arguments
   2465  * with one location record per word of argument.
   2466  */
   2467 void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
   2468   if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
   2469   /*
   2470    * Dummy up a RegLocation for the incoming Method*
   2471    * It will attempt to keep kArg0 live (or copy it to home location
   2472    * if promoted).
   2473    */
   2474 
   2475   RegLocation rl_src = rl_method;
   2476   rl_src.location = kLocPhysReg;
   2477   rl_src.reg = TargetReg(kArg0, kRef);
   2478   rl_src.home = false;
   2479   MarkLive(rl_src);
   2480   StoreValue(rl_method, rl_src);
   2481   // If Method* has been promoted, explicitly flush
   2482   if (rl_method.location == kLocPhysReg) {
   2483     StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
   2484   }
   2485 
   2486   if (cu_->num_ins == 0) {
   2487     return;
   2488   }
   2489 
   2490   int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   2491   /*
   2492    * Copy incoming arguments to their proper home locations.
   2493    * NOTE: an older version of dx had an issue in which
   2494    * it would reuse static method argument registers.
   2495    * This could result in the same Dalvik virtual register
   2496    * being promoted to both core and fp regs. To account for this,
   2497    * we only copy to the corresponding promoted physical register
   2498    * if it matches the type of the SSA name for the incoming
   2499    * argument.  It is also possible that long and double arguments
   2500    * end up half-promoted.  In those cases, we must flush the promoted
   2501    * half to memory as well.
   2502    */
   2503   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2504   for (int i = 0; i < cu_->num_ins; i++) {
   2505     // get reg corresponding to input
   2506     RegStorage reg = GetArgMappingToPhysicalReg(i);
   2507 
   2508     RegLocation* t_loc = &ArgLocs[i];
   2509     if (reg.Valid()) {
   2510       // If arriving in register.
   2511 
    2512       // We have already updated the arg location with promoted info,
    2513       // so we can rely on it.
   2514       if (t_loc->location == kLocPhysReg) {
   2515         // Just copy it.
   2516         OpRegCopy(t_loc->reg, reg);
   2517       } else {
   2518         // Needs flush.
   2519         if (t_loc->ref) {
   2520           StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
   2521         } else {
   2522           StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
   2523                         kNotVolatile);
   2524         }
   2525       }
   2526     } else {
   2527       // If arriving in frame & promoted.
   2528       if (t_loc->location == kLocPhysReg) {
   2529         if (t_loc->ref) {
   2530           LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
   2531         } else {
   2532           LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
   2533                        t_loc->wide ? k64 : k32, kNotVolatile);
   2534         }
   2535       }
   2536     }
   2537     if (t_loc->wide) {
   2538       // Increment i to skip the next one.
   2539       i++;
   2540     }
   2541   }
   2542 }
   2543 
   2544 /*
   2545  * Load up to 5 arguments, the first three of which will be in
   2546  * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
   2547  * and as part of the load sequence, it must be replaced with
   2548  * the target method pointer.  Note, this may also be called
   2549  * for "range" variants if the number of arguments is 5 or fewer.
   2550  */
   2551 int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
   2552                                   int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
   2553                                   const MethodReference& target_method,
   2554                                   uint32_t vtable_idx, uintptr_t direct_code,
   2555                                   uintptr_t direct_method, InvokeType type, bool skip_this) {
   2556   if (!cu_->target64) {
   2557     return Mir2Lir::GenDalvikArgsNoRange(info,
   2558                                   call_state, pcrLabel, next_call_insn,
   2559                                   target_method,
   2560                                   vtable_idx, direct_code,
   2561                                   direct_method, type, skip_this);
   2562   }
   2563   return GenDalvikArgsRange(info,
   2564                        call_state, pcrLabel, next_call_insn,
   2565                        target_method,
   2566                        vtable_idx, direct_code,
   2567                        direct_method, type, skip_this);
   2568 }
   2569 
   2570 /*
   2571  * May have 0+ arguments (also used for jumbo).  Note that
   2572  * source virtual registers may be in physical registers, so may
   2573  * need to be flushed to home location before copying.  This
   2574  * applies to arg3 and above (see below).
   2575  *
   2576  * Two general strategies:
   2577  *    If < 20 arguments
   2578  *       Pass args 3-18 using vldm/vstm block copy
   2579  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
   2580  *    If 20+ arguments
   2581  *       Pass args arg19+ using memcpy block copy
   2582  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
   2583  *
   2584  */
   2585 int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
   2586                                 LIR** pcrLabel, NextCallInsn next_call_insn,
   2587                                 const MethodReference& target_method,
   2588                                 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
   2589                                 InvokeType type, bool skip_this) {
   2590   if (!cu_->target64) {
   2591     return Mir2Lir::GenDalvikArgsRange(info, call_state,
   2592                                 pcrLabel, next_call_insn,
   2593                                 target_method,
   2594                                 vtable_idx, direct_code, direct_method,
   2595                                 type, skip_this);
   2596   }
   2597 
   2598   /* If no arguments, just return */
   2599   if (info->num_arg_words == 0)
   2600     return call_state;
   2601 
   2602   const int start_index = skip_this ? 1 : 0;
   2603 
   2604   InToRegStorageX86_64Mapper mapper(this);
   2605   InToRegStorageMapping in_to_reg_storage_mapping;
   2606   in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
   2607   const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
   2608   const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
   2609           info->args[last_mapped_in].wide ? 2 : 1;
   2610   int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
   2611 
    2612   // First of all, check whether it makes sense to use bulk copying.
    2613   // The optimization is applicable only for the range case.
    2614   // TODO: make a constant instead of 2.
   2615   if (info->is_range && regs_left_to_pass_via_stack >= 2) {
   2616     // Scan the rest of the args - if in phys_reg flush to memory
   2617     for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
   2618       RegLocation loc = info->args[next_arg];
   2619       if (loc.wide) {
   2620         loc = UpdateLocWide(loc);
   2621         if (loc.location == kLocPhysReg) {
   2622           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2623           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
   2624         }
   2625         next_arg += 2;
   2626       } else {
   2627         loc = UpdateLoc(loc);
   2628         if (loc.location == kLocPhysReg) {
   2629           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2630           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
   2631         }
   2632         next_arg++;
   2633       }
   2634     }
   2635 
   2636     // Logic below assumes that Method pointer is at offset zero from SP.
   2637     DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
   2638 
   2639     // The rest can be copied together
   2640     int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
   2641     int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
   2642                                                    cu_->instruction_set);
   2643 
   2644     int current_src_offset = start_offset;
   2645     int current_dest_offset = outs_offset;
   2646 
    2647     // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
   2648     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2649     while (regs_left_to_pass_via_stack > 0) {
   2650       // This is based on the knowledge that the stack itself is 16-byte aligned.
   2651       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
   2652       bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
   2653       size_t bytes_to_move;
   2654 
   2655       /*
    2656        * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
    2657        * a 128-bit move because we won't get the chance to try to align. If there are more than
    2658        * 4 registers left to move, consider doing a 128-bit move only if either src or dest is aligned.
   2659        * We do this because we could potentially do a smaller move to align.
   2660        */
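               // For example: with exactly 4 words left we always take the 128-bit path; with more
               // than 4 left we take it only if at least one side is 16-byte aligned; otherwise we
               // fall through to the 32-bit copies, since a smaller move may get us aligned for a
               // later 128-bit move.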
   2661       if (regs_left_to_pass_via_stack == 4 ||
   2662           (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
   2663         // Moving 128-bits via xmm register.
   2664         bytes_to_move = sizeof(uint32_t) * 4;
   2665 
   2666         // Allocate a free xmm temp. Since we are working through the calling sequence,
   2667         // we expect to have an xmm temporary available.  AllocTempDouble will abort if
   2668         // there are no free registers.
   2669         RegStorage temp = AllocTempDouble();
   2670 
   2671         LIR* ld1 = nullptr;
   2672         LIR* ld2 = nullptr;
   2673         LIR* st1 = nullptr;
   2674         LIR* st2 = nullptr;
   2675 
   2676         /*
   2677          * The logic is similar for both loads and stores. If we have 16-byte alignment,
   2678          * do an aligned move. If we have 8-byte alignment, then do the move in two
   2679          * parts. This approach prevents possible cache line splits. Finally, fall back
   2680          * to doing an unaligned move. In most cases we likely won't split the cache
   2681          * line but we cannot prove it and thus take a conservative approach.
   2682          */
   2683         bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
   2684         bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
   2685 
   2686         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2687         if (src_is_16b_aligned) {
   2688           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
   2689         } else if (src_is_8b_aligned) {
   2690           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
   2691           ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
   2692                             kMovHi128FP);
   2693         } else {
   2694           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
   2695         }
   2696 
   2697         if (dest_is_16b_aligned) {
   2698           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
   2699         } else if (dest_is_8b_aligned) {
   2700           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
   2701           st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
   2702                             temp, kMovHi128FP);
   2703         } else {
   2704           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
   2705         }
   2706 
    2707         // TODO: If we could keep track of aliasing information for memory accesses wider
    2708         // than 64 bits, we wouldn't need to set up a barrier.
   2709         if (ld1 != nullptr) {
   2710           if (ld2 != nullptr) {
   2711             // For 64-bit load we can actually set up the aliasing information.
   2712             AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
   2713             AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
   2714           } else {
   2715             // Set barrier for 128-bit load.
   2716             ld1->u.m.def_mask = &kEncodeAll;
   2717           }
   2718         }
   2719         if (st1 != nullptr) {
   2720           if (st2 != nullptr) {
   2721             // For 64-bit store we can actually set up the aliasing information.
   2722             AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
   2723             AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
   2724           } else {
   2725             // Set barrier for 128-bit store.
   2726             st1->u.m.def_mask = &kEncodeAll;
   2727           }
   2728         }
   2729 
   2730         // Free the temporary used for the data movement.
   2731         FreeTemp(temp);
   2732       } else {
    2733         // Moving 32 bits via a general-purpose register.
   2734         bytes_to_move = sizeof(uint32_t);
   2735 
   2736         // Instead of allocating a new temp, simply reuse one of the registers being used
   2737         // for argument passing.
   2738         RegStorage temp = TargetReg(kArg3, kNotWide);
   2739 
   2740         // Now load the argument VR and store to the outs.
   2741         Load32Disp(rs_rX86_SP, current_src_offset, temp);
   2742         Store32Disp(rs_rX86_SP, current_dest_offset, temp);
   2743       }
   2744 
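               // Advance both cursors by the amount just copied; each argument vreg word is
               // 4 bytes, so bytes_to_move >> 2 is the number of words consumed.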
   2745       current_src_offset += bytes_to_move;
   2746       current_dest_offset += bytes_to_move;
   2747       regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
   2748     }
   2749     DCHECK_EQ(regs_left_to_pass_via_stack, 0);
   2750   }
   2751 
    2752   // Now handle the remaining arguments that were not mapped to registers, if any.
   2753   if (in_to_reg_storage_mapping.IsThereStackMapped()) {
   2754     RegStorage regSingle = TargetReg(kArg2, kNotWide);
   2755     RegStorage regWide = TargetReg(kArg3, kWide);
   2756     for (int i = start_index;
   2757          i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
   2758       RegLocation rl_arg = info->args[i];
   2759       rl_arg = UpdateRawLoc(rl_arg);
   2760       RegStorage reg = in_to_reg_storage_mapping.Get(i);
   2761       if (!reg.Valid()) {
   2762         int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
   2763 
   2764         {
   2765           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2766           if (rl_arg.wide) {
   2767             if (rl_arg.location == kLocPhysReg) {
   2768               StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
   2769             } else {
   2770               LoadValueDirectWideFixed(rl_arg, regWide);
   2771               StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
   2772             }
   2773           } else {
   2774             if (rl_arg.location == kLocPhysReg) {
   2775               StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
   2776             } else {
   2777               LoadValueDirectFixed(rl_arg, regSingle);
   2778               StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
   2779             }
   2780           }
   2781         }
   2782         call_state = next_call_insn(cu_, info, call_state, target_method,
   2783                                     vtable_idx, direct_code, direct_method, type);
   2784       }
   2785       if (rl_arg.wide) {
   2786         i++;
   2787       }
   2788     }
   2789   }
   2790 
   2791   // Finish with mapped registers
   2792   for (int i = start_index; i <= last_mapped_in; i++) {
   2793     RegLocation rl_arg = info->args[i];
   2794     rl_arg = UpdateRawLoc(rl_arg);
   2795     RegStorage reg = in_to_reg_storage_mapping.Get(i);
   2796     if (reg.Valid()) {
   2797       if (rl_arg.wide) {
   2798         LoadValueDirectWideFixed(rl_arg, reg);
   2799       } else {
   2800         LoadValueDirectFixed(rl_arg, reg);
   2801       }
   2802       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
   2803                                direct_code, direct_method, type);
   2804     }
   2805     if (rl_arg.wide) {
   2806       i++;
   2807     }
   2808   }
   2809 
   2810   call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
   2811                            direct_code, direct_method, type);
   2812   if (pcrLabel) {
   2813     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
   2814       *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
   2815     } else {
   2816       *pcrLabel = nullptr;
   2817       // In lieu of generating a check for kArg1 being null, we need to
   2818       // perform a load when doing implicit checks.
   2819       RegStorage tmp = AllocTemp();
   2820       Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
   2821       MarkPossibleNullPointerException(info->opt_flags);
   2822       FreeTemp(tmp);
   2823     }
   2824   }
   2825   return call_state;
   2826 }
   2827 
   2828 bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
   2829   // Location of reference to data array
   2830   int value_offset = mirror::String::ValueOffset().Int32Value();
   2831   // Location of count
   2832   int count_offset = mirror::String::CountOffset().Int32Value();
   2833   // Starting offset within data array
   2834   int offset_offset = mirror::String::OffsetOffset().Int32Value();
    2835   // Start of char data within array_
   2836   int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
   2837 
   2838   RegLocation rl_obj = info->args[0];
   2839   RegLocation rl_idx = info->args[1];
   2840   rl_obj = LoadValue(rl_obj, kRefReg);
   2841   // X86 wants to avoid putting a constant index into a register.
   2842   if (!rl_idx.is_const) {
   2843     rl_idx = LoadValue(rl_idx, kCoreReg);
   2844   }
   2845   RegStorage reg_max;
   2846   GenNullCheck(rl_obj.reg, info->opt_flags);
   2847   bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
   2848   LIR* range_check_branch = nullptr;
   2849   RegStorage reg_off;
   2850   RegStorage reg_ptr;
   2851   if (range_check) {
   2852     // On x86, we can compare to memory directly
    2853     // Set up a launch pad to allow retry in case of bounds violation.
   2854     if (rl_idx.is_const) {
   2855       LIR* comparison;
   2856       range_check_branch = OpCmpMemImmBranch(
   2857           kCondLs, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
   2858           mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
   2859       MarkPossibleNullPointerExceptionAfter(0, comparison);
   2860     } else {
   2861       OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
   2862       MarkPossibleNullPointerException(0);
   2863       range_check_branch = OpCondBranch(kCondUge, nullptr);
   2864     }
   2865   }
   2866   reg_off = AllocTemp();
   2867   reg_ptr = AllocTempRef();
   2868   Load32Disp(rl_obj.reg, offset_offset, reg_off);
   2869   LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
   2870   if (rl_idx.is_const) {
   2871     OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
   2872   } else {
   2873     OpRegReg(kOpAdd, reg_off, rl_idx.reg);
   2874   }
   2875   FreeTemp(rl_obj.reg);
   2876   if (rl_idx.location == kLocPhysReg) {
   2877     FreeTemp(rl_idx.reg);
   2878   }
   2879   RegLocation rl_dest = InlineTarget(info);
   2880   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
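           // Fetch the 16-bit character: reg_ptr holds the value array reference and reg_off the
           // combined (string offset + index), scaled for uint16_t elements.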
   2881   LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
   2882   FreeTemp(reg_off);
   2883   FreeTemp(reg_ptr);
   2884   StoreValue(rl_dest, rl_result);
   2885   if (range_check) {
   2886     DCHECK(range_check_branch != nullptr);
   2887     info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
   2888     AddIntrinsicSlowPath(info, range_check_branch);
   2889   }
   2890   return true;
   2891 }
   2892 
   2893 bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
   2894   RegLocation rl_dest = InlineTarget(info);
   2895 
   2896   // Early exit if the result is unused.
   2897   if (rl_dest.orig_sreg < 0) {
   2898     return true;
   2899   }
   2900 
   2901   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
   2902 
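           // Read the java.lang.Thread peer out of the Thread object via the thread register;
           // the offset of the field depends on the target pointer width.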
   2903   if (cu_->target64) {
   2904     OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
   2905   } else {
   2906     OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
   2907   }
   2908 
   2909   StoreValue(rl_dest, rl_result);
   2910   return true;
   2911 }
   2912 
   2913 }  // namespace art
   2914