      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include <string>
     18 #include <inttypes.h>
     19 
     20 #include "codegen_x86.h"
     21 #include "dex/compiler_internals.h"
     22 #include "dex/quick/mir_to_lir-inl.h"
     23 #include "dex/reg_storage_eq.h"
     24 #include "mirror/array.h"
     25 #include "mirror/string.h"
     26 #include "x86_lir.h"
     27 
     28 namespace art {
     29 
     30 static constexpr RegStorage core_regs_arr_32[] = {
     31     rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
     32 };
     33 static constexpr RegStorage core_regs_arr_64[] = {
     34     rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
     35     rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
     36 };
     37 static constexpr RegStorage core_regs_arr_64q[] = {
     38     rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
     39     rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
     40 };
     41 static constexpr RegStorage sp_regs_arr_32[] = {
     42     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     43 };
     44 static constexpr RegStorage sp_regs_arr_64[] = {
     45     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     46     rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
     47 };
     48 static constexpr RegStorage dp_regs_arr_32[] = {
     49     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
     50 };
     51 static constexpr RegStorage dp_regs_arr_64[] = {
     52     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
     53     rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
     54 };
     55 static constexpr RegStorage xp_regs_arr_32[] = {
     56     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
     57 };
     58 static constexpr RegStorage xp_regs_arr_64[] = {
     59     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
     60     rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
     61 };
     62 static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
     63 static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
     64 static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
     65 static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
     66 static constexpr RegStorage core_temps_arr_64[] = {
     67     rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
     68     rs_r8, rs_r9, rs_r10, rs_r11
     69 };
     70 
      71 // How to add a register so it is available for promotion:
      72 // 1) Remove the register from the array defining temps
     73 // 2) Update ClobberCallerSave
     74 // 3) Update JNI compiler ABI:
     75 // 3.1) add reg in JniCallingConvention method
     76 // 3.2) update CoreSpillMask/FpSpillMask
     77 // 4) Update entrypoints
     78 // 4.1) Update constants in asm_support_x86_64.h for new frame size
     79 // 4.2) Remove entry in SmashCallerSaves
     80 // 4.3) Update jni_entrypoints to spill/unspill new callee save reg
     81 // 4.4) Update quick_entrypoints to spill/unspill new callee save reg
     82 // 5) Update runtime ABI
     83 // 5.1) Update quick_method_frame_info with new required spills
     84 // 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
      85 // Note that you cannot use registers that carry incoming args according to
      86 // the ABI, and the QCG needs one additional XMM temp for the bulk copy done
      87 // when preparing a call (see the illustrative note below).
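         // Illustrative example (not an actual change): to make r11 available for promotion on
         // x86-64, one would remove rs_r11 from core_temps_arr_64 above and rs_r11q from
         // core_temps_arr_64q below, then follow steps 2-5 so the runtime treats r11 as callee-save.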
     88 static constexpr RegStorage core_temps_arr_64q[] = {
     89     rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
     90     rs_r8q, rs_r9q, rs_r10q, rs_r11q
     91 };
     92 static constexpr RegStorage sp_temps_arr_32[] = {
     93     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     94 };
     95 static constexpr RegStorage sp_temps_arr_64[] = {
     96     rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
     97     rs_fr8, rs_fr9, rs_fr10, rs_fr11
     98 };
     99 static constexpr RegStorage dp_temps_arr_32[] = {
    100     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    101 };
    102 static constexpr RegStorage dp_temps_arr_64[] = {
    103     rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
    104     rs_dr8, rs_dr9, rs_dr10, rs_dr11
    105 };
    106 
    107 static constexpr RegStorage xp_temps_arr_32[] = {
    108     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    109 };
    110 static constexpr RegStorage xp_temps_arr_64[] = {
    111     rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
    112     rs_xr8, rs_xr9, rs_xr10, rs_xr11
    113 };
    114 
    115 static constexpr ArrayRef<const RegStorage> empty_pool;
    116 static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
    117 static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
    118 static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
    119 static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
    120 static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
    121 static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
    122 static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
    123 static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
    124 static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
    125 static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
    126 static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
    127 static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
    128 static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
    129 static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
    130 static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
    131 static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
    132 static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
    133 static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
    134 static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
    135 
    136 static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
    137 static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
    138 
    139 RegStorage rs_rX86_SP;
    140 
    141 X86NativeRegisterPool rX86_ARG0;
    142 X86NativeRegisterPool rX86_ARG1;
    143 X86NativeRegisterPool rX86_ARG2;
    144 X86NativeRegisterPool rX86_ARG3;
    145 X86NativeRegisterPool rX86_ARG4;
    146 X86NativeRegisterPool rX86_ARG5;
    147 X86NativeRegisterPool rX86_FARG0;
    148 X86NativeRegisterPool rX86_FARG1;
    149 X86NativeRegisterPool rX86_FARG2;
    150 X86NativeRegisterPool rX86_FARG3;
    151 X86NativeRegisterPool rX86_FARG4;
    152 X86NativeRegisterPool rX86_FARG5;
    153 X86NativeRegisterPool rX86_FARG6;
    154 X86NativeRegisterPool rX86_FARG7;
    155 X86NativeRegisterPool rX86_RET0;
    156 X86NativeRegisterPool rX86_RET1;
    157 X86NativeRegisterPool rX86_INVOKE_TGT;
    158 X86NativeRegisterPool rX86_COUNT;
    159 
    160 RegStorage rs_rX86_ARG0;
    161 RegStorage rs_rX86_ARG1;
    162 RegStorage rs_rX86_ARG2;
    163 RegStorage rs_rX86_ARG3;
    164 RegStorage rs_rX86_ARG4;
    165 RegStorage rs_rX86_ARG5;
    166 RegStorage rs_rX86_FARG0;
    167 RegStorage rs_rX86_FARG1;
    168 RegStorage rs_rX86_FARG2;
    169 RegStorage rs_rX86_FARG3;
    170 RegStorage rs_rX86_FARG4;
    171 RegStorage rs_rX86_FARG5;
    172 RegStorage rs_rX86_FARG6;
    173 RegStorage rs_rX86_FARG7;
    174 RegStorage rs_rX86_RET0;
    175 RegStorage rs_rX86_RET1;
    176 RegStorage rs_rX86_INVOKE_TGT;
    177 RegStorage rs_rX86_COUNT;
    178 
    179 RegLocation X86Mir2Lir::LocCReturn() {
    180   return x86_loc_c_return;
    181 }
    182 
    183 RegLocation X86Mir2Lir::LocCReturnRef() {
    184   return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
    185 }
    186 
    187 RegLocation X86Mir2Lir::LocCReturnWide() {
    188   return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
    189 }
    190 
    191 RegLocation X86Mir2Lir::LocCReturnFloat() {
    192   return x86_loc_c_return_float;
    193 }
    194 
    195 RegLocation X86Mir2Lir::LocCReturnDouble() {
    196   return x86_loc_c_return_double;
    197 }
    198 
    199 // Return a target-dependent special register for 32-bit.
    200 RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
    201   RegStorage res_reg = RegStorage::InvalidReg();
    202   switch (reg) {
    203     case kSelf: res_reg = RegStorage::InvalidReg(); break;
    204     case kSuspend: res_reg =  RegStorage::InvalidReg(); break;
    205     case kLr: res_reg =  RegStorage::InvalidReg(); break;
    206     case kPc: res_reg =  RegStorage::InvalidReg(); break;
    207     case kSp: res_reg =  rs_rX86_SP_32; break;  // This must be the concrete one, as _SP is target-
    208                                                 // specific size.
    209     case kArg0: res_reg = rs_rX86_ARG0; break;
    210     case kArg1: res_reg = rs_rX86_ARG1; break;
    211     case kArg2: res_reg = rs_rX86_ARG2; break;
    212     case kArg3: res_reg = rs_rX86_ARG3; break;
    213     case kArg4: res_reg = rs_rX86_ARG4; break;
    214     case kArg5: res_reg = rs_rX86_ARG5; break;
    215     case kFArg0: res_reg = rs_rX86_FARG0; break;
    216     case kFArg1: res_reg = rs_rX86_FARG1; break;
    217     case kFArg2: res_reg = rs_rX86_FARG2; break;
    218     case kFArg3: res_reg = rs_rX86_FARG3; break;
    219     case kFArg4: res_reg = rs_rX86_FARG4; break;
    220     case kFArg5: res_reg = rs_rX86_FARG5; break;
    221     case kFArg6: res_reg = rs_rX86_FARG6; break;
    222     case kFArg7: res_reg = rs_rX86_FARG7; break;
    223     case kRet0: res_reg = rs_rX86_RET0; break;
    224     case kRet1: res_reg = rs_rX86_RET1; break;
    225     case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    226     case kHiddenArg: res_reg = rs_rAX; break;
    227     case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
    228     case kCount: res_reg = rs_rX86_COUNT; break;
    229     default: res_reg = RegStorage::InvalidReg();
    230   }
    231   return res_reg;
    232 }
    233 
    234 RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
    235   LOG(FATAL) << "Do not use this function!!!";
    236   return RegStorage::InvalidReg();
    237 }
    238 
    239 /*
    240  * Decode the register id.
    241  */
    242 ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
    243   /* Double registers in x86 are just a single FP register. This is always just a single bit. */
    244   return ResourceMask::Bit(
    245       /* FP register starts at bit position 16 */
    246       ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
    247 }
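         // For example (illustrative): rs_rCX (core register 1) maps to bit 1, while rs_fr3,
         // rs_dr3 and rs_xr3 all map to bit kX86FPReg0 + 3, since they alias the same xmm register.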
    248 
    249 ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
    250   return kEncodeNone;
    251 }
    252 
    253 void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
    254                                           ResourceMask* use_mask, ResourceMask* def_mask) {
    255   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
    256   DCHECK(!lir->flags.use_def_invalid);
    257 
    258   // X86-specific resource map setup here.
    259   if (flags & REG_USE_SP) {
    260     use_mask->SetBit(kX86RegSP);
    261   }
    262 
    263   if (flags & REG_DEF_SP) {
    264     def_mask->SetBit(kX86RegSP);
    265   }
    266 
    267   if (flags & REG_DEFA) {
    268     SetupRegMask(def_mask, rs_rAX.GetReg());
    269   }
    270 
    271   if (flags & REG_DEFD) {
    272     SetupRegMask(def_mask, rs_rDX.GetReg());
    273   }
    274   if (flags & REG_USEA) {
    275     SetupRegMask(use_mask, rs_rAX.GetReg());
    276   }
    277 
    278   if (flags & REG_USEC) {
    279     SetupRegMask(use_mask, rs_rCX.GetReg());
    280   }
    281 
    282   if (flags & REG_USED) {
    283     SetupRegMask(use_mask, rs_rDX.GetReg());
    284   }
    285 
    286   if (flags & REG_USEB) {
    287     SetupRegMask(use_mask, rs_rBX.GetReg());
    288   }
    289 
     290   // Fix up a hard-to-describe instruction: it uses rAX, rCX and rDI, and sets rDI.
    291   if (lir->opcode == kX86RepneScasw) {
    292     SetupRegMask(use_mask, rs_rAX.GetReg());
    293     SetupRegMask(use_mask, rs_rCX.GetReg());
    294     SetupRegMask(use_mask, rs_rDI.GetReg());
    295     SetupRegMask(def_mask, rs_rDI.GetReg());
    296   }
    297 
    298   if (flags & USE_FP_STACK) {
    299     use_mask->SetBit(kX86FPStack);
    300     def_mask->SetBit(kX86FPStack);
    301   }
    302 }
    303 
    304 /* For dumping instructions */
    305 static const char* x86RegName[] = {
    306   "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
    307   "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
    308 };
    309 
    310 static const char* x86CondName[] = {
    311   "O",
    312   "NO",
    313   "B/NAE/C",
    314   "NB/AE/NC",
    315   "Z/EQ",
    316   "NZ/NE",
    317   "BE/NA",
    318   "NBE/A",
    319   "S",
    320   "NS",
    321   "P/PE",
    322   "NP/PO",
    323   "L/NGE",
    324   "NL/GE",
    325   "LE/NG",
    326   "NLE/G"
    327 };
    328 
    329 /*
     330  * Interpret a format string and build a human-readable string for the LIR.
     331  * See the format key in Assemble.cc.
    332  */
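         // Illustrative example (assumed format string): fmt "cmp !0r,!1d" with operands
         // {rs_rCX.GetReg(), 42} would produce "cmp rcx,42".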
    333 std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
    334   std::string buf;
    335   size_t i = 0;
    336   size_t fmt_len = strlen(fmt);
    337   while (i < fmt_len) {
    338     if (fmt[i] != '!') {
    339       buf += fmt[i];
    340       i++;
    341     } else {
    342       i++;
    343       DCHECK_LT(i, fmt_len);
    344       char operand_number_ch = fmt[i];
    345       i++;
    346       if (operand_number_ch == '!') {
    347         buf += "!";
    348       } else {
    349         int operand_number = operand_number_ch - '0';
     350         DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
    351         DCHECK_LT(i, fmt_len);
    352         int operand = lir->operands[operand_number];
    353         switch (fmt[i]) {
    354           case 'c':
     355             DCHECK_LT(static_cast<size_t>(operand), arraysize(x86CondName));
    356             buf += x86CondName[operand];
    357             break;
    358           case 'd':
    359             buf += StringPrintf("%d", operand);
    360             break;
    361           case 'q': {
    362              int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
    363                              static_cast<uint32_t>(lir->operands[operand_number+1]));
     364              buf += StringPrintf("%" PRId64, value);
                      break;
     365           }
    366           case 'p': {
    367             EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
    368             buf += StringPrintf("0x%08x", tab_rec->offset);
    369             break;
    370           }
    371           case 'r':
    372             if (RegStorage::IsFloat(operand)) {
    373               int fp_reg = RegStorage::RegNum(operand);
    374               buf += StringPrintf("xmm%d", fp_reg);
    375             } else {
    376               int reg_num = RegStorage::RegNum(operand);
     377               DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
    378               buf += x86RegName[reg_num];
    379             }
    380             break;
    381           case 't':
    382             buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
    383                                 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
    384                                 lir->target);
    385             break;
    386           default:
    387             buf += StringPrintf("DecodeError '%c'", fmt[i]);
    388             break;
    389         }
    390         i++;
    391       }
    392     }
    393   }
    394   return buf;
    395 }
    396 
    397 void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
    398   char buf[256];
    399   buf[0] = 0;
    400 
    401   if (mask.Equals(kEncodeAll)) {
    402     strcpy(buf, "all");
    403   } else {
    404     char num[8];
    405     int i;
    406 
    407     for (i = 0; i < kX86RegEnd; i++) {
    408       if (mask.HasBit(i)) {
    409         snprintf(num, arraysize(num), "%d ", i);
    410         strcat(buf, num);
    411       }
    412     }
    413 
    414     if (mask.HasBit(ResourceMask::kCCode)) {
    415       strcat(buf, "cc ");
    416     }
    417     /* Memory bits */
    418     if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
    419       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
    420                DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
    421                (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    422     }
    423     if (mask.HasBit(ResourceMask::kLiteral)) {
    424       strcat(buf, "lit ");
    425     }
    426 
    427     if (mask.HasBit(ResourceMask::kHeapRef)) {
    428       strcat(buf, "heap ");
    429     }
    430     if (mask.HasBit(ResourceMask::kMustNotAlias)) {
    431       strcat(buf, "noalias ");
    432     }
    433   }
    434   if (buf[0]) {
    435     LOG(INFO) << prefix << ": " <<  buf;
    436   }
    437 }
    438 
    439 void X86Mir2Lir::AdjustSpillMask() {
     440   // x86 has no LR, so there is no LR adjustment; instead, account for the fake return-address slot.
    441   core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
    442   num_core_spills_++;
    443 }
    444 
    445 RegStorage X86Mir2Lir::AllocateByteRegister() {
    446   RegStorage reg = AllocTypedTemp(false, kCoreReg);
    447   if (!cu_->target64) {
    448     DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
    449   }
    450   return reg;
    451 }
    452 
    453 RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
    454   return GetRegInfo(reg)->FindMatchingView(RegisterInfo::k128SoloStorageMask)->GetReg();
    455 }
    456 
    457 bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
    458   return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
    459 }
    460 
    461 /* Clobber all regs that might be used by an external C call */
    462 void X86Mir2Lir::ClobberCallerSave() {
    463   if (cu_->target64) {
    464     Clobber(rs_rAX);
    465     Clobber(rs_rCX);
    466     Clobber(rs_rDX);
    467     Clobber(rs_rSI);
    468     Clobber(rs_rDI);
    469 
    470     Clobber(rs_r8);
    471     Clobber(rs_r9);
    472     Clobber(rs_r10);
    473     Clobber(rs_r11);
    474 
    475     Clobber(rs_fr8);
    476     Clobber(rs_fr9);
    477     Clobber(rs_fr10);
    478     Clobber(rs_fr11);
    479   } else {
    480     Clobber(rs_rAX);
    481     Clobber(rs_rCX);
    482     Clobber(rs_rDX);
    483     Clobber(rs_rBX);
    484   }
    485 
    486   Clobber(rs_fr0);
    487   Clobber(rs_fr1);
    488   Clobber(rs_fr2);
    489   Clobber(rs_fr3);
    490   Clobber(rs_fr4);
    491   Clobber(rs_fr5);
    492   Clobber(rs_fr6);
    493   Clobber(rs_fr7);
    494 }
    495 
    496 RegLocation X86Mir2Lir::GetReturnWideAlt() {
    497   RegLocation res = LocCReturnWide();
    498   DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
    499   DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
    500   Clobber(rs_rAX);
    501   Clobber(rs_rDX);
    502   MarkInUse(rs_rAX);
    503   MarkInUse(rs_rDX);
    504   MarkWide(res.reg);
    505   return res;
    506 }
    507 
    508 RegLocation X86Mir2Lir::GetReturnAlt() {
    509   RegLocation res = LocCReturn();
    510   res.reg.SetReg(rs_rDX.GetReg());
    511   Clobber(rs_rDX);
    512   MarkInUse(rs_rDX);
    513   return res;
    514 }
    515 
    516 /* To be used when explicitly managing register use */
    517 void X86Mir2Lir::LockCallTemps() {
    518   LockTemp(rs_rX86_ARG0);
    519   LockTemp(rs_rX86_ARG1);
    520   LockTemp(rs_rX86_ARG2);
    521   LockTemp(rs_rX86_ARG3);
    522   if (cu_->target64) {
    523     LockTemp(rs_rX86_ARG4);
    524     LockTemp(rs_rX86_ARG5);
    525     LockTemp(rs_rX86_FARG0);
    526     LockTemp(rs_rX86_FARG1);
    527     LockTemp(rs_rX86_FARG2);
    528     LockTemp(rs_rX86_FARG3);
    529     LockTemp(rs_rX86_FARG4);
    530     LockTemp(rs_rX86_FARG5);
    531     LockTemp(rs_rX86_FARG6);
    532     LockTemp(rs_rX86_FARG7);
    533   }
    534 }
    535 
    536 /* To be used when explicitly managing register use */
    537 void X86Mir2Lir::FreeCallTemps() {
    538   FreeTemp(rs_rX86_ARG0);
    539   FreeTemp(rs_rX86_ARG1);
    540   FreeTemp(rs_rX86_ARG2);
    541   FreeTemp(rs_rX86_ARG3);
    542   if (cu_->target64) {
    543     FreeTemp(rs_rX86_ARG4);
    544     FreeTemp(rs_rX86_ARG5);
    545     FreeTemp(rs_rX86_FARG0);
    546     FreeTemp(rs_rX86_FARG1);
    547     FreeTemp(rs_rX86_FARG2);
    548     FreeTemp(rs_rX86_FARG3);
    549     FreeTemp(rs_rX86_FARG4);
    550     FreeTemp(rs_rX86_FARG5);
    551     FreeTemp(rs_rX86_FARG6);
    552     FreeTemp(rs_rX86_FARG7);
    553   }
    554 }
    555 
    556 bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
     557   switch (opcode) {
     558     case kX86LockCmpxchgMR:
     559     case kX86LockCmpxchgAR:
     560     case kX86LockCmpxchg64M:
     561     case kX86LockCmpxchg64A:
     562     case kX86XchgMR:
     563     case kX86Mfence:
     564       // Atomic memory instructions provide a full barrier.
     565       return true;
     566     default:
     567       break;
     568   }
     569 
     570   // Be conservative if we cannot prove that the instruction provides a full barrier.
     571   return false;
    572 }
    573 
    574 bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
    575 #if ANDROID_SMP != 0
    576   // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
    577   LIR* mem_barrier = last_lir_insn_;
    578 
    579   bool ret = false;
    580   /*
    581    * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
    582    * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
    583    * For those cases, all we need to ensure is that there is a scheduling barrier in place.
    584    */
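           // Illustrative sketch: a volatile store is typically bracketed by a kAnyStore barrier
           // before and a kAnyAny barrier after; on x86 only the kAnyAny case below may emit an
           // mfence, and even that is elided when the preceding LIR already acts as a full barrier
           // (e.g. a lock-prefixed cmpxchg or an xchg, see ProvidesFullMemoryBarrier()).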
    585   if (barrier_kind == kAnyAny) {
     586     // If no LIR exists already that can be used as a barrier, then generate an mfence.
    587     if (mem_barrier == nullptr) {
    588       mem_barrier = NewLIR0(kX86Mfence);
    589       ret = true;
    590     }
    591 
    592     // If last instruction does not provide full barrier, then insert an mfence.
    593     if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
    594       mem_barrier = NewLIR0(kX86Mfence);
    595       ret = true;
    596     }
    597   }
    598 
    599   // Now ensure that a scheduling barrier is in place.
    600   if (mem_barrier == nullptr) {
    601     GenBarrier();
    602   } else {
    603     // Mark as a scheduling barrier.
    604     DCHECK(!mem_barrier->flags.use_def_invalid);
    605     mem_barrier->u.m.def_mask = &kEncodeAll;
    606   }
    607   return ret;
    608 #else
    609   return false;
    610 #endif
    611 }
    612 
    613 void X86Mir2Lir::CompilerInitializeRegAlloc() {
    614   if (cu_->target64) {
    615     reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
    616                                           dp_regs_64, reserved_regs_64, reserved_regs_64q,
    617                                           core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
    618   } else {
    619     reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
    620                                           dp_regs_32, reserved_regs_32, empty_pool,
    621                                           core_temps_32, empty_pool, sp_temps_32, dp_temps_32);
    622   }
    623 
    624   // Target-specific adjustments.
    625 
    626   // Add in XMM registers.
    627   const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
    628   for (RegStorage reg : *xp_regs) {
    629     RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
    630     reginfo_map_.Put(reg.GetReg(), info);
    631   }
    632   const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
    633   for (RegStorage reg : *xp_temps) {
    634     RegisterInfo* xp_reg_info = GetRegInfo(reg);
    635     xp_reg_info->SetIsTemp(true);
    636   }
    637 
     638   // Alias single-precision xmm views to the corresponding double xmm views.
    639   // TODO: as needed, add larger vector sizes - alias all to the largest.
    640   GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
    641   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    642     int sp_reg_num = info->GetReg().GetRegNum();
    643     RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
    644     RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
    645     // 128-bit xmm vector register's master storage should refer to itself.
    646     DCHECK_EQ(xp_reg_info, xp_reg_info->Master());
    647 
    648     // Redirect 32-bit vector's master storage to 128-bit vector.
    649     info->SetMaster(xp_reg_info);
    650 
    651     RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
    652     RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    653     // Redirect 64-bit vector's master storage to 128-bit vector.
    654     dp_reg_info->SetMaster(xp_reg_info);
    655     // Singles should show a single 32-bit mask bit, at first referring to the low half.
    656     DCHECK_EQ(info->StorageMask(), 0x1U);
    657   }
    658 
    659   if (cu_->target64) {
     660     // Alias the 32-bit views of core registers to their corresponding 64-bit registers.
    661     GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
    662     for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
    663       int x_reg_num = info->GetReg().GetRegNum();
    664       RegStorage x_reg = RegStorage::Solo64(x_reg_num);
    665       RegisterInfo* x_reg_info = GetRegInfo(x_reg);
     666       // The 64-bit register's master storage should refer to itself.
     667       DCHECK_EQ(x_reg_info, x_reg_info->Master());
     668       // Redirect the 32-bit view's master storage to the 64-bit register.
     669       info->SetMaster(x_reg_info);
     670       // The 32-bit view should show a single 32-bit mask bit, at first referring to the low half.
    671       DCHECK_EQ(info->StorageMask(), 0x1U);
    672     }
    673   }
    674 
     675   // Don't start allocating temps at the first core/sp/dp register, or you may clobber return regs in early-exit methods.
    676   // TODO: adjust for x86/hard float calling convention.
    677   reg_pool_->next_core_reg_ = 2;
    678   reg_pool_->next_sp_reg_ = 2;
    679   reg_pool_->next_dp_reg_ = 1;
    680 }
    681 
    682 int X86Mir2Lir::VectorRegisterSize() {
    683   return 128;
    684 }
    685 
    686 int X86Mir2Lir::NumReservableVectorRegisters(bool fp_used) {
    687   return fp_used ? 5 : 7;
    688 }
    689 
    690 void X86Mir2Lir::SpillCoreRegs() {
    691   if (num_core_spills_ == 0) {
    692     return;
    693   }
    694   // Spill mask not including fake return address register
    695   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
    696   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
    697   OpSize size = cu_->target64 ? k64 : k32;
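           // Illustrative example: with rBP, rSI and the fake return address recorded as spills,
           // num_core_spills_ is 3, so rBP is stored at frame_size_ - 3 * ptr_size and rSI at
           // frame_size_ - 2 * ptr_size; the return-address slot itself is written by the call.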
    698   for (int reg = 0; mask; mask >>= 1, reg++) {
    699     if (mask & 0x1) {
    700       StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) :  RegStorage::Solo32(reg),
    701                    size, kNotVolatile);
    702       offset += GetInstructionSetPointerSize(cu_->instruction_set);
    703     }
    704   }
    705 }
    706 
    707 void X86Mir2Lir::UnSpillCoreRegs() {
    708   if (num_core_spills_ == 0) {
    709     return;
    710   }
    711   // Spill mask not including fake return address register
    712   uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
    713   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
    714   OpSize size = cu_->target64 ? k64 : k32;
    715   for (int reg = 0; mask; mask >>= 1, reg++) {
    716     if (mask & 0x1) {
    717       LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) :  RegStorage::Solo32(reg),
    718                    size, kNotVolatile);
    719       offset += GetInstructionSetPointerSize(cu_->instruction_set);
    720     }
    721   }
    722 }
    723 
    724 void X86Mir2Lir::SpillFPRegs() {
    725   if (num_fp_spills_ == 0) {
    726     return;
    727   }
    728   uint32_t mask = fp_spill_mask_;
    729   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
    730   for (int reg = 0; mask; mask >>= 1, reg++) {
    731     if (mask & 0x1) {
    732       StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
    733                    k64, kNotVolatile);
    734       offset += sizeof(double);
    735     }
    736   }
     737 }

     738 void X86Mir2Lir::UnSpillFPRegs() {
    739   if (num_fp_spills_ == 0) {
    740     return;
    741   }
    742   uint32_t mask = fp_spill_mask_;
    743   int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
    744   for (int reg = 0; mask; mask >>= 1, reg++) {
    745     if (mask & 0x1) {
    746       LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
    747                    k64, kNotVolatile);
    748       offset += sizeof(double);
    749     }
    750   }
    751 }
    752 
    753 
    754 bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
    755   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
    756 }
    757 
    758 RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
    759   // X86_64 can handle any size.
    760   if (cu_->target64) {
    761     if (size == kReference) {
    762       return kRefReg;
    763     }
    764     return kCoreReg;
    765   }
    766 
    767   if (UNLIKELY(is_volatile)) {
    768     // On x86, atomic 64-bit load/store requires an fp register.
    769     // Smaller aligned load/store is atomic for both core and fp registers.
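             // E.g. (sketch): a volatile Java long on 32-bit x86 is loaded/stored with a single
             // 64-bit SSE move (movsd/movq), which is atomic, whereas a pair of 32-bit GPR moves
             // would not be.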
    770     if (size == k64 || size == kDouble) {
    771       return kFPReg;
    772     }
    773   }
    774   return RegClassBySize(size);
    775 }
    776 
    777 X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    778     : Mir2Lir(cu, mir_graph, arena),
    779       base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
    780       method_address_insns_(arena, 100, kGrowableArrayMisc),
    781       class_type_address_insns_(arena, 100, kGrowableArrayMisc),
    782       call_method_insns_(arena, 100, kGrowableArrayMisc),
    783       stack_decrement_(nullptr), stack_increment_(nullptr),
    784       const_vectors_(nullptr) {
    785   store_method_addr_used_ = false;
    786   if (kIsDebugBuild) {
    787     for (int i = 0; i < kX86Last; i++) {
    788       if (X86Mir2Lir::EncodingMap[i].opcode != i) {
    789         LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
    790                    << " is wrong: expecting " << i << ", seeing "
    791                    << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
    792       }
    793     }
    794   }
    795   if (cu_->target64) {
    796     rs_rX86_SP = rs_rX86_SP_64;
    797 
    798     rs_rX86_ARG0 = rs_rDI;
    799     rs_rX86_ARG1 = rs_rSI;
    800     rs_rX86_ARG2 = rs_rDX;
    801     rs_rX86_ARG3 = rs_rCX;
    802     rs_rX86_ARG4 = rs_r8;
    803     rs_rX86_ARG5 = rs_r9;
    804     rs_rX86_FARG0 = rs_fr0;
    805     rs_rX86_FARG1 = rs_fr1;
    806     rs_rX86_FARG2 = rs_fr2;
    807     rs_rX86_FARG3 = rs_fr3;
    808     rs_rX86_FARG4 = rs_fr4;
    809     rs_rX86_FARG5 = rs_fr5;
    810     rs_rX86_FARG6 = rs_fr6;
    811     rs_rX86_FARG7 = rs_fr7;
    812     rX86_ARG0 = rDI;
    813     rX86_ARG1 = rSI;
    814     rX86_ARG2 = rDX;
    815     rX86_ARG3 = rCX;
    816     rX86_ARG4 = r8;
    817     rX86_ARG5 = r9;
    818     rX86_FARG0 = fr0;
    819     rX86_FARG1 = fr1;
    820     rX86_FARG2 = fr2;
    821     rX86_FARG3 = fr3;
    822     rX86_FARG4 = fr4;
    823     rX86_FARG5 = fr5;
    824     rX86_FARG6 = fr6;
    825     rX86_FARG7 = fr7;
    826     rs_rX86_INVOKE_TGT = rs_rDI;
    827   } else {
    828     rs_rX86_SP = rs_rX86_SP_32;
    829 
    830     rs_rX86_ARG0 = rs_rAX;
    831     rs_rX86_ARG1 = rs_rCX;
    832     rs_rX86_ARG2 = rs_rDX;
    833     rs_rX86_ARG3 = rs_rBX;
    834     rs_rX86_ARG4 = RegStorage::InvalidReg();
    835     rs_rX86_ARG5 = RegStorage::InvalidReg();
    836     rs_rX86_FARG0 = rs_rAX;
    837     rs_rX86_FARG1 = rs_rCX;
    838     rs_rX86_FARG2 = rs_rDX;
    839     rs_rX86_FARG3 = rs_rBX;
    840     rs_rX86_FARG4 = RegStorage::InvalidReg();
    841     rs_rX86_FARG5 = RegStorage::InvalidReg();
    842     rs_rX86_FARG6 = RegStorage::InvalidReg();
    843     rs_rX86_FARG7 = RegStorage::InvalidReg();
    844     rX86_ARG0 = rAX;
    845     rX86_ARG1 = rCX;
    846     rX86_ARG2 = rDX;
    847     rX86_ARG3 = rBX;
    848     rX86_FARG0 = rAX;
    849     rX86_FARG1 = rCX;
    850     rX86_FARG2 = rDX;
    851     rX86_FARG3 = rBX;
    852     rs_rX86_INVOKE_TGT = rs_rAX;
    853     // TODO(64): Initialize with invalid reg
    854 //    rX86_ARG4 = RegStorage::InvalidReg();
    855 //    rX86_ARG5 = RegStorage::InvalidReg();
    856   }
    857   rs_rX86_RET0 = rs_rAX;
    858   rs_rX86_RET1 = rs_rDX;
    859   rs_rX86_COUNT = rs_rCX;
    860   rX86_RET0 = rAX;
    861   rX86_RET1 = rDX;
    862   rX86_INVOKE_TGT = rAX;
    863   rX86_COUNT = rCX;
    864 
     865   // The number of reserved vector registers is not known yet; mark it as uninitialized.
    866   num_reserved_vector_regs_ = -1;
    867 }
    868 
    869 Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
    870                           ArenaAllocator* const arena) {
    871   return new X86Mir2Lir(cu, mir_graph, arena);
    872 }
    873 
    874 // Not used in x86(-64)
    875 RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
    876   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
    877   return RegStorage::InvalidReg();
    878 }
    879 
    880 LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
     881   // First load the pointer from fs:[suspend-trigger] into eax (rax on 64-bit),
     882   // then use a test instruction to dereference through that address.
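           // Roughly (sketch): mov eax, fs:[suspend_trigger_offset] ; test eax, [eax].  When the
           // runtime requests a suspend it protects the trigger page, so the test faults and the
           // fault handler performs the actual suspend check.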
    883   if (cu_->target64) {
    884     NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
    885         Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
    886   } else {
    887     NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
    888         Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
    889   }
    890   return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
    891 }
    892 
    893 uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
    894   DCHECK(!IsPseudoLirOp(opcode));
    895   return X86Mir2Lir::EncodingMap[opcode].flags;
    896 }
    897 
    898 const char* X86Mir2Lir::GetTargetInstName(int opcode) {
    899   DCHECK(!IsPseudoLirOp(opcode));
    900   return X86Mir2Lir::EncodingMap[opcode].name;
    901 }
    902 
    903 const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
    904   DCHECK(!IsPseudoLirOp(opcode));
    905   return X86Mir2Lir::EncodingMap[opcode].fmt;
    906 }
    907 
    908 void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
    909   // Can we do this directly to memory?
    910   rl_dest = UpdateLocWide(rl_dest);
    911   if ((rl_dest.location == kLocDalvikFrame) ||
    912       (rl_dest.location == kLocCompilerTemp)) {
    913     int32_t val_lo = Low32Bits(value);
    914     int32_t val_hi = High32Bits(value);
    915     int r_base = rs_rX86_SP.GetReg();
    916     int displacement = SRegOffset(rl_dest.s_reg_low);
    917 
    918     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
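             // Illustrative example (sketch): for value 0x0000000500000007 the two stores below
             // emit mov dword [r_base + disp + LOWORD_OFFSET], 0x00000007 and
             // mov dword [r_base + disp + HIWORD_OFFSET], 0x00000005.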
    919     LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    920     AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
    921                               false /* is_load */, true /* is64bit */);
    922     store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    923     AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
    924                               false /* is_load */, true /* is64bit */);
    925     return;
    926   }
    927 
    928   // Just use the standard code to do the generation.
    929   Mir2Lir::GenConstWide(rl_dest, value);
    930 }
    931 
    932 // TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
    933 void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
    934   LOG(INFO)  << "location: " << loc.location << ','
    935              << (loc.wide ? " w" : "  ")
    936              << (loc.defined ? " D" : "  ")
    937              << (loc.is_const ? " c" : "  ")
    938              << (loc.fp ? " F" : "  ")
    939              << (loc.core ? " C" : "  ")
    940              << (loc.ref ? " r" : "  ")
    941              << (loc.high_word ? " h" : "  ")
    942              << (loc.home ? " H" : "  ")
    943              << ", low: " << static_cast<int>(loc.reg.GetLowReg())
    944              << ", high: " << static_cast<int>(loc.reg.GetHighReg())
    945              << ", s_reg: " << loc.s_reg_low
    946              << ", orig: " << loc.orig_sreg;
    947 }
    948 
    949 void X86Mir2Lir::Materialize() {
    950   // A good place to put the analysis before starting.
    951   AnalyzeMIR();
    952 
    953   // Now continue with regular code generation.
    954   Mir2Lir::Materialize();
    955 }
    956 
    957 void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
    958                                    SpecialTargetRegister symbolic_reg) {
    959   /*
     960    * For x86, just generate a 32-bit move-immediate instruction that will be filled
     961    * in at 'link time'.  For now, put a unique value based on the target to ensure that
    962    * code deduplication works.
    963    */
    964   int target_method_idx = target_method.dex_method_index;
    965   const DexFile* target_dex_file = target_method.dex_file;
    966   const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    967   uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
    968 
    969   // Generate the move instruction with the unique pointer and save index, dex_file, and type.
    970   LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
    971                      TargetReg(symbolic_reg, kNotWide).GetReg(),
    972                      static_cast<int>(target_method_id_ptr), target_method_idx,
    973                      WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
    974   AppendLIR(move);
    975   method_address_insns_.Insert(move);
    976 }
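         // Patching sketch (illustrative): the imm32 of the emitted mov currently holds the MethodId
         // pointer; InstallLiteralPools() later records a patch at offset + flags.size - 4 (the last
         // four bytes of the instruction), which is overwritten with the real address at link time.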
    977 
    978 void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
    979   /*
     980    * For x86, just generate a 32-bit move-immediate instruction that will be filled
     981    * in at 'link time'.  For now, put a unique value based on the target to ensure that
    982    * code deduplication works.
    983    */
    984   const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
    985   uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
    986 
    987   // Generate the move instruction with the unique pointer and save index and type.
    988   LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
    989                      TargetReg(symbolic_reg, kNotWide).GetReg(),
    990                      static_cast<int>(ptr), type_idx);
    991   AppendLIR(move);
    992   class_type_address_insns_.Insert(move);
    993 }
    994 
    995 LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
    996   /*
     997    * For x86, just generate a 32-bit relative call instruction that will be filled
     998    * in at 'link time'.  For now, put a unique value based on the target to ensure that
    999    * code deduplication works.
   1000    */
   1001   int target_method_idx = target_method.dex_method_index;
   1002   const DexFile* target_dex_file = target_method.dex_file;
   1003   const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
   1004   uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
   1005 
   1006   // Generate the call instruction with the unique pointer and save index, dex_file, and type.
   1007   LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
   1008                      target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
   1009   AppendLIR(call);
   1010   call_method_insns_.Insert(call);
   1011   return call;
   1012 }
   1013 
   1014 /*
    1015  * @brief Append a 32-bit quantity to a buffer in little-endian byte order.
    1016  * @param buf Output buffer.
   1017  * @param data Data value.
   1018  */
   1019 
   1020 static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
   1021   buf.push_back(data & 0xff);
   1022   buf.push_back((data >> 8) & 0xff);
   1023   buf.push_back((data >> 16) & 0xff);
   1024   buf.push_back((data >> 24) & 0xff);
   1025 }
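         // E.g. PushWord(buf, 0x12345678) appends the bytes 0x78, 0x56, 0x34, 0x12.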
   1026 
   1027 void X86Mir2Lir::InstallLiteralPools() {
   1028   // These are handled differently for x86.
   1029   DCHECK(code_literal_list_ == nullptr);
   1030   DCHECK(method_literal_list_ == nullptr);
   1031   DCHECK(class_literal_list_ == nullptr);
   1032 
    1033   // Align to a 16-byte boundary.  We rely on implicit knowledge that the start of the method
    1034   // is on a 4-byte boundary; there is no good way to verify this if it ever changes, other
    1035   // than aligned loads failing at runtime.
   1036   if (const_vectors_ != nullptr) {
   1037     int align_size = (16-4) - (code_buffer_.size() & 0xF);
   1038     if (align_size < 0) {
   1039       align_size += 16;
   1040     }
   1041 
   1042     while (align_size > 0) {
   1043       code_buffer_.push_back(0);
   1044       align_size--;
   1045     }
   1046     for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
   1047       PushWord(code_buffer_, p->operands[0]);
   1048       PushWord(code_buffer_, p->operands[1]);
   1049       PushWord(code_buffer_, p->operands[2]);
   1050       PushWord(code_buffer_, p->operands[3]);
   1051     }
   1052   }
   1053 
   1054   // Handle the fixups for methods.
   1055   for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
   1056       LIR* p = method_address_insns_.Get(i);
   1057       DCHECK_EQ(p->opcode, kX86Mov32RI);
   1058       uint32_t target_method_idx = p->operands[2];
   1059       const DexFile* target_dex_file =
   1060           reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
   1061 
   1062       // The offset to patch is the last 4 bytes of the instruction.
   1063       int patch_offset = p->offset + p->flags.size - 4;
   1064       cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
   1065                                            cu_->method_idx, cu_->invoke_type,
   1066                                            target_method_idx, target_dex_file,
   1067                                            static_cast<InvokeType>(p->operands[4]),
   1068                                            patch_offset);
   1069   }
   1070 
   1071   // Handle the fixups for class types.
   1072   for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
   1073       LIR* p = class_type_address_insns_.Get(i);
   1074       DCHECK_EQ(p->opcode, kX86Mov32RI);
    1075       uint32_t target_type_idx = p->operands[2];
   1076 
   1077       // The offset to patch is the last 4 bytes of the instruction.
   1078       int patch_offset = p->offset + p->flags.size - 4;
   1079       cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
    1080                                           cu_->method_idx, target_type_idx, patch_offset);
   1081   }
   1082 
   1083   // And now the PC-relative calls to methods.
   1084   for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
   1085       LIR* p = call_method_insns_.Get(i);
   1086       DCHECK_EQ(p->opcode, kX86CallI);
   1087       uint32_t target_method_idx = p->operands[1];
   1088       const DexFile* target_dex_file =
   1089           reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
   1090 
   1091       // The offset to patch is the last 4 bytes of the instruction.
   1092       int patch_offset = p->offset + p->flags.size - 4;
   1093       cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
   1094                                                  cu_->method_idx, cu_->invoke_type,
   1095                                                  target_method_idx, target_dex_file,
   1096                                                  static_cast<InvokeType>(p->operands[3]),
   1097                                                  patch_offset, -4 /* offset */);
   1098   }
   1099 
   1100   // And do the normal processing.
   1101   Mir2Lir::InstallLiteralPools();
   1102 }
   1103 
   1104 bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
   1105   RegLocation rl_src = info->args[0];
   1106   RegLocation rl_srcPos = info->args[1];
   1107   RegLocation rl_dst = info->args[2];
   1108   RegLocation rl_dstPos = info->args[3];
   1109   RegLocation rl_length = info->args[4];
   1110   if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
   1111     return false;
   1112   }
   1113   if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
   1114     return false;
   1115   }
   1116   ClobberCallerSave();
   1117   LockCallTemps();  // Using fixed registers.
   1118   RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
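           // Register plan for the fast path (sketch): rAX = src object, later the src element
           // address; rCX = dst object, later srcPos/dstPos and copy scratch; rDX = copy length /
           // element counter; tmp_reg = bounds temp, later the dst element address.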
   1119   LoadValueDirectFixed(rl_src, rs_rAX);
   1120   LoadValueDirectFixed(rl_dst, rs_rCX);
   1121   LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
   1122   LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
   1123   LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
   1124   LoadValueDirectFixed(rl_length, rs_rDX);
    1125   // If the length of the copy is > 128 characters (256 bytes) or negative, go to the slow path.
   1126   LIR* len_too_big  = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
   1127   LoadValueDirectFixed(rl_src, rs_rAX);
   1128   LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   1129   LIR* src_bad_len  = nullptr;
   1130   LIR* src_bad_off = nullptr;
   1131   LIR* srcPos_negative  = nullptr;
   1132   if (!rl_srcPos.is_const) {
   1133     LoadValueDirectFixed(rl_srcPos, tmp_reg);
   1134     srcPos_negative  = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
   1135     // src_pos < src_len
   1136     src_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   1137     // src_len - src_pos < copy_len
   1138     OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
   1139     src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1140   } else {
   1141     int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
   1142     if (pos_val == 0) {
   1143       src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
   1144     } else {
   1145       // src_pos < src_len
   1146       src_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
   1147       // src_len - src_pos < copy_len
   1148       OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
   1149       src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1150     }
   1151   }
   1152   LIR* dstPos_negative = nullptr;
   1153   LIR* dst_bad_len = nullptr;
   1154   LIR* dst_bad_off = nullptr;
   1155   LoadValueDirectFixed(rl_dst, rs_rAX);
   1156   LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
   1157   if (!rl_dstPos.is_const) {
   1158     LoadValueDirectFixed(rl_dstPos, tmp_reg);
   1159     dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
   1160     // dst_pos < dst_len
   1161     dst_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
   1162     // dst_len - dst_pos < copy_len
   1163     OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
   1164     dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1165   } else {
   1166     int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
   1167     if (pos_val == 0) {
   1168       dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
   1169     } else {
   1170       // dst_pos < dst_len
   1171       dst_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
   1172       // dst_len - dst_pos < copy_len
   1173       OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
   1174       dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
   1175     }
   1176   }
   1177   // Everything is checked now.
   1178   LoadValueDirectFixed(rl_src, rs_rAX);
   1179   LoadValueDirectFixed(rl_dst, tmp_reg);
   1180   LoadValueDirectFixed(rl_srcPos, rs_rCX);
   1181   NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
   1182        rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
   1183   // RAX now holds the address of the first src element to be copied.
   1184 
   1185   LoadValueDirectFixed(rl_dstPos, rs_rCX);
   1186   NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
    1187        rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
    1188   // tmp_reg (RBX or R11) now holds the address of the first dst element to be copied.
   1189 
   1190   // Check if the number of elements to be copied is odd or even. If odd
   1191   // then copy the first element (so that the remaining number of elements
   1192   // is even).
   1193   LoadValueDirectFixed(rl_length, rs_rCX);
   1194   OpRegImm(kOpAnd, rs_rCX, 1);
   1195   LIR* jmp_to_begin_loop  = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
   1196   OpRegImm(kOpSub, rs_rDX, 1);
   1197   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
   1198   StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
   1199 
   1200   // Since the remaining number of elements is even, we will copy by
   1201   // two elements at a time.
   1202   LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
   1203   LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
   1204   OpRegImm(kOpSub, rs_rDX, 2);
   1205   LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
   1206   StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
   1207   OpUnconditionalBranch(beginLoop);
   1208   LIR *check_failed = NewLIR0(kPseudoTargetLabel);
   1209   LIR* launchpad_branch  = OpUnconditionalBranch(nullptr);
   1210   LIR *return_point = NewLIR0(kPseudoTargetLabel);
   1211   jmp_to_ret->target = return_point;
   1212   jmp_to_begin_loop->target = beginLoop;
   1213   src_dst_same->target = check_failed;
   1214   len_too_big->target = check_failed;
   1215   src_null_branch->target = check_failed;
   1216   if (srcPos_negative != nullptr)
    1217     srcPos_negative->target = check_failed;
   1218   if (src_bad_off != nullptr)
   1219     src_bad_off->target = check_failed;
   1220   if (src_bad_len != nullptr)
   1221     src_bad_len->target = check_failed;
   1222   dst_null_branch->target = check_failed;
   1223   if (dstPos_negative != nullptr)
   1224     dstPos_negative->target = check_failed;
   1225   if (dst_bad_off != nullptr)
   1226     dst_bad_off->target = check_failed;
   1227   if (dst_bad_len != nullptr)
   1228     dst_bad_len->target = check_failed;
   1229   AddIntrinsicSlowPath(info, launchpad_branch, return_point);
   1230   ClobberCallerSave();  // We must clobber everything because slow path will return here
   1231   return true;
   1232 }
   1233 
   1234 
   1235 /*
    1236  * Fast String.indexOf(I) and indexOf(II).  Inline check for the simple case of char <= 0xffff;
    1237  * otherwise bail to the standard library code.
   1238  */
   1239 bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   1240   RegLocation rl_obj = info->args[0];
   1241   RegLocation rl_char = info->args[1];
    1242   RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.
   1243   // RBX is callee-save register in 64-bit mode.
   1244   RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
   1245   int start_value = -1;
   1246 
   1247   uint32_t char_value =
   1248     rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;
   1249 
   1250   if (char_value > 0xFFFF) {
   1251     // We have to punt to the real String.indexOf.
   1252     return false;
   1253   }
   1254 
    1255   // Okay, we are committed to inlining this.
   1256   // EAX: 16 bit character being searched.
   1257   // ECX: count: number of words to be searched.
   1258   // EDI: String being searched.
   1259   // EDX: temporary during execution.
   1260   // EBX or R11: temporary during execution (depending on mode).
   1261   // REP SCASW: search instruction.
   1262 
   1263   FlushReg(rs_rAX);
   1264   Clobber(rs_rAX);
   1265   LockTemp(rs_rAX);
   1266   FlushReg(rs_rCX);
   1267   Clobber(rs_rCX);
   1268   LockTemp(rs_rCX);
   1269   FlushReg(rs_rDX);
   1270   Clobber(rs_rDX);
   1271   LockTemp(rs_rDX);
   1272   FlushReg(rs_tmp);
   1273   Clobber(rs_tmp);
   1274   LockTemp(rs_tmp);
   1275   if (cu_->target64) {
   1276     FlushReg(rs_rDI);
   1277     Clobber(rs_rDI);
   1278     LockTemp(rs_rDI);
   1279   }
   1280 
   1281   RegLocation rl_return = GetReturn(kCoreReg);
   1282   RegLocation rl_dest = InlineTarget(info);
   1283 
   1284   // Is the string non-NULL?
   1285   LoadValueDirectFixed(rl_obj, rs_rDX);
   1286   GenNullCheck(rs_rDX, info->opt_flags);
   1287   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
   1288 
   1289   LIR *slowpath_branch = nullptr, *length_compare = nullptr;
   1290 
   1291   // We need the value in EAX.
   1292   if (rl_char.is_const) {
   1293     LoadConstantNoClobber(rs_rAX, char_value);
   1294   } else {
   1295     // Does the character fit in 16 bits? Compare it at runtime.
   1296     LoadValueDirectFixed(rl_char, rs_rAX);
   1297     slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
   1298   }
   1299 
   1300   // From here down, we know that we are looking for a char that fits in 16 bits.
   1301   // Location of reference to data array within the String object.
   1302   int value_offset = mirror::String::ValueOffset().Int32Value();
   1303   // Location of count within the String object.
   1304   int count_offset = mirror::String::CountOffset().Int32Value();
   1305   // Starting offset within data array.
   1306   int offset_offset = mirror::String::OffsetOffset().Int32Value();
    1307   // Start of char data within the array.
   1308   int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
   1309 
    1310   // Load the number of words to search into rCX.
   1311   Load32Disp(rs_rDX, count_offset, rs_rCX);
   1312 
   1313   // Possible signal here due to null pointer dereference.
   1314   // Note that the signal handler will expect the top word of
   1315   // the stack to be the ArtMethod*.  If the PUSH edi instruction
   1316   // below is ahead of the load above then this will not be true
   1317   // and the signal handler will not work.
   1318   MarkPossibleNullPointerException(0);
   1319 
   1320   if (!cu_->target64) {
   1321     // EDI is callee-save register in 32-bit mode.
   1322     NewLIR1(kX86Push32R, rs_rDI.GetReg());
   1323   }
   1324 
   1325   if (zero_based) {
   1326     // Start index is not present.
   1327     // We have to handle an empty string.  Use special instruction JECXZ.
   1328     length_compare = NewLIR0(kX86Jecxz8);
   1329 
   1330     // Copy the number of words to search in a temporary register.
   1331     // We will use the register at the end to calculate result.
   1332     OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1333   } else {
   1334     // Start index is present.
   1335     rl_start = info->args[2];
   1336 
   1337     // We have to offset by the start index.
   1338     if (rl_start.is_const) {
   1339       start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
   1340       start_value = std::max(start_value, 0);
   1341 
   1342       // Is the start > count?
   1343       length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
   1344       OpRegImm(kOpMov, rs_rDI, start_value);
   1345 
   1346       // Copy the number of words to search in a temporary register.
   1347       // We will use the register at the end to calculate result.
   1348       OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1349 
   1350       if (start_value != 0) {
   1351         // Decrease the number of words to search by the start index.
   1352         OpRegImm(kOpSub, rs_rCX, start_value);
   1353       }
   1354     } else {
   1355       // Handle "start index < 0" case.
   1356       if (!cu_->target64 && rl_start.location != kLocPhysReg) {
   1357         // Load the start index from stack, remembering that we pushed EDI.
   1358         int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
   1359         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   1360         Load32Disp(rs_rX86_SP, displacement, rs_rDI);
   1361         // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
   1362         DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
   1363         int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
   1364         AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
   1365       } else {
   1366         LoadValueDirectFixed(rl_start, rs_rDI);
   1367       }
   1368       OpRegReg(kOpXor, rs_tmp, rs_tmp);
   1369       OpRegReg(kOpCmp, rs_rDI, rs_tmp);
   1370       OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp);
   1371 
   1372       // The length of the string should be greater than the start index.
   1373       length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr);
   1374 
   1375       // Copy the number of words to search in a temporary register.
   1376       // We will use the register at the end to calculate result.
   1377       OpRegReg(kOpMov, rs_tmp, rs_rCX);
   1378 
   1379       // Decrease the number of words to search by the start index.
   1380       OpRegReg(kOpSub, rs_rCX, rs_rDI);
   1381     }
   1382   }
   1383 
   1384   // Load the address of the string into EDI.
    1385   // If there is a start index we have to add the address to the existing value in EDI.
   1386   // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
   1387   if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
   1388     Load32Disp(rs_rDX, offset_offset, rs_rDI);
   1389   } else {
   1390     OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset);
   1391   }
   1392   OpRegImm(kOpLsl, rs_rDI, 1);
   1393   OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset);
   1394   OpRegImm(kOpAdd, rs_rDI, data_offset);
   1395 
   1396   // EDI now contains the start of the string to be searched.
   1397   // We are all prepared to do the search for the character.
   1398   NewLIR0(kX86RepneScasw);
   1399 
   1400   // Did we find a match?
   1401   LIR* failed_branch = OpCondBranch(kCondNe, nullptr);
   1402 
   1403   // yes, we matched.  Compute the index of the result.
   1404   OpRegReg(kOpSub, rs_tmp, rs_rCX);
   1405   NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1);
   1406 
   1407   LIR *all_done = NewLIR1(kX86Jmp8, 0);
   1408 
   1409   // Failed to match; return -1.
   1410   LIR *not_found = NewLIR0(kPseudoTargetLabel);
   1411   length_compare->target = not_found;
   1412   failed_branch->target = not_found;
   1413   LoadConstantNoClobber(rl_return.reg, -1);
   1414 
   1415   // And join up at the end.
   1416   all_done->target = NewLIR0(kPseudoTargetLabel);
   1417 
   1418   if (!cu_->target64)
   1419     NewLIR1(kX86Pop32R, rs_rDI.GetReg());
   1420 
   1421   // Out of line code returns here.
   1422   if (slowpath_branch != nullptr) {
   1423     LIR *return_point = NewLIR0(kPseudoTargetLabel);
   1424     AddIntrinsicSlowPath(info, slowpath_branch, return_point);
   1425     ClobberCallerSave();  // We must clobber everything because slow path will return here
   1426   }
   1427 
   1428   StoreValue(rl_dest, rl_return);
   1429 
   1430   FreeTemp(rs_rAX);
   1431   FreeTemp(rs_rCX);
   1432   FreeTemp(rs_rDX);
   1433   FreeTemp(rs_tmp);
   1434   if (cu_->target64) {
   1435     FreeTemp(rs_rDI);
   1436   }
   1437 
   1438   return true;
   1439 }
   1440 
   1441 /*
   1442  * @brief Enter an 'advance LOC' into the FDE buffer
   1443  * @param buf FDE buffer.
   1444  * @param increment Amount by which to increase the current location.
   1445  */
    1446 static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
   1447   if (increment < 64) {
   1448     // Encoding in opcode.
   1449     buf.push_back(0x1 << 6 | increment);
   1450   } else if (increment < 256) {
   1451     // Single byte delta.
   1452     buf.push_back(0x02);
   1453     buf.push_back(increment);
   1454   } else if (increment < 256 * 256) {
   1455     // Two byte delta.
   1456     buf.push_back(0x03);
   1457     buf.push_back(increment & 0xff);
   1458     buf.push_back((increment >> 8) & 0xff);
   1459   } else {
   1460     // Four byte delta.
   1461     buf.push_back(0x04);
   1462     PushWord(buf, increment);
   1463   }
   1464 }
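         // A sketch of the resulting encodings (using the opcode values hard-coded above):
         // AdvanceLoc(buf, 3) appends the single byte 0x43 (DW_CFA_advance_loc | 3),
         // AdvanceLoc(buf, 100) appends 0x02 0x64 (DW_CFA_advance_loc1), and
         // AdvanceLoc(buf, 0x1234) appends 0x03 0x34 0x12 (DW_CFA_advance_loc2, little-endian delta).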
   1465 
   1466 
   1467 std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64) {
   1468   return X86Mir2Lir::ReturnCommonCallFrameInformation(is_x86_64);
   1469 }
   1470 
   1471 static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
   1472   uint8_t buffer[12];
   1473   uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
   1474   for (uint8_t *p = buffer; p < ptr; p++) {
   1475     buf.push_back(*p);
   1476   }
   1477 }
   1478 
   1479 static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) {
   1480   uint8_t buffer[12];
   1481   uint8_t *ptr = EncodeSignedLeb128(buffer, value);
   1482   for (uint8_t *p = buffer; p < ptr; p++) {
   1483     buf.push_back(*p);
   1484   }
   1485 }
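         // As a quick illustration of the bytes these helpers append (standard LEB128 encoding):
         // EncodeUnsignedLeb128(buf, 300) appends 0xAC 0x02, while EncodeSignedLeb128(buf, -4) and
         // EncodeSignedLeb128(buf, -8) append the single bytes 0x7C and 0x78 that the data
         // alignment field of the CIE below relies on.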
   1486 
   1487 std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation(bool is_x86_64) {
    1488   std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
   1489 
   1490   // Length (will be filled in later in this routine).
   1491   PushWord(*cfi_info, 0);
   1492 
   1493   // CIE id: always 0.
   1494   PushWord(*cfi_info, 0);
   1495 
   1496   // Version: always 1.
   1497   cfi_info->push_back(0x01);
   1498 
   1499   // Augmentation: 'zR\0'
   1500   cfi_info->push_back(0x7a);
   1501   cfi_info->push_back(0x52);
   1502   cfi_info->push_back(0x0);
   1503 
   1504   // Code alignment: 1.
   1505   EncodeUnsignedLeb128(*cfi_info, 1);
   1506 
   1507   // Data alignment.
   1508   if (is_x86_64) {
   1509     EncodeSignedLeb128(*cfi_info, -8);
   1510   } else {
   1511     EncodeSignedLeb128(*cfi_info, -4);
   1512   }
   1513 
   1514   // Return address register.
   1515   if (is_x86_64) {
   1516     // R16(RIP)
   1517     cfi_info->push_back(0x10);
   1518   } else {
   1519     // R8(EIP)
   1520     cfi_info->push_back(0x08);
   1521   }
   1522 
   1523   // Augmentation length: 1.
   1524   cfi_info->push_back(1);
   1525 
   1526   // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
   1527   cfi_info->push_back(0x03);
   1528 
   1529   // Initial instructions.
   1530   if (is_x86_64) {
   1531     // DW_CFA_def_cfa R7(RSP) 8.
   1532     cfi_info->push_back(0x0c);
   1533     cfi_info->push_back(0x07);
   1534     cfi_info->push_back(0x08);
   1535 
   1536     // DW_CFA_offset R16(RIP) 1 (* -8).
   1537     cfi_info->push_back(0x90);
   1538     cfi_info->push_back(0x01);
   1539   } else {
   1540     // DW_CFA_def_cfa R4(ESP) 4.
   1541     cfi_info->push_back(0x0c);
   1542     cfi_info->push_back(0x04);
   1543     cfi_info->push_back(0x04);
   1544 
   1545     // DW_CFA_offset R8(EIP) 1 (* -4).
   1546     cfi_info->push_back(0x88);
   1547     cfi_info->push_back(0x01);
   1548   }
   1549 
   1550   // Padding to a multiple of 4
   1551   while ((cfi_info->size() & 3) != 0) {
   1552     // DW_CFA_nop is encoded as 0.
   1553     cfi_info->push_back(0);
   1554   }
   1555 
   1556   // Set the length of the CIE inside the generated bytes.
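           // (The DWARF length field excludes the length word itself, hence size() - 4, and it is
           // stored little-endian in the first four bytes.)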
   1557   uint32_t length = cfi_info->size() - 4;
   1558   (*cfi_info)[0] = length;
   1559   (*cfi_info)[1] = length >> 8;
   1560   (*cfi_info)[2] = length >> 16;
   1561   (*cfi_info)[3] = length >> 24;
   1562   return cfi_info;
   1563 }
   1564 
   1565 static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
   1566   if (is_x86_64) {
   1567     switch (art_reg_id) {
   1568     case 3 : *dwarf_reg_id =  3; return true;  // %rbx
   1569     // This is the only discrepancy between ART & DWARF register numbering.
   1570     case 5 : *dwarf_reg_id =  6; return true;  // %rbp
   1571     case 12: *dwarf_reg_id = 12; return true;  // %r12
   1572     case 13: *dwarf_reg_id = 13; return true;  // %r13
   1573     case 14: *dwarf_reg_id = 14; return true;  // %r14
   1574     case 15: *dwarf_reg_id = 15; return true;  // %r15
   1575     default: return false;  // Should not get here
   1576     }
   1577   } else {
   1578     switch (art_reg_id) {
   1579     case 5: *dwarf_reg_id = 5; return true;  // %ebp
   1580     case 6: *dwarf_reg_id = 6; return true;  // %esi
   1581     case 7: *dwarf_reg_id = 7; return true;  // %edi
   1582     default: return false;  // Should not get here
   1583     }
   1584   }
   1585 }
   1586 
   1587 std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
    1588   std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
   1589 
   1590   // Generate the FDE for the method.
   1591   DCHECK_NE(data_offset_, 0U);
   1592 
   1593   // Length (will be filled in later in this routine).
   1594   PushWord(*cfi_info, 0);
   1595 
   1596   // 'CIE_pointer' (filled in by linker).
   1597   PushWord(*cfi_info, 0);
   1598 
   1599   // 'initial_location' (filled in by linker).
   1600   PushWord(*cfi_info, 0);
   1601 
   1602   // 'address_range' (number of bytes in the method).
   1603   PushWord(*cfi_info, data_offset_);
   1604 
   1605   // Augmentation length: 0
   1606   cfi_info->push_back(0);
   1607 
   1608   // The instructions in the FDE.
   1609   if (stack_decrement_ != nullptr) {
   1610     // Advance LOC to just past the stack decrement.
   1611     uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
   1612     AdvanceLoc(*cfi_info, pc);
   1613 
   1614     // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
   1615     cfi_info->push_back(0x0e);
   1616     EncodeUnsignedLeb128(*cfi_info, frame_size_);
   1617 
   1618     // Handle register spills
   1619     const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
   1620     const int kDataAlignmentFactor = (cu_->target64) ? -8 : -4;
   1621     uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
   1622     int offset = -(GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
   1623     for (int reg = 0; mask; mask >>= 1, reg++) {
   1624       if (mask & 0x1) {
   1625         pc += kSpillInstLen;
   1626 
   1627         // Advance LOC to pass this instruction
   1628         AdvanceLoc(*cfi_info, kSpillInstLen);
   1629 
   1630         int dwarf_reg_id;
   1631         if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
   1632           // DW_CFA_offset_extended_sf reg_no offset
   1633           cfi_info->push_back(0x11);
   1634           EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id);
   1635           EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor);
   1636         }
   1637 
   1638         offset += GetInstructionSetPointerSize(cu_->instruction_set);
   1639       }
   1640     }
   1641 
    1642     // The frame stays at that size until the epilogue.
   1643     if (stack_increment_ != nullptr) {
   1644       uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
   1645       AdvanceLoc(*cfi_info, new_pc - pc);
   1646 
   1647       // We probably have code snippets after the epilogue, so save the
   1648       // current state: DW_CFA_remember_state.
   1649       cfi_info->push_back(0x0a);
   1650 
   1651       // We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
   1652       // There is only the return PC on the stack now.
   1653       cfi_info->push_back(0x0e);
   1654       EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
   1655 
   1656       // Everything after that is the same as before the epilogue.
    1657       // The stack bump was followed by the RET instruction.
   1658       LIR *post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
   1659       if (post_ret_insn != nullptr) {
   1660         pc = new_pc;
   1661         new_pc = post_ret_insn->offset;
   1662         AdvanceLoc(*cfi_info, new_pc - pc);
   1663         // Restore the state: DW_CFA_restore_state.
   1664         cfi_info->push_back(0x0b);
   1665       }
   1666     }
   1667   }
   1668 
   1669   // Padding to a multiple of 4
   1670   while ((cfi_info->size() & 3) != 0) {
   1671     // DW_CFA_nop is encoded as 0.
   1672     cfi_info->push_back(0);
   1673   }
   1674 
   1675   // Set the length of the FDE inside the generated bytes.
   1676   uint32_t length = cfi_info->size() - 4;
   1677   (*cfi_info)[0] = length;
   1678   (*cfi_info)[1] = length >> 8;
   1679   (*cfi_info)[2] = length >> 16;
   1680   (*cfi_info)[3] = length >> 24;
   1681   return cfi_info;
   1682 }
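         // As an illustrative sketch only (exact bytes depend on frame_size_ and the spill mask):
         // for a 32-bit method that spills EBP and ESI, the FDE body reads roughly as
         // advance_loc past the stack decrement, DW_CFA_def_cfa_offset frame_size, then per spill
         // an advance_loc(4) followed by DW_CFA_offset_extended_sf with the DWARF register number,
         // and finally DW_CFA_remember_state / def_cfa_offset 4 / restore_state around the epilogue.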
   1683 
   1684 void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
   1685   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
   1686     case kMirOpReserveVectorRegisters:
   1687       ReserveVectorRegisters(mir);
   1688       break;
   1689     case kMirOpReturnVectorRegisters:
   1690       ReturnVectorRegisters();
   1691       break;
   1692     case kMirOpConstVector:
   1693       GenConst128(bb, mir);
   1694       break;
   1695     case kMirOpMoveVector:
   1696       GenMoveVector(bb, mir);
   1697       break;
   1698     case kMirOpPackedMultiply:
   1699       GenMultiplyVector(bb, mir);
   1700       break;
   1701     case kMirOpPackedAddition:
   1702       GenAddVector(bb, mir);
   1703       break;
   1704     case kMirOpPackedSubtract:
   1705       GenSubtractVector(bb, mir);
   1706       break;
   1707     case kMirOpPackedShiftLeft:
   1708       GenShiftLeftVector(bb, mir);
   1709       break;
   1710     case kMirOpPackedSignedShiftRight:
   1711       GenSignedShiftRightVector(bb, mir);
   1712       break;
   1713     case kMirOpPackedUnsignedShiftRight:
   1714       GenUnsignedShiftRightVector(bb, mir);
   1715       break;
   1716     case kMirOpPackedAnd:
   1717       GenAndVector(bb, mir);
   1718       break;
   1719     case kMirOpPackedOr:
   1720       GenOrVector(bb, mir);
   1721       break;
   1722     case kMirOpPackedXor:
   1723       GenXorVector(bb, mir);
   1724       break;
   1725     case kMirOpPackedAddReduce:
   1726       GenAddReduceVector(bb, mir);
   1727       break;
   1728     case kMirOpPackedReduce:
   1729       GenReduceVector(bb, mir);
   1730       break;
   1731     case kMirOpPackedSet:
   1732       GenSetVector(bb, mir);
   1733       break;
   1734     default:
   1735       break;
   1736   }
   1737 }
   1738 
   1739 void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
    1740   // We should not try to reserve twice without returning the registers.
    1741   DCHECK_EQ(num_reserved_vector_regs_, -1);
   1742 
   1743   int num_vector_reg = mir->dalvikInsn.vA;
   1744   for (int i = 0; i < num_vector_reg; i++) {
   1745     RegStorage xp_reg = RegStorage::Solo128(i);
   1746     RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
   1747     Clobber(xp_reg);
   1748 
   1749     for (RegisterInfo *info = xp_reg_info->GetAliasChain();
   1750                        info != nullptr;
   1751                        info = info->GetAliasChain()) {
   1752       if (info->GetReg().IsSingle()) {
   1753         reg_pool_->sp_regs_.Delete(info);
   1754       } else {
   1755         reg_pool_->dp_regs_.Delete(info);
   1756       }
   1757     }
   1758   }
   1759 
   1760   num_reserved_vector_regs_ = num_vector_reg;
   1761 }
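         // In effect, reserving xmm0..xmm(vA-1) removes their aliased single- and double-precision
         // views from the allocator pools, so vectorized code owns those registers until
         // kMirOpReturnVectorRegisters puts them back via ReturnVectorRegisters() below.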
   1762 
   1763 void X86Mir2Lir::ReturnVectorRegisters() {
   1764   // Return all the reserved registers
   1765   for (int i = 0; i < num_reserved_vector_regs_; i++) {
   1766     RegStorage xp_reg = RegStorage::Solo128(i);
   1767     RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
   1768 
   1769     for (RegisterInfo *info = xp_reg_info->GetAliasChain();
   1770                        info != nullptr;
   1771                        info = info->GetAliasChain()) {
   1772       if (info->GetReg().IsSingle()) {
   1773         reg_pool_->sp_regs_.Insert(info);
   1774       } else {
   1775         reg_pool_->dp_regs_.Insert(info);
   1776       }
   1777     }
   1778   }
   1779 
    1780   // We don't have any more reserved vector registers.
   1781   num_reserved_vector_regs_ = -1;
   1782 }
   1783 
   1784 void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
   1785   store_method_addr_used_ = true;
   1786   int type_size = mir->dalvikInsn.vB;
   1787   // We support 128 bit vectors.
   1788   DCHECK_EQ(type_size & 0xFFFF, 128);
   1789   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   1790   uint32_t *args = mir->dalvikInsn.arg;
   1791   int reg = rs_dest.GetReg();
   1792   // Check for all 0 case.
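           // (xorps reg, reg zeroes the register without a memory load, so no 128-bit literal
           // needs to be emitted for the all-zero constant.)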
   1793   if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
   1794     NewLIR2(kX86XorpsRR, reg, reg);
   1795     return;
   1796   }
   1797 
   1798   // Append the mov const vector to reg opcode.
   1799   AppendOpcodeWithConst(kX86MovupsRM, reg, mir);
   1800 }
   1801 
   1802 void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
   1803   // Okay, load it from the constant vector area.
   1804   LIR *data_target = ScanVectorLiteral(mir);
   1805   if (data_target == nullptr) {
   1806     data_target = AddVectorLiteral(mir);
   1807   }
   1808 
   1809   // Address the start of the method.
   1810   RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
   1811   if (rl_method.wide) {
   1812     rl_method = LoadValueWide(rl_method, kCoreReg);
   1813   } else {
   1814     rl_method = LoadValue(rl_method, kCoreReg);
   1815   }
   1816 
   1817   // Load the proper value from the literal area.
   1818   // We don't know the proper offset for the value, so pick one that will force
   1819   // 4 byte offset.  We will fix this up in the assembler later to have the right
   1820   // value.
   1821   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   1822   LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
   1823   load->flags.fixup = kFixupLoad;
   1824   load->target = data_target;
   1825 }
   1826 
   1827 void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
   1828   // We only support 128 bit registers.
   1829   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1830   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   1831   RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
   1832   NewLIR2(kX86Mova128RR, rs_dest.GetReg(), rs_src.GetReg());
   1833 }
   1834 
   1835 void X86Mir2Lir::GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir) {
   1836   const int BYTE_SIZE = 8;
   1837   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1838   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1839   RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempWide());
   1840 
   1841   /*
   1842    * Emulate the behavior of a kSignedByte by separating out the 16 values in the two XMM
   1843    * and multiplying 8 at a time before recombining back into one XMM register.
   1844    *
   1845    *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
   1846    *       xmm3 is tmp             (operate on high bits of 16bit lanes)
   1847    *
   1848    *    xmm3 = xmm1
   1849    *    xmm1 = xmm1 .* xmm2
   1850    *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
   1851    *    xmm3 = xmm3 .>> 8
   1852    *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
   1853    *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
   1854    *    xmm1 = xmm1 | xmm2                                // combine results
   1855    */
   1856 
   1857   // Copy xmm1.
   1858   NewLIR2(kX86Mova128RR, rs_src1_high_tmp.GetReg(), rs_dest_src1.GetReg());
   1859 
   1860   // Multiply low bits.
   1861   NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1862 
   1863   // xmm1 now has low bits.
   1864   AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
   1865 
   1866   // Prepare high bits for multiplication.
   1867   NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), BYTE_SIZE);
   1868   AndMaskVectorRegister(rs_src2, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
   1869 
   1870   // Multiply high bits and xmm2 now has high bits.
   1871   NewLIR2(kX86PmullwRR, rs_src2.GetReg(), rs_src1_high_tmp.GetReg());
   1872 
   1873   // Combine back into dest XMM register.
   1874   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1875 }
   1876 
   1877 void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
   1878   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1879   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1880   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1881   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1882   int opcode = 0;
   1883   switch (opsize) {
   1884     case k32:
   1885       opcode = kX86PmulldRR;
   1886       break;
   1887     case kSignedHalf:
   1888       opcode = kX86PmullwRR;
   1889       break;
   1890     case kSingle:
   1891       opcode = kX86MulpsRR;
   1892       break;
   1893     case kDouble:
   1894       opcode = kX86MulpdRR;
   1895       break;
   1896     case kSignedByte:
   1897       // HW doesn't support 16x16 byte multiplication so emulate it.
   1898       GenMultiplyVectorSignedByte(bb, mir);
   1899       return;
   1900     default:
   1901       LOG(FATAL) << "Unsupported vector multiply " << opsize;
   1902       break;
   1903   }
   1904   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1905 }
   1906 
   1907 void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
   1908   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1909   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1910   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1911   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1912   int opcode = 0;
   1913   switch (opsize) {
   1914     case k32:
   1915       opcode = kX86PadddRR;
   1916       break;
   1917     case kSignedHalf:
   1918     case kUnsignedHalf:
   1919       opcode = kX86PaddwRR;
   1920       break;
   1921     case kUnsignedByte:
   1922     case kSignedByte:
   1923       opcode = kX86PaddbRR;
   1924       break;
   1925     case kSingle:
   1926       opcode = kX86AddpsRR;
   1927       break;
   1928     case kDouble:
   1929       opcode = kX86AddpdRR;
   1930       break;
   1931     default:
   1932       LOG(FATAL) << "Unsupported vector addition " << opsize;
   1933       break;
   1934   }
   1935   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1936 }
   1937 
   1938 void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
   1939   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   1940   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   1941   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1942   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   1943   int opcode = 0;
   1944   switch (opsize) {
   1945     case k32:
   1946       opcode = kX86PsubdRR;
   1947       break;
   1948     case kSignedHalf:
   1949     case kUnsignedHalf:
   1950       opcode = kX86PsubwRR;
   1951       break;
   1952     case kUnsignedByte:
   1953     case kSignedByte:
   1954       opcode = kX86PsubbRR;
   1955       break;
   1956     case kSingle:
   1957       opcode = kX86SubpsRR;
   1958       break;
   1959     case kDouble:
   1960       opcode = kX86SubpdRR;
   1961       break;
   1962     default:
   1963       LOG(FATAL) << "Unsupported vector subtraction " << opsize;
   1964       break;
   1965   }
   1966   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
   1967 }
   1968 
   1969 void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
   1970   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   1971   RegStorage rs_tmp = Get128BitRegister(AllocTempWide());
   1972 
   1973   int opcode = 0;
   1974   int imm = mir->dalvikInsn.vB;
   1975 
   1976   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
   1977     case kMirOpPackedShiftLeft:
   1978       opcode = kX86PsllwRI;
   1979       break;
   1980     case kMirOpPackedSignedShiftRight:
   1981       opcode = kX86PsrawRI;
   1982       break;
   1983     case kMirOpPackedUnsignedShiftRight:
   1984       opcode = kX86PsrlwRI;
   1985       break;
   1986     default:
    1987       LOG(FATAL) << "Unsupported shift operation on byte vector " << mir->dalvikInsn.opcode;
   1988       break;
   1989   }
   1990 
   1991   /*
   1992    * xmm1 will have low bits
   1993    * xmm2 will have high bits
   1994    *
   1995    * xmm2 = xmm1
   1996    * xmm1 = xmm1 .<< N
   1997    * xmm2 = xmm2 && 0xFF00FF00FF00FF00FF00FF00FF00FF00
   1998    * xmm2 = xmm2 .<< N
   1999    * xmm1 = xmm1 | xmm2
   2000    */
   2001 
   2002   // Copy xmm1.
   2003   NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_dest_src1.GetReg());
   2004 
   2005   // Shift lower values.
   2006   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2007 
   2008   // Mask bottom bits.
   2009   AndMaskVectorRegister(rs_tmp, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
   2010 
   2011   // Shift higher values.
   2012   NewLIR2(opcode, rs_tmp.GetReg(), imm);
   2013 
   2014   // Combine back into dest XMM register.
   2015   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_tmp.GetReg());
   2016 }
   2017 
   2018 void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
   2019   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2020   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2021   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2022   int imm = mir->dalvikInsn.vB;
   2023   int opcode = 0;
   2024   switch (opsize) {
   2025     case k32:
   2026       opcode = kX86PslldRI;
   2027       break;
   2028     case k64:
   2029       opcode = kX86PsllqRI;
   2030       break;
   2031     case kSignedHalf:
   2032     case kUnsignedHalf:
   2033       opcode = kX86PsllwRI;
   2034       break;
   2035     case kSignedByte:
   2036     case kUnsignedByte:
   2037       GenShiftByteVector(bb, mir);
   2038       return;
   2039     default:
   2040       LOG(FATAL) << "Unsupported vector shift left " << opsize;
   2041       break;
   2042   }
   2043   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2044 }
   2045 
   2046 void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
   2047   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2048   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2049   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2050   int imm = mir->dalvikInsn.vB;
   2051   int opcode = 0;
   2052   switch (opsize) {
   2053     case k32:
   2054       opcode = kX86PsradRI;
   2055       break;
   2056     case kSignedHalf:
   2057     case kUnsignedHalf:
   2058       opcode = kX86PsrawRI;
   2059       break;
   2060     case kSignedByte:
   2061     case kUnsignedByte:
   2062       GenShiftByteVector(bb, mir);
   2063       return;
   2064     default:
   2065       LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
   2066       break;
   2067   }
   2068   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2069 }
   2070 
   2071 void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
   2072   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2073   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2074   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2075   int imm = mir->dalvikInsn.vB;
   2076   int opcode = 0;
   2077   switch (opsize) {
   2078     case k32:
   2079       opcode = kX86PsrldRI;
   2080       break;
   2081     case k64:
   2082       opcode = kX86PsrlqRI;
   2083       break;
   2084     case kSignedHalf:
   2085     case kUnsignedHalf:
   2086       opcode = kX86PsrlwRI;
   2087       break;
   2088     case kSignedByte:
   2089     case kUnsignedByte:
   2090       GenShiftByteVector(bb, mir);
   2091       return;
   2092     default:
   2093       LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
   2094       break;
   2095   }
   2096   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
   2097 }
   2098 
   2099 void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
   2100   // We only support 128 bit registers.
   2101   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2102   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2103   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2104   NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2105 }
   2106 
   2107 void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
   2108   // We only support 128 bit registers.
   2109   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2110   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2111   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2112   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2113 }
   2114 
   2115 void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
   2116   // We only support 128 bit registers.
   2117   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2118   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
   2119   RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2120   NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
   2121 }
   2122 
   2123 void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4) {
   2124   MaskVectorRegister(kX86PandRM, rs_src1, m1, m2, m3, m4);
   2125 }
   2126 
   2127 void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2, uint32_t m3) {
   2128   // Create temporary MIR as container for 128-bit binary mask.
   2129   MIR const_mir;
   2130   MIR* const_mirp = &const_mir;
   2131   const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
   2132   const_mirp->dalvikInsn.arg[0] = m0;
   2133   const_mirp->dalvikInsn.arg[1] = m1;
   2134   const_mirp->dalvikInsn.arg[2] = m2;
   2135   const_mirp->dalvikInsn.arg[3] = m3;
   2136 
   2137   // Mask vector with const from literal pool.
   2138   AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
   2139 }
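         // For example, AndMaskVectorRegister(reg, 0x00FF00FF, ...) ends up as a
         // 'pand reg, [128-bit mask in the literal pool]' once AppendOpcodeWithConst's
         // fixup resolves the literal's address.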
   2140 
   2141 void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
   2142   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2143   RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2144   RegLocation rl_dest = mir_graph_->GetDest(mir);
   2145   RegStorage rs_tmp;
   2146 
   2147   int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
   2148   int vec_unit_size = 0;
   2149   int opcode = 0;
   2150   int extr_opcode = 0;
   2151   RegLocation rl_result;
   2152 
   2153   switch (opsize) {
   2154     case k32:
   2155       extr_opcode = kX86PextrdRRI;
   2156       opcode = kX86PhadddRR;
   2157       vec_unit_size = 4;
   2158       break;
   2159     case kSignedByte:
   2160     case kUnsignedByte:
   2161       extr_opcode = kX86PextrbRRI;
   2162       opcode = kX86PhaddwRR;
   2163       vec_unit_size = 2;
   2164       break;
   2165     case kSignedHalf:
   2166     case kUnsignedHalf:
   2167       extr_opcode = kX86PextrwRRI;
   2168       opcode = kX86PhaddwRR;
   2169       vec_unit_size = 2;
   2170       break;
   2171     case kSingle:
   2172       rl_result = EvalLoc(rl_dest, kFPReg, true);
   2173       vec_unit_size = 4;
   2174       for (int i = 0; i < 3; i++) {
   2175         NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
   2176         NewLIR3(kX86ShufpsRRI, rs_src1.GetReg(), rs_src1.GetReg(), 0x39);
   2177       }
   2178       NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), rs_src1.GetReg());
   2179       StoreValue(rl_dest, rl_result);
   2180 
   2181       // For single-precision floats, we are done here
   2182       return;
   2183     default:
   2184       LOG(FATAL) << "Unsupported vector add reduce " << opsize;
   2185       break;
   2186   }
   2187 
   2188   int elems = vec_bytes / vec_unit_size;
   2189 
    2190   // Emulate the horizontal add by splitting into two vectors of 8 halfword values, reducing each, then adding them together.
   2191   // TODO is overflow handled correctly?
   2192   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2193     rs_tmp = Get128BitRegister(AllocTempWide());
   2194 
   2195     // tmp = xmm1 .>> 8.
   2196     NewLIR2(kX86Mova128RR, rs_tmp.GetReg(), rs_src1.GetReg());
   2197     NewLIR2(kX86PsrlwRI, rs_tmp.GetReg(), 8);
   2198 
   2199     // Zero extend low bits in xmm1.
   2200     AndMaskVectorRegister(rs_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
   2201   }
   2202 
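           // Each pass of the horizontal add halves the element count; e.g. for k32 (4 lanes)
           // two phaddd passes leave the total sum in lane 0, which is extracted below.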
   2203   while (elems > 1) {
   2204     if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2205       NewLIR2(opcode, rs_tmp.GetReg(), rs_tmp.GetReg());
   2206     }
   2207     NewLIR2(opcode, rs_src1.GetReg(), rs_src1.GetReg());
   2208     elems >>= 1;
   2209   }
   2210 
   2211   // Combine the results if we separated them.
   2212   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2213     NewLIR2(kX86PaddbRR, rs_src1.GetReg(), rs_tmp.GetReg());
   2214   }
   2215 
   2216   // We need to extract to a GPR.
   2217   RegStorage temp = AllocTemp();
   2218   NewLIR3(extr_opcode, temp.GetReg(), rs_src1.GetReg(), 0);
   2219 
   2220   // Can we do this directly into memory?
   2221   rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2222   if (rl_result.location == kLocPhysReg) {
   2223     // Ensure res is in a core reg
   2224     rl_result = EvalLoc(rl_dest, kCoreReg, true);
   2225     OpRegReg(kOpAdd, rl_result.reg, temp);
   2226     StoreFinalValue(rl_dest, rl_result);
   2227   } else {
   2228     OpMemReg(kOpAdd, rl_result, temp.GetReg());
   2229   }
   2230 
   2231   FreeTemp(temp);
   2232 }
   2233 
   2234 void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
   2235   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2236   RegLocation rl_dest = mir_graph_->GetDest(mir);
   2237   RegStorage rs_src1 = RegStorage::Solo128(mir->dalvikInsn.vB);
   2238   int extract_index = mir->dalvikInsn.arg[0];
   2239   int extr_opcode = 0;
   2240   RegLocation rl_result;
   2241   bool is_wide = false;
   2242 
   2243   switch (opsize) {
   2244     case k32:
   2245       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2246       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdMRI : kX86PextrdRRI;
   2247       break;
   2248     case kSignedHalf:
   2249     case kUnsignedHalf:
    2250       rl_result = UpdateLocTyped(rl_dest, kCoreReg);
   2251       extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwMRI : kX86PextrwRRI;
   2252       break;
   2253     default:
    2254       LOG(FATAL) << "Unsupported vector reduce " << opsize;
   2255       return;
   2256       break;
   2257   }
   2258 
   2259   if (rl_result.location == kLocPhysReg) {
   2260     NewLIR3(extr_opcode, rl_result.reg.GetReg(), rs_src1.GetReg(), extract_index);
    2261     if (is_wide == true) {
    2262       StoreFinalValueWide(rl_dest, rl_result);
    2263     } else {
    2264       StoreFinalValue(rl_dest, rl_result);
   2265     }
   2266   } else {
   2267     int displacement = SRegOffset(rl_result.s_reg_low);
   2268     LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, rs_src1.GetReg());
   2269     AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
   2270     AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
   2271   }
   2272 }
   2273 
   2274 void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
   2275   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   2276   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   2277   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   2278   int op_low = 0, op_high = 0, imm = 0, op_mov = kX86MovdxrRR;
   2279   RegisterClass reg_type = kCoreReg;
   2280 
   2281   switch (opsize) {
   2282     case k32:
   2283       op_low = kX86PshufdRRI;
   2284       break;
   2285     case kSingle:
   2286       op_low = kX86PshufdRRI;
   2287       op_mov = kX86Mova128RR;
   2288       reg_type = kFPReg;
   2289       break;
   2290     case k64:
   2291       op_low = kX86PshufdRRI;
   2292       imm = 0x44;
   2293       break;
   2294     case kDouble:
   2295       op_low = kX86PshufdRRI;
   2296       op_mov = kX86Mova128RR;
   2297       reg_type = kFPReg;
   2298       imm = 0x44;
   2299       break;
   2300     case kSignedByte:
   2301     case kUnsignedByte:
   2302       // Shuffle 8 bit value into 16 bit word.
   2303       // We set val = val + (val << 8) below and use 16 bit shuffle.
   2304     case kSignedHalf:
   2305     case kUnsignedHalf:
   2306       // Handles low quadword.
   2307       op_low = kX86PshuflwRRI;
   2308       // Handles upper quadword.
   2309       op_high = kX86PshufdRRI;
   2310       break;
   2311     default:
   2312       LOG(FATAL) << "Unsupported vector set " << opsize;
   2313       break;
   2314   }
   2315 
   2316   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   2317 
   2318   // Load the value from the VR into the reg.
   2319   if (rl_src.wide == 0) {
   2320     rl_src = LoadValue(rl_src, reg_type);
   2321   } else {
   2322     rl_src = LoadValueWide(rl_src, reg_type);
   2323   }
   2324 
   2325   // If opsize is 8 bits wide then double value and use 16 bit shuffle instead.
   2326   if (opsize == kSignedByte || opsize == kUnsignedByte) {
   2327     RegStorage temp = AllocTemp();
   2328     // val = val + (val << 8).
   2329     NewLIR2(kX86Mov32RR, temp.GetReg(), rl_src.reg.GetReg());
   2330     NewLIR2(kX86Sal32RI, temp.GetReg(), 8);
   2331     NewLIR2(kX86Or32RR, rl_src.reg.GetReg(), temp.GetReg());
   2332     FreeTemp(temp);
   2333   }
   2334 
   2335   // Load the value into the XMM register.
   2336   NewLIR2(op_mov, rs_dest.GetReg(), rl_src.reg.GetReg());
   2337 
   2338   // Now shuffle the value across the destination.
   2339   NewLIR3(op_low, rs_dest.GetReg(), rs_dest.GetReg(), imm);
   2340 
   2341   // And then repeat as needed.
   2342   if (op_high != 0) {
   2343     NewLIR3(op_high, rs_dest.GetReg(), rs_dest.GetReg(), imm);
   2344   }
   2345 }
   2346 
   2347 LIR *X86Mir2Lir::ScanVectorLiteral(MIR *mir) {
   2348   int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
   2349   for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
   2350     if (args[0] == p->operands[0] && args[1] == p->operands[1] &&
   2351         args[2] == p->operands[2] && args[3] == p->operands[3]) {
   2352       return p;
   2353     }
   2354   }
   2355   return nullptr;
   2356 }
   2357 
   2358 LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
   2359   LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
   2360   int *args = reinterpret_cast<int*>(mir->dalvikInsn.arg);
   2361   new_value->operands[0] = args[0];
   2362   new_value->operands[1] = args[1];
   2363   new_value->operands[2] = args[2];
   2364   new_value->operands[3] = args[3];
   2365   new_value->next = const_vectors_;
   2366   if (const_vectors_ == nullptr) {
   2367     estimated_native_code_size_ += 12;  // Amount needed to align to 16 byte boundary.
   2368   }
   2369   estimated_native_code_size_ += 16;  // Space for one vector.
   2370   const_vectors_ = new_value;
   2371   return new_value;
   2372 }
   2373 
   2374 // ------------ ABI support: mapping of args to physical registers -------------
   2375 RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide,
   2376                                                               bool is_ref) {
   2377   const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
   2378   const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) /
   2379       sizeof(SpecialTargetRegister);
   2380   const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
   2381                                                              kFArg4, kFArg5, kFArg6, kFArg7};
   2382   const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) /
   2383       sizeof(SpecialTargetRegister);
   2384 
   2385   if (is_double_or_float) {
   2386     if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
   2387       return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide);
   2388     }
   2389   } else {
   2390     if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
   2391       return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
   2392                             is_ref ? kRef : (is_wide ? kWide : kNotWide));
   2393     }
   2394   }
   2395   return RegStorage::InvalidReg();
   2396 }
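         // A quick illustration (not an exhaustive ABI description): for an (int, double, long)
         // signature this mapper hands out kArg1 for the int, kFArg0 for the double and kArg2 for
         // the long; once the five core or eight FP slots are exhausted it returns InvalidReg()
         // and the argument is passed on the stack instead.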
   2397 
   2398 RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
   2399   DCHECK(IsInitialized());
   2400   auto res = mapping_.find(in_position);
   2401   return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
   2402 }
   2403 
   2404 void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
   2405                                                    InToRegStorageMapper* mapper) {
   2406   DCHECK(mapper != nullptr);
   2407   max_mapped_in_ = -1;
   2408   is_there_stack_mapped_ = false;
   2409   for (int in_position = 0; in_position < count; in_position++) {
   2410      RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
   2411              arg_locs[in_position].wide, arg_locs[in_position].ref);
   2412      if (reg.Valid()) {
   2413        mapping_[in_position] = reg;
   2414        max_mapped_in_ = std::max(max_mapped_in_, in_position);
   2415        if (arg_locs[in_position].wide) {
   2416          // We covered 2 args, so skip the next one
   2417          in_position++;
   2418        }
   2419      } else {
   2420        is_there_stack_mapped_ = true;
   2421      }
   2422   }
   2423   initialized_ = true;
   2424 }
   2425 
   2426 RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   2427   if (!cu_->target64) {
   2428     return GetCoreArgMappingToPhysicalReg(arg_num);
   2429   }
   2430 
   2431   if (!in_to_reg_storage_mapping_.IsInitialized()) {
   2432     int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   2433     RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
   2434 
   2435     InToRegStorageX86_64Mapper mapper(this);
   2436     in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
   2437   }
   2438   return in_to_reg_storage_mapping_.Get(arg_num);
   2439 }
   2440 
   2441 RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
   2442   // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
   2443   // Not used for 64-bit, TODO: Move X86_32 to the same framework
   2444   switch (core_arg_num) {
   2445     case 0:
   2446       return rs_rX86_ARG1;
   2447     case 1:
   2448       return rs_rX86_ARG2;
   2449     case 2:
   2450       return rs_rX86_ARG3;
   2451     default:
   2452       return RegStorage::InvalidReg();
   2453   }
   2454 }
   2455 
   2456 // ---------End of ABI support: mapping of args to physical registers -------------
   2457 
   2458 /*
   2459  * If there are any ins passed in registers that have not been promoted
   2460  * to a callee-save register, flush them to the frame.  Perform initial
   2461  * assignment of promoted arguments.
   2462  *
   2463  * ArgLocs is an array of location records describing the incoming arguments
   2464  * with one location record per word of argument.
   2465  */
   2466 void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
   2467   if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
   2468   /*
   2469    * Dummy up a RegLocation for the incoming Method*
   2470    * It will attempt to keep kArg0 live (or copy it to home location
   2471    * if promoted).
   2472    */
   2473 
   2474   RegLocation rl_src = rl_method;
   2475   rl_src.location = kLocPhysReg;
   2476   rl_src.reg = TargetReg(kArg0, kRef);
   2477   rl_src.home = false;
   2478   MarkLive(rl_src);
   2479   StoreValue(rl_method, rl_src);
   2480   // If Method* has been promoted, explicitly flush
   2481   if (rl_method.location == kLocPhysReg) {
   2482     StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
   2483   }
   2484 
   2485   if (cu_->num_ins == 0) {
   2486     return;
   2487   }
   2488 
   2489   int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   2490   /*
   2491    * Copy incoming arguments to their proper home locations.
   2492    * NOTE: an older version of dx had an issue in which
   2493    * it would reuse static method argument registers.
   2494    * This could result in the same Dalvik virtual register
   2495    * being promoted to both core and fp regs. To account for this,
   2496    * we only copy to the corresponding promoted physical register
   2497    * if it matches the type of the SSA name for the incoming
   2498    * argument.  It is also possible that long and double arguments
   2499    * end up half-promoted.  In those cases, we must flush the promoted
   2500    * half to memory as well.
   2501    */
   2502   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2503   for (int i = 0; i < cu_->num_ins; i++) {
   2504     // get reg corresponding to input
   2505     RegStorage reg = GetArgMappingToPhysicalReg(i);
   2506 
   2507     RegLocation* t_loc = &ArgLocs[i];
   2508     if (reg.Valid()) {
   2509       // If arriving in register.
   2510 
   2511       // We have already updated the arg location with promoted info
   2512       // so we can be based on it.
   2513       if (t_loc->location == kLocPhysReg) {
   2514         // Just copy it.
   2515         OpRegCopy(t_loc->reg, reg);
   2516       } else {
   2517         // Needs flush.
   2518         if (t_loc->ref) {
   2519           StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
   2520         } else {
   2521           StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
   2522                         kNotVolatile);
   2523         }
   2524       }
   2525     } else {
   2526       // If arriving in frame & promoted.
   2527       if (t_loc->location == kLocPhysReg) {
   2528         if (t_loc->ref) {
   2529           LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
   2530         } else {
   2531           LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
   2532                        t_loc->wide ? k64 : k32, kNotVolatile);
   2533         }
   2534       }
   2535     }
   2536     if (t_loc->wide) {
   2537       // Increment i to skip the next one.
   2538       i++;
   2539     }
   2540   }
   2541 }
   2542 
   2543 /*
   2544  * Load up to 5 arguments, the first three of which will be in
   2545  * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
   2546  * and as part of the load sequence, it must be replaced with
   2547  * the target method pointer.  Note, this may also be called
   2548  * for "range" variants if the number of arguments is 5 or fewer.
   2549  */
   2550 int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
   2551                                   int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
   2552                                   const MethodReference& target_method,
   2553                                   uint32_t vtable_idx, uintptr_t direct_code,
   2554                                   uintptr_t direct_method, InvokeType type, bool skip_this) {
   2555   if (!cu_->target64) {
   2556     return Mir2Lir::GenDalvikArgsNoRange(info,
   2557                                   call_state, pcrLabel, next_call_insn,
   2558                                   target_method,
   2559                                   vtable_idx, direct_code,
   2560                                   direct_method, type, skip_this);
   2561   }
   2562   return GenDalvikArgsRange(info,
   2563                        call_state, pcrLabel, next_call_insn,
   2564                        target_method,
   2565                        vtable_idx, direct_code,
   2566                        direct_method, type, skip_this);
   2567 }
   2568 
   2569 /*
   2570  * May have 0+ arguments (also used for jumbo).  Note that
   2571  * source virtual registers may be in physical registers, so may
   2572  * need to be flushed to home location before copying.  This
   2573  * applies to arg3 and above (see below).
   2574  *
   2575  * Two general strategies:
   2576  *    If < 20 arguments
   2577  *       Pass args 3-18 using vldm/vstm block copy
   2578  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
   2579  *    If 20+ arguments
   2580  *       Pass args arg19+ using memcpy block copy
   2581  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
   2582  *
   2583  */
   2584 int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
   2585                                 LIR** pcrLabel, NextCallInsn next_call_insn,
   2586                                 const MethodReference& target_method,
   2587                                 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
   2588                                 InvokeType type, bool skip_this) {
   2589   if (!cu_->target64) {
   2590     return Mir2Lir::GenDalvikArgsRange(info, call_state,
   2591                                 pcrLabel, next_call_insn,
   2592                                 target_method,
   2593                                 vtable_idx, direct_code, direct_method,
   2594                                 type, skip_this);
   2595   }
   2596 
   2597   /* If no arguments, just return */
   2598   if (info->num_arg_words == 0)
   2599     return call_state;
   2600 
   2601   const int start_index = skip_this ? 1 : 0;
   2602 
   2603   InToRegStorageX86_64Mapper mapper(this);
   2604   InToRegStorageMapping in_to_reg_storage_mapping;
   2605   in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
   2606   const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
   2607   const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
   2608           info->args[last_mapped_in].wide ? 2 : 1;
   2609   int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
   2610 
    2611   // First of all, check whether it makes sense to use bulk copying.
    2612   // The optimization is applicable only for the range case.
   2613   // TODO: make a constant instead of 2
   2614   if (info->is_range && regs_left_to_pass_via_stack >= 2) {
   2615     // Scan the rest of the args - if in phys_reg flush to memory
   2616     for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
   2617       RegLocation loc = info->args[next_arg];
   2618       if (loc.wide) {
   2619         loc = UpdateLocWide(loc);
   2620         if (loc.location == kLocPhysReg) {
   2621           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2622           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
   2623         }
   2624         next_arg += 2;
   2625       } else {
   2626         loc = UpdateLoc(loc);
   2627         if (loc.location == kLocPhysReg) {
   2628           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2629           StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
   2630         }
   2631         next_arg++;
   2632       }
   2633     }
   2634 
   2635     // Logic below assumes that Method pointer is at offset zero from SP.
   2636     DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
   2637 
   2638     // The rest can be copied together
   2639     int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
   2640     int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
   2641                                                    cu_->instruction_set);
   2642 
   2643     int current_src_offset = start_offset;
   2644     int current_dest_offset = outs_offset;
   2645 
    2646     // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
   2647     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2648     while (regs_left_to_pass_via_stack > 0) {
   2649       // This is based on the knowledge that the stack itself is 16-byte aligned.
   2650       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
   2651       bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
   2652       size_t bytes_to_move;
   2653 
   2654       /*
    2655        * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do
    2656        * a 128-bit move because we won't get another chance to align. If there are more than
    2657        * 4 registers left to move, consider a 128-bit move only if either src or dest is aligned.
   2658        * We do this because we could potentially do a smaller move to align.
   2659        */
   2660       if (regs_left_to_pass_via_stack == 4 ||
   2661           (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
   2662         // Moving 128-bits via xmm register.
   2663         bytes_to_move = sizeof(uint32_t) * 4;
   2664 
   2665         // Allocate a free xmm temp. Since we are working through the calling sequence,
   2666         // we expect to have an xmm temporary available.  AllocTempDouble will abort if
   2667         // there are no free registers.
   2668         RegStorage temp = AllocTempDouble();
   2669 
   2670         LIR* ld1 = nullptr;
   2671         LIR* ld2 = nullptr;
   2672         LIR* st1 = nullptr;
   2673         LIR* st2 = nullptr;
   2674 
   2675         /*
   2676          * The logic is similar for both loads and stores. If we have 16-byte alignment,
   2677          * do an aligned move. If we have 8-byte alignment, then do the move in two
   2678          * parts. This approach prevents possible cache line splits. Finally, fall back
   2679          * to doing an unaligned move. In most cases we likely won't split the cache
   2680          * line but we cannot prove it and thus take a conservative approach.
   2681          */
   2682         bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
   2683         bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
   2684 
   2685         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2686         if (src_is_16b_aligned) {
   2687           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
   2688         } else if (src_is_8b_aligned) {
   2689           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
   2690           ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
   2691                             kMovHi128FP);
   2692         } else {
   2693           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
   2694         }
   2695 
   2696         if (dest_is_16b_aligned) {
   2697           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
   2698         } else if (dest_is_8b_aligned) {
   2699           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
   2700           st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
   2701                             temp, kMovHi128FP);
   2702         } else {
   2703           st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
   2704         }
   2705 
   2706         // TODO: If we could keep track of aliasing information for memory accesses that are
   2707         // wider than 64-bit, we wouldn't need to set up a barrier.
   2708         if (ld1 != nullptr) {
   2709           if (ld2 != nullptr) {
   2710             // For 64-bit load we can actually set up the aliasing information.
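                    // AnnotateDalvikRegAccess() expects a VR word index, so the byte
                    // offsets are converted by dividing by 4.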
   2711             AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
   2712             AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
   2713           } else {
   2714             // Set barrier for 128-bit load.
   2715             ld1->u.m.def_mask = &kEncodeAll;
   2716           }
   2717         }
   2718         if (st1 != nullptr) {
   2719           if (st2 != nullptr) {
   2720             // For 64-bit store we can actually set up the aliasing information.
   2721             AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
   2722             AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
   2723           } else {
   2724             // Set barrier for 128-bit store.
   2725             st1->u.m.def_mask = &kEncodeAll;
   2726           }
   2727         }
   2728 
   2729         // Free the temporary used for the data movement.
   2730         FreeTemp(temp);
   2731       } else {
   2732         // Moving 32-bits via general purpose register.
   2733         bytes_to_move = sizeof(uint32_t);
   2734 
   2735         // Instead of allocating a new temp, simply reuse one of the registers being used
   2736         // for argument passing.
   2737         RegStorage temp = TargetReg(kArg3, kNotWide);
   2738 
   2739         // Now load the argument VR and store to the outs.
   2740         Load32Disp(rs_rX86_SP, current_src_offset, temp);
   2741         Store32Disp(rs_rX86_SP, current_dest_offset, temp);
   2742       }
   2743 
   2744       current_src_offset += bytes_to_move;
   2745       current_dest_offset += bytes_to_move;
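              // Each Dalvik VR word is 4 bytes, so convert the bytes just copied back into
              // VR words.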
   2746       regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
   2747     }
   2748     DCHECK_EQ(regs_left_to_pass_via_stack, 0);
   2749   }
   2750 
   2751   // Now handle the remaining arguments that were not mapped to registers.
   2752   if (in_to_reg_storage_mapping.IsThereStackMapped()) {
   2753     RegStorage regSingle = TargetReg(kArg2, kNotWide);
   2754     RegStorage regWide = TargetReg(kArg3, kWide);
   2755     for (int i = start_index;
   2756          i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
   2757       RegLocation rl_arg = info->args[i];
   2758       rl_arg = UpdateRawLoc(rl_arg);
   2759       RegStorage reg = in_to_reg_storage_mapping.Get(i);
   2760       if (!reg.Valid()) {
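                // An invalid mapping means this argument is passed on the stack; store it
                // directly into its out VR slot.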
   2761         int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
   2762 
   2763         {
   2764           ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   2765           if (rl_arg.wide) {
   2766             if (rl_arg.location == kLocPhysReg) {
   2767               StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
   2768             } else {
   2769               LoadValueDirectWideFixed(rl_arg, regWide);
   2770               StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
   2771             }
   2772           } else {
   2773             if (rl_arg.location == kLocPhysReg) {
   2774               StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
   2775             } else {
   2776               LoadValueDirectFixed(rl_arg, regSingle);
   2777               StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
   2778             }
   2779           }
   2780         }
   2781         call_state = next_call_insn(cu_, info, call_state, target_method,
   2782                                     vtable_idx, direct_code, direct_method, type);
   2783       }
   2784       if (rl_arg.wide) {
   2785         i++;
   2786       }
   2787     }
   2788   }
   2789 
   2790   // Finish with mapped registers
   2791   for (int i = start_index; i <= last_mapped_in; i++) {
   2792     RegLocation rl_arg = info->args[i];
   2793     rl_arg = UpdateRawLoc(rl_arg);
   2794     RegStorage reg = in_to_reg_storage_mapping.Get(i);
   2795     if (reg.Valid()) {
   2796       if (rl_arg.wide) {
   2797         LoadValueDirectWideFixed(rl_arg, reg);
   2798       } else {
   2799         LoadValueDirectFixed(rl_arg, reg);
   2800       }
   2801       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
   2802                                direct_code, direct_method, type);
   2803     }
   2804     if (rl_arg.wide) {
   2805       i++;
   2806     }
   2807   }
   2808 
   2809   call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
   2810                            direct_code, direct_method, type);
   2811   if (pcrLabel) {
   2812     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
   2813       *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
   2814     } else {
   2815       *pcrLabel = nullptr;
   2816       // In lieu of generating a check for kArg1 being null, we need to
   2817       // perform a load when doing implicit checks.
   2818       RegStorage tmp = AllocTemp();
   2819       Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
   2820       MarkPossibleNullPointerException(info->opt_flags);
   2821       FreeTemp(tmp);
   2822     }
   2823   }
   2824   return call_state;
   2825 }
   2826 
   2827 bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
   2828   // Location of reference to data array
   2829   int value_offset = mirror::String::ValueOffset().Int32Value();
   2830   // Location of count
   2831   int count_offset = mirror::String::CountOffset().Int32Value();
   2832   // Starting offset within data array
   2833   int offset_offset = mirror::String::OffsetOffset().Int32Value();
   2834   // Start of char data within array_
   2835   int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
   2836 
   2837   RegLocation rl_obj = info->args[0];
   2838   RegLocation rl_idx = info->args[1];
   2839   rl_obj = LoadValue(rl_obj, kRefReg);
   2840   // X86 wants to avoid putting a constant index into a register.
   2841   if (!rl_idx.is_const) {
   2842     rl_idx = LoadValue(rl_idx, kCoreReg);
   2843   }
   2844   RegStorage reg_max;
   2845   GenNullCheck(rl_obj.reg, info->opt_flags);
   2846   bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
   2847   LIR* range_check_branch = nullptr;
   2848   RegStorage reg_off;
   2849   RegStorage reg_ptr;
   2850   if (range_check) {
   2851     // On x86, we can compare to memory directly.
   2852     // Set up a launch pad to allow retry in case of bounds violation.
   2853     if (rl_idx.is_const) {
   2854       LIR* comparison;
   2855       range_check_branch = OpCmpMemImmBranch(
   2856           kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
   2857           mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
   2858       MarkPossibleNullPointerExceptionAfter(0, comparison);
   2859     } else {
   2860       OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
   2861       MarkPossibleNullPointerException(0);
   2862       range_check_branch = OpCondBranch(kCondUge, nullptr);
   2863     }
   2864   }
   2865   reg_off = AllocTemp();
   2866   reg_ptr = AllocTempRef();
   2867   Load32Disp(rl_obj.reg, offset_offset, reg_off);
   2868   LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
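          // reg_off now holds the starting index into the backing char array (String.offset)
          // and reg_ptr the reference to that array (String.value).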
   2869   if (rl_idx.is_const) {
   2870     OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
   2871   } else {
   2872     OpRegReg(kOpAdd, reg_off, rl_idx.reg);
   2873   }
   2874   FreeTemp(rl_obj.reg);
   2875   if (rl_idx.location == kLocPhysReg) {
   2876     FreeTemp(rl_idx.reg);
   2877   }
   2878   RegLocation rl_dest = InlineTarget(info);
   2879   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
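          // Load the 16-bit char at reg_ptr + (reg_off << 1) + data_offset; the scale of 1
          // accounts for the two-byte char elements.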
   2880   LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
   2881   FreeTemp(reg_off);
   2882   FreeTemp(reg_ptr);
   2883   StoreValue(rl_dest, rl_result);
   2884   if (range_check) {
   2885     DCHECK(range_check_branch != nullptr);
   2886     info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
   2887     AddIntrinsicSlowPath(info, range_check_branch);
   2888   }
   2889   return true;
   2890 }
   2891 
   2892 bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
   2893   RegLocation rl_dest = InlineTarget(info);
   2894 
   2895   // Early exit if the result is unused.
   2896   if (rl_dest.orig_sreg < 0) {
   2897     return true;
   2898   }
   2899 
   2900   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
   2901 
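          // Thread::PeerOffset<n>() is the offset of the managed java.lang.Thread peer within
          // the native Thread object; it depends on the target's pointer size.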
   2902   if (cu_->target64) {
   2903     OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
   2904   } else {
   2905     OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
   2906   }
   2907 
   2908   StoreValue(rl_dest, rl_result);
   2909   return true;
   2910 }
   2911 
   2912 }  // namespace art
   2913