//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);
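
// Illustrative note (not part of the original source): cl::opt flags such as
// the three above surface as command-line switches on any tool that links in
// this backend, e.g. with llc:
//
//   llc -disable-spill-fusing foo.ll
//   llc -print-failed-fuse-candidates foo.ll
//   llc -remat-pic-stub-load -relocation-model=pic foo.ll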

enum {
  // Select which memory operand is being unfolded.
  // (stored in bits 0 - 3)
  TB_INDEX_0    = 0,
  TB_INDEX_1    = 1,
  TB_INDEX_2    = 2,
  TB_INDEX_3    = 3,
  TB_INDEX_4    = 4,
  TB_INDEX_MASK = 0xf,

  // Do not insert the reverse map (MemOp -> RegOp) into the table.
  // This may be needed because there is a many -> one mapping.
  TB_NO_REVERSE = 1 << 4,

  // Do not insert the forward map (RegOp -> MemOp) into the table.
  // This is needed for Native Client, which prohibits branch
  // instructions from using a memory operand.
  TB_NO_FORWARD = 1 << 5,

  TB_FOLDED_LOAD  = 1 << 6,
  TB_FOLDED_STORE = 1 << 7,

  // Minimum alignment required for load/store.
  // Used for RegOp->MemOp conversion.
  // (stored in bits 8 - 15)
  TB_ALIGN_SHIFT = 8,
  TB_ALIGN_NONE  =  0 << TB_ALIGN_SHIFT,
  TB_ALIGN_16    = 16 << TB_ALIGN_SHIFT,
  TB_ALIGN_32    = 32 << TB_ALIGN_SHIFT,
  TB_ALIGN_64    = 64 << TB_ALIGN_SHIFT,
  TB_ALIGN_MASK  = 0xff << TB_ALIGN_SHIFT
};

struct X86MemoryFoldTableEntry {
  uint16_t RegOp;
  uint16_t MemOp;
  uint16_t Flags;
};
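
// Illustrative sketch (not part of the original source): how a packed Flags
// word from the fold tables below decodes under the enum above. For an entry
// flagged TB_INDEX_2 | TB_FOLDED_LOAD | TB_ALIGN_16:
//
//   unsigned Index = Flags & TB_INDEX_MASK;                     // == 2
//   bool HasLoad   = (Flags & TB_FOLDED_LOAD) != 0;             // == true
//   bool HasStore  = (Flags & TB_FOLDED_STORE) != 0;            // == false
//   unsigned Align = (Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; // == 16
//
// TB_ALIGN_16 stores the byte alignment itself (16) in bits 8 - 15, so
// shifting right by TB_ALIGN_SHIFT recovers the required alignment directly.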

// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
    : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
                                               : X86::ADJCALLSTACKDOWN32),
                      (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
                                               : X86::ADJCALLSTACKUP32)),
      Subtarget(STI), RI(STI.getTargetTriple()) {

  static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
    { X86::ADC32ri, X86::ADC32mi, 0 },
    { X86::ADC32ri8, X86::ADC32mi8, 0 },
    { X86::ADC32rr, X86::ADC32mr, 0 },
    { X86::ADC64ri32, X86::ADC64mi32, 0 },
    { X86::ADC64ri8, X86::ADC64mi8, 0 },
    { X86::ADC64rr, X86::ADC64mr, 0 },
    { X86::ADD16ri, X86::ADD16mi, 0 },
    { X86::ADD16ri8, X86::ADD16mi8, 0 },
    { X86::ADD16ri_DB, X86::ADD16mi, TB_NO_REVERSE },
    { X86::ADD16ri8_DB, X86::ADD16mi8, TB_NO_REVERSE },
    { X86::ADD16rr, X86::ADD16mr, 0 },
    { X86::ADD16rr_DB, X86::ADD16mr, TB_NO_REVERSE },
    { X86::ADD32ri, X86::ADD32mi, 0 },
    { X86::ADD32ri8, X86::ADD32mi8, 0 },
    { X86::ADD32ri_DB, X86::ADD32mi, TB_NO_REVERSE },
    { X86::ADD32ri8_DB, X86::ADD32mi8, TB_NO_REVERSE },
    { X86::ADD32rr, X86::ADD32mr, 0 },
    { X86::ADD32rr_DB, X86::ADD32mr, TB_NO_REVERSE },
    { X86::ADD64ri32, X86::ADD64mi32, 0 },
    { X86::ADD64ri8, X86::ADD64mi8, 0 },
    { X86::ADD64ri32_DB, X86::ADD64mi32, TB_NO_REVERSE },
    { X86::ADD64ri8_DB, X86::ADD64mi8, TB_NO_REVERSE },
    { X86::ADD64rr, X86::ADD64mr, 0 },
    { X86::ADD64rr_DB, X86::ADD64mr, TB_NO_REVERSE },
    { X86::ADD8ri, X86::ADD8mi, 0 },
    { X86::ADD8rr, X86::ADD8mr, 0 },
    { X86::AND16ri, X86::AND16mi, 0 },
    { X86::AND16ri8, X86::AND16mi8, 0 },
    { X86::AND16rr, X86::AND16mr, 0 },
    { X86::AND32ri, X86::AND32mi, 0 },
    { X86::AND32ri8, X86::AND32mi8, 0 },
    { X86::AND32rr, X86::AND32mr, 0 },
    { X86::AND64ri32, X86::AND64mi32, 0 },
    { X86::AND64ri8, X86::AND64mi8, 0 },
    { X86::AND64rr, X86::AND64mr, 0 },
    { X86::AND8ri, X86::AND8mi, 0 },
    { X86::AND8rr, X86::AND8mr, 0 },
    { X86::DEC16r, X86::DEC16m, 0 },
    { X86::DEC32r, X86::DEC32m, 0 },
    { X86::DEC64r, X86::DEC64m, 0 },
    { X86::DEC8r, X86::DEC8m, 0 },
    { X86::INC16r, X86::INC16m, 0 },
    { X86::INC32r, X86::INC32m, 0 },
    { X86::INC64r, X86::INC64m, 0 },
    { X86::INC8r, X86::INC8m, 0 },
    { X86::NEG16r, X86::NEG16m, 0 },
    { X86::NEG32r, X86::NEG32m, 0 },
    { X86::NEG64r, X86::NEG64m, 0 },
    { X86::NEG8r, X86::NEG8m, 0 },
    { X86::NOT16r, X86::NOT16m, 0 },
    { X86::NOT32r, X86::NOT32m, 0 },
    { X86::NOT64r, X86::NOT64m, 0 },
    { X86::NOT8r, X86::NOT8m, 0 },
    { X86::OR16ri, X86::OR16mi, 0 },
    { X86::OR16ri8, X86::OR16mi8, 0 },
    { X86::OR16rr, X86::OR16mr, 0 },
    { X86::OR32ri, X86::OR32mi, 0 },
    { X86::OR32ri8, X86::OR32mi8, 0 },
    { X86::OR32rr, X86::OR32mr, 0 },
    { X86::OR64ri32, X86::OR64mi32, 0 },
    { X86::OR64ri8, X86::OR64mi8, 0 },
    { X86::OR64rr, X86::OR64mr, 0 },
    { X86::OR8ri, X86::OR8mi, 0 },
    { X86::OR8rr, X86::OR8mr, 0 },
    { X86::ROL16r1, X86::ROL16m1, 0 },
    { X86::ROL16rCL, X86::ROL16mCL, 0 },
    { X86::ROL16ri, X86::ROL16mi, 0 },
    { X86::ROL32r1, X86::ROL32m1, 0 },
    { X86::ROL32rCL, X86::ROL32mCL, 0 },
    { X86::ROL32ri, X86::ROL32mi, 0 },
    { X86::ROL64r1, X86::ROL64m1, 0 },
    { X86::ROL64rCL, X86::ROL64mCL, 0 },
    { X86::ROL64ri, X86::ROL64mi, 0 },
    { X86::ROL8r1, X86::ROL8m1, 0 },
    { X86::ROL8rCL, X86::ROL8mCL, 0 },
    { X86::ROL8ri, X86::ROL8mi, 0 },
    { X86::ROR16r1, X86::ROR16m1, 0 },
    { X86::ROR16rCL, X86::ROR16mCL, 0 },
    { X86::ROR16ri, X86::ROR16mi, 0 },
    { X86::ROR32r1, X86::ROR32m1, 0 },
    { X86::ROR32rCL, X86::ROR32mCL, 0 },
    { X86::ROR32ri, X86::ROR32mi, 0 },
    { X86::ROR64r1, X86::ROR64m1, 0 },
    { X86::ROR64rCL, X86::ROR64mCL, 0 },
    { X86::ROR64ri, X86::ROR64mi, 0 },
    { X86::ROR8r1, X86::ROR8m1, 0 },
    { X86::ROR8rCL, X86::ROR8mCL, 0 },
    { X86::ROR8ri, X86::ROR8mi, 0 },
    { X86::SAR16r1, X86::SAR16m1, 0 },
    { X86::SAR16rCL, X86::SAR16mCL, 0 },
    { X86::SAR16ri, X86::SAR16mi, 0 },
    { X86::SAR32r1, X86::SAR32m1, 0 },
    { X86::SAR32rCL, X86::SAR32mCL, 0 },
    { X86::SAR32ri, X86::SAR32mi, 0 },
    { X86::SAR64r1, X86::SAR64m1, 0 },
    { X86::SAR64rCL, X86::SAR64mCL, 0 },
    { X86::SAR64ri, X86::SAR64mi, 0 },
    { X86::SAR8r1, X86::SAR8m1, 0 },
    { X86::SAR8rCL, X86::SAR8mCL, 0 },
    { X86::SAR8ri, X86::SAR8mi, 0 },
    { X86::SBB32ri, X86::SBB32mi, 0 },
    { X86::SBB32ri8, X86::SBB32mi8, 0 },
    { X86::SBB32rr, X86::SBB32mr, 0 },
    { X86::SBB64ri32, X86::SBB64mi32, 0 },
    { X86::SBB64ri8, X86::SBB64mi8, 0 },
    { X86::SBB64rr, X86::SBB64mr, 0 },
    { X86::SHL16rCL, X86::SHL16mCL, 0 },
    { X86::SHL16ri, X86::SHL16mi, 0 },
    { X86::SHL32rCL, X86::SHL32mCL, 0 },
    { X86::SHL32ri, X86::SHL32mi, 0 },
    { X86::SHL64rCL, X86::SHL64mCL, 0 },
    { X86::SHL64ri, X86::SHL64mi, 0 },
    { X86::SHL8rCL, X86::SHL8mCL, 0 },
    { X86::SHL8ri, X86::SHL8mi, 0 },
    { X86::SHLD16rrCL, X86::SHLD16mrCL, 0 },
    { X86::SHLD16rri8, X86::SHLD16mri8, 0 },
    { X86::SHLD32rrCL, X86::SHLD32mrCL, 0 },
    { X86::SHLD32rri8, X86::SHLD32mri8, 0 },
    { X86::SHLD64rrCL, X86::SHLD64mrCL, 0 },
    { X86::SHLD64rri8, X86::SHLD64mri8, 0 },
    { X86::SHR16r1, X86::SHR16m1, 0 },
    { X86::SHR16rCL, X86::SHR16mCL, 0 },
    { X86::SHR16ri, X86::SHR16mi, 0 },
    { X86::SHR32r1, X86::SHR32m1, 0 },
    { X86::SHR32rCL, X86::SHR32mCL, 0 },
    { X86::SHR32ri, X86::SHR32mi, 0 },
    { X86::SHR64r1, X86::SHR64m1, 0 },
    { X86::SHR64rCL, X86::SHR64mCL, 0 },
    { X86::SHR64ri, X86::SHR64mi, 0 },
    { X86::SHR8r1, X86::SHR8m1, 0 },
    { X86::SHR8rCL, X86::SHR8mCL, 0 },
    { X86::SHR8ri, X86::SHR8mi, 0 },
    { X86::SHRD16rrCL, X86::SHRD16mrCL, 0 },
    { X86::SHRD16rri8, X86::SHRD16mri8, 0 },
    { X86::SHRD32rrCL, X86::SHRD32mrCL, 0 },
    { X86::SHRD32rri8, X86::SHRD32mri8, 0 },
    { X86::SHRD64rrCL, X86::SHRD64mrCL, 0 },
    { X86::SHRD64rri8, X86::SHRD64mri8, 0 },
    { X86::SUB16ri, X86::SUB16mi, 0 },
    { X86::SUB16ri8, X86::SUB16mi8, 0 },
    { X86::SUB16rr, X86::SUB16mr, 0 },
    { X86::SUB32ri, X86::SUB32mi, 0 },
    { X86::SUB32ri8, X86::SUB32mi8, 0 },
    { X86::SUB32rr, X86::SUB32mr, 0 },
    { X86::SUB64ri32, X86::SUB64mi32, 0 },
    { X86::SUB64ri8, X86::SUB64mi8, 0 },
    { X86::SUB64rr, X86::SUB64mr, 0 },
    { X86::SUB8ri, X86::SUB8mi, 0 },
    { X86::SUB8rr, X86::SUB8mr, 0 },
    { X86::XOR16ri, X86::XOR16mi, 0 },
    { X86::XOR16ri8, X86::XOR16mi8, 0 },
    { X86::XOR16rr, X86::XOR16mr, 0 },
    { X86::XOR32ri, X86::XOR32mi, 0 },
    { X86::XOR32ri8, X86::XOR32mi8, 0 },
    { X86::XOR32rr, X86::XOR32mr, 0 },
    { X86::XOR64ri32, X86::XOR64mi32, 0 },
    { X86::XOR64ri8, X86::XOR64mi8, 0 },
    { X86::XOR64rr, X86::XOR64mr, 0 },
    { X86::XOR8ri, X86::XOR8mi, 0 },
    { X86::XOR8rr, X86::XOR8mr, 0 }
  };

  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2Addr); i != e; ++i) {
    unsigned RegOp = MemoryFoldTable2Addr[i].RegOp;
    unsigned MemOp = MemoryFoldTable2Addr[i].MemOp;
    unsigned Flags = MemoryFoldTable2Addr[i].Flags;
    AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
                  RegOp, MemOp,
                  // Index 0, folded load and store, no alignment requirement.
                  Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
  }
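
  // Illustrative sketch (not part of the original source): assuming
  // RegOp2MemOpTable2Addr maps an opcode to a (MemOp, Flags) pair, as the
  // AddTableEntry call above suggests, a later fold query reduces to a map
  // lookup on the instruction's opcode:
  //
  //   auto I = RegOp2MemOpTable2Addr.find(MI->getOpcode());
  //   if (I != RegOp2MemOpTable2Addr.end()) {
  //     unsigned MemOpc = I->second.first;  // memory form, e.g. X86::ADD32mr
  //     unsigned Flags  = I->second.second; // TB_* bits checked by the caller
  //   }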

  static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
    { X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD },
    { X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD },
    { X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD },
    { X86::CALL32r, X86::CALL32m, TB_FOLDED_LOAD },
    { X86::CALL64r, X86::CALL64m, TB_FOLDED_LOAD },
    { X86::CMP16ri, X86::CMP16mi, TB_FOLDED_LOAD },
    { X86::CMP16ri8, X86::CMP16mi8, TB_FOLDED_LOAD },
    { X86::CMP16rr, X86::CMP16mr, TB_FOLDED_LOAD },
    { X86::CMP32ri, X86::CMP32mi, TB_FOLDED_LOAD },
    { X86::CMP32ri8, X86::CMP32mi8, TB_FOLDED_LOAD },
    { X86::CMP32rr, X86::CMP32mr, TB_FOLDED_LOAD },
    { X86::CMP64ri32, X86::CMP64mi32, TB_FOLDED_LOAD },
    { X86::CMP64ri8, X86::CMP64mi8, TB_FOLDED_LOAD },
    { X86::CMP64rr, X86::CMP64mr, TB_FOLDED_LOAD },
    { X86::CMP8ri, X86::CMP8mi, TB_FOLDED_LOAD },
    { X86::CMP8rr, X86::CMP8mr, TB_FOLDED_LOAD },
    { X86::DIV16r, X86::DIV16m, TB_FOLDED_LOAD },
    { X86::DIV32r, X86::DIV32m, TB_FOLDED_LOAD },
    { X86::DIV64r, X86::DIV64m, TB_FOLDED_LOAD },
    { X86::DIV8r, X86::DIV8m, TB_FOLDED_LOAD },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, TB_FOLDED_STORE },
    { X86::IDIV16r, X86::IDIV16m, TB_FOLDED_LOAD },
    { X86::IDIV32r, X86::IDIV32m, TB_FOLDED_LOAD },
    { X86::IDIV64r, X86::IDIV64m, TB_FOLDED_LOAD },
    { X86::IDIV8r, X86::IDIV8m, TB_FOLDED_LOAD },
    { X86::IMUL16r, X86::IMUL16m, TB_FOLDED_LOAD },
    { X86::IMUL32r, X86::IMUL32m, TB_FOLDED_LOAD },
    { X86::IMUL64r, X86::IMUL64m, TB_FOLDED_LOAD },
    { X86::IMUL8r, X86::IMUL8m, TB_FOLDED_LOAD },
    { X86::JMP32r, X86::JMP32m, TB_FOLDED_LOAD },
    { X86::JMP64r, X86::JMP64m, TB_FOLDED_LOAD },
    { X86::MOV16ri, X86::MOV16mi, TB_FOLDED_STORE },
    { X86::MOV16rr, X86::MOV16mr, TB_FOLDED_STORE },
    { X86::MOV32ri, X86::MOV32mi, TB_FOLDED_STORE },
    { X86::MOV32rr, X86::MOV32mr, TB_FOLDED_STORE },
    { X86::MOV64ri32, X86::MOV64mi32, TB_FOLDED_STORE },
    { X86::MOV64rr, X86::MOV64mr, TB_FOLDED_STORE },
    { X86::MOV8ri, X86::MOV8mi, TB_FOLDED_STORE },
    { X86::MOV8rr, X86::MOV8mr, TB_FOLDED_STORE },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, TB_FOLDED_STORE },
    { X86::MOVAPDrr, X86::MOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVAPSrr, X86::MOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVDQArr, X86::MOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, TB_FOLDED_STORE },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, TB_FOLDED_STORE },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, TB_FOLDED_STORE },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, TB_FOLDED_STORE },
    { X86::MOVUPDrr, X86::MOVUPDmr, TB_FOLDED_STORE },
    { X86::MOVUPSrr, X86::MOVUPSmr, TB_FOLDED_STORE },
    { X86::MUL16r, X86::MUL16m, TB_FOLDED_LOAD },
    { X86::MUL32r, X86::MUL32m, TB_FOLDED_LOAD },
    { X86::MUL64r, X86::MUL64m, TB_FOLDED_LOAD },
    { X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD },
    { X86::PEXTRDrr, X86::PEXTRDmr, TB_FOLDED_STORE },
    { X86::PEXTRQrr, X86::PEXTRQmr, TB_FOLDED_STORE },
    { X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE },
    { X86::SETAr, X86::SETAm, TB_FOLDED_STORE },
    { X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE },
    { X86::SETBr, X86::SETBm, TB_FOLDED_STORE },
    { X86::SETEr, X86::SETEm, TB_FOLDED_STORE },
    { X86::SETGEr, X86::SETGEm, TB_FOLDED_STORE },
    { X86::SETGr, X86::SETGm, TB_FOLDED_STORE },
    { X86::SETLEr, X86::SETLEm, TB_FOLDED_STORE },
    { X86::SETLr, X86::SETLm, TB_FOLDED_STORE },
    { X86::SETNEr, X86::SETNEm, TB_FOLDED_STORE },
    { X86::SETNOr, X86::SETNOm, TB_FOLDED_STORE },
    { X86::SETNPr, X86::SETNPm, TB_FOLDED_STORE },
    { X86::SETNSr, X86::SETNSm, TB_FOLDED_STORE },
    { X86::SETOr, X86::SETOm, TB_FOLDED_STORE },
    { X86::SETPr, X86::SETPm, TB_FOLDED_STORE },
    { X86::SETSr, X86::SETSm, TB_FOLDED_STORE },
    { X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD },
    { X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD },
    { X86::TAILJMPr64_REX, X86::TAILJMPm64_REX, TB_FOLDED_LOAD },
    { X86::TEST16ri, X86::TEST16mi, TB_FOLDED_LOAD },
    { X86::TEST32ri, X86::TEST32mi, TB_FOLDED_LOAD },
    { X86::TEST64ri32, X86::TEST64mi32, TB_FOLDED_LOAD },
    { X86::TEST8ri, X86::TEST8mi, TB_FOLDED_LOAD },

    // AVX 128-bit versions of foldable instructions
    { X86::VEXTRACTPSrr, X86::VEXTRACTPSmr, TB_FOLDED_STORE },
    { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDrr, X86::VMOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPSrr, X86::VMOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQArr, X86::VMOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVPDI2DIrr, X86::VMOVPDI2DImr, TB_FOLDED_STORE },
    { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr, TB_FOLDED_STORE },
    { X86::VMOVSDto64rr, X86::VMOVSDto64mr, TB_FOLDED_STORE },
    { X86::VMOVSS2DIrr, X86::VMOVSS2DImr, TB_FOLDED_STORE },
    { X86::VMOVUPDrr, X86::VMOVUPDmr, TB_FOLDED_STORE },
    { X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE },
    { X86::VPEXTRDrr, X86::VPEXTRDmr, TB_FOLDED_STORE },
    { X86::VPEXTRQrr, X86::VPEXTRQmr, TB_FOLDED_STORE },

    // AVX 256-bit foldable instructions
    { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVAPSYrr, X86::VMOVAPSYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE },
    { X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions
    { X86::VMOVPDI2DIZrr, X86::VMOVPDI2DIZmr, TB_FOLDED_STORE },
    { X86::VMOVAPDZrr, X86::VMOVAPDZmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVAPSZrr, X86::VMOVAPSZmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
    { X86::VMOVUPDZrr, X86::VMOVUPDZmr, TB_FOLDED_STORE },
    { X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zmr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions (256-bit versions)
    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256mr, TB_FOLDED_STORE },
    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256mr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256mr, TB_FOLDED_STORE },

    // AVX-512 foldable instructions (128-bit versions)
    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128mr, TB_FOLDED_STORE },
    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128mr, TB_FOLDED_STORE },
    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE },

    // F16C foldable instructions
    { X86::VCVTPS2PHrr, X86::VCVTPS2PHmr, TB_FOLDED_STORE },
    { X86::VCVTPS2PHYrr, X86::VCVTPS2PHYmr, TB_FOLDED_STORE }
  };

  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable0); i != e; ++i) {
    unsigned RegOp = MemoryFoldTable0[i].RegOp;
    unsigned MemOp = MemoryFoldTable0[i].MemOp;
    unsigned Flags = MemoryFoldTable0[i].Flags;
    AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
                  RegOp, MemOp, TB_INDEX_0 | Flags);
  }
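
  // Illustrative sketch (not part of the original source, MIR-style
  // pseudo-notation): what an index-0 TB_FOLDED_STORE entry such as
  // { X86::MOV32rr, X86::MOV32mr } licenses. When the destination register of
  //
  //   %dst = MOV32rr %src
  //
  // is being spilled, the def operand (index 0) can be replaced by the five
  // X86 memory operands (base, scale, index, displacement, segment), storing
  // straight to the stack slot instead of going through a register:
  //
  //   MOV32mr %stack.0, 1, %noreg, 0, %noreg, %src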

  static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
    { X86::CMP16rr, X86::CMP16rm, 0 },
    { X86::CMP32rr, X86::CMP32rm, 0 },
    { X86::CMP64rr, X86::CMP64rm, 0 },
    { X86::CMP8rr, X86::CMP8rm, 0 },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm, 0 },
    { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm, 0 },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm, 0 },
    { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm, 0 },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm, 0 },
    { X86::CVTSS2SDrr, X86::CVTSS2SDrm, 0 },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm, 0 },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 },
    { X86::IMUL16rri, X86::IMUL16rmi, 0 },
    { X86::IMUL16rri8, X86::IMUL16rmi8, 0 },
    { X86::IMUL32rri, X86::IMUL32rmi, 0 },
    { X86::IMUL32rri8, X86::IMUL32rmi8, 0 },
    { X86::IMUL64rri32, X86::IMUL64rmi32, 0 },
    { X86::IMUL64rri8, X86::IMUL64rmi8, 0 },
    { X86::Int_COMISDrr, X86::Int_COMISDrm, 0 },
    { X86::Int_COMISSrr, X86::Int_COMISSrm, 0 },
    { X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 },
    { X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
    { X86::CVTSS2SI64rr, X86::CVTSS2SI64rm, 0 },
    { X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 },
    { X86::CVTDQ2PDrr, X86::CVTDQ2PDrm, TB_ALIGN_16 },
    { X86::CVTDQ2PSrr, X86::CVTDQ2PSrm, TB_ALIGN_16 },
    { X86::CVTPD2DQrr, X86::CVTPD2DQrm, TB_ALIGN_16 },
    { X86::CVTPD2PSrr, X86::CVTPD2PSrm, TB_ALIGN_16 },
    { X86::CVTPS2DQrr, X86::CVTPS2DQrm, TB_ALIGN_16 },
    { X86::CVTPS2PDrr, X86::CVTPS2PDrm, TB_ALIGN_16 },
    { X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 },
    { X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm, 0 },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm, 0 },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm, 0 },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm, 0 },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
    { X86::MOV16rr, X86::MOV16rm, 0 },
    { X86::MOV32rr, X86::MOV32rm, 0 },
    { X86::MOV64rr, X86::MOV64rm, 0 },
    { X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
    { X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
    { X86::MOV8rr, X86::MOV8rm, 0 },
    { X86::MOVAPDrr, X86::MOVAPDrm, TB_ALIGN_16 },
    { X86::MOVAPSrr, X86::MOVAPSrm, TB_ALIGN_16 },
    { X86::MOVDDUPrr, X86::MOVDDUPrm, 0 },
    { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 },
    { X86::MOVDQArr, X86::MOVDQArm, TB_ALIGN_16 },
    { X86::MOVSHDUPrr, X86::MOVSHDUPrm, TB_ALIGN_16 },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm, TB_ALIGN_16 },
    { X86::MOVSX16rr8, X86::MOVSX16rm8, 0 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16, 0 },
    { X86::MOVSX32rr8, X86::MOVSX32rm8, 0 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16, 0 },
    { X86::MOVSX64rr32, X86::MOVSX64rm32, 0 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8, 0 },
    { X86::MOVUPDrr, X86::MOVUPDrm, TB_ALIGN_16 },
    { X86::MOVUPSrr, X86::MOVUPSrm, 0 },
    { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm, 0 },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, TB_ALIGN_16 },
    { X86::MOVZX16rr8, X86::MOVZX16rm8, 0 },
    { X86::MOVZX32rr16, X86::MOVZX32rm16, 0 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8, 0 },
    { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
    { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
    { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
    { X86::PCMPESTRIrr, X86::PCMPESTRIrm, TB_ALIGN_16 },
    { X86::PCMPESTRM128rr, X86::PCMPESTRM128rm, TB_ALIGN_16 },
    { X86::PCMPISTRIrr, X86::PCMPISTRIrm, TB_ALIGN_16 },
    { X86::PCMPISTRM128rr, X86::PCMPISTRM128rm, TB_ALIGN_16 },
    { X86::PHMINPOSUWrr128, X86::PHMINPOSUWrm128, TB_ALIGN_16 },
    { X86::PMOVSXBDrr, X86::PMOVSXBDrm, TB_ALIGN_16 },
    { X86::PMOVSXBQrr, X86::PMOVSXBQrm, TB_ALIGN_16 },
    { X86::PMOVSXBWrr, X86::PMOVSXBWrm, TB_ALIGN_16 },
    { X86::PMOVSXDQrr, X86::PMOVSXDQrm, TB_ALIGN_16 },
    { X86::PMOVSXWDrr, X86::PMOVSXWDrm, TB_ALIGN_16 },
    { X86::PMOVSXWQrr, X86::PMOVSXWQrm, TB_ALIGN_16 },
    { X86::PMOVZXBDrr, X86::PMOVZXBDrm, TB_ALIGN_16 },
    { X86::PMOVZXBQrr, X86::PMOVZXBQrm, TB_ALIGN_16 },
    { X86::PMOVZXBWrr, X86::PMOVZXBWrm, TB_ALIGN_16 },
    { X86::PMOVZXDQrr, X86::PMOVZXDQrm, TB_ALIGN_16 },
    { X86::PMOVZXWDrr, X86::PMOVZXWDrm, TB_ALIGN_16 },
    { X86::PMOVZXWQrr, X86::PMOVZXWQrm, TB_ALIGN_16 },
    { X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 },
    { X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 },
    { X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
    { X86::PTESTrr, X86::PTESTrm, TB_ALIGN_16 },
    { X86::RCPPSr, X86::RCPPSm, TB_ALIGN_16 },
    { X86::RCPPSr_Int, X86::RCPPSm_Int, TB_ALIGN_16 },
    { X86::ROUNDPDr, X86::ROUNDPDm, TB_ALIGN_16 },
    { X86::ROUNDPSr, X86::ROUNDPSm, TB_ALIGN_16 },
    { X86::RSQRTPSr, X86::RSQRTPSm, TB_ALIGN_16 },
    { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int, TB_ALIGN_16 },
    { X86::RSQRTSSr, X86::RSQRTSSm, 0 },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int, 0 },
    { X86::SQRTPDr, X86::SQRTPDm, TB_ALIGN_16 },
    { X86::SQRTPSr, X86::SQRTPSm, TB_ALIGN_16 },
    { X86::SQRTSDr, X86::SQRTSDm, 0 },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int, 0 },
    { X86::SQRTSSr, X86::SQRTSSm, 0 },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int, 0 },
    { X86::TEST16rr, X86::TEST16rm, 0 },
    { X86::TEST32rr, X86::TEST32rm, 0 },
    { X86::TEST64rr, X86::TEST64rm, 0 },
    { X86::TEST8rr, X86::TEST8rm, 0 },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm, 0 },
    { X86::UCOMISSrr, X86::UCOMISSrm, 0 },

    // MMX version of foldable instructions
    { X86::MMX_CVTPD2PIirr, X86::MMX_CVTPD2PIirm, 0 },
    { X86::MMX_CVTPI2PDirr, X86::MMX_CVTPI2PDirm, 0 },
    { X86::MMX_CVTPS2PIirr, X86::MMX_CVTPS2PIirm, 0 },
    { X86::MMX_CVTTPD2PIirr, X86::MMX_CVTTPD2PIirm, 0 },
    { X86::MMX_CVTTPS2PIirr, X86::MMX_CVTTPS2PIirm, 0 },
    { X86::MMX_MOVD64to64rr, X86::MMX_MOVQ64rm, 0 },
    { X86::MMX_PABSBrr64, X86::MMX_PABSBrm64, 0 },
    { X86::MMX_PABSDrr64, X86::MMX_PABSDrm64, 0 },
    { X86::MMX_PABSWrr64, X86::MMX_PABSWrm64, 0 },
    { X86::MMX_PSHUFWri, X86::MMX_PSHUFWmi, 0 },

    // 3DNow! version of foldable instructions
    { X86::PF2IDrr, X86::PF2IDrm, 0 },
    { X86::PF2IWrr, X86::PF2IWrm, 0 },
    { X86::PFRCPrr, X86::PFRCPrm, 0 },
    { X86::PFRSQRTrr, X86::PFRSQRTrm, 0 },
    { X86::PI2FDrr, X86::PI2FDrm, 0 },
    { X86::PI2FWrr, X86::PI2FWrm, 0 },
    { X86::PSWAPDrr, X86::PSWAPDrm, 0 },

    // AVX 128-bit versions of foldable instructions
    { X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 },
    { X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 },
    { X86::Int_VUCOMISDrr, X86::Int_VUCOMISDrm, 0 },
    { X86::Int_VUCOMISSrr, X86::Int_VUCOMISSrm, 0 },
    { X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0 },
    { X86::Int_VCVTTSD2SI64rr, X86::Int_VCVTTSD2SI64rm, 0 },
    { X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0 },
    { X86::Int_VCVTTSD2SIrr, X86::Int_VCVTTSD2SIrm, 0 },
    { X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0 },
    { X86::Int_VCVTTSS2SI64rr, X86::Int_VCVTTSS2SI64rm, 0 },
    { X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0 },
    { X86::Int_VCVTTSS2SIrr, X86::Int_VCVTTSS2SIrm, 0 },
    { X86::VCVTSD2SI64rr, X86::VCVTSD2SI64rm, 0 },
    { X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 },
    { X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 },
    { X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
    { X86::VCVTDQ2PDrr, X86::VCVTDQ2PDrm, 0 },
    { X86::VCVTDQ2PSrr, X86::VCVTDQ2PSrm, 0 },
    { X86::VCVTPD2DQrr, X86::VCVTPD2DQXrm, 0 },
    { X86::VCVTPD2PSrr, X86::VCVTPD2PSXrm, 0 },
    { X86::VCVTPS2DQrr, X86::VCVTPS2DQrm, 0 },
    { X86::VCVTPS2PDrr, X86::VCVTPS2PDrm, 0 },
    { X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, 0 },
    { X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, 0 },
    { X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
    { X86::VMOV64toSDrr, X86::VMOV64toSDrm, 0 },
    { X86::VMOVAPDrr, X86::VMOVAPDrm, TB_ALIGN_16 },
    { X86::VMOVAPSrr, X86::VMOVAPSrm, TB_ALIGN_16 },
    { X86::VMOVDDUPrr, X86::VMOVDDUPrm, 0 },
    { X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
    { X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 },
    { X86::VMOVDQArr, X86::VMOVDQArm, TB_ALIGN_16 },
    { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm, 0 },
    { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, 0 },
    { X86::VMOVUPDrr, X86::VMOVUPDrm, 0 },
    { X86::VMOVUPSrr, X86::VMOVUPSrm, 0 },
    { X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 },
    { X86::VMOVZPQILo2PQIrr, X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 },
    { X86::VPABSBrr128, X86::VPABSBrm128, 0 },
    { X86::VPABSDrr128, X86::VPABSDrm128, 0 },
    { X86::VPABSWrr128, X86::VPABSWrm128, 0 },
    { X86::VPCMPESTRIrr, X86::VPCMPESTRIrm, 0 },
    { X86::VPCMPESTRM128rr, X86::VPCMPESTRM128rm, 0 },
    { X86::VPCMPISTRIrr, X86::VPCMPISTRIrm, 0 },
    { X86::VPCMPISTRM128rr, X86::VPCMPISTRM128rm, 0 },
    { X86::VPHMINPOSUWrr128, X86::VPHMINPOSUWrm128, 0 },
    { X86::VPERMILPDri, X86::VPERMILPDmi, 0 },
    { X86::VPERMILPSri, X86::VPERMILPSmi, 0 },
    { X86::VPMOVSXBDrr, X86::VPMOVSXBDrm, 0 },
    { X86::VPMOVSXBQrr, X86::VPMOVSXBQrm, 0 },
    { X86::VPMOVSXBWrr, X86::VPMOVSXBWrm, 0 },
    { X86::VPMOVSXDQrr, X86::VPMOVSXDQrm, 0 },
    { X86::VPMOVSXWDrr, X86::VPMOVSXWDrm, 0 },
    { X86::VPMOVSXWQrr, X86::VPMOVSXWQrm, 0 },
    { X86::VPMOVZXBDrr, X86::VPMOVZXBDrm, 0 },
    { X86::VPMOVZXBQrr, X86::VPMOVZXBQrm, 0 },
    { X86::VPMOVZXBWrr, X86::VPMOVZXBWrm, 0 },
    { X86::VPMOVZXDQrr, X86::VPMOVZXDQrm, 0 },
    { X86::VPMOVZXWDrr, X86::VPMOVZXWDrm, 0 },
    { X86::VPMOVZXWQrr, X86::VPMOVZXWQrm, 0 },
    { X86::VPSHUFDri, X86::VPSHUFDmi, 0 },
    { X86::VPSHUFHWri, X86::VPSHUFHWmi, 0 },
    { X86::VPSHUFLWri, X86::VPSHUFLWmi, 0 },
    { X86::VPTESTrr, X86::VPTESTrm, 0 },
    { X86::VRCPPSr, X86::VRCPPSm, 0 },
    { X86::VRCPPSr_Int, X86::VRCPPSm_Int, 0 },
    { X86::VROUNDPDr, X86::VROUNDPDm, 0 },
    { X86::VROUNDPSr, X86::VROUNDPSm, 0 },
    { X86::VRSQRTPSr, X86::VRSQRTPSm, 0 },
    { X86::VRSQRTPSr_Int, X86::VRSQRTPSm_Int, 0 },
    { X86::VSQRTPDr, X86::VSQRTPDm, 0 },
    { X86::VSQRTPSr, X86::VSQRTPSm, 0 },
    { X86::VTESTPDrr, X86::VTESTPDrm, 0 },
    { X86::VTESTPSrr, X86::VTESTPSrm, 0 },
    { X86::VUCOMISDrr, X86::VUCOMISDrm, 0 },
    { X86::VUCOMISSrr, X86::VUCOMISSrm, 0 },

    // AVX 256-bit foldable instructions
    { X86::VCVTDQ2PDYrr, X86::VCVTDQ2PDYrm, 0 },
    { X86::VCVTDQ2PSYrr, X86::VCVTDQ2PSYrm, 0 },
    { X86::VCVTPD2DQYrr, X86::VCVTPD2DQYrm, 0 },
    { X86::VCVTPD2PSYrr, X86::VCVTPD2PSYrm, 0 },
    { X86::VCVTPS2DQYrr, X86::VCVTPS2DQYrm, 0 },
    { X86::VCVTPS2PDYrr, X86::VCVTPS2PDYrm, 0 },
    { X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0 },
    { X86::VCVTTPS2DQYrr, X86::VCVTTPS2DQYrm, 0 },
    { X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
    { X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
    { X86::VMOVDDUPYrr, X86::VMOVDDUPYrm, 0 },
    { X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_32 },
    { X86::VMOVSLDUPYrr, X86::VMOVSLDUPYrm, 0 },
    { X86::VMOVSHDUPYrr, X86::VMOVSHDUPYrm, 0 },
    { X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 },
    { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
    { X86::VPERMILPDYri, X86::VPERMILPDYmi, 0 },
    { X86::VPERMILPSYri, X86::VPERMILPSYmi, 0 },
    { X86::VPTESTYrr, X86::VPTESTYrm, 0 },
    { X86::VRCPPSYr, X86::VRCPPSYm, 0 },
    { X86::VRCPPSYr_Int, X86::VRCPPSYm_Int, 0 },
    { X86::VROUNDYPDr, X86::VROUNDYPDm, 0 },
    { X86::VROUNDYPSr, X86::VROUNDYPSm, 0 },
    { X86::VRSQRTPSYr, X86::VRSQRTPSYm, 0 },
    { X86::VRSQRTPSYr_Int, X86::VRSQRTPSYm_Int, 0 },
    { X86::VSQRTPDYr, X86::VSQRTPDYm, 0 },
    { X86::VSQRTPSYr, X86::VSQRTPSYm, 0 },
    { X86::VTESTPDYrr, X86::VTESTPDYrm, 0 },
    { X86::VTESTPSYrr, X86::VTESTPSYrm, 0 },

    // AVX2 foldable instructions

    // VBROADCASTS{SD}rr register instructions were an AVX2 addition while the
    // VBROADCASTS{SD}rm memory instructions were available from AVX1.
    // TB_NO_REVERSE prevents unfolding from introducing an illegal instruction
    // on AVX1 targets. The VPBROADCAST instructions are all AVX2 instructions
    // so they don't need an equivalent limitation.
    { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE },
    { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE },
    { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE },
    { X86::VPABSBrr256, X86::VPABSBrm256, 0 },
    { X86::VPABSDrr256, X86::VPABSDrm256, 0 },
    { X86::VPABSWrr256, X86::VPABSWrm256, 0 },
    { X86::VPBROADCASTBrr, X86::VPBROADCASTBrm, 0 },
    { X86::VPBROADCASTBYrr, X86::VPBROADCASTBYrm, 0 },
    { X86::VPBROADCASTDrr, X86::VPBROADCASTDrm, 0 },
    { X86::VPBROADCASTDYrr, X86::VPBROADCASTDYrm, 0 },
    { X86::VPBROADCASTQrr, X86::VPBROADCASTQrm, 0 },
    { X86::VPBROADCASTQYrr, X86::VPBROADCASTQYrm, 0 },
    { X86::VPBROADCASTWrr, X86::VPBROADCASTWrm, 0 },
    { X86::VPBROADCASTWYrr, X86::VPBROADCASTWYrm, 0 },
    { X86::VPERMPDYri, X86::VPERMPDYmi, 0 },
    { X86::VPERMQYri, X86::VPERMQYmi, 0 },
    { X86::VPMOVSXBDYrr, X86::VPMOVSXBDYrm, 0 },
    { X86::VPMOVSXBQYrr, X86::VPMOVSXBQYrm, 0 },
    { X86::VPMOVSXBWYrr, X86::VPMOVSXBWYrm, 0 },
    { X86::VPMOVSXDQYrr, X86::VPMOVSXDQYrm, 0 },
    { X86::VPMOVSXWDYrr, X86::VPMOVSXWDYrm, 0 },
    { X86::VPMOVSXWQYrr, X86::VPMOVSXWQYrm, 0 },
    { X86::VPMOVZXBDYrr, X86::VPMOVZXBDYrm, 0 },
    { X86::VPMOVZXBQYrr, X86::VPMOVZXBQYrm, 0 },
    { X86::VPMOVZXBWYrr, X86::VPMOVZXBWYrm, 0 },
    { X86::VPMOVZXDQYrr, X86::VPMOVZXDQYrm, 0 },
    { X86::VPMOVZXWDYrr, X86::VPMOVZXWDYrm, 0 },
    { X86::VPMOVZXWQYrr, X86::VPMOVZXWQYrm, 0 },
    { X86::VPSHUFDYri, X86::VPSHUFDYmi, 0 },
    { X86::VPSHUFHWYri, X86::VPSHUFHWYmi, 0 },
    { X86::VPSHUFLWYri, X86::VPSHUFLWYmi, 0 },

    // XOP foldable instructions
    { X86::VFRCZPDrr, X86::VFRCZPDrm, 0 },
    { X86::VFRCZPDrrY, X86::VFRCZPDrmY, 0 },
    { X86::VFRCZPSrr, X86::VFRCZPSrm, 0 },
    { X86::VFRCZPSrrY, X86::VFRCZPSrmY, 0 },
    { X86::VFRCZSDrr, X86::VFRCZSDrm, 0 },
    { X86::VFRCZSSrr, X86::VFRCZSSrm, 0 },
    { X86::VPHADDBDrr, X86::VPHADDBDrm, 0 },
    { X86::VPHADDBQrr, X86::VPHADDBQrm, 0 },
    { X86::VPHADDBWrr, X86::VPHADDBWrm, 0 },
    { X86::VPHADDDQrr, X86::VPHADDDQrm, 0 },
    { X86::VPHADDWDrr, X86::VPHADDWDrm, 0 },
    { X86::VPHADDWQrr, X86::VPHADDWQrm, 0 },
    { X86::VPHADDUBDrr, X86::VPHADDUBDrm, 0 },
    { X86::VPHADDUBQrr, X86::VPHADDUBQrm, 0 },
    { X86::VPHADDUBWrr, X86::VPHADDUBWrm, 0 },
    { X86::VPHADDUDQrr, X86::VPHADDUDQrm, 0 },
    { X86::VPHADDUWDrr, X86::VPHADDUWDrm, 0 },
    { X86::VPHADDUWQrr, X86::VPHADDUWQrm, 0 },
    { X86::VPHSUBBWrr, X86::VPHSUBBWrm, 0 },
    { X86::VPHSUBDQrr, X86::VPHSUBDQrm, 0 },
    { X86::VPHSUBWDrr, X86::VPHSUBWDrm, 0 },
    { X86::VPROTBri, X86::VPROTBmi, 0 },
    { X86::VPROTBrr, X86::VPROTBmr, 0 },
    { X86::VPROTDri, X86::VPROTDmi, 0 },
    { X86::VPROTDrr, X86::VPROTDmr, 0 },
    { X86::VPROTQri, X86::VPROTQmi, 0 },
    { X86::VPROTQrr, X86::VPROTQmr, 0 },
    { X86::VPROTWri, X86::VPROTWmi, 0 },
    { X86::VPROTWrr, X86::VPROTWmr, 0 },
    { X86::VPSHABrr, X86::VPSHABmr, 0 },
    { X86::VPSHADrr, X86::VPSHADmr, 0 },
    { X86::VPSHAQrr, X86::VPSHAQmr, 0 },
    { X86::VPSHAWrr, X86::VPSHAWmr, 0 },
    { X86::VPSHLBrr, X86::VPSHLBmr, 0 },
    { X86::VPSHLDrr, X86::VPSHLDmr, 0 },
    { X86::VPSHLQrr, X86::VPSHLQmr, 0 },
    { X86::VPSHLWrr, X86::VPSHLWmr, 0 },

    // BMI/BMI2/LZCNT/POPCNT/TBM foldable instructions
    { X86::BEXTR32rr, X86::BEXTR32rm, 0 },
    { X86::BEXTR64rr, X86::BEXTR64rm, 0 },
    { X86::BEXTRI32ri, X86::BEXTRI32mi, 0 },
    { X86::BEXTRI64ri, X86::BEXTRI64mi, 0 },
    { X86::BLCFILL32rr, X86::BLCFILL32rm, 0 },
    { X86::BLCFILL64rr, X86::BLCFILL64rm, 0 },
    { X86::BLCI32rr, X86::BLCI32rm, 0 },
    { X86::BLCI64rr, X86::BLCI64rm, 0 },
    { X86::BLCIC32rr, X86::BLCIC32rm, 0 },
    { X86::BLCIC64rr, X86::BLCIC64rm, 0 },
    { X86::BLCMSK32rr, X86::BLCMSK32rm, 0 },
    { X86::BLCMSK64rr, X86::BLCMSK64rm, 0 },
    { X86::BLCS32rr, X86::BLCS32rm, 0 },
    { X86::BLCS64rr, X86::BLCS64rm, 0 },
    { X86::BLSFILL32rr, X86::BLSFILL32rm, 0 },
    { X86::BLSFILL64rr, X86::BLSFILL64rm, 0 },
    { X86::BLSI32rr, X86::BLSI32rm, 0 },
    { X86::BLSI64rr, X86::BLSI64rm, 0 },
    { X86::BLSIC32rr, X86::BLSIC32rm, 0 },
    { X86::BLSIC64rr, X86::BLSIC64rm, 0 },
    { X86::BLSMSK32rr, X86::BLSMSK32rm, 0 },
    { X86::BLSMSK64rr, X86::BLSMSK64rm, 0 },
    { X86::BLSR32rr, X86::BLSR32rm, 0 },
    { X86::BLSR64rr, X86::BLSR64rm, 0 },
    { X86::BZHI32rr, X86::BZHI32rm, 0 },
    { X86::BZHI64rr, X86::BZHI64rm, 0 },
    { X86::LZCNT16rr, X86::LZCNT16rm, 0 },
    { X86::LZCNT32rr, X86::LZCNT32rm, 0 },
    { X86::LZCNT64rr, X86::LZCNT64rm, 0 },
    { X86::POPCNT16rr, X86::POPCNT16rm, 0 },
    { X86::POPCNT32rr, X86::POPCNT32rm, 0 },
    { X86::POPCNT64rr, X86::POPCNT64rm, 0 },
    { X86::RORX32ri, X86::RORX32mi, 0 },
    { X86::RORX64ri, X86::RORX64mi, 0 },
    { X86::SARX32rr, X86::SARX32rm, 0 },
    { X86::SARX64rr, X86::SARX64rm, 0 },
    { X86::SHRX32rr, X86::SHRX32rm, 0 },
    { X86::SHRX64rr, X86::SHRX64rm, 0 },
    { X86::SHLX32rr, X86::SHLX32rm, 0 },
    { X86::SHLX64rr, X86::SHLX64rm, 0 },
    { X86::T1MSKC32rr, X86::T1MSKC32rm, 0 },
    { X86::T1MSKC64rr, X86::T1MSKC64rm, 0 },
    { X86::TZCNT16rr, X86::TZCNT16rm, 0 },
    { X86::TZCNT32rr, X86::TZCNT32rm, 0 },
    { X86::TZCNT64rr, X86::TZCNT64rm, 0 },
    { X86::TZMSK32rr, X86::TZMSK32rm, 0 },
    { X86::TZMSK64rr, X86::TZMSK64rm, 0 },

    // AVX-512 foldable instructions
    { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
    { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
    { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
    { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
    { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
    { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
    { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
    { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
    { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
    { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
    { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
    { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
    { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },

    // AVX-512 foldable instructions (256-bit versions)
    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
    { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
    { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },

    // AVX-512 foldable instructions (128-bit versions)
    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
    { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },

    // F16C foldable instructions
    { X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
    { X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },

    // AES foldable instructions
    { X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
    { X86::AESKEYGENASSIST128rr, X86::AESKEYGENASSIST128rm, TB_ALIGN_16 },
    { X86::VAESIMCrr, X86::VAESIMCrm, 0 },
    { X86::VAESKEYGENASSIST128rr, X86::VAESKEYGENASSIST128rm, 0 }
  };

  for (unsigned i = 0, e = array_lengthof(MemoryFoldTable1); i != e; ++i) {
    unsigned RegOp = MemoryFoldTable1[i].RegOp;
    unsigned MemOp = MemoryFoldTable1[i].MemOp;
    unsigned Flags = MemoryFoldTable1[i].Flags;
    AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
                  RegOp, MemOp,
                  // Index 1, folded load
                  Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
  }
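
  // Illustrative sketch (not part of the original source): the paired maps run
  // in opposite directions. RegOp2MemOpTable* answers "can this register form
  // fold a memory operand?", while MemOp2RegOpTable answers "can this memory
  // form be unfolded back into registers?". Assuming MemOp2RegOpTable maps a
  // memory opcode back to a (RegOp, Flags) pair, an unfold query looks like:
  //
  //   auto I = MemOp2RegOpTable.find(MI->getOpcode());
  //   if (I == MemOp2RegOpTable.end())
  //     return false; // no reverse mapping, or suppressed by TB_NO_REVERSE
  //   unsigned RegOpc = I->second.first;
  //   unsigned Index  = I->second.second & TB_INDEX_MASK;
  //
  // TB_NO_REVERSE entries (e.g. the VBROADCASTSS rows above, whose register
  // forms require AVX2 while the memory forms are AVX1) never enter
  // MemOp2RegOpTable, so unfolding cannot introduce an illegal instruction.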

  static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
    { X86::ADC32rr, X86::ADC32rm, 0 },
    { X86::ADC64rr, X86::ADC64rm, 0 },
    { X86::ADD16rr, X86::ADD16rm, 0 },
    { X86::ADD16rr_DB, X86::ADD16rm, TB_NO_REVERSE },
    { X86::ADD32rr, X86::ADD32rm, 0 },
    { X86::ADD32rr_DB, X86::ADD32rm, TB_NO_REVERSE },
    { X86::ADD64rr, X86::ADD64rm, 0 },
    { X86::ADD64rr_DB, X86::ADD64rm, TB_NO_REVERSE },
    { X86::ADD8rr, X86::ADD8rm, 0 },
    { X86::ADDPDrr, X86::ADDPDrm, TB_ALIGN_16 },
    { X86::ADDPSrr, X86::ADDPSrm, TB_ALIGN_16 },
    { X86::ADDSDrr, X86::ADDSDrm, 0 },
    { X86::ADDSDrr_Int, X86::ADDSDrm_Int, 0 },
    { X86::ADDSSrr, X86::ADDSSrm, 0 },
    { X86::ADDSSrr_Int, X86::ADDSSrm_Int, 0 },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm, TB_ALIGN_16 },
    { X86::ADDSUBPSrr, X86::ADDSUBPSrm, TB_ALIGN_16 },
    { X86::AND16rr, X86::AND16rm, 0 },
    { X86::AND32rr, X86::AND32rm, 0 },
    { X86::AND64rr, X86::AND64rm, 0 },
    { X86::AND8rr, X86::AND8rm, 0 },
    { X86::ANDNPDrr, X86::ANDNPDrm, TB_ALIGN_16 },
    { X86::ANDNPSrr, X86::ANDNPSrm, TB_ALIGN_16 },
    { X86::ANDPDrr, X86::ANDPDrm, TB_ALIGN_16 },
    { X86::ANDPSrr, X86::ANDPSrm, TB_ALIGN_16 },
    { X86::BLENDPDrri, X86::BLENDPDrmi, TB_ALIGN_16 },
    { X86::BLENDPSrri, X86::BLENDPSrmi, TB_ALIGN_16 },
    { X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16 },
    { X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16 },
    { X86::CMOVA16rr, X86::CMOVA16rm, 0 },
    { X86::CMOVA32rr, X86::CMOVA32rm, 0 },
    { X86::CMOVA64rr, X86::CMOVA64rm, 0 },
    { X86::CMOVAE16rr, X86::CMOVAE16rm, 0 },
    { X86::CMOVAE32rr, X86::CMOVAE32rm, 0 },
    { X86::CMOVAE64rr, X86::CMOVAE64rm, 0 },
    { X86::CMOVB16rr, X86::CMOVB16rm, 0 },
    { X86::CMOVB32rr, X86::CMOVB32rm, 0 },
    { X86::CMOVB64rr, X86::CMOVB64rm, 0 },
    { X86::CMOVBE16rr, X86::CMOVBE16rm, 0 },
    { X86::CMOVBE32rr, X86::CMOVBE32rm, 0 },
    { X86::CMOVBE64rr, X86::CMOVBE64rm, 0 },
    { X86::CMOVE16rr, X86::CMOVE16rm, 0 },
    { X86::CMOVE32rr, X86::CMOVE32rm, 0 },
    { X86::CMOVE64rr, X86::CMOVE64rm, 0 },
    { X86::CMOVG16rr, X86::CMOVG16rm, 0 },
    { X86::CMOVG32rr, X86::CMOVG32rm, 0 },
    { X86::CMOVG64rr, X86::CMOVG64rm, 0 },
    { X86::CMOVGE16rr, X86::CMOVGE16rm, 0 },
    { X86::CMOVGE32rr, X86::CMOVGE32rm, 0 },
    { X86::CMOVGE64rr, X86::CMOVGE64rm, 0 },
    { X86::CMOVL16rr, X86::CMOVL16rm, 0 },
    { X86::CMOVL32rr, X86::CMOVL32rm, 0 },
    { X86::CMOVL64rr, X86::CMOVL64rm, 0 },
    { X86::CMOVLE16rr, X86::CMOVLE16rm, 0 },
    { X86::CMOVLE32rr, X86::CMOVLE32rm, 0 },
    { X86::CMOVLE64rr, X86::CMOVLE64rm, 0 },
    { X86::CMOVNE16rr, X86::CMOVNE16rm, 0 },
    { X86::CMOVNE32rr, X86::CMOVNE32rm, 0 },
    { X86::CMOVNE64rr, X86::CMOVNE64rm, 0 },
    { X86::CMOVNO16rr, X86::CMOVNO16rm, 0 },
    { X86::CMOVNO32rr, X86::CMOVNO32rm, 0 },
    { X86::CMOVNO64rr, X86::CMOVNO64rm, 0 },
    { X86::CMOVNP16rr, X86::CMOVNP16rm, 0 },
    { X86::CMOVNP32rr, X86::CMOVNP32rm, 0 },
    { X86::CMOVNP64rr, X86::CMOVNP64rm, 0 },
    { X86::CMOVNS16rr, X86::CMOVNS16rm, 0 },
    { X86::CMOVNS32rr, X86::CMOVNS32rm, 0 },
    { X86::CMOVNS64rr, X86::CMOVNS64rm, 0 },
    { X86::CMOVO16rr, X86::CMOVO16rm, 0 },
    { X86::CMOVO32rr, X86::CMOVO32rm, 0 },
    { X86::CMOVO64rr, X86::CMOVO64rm, 0 },
    { X86::CMOVP16rr, X86::CMOVP16rm, 0 },
    { X86::CMOVP32rr, X86::CMOVP32rm, 0 },
    { X86::CMOVP64rr, X86::CMOVP64rm, 0 },
    { X86::CMOVS16rr, X86::CMOVS16rm, 0 },
    { X86::CMOVS32rr, X86::CMOVS32rm, 0 },
    { X86::CMOVS64rr, X86::CMOVS64rm, 0 },
    { X86::CMPPDrri, X86::CMPPDrmi, TB_ALIGN_16 },
    { X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16 },
    { X86::CMPSDrr, X86::CMPSDrm, 0 },
    { X86::CMPSSrr, X86::CMPSSrm, 0 },
    { X86::CRC32r32r32, X86::CRC32r32m32, 0 },
    { X86::CRC32r64r64, X86::CRC32r64m64, 0 },
    { X86::DIVPDrr, X86::DIVPDrm, TB_ALIGN_16 },
    { X86::DIVPSrr, X86::DIVPSrm, TB_ALIGN_16 },
    { X86::DIVSDrr, X86::DIVSDrm, 0 },
    { X86::DIVSDrr_Int, X86::DIVSDrm_Int, 0 },
    { X86::DIVSSrr, X86::DIVSSrm, 0 },
    { X86::DIVSSrr_Int, X86::DIVSSrm_Int, 0 },
    { X86::DPPDrri, X86::DPPDrmi, TB_ALIGN_16 },
    { X86::DPPSrri, X86::DPPSrmi, TB_ALIGN_16 },

    // FIXME: We should not be folding Fs* scalar loads into vector
    // instructions because the vector instructions require vector-sized
    // loads. Lowering should create vector-sized instructions (the Fv*
    // variants below) to allow load folding.
    { X86::FsANDNPDrr, X86::FsANDNPDrm, TB_ALIGN_16 },
    { X86::FsANDNPSrr, X86::FsANDNPSrm, TB_ALIGN_16 },
    { X86::FsANDPDrr, X86::FsANDPDrm, TB_ALIGN_16 },
    { X86::FsANDPSrr, X86::FsANDPSrm, TB_ALIGN_16 },
    { X86::FsORPDrr, X86::FsORPDrm, TB_ALIGN_16 },
    { X86::FsORPSrr, X86::FsORPSrm, TB_ALIGN_16 },
    { X86::FsXORPDrr, X86::FsXORPDrm, TB_ALIGN_16 },
    { X86::FsXORPSrr, X86::FsXORPSrm, TB_ALIGN_16 },

    { X86::FvANDNPDrr, X86::FvANDNPDrm, TB_ALIGN_16 },
    { X86::FvANDNPSrr, X86::FvANDNPSrm, TB_ALIGN_16 },
    { X86::FvANDPDrr, X86::FvANDPDrm, TB_ALIGN_16 },
    { X86::FvANDPSrr, X86::FvANDPSrm, TB_ALIGN_16 },
    { X86::FvORPDrr, X86::FvORPDrm, TB_ALIGN_16 },
    { X86::FvORPSrr, X86::FvORPSrm, TB_ALIGN_16 },
    { X86::FvXORPDrr, X86::FvXORPDrm, TB_ALIGN_16 },
    { X86::FvXORPSrr, X86::FvXORPSrm, TB_ALIGN_16 },
    { X86::HADDPDrr, X86::HADDPDrm, TB_ALIGN_16 },
    { X86::HADDPSrr, X86::HADDPSrm, TB_ALIGN_16 },
    { X86::HSUBPDrr, X86::HSUBPDrm, TB_ALIGN_16 },
    { X86::HSUBPSrr, X86::HSUBPSrm, TB_ALIGN_16 },
    { X86::IMUL16rr, X86::IMUL16rm, 0 },
    { X86::IMUL32rr, X86::IMUL32rm, 0 },
    { X86::IMUL64rr, X86::IMUL64rm, 0 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm, 0 },
    { X86::Int_CMPSSrr, X86::Int_CMPSSrm, 0 },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm, 0 },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm, 0 },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
    { X86::MAXPDrr, X86::MAXPDrm, TB_ALIGN_16 },
    { X86::MAXPSrr, X86::MAXPSrm, TB_ALIGN_16 },
    { X86::MAXSDrr, X86::MAXSDrm, 0 },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int, 0 },
    { X86::MAXSSrr, X86::MAXSSrm, 0 },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int, 0 },
    { X86::MINPDrr, X86::MINPDrm, TB_ALIGN_16 },
    { X86::MINPSrr, X86::MINPSrm, TB_ALIGN_16 },
    { X86::MINSDrr, X86::MINSDrm, 0 },
    { X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
    { X86::MINSSrr, X86::MINSSrm, 0 },
    { X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
    { X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
    { X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
    { X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
    { X86::MULSDrr, X86::MULSDrm, 0 },
    { X86::MULSDrr_Int, X86::MULSDrm_Int, 0 },
    { X86::MULSSrr, X86::MULSSrm, 0 },
    { X86::MULSSrr_Int, X86::MULSSrm_Int, 0 },
    { X86::OR16rr, X86::OR16rm, 0 },
    { X86::OR32rr, X86::OR32rm, 0 },
    { X86::OR64rr, X86::OR64rm, 0 },
    { X86::OR8rr, X86::OR8rm, 0 },
    { X86::ORPDrr, X86::ORPDrm, TB_ALIGN_16 },
    { X86::ORPSrr, X86::ORPSrm, TB_ALIGN_16 },
    { X86::PACKSSDWrr, X86::PACKSSDWrm, TB_ALIGN_16 },
    { X86::PACKSSWBrr, X86::PACKSSWBrm, TB_ALIGN_16 },
    { X86::PACKUSDWrr, X86::PACKUSDWrm, TB_ALIGN_16 },
    { X86::PACKUSWBrr, X86::PACKUSWBrm, TB_ALIGN_16 },
    { X86::PADDBrr, X86::PADDBrm, TB_ALIGN_16 },
    { X86::PADDDrr, X86::PADDDrm, TB_ALIGN_16 },
    { X86::PADDQrr, X86::PADDQrm, TB_ALIGN_16 },
    { X86::PADDSBrr, X86::PADDSBrm, TB_ALIGN_16 },
    { X86::PADDSWrr, X86::PADDSWrm, TB_ALIGN_16 },
    { X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
    { X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
    { X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
    { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
    { X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
    { X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
    { X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
    { X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 },
    { X86::PBLENDVBrr0, X86::PBLENDVBrm0, TB_ALIGN_16 },
    { X86::PBLENDWrri, X86::PBLENDWrmi, TB_ALIGN_16 },
    { X86::PCLMULQDQrr, X86::PCLMULQDQrm, TB_ALIGN_16 },
    { X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 },
    { X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 },
    { X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 },
    { X86::PCMPEQWrr, X86::PCMPEQWrm, TB_ALIGN_16 },
    { X86::PCMPGTBrr, X86::PCMPGTBrm, TB_ALIGN_16 },
    { X86::PCMPGTDrr, X86::PCMPGTDrm, TB_ALIGN_16 },
    { X86::PCMPGTQrr, X86::PCMPGTQrm, TB_ALIGN_16 },
    { X86::PCMPGTWrr, X86::PCMPGTWrm, TB_ALIGN_16 },
    { X86::PHADDDrr, X86::PHADDDrm, TB_ALIGN_16 },
    { X86::PHADDWrr, X86::PHADDWrm, TB_ALIGN_16 },
    { X86::PHADDSWrr128, X86::PHADDSWrm128, TB_ALIGN_16 },
    { X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16 },
    { X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 },
    { X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16 },
    { X86::PINSRBrr, X86::PINSRBrm, 0 },
    { X86::PINSRDrr, X86::PINSRDrm, 0 },
    { X86::PINSRQrr, X86::PINSRQrm, 0 },
    { X86::PINSRWrri, X86::PINSRWrmi, 0 },
    { X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 },
    { X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 },
    { X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 },
    { X86::PMAXUBrr, X86::PMAXUBrm, TB_ALIGN_16 },
    { X86::PMINSWrr, X86::PMINSWrm, TB_ALIGN_16 },
    { X86::PMINUBrr, X86::PMINUBrm, TB_ALIGN_16 },
    { X86::PMINSBrr, X86::PMINSBrm, TB_ALIGN_16 },
    { X86::PMINSDrr, X86::PMINSDrm, TB_ALIGN_16 },
    { X86::PMINUDrr, X86::PMINUDrm, TB_ALIGN_16 },
    { X86::PMINUWrr, X86::PMINUWrm, TB_ALIGN_16 },
    { X86::PMAXSBrr, X86::PMAXSBrm, TB_ALIGN_16 },
    { X86::PMAXSDrr, X86::PMAXSDrm, TB_ALIGN_16 },
    { X86::PMAXUDrr, X86::PMAXUDrm, TB_ALIGN_16 },
    { X86::PMAXUWrr, X86::PMAXUWrm, TB_ALIGN_16 },
    { X86::PMULDQrr, X86::PMULDQrm, TB_ALIGN_16 },
    { X86::PMULHRSWrr128, X86::PMULHRSWrm128, TB_ALIGN_16 },
    { X86::PMULHUWrr, X86::PMULHUWrm, TB_ALIGN_16 },
    { X86::PMULHWrr, X86::PMULHWrm, TB_ALIGN_16 },
    { X86::PMULLDrr, X86::PMULLDrm, TB_ALIGN_16 },
    { X86::PMULLWrr, X86::PMULLWrm, TB_ALIGN_16 },
    { X86::PMULUDQrr, X86::PMULUDQrm, TB_ALIGN_16 },
    { X86::PORrr, X86::PORrm, TB_ALIGN_16 },
    { X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
    { X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 },
    { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 },
    { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 },
    { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 },
    { X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
    { X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
    { X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
    { X86::PSRADrr, X86::PSRADrm, TB_ALIGN_16 },
    { X86::PSRAWrr, X86::PSRAWrm, TB_ALIGN_16 },
    { X86::PSRLDrr, X86::PSRLDrm, TB_ALIGN_16 },
    { X86::PSRLQrr, X86::PSRLQrm, TB_ALIGN_16 },
    { X86::PSRLWrr, X86::PSRLWrm, TB_ALIGN_16 },
    { X86::PSUBBrr, X86::PSUBBrm, TB_ALIGN_16 },
    { X86::PSUBDrr, X86::PSUBDrm, TB_ALIGN_16 },
    { X86::PSUBQrr, X86::PSUBQrm, TB_ALIGN_16 },
    { X86::PSUBSBrr, X86::PSUBSBrm, TB_ALIGN_16 },
    { X86::PSUBSWrr, X86::PSUBSWrm, TB_ALIGN_16 },
    { X86::PSUBUSBrr, X86::PSUBUSBrm, TB_ALIGN_16 },
    { X86::PSUBUSWrr, X86::PSUBUSWrm, TB_ALIGN_16 },
    { X86::PSUBWrr, X86::PSUBWrm, TB_ALIGN_16 },
    { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm, TB_ALIGN_16 },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm, TB_ALIGN_16 },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm, TB_ALIGN_16 },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm, TB_ALIGN_16 },
    { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm, TB_ALIGN_16 },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm, TB_ALIGN_16 },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm, TB_ALIGN_16 },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm, TB_ALIGN_16 },
    { X86::PXORrr, X86::PXORrm, TB_ALIGN_16 },
    { X86::SBB32rr, X86::SBB32rm, 0 },
    { X86::SBB64rr, X86::SBB64rm, 0 },
    { X86::SHUFPDrri, X86::SHUFPDrmi, TB_ALIGN_16 },
    { X86::SHUFPSrri, X86::SHUFPSrmi, TB_ALIGN_16 },
    { X86::SUB16rr, X86::SUB16rm, 0 },
    { X86::SUB32rr, X86::SUB32rm, 0 },
    { X86::SUB64rr, X86::SUB64rm, 0 },
    { X86::SUB8rr, X86::SUB8rm, 0 },
    { X86::SUBPDrr, X86::SUBPDrm, TB_ALIGN_16 },
    { X86::SUBPSrr, X86::SUBPSrm, TB_ALIGN_16 },
    { X86::SUBSDrr, X86::SUBSDrm, 0 },
    { X86::SUBSDrr_Int, X86::SUBSDrm_Int, 0 },
    { X86::SUBSSrr, X86::SUBSSrm, 0 },
    { X86::SUBSSrr_Int, X86::SUBSSrm_Int, 0 },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm, TB_ALIGN_16 },
    { X86::UNPCKHPSrr, X86::UNPCKHPSrm, TB_ALIGN_16 },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm, TB_ALIGN_16 },
    { X86::UNPCKLPSrr, X86::UNPCKLPSrm, TB_ALIGN_16 },
    { X86::XOR16rr, X86::XOR16rm, 0 },
    { X86::XOR32rr, X86::XOR32rm, 0 },
    { X86::XOR64rr, X86::XOR64rm, 0 },
    { X86::XOR8rr, X86::XOR8rm, 0 },
    { X86::XORPDrr, X86::XORPDrm, TB_ALIGN_16 },
    { X86::XORPSrr, X86::XORPSrm, TB_ALIGN_16 },

    // MMX version of foldable instructions
    { X86::MMX_CVTPI2PSirr, X86::MMX_CVTPI2PSirm, 0 },
    { X86::MMX_PACKSSDWirr, X86::MMX_PACKSSDWirm, 0 },
    { X86::MMX_PACKSSWBirr, X86::MMX_PACKSSWBirm, 0 },
    { X86::MMX_PACKUSWBirr, X86::MMX_PACKUSWBirm, 0 },
    { X86::MMX_PADDBirr, X86::MMX_PADDBirm, 0 },
    { X86::MMX_PADDDirr, X86::MMX_PADDDirm, 0 },
    { X86::MMX_PADDQirr, X86::MMX_PADDQirm, 0 },
    { X86::MMX_PADDSBirr, X86::MMX_PADDSBirm, 0 },
    { X86::MMX_PADDSWirr, X86::MMX_PADDSWirm, 0 },
    { X86::MMX_PADDUSBirr, X86::MMX_PADDUSBirm, 0 },
    { X86::MMX_PADDUSWirr, X86::MMX_PADDUSWirm, 0 },
    { X86::MMX_PADDWirr, X86::MMX_PADDWirm, 0 },
    { X86::MMX_PALIGNR64irr, X86::MMX_PALIGNR64irm, 0 },
    { X86::MMX_PANDNirr, X86::MMX_PANDNirm, 0 },
    { X86::MMX_PANDirr, X86::MMX_PANDirm, 0 },
    { X86::MMX_PAVGBirr, X86::MMX_PAVGBirm, 0 },
    { X86::MMX_PAVGWirr, X86::MMX_PAVGWirm, 0 },
    { X86::MMX_PCMPEQBirr, X86::MMX_PCMPEQBirm, 0 },
    { X86::MMX_PCMPEQDirr, X86::MMX_PCMPEQDirm, 0 },
    { X86::MMX_PCMPEQWirr, X86::MMX_PCMPEQWirm, 0 },
    { X86::MMX_PCMPGTBirr, X86::MMX_PCMPGTBirm, 0 },
    { X86::MMX_PCMPGTDirr, X86::MMX_PCMPGTDirm, 0 },
    { X86::MMX_PCMPGTWirr, X86::MMX_PCMPGTWirm, 0 },
    { X86::MMX_PHADDSWrr64, X86::MMX_PHADDSWrm64, 0 },
    { X86::MMX_PHADDWrr64, X86::MMX_PHADDWrm64, 0 },
    { X86::MMX_PHADDrr64, X86::MMX_PHADDrm64, 0 },
    { X86::MMX_PHSUBDrr64, X86::MMX_PHSUBDrm64, 0 },
    { X86::MMX_PHSUBSWrr64, X86::MMX_PHSUBSWrm64, 0 },
    { X86::MMX_PHSUBWrr64, X86::MMX_PHSUBWrm64, 0 },
    { X86::MMX_PINSRWirri, X86::MMX_PINSRWirmi, 0 },
    { X86::MMX_PMADDUBSWrr64, X86::MMX_PMADDUBSWrm64, 0 },
    { X86::MMX_PMADDWDirr, X86::MMX_PMADDWDirm, 0 },
    { X86::MMX_PMAXSWirr, X86::MMX_PMAXSWirm, 0 },
    { X86::MMX_PMAXUBirr, X86::MMX_PMAXUBirm, 0 },
    { X86::MMX_PMINSWirr, X86::MMX_PMINSWirm, 0 },
    { X86::MMX_PMINUBirr, X86::MMX_PMINUBirm, 0 },
    { X86::MMX_PMULHRSWrr64, X86::MMX_PMULHRSWrm64, 0 },
    { X86::MMX_PMULHUWirr, X86::MMX_PMULHUWirm, 0 },
    { X86::MMX_PMULHWirr, X86::MMX_PMULHWirm, 0 },
    { X86::MMX_PMULLWirr, X86::MMX_PMULLWirm, 0 },
    { X86::MMX_PMULUDQirr, X86::MMX_PMULUDQirm, 0 },
    { X86::MMX_PORirr, X86::MMX_PORirm, 0 },
    { X86::MMX_PSADBWirr, X86::MMX_PSADBWirm, 0 },
    { X86::MMX_PSHUFBrr64, X86::MMX_PSHUFBrm64, 0 },
    { X86::MMX_PSIGNBrr64, X86::MMX_PSIGNBrm64, 0 },
    { X86::MMX_PSIGNDrr64, X86::MMX_PSIGNDrm64, 0 },
    { X86::MMX_PSIGNWrr64, X86::MMX_PSIGNWrm64, 0 },
    { X86::MMX_PSLLDrr, X86::MMX_PSLLDrm, 0 },
    { X86::MMX_PSLLQrr, X86::MMX_PSLLQrm, 0 },
    { X86::MMX_PSLLWrr, X86::MMX_PSLLWrm, 0 },
    { X86::MMX_PSRADrr, X86::MMX_PSRADrm, 0 },
    { X86::MMX_PSRAWrr, X86::MMX_PSRAWrm, 0 },
    { X86::MMX_PSRLDrr, X86::MMX_PSRLDrm, 0 },
    { X86::MMX_PSRLQrr, X86::MMX_PSRLQrm, 0 },
    { X86::MMX_PSRLWrr, X86::MMX_PSRLWrm, 0 },
    { X86::MMX_PSUBBirr, X86::MMX_PSUBBirm, 0 },
    { X86::MMX_PSUBDirr, X86::MMX_PSUBDirm, 0 },
    { X86::MMX_PSUBQirr, X86::MMX_PSUBQirm, 0 },
    { X86::MMX_PSUBSBirr, X86::MMX_PSUBSBirm, 0 },
    { X86::MMX_PSUBSWirr, X86::MMX_PSUBSWirm, 0 },
    { X86::MMX_PSUBUSBirr, X86::MMX_PSUBUSBirm, 0 },
    { X86::MMX_PSUBUSWirr, X86::MMX_PSUBUSWirm, 0 },
    { X86::MMX_PSUBWirr, X86::MMX_PSUBWirm, 0 },
    { X86::MMX_PUNPCKHBWirr, X86::MMX_PUNPCKHBWirm, 0 },
    { X86::MMX_PUNPCKHDQirr, X86::MMX_PUNPCKHDQirm, 0 },
    { X86::MMX_PUNPCKHWDirr, X86::MMX_PUNPCKHWDirm, 0 },
    { X86::MMX_PUNPCKLBWirr, X86::MMX_PUNPCKLBWirm, 0 },
    { X86::MMX_PUNPCKLDQirr, X86::MMX_PUNPCKLDQirm, 0 },
    { X86::MMX_PUNPCKLWDirr, X86::MMX_PUNPCKLWDirm, 0 },
    { X86::MMX_PXORirr, X86::MMX_PXORirm, 0 },

    // 3DNow! version of foldable instructions
    { X86::PAVGUSBrr, X86::PAVGUSBrm, 0 },
    { X86::PFACCrr, X86::PFACCrm, 0 },
    { X86::PFADDrr, X86::PFADDrm, 0 },
    { X86::PFCMPEQrr, X86::PFCMPEQrm, 0 },
    { X86::PFCMPGErr, X86::PFCMPGErm, 0 },
    { X86::PFCMPGTrr, X86::PFCMPGTrm, 0 },
    { X86::PFMAXrr, X86::PFMAXrm, 0 },
    { X86::PFMINrr, X86::PFMINrm, 0 },
    { X86::PFMULrr, X86::PFMULrm, 0 },
    { X86::PFNACCrr, X86::PFNACCrm, 0 },
    { X86::PFPNACCrr, X86::PFPNACCrm, 0 },
    { X86::PFRCPIT1rr, X86::PFRCPIT1rm, 0 },
    { X86::PFRCPIT2rr, X86::PFRCPIT2rm, 0 },
    { X86::PFRSQIT1rr, X86::PFRSQIT1rm, 0 },
    { X86::PFSUBrr, X86::PFSUBrm, 0 },
    { X86::PFSUBRrr, X86::PFSUBRrm, 0 },
    { X86::PMULHRWrr, X86::PMULHRWrm, 0 },

    // AVX 128-bit versions of foldable instructions
    { X86::VCVTSD2SSrr, X86::VCVTSD2SSrm, 0 },
    { X86::Int_VCVTSD2SSrr, X86::Int_VCVTSD2SSrm, 0 },
    { X86::VCVTSI2SD64rr, X86::VCVTSI2SD64rm, 0 },
    { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm, 0 },
    { X86::VCVTSI2SDrr, X86::VCVTSI2SDrm, 0 },
    { X86::Int_VCVTSI2SDrr, X86::Int_VCVTSI2SDrm, 0 },
    { X86::VCVTSI2SS64rr, X86::VCVTSI2SS64rm, 0 },
    { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm, 0 },
    { X86::VCVTSI2SSrr, X86::VCVTSI2SSrm, 0 },
    { X86::Int_VCVTSI2SSrr, X86::Int_VCVTSI2SSrm, 0 },
    { X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 },
    { X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 },
    { X86::VRCPSSr, X86::VRCPSSm, 0 },
    { X86::VRSQRTSSr, X86::VRSQRTSSm, 0 },
    { X86::VSQRTSDr, X86::VSQRTSDm, 0 },
    { X86::VSQRTSSr, X86::VSQRTSSm, 0 },
    { X86::VADDPDrr, X86::VADDPDrm, 0 },
    { X86::VADDPSrr, X86::VADDPSrm, 0 },
    { X86::VADDSDrr, X86::VADDSDrm, 0 },
    { X86::VADDSDrr_Int, X86::VADDSDrm_Int, 0 },
    { X86::VADDSSrr, X86::VADDSSrm, 0 },
    { X86::VADDSSrr_Int, X86::VADDSSrm_Int, 0 },
    { X86::VADDSUBPDrr, X86::VADDSUBPDrm, 0 },
    { X86::VADDSUBPSrr, X86::VADDSUBPSrm, 0 },
    { X86::VANDNPDrr, X86::VANDNPDrm, 0 },
    { X86::VANDNPSrr, X86::VANDNPSrm, 0 },
    { X86::VANDPDrr, X86::VANDPDrm, 0 },
    { X86::VANDPSrr, X86::VANDPSrm, 0 },
    { X86::VBLENDPDrri, X86::VBLENDPDrmi, 0 },
    { X86::VBLENDPSrri, X86::VBLENDPSrmi, 0 },
    { X86::VBLENDVPDrr, X86::VBLENDVPDrm, 0 },
    { X86::VBLENDVPSrr, X86::VBLENDVPSrm, 0 },
    { X86::VCMPPDrri, X86::VCMPPDrmi, 0 },
    { X86::VCMPPSrri, X86::VCMPPSrmi, 0 },
    { X86::VCMPSDrr, X86::VCMPSDrm, 0 },
    { X86::VCMPSSrr, X86::VCMPSSrm, 0 },
    { X86::VDIVPDrr, X86::VDIVPDrm, 0 },
    { X86::VDIVPSrr, X86::VDIVPSrm, 0 },
    { X86::VDIVSDrr, X86::VDIVSDrm, 0 },
    { X86::VDIVSDrr_Int, X86::VDIVSDrm_Int, 0 },
    { X86::VDIVSSrr, X86::VDIVSSrm, 0 },
    { X86::VDIVSSrr_Int, X86::VDIVSSrm_Int, 0 },
    { X86::VDPPDrri, X86::VDPPDrmi, 0 },
    { X86::VDPPSrri, X86::VDPPSrmi, 0 },
    // Do not fold VFs* loads because there are no scalar load variants for
    // these instructions. When folded, the load is required to be 128-bits, so
    // the load size would not match.
    { X86::VFvANDNPDrr, X86::VFvANDNPDrm, 0 },
    { X86::VFvANDNPSrr, X86::VFvANDNPSrm, 0 },
    { X86::VFvANDPDrr, X86::VFvANDPDrm, 0 },
    { X86::VFvANDPSrr, X86::VFvANDPSrm, 0 },
    { X86::VFvORPDrr, X86::VFvORPDrm, 0 },
    { X86::VFvORPSrr, X86::VFvORPSrm, 0 },
    { X86::VFvXORPDrr, X86::VFvXORPDrm, 0 },
    { X86::VFvXORPSrr, X86::VFvXORPSrm, 0 },
    { X86::VHADDPDrr, X86::VHADDPDrm, 0 },
    { X86::VHADDPSrr, X86::VHADDPSrm, 0 },
    { X86::VHSUBPDrr, X86::VHSUBPDrm, 0 },
    { X86::VHSUBPSrr, X86::VHSUBPSrm, 0 },
    { X86::Int_VCMPSDrr, X86::Int_VCMPSDrm, 0 },
    { X86::Int_VCMPSSrr, X86::Int_VCMPSSrm, 0 },
    { X86::VMAXPDrr, X86::VMAXPDrm, 0 },
    { X86::VMAXPSrr, X86::VMAXPSrm, 0 },
    { X86::VMAXSDrr, X86::VMAXSDrm, 0 },
    { X86::VMAXSDrr_Int, X86::VMAXSDrm_Int, 0 },
    { X86::VMAXSSrr, X86::VMAXSSrm, 0 },
    { X86::VMAXSSrr_Int, X86::VMAXSSrm_Int, 0 },
    { X86::VMINPDrr, X86::VMINPDrm, 0 },
    { X86::VMINPSrr, X86::VMINPSrm, 0 },
    { X86::VMINSDrr, X86::VMINSDrm, 0 },
    { X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
    { X86::VMINSSrr, X86::VMINSSrm, 0 },
    { X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
    { X86::VMPSADBWrri, X86::VMPSADBWrmi, 0 },
    { X86::VMULPDrr, X86::VMULPDrm, 0 },
    { X86::VMULPSrr, X86::VMULPSrm, 0 },
    { X86::VMULSDrr, X86::VMULSDrm, 0 },
    { X86::VMULSDrr_Int, X86::VMULSDrm_Int, 0 },
    { X86::VMULSSrr, X86::VMULSSrm, 0 },
    { X86::VMULSSrr_Int, X86::VMULSSrm_Int, 0 },
    { X86::VORPDrr, X86::VORPDrm, 0 },
    { X86::VORPSrr, X86::VORPSrm, 0 },
    { X86::VPACKSSDWrr, X86::VPACKSSDWrm, 0 },
    { X86::VPACKSSWBrr, X86::VPACKSSWBrm, 0 },
    { X86::VPACKUSDWrr, X86::VPACKUSDWrm, 0 },
    { X86::VPACKUSWBrr, X86::VPACKUSWBrm, 0 },
    { X86::VPADDBrr, X86::VPADDBrm, 0 },
    { X86::VPADDDrr, X86::VPADDDrm, 0 },
    { X86::VPADDQrr, X86::VPADDQrm, 0 },
    { X86::VPADDSBrr, X86::VPADDSBrm, 0 },
    { X86::VPADDSWrr, X86::VPADDSWrm, 0 },
    { X86::VPADDUSBrr, X86::VPADDUSBrm, 0 },
    { X86::VPADDUSWrr, X86::VPADDUSWrm, 0 },
    { X86::VPADDWrr, X86::VPADDWrm, 0 },
    { X86::VPALIGNR128rr, X86::VPALIGNR128rm, 0 },
    { X86::VPANDNrr, X86::VPANDNrm, 0 },
    { X86::VPANDrr, X86::VPANDrm, 0 },
    {
X86::VPAVGBrr, X86::VPAVGBrm, 0 }, 1333 { X86::VPAVGWrr, X86::VPAVGWrm, 0 }, 1334 { X86::VPBLENDVBrr, X86::VPBLENDVBrm, 0 }, 1335 { X86::VPBLENDWrri, X86::VPBLENDWrmi, 0 }, 1336 { X86::VPCLMULQDQrr, X86::VPCLMULQDQrm, 0 }, 1337 { X86::VPCMPEQBrr, X86::VPCMPEQBrm, 0 }, 1338 { X86::VPCMPEQDrr, X86::VPCMPEQDrm, 0 }, 1339 { X86::VPCMPEQQrr, X86::VPCMPEQQrm, 0 }, 1340 { X86::VPCMPEQWrr, X86::VPCMPEQWrm, 0 }, 1341 { X86::VPCMPGTBrr, X86::VPCMPGTBrm, 0 }, 1342 { X86::VPCMPGTDrr, X86::VPCMPGTDrm, 0 }, 1343 { X86::VPCMPGTQrr, X86::VPCMPGTQrm, 0 }, 1344 { X86::VPCMPGTWrr, X86::VPCMPGTWrm, 0 }, 1345 { X86::VPHADDDrr, X86::VPHADDDrm, 0 }, 1346 { X86::VPHADDSWrr128, X86::VPHADDSWrm128, 0 }, 1347 { X86::VPHADDWrr, X86::VPHADDWrm, 0 }, 1348 { X86::VPHSUBDrr, X86::VPHSUBDrm, 0 }, 1349 { X86::VPHSUBSWrr128, X86::VPHSUBSWrm128, 0 }, 1350 { X86::VPHSUBWrr, X86::VPHSUBWrm, 0 }, 1351 { X86::VPERMILPDrr, X86::VPERMILPDrm, 0 }, 1352 { X86::VPERMILPSrr, X86::VPERMILPSrm, 0 }, 1353 { X86::VPINSRBrr, X86::VPINSRBrm, 0 }, 1354 { X86::VPINSRDrr, X86::VPINSRDrm, 0 }, 1355 { X86::VPINSRQrr, X86::VPINSRQrm, 0 }, 1356 { X86::VPINSRWrri, X86::VPINSRWrmi, 0 }, 1357 { X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, 0 }, 1358 { X86::VPMADDWDrr, X86::VPMADDWDrm, 0 }, 1359 { X86::VPMAXSWrr, X86::VPMAXSWrm, 0 }, 1360 { X86::VPMAXUBrr, X86::VPMAXUBrm, 0 }, 1361 { X86::VPMINSWrr, X86::VPMINSWrm, 0 }, 1362 { X86::VPMINUBrr, X86::VPMINUBrm, 0 }, 1363 { X86::VPMINSBrr, X86::VPMINSBrm, 0 }, 1364 { X86::VPMINSDrr, X86::VPMINSDrm, 0 }, 1365 { X86::VPMINUDrr, X86::VPMINUDrm, 0 }, 1366 { X86::VPMINUWrr, X86::VPMINUWrm, 0 }, 1367 { X86::VPMAXSBrr, X86::VPMAXSBrm, 0 }, 1368 { X86::VPMAXSDrr, X86::VPMAXSDrm, 0 }, 1369 { X86::VPMAXUDrr, X86::VPMAXUDrm, 0 }, 1370 { X86::VPMAXUWrr, X86::VPMAXUWrm, 0 }, 1371 { X86::VPMULDQrr, X86::VPMULDQrm, 0 }, 1372 { X86::VPMULHRSWrr128, X86::VPMULHRSWrm128, 0 }, 1373 { X86::VPMULHUWrr, X86::VPMULHUWrm, 0 }, 1374 { X86::VPMULHWrr, X86::VPMULHWrm, 0 }, 1375 { X86::VPMULLDrr, X86::VPMULLDrm, 0 }, 1376 { X86::VPMULLWrr, X86::VPMULLWrm, 0 }, 1377 { X86::VPMULUDQrr, X86::VPMULUDQrm, 0 }, 1378 { X86::VPORrr, X86::VPORrm, 0 }, 1379 { X86::VPSADBWrr, X86::VPSADBWrm, 0 }, 1380 { X86::VPSHUFBrr, X86::VPSHUFBrm, 0 }, 1381 { X86::VPSIGNBrr, X86::VPSIGNBrm, 0 }, 1382 { X86::VPSIGNWrr, X86::VPSIGNWrm, 0 }, 1383 { X86::VPSIGNDrr, X86::VPSIGNDrm, 0 }, 1384 { X86::VPSLLDrr, X86::VPSLLDrm, 0 }, 1385 { X86::VPSLLQrr, X86::VPSLLQrm, 0 }, 1386 { X86::VPSLLWrr, X86::VPSLLWrm, 0 }, 1387 { X86::VPSRADrr, X86::VPSRADrm, 0 }, 1388 { X86::VPSRAWrr, X86::VPSRAWrm, 0 }, 1389 { X86::VPSRLDrr, X86::VPSRLDrm, 0 }, 1390 { X86::VPSRLQrr, X86::VPSRLQrm, 0 }, 1391 { X86::VPSRLWrr, X86::VPSRLWrm, 0 }, 1392 { X86::VPSUBBrr, X86::VPSUBBrm, 0 }, 1393 { X86::VPSUBDrr, X86::VPSUBDrm, 0 }, 1394 { X86::VPSUBQrr, X86::VPSUBQrm, 0 }, 1395 { X86::VPSUBSBrr, X86::VPSUBSBrm, 0 }, 1396 { X86::VPSUBSWrr, X86::VPSUBSWrm, 0 }, 1397 { X86::VPSUBUSBrr, X86::VPSUBUSBrm, 0 }, 1398 { X86::VPSUBUSWrr, X86::VPSUBUSWrm, 0 }, 1399 { X86::VPSUBWrr, X86::VPSUBWrm, 0 }, 1400 { X86::VPUNPCKHBWrr, X86::VPUNPCKHBWrm, 0 }, 1401 { X86::VPUNPCKHDQrr, X86::VPUNPCKHDQrm, 0 }, 1402 { X86::VPUNPCKHQDQrr, X86::VPUNPCKHQDQrm, 0 }, 1403 { X86::VPUNPCKHWDrr, X86::VPUNPCKHWDrm, 0 }, 1404 { X86::VPUNPCKLBWrr, X86::VPUNPCKLBWrm, 0 }, 1405 { X86::VPUNPCKLDQrr, X86::VPUNPCKLDQrm, 0 }, 1406 { X86::VPUNPCKLQDQrr, X86::VPUNPCKLQDQrm, 0 }, 1407 { X86::VPUNPCKLWDrr, X86::VPUNPCKLWDrm, 0 }, 1408 { X86::VPXORrr, X86::VPXORrm, 0 }, 1409 { X86::VSHUFPDrri, X86::VSHUFPDrmi, 0 }, 1410 { X86::VSHUFPSrri, 
X86::VSHUFPSrmi, 0 }, 1411 { X86::VSUBPDrr, X86::VSUBPDrm, 0 }, 1412 { X86::VSUBPSrr, X86::VSUBPSrm, 0 }, 1413 { X86::VSUBSDrr, X86::VSUBSDrm, 0 }, 1414 { X86::VSUBSDrr_Int, X86::VSUBSDrm_Int, 0 }, 1415 { X86::VSUBSSrr, X86::VSUBSSrm, 0 }, 1416 { X86::VSUBSSrr_Int, X86::VSUBSSrm_Int, 0 }, 1417 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrm, 0 }, 1418 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrm, 0 }, 1419 { X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, 0 }, 1420 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, 0 }, 1421 { X86::VXORPDrr, X86::VXORPDrm, 0 }, 1422 { X86::VXORPSrr, X86::VXORPSrm, 0 }, 1423 1424 // AVX 256-bit foldable instructions 1425 { X86::VADDPDYrr, X86::VADDPDYrm, 0 }, 1426 { X86::VADDPSYrr, X86::VADDPSYrm, 0 }, 1427 { X86::VADDSUBPDYrr, X86::VADDSUBPDYrm, 0 }, 1428 { X86::VADDSUBPSYrr, X86::VADDSUBPSYrm, 0 }, 1429 { X86::VANDNPDYrr, X86::VANDNPDYrm, 0 }, 1430 { X86::VANDNPSYrr, X86::VANDNPSYrm, 0 }, 1431 { X86::VANDPDYrr, X86::VANDPDYrm, 0 }, 1432 { X86::VANDPSYrr, X86::VANDPSYrm, 0 }, 1433 { X86::VBLENDPDYrri, X86::VBLENDPDYrmi, 0 }, 1434 { X86::VBLENDPSYrri, X86::VBLENDPSYrmi, 0 }, 1435 { X86::VBLENDVPDYrr, X86::VBLENDVPDYrm, 0 }, 1436 { X86::VBLENDVPSYrr, X86::VBLENDVPSYrm, 0 }, 1437 { X86::VCMPPDYrri, X86::VCMPPDYrmi, 0 }, 1438 { X86::VCMPPSYrri, X86::VCMPPSYrmi, 0 }, 1439 { X86::VDIVPDYrr, X86::VDIVPDYrm, 0 }, 1440 { X86::VDIVPSYrr, X86::VDIVPSYrm, 0 }, 1441 { X86::VDPPSYrri, X86::VDPPSYrmi, 0 }, 1442 { X86::VHADDPDYrr, X86::VHADDPDYrm, 0 }, 1443 { X86::VHADDPSYrr, X86::VHADDPSYrm, 0 }, 1444 { X86::VHSUBPDYrr, X86::VHSUBPDYrm, 0 }, 1445 { X86::VHSUBPSYrr, X86::VHSUBPSYrm, 0 }, 1446 { X86::VINSERTF128rr, X86::VINSERTF128rm, 0 }, 1447 { X86::VMAXPDYrr, X86::VMAXPDYrm, 0 }, 1448 { X86::VMAXPSYrr, X86::VMAXPSYrm, 0 }, 1449 { X86::VMINPDYrr, X86::VMINPDYrm, 0 }, 1450 { X86::VMINPSYrr, X86::VMINPSYrm, 0 }, 1451 { X86::VMULPDYrr, X86::VMULPDYrm, 0 }, 1452 { X86::VMULPSYrr, X86::VMULPSYrm, 0 }, 1453 { X86::VORPDYrr, X86::VORPDYrm, 0 }, 1454 { X86::VORPSYrr, X86::VORPSYrm, 0 }, 1455 { X86::VPERM2F128rr, X86::VPERM2F128rm, 0 }, 1456 { X86::VPERMILPDYrr, X86::VPERMILPDYrm, 0 }, 1457 { X86::VPERMILPSYrr, X86::VPERMILPSYrm, 0 }, 1458 { X86::VSHUFPDYrri, X86::VSHUFPDYrmi, 0 }, 1459 { X86::VSHUFPSYrri, X86::VSHUFPSYrmi, 0 }, 1460 { X86::VSUBPDYrr, X86::VSUBPDYrm, 0 }, 1461 { X86::VSUBPSYrr, X86::VSUBPSYrm, 0 }, 1462 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrm, 0 }, 1463 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrm, 0 }, 1464 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrm, 0 }, 1465 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrm, 0 }, 1466 { X86::VXORPDYrr, X86::VXORPDYrm, 0 }, 1467 { X86::VXORPSYrr, X86::VXORPSYrm, 0 }, 1468 1469 // AVX2 foldable instructions 1470 { X86::VINSERTI128rr, X86::VINSERTI128rm, 0 }, 1471 { X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, 0 }, 1472 { X86::VPACKSSWBYrr, X86::VPACKSSWBYrm, 0 }, 1473 { X86::VPACKUSDWYrr, X86::VPACKUSDWYrm, 0 }, 1474 { X86::VPACKUSWBYrr, X86::VPACKUSWBYrm, 0 }, 1475 { X86::VPADDBYrr, X86::VPADDBYrm, 0 }, 1476 { X86::VPADDDYrr, X86::VPADDDYrm, 0 }, 1477 { X86::VPADDQYrr, X86::VPADDQYrm, 0 }, 1478 { X86::VPADDSBYrr, X86::VPADDSBYrm, 0 }, 1479 { X86::VPADDSWYrr, X86::VPADDSWYrm, 0 }, 1480 { X86::VPADDUSBYrr, X86::VPADDUSBYrm, 0 }, 1481 { X86::VPADDUSWYrr, X86::VPADDUSWYrm, 0 }, 1482 { X86::VPADDWYrr, X86::VPADDWYrm, 0 }, 1483 { X86::VPALIGNR256rr, X86::VPALIGNR256rm, 0 }, 1484 { X86::VPANDNYrr, X86::VPANDNYrm, 0 }, 1485 { X86::VPANDYrr, X86::VPANDYrm, 0 }, 1486 { X86::VPAVGBYrr, X86::VPAVGBYrm, 0 }, 1487 { X86::VPAVGWYrr, X86::VPAVGWYrm, 0 }, 1488 { X86::VPBLENDDrri, X86::VPBLENDDrmi, 0 }, 1489 { 
X86::VPBLENDDYrri, X86::VPBLENDDYrmi, 0 }, 1490 { X86::VPBLENDVBYrr, X86::VPBLENDVBYrm, 0 }, 1491 { X86::VPBLENDWYrri, X86::VPBLENDWYrmi, 0 }, 1492 { X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, 0 }, 1493 { X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, 0 }, 1494 { X86::VPCMPEQQYrr, X86::VPCMPEQQYrm, 0 }, 1495 { X86::VPCMPEQWYrr, X86::VPCMPEQWYrm, 0 }, 1496 { X86::VPCMPGTBYrr, X86::VPCMPGTBYrm, 0 }, 1497 { X86::VPCMPGTDYrr, X86::VPCMPGTDYrm, 0 }, 1498 { X86::VPCMPGTQYrr, X86::VPCMPGTQYrm, 0 }, 1499 { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, 0 }, 1500 { X86::VPERM2I128rr, X86::VPERM2I128rm, 0 }, 1501 { X86::VPERMDYrr, X86::VPERMDYrm, 0 }, 1502 { X86::VPERMPSYrr, X86::VPERMPSYrm, 0 }, 1503 { X86::VPHADDDYrr, X86::VPHADDDYrm, 0 }, 1504 { X86::VPHADDSWrr256, X86::VPHADDSWrm256, 0 }, 1505 { X86::VPHADDWYrr, X86::VPHADDWYrm, 0 }, 1506 { X86::VPHSUBDYrr, X86::VPHSUBDYrm, 0 }, 1507 { X86::VPHSUBSWrr256, X86::VPHSUBSWrm256, 0 }, 1508 { X86::VPHSUBWYrr, X86::VPHSUBWYrm, 0 }, 1509 { X86::VPMADDUBSWrr256, X86::VPMADDUBSWrm256, 0 }, 1510 { X86::VPMADDWDYrr, X86::VPMADDWDYrm, 0 }, 1511 { X86::VPMAXSWYrr, X86::VPMAXSWYrm, 0 }, 1512 { X86::VPMAXUBYrr, X86::VPMAXUBYrm, 0 }, 1513 { X86::VPMINSWYrr, X86::VPMINSWYrm, 0 }, 1514 { X86::VPMINUBYrr, X86::VPMINUBYrm, 0 }, 1515 { X86::VPMINSBYrr, X86::VPMINSBYrm, 0 }, 1516 { X86::VPMINSDYrr, X86::VPMINSDYrm, 0 }, 1517 { X86::VPMINUDYrr, X86::VPMINUDYrm, 0 }, 1518 { X86::VPMINUWYrr, X86::VPMINUWYrm, 0 }, 1519 { X86::VPMAXSBYrr, X86::VPMAXSBYrm, 0 }, 1520 { X86::VPMAXSDYrr, X86::VPMAXSDYrm, 0 }, 1521 { X86::VPMAXUDYrr, X86::VPMAXUDYrm, 0 }, 1522 { X86::VPMAXUWYrr, X86::VPMAXUWYrm, 0 }, 1523 { X86::VMPSADBWYrri, X86::VMPSADBWYrmi, 0 }, 1524 { X86::VPMULDQYrr, X86::VPMULDQYrm, 0 }, 1525 { X86::VPMULHRSWrr256, X86::VPMULHRSWrm256, 0 }, 1526 { X86::VPMULHUWYrr, X86::VPMULHUWYrm, 0 }, 1527 { X86::VPMULHWYrr, X86::VPMULHWYrm, 0 }, 1528 { X86::VPMULLDYrr, X86::VPMULLDYrm, 0 }, 1529 { X86::VPMULLWYrr, X86::VPMULLWYrm, 0 }, 1530 { X86::VPMULUDQYrr, X86::VPMULUDQYrm, 0 }, 1531 { X86::VPORYrr, X86::VPORYrm, 0 }, 1532 { X86::VPSADBWYrr, X86::VPSADBWYrm, 0 }, 1533 { X86::VPSHUFBYrr, X86::VPSHUFBYrm, 0 }, 1534 { X86::VPSIGNBYrr, X86::VPSIGNBYrm, 0 }, 1535 { X86::VPSIGNWYrr, X86::VPSIGNWYrm, 0 }, 1536 { X86::VPSIGNDYrr, X86::VPSIGNDYrm, 0 }, 1537 { X86::VPSLLDYrr, X86::VPSLLDYrm, 0 }, 1538 { X86::VPSLLQYrr, X86::VPSLLQYrm, 0 }, 1539 { X86::VPSLLWYrr, X86::VPSLLWYrm, 0 }, 1540 { X86::VPSLLVDrr, X86::VPSLLVDrm, 0 }, 1541 { X86::VPSLLVDYrr, X86::VPSLLVDYrm, 0 }, 1542 { X86::VPSLLVQrr, X86::VPSLLVQrm, 0 }, 1543 { X86::VPSLLVQYrr, X86::VPSLLVQYrm, 0 }, 1544 { X86::VPSRADYrr, X86::VPSRADYrm, 0 }, 1545 { X86::VPSRAWYrr, X86::VPSRAWYrm, 0 }, 1546 { X86::VPSRAVDrr, X86::VPSRAVDrm, 0 }, 1547 { X86::VPSRAVDYrr, X86::VPSRAVDYrm, 0 }, 1548 { X86::VPSRLDYrr, X86::VPSRLDYrm, 0 }, 1549 { X86::VPSRLQYrr, X86::VPSRLQYrm, 0 }, 1550 { X86::VPSRLWYrr, X86::VPSRLWYrm, 0 }, 1551 { X86::VPSRLVDrr, X86::VPSRLVDrm, 0 }, 1552 { X86::VPSRLVDYrr, X86::VPSRLVDYrm, 0 }, 1553 { X86::VPSRLVQrr, X86::VPSRLVQrm, 0 }, 1554 { X86::VPSRLVQYrr, X86::VPSRLVQYrm, 0 }, 1555 { X86::VPSUBBYrr, X86::VPSUBBYrm, 0 }, 1556 { X86::VPSUBDYrr, X86::VPSUBDYrm, 0 }, 1557 { X86::VPSUBQYrr, X86::VPSUBQYrm, 0 }, 1558 { X86::VPSUBSBYrr, X86::VPSUBSBYrm, 0 }, 1559 { X86::VPSUBSWYrr, X86::VPSUBSWYrm, 0 }, 1560 { X86::VPSUBUSBYrr, X86::VPSUBUSBYrm, 0 }, 1561 { X86::VPSUBUSWYrr, X86::VPSUBUSWYrm, 0 }, 1562 { X86::VPSUBWYrr, X86::VPSUBWYrm, 0 }, 1563 { X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, 0 }, 1564 { X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, 0 }, 1565 { 
X86::VPUNPCKHQDQYrr, X86::VPUNPCKHQDQYrm, 0 }, 1566 { X86::VPUNPCKHWDYrr, X86::VPUNPCKHWDYrm, 0 }, 1567 { X86::VPUNPCKLBWYrr, X86::VPUNPCKLBWYrm, 0 }, 1568 { X86::VPUNPCKLDQYrr, X86::VPUNPCKLDQYrm, 0 }, 1569 { X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, 0 }, 1570 { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, 0 }, 1571 { X86::VPXORYrr, X86::VPXORYrm, 0 }, 1572 1573 // FMA4 foldable patterns 1574 { X86::VFMADDSS4rr, X86::VFMADDSS4mr, 0 }, 1575 { X86::VFMADDSD4rr, X86::VFMADDSD4mr, 0 }, 1576 { X86::VFMADDPS4rr, X86::VFMADDPS4mr, 0 }, 1577 { X86::VFMADDPD4rr, X86::VFMADDPD4mr, 0 }, 1578 { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, 0 }, 1579 { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, 0 }, 1580 { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, 0 }, 1581 { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, 0 }, 1582 { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, 0 }, 1583 { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, 0 }, 1584 { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, 0 }, 1585 { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, 0 }, 1586 { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, 0 }, 1587 { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, 0 }, 1588 { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, 0 }, 1589 { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, 0 }, 1590 { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, 0 }, 1591 { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, 0 }, 1592 { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, 0 }, 1593 { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, 0 }, 1594 { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, 0 }, 1595 { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, 0 }, 1596 { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, 0 }, 1597 { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, 0 }, 1598 { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, 0 }, 1599 { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, 0 }, 1600 { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, 0 }, 1601 { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, 0 }, 1602 { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, 0 }, 1603 { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, 0 }, 1604 { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, 0 }, 1605 { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, 0 }, 1606 1607 // XOP foldable instructions 1608 { X86::VPCMOVrr, X86::VPCMOVmr, 0 }, 1609 { X86::VPCMOVrrY, X86::VPCMOVmrY, 0 }, 1610 { X86::VPCOMBri, X86::VPCOMBmi, 0 }, 1611 { X86::VPCOMDri, X86::VPCOMDmi, 0 }, 1612 { X86::VPCOMQri, X86::VPCOMQmi, 0 }, 1613 { X86::VPCOMWri, X86::VPCOMWmi, 0 }, 1614 { X86::VPCOMUBri, X86::VPCOMUBmi, 0 }, 1615 { X86::VPCOMUDri, X86::VPCOMUDmi, 0 }, 1616 { X86::VPCOMUQri, X86::VPCOMUQmi, 0 }, 1617 { X86::VPCOMUWri, X86::VPCOMUWmi, 0 }, 1618 { X86::VPERMIL2PDrr, X86::VPERMIL2PDmr, 0 }, 1619 { X86::VPERMIL2PDrrY, X86::VPERMIL2PDmrY, 0 }, 1620 { X86::VPERMIL2PSrr, X86::VPERMIL2PSmr, 0 }, 1621 { X86::VPERMIL2PSrrY, X86::VPERMIL2PSmrY, 0 }, 1622 { X86::VPMACSDDrr, X86::VPMACSDDrm, 0 }, 1623 { X86::VPMACSDQHrr, X86::VPMACSDQHrm, 0 }, 1624 { X86::VPMACSDQLrr, X86::VPMACSDQLrm, 0 }, 1625 { X86::VPMACSSDDrr, X86::VPMACSSDDrm, 0 }, 1626 { X86::VPMACSSDQHrr, X86::VPMACSSDQHrm, 0 }, 1627 { X86::VPMACSSDQLrr, X86::VPMACSSDQLrm, 0 }, 1628 { X86::VPMACSSWDrr, X86::VPMACSSWDrm, 0 }, 1629 { X86::VPMACSSWWrr, X86::VPMACSSWWrm, 0 }, 1630 { X86::VPMACSWDrr, X86::VPMACSWDrm, 0 }, 1631 { X86::VPMACSWWrr, X86::VPMACSWWrm, 0 }, 1632 { X86::VPMADCSSWDrr, X86::VPMADCSSWDrm, 0 }, 1633 { X86::VPMADCSWDrr, X86::VPMADCSWDrm, 0 }, 1634 { X86::VPPERMrr, X86::VPPERMmr, 0 }, 1635 { X86::VPROTBrr, X86::VPROTBrm, 0 }, 1636 { X86::VPROTDrr, X86::VPROTDrm, 0 }, 1637 { X86::VPROTQrr, X86::VPROTQrm, 0 }, 1638 { X86::VPROTWrr, X86::VPROTWrm, 0 }, 1639 { X86::VPSHABrr, X86::VPSHABrm, 0 }, 1640 { X86::VPSHADrr, 
X86::VPSHADrm, 0 }, 1641 { X86::VPSHAQrr, X86::VPSHAQrm, 0 }, 1642 { X86::VPSHAWrr, X86::VPSHAWrm, 0 }, 1643 { X86::VPSHLBrr, X86::VPSHLBrm, 0 }, 1644 { X86::VPSHLDrr, X86::VPSHLDrm, 0 }, 1645 { X86::VPSHLQrr, X86::VPSHLQrm, 0 }, 1646 { X86::VPSHLWrr, X86::VPSHLWrm, 0 }, 1647 1648 // BMI/BMI2 foldable instructions 1649 { X86::ANDN32rr, X86::ANDN32rm, 0 }, 1650 { X86::ANDN64rr, X86::ANDN64rm, 0 }, 1651 { X86::MULX32rr, X86::MULX32rm, 0 }, 1652 { X86::MULX64rr, X86::MULX64rm, 0 }, 1653 { X86::PDEP32rr, X86::PDEP32rm, 0 }, 1654 { X86::PDEP64rr, X86::PDEP64rm, 0 }, 1655 { X86::PEXT32rr, X86::PEXT32rm, 0 }, 1656 { X86::PEXT64rr, X86::PEXT64rm, 0 }, 1657 1658 // AVX-512 foldable instructions 1659 { X86::VADDPSZrr, X86::VADDPSZrm, 0 }, 1660 { X86::VADDPDZrr, X86::VADDPDZrm, 0 }, 1661 { X86::VSUBPSZrr, X86::VSUBPSZrm, 0 }, 1662 { X86::VSUBPDZrr, X86::VSUBPDZrm, 0 }, 1663 { X86::VMULPSZrr, X86::VMULPSZrm, 0 }, 1664 { X86::VMULPDZrr, X86::VMULPDZrm, 0 }, 1665 { X86::VDIVPSZrr, X86::VDIVPSZrm, 0 }, 1666 { X86::VDIVPDZrr, X86::VDIVPDZrm, 0 }, 1667 { X86::VMINPSZrr, X86::VMINPSZrm, 0 }, 1668 { X86::VMINPDZrr, X86::VMINPDZrm, 0 }, 1669 { X86::VMAXPSZrr, X86::VMAXPSZrm, 0 }, 1670 { X86::VMAXPDZrr, X86::VMAXPDZrm, 0 }, 1671 { X86::VPADDDZrr, X86::VPADDDZrm, 0 }, 1672 { X86::VPADDQZrr, X86::VPADDQZrm, 0 }, 1673 { X86::VPERMPDZri, X86::VPERMPDZmi, 0 }, 1674 { X86::VPERMPSZrr, X86::VPERMPSZrm, 0 }, 1675 { X86::VPMAXSDZrr, X86::VPMAXSDZrm, 0 }, 1676 { X86::VPMAXSQZrr, X86::VPMAXSQZrm, 0 }, 1677 { X86::VPMAXUDZrr, X86::VPMAXUDZrm, 0 }, 1678 { X86::VPMAXUQZrr, X86::VPMAXUQZrm, 0 }, 1679 { X86::VPMINSDZrr, X86::VPMINSDZrm, 0 }, 1680 { X86::VPMINSQZrr, X86::VPMINSQZrm, 0 }, 1681 { X86::VPMINUDZrr, X86::VPMINUDZrm, 0 }, 1682 { X86::VPMINUQZrr, X86::VPMINUQZrm, 0 }, 1683 { X86::VPMULDQZrr, X86::VPMULDQZrm, 0 }, 1684 { X86::VPSLLVDZrr, X86::VPSLLVDZrm, 0 }, 1685 { X86::VPSLLVQZrr, X86::VPSLLVQZrm, 0 }, 1686 { X86::VPSRAVDZrr, X86::VPSRAVDZrm, 0 }, 1687 { X86::VPSRLVDZrr, X86::VPSRLVDZrm, 0 }, 1688 { X86::VPSRLVQZrr, X86::VPSRLVQZrm, 0 }, 1689 { X86::VPSUBDZrr, X86::VPSUBDZrm, 0 }, 1690 { X86::VPSUBQZrr, X86::VPSUBQZrm, 0 }, 1691 { X86::VSHUFPDZrri, X86::VSHUFPDZrmi, 0 }, 1692 { X86::VSHUFPSZrri, X86::VSHUFPSZrmi, 0 }, 1693 { X86::VALIGNQrri, X86::VALIGNQrmi, 0 }, 1694 { X86::VALIGNDrri, X86::VALIGNDrmi, 0 }, 1695 { X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 }, 1696 { X86::VBROADCASTSSZrkz, X86::VBROADCASTSSZmkz, TB_NO_REVERSE }, 1697 { X86::VBROADCASTSDZrkz, X86::VBROADCASTSDZmkz, TB_NO_REVERSE }, 1698 1699 // AVX-512{F,VL} foldable instructions 1700 { X86::VBROADCASTSSZ256rkz, X86::VBROADCASTSSZ256mkz, TB_NO_REVERSE }, 1701 { X86::VBROADCASTSDZ256rkz, X86::VBROADCASTSDZ256mkz, TB_NO_REVERSE }, 1702 { X86::VBROADCASTSSZ128rkz, X86::VBROADCASTSSZ128mkz, TB_NO_REVERSE }, 1703 1704 // AVX-512{F,VL} foldable instructions 1705 { X86::VADDPDZ128rr, X86::VADDPDZ128rm, 0 }, 1706 { X86::VADDPDZ256rr, X86::VADDPDZ256rm, 0 }, 1707 { X86::VADDPSZ128rr, X86::VADDPSZ128rm, 0 }, 1708 { X86::VADDPSZ256rr, X86::VADDPSZ256rm, 0 }, 1709 1710 // AES foldable instructions 1711 { X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 }, 1712 { X86::AESDECrr, X86::AESDECrm, TB_ALIGN_16 }, 1713 { X86::AESENCLASTrr, X86::AESENCLASTrm, TB_ALIGN_16 }, 1714 { X86::AESENCrr, X86::AESENCrm, TB_ALIGN_16 }, 1715 { X86::VAESDECLASTrr, X86::VAESDECLASTrm, 0 }, 1716 { X86::VAESDECrr, X86::VAESDECrm, 0 }, 1717 { X86::VAESENCLASTrr, X86::VAESENCLASTrm, 0 }, 1718 { X86::VAESENCrr, X86::VAESENCrm, 0 }, 1719 1720 // SHA foldable instructions 1721 { X86::SHA1MSG1rr, 
X86::SHA1MSG1rm, TB_ALIGN_16 }, 1722 { X86::SHA1MSG2rr, X86::SHA1MSG2rm, TB_ALIGN_16 }, 1723 { X86::SHA1NEXTErr, X86::SHA1NEXTErm, TB_ALIGN_16 }, 1724 { X86::SHA1RNDS4rri, X86::SHA1RNDS4rmi, TB_ALIGN_16 }, 1725 { X86::SHA256MSG1rr, X86::SHA256MSG1rm, TB_ALIGN_16 }, 1726 { X86::SHA256MSG2rr, X86::SHA256MSG2rm, TB_ALIGN_16 }, 1727 { X86::SHA256RNDS2rr, X86::SHA256RNDS2rm, TB_ALIGN_16 } 1728 }; 1729 1730 for (unsigned i = 0, e = array_lengthof(MemoryFoldTable2); i != e; ++i) { 1731 unsigned RegOp = MemoryFoldTable2[i].RegOp; 1732 unsigned MemOp = MemoryFoldTable2[i].MemOp; 1733 unsigned Flags = MemoryFoldTable2[i].Flags; 1734 AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable, 1735 RegOp, MemOp, 1736 // Index 2, folded load 1737 Flags | TB_INDEX_2 | TB_FOLDED_LOAD); 1738 } 1739 1740 static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { 1741 // FMA foldable instructions 1742 { X86::VFMADDSSr231r, X86::VFMADDSSr231m, TB_ALIGN_NONE }, 1743 { X86::VFMADDSDr231r, X86::VFMADDSDr231m, TB_ALIGN_NONE }, 1744 { X86::VFMADDSSr132r, X86::VFMADDSSr132m, TB_ALIGN_NONE }, 1745 { X86::VFMADDSDr132r, X86::VFMADDSDr132m, TB_ALIGN_NONE }, 1746 { X86::VFMADDSSr213r, X86::VFMADDSSr213m, TB_ALIGN_NONE }, 1747 { X86::VFMADDSDr213r, X86::VFMADDSDr213m, TB_ALIGN_NONE }, 1748 1749 { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_NONE }, 1750 { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_NONE }, 1751 { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_NONE }, 1752 { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_NONE }, 1753 { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_NONE }, 1754 { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_NONE }, 1755 { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_NONE }, 1756 { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_NONE }, 1757 { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_NONE }, 1758 { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_NONE }, 1759 { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_NONE }, 1760 { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_NONE }, 1761 1762 { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, TB_ALIGN_NONE }, 1763 { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, TB_ALIGN_NONE }, 1764 { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, TB_ALIGN_NONE }, 1765 { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, TB_ALIGN_NONE }, 1766 { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, TB_ALIGN_NONE }, 1767 { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, TB_ALIGN_NONE }, 1768 1769 { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_NONE }, 1770 { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_NONE }, 1771 { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_NONE }, 1772 { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_NONE }, 1773 { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_NONE }, 1774 { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_NONE }, 1775 { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_NONE }, 1776 { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_NONE }, 1777 { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_NONE }, 1778 { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_NONE }, 1779 { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_NONE }, 1780 { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_NONE }, 1781 1782 { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, TB_ALIGN_NONE }, 1783 { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, TB_ALIGN_NONE }, 1784 { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, TB_ALIGN_NONE }, 1785 { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, TB_ALIGN_NONE }, 1786 { X86::VFMSUBSSr213r, 
X86::VFMSUBSSr213m, TB_ALIGN_NONE }, 1787 { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, TB_ALIGN_NONE }, 1788 1789 { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_NONE }, 1790 { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_NONE }, 1791 { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_NONE }, 1792 { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_NONE }, 1793 { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_NONE }, 1794 { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_NONE }, 1795 { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_NONE }, 1796 { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_NONE }, 1797 { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_NONE }, 1798 { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_NONE }, 1799 { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_NONE }, 1800 { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_NONE }, 1801 1802 { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, TB_ALIGN_NONE }, 1803 { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, TB_ALIGN_NONE }, 1804 { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, TB_ALIGN_NONE }, 1805 { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, TB_ALIGN_NONE }, 1806 { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, TB_ALIGN_NONE }, 1807 { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, TB_ALIGN_NONE }, 1808 1809 { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_NONE }, 1810 { X86::VFNMSUBPDr231r, X86::VFNMSUBPDr231m, TB_ALIGN_NONE }, 1811 { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_NONE }, 1812 { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_NONE }, 1813 { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_NONE }, 1814 { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_NONE }, 1815 { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_NONE }, 1816 { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_NONE }, 1817 { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_NONE }, 1818 { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_NONE }, 1819 { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_NONE }, 1820 { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_NONE }, 1821 1822 { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_NONE }, 1823 { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_NONE }, 1824 { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_NONE }, 1825 { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_NONE }, 1826 { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_NONE }, 1827 { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_NONE }, 1828 { X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_NONE }, 1829 { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_NONE }, 1830 { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_NONE }, 1831 { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_NONE }, 1832 { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_NONE }, 1833 { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_NONE }, 1834 1835 { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_NONE }, 1836 { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_NONE }, 1837 { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_NONE }, 1838 { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_NONE }, 1839 { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_NONE }, 1840 { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_NONE }, 1841 { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_NONE }, 1842 { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_NONE }, 1843 { X86::VFMSUBADDPSr132rY, 
X86::VFMSUBADDPSr132mY, TB_ALIGN_NONE }, 1844 { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_NONE }, 1845 { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_NONE }, 1846 { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_NONE }, 1847 1848 // FMA4 foldable patterns 1849 { X86::VFMADDSS4rr, X86::VFMADDSS4rm, 0 }, 1850 { X86::VFMADDSD4rr, X86::VFMADDSD4rm, 0 }, 1851 { X86::VFMADDPS4rr, X86::VFMADDPS4rm, TB_ALIGN_16 }, 1852 { X86::VFMADDPD4rr, X86::VFMADDPD4rm, TB_ALIGN_16 }, 1853 { X86::VFMADDPS4rrY, X86::VFMADDPS4rmY, TB_ALIGN_32 }, 1854 { X86::VFMADDPD4rrY, X86::VFMADDPD4rmY, TB_ALIGN_32 }, 1855 { X86::VFNMADDSS4rr, X86::VFNMADDSS4rm, 0 }, 1856 { X86::VFNMADDSD4rr, X86::VFNMADDSD4rm, 0 }, 1857 { X86::VFNMADDPS4rr, X86::VFNMADDPS4rm, TB_ALIGN_16 }, 1858 { X86::VFNMADDPD4rr, X86::VFNMADDPD4rm, TB_ALIGN_16 }, 1859 { X86::VFNMADDPS4rrY, X86::VFNMADDPS4rmY, TB_ALIGN_32 }, 1860 { X86::VFNMADDPD4rrY, X86::VFNMADDPD4rmY, TB_ALIGN_32 }, 1861 { X86::VFMSUBSS4rr, X86::VFMSUBSS4rm, 0 }, 1862 { X86::VFMSUBSD4rr, X86::VFMSUBSD4rm, 0 }, 1863 { X86::VFMSUBPS4rr, X86::VFMSUBPS4rm, TB_ALIGN_16 }, 1864 { X86::VFMSUBPD4rr, X86::VFMSUBPD4rm, TB_ALIGN_16 }, 1865 { X86::VFMSUBPS4rrY, X86::VFMSUBPS4rmY, TB_ALIGN_32 }, 1866 { X86::VFMSUBPD4rrY, X86::VFMSUBPD4rmY, TB_ALIGN_32 }, 1867 { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4rm, 0 }, 1868 { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4rm, 0 }, 1869 { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4rm, TB_ALIGN_16 }, 1870 { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4rm, TB_ALIGN_16 }, 1871 { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4rmY, TB_ALIGN_32 }, 1872 { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4rmY, TB_ALIGN_32 }, 1873 { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4rm, TB_ALIGN_16 }, 1874 { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4rm, TB_ALIGN_16 }, 1875 { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4rmY, TB_ALIGN_32 }, 1876 { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4rmY, TB_ALIGN_32 }, 1877 { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4rm, TB_ALIGN_16 }, 1878 { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_16 }, 1879 { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_32 }, 1880 { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_32 }, 1881 1882 // XOP foldable instructions 1883 { X86::VPCMOVrr, X86::VPCMOVrm, 0 }, 1884 { X86::VPCMOVrrY, X86::VPCMOVrmY, 0 }, 1885 { X86::VPERMIL2PDrr, X86::VPERMIL2PDrm, 0 }, 1886 { X86::VPERMIL2PDrrY, X86::VPERMIL2PDrmY, 0 }, 1887 { X86::VPERMIL2PSrr, X86::VPERMIL2PSrm, 0 }, 1888 { X86::VPERMIL2PSrrY, X86::VPERMIL2PSrmY, 0 }, 1889 { X86::VPPERMrr, X86::VPPERMrm, 0 }, 1890 1891 // AVX-512 VPERMI instructions with 3 source operands. 
1892 { X86::VPERMI2Drr, X86::VPERMI2Drm, 0 }, 1893 { X86::VPERMI2Qrr, X86::VPERMI2Qrm, 0 }, 1894 { X86::VPERMI2PSrr, X86::VPERMI2PSrm, 0 }, 1895 { X86::VPERMI2PDrr, X86::VPERMI2PDrm, 0 }, 1896 { X86::VBLENDMPDZrr, X86::VBLENDMPDZrm, 0 }, 1897 { X86::VBLENDMPSZrr, X86::VBLENDMPSZrm, 0 }, 1898 { X86::VPBLENDMDZrr, X86::VPBLENDMDZrm, 0 }, 1899 { X86::VPBLENDMQZrr, X86::VPBLENDMQZrm, 0 }, 1900 { X86::VBROADCASTSSZrk, X86::VBROADCASTSSZmk, TB_NO_REVERSE }, 1901 { X86::VBROADCASTSDZrk, X86::VBROADCASTSDZmk, TB_NO_REVERSE }, 1902 { X86::VBROADCASTSSZ256rk, X86::VBROADCASTSSZ256mk, TB_NO_REVERSE }, 1903 { X86::VBROADCASTSDZ256rk, X86::VBROADCASTSDZ256mk, TB_NO_REVERSE }, 1904 { X86::VBROADCASTSSZ128rk, X86::VBROADCASTSSZ128mk, TB_NO_REVERSE }, 1905 // AVX-512 arithmetic instructions 1906 { X86::VADDPSZrrkz, X86::VADDPSZrmkz, 0 }, 1907 { X86::VADDPDZrrkz, X86::VADDPDZrmkz, 0 }, 1908 { X86::VSUBPSZrrkz, X86::VSUBPSZrmkz, 0 }, 1909 { X86::VSUBPDZrrkz, X86::VSUBPDZrmkz, 0 }, 1910 { X86::VMULPSZrrkz, X86::VMULPSZrmkz, 0 }, 1911 { X86::VMULPDZrrkz, X86::VMULPDZrmkz, 0 }, 1912 { X86::VDIVPSZrrkz, X86::VDIVPSZrmkz, 0 }, 1913 { X86::VDIVPDZrrkz, X86::VDIVPDZrmkz, 0 }, 1914 { X86::VMINPSZrrkz, X86::VMINPSZrmkz, 0 }, 1915 { X86::VMINPDZrrkz, X86::VMINPDZrmkz, 0 }, 1916 { X86::VMAXPSZrrkz, X86::VMAXPSZrmkz, 0 }, 1917 { X86::VMAXPDZrrkz, X86::VMAXPDZrmkz, 0 }, 1918 // AVX-512{F,VL} arithmetic instructions 256-bit 1919 { X86::VADDPSZ256rrkz, X86::VADDPSZ256rmkz, 0 }, 1920 { X86::VADDPDZ256rrkz, X86::VADDPDZ256rmkz, 0 }, 1921 { X86::VSUBPSZ256rrkz, X86::VSUBPSZ256rmkz, 0 }, 1922 { X86::VSUBPDZ256rrkz, X86::VSUBPDZ256rmkz, 0 }, 1923 { X86::VMULPSZ256rrkz, X86::VMULPSZ256rmkz, 0 }, 1924 { X86::VMULPDZ256rrkz, X86::VMULPDZ256rmkz, 0 }, 1925 { X86::VDIVPSZ256rrkz, X86::VDIVPSZ256rmkz, 0 }, 1926 { X86::VDIVPDZ256rrkz, X86::VDIVPDZ256rmkz, 0 }, 1927 { X86::VMINPSZ256rrkz, X86::VMINPSZ256rmkz, 0 }, 1928 { X86::VMINPDZ256rrkz, X86::VMINPDZ256rmkz, 0 }, 1929 { X86::VMAXPSZ256rrkz, X86::VMAXPSZ256rmkz, 0 }, 1930 { X86::VMAXPDZ256rrkz, X86::VMAXPDZ256rmkz, 0 }, 1931 // AVX-512{F,VL} arithmetic instructions 128-bit 1932 { X86::VADDPSZ128rrkz, X86::VADDPSZ128rmkz, 0 }, 1933 { X86::VADDPDZ128rrkz, X86::VADDPDZ128rmkz, 0 }, 1934 { X86::VSUBPSZ128rrkz, X86::VSUBPSZ128rmkz, 0 }, 1935 { X86::VSUBPDZ128rrkz, X86::VSUBPDZ128rmkz, 0 }, 1936 { X86::VMULPSZ128rrkz, X86::VMULPSZ128rmkz, 0 }, 1937 { X86::VMULPDZ128rrkz, X86::VMULPDZ128rmkz, 0 }, 1938 { X86::VDIVPSZ128rrkz, X86::VDIVPSZ128rmkz, 0 }, 1939 { X86::VDIVPDZ128rrkz, X86::VDIVPDZ128rmkz, 0 }, 1940 { X86::VMINPSZ128rrkz, X86::VMINPSZ128rmkz, 0 }, 1941 { X86::VMINPDZ128rrkz, X86::VMINPDZ128rmkz, 0 }, 1942 { X86::VMAXPSZ128rrkz, X86::VMAXPSZ128rmkz, 0 }, 1943 { X86::VMAXPDZ128rrkz, X86::VMAXPDZ128rmkz, 0 } 1944 }; 1945 1946 for (unsigned i = 0, e = array_lengthof(MemoryFoldTable3); i != e; ++i) { 1947 unsigned RegOp = MemoryFoldTable3[i].RegOp; 1948 unsigned MemOp = MemoryFoldTable3[i].MemOp; 1949 unsigned Flags = MemoryFoldTable3[i].Flags; 1950 AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable, 1951 RegOp, MemOp, 1952 // Index 3, folded load 1953 Flags | TB_INDEX_3 | TB_FOLDED_LOAD); 1954 } 1955 1956 static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { 1957 // AVX-512 foldable instructions 1958 { X86::VADDPSZrrk, X86::VADDPSZrmk, 0 }, 1959 { X86::VADDPDZrrk, X86::VADDPDZrmk, 0 }, 1960 { X86::VSUBPSZrrk, X86::VSUBPSZrmk, 0 }, 1961 { X86::VSUBPDZrrk, X86::VSUBPDZrmk, 0 }, 1962 { X86::VMULPSZrrk, X86::VMULPSZrmk, 0 }, 1963 { X86::VMULPDZrrk, X86::VMULPDZrmk, 0 }, 1964 { 
X86::VDIVPSZrrk, X86::VDIVPSZrmk, 0 }, 1965 { X86::VDIVPDZrrk, X86::VDIVPDZrmk, 0 }, 1966 { X86::VMINPSZrrk, X86::VMINPSZrmk, 0 }, 1967 { X86::VMINPDZrrk, X86::VMINPDZrmk, 0 }, 1968 { X86::VMAXPSZrrk, X86::VMAXPSZrmk, 0 }, 1969 { X86::VMAXPDZrrk, X86::VMAXPDZrmk, 0 }, 1970 // AVX-512{F,VL} foldable instructions 256-bit 1971 { X86::VADDPSZ256rrk, X86::VADDPSZ256rmk, 0 }, 1972 { X86::VADDPDZ256rrk, X86::VADDPDZ256rmk, 0 }, 1973 { X86::VSUBPSZ256rrk, X86::VSUBPSZ256rmk, 0 }, 1974 { X86::VSUBPDZ256rrk, X86::VSUBPDZ256rmk, 0 }, 1975 { X86::VMULPSZ256rrk, X86::VMULPSZ256rmk, 0 }, 1976 { X86::VMULPDZ256rrk, X86::VMULPDZ256rmk, 0 }, 1977 { X86::VDIVPSZ256rrk, X86::VDIVPSZ256rmk, 0 }, 1978 { X86::VDIVPDZ256rrk, X86::VDIVPDZ256rmk, 0 }, 1979 { X86::VMINPSZ256rrk, X86::VMINPSZ256rmk, 0 }, 1980 { X86::VMINPDZ256rrk, X86::VMINPDZ256rmk, 0 }, 1981 { X86::VMAXPSZ256rrk, X86::VMAXPSZ256rmk, 0 }, 1982 { X86::VMAXPDZ256rrk, X86::VMAXPDZ256rmk, 0 }, 1983 // AVX-512{F,VL} foldable instructions 128-bit 1984 { X86::VADDPSZ128rrk, X86::VADDPSZ128rmk, 0 }, 1985 { X86::VADDPDZ128rrk, X86::VADDPDZ128rmk, 0 }, 1986 { X86::VSUBPSZ128rrk, X86::VSUBPSZ128rmk, 0 }, 1987 { X86::VSUBPDZ128rrk, X86::VSUBPDZ128rmk, 0 }, 1988 { X86::VMULPSZ128rrk, X86::VMULPSZ128rmk, 0 }, 1989 { X86::VMULPDZ128rrk, X86::VMULPDZ128rmk, 0 }, 1990 { X86::VDIVPSZ128rrk, X86::VDIVPSZ128rmk, 0 }, 1991 { X86::VDIVPDZ128rrk, X86::VDIVPDZ128rmk, 0 }, 1992 { X86::VMINPSZ128rrk, X86::VMINPSZ128rmk, 0 }, 1993 { X86::VMINPDZ128rrk, X86::VMINPDZ128rmk, 0 }, 1994 { X86::VMAXPSZ128rrk, X86::VMAXPSZ128rmk, 0 }, 1995 { X86::VMAXPDZ128rrk, X86::VMAXPDZ128rmk, 0 } 1996 }; 1997 1998 for (unsigned i = 0, e = array_lengthof(MemoryFoldTable4); i != e; ++i) { 1999 unsigned RegOp = MemoryFoldTable4[i].RegOp; 2000 unsigned MemOp = MemoryFoldTable4[i].MemOp; 2001 unsigned Flags = MemoryFoldTable4[i].Flags; 2002 AddTableEntry(RegOp2MemOpTable4, MemOp2RegOpTable, 2003 RegOp, MemOp, 2004 // Index 4, folded load 2005 Flags | TB_INDEX_4 | TB_FOLDED_LOAD); 2006 } 2007 } 2008 2009 void 2010 X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable, 2011 MemOp2RegOpTableType &M2RTable, 2012 unsigned RegOp, unsigned MemOp, unsigned Flags) { 2013 if ((Flags & TB_NO_FORWARD) == 0) { 2014 assert(!R2MTable.count(RegOp) && "Duplicate entry!"); 2015 R2MTable[RegOp] = std::make_pair(MemOp, Flags); 2016 } 2017 if ((Flags & TB_NO_REVERSE) == 0) { 2018 assert(!M2RTable.count(MemOp) && 2019 "Duplicated entries in unfolding maps?"); 2020 M2RTable[MemOp] = std::make_pair(RegOp, Flags); 2021 } 2022 } 2023 2024 bool 2025 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, 2026 unsigned &SrcReg, unsigned &DstReg, 2027 unsigned &SubIdx) const { 2028 switch (MI.getOpcode()) { 2029 default: break; 2030 case X86::MOVSX16rr8: 2031 case X86::MOVZX16rr8: 2032 case X86::MOVSX32rr8: 2033 case X86::MOVZX32rr8: 2034 case X86::MOVSX64rr8: 2035 if (!Subtarget.is64Bit()) 2036 // It's not always legal to reference the low 8-bit of the larger 2037 // register in 32-bit mode. 2038 return false; 2039 case X86::MOVSX32rr16: 2040 case X86::MOVZX32rr16: 2041 case X86::MOVSX64rr16: 2042 case X86::MOVSX64rr32: { 2043 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) 2044 // Be conservative. 
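// (An operand that already carries a sub-register index cannot be described
// by the single SubIdx value we report back to the coalescer.)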
2045       return false;
2046     SrcReg = MI.getOperand(1).getReg();
2047     DstReg = MI.getOperand(0).getReg();
2048     switch (MI.getOpcode()) {
2049     default: llvm_unreachable("Unreachable!");
2050     case X86::MOVSX16rr8:
2051     case X86::MOVZX16rr8:
2052     case X86::MOVSX32rr8:
2053     case X86::MOVZX32rr8:
2054     case X86::MOVSX64rr8:
2055       SubIdx = X86::sub_8bit;
2056       break;
2057     case X86::MOVSX32rr16:
2058     case X86::MOVZX32rr16:
2059     case X86::MOVSX64rr16:
2060       SubIdx = X86::sub_16bit;
2061       break;
2062     case X86::MOVSX64rr32:
2063       SubIdx = X86::sub_32bit;
2064       break;
2065     }
2066     return true;
2067   }
2068   }
2069   return false;
2070 }
2071 
2072 int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
2073   const MachineFunction *MF = MI->getParent()->getParent();
2074   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2075 
2076   if (MI->getOpcode() == getCallFrameSetupOpcode() ||
2077       MI->getOpcode() == getCallFrameDestroyOpcode()) {
2078     unsigned StackAlign = TFI->getStackAlignment();
2079     int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
2080                 StackAlign;
2081 
2082     SPAdj -= MI->getOperand(1).getImm();
2083 
2084     if (MI->getOpcode() == getCallFrameSetupOpcode())
2085       return SPAdj;
2086     else
2087       return -SPAdj;
2088   }
2089 
2090   // To know whether a call adjusts the stack, we need information
2091   // that is bound to the following ADJCALLSTACKUP pseudo.
2092   // Look for the next ADJCALLSTACKUP that follows the call.
2093   if (MI->isCall()) {
2094     const MachineBasicBlock* MBB = MI->getParent();
2095     auto I = ++MachineBasicBlock::const_iterator(MI);
2096     for (auto E = MBB->end(); I != E; ++I) {
2097       if (I->getOpcode() == getCallFrameDestroyOpcode() ||
2098           I->isCall())
2099         break;
2100     }
2101 
2102     // If we could not find a frame destroy opcode, then it has already
2103     // been simplified, so we don't care.
2104     if (I->getOpcode() != getCallFrameDestroyOpcode())
2105       return 0;
2106 
2107     return -(I->getOperand(1).getImm());
2108   }
2109 
2110   // Currently we handle only the PUSHes we can reasonably expect to see
2111   // in call sequences.
2112   switch (MI->getOpcode()) {
2113   default:
2114     return 0;
2115   case X86::PUSH32i8:
2116   case X86::PUSH32r:
2117   case X86::PUSH32rmm:
2118   case X86::PUSH32rmr:
2119   case X86::PUSHi32:
2120     return 4;
2121   }
2122 }
2123 
2124 /// Return true and the FrameIndex if the specified
2125 /// operand and following operands form a reference to the stack frame.
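/// A reference recognized here is a plain frame-slot access, i.e. a memory
/// operand of the form (base = <fi#N>, scale = 1, index = %noreg, disp = 0);
/// for example, the address of a simple spill or reload of a stack slot.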
2126 bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op, 2127 int &FrameIndex) const { 2128 if (MI->getOperand(Op+X86::AddrBaseReg).isFI() && 2129 MI->getOperand(Op+X86::AddrScaleAmt).isImm() && 2130 MI->getOperand(Op+X86::AddrIndexReg).isReg() && 2131 MI->getOperand(Op+X86::AddrDisp).isImm() && 2132 MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 && 2133 MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 && 2134 MI->getOperand(Op+X86::AddrDisp).getImm() == 0) { 2135 FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex(); 2136 return true; 2137 } 2138 return false; 2139 } 2140 2141 static bool isFrameLoadOpcode(int Opcode) { 2142 switch (Opcode) { 2143 default: 2144 return false; 2145 case X86::MOV8rm: 2146 case X86::MOV16rm: 2147 case X86::MOV32rm: 2148 case X86::MOV64rm: 2149 case X86::LD_Fp64m: 2150 case X86::MOVSSrm: 2151 case X86::MOVSDrm: 2152 case X86::MOVAPSrm: 2153 case X86::MOVAPDrm: 2154 case X86::MOVDQArm: 2155 case X86::VMOVSSrm: 2156 case X86::VMOVSDrm: 2157 case X86::VMOVAPSrm: 2158 case X86::VMOVAPDrm: 2159 case X86::VMOVDQArm: 2160 case X86::VMOVUPSYrm: 2161 case X86::VMOVAPSYrm: 2162 case X86::VMOVUPDYrm: 2163 case X86::VMOVAPDYrm: 2164 case X86::VMOVDQUYrm: 2165 case X86::VMOVDQAYrm: 2166 case X86::MMX_MOVD64rm: 2167 case X86::MMX_MOVQ64rm: 2168 case X86::VMOVAPSZrm: 2169 case X86::VMOVUPSZrm: 2170 return true; 2171 } 2172 } 2173 2174 static bool isFrameStoreOpcode(int Opcode) { 2175 switch (Opcode) { 2176 default: break; 2177 case X86::MOV8mr: 2178 case X86::MOV16mr: 2179 case X86::MOV32mr: 2180 case X86::MOV64mr: 2181 case X86::ST_FpP64m: 2182 case X86::MOVSSmr: 2183 case X86::MOVSDmr: 2184 case X86::MOVAPSmr: 2185 case X86::MOVAPDmr: 2186 case X86::MOVDQAmr: 2187 case X86::VMOVSSmr: 2188 case X86::VMOVSDmr: 2189 case X86::VMOVAPSmr: 2190 case X86::VMOVAPDmr: 2191 case X86::VMOVDQAmr: 2192 case X86::VMOVUPSYmr: 2193 case X86::VMOVAPSYmr: 2194 case X86::VMOVUPDYmr: 2195 case X86::VMOVAPDYmr: 2196 case X86::VMOVDQUYmr: 2197 case X86::VMOVDQAYmr: 2198 case X86::VMOVUPSZmr: 2199 case X86::VMOVAPSZmr: 2200 case X86::MMX_MOVD64mr: 2201 case X86::MMX_MOVQ64mr: 2202 case X86::MMX_MOVNTQmr: 2203 return true; 2204 } 2205 return false; 2206 } 2207 2208 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 2209 int &FrameIndex) const { 2210 if (isFrameLoadOpcode(MI->getOpcode())) 2211 if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex)) 2212 return MI->getOperand(0).getReg(); 2213 return 0; 2214 } 2215 2216 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, 2217 int &FrameIndex) const { 2218 if (isFrameLoadOpcode(MI->getOpcode())) { 2219 unsigned Reg; 2220 if ((Reg = isLoadFromStackSlot(MI, FrameIndex))) 2221 return Reg; 2222 // Check for post-frame index elimination operations 2223 const MachineMemOperand *Dummy; 2224 return hasLoadFromStackSlot(MI, Dummy, FrameIndex); 2225 } 2226 return 0; 2227 } 2228 2229 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI, 2230 int &FrameIndex) const { 2231 if (isFrameStoreOpcode(MI->getOpcode())) 2232 if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 && 2233 isFrameOperand(MI, 0, FrameIndex)) 2234 return MI->getOperand(X86::AddrNumOperands).getReg(); 2235 return 0; 2236 } 2237 2238 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI, 2239 int &FrameIndex) const { 2240 if (isFrameStoreOpcode(MI->getOpcode())) { 2241 unsigned Reg; 2242 if ((Reg = isStoreToStackSlot(MI, FrameIndex))) 2243 return Reg; 2244 // Check for 
post-frame index elimination operations.
2245     const MachineMemOperand *Dummy;
2246     return hasStoreToStackSlot(MI, Dummy, FrameIndex);
2247   }
2248   return 0;
2249 }
2250 
2251 /// Return true if the register is a PIC base, i.e., defined by X86::MOVPC32r.
2252 static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
2253   // Don't waste compile time scanning use-def chains of physregs.
2254   if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
2255     return false;
2256   bool isPICBase = false;
2257   for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
2258        E = MRI.def_instr_end(); I != E; ++I) {
2259     MachineInstr *DefMI = &*I;
2260     if (DefMI->getOpcode() != X86::MOVPC32r)
2261       return false;
2262     assert(!isPICBase && "More than one PIC base?");
2263     isPICBase = true;
2264   }
2265   return isPICBase;
2266 }
2267 
2268 bool
2269 X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
2270                                                 AliasAnalysis *AA) const {
2271   switch (MI->getOpcode()) {
2272   default: break;
2273   case X86::MOV8rm:
2274   case X86::MOV16rm:
2275   case X86::MOV32rm:
2276   case X86::MOV64rm:
2277   case X86::LD_Fp64m:
2278   case X86::MOVSSrm:
2279   case X86::MOVSDrm:
2280   case X86::MOVAPSrm:
2281   case X86::MOVUPSrm:
2282   case X86::MOVAPDrm:
2283   case X86::MOVDQArm:
2284   case X86::MOVDQUrm:
2285   case X86::VMOVSSrm:
2286   case X86::VMOVSDrm:
2287   case X86::VMOVAPSrm:
2288   case X86::VMOVUPSrm:
2289   case X86::VMOVAPDrm:
2290   case X86::VMOVDQArm:
2291   case X86::VMOVDQUrm:
2292   case X86::VMOVAPSYrm:
2293   case X86::VMOVUPSYrm:
2294   case X86::VMOVAPDYrm:
2295   case X86::VMOVDQAYrm:
2296   case X86::VMOVDQUYrm:
2297   case X86::MMX_MOVD64rm:
2298   case X86::MMX_MOVQ64rm:
2299   case X86::FsVMOVAPSrm:
2300   case X86::FsVMOVAPDrm:
2301   case X86::FsMOVAPSrm:
2302   case X86::FsMOVAPDrm: {
2303     // Loads from constant pools are trivially rematerializable.
2304     if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
2305         MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
2306         MI->getOperand(1+X86::AddrIndexReg).isReg() &&
2307         MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
2308         MI->isInvariantLoad(AA)) {
2309       unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
2310       if (BaseReg == 0 || BaseReg == X86::RIP)
2311         return true;
2312       // Allow re-materialization of PIC load.
2313       if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
2314         return false;
2315       const MachineFunction &MF = *MI->getParent()->getParent();
2316       const MachineRegisterInfo &MRI = MF.getRegInfo();
2317       return regIsPICBase(BaseReg, MRI);
2318     }
2319     return false;
2320   }
2321 
2322   case X86::LEA32r:
2323   case X86::LEA64r: {
2324     if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
2325         MI->getOperand(1+X86::AddrIndexReg).isReg() &&
2326         MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
2327         !MI->getOperand(1+X86::AddrDisp).isReg()) {
2328       // lea fi#, lea GV, etc. are all rematerializable.
2329       if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
2330         return true;
2331       unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
2332       if (BaseReg == 0)
2333         return true;
2334       // Allow re-materialization of lea PICBase + x.
2335       const MachineFunction &MF = *MI->getParent()->getParent();
2336       const MachineRegisterInfo &MRI = MF.getRegInfo();
2337       return regIsPICBase(BaseReg, MRI);
2338     }
2339     return false;
2340   }
2341   }
2342 
2343   // All other instructions marked M_REMATERIALIZABLE are always trivially
2344   // rematerializable.
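  // For example, a constant-pool load such as
  //   movss .LCPI0_0(%rip), %xmm0
  // can simply be re-executed at its use points instead of being spilled and
  // reloaded, since the memory it reads is invariant.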
2345   return true;
2346 }
2347 
2348 bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
2349                                          MachineBasicBlock::iterator I) const {
2350   MachineBasicBlock::iterator E = MBB.end();
2351 
2352   // For compile-time considerations, if we are not able to determine the
2353   // safety after visiting 4 instructions in each direction, we will assume
2354   // it's not safe.
2355   MachineBasicBlock::iterator Iter = I;
2356   for (unsigned i = 0; Iter != E && i < 4; ++i) {
2357     bool SeenDef = false;
2358     for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
2359       MachineOperand &MO = Iter->getOperand(j);
2360       if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
2361         SeenDef = true;
2362       if (!MO.isReg())
2363         continue;
2364       if (MO.getReg() == X86::EFLAGS) {
2365         if (MO.isUse())
2366           return false;
2367         SeenDef = true;
2368       }
2369     }
2370 
2371     if (SeenDef)
2372       // This instruction defines EFLAGS; no need to look any further.
2373       return true;
2374     ++Iter;
2375     // Skip over DBG_VALUE.
2376     while (Iter != E && Iter->isDebugValue())
2377       ++Iter;
2378   }
2379 
2380   // It is safe to clobber EFLAGS at the end of a block if no successor has it
2381   // live in.
2382   if (Iter == E) {
2383     for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
2384          SE = MBB.succ_end(); SI != SE; ++SI)
2385       if ((*SI)->isLiveIn(X86::EFLAGS))
2386         return false;
2387     return true;
2388   }
2389 
2390   MachineBasicBlock::iterator B = MBB.begin();
2391   Iter = I;
2392   for (unsigned i = 0; i < 4; ++i) {
2393     // If we make it to the beginning of the block, it's safe to clobber
2394     // EFLAGS iff EFLAGS is not live-in.
2395     if (Iter == B)
2396       return !MBB.isLiveIn(X86::EFLAGS);
2397 
2398     --Iter;
2399     // Skip over DBG_VALUE.
2400     while (Iter != B && Iter->isDebugValue())
2401       --Iter;
2402 
2403     bool SawKill = false;
2404     for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
2405       MachineOperand &MO = Iter->getOperand(j);
2406       // A register mask may clobber EFLAGS, but we should still look for a
2407       // live EFLAGS def.
2408       if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
2409         SawKill = true;
2410       if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
2411         if (MO.isDef()) return MO.isDead();
2412         if (MO.isKill()) SawKill = true;
2413       }
2414     }
2415 
2416     if (SawKill)
2417       // This instruction kills EFLAGS and doesn't redefine it, so
2418       // there's no need to look further.
2419       return true;
2420   }
2421 
2422   // Conservative answer.
2423   return false;
2424 }
2425 
2426 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
2427                                  MachineBasicBlock::iterator I,
2428                                  unsigned DestReg, unsigned SubIdx,
2429                                  const MachineInstr *Orig,
2430                                  const TargetRegisterInfo &TRI) const {
2431   // MOV32r0 is implemented with an XOR, which clobbers the condition codes.
2432   // Re-materialize it as a MOV32ri instruction to avoid the side effects.
2433   unsigned Opc = Orig->getOpcode();
2434   if (Opc == X86::MOV32r0 && !isSafeToClobberEFLAGS(MBB, I)) {
2435     DebugLoc DL = Orig->getDebugLoc();
2436     BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
2437       .addImm(0);
2438   } else {
2439     MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
2440     MBB.insert(I, MI);
2441   }
2442 
2443   MachineInstr *NewMI = std::prev(I);
2444   NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
2445 }
2446 
2447 /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
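/// For example, ADD32rr defines EFLAGS; if a later SETcc or Jcc reads those
/// flags, the def is live, and the instruction must not be replaced by a form
/// (such as LEA) that does not set the flags.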
2448 static bool hasLiveCondCodeDef(MachineInstr *MI) {
2449   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2450     MachineOperand &MO = MI->getOperand(i);
2451     if (MO.isReg() && MO.isDef() &&
2452         MO.getReg() == X86::EFLAGS && !MO.isDead()) {
2453       return true;
2454     }
2455   }
2456   return false;
2457 }
2458 
2459 /// Return the shift count of a machine operand, truncated as the hardware would.
2460 inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
2461                                               unsigned ShiftAmtOperandIdx) {
2462   // The shift count is six bits with the REX.W prefix and five bits without.
2463   unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
2464   unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
2465   return Imm & ShiftCountMask;
2466 }
2467 
2468 /// Check whether the given shift count can be represented as the scale
2469 /// factor of a LEA instruction.
2470 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
2471   // Left shift instructions can be transformed into load-effective-address
2472   // instructions if we can encode them appropriately.
2473   // A LEA instruction utilizes a SIB byte to encode its scale factor.
2474   // The SIB.scale field is two bits wide, which means that we can encode any
2475   // shift amount less than 4.
2476   return ShAmt < 4 && ShAmt > 0;
2477 }
2478 
2479 bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
2480                                   unsigned Opc, bool AllowSP,
2481                                   unsigned &NewSrc, bool &isKill, bool &isUndef,
2482                                   MachineOperand &ImplicitOp) const {
2483   MachineFunction &MF = *MI->getParent()->getParent();
2484   const TargetRegisterClass *RC;
2485   if (AllowSP) {
2486     RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
2487   } else {
2488     RC = Opc != X86::LEA32r ?
2489       &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
2490   }
2491   unsigned SrcReg = Src.getReg();
2492 
2493   // For both LEA64 and LEA32 the register already has essentially the right
2494   // type (32-bit or 64-bit); we may just need to forbid SP.
2495   if (Opc != X86::LEA64_32r) {
2496     NewSrc = SrcReg;
2497     isKill = Src.isKill();
2498     isUndef = Src.isUndef();
2499 
2500     if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
2501         !MF.getRegInfo().constrainRegClass(NewSrc, RC))
2502       return false;
2503 
2504     return true;
2505   }
2506 
2507   // This is for LEA64_32r: the incoming registers are 32-bit. One way or
2508   // another we need to add 64-bit registers to the final MI.
2509   if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
2510     ImplicitOp = Src;
2511     ImplicitOp.setImplicit();
2512 
2513     NewSrc = getX86SubSuperRegister(Src.getReg(), MVT::i64);
2514     MachineBasicBlock::LivenessQueryResult LQR =
2515       MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
2516 
2517     switch (LQR) {
2518     case MachineBasicBlock::LQR_Unknown:
2519       // We can't give sane liveness flags to the instruction; abandon LEA
2520       // formation.
2521       return false;
2522     case MachineBasicBlock::LQR_Live:
2523       isKill = MI->killsRegister(SrcReg);
2524       isUndef = false;
2525       break;
2526     default:
2527       // The physreg itself is dead, so we have to use it as an <undef>.
2528       isKill = false;
2529       isUndef = true;
2530       break;
2531     }
2532   } else {
2533     // Virtual register of the wrong class; we have to create a temporary
2534     // 64-bit vreg to feed into the LEA.
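    // The COPY built below defines only the low 32 bits of the new vreg and
    // leaves the upper half undef, i.e. roughly:
    //   %temp:sub_32bit<def,undef> = COPY %src32
    // (register names here are illustrative).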
2535     NewSrc = MF.getRegInfo().createVirtualRegister(RC);
2536     BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
2537             get(TargetOpcode::COPY))
2538       .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
2539       .addOperand(Src);
2540 
2541     // Which is obviously going to be dead after we're done with it.
2542     isKill = true;
2543     isUndef = false;
2544   }
2545 
2546   // We've set all the parameters without issue.
2547   return true;
2548 }
2549 
2550 /// Helper for convertToThreeAddress when 16-bit LEA is disabled: use a 32-bit
2551 /// LEA to form 3-address code by promoting to a 32-bit superregister and then
2552 /// truncating back down to a 16-bit subregister.
2553 MachineInstr *
2554 X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
2555                                            MachineFunction::iterator &MFI,
2556                                            MachineBasicBlock::iterator &MBBI,
2557                                            LiveVariables *LV) const {
2558   MachineInstr *MI = MBBI;
2559   unsigned Dest = MI->getOperand(0).getReg();
2560   unsigned Src = MI->getOperand(1).getReg();
2561   bool isDead = MI->getOperand(0).isDead();
2562   bool isKill = MI->getOperand(1).isKill();
2563 
2564   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
2565   unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
2566   unsigned Opc, leaInReg;
2567   if (Subtarget.is64Bit()) {
2568     Opc = X86::LEA64_32r;
2569     leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
2570   } else {
2571     Opc = X86::LEA32r;
2572     leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
2573   }
2574 
2575   // Build and insert into an implicit UNDEF value. This is OK because
2576   // we'll be shifting and then extracting the lower 16 bits.
2577   // This has the potential to cause a partial register stall, e.g.:
2578   //   movw (%rbp,%rcx,2), %dx
2579   //   leal -65(%rdx), %esi
2580   // But testing has shown this *does* help performance in 64-bit mode (at
2581   // least on modern x86 machines).
2582   BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
2583   MachineInstr *InsMI =
2584     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
2585     .addReg(leaInReg, RegState::Define, X86::sub_16bit)
2586     .addReg(Src, getKillRegState(isKill));
2587 
2588   MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
2589                                     get(Opc), leaOutReg);
2590   switch (MIOpc) {
2591   default: llvm_unreachable("Unreachable!");
2592   case X86::SHL16ri: {
2593     unsigned ShAmt = MI->getOperand(2).getImm();
2594     MIB.addReg(0).addImm(1 << ShAmt)
2595        .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
2596     break;
2597   }
2598   case X86::INC16r:
2599     addRegOffset(MIB, leaInReg, true, 1);
2600     break;
2601   case X86::DEC16r:
2602     addRegOffset(MIB, leaInReg, true, -1);
2603     break;
2604   case X86::ADD16ri:
2605   case X86::ADD16ri8:
2606   case X86::ADD16ri_DB:
2607   case X86::ADD16ri8_DB:
2608     addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
2609     break;
2610   case X86::ADD16rr:
2611   case X86::ADD16rr_DB: {
2612     unsigned Src2 = MI->getOperand(2).getReg();
2613     bool isKill2 = MI->getOperand(2).isKill();
2614     unsigned leaInReg2 = 0;
2615     MachineInstr *InsMI2 = nullptr;
2616     if (Src == Src2) {
2617       // ADD16rr %reg1028<kill>, %reg1028: both sources are the same, so this
2618       // needs just a single insert_subreg.
2619       addRegReg(MIB, leaInReg, true, leaInReg, false);
2620     } else {
2621       if (Subtarget.is64Bit())
2622         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
2623       else
2624         leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
2625       // Build and insert into an implicit UNDEF value. This is OK because
This is OK because 2626 // we'll be shifting and then extracting the lower 16 bits. 2627 BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2); 2628 InsMI2 = 2629 BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY)) 2630 .addReg(leaInReg2, RegState::Define, X86::sub_16bit) 2631 .addReg(Src2, getKillRegState(isKill2)); 2632 addRegReg(MIB, leaInReg, true, leaInReg2, true); 2633 } 2634 if (LV && isKill2 && InsMI2) 2635 LV->replaceKillInstruction(Src2, MI, InsMI2); 2636 break; 2637 } 2638 } 2639 2640 MachineInstr *NewMI = MIB; 2641 MachineInstr *ExtMI = 2642 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY)) 2643 .addReg(Dest, RegState::Define | getDeadRegState(isDead)) 2644 .addReg(leaOutReg, RegState::Kill, X86::sub_16bit); 2645 2646 if (LV) { 2647 // Update live variables 2648 LV->getVarInfo(leaInReg).Kills.push_back(NewMI); 2649 LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI); 2650 if (isKill) 2651 LV->replaceKillInstruction(Src, MI, InsMI); 2652 if (isDead) 2653 LV->replaceKillInstruction(Dest, MI, ExtMI); 2654 } 2655 2656 return ExtMI; 2657 } 2658 2659 /// This method must be implemented by targets that 2660 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target 2661 /// may be able to convert a two-address instruction into a true 2662 /// three-address instruction on demand. This allows the X86 target (for 2663 /// example) to convert ADD and SHL instructions into LEA instructions if they 2664 /// would require register copies due to two-addressness. 2665 /// 2666 /// This method returns a null pointer if the transformation cannot be 2667 /// performed; otherwise it returns the new instruction. 2668 /// 2669 MachineInstr * 2670 X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, 2671 MachineBasicBlock::iterator &MBBI, 2672 LiveVariables *LV) const { 2673 MachineInstr *MI = MBBI; 2674 2675 // The following opcodes also set the condition code register(s). Only 2676 // convert them to an equivalent LEA if the condition code register defs 2677 // are dead! 2678 if (hasLiveCondCodeDef(MI)) 2679 return nullptr; 2680 2681 MachineFunction &MF = *MI->getParent()->getParent(); 2682 // All instructions we handle here are two-address instructions. Get the known operands. 2683 const MachineOperand &Dest = MI->getOperand(0); 2684 const MachineOperand &Src = MI->getOperand(1); 2685 2686 MachineInstr *NewMI = nullptr; 2687 // FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s. When 2688 // we have better subtarget support, enable the 16-bit LEA generation here. 2689 // 16-bit LEA is also slow on Core2. 2690 bool DisableLEA16 = true; 2691 bool is64Bit = Subtarget.is64Bit(); 2692 2693 unsigned MIOpc = MI->getOpcode(); 2694 switch (MIOpc) { 2695 default: return nullptr; 2696 case X86::SHL64ri: { 2697 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2698 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2699 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2700 2701 // LEA can't handle RSP.
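// (RSP is unencodable as a scaled index: the SIB index-field value that
// would name RSP means "no index" instead, which is why the NOSP register
// classes are used here. As an illustrative example of the rewrite itself,
// assuming the shift's flags are dead and register names are hypothetical:
//   shlq $3, %rax  ->  leaq (,%rax,8), %rcx
// where the LEA form is three-address and leaves %rax intact.)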
2702 if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && 2703 !MF.getRegInfo().constrainRegClass(Src.getReg(), 2704 &X86::GR64_NOSPRegClass)) 2705 return nullptr; 2706 2707 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 2708 .addOperand(Dest) 2709 .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); 2710 break; 2711 } 2712 case X86::SHL32ri: { 2713 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2714 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2715 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2716 2717 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2718 2719 // LEA can't handle ESP. 2720 bool isKill, isUndef; 2721 unsigned SrcReg; 2722 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2723 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2724 SrcReg, isKill, isUndef, ImplicitOp)) 2725 return nullptr; 2726 2727 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2728 .addOperand(Dest) 2729 .addReg(0).addImm(1 << ShAmt) 2730 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)) 2731 .addImm(0).addReg(0); 2732 if (ImplicitOp.getReg() != 0) 2733 MIB.addOperand(ImplicitOp); 2734 NewMI = MIB; 2735 2736 break; 2737 } 2738 case X86::SHL16ri: { 2739 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 2740 unsigned ShAmt = getTruncatedShiftCount(MI, 2); 2741 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr; 2742 2743 if (DisableLEA16) 2744 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr; 2745 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2746 .addOperand(Dest) 2747 .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); 2748 break; 2749 } 2750 case X86::INC64r: 2751 case X86::INC32r: { 2752 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 2753 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r 2754 : (is64Bit ? X86::LEA64_32r : X86::LEA32r); 2755 bool isKill, isUndef; 2756 unsigned SrcReg; 2757 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2758 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2759 SrcReg, isKill, isUndef, ImplicitOp)) 2760 return nullptr; 2761 2762 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2763 .addOperand(Dest) 2764 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef)); 2765 if (ImplicitOp.getReg() != 0) 2766 MIB.addOperand(ImplicitOp); 2767 2768 NewMI = addOffset(MIB, 1); 2769 break; 2770 } 2771 case X86::INC16r: 2772 if (DisableLEA16) 2773 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2774 : nullptr; 2775 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 2776 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2777 .addOperand(Dest).addOperand(Src), 1); 2778 break; 2779 case X86::DEC64r: 2780 case X86::DEC32r: { 2781 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 2782 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r 2783 : (is64Bit ? 
X86::LEA64_32r : X86::LEA32r); 2784 2785 bool isKill, isUndef; 2786 unsigned SrcReg; 2787 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2788 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, 2789 SrcReg, isKill, isUndef, ImplicitOp)) 2790 return nullptr; 2791 2792 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2793 .addOperand(Dest) 2794 .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); 2795 if (ImplicitOp.getReg() != 0) 2796 MIB.addOperand(ImplicitOp); 2797 2798 NewMI = addOffset(MIB, -1); 2799 2800 break; 2801 } 2802 case X86::DEC16r: 2803 if (DisableLEA16) 2804 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2805 : nullptr; 2806 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 2807 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2808 .addOperand(Dest).addOperand(Src), -1); 2809 break; 2810 case X86::ADD64rr: 2811 case X86::ADD64rr_DB: 2812 case X86::ADD32rr: 2813 case X86::ADD32rr_DB: { 2814 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2815 unsigned Opc; 2816 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) 2817 Opc = X86::LEA64r; 2818 else 2819 Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2820 2821 bool isKill, isUndef; 2822 unsigned SrcReg; 2823 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2824 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, 2825 SrcReg, isKill, isUndef, ImplicitOp)) 2826 return nullptr; 2827 2828 const MachineOperand &Src2 = MI->getOperand(2); 2829 bool isKill2, isUndef2; 2830 unsigned SrcReg2; 2831 MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false); 2832 if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false, 2833 SrcReg2, isKill2, isUndef2, ImplicitOp2)) 2834 return nullptr; 2835 2836 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2837 .addOperand(Dest); 2838 if (ImplicitOp.getReg() != 0) 2839 MIB.addOperand(ImplicitOp); 2840 if (ImplicitOp2.getReg() != 0) 2841 MIB.addOperand(ImplicitOp2); 2842 2843 NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2); 2844 2845 // Preserve undefness of the operands. 2846 NewMI->getOperand(1).setIsUndef(isUndef); 2847 NewMI->getOperand(3).setIsUndef(isUndef2); 2848 2849 if (LV && Src2.isKill()) 2850 LV->replaceKillInstruction(SrcReg2, MI, NewMI); 2851 break; 2852 } 2853 case X86::ADD16rr: 2854 case X86::ADD16rr_DB: { 2855 if (DisableLEA16) 2856 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2857 : nullptr; 2858 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2859 unsigned Src2 = MI->getOperand(2).getReg(); 2860 bool isKill2 = MI->getOperand(2).isKill(); 2861 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2862 .addOperand(Dest), 2863 Src.getReg(), Src.isKill(), Src2, isKill2); 2864 2865 // Preserve undefness of the operands. 
2866 bool isUndef = MI->getOperand(1).isUndef(); 2867 bool isUndef2 = MI->getOperand(2).isUndef(); 2868 NewMI->getOperand(1).setIsUndef(isUndef); 2869 NewMI->getOperand(3).setIsUndef(isUndef2); 2870 2871 if (LV && isKill2) 2872 LV->replaceKillInstruction(Src2, MI, NewMI); 2873 break; 2874 } 2875 case X86::ADD64ri32: 2876 case X86::ADD64ri8: 2877 case X86::ADD64ri32_DB: 2878 case X86::ADD64ri8_DB: 2879 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2880 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 2881 .addOperand(Dest).addOperand(Src), 2882 MI->getOperand(2).getImm()); 2883 break; 2884 case X86::ADD32ri: 2885 case X86::ADD32ri8: 2886 case X86::ADD32ri_DB: 2887 case X86::ADD32ri8_DB: { 2888 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2889 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2890 2891 bool isKill, isUndef; 2892 unsigned SrcReg; 2893 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false); 2894 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true, 2895 SrcReg, isKill, isUndef, ImplicitOp)) 2896 return nullptr; 2897 2898 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2899 .addOperand(Dest) 2900 .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill)); 2901 if (ImplicitOp.getReg() != 0) 2902 MIB.addOperand(ImplicitOp); 2903 2904 NewMI = addOffset(MIB, MI->getOperand(2).getImm()); 2905 break; 2906 } 2907 case X86::ADD16ri: 2908 case X86::ADD16ri8: 2909 case X86::ADD16ri_DB: 2910 case X86::ADD16ri8_DB: 2911 if (DisableLEA16) 2912 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) 2913 : nullptr; 2914 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2915 NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2916 .addOperand(Dest).addOperand(Src), 2917 MI->getOperand(2).getImm()); 2918 break; 2919 } 2920 2921 if (!NewMI) return nullptr; 2922 2923 if (LV) { // Update live variables 2924 if (Src.isKill()) 2925 LV->replaceKillInstruction(Src.getReg(), MI, NewMI); 2926 if (Dest.isDead()) 2927 LV->replaceKillInstruction(Dest.getReg(), MI, NewMI); 2928 } 2929 2930 MFI->insert(MBBI, NewMI); // Insert the new inst 2931 return NewMI; 2932 } 2933 2934 /// We have a few instructions that must be hacked on to commute them. 
2935 /// 2936 MachineInstr * 2937 X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { 2938 switch (MI->getOpcode()) { 2939 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I) 2940 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I) 2941 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I) 2942 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I) 2943 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I) 2944 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I) 2945 unsigned Opc; 2946 unsigned Size; 2947 switch (MI->getOpcode()) { 2948 default: llvm_unreachable("Unreachable!"); 2949 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; 2950 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; 2951 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; 2952 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; 2953 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; 2954 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; 2955 } 2956 unsigned Amt = MI->getOperand(3).getImm(); 2957 if (NewMI) { 2958 MachineFunction &MF = *MI->getParent()->getParent(); 2959 MI = MF.CloneMachineInstr(MI); 2960 NewMI = false; 2961 } 2962 MI->setDesc(get(Opc)); 2963 MI->getOperand(3).setImm(Size-Amt); 2964 return TargetInstrInfo::commuteInstruction(MI, NewMI); 2965 } 2966 case X86::BLENDPDrri: 2967 case X86::BLENDPSrri: 2968 case X86::PBLENDWrri: 2969 case X86::VBLENDPDrri: 2970 case X86::VBLENDPSrri: 2971 case X86::VBLENDPDYrri: 2972 case X86::VBLENDPSYrri: 2973 case X86::VPBLENDDrri: 2974 case X86::VPBLENDWrri: 2975 case X86::VPBLENDDYrri: 2976 case X86::VPBLENDWYrri:{ 2977 unsigned Mask; 2978 switch (MI->getOpcode()) { 2979 default: llvm_unreachable("Unreachable!"); 2980 case X86::BLENDPDrri: Mask = 0x03; break; 2981 case X86::BLENDPSrri: Mask = 0x0F; break; 2982 case X86::PBLENDWrri: Mask = 0xFF; break; 2983 case X86::VBLENDPDrri: Mask = 0x03; break; 2984 case X86::VBLENDPSrri: Mask = 0x0F; break; 2985 case X86::VBLENDPDYrri: Mask = 0x0F; break; 2986 case X86::VBLENDPSYrri: Mask = 0xFF; break; 2987 case X86::VPBLENDDrri: Mask = 0x0F; break; 2988 case X86::VPBLENDWrri: Mask = 0xFF; break; 2989 case X86::VPBLENDDYrri: Mask = 0xFF; break; 2990 case X86::VPBLENDWYrri: Mask = 0xFF; break; 2991 } 2992 // Only the least significant bits of Imm are used. 2993 unsigned Imm = MI->getOperand(3).getImm() & Mask; 2994 if (NewMI) { 2995 MachineFunction &MF = *MI->getParent()->getParent(); 2996 MI = MF.CloneMachineInstr(MI); 2997 NewMI = false; 2998 } 2999 MI->getOperand(3).setImm(Mask ^ Imm); 3000 return TargetInstrInfo::commuteInstruction(MI, NewMI); 3001 } 3002 case X86::PCLMULQDQrr: 3003 case X86::VPCLMULQDQrr:{ 3004 // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0] 3005 // SRC2 64bits = Imm[4] ? 
SRC2[127:64] : SRC2[63:0] 3006 unsigned Imm = MI->getOperand(3).getImm(); 3007 unsigned Src1Hi = Imm & 0x01; 3008 unsigned Src2Hi = Imm & 0x10; 3009 if (NewMI) { 3010 MachineFunction &MF = *MI->getParent()->getParent(); 3011 MI = MF.CloneMachineInstr(MI); 3012 NewMI = false; 3013 } 3014 MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4)); 3015 return TargetInstrInfo::commuteInstruction(MI, NewMI); 3016 } 3017 case X86::CMPPDrri: 3018 case X86::CMPPSrri: 3019 case X86::VCMPPDrri: 3020 case X86::VCMPPSrri: 3021 case X86::VCMPPDYrri: 3022 case X86::VCMPPSYrri: { 3023 // Float comparison can be safely commuted for 3024 // Ordered/Unordered/Equal/NotEqual tests 3025 unsigned Imm = MI->getOperand(3).getImm() & 0x7; 3026 switch (Imm) { 3027 case 0x00: // EQUAL 3028 case 0x03: // UNORDERED 3029 case 0x04: // NOT EQUAL 3030 case 0x07: // ORDERED 3031 if (NewMI) { 3032 MachineFunction &MF = *MI->getParent()->getParent(); 3033 MI = MF.CloneMachineInstr(MI); 3034 NewMI = false; 3035 } 3036 return TargetInstrInfo::commuteInstruction(MI, NewMI); 3037 default: 3038 return nullptr; 3039 } 3040 } 3041 case X86::VPCOMBri: case X86::VPCOMUBri: 3042 case X86::VPCOMDri: case X86::VPCOMUDri: 3043 case X86::VPCOMQri: case X86::VPCOMUQri: 3044 case X86::VPCOMWri: case X86::VPCOMUWri: { 3045 // Flip comparison mode immediate (if necessary). 3046 unsigned Imm = MI->getOperand(3).getImm() & 0x7; 3047 switch (Imm) { 3048 case 0x00: Imm = 0x02; break; // LT -> GT 3049 case 0x01: Imm = 0x03; break; // LE -> GE 3050 case 0x02: Imm = 0x00; break; // GT -> LT 3051 case 0x03: Imm = 0x01; break; // GE -> LE 3052 case 0x04: // EQ 3053 case 0x05: // NE 3054 case 0x06: // FALSE 3055 case 0x07: // TRUE 3056 default: 3057 break; 3058 } 3059 if (NewMI) { 3060 MachineFunction &MF = *MI->getParent()->getParent(); 3061 MI = MF.CloneMachineInstr(MI); 3062 NewMI = false; 3063 } 3064 MI->getOperand(3).setImm(Imm); 3065 return TargetInstrInfo::commuteInstruction(MI, NewMI); 3066 } 3067 case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr: 3068 case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr: 3069 case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: 3070 case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr: 3071 case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr: 3072 case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr: 3073 case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr: 3074 case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr: 3075 case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr: 3076 case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr: 3077 case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr: 3078 case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr: 3079 case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr: 3080 case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: 3081 case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr: 3082 case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: { 3083 unsigned Opc; 3084 switch (MI->getOpcode()) { 3085 default: llvm_unreachable("Unreachable!"); 3086 case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break; 3087 case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break; 3088 case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break; 3089 case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break; 3090 case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break; 3091 case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break; 3092 case X86::CMOVE16rr: Opc = 
X86::CMOVNE16rr; break; 3093 case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break; 3094 case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break; 3095 case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break; 3096 case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break; 3097 case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break; 3098 case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break; 3099 case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break; 3100 case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break; 3101 case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break; 3102 case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break; 3103 case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break; 3104 case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break; 3105 case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break; 3106 case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break; 3107 case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break; 3108 case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break; 3109 case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break; 3110 case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break; 3111 case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break; 3112 case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break; 3113 case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break; 3114 case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break; 3115 case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break; 3116 case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break; 3117 case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break; 3118 case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break; 3119 case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break; 3120 case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break; 3121 case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break; 3122 case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break; 3123 case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break; 3124 case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break; 3125 case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break; 3126 case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break; 3127 case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break; 3128 case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break; 3129 case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break; 3130 case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break; 3131 case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break; 3132 case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break; 3133 case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break; 3134 } 3135 if (NewMI) { 3136 MachineFunction &MF = *MI->getParent()->getParent(); 3137 MI = MF.CloneMachineInstr(MI); 3138 NewMI = false; 3139 } 3140 MI->setDesc(get(Opc)); 3141 // Fallthrough intended. 
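// (Changing the opcode only inverts the condition; the two register
// operands still need to be swapped, which is what falling through to the
// generic TargetInstrInfo::commuteInstruction call below accomplishes.)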
3142 } 3143 default: 3144 return TargetInstrInfo::commuteInstruction(MI, NewMI); 3145 } 3146 } 3147 3148 bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1, 3149 unsigned &SrcOpIdx2) const { 3150 switch (MI->getOpcode()) { 3151 case X86::CMPPDrri: 3152 case X86::CMPPSrri: 3153 case X86::VCMPPDrri: 3154 case X86::VCMPPSrri: 3155 case X86::VCMPPDYrri: 3156 case X86::VCMPPSYrri: { 3157 // Float comparison can be safely commuted for 3158 // Ordered/Unordered/Equal/NotEqual tests 3159 unsigned Imm = MI->getOperand(3).getImm() & 0x7; 3160 switch (Imm) { 3161 case 0x00: // EQUAL 3162 case 0x03: // UNORDERED 3163 case 0x04: // NOT EQUAL 3164 case 0x07: // ORDERED 3165 SrcOpIdx1 = 1; 3166 SrcOpIdx2 = 2; 3167 return true; 3168 } 3169 return false; 3170 } 3171 case X86::VFMADDPDr231r: 3172 case X86::VFMADDPSr231r: 3173 case X86::VFMADDSDr231r: 3174 case X86::VFMADDSSr231r: 3175 case X86::VFMSUBPDr231r: 3176 case X86::VFMSUBPSr231r: 3177 case X86::VFMSUBSDr231r: 3178 case X86::VFMSUBSSr231r: 3179 case X86::VFNMADDPDr231r: 3180 case X86::VFNMADDPSr231r: 3181 case X86::VFNMADDSDr231r: 3182 case X86::VFNMADDSSr231r: 3183 case X86::VFNMSUBPDr231r: 3184 case X86::VFNMSUBPSr231r: 3185 case X86::VFNMSUBSDr231r: 3186 case X86::VFNMSUBSSr231r: 3187 case X86::VFMADDPDr231rY: 3188 case X86::VFMADDPSr231rY: 3189 case X86::VFMSUBPDr231rY: 3190 case X86::VFMSUBPSr231rY: 3191 case X86::VFNMADDPDr231rY: 3192 case X86::VFNMADDPSr231rY: 3193 case X86::VFNMSUBPDr231rY: 3194 case X86::VFNMSUBPSr231rY: 3195 SrcOpIdx1 = 2; 3196 SrcOpIdx2 = 3; 3197 return true; 3198 default: 3199 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2); 3200 } 3201 } 3202 3203 static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) { 3204 switch (BrOpc) { 3205 default: return X86::COND_INVALID; 3206 case X86::JE_1: return X86::COND_E; 3207 case X86::JNE_1: return X86::COND_NE; 3208 case X86::JL_1: return X86::COND_L; 3209 case X86::JLE_1: return X86::COND_LE; 3210 case X86::JG_1: return X86::COND_G; 3211 case X86::JGE_1: return X86::COND_GE; 3212 case X86::JB_1: return X86::COND_B; 3213 case X86::JBE_1: return X86::COND_BE; 3214 case X86::JA_1: return X86::COND_A; 3215 case X86::JAE_1: return X86::COND_AE; 3216 case X86::JS_1: return X86::COND_S; 3217 case X86::JNS_1: return X86::COND_NS; 3218 case X86::JP_1: return X86::COND_P; 3219 case X86::JNP_1: return X86::COND_NP; 3220 case X86::JO_1: return X86::COND_O; 3221 case X86::JNO_1: return X86::COND_NO; 3222 } 3223 } 3224 3225 /// Return condition code of a SET opcode. 
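/// For example, getCondFromSETOpc(X86::SETEr) and
/// getCondFromSETOpc(X86::SETEm) both return X86::COND_E; the register and
/// memory forms of each SETcc map to the same condition code.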
3226 static X86::CondCode getCondFromSETOpc(unsigned Opc) { 3227 switch (Opc) { 3228 default: return X86::COND_INVALID; 3229 case X86::SETAr: case X86::SETAm: return X86::COND_A; 3230 case X86::SETAEr: case X86::SETAEm: return X86::COND_AE; 3231 case X86::SETBr: case X86::SETBm: return X86::COND_B; 3232 case X86::SETBEr: case X86::SETBEm: return X86::COND_BE; 3233 case X86::SETEr: case X86::SETEm: return X86::COND_E; 3234 case X86::SETGr: case X86::SETGm: return X86::COND_G; 3235 case X86::SETGEr: case X86::SETGEm: return X86::COND_GE; 3236 case X86::SETLr: case X86::SETLm: return X86::COND_L; 3237 case X86::SETLEr: case X86::SETLEm: return X86::COND_LE; 3238 case X86::SETNEr: case X86::SETNEm: return X86::COND_NE; 3239 case X86::SETNOr: case X86::SETNOm: return X86::COND_NO; 3240 case X86::SETNPr: case X86::SETNPm: return X86::COND_NP; 3241 case X86::SETNSr: case X86::SETNSm: return X86::COND_NS; 3242 case X86::SETOr: case X86::SETOm: return X86::COND_O; 3243 case X86::SETPr: case X86::SETPm: return X86::COND_P; 3244 case X86::SETSr: case X86::SETSm: return X86::COND_S; 3245 } 3246 } 3247 3248 /// Return condition code of a CMov opcode. 3249 X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) { 3250 switch (Opc) { 3251 default: return X86::COND_INVALID; 3252 case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm: 3253 case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr: 3254 return X86::COND_A; 3255 case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm: 3256 case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr: 3257 return X86::COND_AE; 3258 case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm: 3259 case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr: 3260 return X86::COND_B; 3261 case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm: 3262 case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr: 3263 return X86::COND_BE; 3264 case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm: 3265 case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr: 3266 return X86::COND_E; 3267 case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm: 3268 case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr: 3269 return X86::COND_G; 3270 case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm: 3271 case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr: 3272 return X86::COND_GE; 3273 case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm: 3274 case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr: 3275 return X86::COND_L; 3276 case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm: 3277 case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr: 3278 return X86::COND_LE; 3279 case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm: 3280 case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr: 3281 return X86::COND_NE; 3282 case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm: 3283 case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr: 3284 return X86::COND_NO; 3285 case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm: 3286 case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr: 3287 return X86::COND_NP; 3288 case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm: 3289 case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr: 3290 return X86::COND_NS; 3291 case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm: 3292 case X86::CMOVO32rr: case X86::CMOVO64rm: case 
X86::CMOVO64rr: 3293 return X86::COND_O; 3294 case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm: 3295 case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr: 3296 return X86::COND_P; 3297 case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm: 3298 case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr: 3299 return X86::COND_S; 3300 } 3301 } 3302 3303 unsigned X86::GetCondBranchFromCond(X86::CondCode CC) { 3304 switch (CC) { 3305 default: llvm_unreachable("Illegal condition code!"); 3306 case X86::COND_E: return X86::JE_1; 3307 case X86::COND_NE: return X86::JNE_1; 3308 case X86::COND_L: return X86::JL_1; 3309 case X86::COND_LE: return X86::JLE_1; 3310 case X86::COND_G: return X86::JG_1; 3311 case X86::COND_GE: return X86::JGE_1; 3312 case X86::COND_B: return X86::JB_1; 3313 case X86::COND_BE: return X86::JBE_1; 3314 case X86::COND_A: return X86::JA_1; 3315 case X86::COND_AE: return X86::JAE_1; 3316 case X86::COND_S: return X86::JS_1; 3317 case X86::COND_NS: return X86::JNS_1; 3318 case X86::COND_P: return X86::JP_1; 3319 case X86::COND_NP: return X86::JNP_1; 3320 case X86::COND_O: return X86::JO_1; 3321 case X86::COND_NO: return X86::JNO_1; 3322 } 3323 } 3324 3325 /// Return the inverse of the specified condition, 3326 /// e.g. turning COND_E to COND_NE. 3327 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { 3328 switch (CC) { 3329 default: llvm_unreachable("Illegal condition code!"); 3330 case X86::COND_E: return X86::COND_NE; 3331 case X86::COND_NE: return X86::COND_E; 3332 case X86::COND_L: return X86::COND_GE; 3333 case X86::COND_LE: return X86::COND_G; 3334 case X86::COND_G: return X86::COND_LE; 3335 case X86::COND_GE: return X86::COND_L; 3336 case X86::COND_B: return X86::COND_AE; 3337 case X86::COND_BE: return X86::COND_A; 3338 case X86::COND_A: return X86::COND_BE; 3339 case X86::COND_AE: return X86::COND_B; 3340 case X86::COND_S: return X86::COND_NS; 3341 case X86::COND_NS: return X86::COND_S; 3342 case X86::COND_P: return X86::COND_NP; 3343 case X86::COND_NP: return X86::COND_P; 3344 case X86::COND_O: return X86::COND_NO; 3345 case X86::COND_NO: return X86::COND_O; 3346 } 3347 } 3348 3349 /// Assuming the flags are set by MI(a,b), return the condition code if we 3350 /// modify the instructions such that flags are set by MI(b,a). 3351 static X86::CondCode getSwappedCondition(X86::CondCode CC) { 3352 switch (CC) { 3353 default: return X86::COND_INVALID; 3354 case X86::COND_E: return X86::COND_E; 3355 case X86::COND_NE: return X86::COND_NE; 3356 case X86::COND_L: return X86::COND_G; 3357 case X86::COND_LE: return X86::COND_GE; 3358 case X86::COND_G: return X86::COND_L; 3359 case X86::COND_GE: return X86::COND_LE; 3360 case X86::COND_B: return X86::COND_A; 3361 case X86::COND_BE: return X86::COND_AE; 3362 case X86::COND_A: return X86::COND_B; 3363 case X86::COND_AE: return X86::COND_BE; 3364 } 3365 } 3366 3367 /// Return a set opcode for the given condition and 3368 /// whether it has memory operand. 
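/// A small usage sketch (illustrative only):
///   unsigned Opc = X86::getSETFromCond(X86::COND_E, /*HasMemoryOperand=*/false);
///   // Opc is X86::SETEr; with a memory operand it would be X86::SETEm.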
3369 unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) { 3370 static const uint16_t Opc[16][2] = { 3371 { X86::SETAr, X86::SETAm }, 3372 { X86::SETAEr, X86::SETAEm }, 3373 { X86::SETBr, X86::SETBm }, 3374 { X86::SETBEr, X86::SETBEm }, 3375 { X86::SETEr, X86::SETEm }, 3376 { X86::SETGr, X86::SETGm }, 3377 { X86::SETGEr, X86::SETGEm }, 3378 { X86::SETLr, X86::SETLm }, 3379 { X86::SETLEr, X86::SETLEm }, 3380 { X86::SETNEr, X86::SETNEm }, 3381 { X86::SETNOr, X86::SETNOm }, 3382 { X86::SETNPr, X86::SETNPm }, 3383 { X86::SETNSr, X86::SETNSm }, 3384 { X86::SETOr, X86::SETOm }, 3385 { X86::SETPr, X86::SETPm }, 3386 { X86::SETSr, X86::SETSm } 3387 }; 3388 3389 assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes"); 3390 return Opc[CC][HasMemoryOperand ? 1 : 0]; 3391 } 3392 3393 /// Return a cmov opcode for the given condition, 3394 /// register size in bytes, and operand type. 3395 unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes, 3396 bool HasMemoryOperand) { 3397 static const uint16_t Opc[32][3] = { 3398 { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr }, 3399 { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr }, 3400 { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr }, 3401 { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr }, 3402 { X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr }, 3403 { X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr }, 3404 { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr }, 3405 { X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr }, 3406 { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr }, 3407 { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr }, 3408 { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr }, 3409 { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr }, 3410 { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr }, 3411 { X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr }, 3412 { X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr }, 3413 { X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr }, 3414 { X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm }, 3415 { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm }, 3416 { X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm }, 3417 { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm }, 3418 { X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm }, 3419 { X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm }, 3420 { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm }, 3421 { X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm }, 3422 { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm }, 3423 { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm }, 3424 { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm }, 3425 { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm }, 3426 { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm }, 3427 { X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm }, 3428 { X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm }, 3429 { X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm } 3430 }; 3431 3432 assert(CC < 16 && "Can only handle standard cond codes"); 3433 unsigned Idx = HasMemoryOperand ? 16+CC : CC; 3434 switch(RegBytes) { 3435 default: llvm_unreachable("Illegal register size!"); 3436 case 2: return Opc[Idx][0]; 3437 case 4: return Opc[Idx][1]; 3438 case 8: return Opc[Idx][2]; 3439 } 3440 } 3441 3442 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { 3443 if (!MI->isTerminator()) return false; 3444 3445 // Conditional branch is a special case. 
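// (A conditional branch is a terminator but not a barrier, and X86
// branches are never predicated in the MI sense, so it always counts as an
// unpredicated terminator here.)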
3446 if (MI->isBranch() && !MI->isBarrier()) 3447 return true; 3448 if (!MI->isPredicable()) 3449 return true; 3450 return !isPredicated(MI); 3451 } 3452 3453 bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, 3454 MachineBasicBlock *&TBB, 3455 MachineBasicBlock *&FBB, 3456 SmallVectorImpl<MachineOperand> &Cond, 3457 bool AllowModify) const { 3458 // Start from the bottom of the block and work up, examining the 3459 // terminator instructions. 3460 MachineBasicBlock::iterator I = MBB.end(); 3461 MachineBasicBlock::iterator UnCondBrIter = MBB.end(); 3462 while (I != MBB.begin()) { 3463 --I; 3464 if (I->isDebugValue()) 3465 continue; 3466 3467 // Working from the bottom, when we see a non-terminator instruction, we're 3468 // done. 3469 if (!isUnpredicatedTerminator(I)) 3470 break; 3471 3472 // A terminator that isn't a branch can't easily be handled by this 3473 // analysis. 3474 if (!I->isBranch()) 3475 return true; 3476 3477 // Handle unconditional branches. 3478 if (I->getOpcode() == X86::JMP_1) { 3479 UnCondBrIter = I; 3480 3481 if (!AllowModify) { 3482 TBB = I->getOperand(0).getMBB(); 3483 continue; 3484 } 3485 3486 // If the block has any instructions after a JMP, delete them. 3487 while (std::next(I) != MBB.end()) 3488 std::next(I)->eraseFromParent(); 3489 3490 Cond.clear(); 3491 FBB = nullptr; 3492 3493 // Delete the JMP if it's equivalent to a fall-through. 3494 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { 3495 TBB = nullptr; 3496 I->eraseFromParent(); 3497 I = MBB.end(); 3498 UnCondBrIter = MBB.end(); 3499 continue; 3500 } 3501 3502 // TBB is used to indicate the unconditional destination. 3503 TBB = I->getOperand(0).getMBB(); 3504 continue; 3505 } 3506 3507 // Handle conditional branches. 3508 X86::CondCode BranchCode = getCondFromBranchOpc(I->getOpcode()); 3509 if (BranchCode == X86::COND_INVALID) 3510 return true; // Can't handle indirect branch. 3511 3512 // Working from the bottom, handle the first conditional branch. 3513 if (Cond.empty()) { 3514 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); 3515 if (AllowModify && UnCondBrIter != MBB.end() && 3516 MBB.isLayoutSuccessor(TargetBB)) { 3517 // If we can modify the code and it ends in something like: 3518 // 3519 // jCC L1 3520 // jmp L2 3521 // L1: 3522 // ... 3523 // L2: 3524 // 3525 // Then we can change this to: 3526 // 3527 // jnCC L2 3528 // L1: 3529 // ... 3530 // L2: 3531 // 3532 // Which is a bit more efficient. 3533 // We conditionally jump to the fall-through block. 3534 BranchCode = GetOppositeBranchCondition(BranchCode); 3535 unsigned JNCC = GetCondBranchFromCond(BranchCode); 3536 MachineBasicBlock::iterator OldInst = I; 3537 3538 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC)) 3539 .addMBB(UnCondBrIter->getOperand(0).getMBB()); 3540 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1)) 3541 .addMBB(TargetBB); 3542 3543 OldInst->eraseFromParent(); 3544 UnCondBrIter->eraseFromParent(); 3545 3546 // Restart the analysis. 3547 UnCondBrIter = MBB.end(); 3548 I = MBB.end(); 3549 continue; 3550 } 3551 3552 FBB = TBB; 3553 TBB = I->getOperand(0).getMBB(); 3554 Cond.push_back(MachineOperand::CreateImm(BranchCode)); 3555 continue; 3556 } 3557 3558 // Handle subsequent conditional branches. Only handle the case where all 3559 // conditional branches branch to the same destination and their condition 3560 // opcodes fit one of the special multi-branch idioms. 
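// For example (illustrative), an unordered-aware floating point equality
// test typically lowers to a branch pair like:
//   jne L_false
//   jp  L_false
// since "equal" requires ZF set and PF clear; that pair is summarized
// below as the single pseudo-condition COND_NE_OR_P.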
3561 assert(Cond.size() == 1); 3562 assert(TBB); 3563 3564 // Only handle the case where all conditional branches branch to the same 3565 // destination. 3566 if (TBB != I->getOperand(0).getMBB()) 3567 return true; 3568 3569 // If the conditions are the same, we can leave them alone. 3570 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); 3571 if (OldBranchCode == BranchCode) 3572 continue; 3573 3574 // If they differ, see if they fit one of the known patterns. Theoretically, 3575 // we could handle more patterns here, but we shouldn't expect to see them 3576 // if instruction selection has done a reasonable job. 3577 if ((OldBranchCode == X86::COND_NP && 3578 BranchCode == X86::COND_E) || 3579 (OldBranchCode == X86::COND_E && 3580 BranchCode == X86::COND_NP)) 3581 BranchCode = X86::COND_NP_OR_E; 3582 else if ((OldBranchCode == X86::COND_P && 3583 BranchCode == X86::COND_NE) || 3584 (OldBranchCode == X86::COND_NE && 3585 BranchCode == X86::COND_P)) 3586 BranchCode = X86::COND_NE_OR_P; 3587 else 3588 return true; 3589 3590 // Update the MachineOperand. 3591 Cond[0].setImm(BranchCode); 3592 } 3593 3594 return false; 3595 } 3596 3597 unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 3598 MachineBasicBlock::iterator I = MBB.end(); 3599 unsigned Count = 0; 3600 3601 while (I != MBB.begin()) { 3602 --I; 3603 if (I->isDebugValue()) 3604 continue; 3605 if (I->getOpcode() != X86::JMP_1 && 3606 getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID) 3607 break; 3608 // Remove the branch. 3609 I->eraseFromParent(); 3610 I = MBB.end(); 3611 ++Count; 3612 } 3613 3614 return Count; 3615 } 3616 3617 unsigned 3618 X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 3619 MachineBasicBlock *FBB, 3620 const SmallVectorImpl<MachineOperand> &Cond, 3621 DebugLoc DL) const { 3622 // Shouldn't be a fall through. 3623 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 3624 assert((Cond.size() == 1 || Cond.size() == 0) && 3625 "X86 branch conditions have one component!"); 3626 3627 if (Cond.empty()) { 3628 // Unconditional branch? 3629 assert(!FBB && "Unconditional branch with multiple successors!"); 3630 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB); 3631 return 1; 3632 } 3633 3634 // Conditional branch. 3635 unsigned Count = 0; 3636 X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); 3637 switch (CC) { 3638 case X86::COND_NP_OR_E: 3639 // Synthesize NP_OR_E with two branches. 3640 BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB); 3641 ++Count; 3642 BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB); 3643 ++Count; 3644 break; 3645 case X86::COND_NE_OR_P: 3646 // Synthesize NE_OR_P with two branches. 3647 BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB); 3648 ++Count; 3649 BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB); 3650 ++Count; 3651 break; 3652 default: { 3653 unsigned Opc = GetCondBranchFromCond(CC); 3654 BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); 3655 ++Count; 3656 } 3657 } 3658 if (FBB) { 3659 // Two-way Conditional branch. Insert the second branch. 3660 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB); 3661 ++Count; 3662 } 3663 return Count; 3664 } 3665 3666 bool X86InstrInfo:: 3667 canInsertSelect(const MachineBasicBlock &MBB, 3668 const SmallVectorImpl<MachineOperand> &Cond, 3669 unsigned TrueReg, unsigned FalseReg, 3670 int &CondCycles, int &TrueCycles, int &FalseCycles) const { 3671 // Not all subtargets have cmov instructions. 
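// (CMOV first appeared with the P6 family; older targets such as i486
// must expand selects into branches instead.)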
3672 if (!Subtarget.hasCMov()) 3673 return false; 3674 if (Cond.size() != 1) 3675 return false; 3676 // We cannot do the composite conditions, at least not in SSA form. 3677 if ((X86::CondCode)Cond[0].getImm() > X86::COND_S) 3678 return false; 3679 3680 // Check register classes. 3681 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3682 const TargetRegisterClass *RC = 3683 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); 3684 if (!RC) 3685 return false; 3686 3687 // We have cmov instructions for 16, 32, and 64 bit general purpose registers. 3688 if (X86::GR16RegClass.hasSubClassEq(RC) || 3689 X86::GR32RegClass.hasSubClassEq(RC) || 3690 X86::GR64RegClass.hasSubClassEq(RC)) { 3691 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy 3692 // Bridge. Probably Ivy Bridge as well. 3693 CondCycles = 2; 3694 TrueCycles = 2; 3695 FalseCycles = 2; 3696 return true; 3697 } 3698 3699 // Can't do vectors. 3700 return false; 3701 } 3702 3703 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, 3704 MachineBasicBlock::iterator I, DebugLoc DL, 3705 unsigned DstReg, 3706 const SmallVectorImpl<MachineOperand> &Cond, 3707 unsigned TrueReg, unsigned FalseReg) const { 3708 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3709 assert(Cond.size() == 1 && "Invalid Cond array"); 3710 unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(), 3711 MRI.getRegClass(DstReg)->getSize(), 3712 false/*HasMemoryOperand*/); 3713 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg); 3714 } 3715 3716 /// Test if the given register is a physical h register. 3717 static bool isHReg(unsigned Reg) { 3718 return X86::GR8_ABCD_HRegClass.contains(Reg); 3719 } 3720 3721 // Try and copy between VR128/VR64 and GR64 registers. 3722 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, 3723 const X86Subtarget &Subtarget) { 3724 3725 // SrcReg(VR128) -> DestReg(GR64) 3726 // SrcReg(VR64) -> DestReg(GR64) 3727 // SrcReg(GR64) -> DestReg(VR128) 3728 // SrcReg(GR64) -> DestReg(VR64) 3729 3730 bool HasAVX = Subtarget.hasAVX(); 3731 bool HasAVX512 = Subtarget.hasAVX512(); 3732 if (X86::GR64RegClass.contains(DestReg)) { 3733 if (X86::VR128XRegClass.contains(SrcReg)) 3734 // Copy from a VR128 register to a GR64 register. 3735 return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr : 3736 X86::MOVPQIto64rr); 3737 if (X86::VR64RegClass.contains(SrcReg)) 3738 // Copy from a VR64 register to a GR64 register. 3739 return X86::MOVSDto64rr; 3740 } else if (X86::GR64RegClass.contains(SrcReg)) { 3741 // Copy from a GR64 register to a VR128 register. 3742 if (X86::VR128XRegClass.contains(DestReg)) 3743 return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr : 3744 X86::MOV64toPQIrr); 3745 // Copy from a GR64 register to a VR64 register. 3746 if (X86::VR64RegClass.contains(DestReg)) 3747 return X86::MOV64toSDrr; 3748 } 3749 3750 // SrcReg(FR32) -> DestReg(GR32) 3751 // SrcReg(GR32) -> DestReg(FR32) 3752 3753 if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg)) 3754 // Copy from a FR32 register to a GR32 register. 3755 return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr); 3756 3757 if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg)) 3758 // Copy from a GR32 register to a FR32 register. 3759 return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? 
X86::VMOVDI2SSrr : X86::MOVDI2SSrr); 3760 return 0; 3761 } 3762 3763 inline static bool MaskRegClassContains(unsigned Reg) { 3764 return X86::VK8RegClass.contains(Reg) || 3765 X86::VK16RegClass.contains(Reg) || 3766 X86::VK32RegClass.contains(Reg) || 3767 X86::VK64RegClass.contains(Reg) || 3768 X86::VK1RegClass.contains(Reg); 3769 } 3770 static 3771 unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg) { 3772 if (X86::VR128XRegClass.contains(DestReg, SrcReg) || 3773 X86::VR256XRegClass.contains(DestReg, SrcReg) || 3774 X86::VR512RegClass.contains(DestReg, SrcReg)) { 3775 DestReg = get512BitSuperRegister(DestReg); 3776 SrcReg = get512BitSuperRegister(SrcReg); 3777 return X86::VMOVAPSZrr; 3778 } 3779 if (MaskRegClassContains(DestReg) && 3780 MaskRegClassContains(SrcReg)) 3781 return X86::KMOVWkk; 3782 if (MaskRegClassContains(DestReg) && 3783 (X86::GR32RegClass.contains(SrcReg) || 3784 X86::GR16RegClass.contains(SrcReg) || 3785 X86::GR8RegClass.contains(SrcReg))) { 3786 SrcReg = getX86SubSuperRegister(SrcReg, MVT::i32); 3787 return X86::KMOVWkr; 3788 } 3789 if ((X86::GR32RegClass.contains(DestReg) || 3790 X86::GR16RegClass.contains(DestReg) || 3791 X86::GR8RegClass.contains(DestReg)) && 3792 MaskRegClassContains(SrcReg)) { 3793 DestReg = getX86SubSuperRegister(DestReg, MVT::i32); 3794 return X86::KMOVWrk; 3795 } 3796 return 0; 3797 } 3798 3799 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, 3800 MachineBasicBlock::iterator MI, DebugLoc DL, 3801 unsigned DestReg, unsigned SrcReg, 3802 bool KillSrc) const { 3803 // First deal with the normal symmetric copies. 3804 bool HasAVX = Subtarget.hasAVX(); 3805 bool HasAVX512 = Subtarget.hasAVX512(); 3806 unsigned Opc = 0; 3807 if (X86::GR64RegClass.contains(DestReg, SrcReg)) 3808 Opc = X86::MOV64rr; 3809 else if (X86::GR32RegClass.contains(DestReg, SrcReg)) 3810 Opc = X86::MOV32rr; 3811 else if (X86::GR16RegClass.contains(DestReg, SrcReg)) 3812 Opc = X86::MOV16rr; 3813 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { 3814 // Copying to or from a physical H register on x86-64 requires a NOREX 3815 // move. Otherwise use a normal move. 3816 if ((isHReg(DestReg) || isHReg(SrcReg)) && 3817 Subtarget.is64Bit()) { 3818 Opc = X86::MOV8rr_NOREX; 3819 // Both operands must be encodable without an REX prefix. 3820 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) && 3821 "8-bit H register can not be copied outside GR8_NOREX"); 3822 } else 3823 Opc = X86::MOV8rr; 3824 } 3825 else if (X86::VR64RegClass.contains(DestReg, SrcReg)) 3826 Opc = X86::MMX_MOVQ64rr; 3827 else if (HasAVX512) 3828 Opc = copyPhysRegOpcode_AVX512(DestReg, SrcReg); 3829 else if (X86::VR128RegClass.contains(DestReg, SrcReg)) 3830 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr; 3831 else if (X86::VR256RegClass.contains(DestReg, SrcReg)) 3832 Opc = X86::VMOVAPSYrr; 3833 if (!Opc) 3834 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget); 3835 3836 if (Opc) { 3837 BuildMI(MBB, MI, DL, get(Opc), DestReg) 3838 .addReg(SrcReg, getKillRegState(KillSrc)); 3839 return; 3840 } 3841 3842 // Moving EFLAGS to / from another register requires a push and a pop. 3843 // Notice that we have to adjust the stack if we don't want to clobber the 3844 // first frame index. See X86FrameLowering.cpp - clobbersTheStack. 
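// As a sketch, copying EFLAGS into a 64-bit GPR below comes out as:
//   pushfq
//   popq %dst
// and the opposite direction mirrors it with a push of the GPR followed
// by popfq.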
3845 if (SrcReg == X86::EFLAGS) { 3846 if (X86::GR64RegClass.contains(DestReg)) { 3847 BuildMI(MBB, MI, DL, get(X86::PUSHF64)); 3848 BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg); 3849 return; 3850 } 3851 if (X86::GR32RegClass.contains(DestReg)) { 3852 BuildMI(MBB, MI, DL, get(X86::PUSHF32)); 3853 BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg); 3854 return; 3855 } 3856 } 3857 if (DestReg == X86::EFLAGS) { 3858 if (X86::GR64RegClass.contains(SrcReg)) { 3859 BuildMI(MBB, MI, DL, get(X86::PUSH64r)) 3860 .addReg(SrcReg, getKillRegState(KillSrc)); 3861 BuildMI(MBB, MI, DL, get(X86::POPF64)); 3862 return; 3863 } 3864 if (X86::GR32RegClass.contains(SrcReg)) { 3865 BuildMI(MBB, MI, DL, get(X86::PUSH32r)) 3866 .addReg(SrcReg, getKillRegState(KillSrc)); 3867 BuildMI(MBB, MI, DL, get(X86::POPF32)); 3868 return; 3869 } 3870 } 3871 3872 DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) 3873 << " to " << RI.getName(DestReg) << '\n'); 3874 llvm_unreachable("Cannot emit physreg copy instruction"); 3875 } 3876 3877 static unsigned getLoadStoreRegOpcode(unsigned Reg, 3878 const TargetRegisterClass *RC, 3879 bool isStackAligned, 3880 const X86Subtarget &STI, 3881 bool load) { 3882 if (STI.hasAVX512()) { 3883 if (X86::VK8RegClass.hasSubClassEq(RC) || 3884 X86::VK16RegClass.hasSubClassEq(RC)) 3885 return load ? X86::KMOVWkm : X86::KMOVWmk; 3886 if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC)) 3887 return load ? X86::VMOVSSZrm : X86::VMOVSSZmr; 3888 if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC)) 3889 return load ? X86::VMOVSDZrm : X86::VMOVSDZmr; 3890 if (X86::VR512RegClass.hasSubClassEq(RC)) 3891 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; 3892 } 3893 3894 bool HasAVX = STI.hasAVX(); 3895 switch (RC->getSize()) { 3896 default: 3897 llvm_unreachable("Unknown spill size"); 3898 case 1: 3899 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); 3900 if (STI.is64Bit()) 3901 // Copying to or from a physical H register on x86-64 requires a NOREX 3902 // move. Otherwise use a normal move. 3903 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) 3904 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; 3905 return load ? X86::MOV8rm : X86::MOV8mr; 3906 case 2: 3907 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); 3908 return load ? X86::MOV16rm : X86::MOV16mr; 3909 case 4: 3910 if (X86::GR32RegClass.hasSubClassEq(RC)) 3911 return load ? X86::MOV32rm : X86::MOV32mr; 3912 if (X86::FR32RegClass.hasSubClassEq(RC)) 3913 return load ? 3914 (HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) : 3915 (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr); 3916 if (X86::RFP32RegClass.hasSubClassEq(RC)) 3917 return load ? X86::LD_Fp32m : X86::ST_Fp32m; 3918 llvm_unreachable("Unknown 4-byte regclass"); 3919 case 8: 3920 if (X86::GR64RegClass.hasSubClassEq(RC)) 3921 return load ? X86::MOV64rm : X86::MOV64mr; 3922 if (X86::FR64RegClass.hasSubClassEq(RC)) 3923 return load ? 3924 (HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) : 3925 (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr); 3926 if (X86::VR64RegClass.hasSubClassEq(RC)) 3927 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; 3928 if (X86::RFP64RegClass.hasSubClassEq(RC)) 3929 return load ? X86::LD_Fp64m : X86::ST_Fp64m; 3930 llvm_unreachable("Unknown 8-byte regclass"); 3931 case 10: 3932 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); 3933 return load ? 
X86::LD_Fp80m : X86::ST_FpP80m; 3934 case 16: { 3935 assert((X86::VR128RegClass.hasSubClassEq(RC) || 3936 X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass"); 3937 // If stack is realigned we can use aligned stores. 3938 if (isStackAligned) 3939 return load ? 3940 (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) : 3941 (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr); 3942 else 3943 return load ? 3944 (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) : 3945 (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr); 3946 } 3947 case 32: 3948 assert((X86::VR256RegClass.hasSubClassEq(RC) || 3949 X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass"); 3950 // If stack is realigned we can use aligned stores. 3951 if (isStackAligned) 3952 return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr; 3953 else 3954 return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr; 3955 case 64: 3956 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass"); 3957 if (isStackAligned) 3958 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr; 3959 else 3960 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr; 3961 } 3962 } 3963 3964 static unsigned getStoreRegOpcode(unsigned SrcReg, 3965 const TargetRegisterClass *RC, 3966 bool isStackAligned, 3967 const X86Subtarget &STI) { 3968 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false); 3969 } 3970 3971 3972 static unsigned getLoadRegOpcode(unsigned DestReg, 3973 const TargetRegisterClass *RC, 3974 bool isStackAligned, 3975 const X86Subtarget &STI) { 3976 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true); 3977 } 3978 3979 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 3980 MachineBasicBlock::iterator MI, 3981 unsigned SrcReg, bool isKill, int FrameIdx, 3982 const TargetRegisterClass *RC, 3983 const TargetRegisterInfo *TRI) const { 3984 const MachineFunction &MF = *MBB.getParent(); 3985 assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() && 3986 "Stack slot too small for store"); 3987 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 3988 bool isAligned = 3989 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 3990 RI.canRealignStack(MF); 3991 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 3992 DebugLoc DL = MBB.findDebugLoc(MI); 3993 addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx) 3994 .addReg(SrcReg, getKillRegState(isKill)); 3995 } 3996 3997 void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 3998 bool isKill, 3999 SmallVectorImpl<MachineOperand> &Addr, 4000 const TargetRegisterClass *RC, 4001 MachineInstr::mmo_iterator MMOBegin, 4002 MachineInstr::mmo_iterator MMOEnd, 4003 SmallVectorImpl<MachineInstr*> &NewMIs) const { 4004 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4005 bool isAligned = MMOBegin != MMOEnd && 4006 (*MMOBegin)->getAlignment() >= Alignment; 4007 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); 4008 DebugLoc DL; 4009 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); 4010 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 4011 MIB.addOperand(Addr[i]); 4012 MIB.addReg(SrcReg, getKillRegState(isKill)); 4013 (*MIB).setMemRefs(MMOBegin, MMOEnd); 4014 NewMIs.push_back(MIB); 4015 } 4016 4017 4018 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 4019 MachineBasicBlock::iterator MI, 4020 unsigned DestReg, int FrameIdx, 4021 const TargetRegisterClass *RC, 4022 const TargetRegisterInfo *TRI) const { 4023 const MachineFunction &MF = *MBB.getParent(); 4024 unsigned Alignment = 
std::max<uint32_t>(RC->getSize(), 16); 4025 bool isAligned = 4026 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || 4027 RI.canRealignStack(MF); 4028 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 4029 DebugLoc DL = MBB.findDebugLoc(MI); 4030 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); 4031 } 4032 4033 void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 4034 SmallVectorImpl<MachineOperand> &Addr, 4035 const TargetRegisterClass *RC, 4036 MachineInstr::mmo_iterator MMOBegin, 4037 MachineInstr::mmo_iterator MMOEnd, 4038 SmallVectorImpl<MachineInstr*> &NewMIs) const { 4039 unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16); 4040 bool isAligned = MMOBegin != MMOEnd && 4041 (*MMOBegin)->getAlignment() >= Alignment; 4042 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); 4043 DebugLoc DL; 4044 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 4045 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 4046 MIB.addOperand(Addr[i]); 4047 (*MIB).setMemRefs(MMOBegin, MMOEnd); 4048 NewMIs.push_back(MIB); 4049 } 4050 4051 bool X86InstrInfo:: 4052 analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2, 4053 int &CmpMask, int &CmpValue) const { 4054 switch (MI->getOpcode()) { 4055 default: break; 4056 case X86::CMP64ri32: 4057 case X86::CMP64ri8: 4058 case X86::CMP32ri: 4059 case X86::CMP32ri8: 4060 case X86::CMP16ri: 4061 case X86::CMP16ri8: 4062 case X86::CMP8ri: 4063 SrcReg = MI->getOperand(0).getReg(); 4064 SrcReg2 = 0; 4065 CmpMask = ~0; 4066 CmpValue = MI->getOperand(1).getImm(); 4067 return true; 4068 // A SUB can be used to perform a comparison. 4069 case X86::SUB64rm: 4070 case X86::SUB32rm: 4071 case X86::SUB16rm: 4072 case X86::SUB8rm: 4073 SrcReg = MI->getOperand(1).getReg(); 4074 SrcReg2 = 0; 4075 CmpMask = ~0; 4076 CmpValue = 0; 4077 return true; 4078 case X86::SUB64rr: 4079 case X86::SUB32rr: 4080 case X86::SUB16rr: 4081 case X86::SUB8rr: 4082 SrcReg = MI->getOperand(1).getReg(); 4083 SrcReg2 = MI->getOperand(2).getReg(); 4084 CmpMask = ~0; 4085 CmpValue = 0; 4086 return true; 4087 case X86::SUB64ri32: 4088 case X86::SUB64ri8: 4089 case X86::SUB32ri: 4090 case X86::SUB32ri8: 4091 case X86::SUB16ri: 4092 case X86::SUB16ri8: 4093 case X86::SUB8ri: 4094 SrcReg = MI->getOperand(1).getReg(); 4095 SrcReg2 = 0; 4096 CmpMask = ~0; 4097 CmpValue = MI->getOperand(2).getImm(); 4098 return true; 4099 case X86::CMP64rr: 4100 case X86::CMP32rr: 4101 case X86::CMP16rr: 4102 case X86::CMP8rr: 4103 SrcReg = MI->getOperand(0).getReg(); 4104 SrcReg2 = MI->getOperand(1).getReg(); 4105 CmpMask = ~0; 4106 CmpValue = 0; 4107 return true; 4108 case X86::TEST8rr: 4109 case X86::TEST16rr: 4110 case X86::TEST32rr: 4111 case X86::TEST64rr: 4112 SrcReg = MI->getOperand(0).getReg(); 4113 if (MI->getOperand(1).getReg() != SrcReg) return false; 4114 // Compare against zero. 4115 SrcReg2 = 0; 4116 CmpMask = ~0; 4117 CmpValue = 0; 4118 return true; 4119 } 4120 return false; 4121 } 4122 4123 /// Check whether the first instruction, whose only 4124 /// purpose is to update flags, can be made redundant. 4125 /// CMPrr can be made redundant by SUBrr if the operands are the same. 4126 /// This function can be extended later on. 4127 /// SrcReg, SrcReg2: register operands for FlagI. 4128 /// ImmValue: immediate for FlagI if it takes an immediate.
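/// For example (assembly sketch): the EFLAGS produced by
///   cmpl %esi, %edi
/// are exactly those produced by
///   subl %esi, %edi
/// so a CMP that follows such a SUB on the same operands is redundant.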
4129 inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg, 4130 unsigned SrcReg2, int ImmValue, 4131 MachineInstr *OI) { 4132 if (((FlagI->getOpcode() == X86::CMP64rr && 4133 OI->getOpcode() == X86::SUB64rr) || 4134 (FlagI->getOpcode() == X86::CMP32rr && 4135 OI->getOpcode() == X86::SUB32rr)|| 4136 (FlagI->getOpcode() == X86::CMP16rr && 4137 OI->getOpcode() == X86::SUB16rr)|| 4138 (FlagI->getOpcode() == X86::CMP8rr && 4139 OI->getOpcode() == X86::SUB8rr)) && 4140 ((OI->getOperand(1).getReg() == SrcReg && 4141 OI->getOperand(2).getReg() == SrcReg2) || 4142 (OI->getOperand(1).getReg() == SrcReg2 && 4143 OI->getOperand(2).getReg() == SrcReg))) 4144 return true; 4145 4146 if (((FlagI->getOpcode() == X86::CMP64ri32 && 4147 OI->getOpcode() == X86::SUB64ri32) || 4148 (FlagI->getOpcode() == X86::CMP64ri8 && 4149 OI->getOpcode() == X86::SUB64ri8) || 4150 (FlagI->getOpcode() == X86::CMP32ri && 4151 OI->getOpcode() == X86::SUB32ri) || 4152 (FlagI->getOpcode() == X86::CMP32ri8 && 4153 OI->getOpcode() == X86::SUB32ri8) || 4154 (FlagI->getOpcode() == X86::CMP16ri && 4155 OI->getOpcode() == X86::SUB16ri) || 4156 (FlagI->getOpcode() == X86::CMP16ri8 && 4157 OI->getOpcode() == X86::SUB16ri8) || 4158 (FlagI->getOpcode() == X86::CMP8ri && 4159 OI->getOpcode() == X86::SUB8ri)) && 4160 OI->getOperand(1).getReg() == SrcReg && 4161 OI->getOperand(2).getImm() == ImmValue) 4162 return true; 4163 return false; 4164 } 4165 4166 /// Check whether the definition can be converted 4167 /// to remove a comparison against zero. 4168 inline static bool isDefConvertible(MachineInstr *MI) { 4169 switch (MI->getOpcode()) { 4170 default: return false; 4171 4172 // The shift instructions only modify ZF if their shift count is non-zero. 4173 // N.B.: The processor truncates the shift count depending on the encoding. 4174 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri: 4175 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri: 4176 return getTruncatedShiftCount(MI, 2) != 0; 4177 4178 // Some left shift instructions can be turned into LEA instructions but only 4179 // if their flags aren't used. Avoid transforming such instructions. 
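// For instance (illustrative only), "shll $2, %eax" may later become
// "leal (,%eax,4), %eax", and LEA does not write EFLAGS at all, so shifts
// whose count fits a LEA are not treated as flag definitions here.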
/// Check whether the definition can be converted
/// to remove a comparison against zero.
inline static bool isDefConvertible(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return false;

  // The shift instructions only modify ZF if their shift count is non-zero.
  // N.B.: The processor truncates the shift count depending on the encoding.
  case X86::SAR8ri:  case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
  case X86::SHR8ri:  case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
    return getTruncatedShiftCount(MI, 2) != 0;

  // Some left shift instructions can be turned into LEA instructions but only
  // if their flags aren't used. Avoid transforming such instructions.
  case X86::SHL8ri:  case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri: {
    unsigned ShAmt = getTruncatedShiftCount(MI, 2);
    if (isTruncatedShiftCountForLEA(ShAmt)) return false;
    return ShAmt != 0;
  }

  case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:
  case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
    return getTruncatedShiftCount(MI, 3) != 0;

  case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
  case X86::SUB32ri8:  case X86::SUB16ri:  case X86::SUB16ri8:
  case X86::SUB8ri:    case X86::SUB64rr:  case X86::SUB32rr:
  case X86::SUB16rr:   case X86::SUB8rr:   case X86::SUB64rm:
  case X86::SUB32rm:   case X86::SUB16rm:  case X86::SUB8rm:
  case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
  case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
  case X86::ADD32ri8:  case X86::ADD16ri:  case X86::ADD16ri8:
  case X86::ADD8ri:    case X86::ADD64rr:  case X86::ADD32rr:
  case X86::ADD16rr:   case X86::ADD8rr:   case X86::ADD64rm:
  case X86::ADD32rm:   case X86::ADD16rm:  case X86::ADD8rm:
  case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
  case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
  case X86::AND32ri8:  case X86::AND16ri:  case X86::AND16ri8:
  case X86::AND8ri:    case X86::AND64rr:  case X86::AND32rr:
  case X86::AND16rr:   case X86::AND8rr:   case X86::AND64rm:
  case X86::AND32rm:   case X86::AND16rm:  case X86::AND8rm:
  case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
  case X86::XOR32ri8:  case X86::XOR16ri:  case X86::XOR16ri8:
  case X86::XOR8ri:    case X86::XOR64rr:  case X86::XOR32rr:
  case X86::XOR16rr:   case X86::XOR8rr:   case X86::XOR64rm:
  case X86::XOR32rm:   case X86::XOR16rm:  case X86::XOR8rm:
  case X86::OR64ri32:  case X86::OR64ri8:  case X86::OR32ri:
  case X86::OR32ri8:   case X86::OR16ri:   case X86::OR16ri8:
  case X86::OR8ri:     case X86::OR64rr:   case X86::OR32rr:
  case X86::OR16rr:    case X86::OR8rr:    case X86::OR64rm:
  case X86::OR32rm:    case X86::OR16rm:   case X86::OR8rm:
  case X86::NEG8r:  case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
  case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1: case X86::SAR64r1:
  case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1: case X86::SHR64r1:
  case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1: case X86::SHL64r1:
  case X86::ADC32ri:    case X86::ADC32ri8:
  case X86::ADC32rr:    case X86::ADC64ri32:
  case X86::ADC64ri8:   case X86::ADC64rr:
  case X86::SBB32ri:    case X86::SBB32ri8:
  case X86::SBB32rr:    case X86::SBB64ri32:
  case X86::SBB64ri8:   case X86::SBB64rr:
  case X86::ANDN32rr:   case X86::ANDN32rm:
  case X86::ANDN64rr:   case X86::ANDN64rm:
  case X86::BEXTR32rr:  case X86::BEXTR64rr:
  case X86::BEXTR32rm:  case X86::BEXTR64rm:
  case X86::BLSI32rr:   case X86::BLSI32rm:
  case X86::BLSI64rr:   case X86::BLSI64rm:
  case X86::BLSMSK32rr: case X86::BLSMSK32rm:
  case X86::BLSMSK64rr: case X86::BLSMSK64rm:
  case X86::BLSR32rr:   case X86::BLSR32rm:
  case X86::BLSR64rr:   case X86::BLSR64rm:
  case X86::BZHI32rr:   case X86::BZHI32rm:
  case X86::BZHI64rr:   case X86::BZHI64rm:
  case X86::LZCNT16rr:  case X86::LZCNT16rm:
  case X86::LZCNT32rr:  case X86::LZCNT32rm:
  case X86::LZCNT64rr:  case X86::LZCNT64rm:
  case X86::POPCNT16rr: case X86::POPCNT16rm:
  case X86::POPCNT32rr: case X86::POPCNT32rm:
  case X86::POPCNT64rr: case X86::POPCNT64rm:
  case X86::TZCNT16rr:  case X86::TZCNT16rm:
  case X86::TZCNT32rr:  case X86::TZCNT32rm:
  case X86::TZCNT64rr:  case X86::TZCNT64rm:
    return true;
  }
}
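// Note on the convertible uses handled below: LZCNT and TZCNT set CF
// exactly when their source operand is zero, and POPCNT sets ZF exactly
// when its source is zero. A separate compare of the source against zero
// can therefore be folded into a test of CF (COND_B) or ZF (COND_E).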
/// Check whether the use can be converted to remove a comparison against zero.
static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return X86::COND_INVALID;
  case X86::LZCNT16rr: case X86::LZCNT16rm:
  case X86::LZCNT32rr: case X86::LZCNT32rm:
  case X86::LZCNT64rr: case X86::LZCNT64rm:
    return X86::COND_B;
  case X86::POPCNT16rr: case X86::POPCNT16rm:
  case X86::POPCNT32rr: case X86::POPCNT32rm:
  case X86::POPCNT64rr: case X86::POPCNT64rm:
    return X86::COND_E;
  case X86::TZCNT16rr: case X86::TZCNT16rm:
  case X86::TZCNT32rr: case X86::TZCNT32rm:
  case X86::TZCNT64rr: case X86::TZCNT64rm:
    return X86::COND_B;
  }
}

/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
bool X86InstrInfo::
optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
                     int CmpMask, int CmpValue,
                     const MachineRegisterInfo *MRI) const {
  // Check whether we can replace SUB with CMP.
  unsigned NewOpcode = 0;
  switch (CmpInstr->getOpcode()) {
  default: break;
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB8ri:
  case X86::SUB64rm:
  case X86::SUB32rm:
  case X86::SUB16rm:
  case X86::SUB8rm:
  case X86::SUB64rr:
  case X86::SUB32rr:
  case X86::SUB16rr:
  case X86::SUB8rr: {
    if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
      return false;
    // There is no use of the destination register; we can replace SUB with
    // CMP.
    switch (CmpInstr->getOpcode()) {
    default: llvm_unreachable("Unreachable!");
    case X86::SUB64rm:   NewOpcode = X86::CMP64rm;   break;
    case X86::SUB32rm:   NewOpcode = X86::CMP32rm;   break;
    case X86::SUB16rm:   NewOpcode = X86::CMP16rm;   break;
    case X86::SUB8rm:    NewOpcode = X86::CMP8rm;    break;
    case X86::SUB64rr:   NewOpcode = X86::CMP64rr;   break;
    case X86::SUB32rr:   NewOpcode = X86::CMP32rr;   break;
    case X86::SUB16rr:   NewOpcode = X86::CMP16rr;   break;
    case X86::SUB8rr:    NewOpcode = X86::CMP8rr;    break;
    case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
    case X86::SUB64ri8:  NewOpcode = X86::CMP64ri8;  break;
    case X86::SUB32ri:   NewOpcode = X86::CMP32ri;   break;
    case X86::SUB32ri8:  NewOpcode = X86::CMP32ri8;  break;
    case X86::SUB16ri:   NewOpcode = X86::CMP16ri;   break;
    case X86::SUB16ri8:  NewOpcode = X86::CMP16ri8;  break;
    case X86::SUB8ri:    NewOpcode = X86::CMP8ri;    break;
    }
    CmpInstr->setDesc(get(NewOpcode));
    CmpInstr->RemoveOperand(0);
    // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
    if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
        NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
      return false;
  }
  }

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // CmpInstr is the first instruction of the BB.
  MachineBasicBlock::iterator I = CmpInstr, Def = MI;

  // If we are comparing against zero, check whether we can use MI to update
  // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
  bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
  if (IsCmpZero && MI->getParent() != CmpInstr->getParent())
    return false;

  // If we have a use of the source register between the def and our compare
  // instruction, we can eliminate the compare iff the use sets EFLAGS in the
  // right way.
  bool ShouldUpdateCC = false;
  X86::CondCode NewCC = X86::COND_INVALID;
  if (IsCmpZero && !isDefConvertible(MI)) {
    // Scan forward from the def until we hit the use we're looking for or the
    // compare instruction.
    for (MachineBasicBlock::iterator J = MI;; ++J) {
      // Do we have a convertible instruction?
      NewCC = isUseDefConvertible(J);
      if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
          J->getOperand(1).getReg() == SrcReg) {
        assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
        ShouldUpdateCC = true; // Update CC later on.
        // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
        // with the new def.
        MI = Def = J;
        break;
      }

      if (J == I)
        return false;
    }
  }

  // We are searching for an earlier instruction that can make CmpInstr
  // redundant; that instruction will be saved in Sub.
  MachineInstr *Sub = nullptr;
  const TargetRegisterInfo *TRI = &getRegisterInfo();

  // We iterate backward, starting from the instruction before CmpInstr, and
  // stop when we reach the definition of a source register or run out of
  // instructions in the BB. RI points to the instruction before CmpInstr.
  // If the definition is in this basic block, RE points to the definition;
  // otherwise, RE is the rend of the basic block.
  MachineBasicBlock::reverse_iterator
      RI = MachineBasicBlock::reverse_iterator(I),
      RE = CmpInstr->getParent() == MI->getParent() ?
           MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
           CmpInstr->getParent()->rend();
  MachineInstr *Movr0Inst = nullptr;
  for (; RI != RE; ++RI) {
    MachineInstr *Instr = &*RI;
    // Check whether CmpInstr can be made redundant by the current instruction.
    if (!IsCmpZero &&
        isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) {
      Sub = Instr;
      break;
    }

    if (Instr->modifiesRegister(X86::EFLAGS, TRI) ||
        Instr->readsRegister(X86::EFLAGS, TRI)) {
      // This instruction modifies or uses EFLAGS.

      // MOV32r0 etc. are implemented with xor, which clobbers the condition
      // code. They are safe to move up if the def of EFLAGS is dead and
      // earlier instructions do not read or write EFLAGS.
      if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 &&
          Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
        Movr0Inst = Instr;
        continue;
      }

      // We can't remove CmpInstr.
      return false;
    }
  }

  // Return false if no candidates exist.
  if (!IsCmpZero && !Sub)
    return false;

  bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
                    Sub->getOperand(2).getReg() == SrcReg);

  // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
  // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
  // If we are done with the basic block, we need to check whether EFLAGS is
  // live-out.
  bool IsSafe = false;
  SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
  MachineBasicBlock::iterator E = CmpInstr->getParent()->end();
  for (++I; I != E; ++I) {
    const MachineInstr &Instr = *I;
    bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
    bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
    // If this instruction both uses and updates EFLAGS, we still have to
    // check the use below.
    if (!UseEFLAGS && ModifyEFLAGS) {
      // It is safe to remove CmpInstr if EFLAGS is updated again.
      IsSafe = true;
      break;
    }
    if (!UseEFLAGS && !ModifyEFLAGS)
      continue;

    // EFLAGS is used by this instruction.
    X86::CondCode OldCC = X86::COND_INVALID;
    bool OpcIsSET = false;
    if (IsCmpZero || IsSwapped) {
      // We decode the condition code from the opcode.
      if (Instr.isBranch())
        OldCC = getCondFromBranchOpc(Instr.getOpcode());
      else {
        OldCC = getCondFromSETOpc(Instr.getOpcode());
        if (OldCC != X86::COND_INVALID)
          OpcIsSET = true;
        else
          OldCC = X86::getCondFromCMovOpc(Instr.getOpcode());
      }
      if (OldCC == X86::COND_INVALID) return false;
    }
    if (IsCmpZero) {
      switch (OldCC) {
      default: break;
      case X86::COND_A: case X86::COND_AE:
      case X86::COND_B: case X86::COND_BE:
      case X86::COND_G: case X86::COND_GE:
      case X86::COND_L: case X86::COND_LE:
      case X86::COND_O: case X86::COND_NO:
        // CF or OF is used; we can't perform this optimization.
        return false;
      }

      // If we're updating the condition code, check if we have to reverse the
      // condition.
      if (ShouldUpdateCC)
        switch (OldCC) {
        default:
          return false;
        case X86::COND_E:
          break;
        case X86::COND_NE:
          NewCC = GetOppositeBranchCondition(NewCC);
          break;
        }
    } else if (IsSwapped) {
      // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
      // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
      // We swap the condition code and synthesize the new opcode.
      NewCC = getSwappedCondition(OldCC);
      if (NewCC == X86::COND_INVALID) return false;
    }

    if ((ShouldUpdateCC || IsSwapped) && NewCC != OldCC) {
      // Synthesize the new opcode.
      bool HasMemoryOperand = Instr.hasOneMemOperand();
      unsigned NewOpc;
      if (Instr.isBranch())
        NewOpc = GetCondBranchFromCond(NewCC);
      else if (OpcIsSET)
        NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
      else {
        unsigned DstReg = Instr.getOperand(0).getReg();
        NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(),
                                 HasMemoryOperand);
      }

      // Push the MachineInstr to OpsToUpdate.
      // If it is safe to remove CmpInstr, the condition code of these
      // instructions will be modified.
      OpsToUpdate.push_back(std::make_pair(&*I, NewOpc));
    }
    if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
      // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
      IsSafe = true;
      break;
    }
  }

  // If EFLAGS is neither killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if ((IsCmpZero || IsSwapped) && !IsSafe) {
    MachineBasicBlock *MBB = CmpInstr->getParent();
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      if ((*SI)->isLiveIn(X86::EFLAGS))
        return false;
  }

  // The instruction to be updated is either Sub or MI.
  Sub = IsCmpZero ? MI : Sub;
  // Move Movr0Inst to the appropriate place before Sub.
  if (Movr0Inst) {
    // Look backwards until we find a def that doesn't use the current EFLAGS.
    Def = Sub;
    MachineBasicBlock::reverse_iterator
        InsertI = MachineBasicBlock::reverse_iterator(++Def),
        InsertE = Sub->getParent()->rend();
    for (; InsertI != InsertE; ++InsertI) {
      MachineInstr *Instr = &*InsertI;
      if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
          Instr->modifiesRegister(X86::EFLAGS, TRI)) {
        Sub->getParent()->remove(Movr0Inst);
        Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
                                   Movr0Inst);
        break;
      }
    }
    if (InsertI == InsertE)
      return false;
  }

  // Make sure the Sub instruction defines EFLAGS and mark the def live.
  unsigned i = 0, e = Sub->getNumOperands();
  for (; i != e; ++i) {
    MachineOperand &MO = Sub->getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
      MO.setIsDead(false);
      break;
    }
  }
  assert(i != e && "Unable to locate a def EFLAGS operand");

  CmpInstr->eraseFromParent();

  // Modify the condition code of instructions in OpsToUpdate.
  for (unsigned i = 0, e = OpsToUpdate.size(); i != e; ++i)
    OpsToUpdate[i].first->setDesc(get(OpsToUpdate[i].second));
  return true;
}
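// A typical result of the optimization above, in illustrative assembly:
//
//   subl %esi, %edi    // defines EFLAGS
//   testl %edi, %edi   // compares the SUB result against zero
//   je .LBB0_2
//
// becomes
//
//   subl %esi, %edi    // EFLAGS def marked live
//   je .LBB0_2         // TEST erased; SUB already set ZF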
/// Try to remove the load by folding it to a register
/// operand at the use. We fold the load if it defines a virtual register,
/// the virtual register is used exactly once in the same BB, and the
/// instructions in between do not load or store and have no side effects.
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
                                              const MachineRegisterInfo *MRI,
                                              unsigned &FoldAsLoadDefReg,
                                              MachineInstr *&DefMI) const {
  if (FoldAsLoadDefReg == 0)
    return nullptr;
  // To be conservative, if there exists another load, clear the load
  // candidate.
  if (MI->mayLoad()) {
    FoldAsLoadDefReg = 0;
    return nullptr;
  }

  // Check whether we can move DefMI here.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(this, nullptr, SawStore))
    return nullptr;

  // Collect information about virtual register operands of MI.
  unsigned SrcOperandId = 0;
  bool FoundSrcOperand = false;
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg != FoldAsLoadDefReg)
      continue;
    // Do not fold if we have a subreg use or a def or multiple uses.
    if (MO.getSubReg() || MO.isDef() || FoundSrcOperand)
      return nullptr;

    SrcOperandId = i;
    FoundSrcOperand = true;
  }
  if (!FoundSrcOperand)
    return nullptr;

  // Check whether we can fold the def into SrcOperandId.
  MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI);
  if (FoldMI) {
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}

/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:
///   %xmm4 = V_SET0
/// to:
///   %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
///
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
                             const MCInstrDesc &Desc) {
  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
  unsigned Reg = MIB->getOperand(0).getReg();
  MIB->setDesc(Desc);

  // MachineInstr::addOperand() will insert explicit operands before any
  // implicit operands.
  MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
  // But we don't trust that.
  assert(MIB->getOperand(1).getReg() == Reg &&
         MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
  return true;
}

// LoadStackGuard has so far only been implemented for 64-bit MachO. A
// different code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
                                 const TargetInstrInfo &TII) {
  MachineBasicBlock &MBB = *MIB->getParent();
  DebugLoc DL = MIB->getDebugLoc();
  unsigned Reg = MIB->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
  unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
  MachineMemOperand *MMO = MBB.getParent()->
      getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 8, 8);
  MachineBasicBlock::iterator I = MIB.getInstr();

  BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
      .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
      .addMemOperand(MMO);
  MIB->setDebugLoc(DL);
  MIB->setDesc(TII.get(X86::MOV64rm));
  MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
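// For reference, the expansion above produces a two-instruction sequence
// along these lines (the MachO guard symbol is shown for illustration):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg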
bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  bool HasAVX = Subtarget.hasAVX();
  MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
  switch (MI->getOpcode()) {
  case X86::MOV32r0:
    return Expand2AddrUndef(MIB, get(X86::XOR32rr));
  case X86::SETB_C8r:
    return Expand2AddrUndef(MIB, get(X86::SBB8rr));
  case X86::SETB_C16r:
    return Expand2AddrUndef(MIB, get(X86::SBB16rr));
  case X86::SETB_C32r:
    return Expand2AddrUndef(MIB, get(X86::SBB32rr));
  case X86::SETB_C64r:
    return Expand2AddrUndef(MIB, get(X86::SBB64rr));
  case X86::V_SET0:
  case X86::FsFLD0SS:
  case X86::FsFLD0SD:
    return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
  case X86::AVX_SET0:
    assert(HasAVX && "AVX not supported");
    return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
  case X86::AVX512_512_SET0:
    return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
  case X86::V_SETALLONES:
    return Expand2AddrUndef(MIB,
                            get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
  case X86::AVX2_SETALLONES:
    return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
  case X86::TEST8ri_NOREX:
    MI->setDesc(get(X86::TEST8ri));
    return true;
  case X86::KSET0B:
  case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
  case X86::KSET1B:
  case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(MIB, *this);
    return true;
  }
  return false;
}

static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     ArrayRef<MachineOperand> MOs,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  // Omit the implicit operands, something BuildMI can't do.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4) // FrameIndex only
    addOffset(MIB, 0);

  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands() - 2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i + 2);
    MIB.addOperand(MO);
  }
  for (unsigned i = NumOps + 2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB.addOperand(MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
                              unsigned OpNo, ArrayRef<MachineOperand> MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  // Omit the implicit operands, something BuildMI can't do.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB.addOperand(MOs[i]);
      if (NumAddrOps < 4) // FrameIndex only
        addOffset(MIB, 0);
    } else {
      MIB.addOperand(MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                ArrayRef<MachineOperand> MOs,
                                MachineInstr *MI) {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4) // FrameIndex only
    addOffset(MIB, 0);
  return MIB.addImm(0);
}

MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  unsigned OpNum,
                                                  ArrayRef<MachineOperand> MOs,
                                                  unsigned Size, unsigned Align,
                                                  bool AllowCommute) const {
  const DenseMap<unsigned,
                 std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
  bool isCallRegIndirect = Subtarget.callRegIndirect();
  bool isTwoAddrFold = false;

  // For CPUs that favor the register form of a call,
  // do not fold loads into calls.
  if (isCallRegIndirect &&
      (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r))
    return nullptr;

  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
      MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;

  // FIXME: AsmPrinter doesn't know how to handle
  // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
  if (MI->getOpcode() == X86::ADD32ri &&
      MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
    return nullptr;

  MachineInstr *NewMI = nullptr;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it elsewhere. It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && OpNum < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (OpNum == 0) {
    if (MI->getOpcode() == X86::MOV32r0) {
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
      if (NewMI)
        return NewMI;
    }

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  } else if (OpNum == 3) {
    OpcodeTablePtr = &RegOp2MemOpTable3;
  } else if (OpNum == 4) {
    OpcodeTablePtr = &RegOp2MemOpTable4;
  }

  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse.
    DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
        OpcodeTablePtr->find(MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      unsigned Opcode = I->second.first;
      unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
      if (Align < MinAlign)
        return nullptr;
      bool NarrowToMOV32rm = false;
      if (Size) {
        unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
        if (Size < RCSize) {
          // Check if it's safe to fold the load. If the size of the object is
          // narrower than the load width, then it's not.
          if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
            return nullptr;
          // If this is a 64-bit load, but the spill slot is 32, then we can do
          // a 32-bit load which is implicitly zero-extended. This likely is
          // due to live interval analysis remat'ing a load from stack slot.
          if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
            return nullptr;
          Opcode = X86::MOV32rm;
          NarrowToMOV32rm = true;
        }
      }

      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
      else
        NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this);

      if (NarrowToMOV32rm) {
        // This is the special case where we use a MOV32rm to load a 32-bit
        // value and zero-extend the top bits; change the destination register
        // to a 32-bit one.
        unsigned DstReg = NewMI->getOperand(0).getReg();
        if (TargetRegisterInfo::isPhysicalRegister(DstReg))
          NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
        else
          NewMI->getOperand(0).setSubReg(X86::sub_32bit);
      }
      return NewMI;
    }
  }

  // If the instruction and target operand are commutable, commute the
  // instruction and try again.
  if (AllowCommute) {
    unsigned OriginalOpIdx = OpNum, CommuteOpIdx1, CommuteOpIdx2;
    if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
      bool HasDef = MI->getDesc().getNumDefs();
      unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
      unsigned Reg1 = MI->getOperand(CommuteOpIdx1).getReg();
      unsigned Reg2 = MI->getOperand(CommuteOpIdx2).getReg();
      bool Tied0 =
          0 == MI->getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
      bool Tied1 =
          0 == MI->getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);

      // If either of the commutable operands is tied to the destination,
      // then we cannot commute + fold.
      if ((HasDef && Reg0 == Reg1 && Tied0) ||
          (HasDef && Reg0 == Reg2 && Tied1))
        return nullptr;

      if ((CommuteOpIdx1 == OriginalOpIdx) ||
          (CommuteOpIdx2 == OriginalOpIdx)) {
        MachineInstr *CommutedMI = commuteInstruction(MI, false);
        if (!CommutedMI) {
          // Unable to commute.
          return nullptr;
        }
        if (CommutedMI != MI) {
          // New instruction. We can't fold from this.
          CommutedMI->eraseFromParent();
          return nullptr;
        }

        // Attempt to fold with the commuted version of the instruction.
        unsigned CommuteOp =
            (CommuteOpIdx1 == OriginalOpIdx ? CommuteOpIdx2 : CommuteOpIdx1);
        NewMI = foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, Size, Align,
                                      /*AllowCommute=*/false);
        if (NewMI)
          return NewMI;

        // Folding failed again - undo the commute before returning.
        MachineInstr *UncommutedMI = commuteInstruction(MI, false);
        if (!UncommutedMI) {
          // Unable to commute.
          return nullptr;
        }
        if (UncommutedMI != MI) {
          // New instruction. It doesn't need to be kept.
          UncommutedMI->eraseFromParent();
          return nullptr;
        }

        // Return here to prevent duplicate fuse failure report.
        return nullptr;
      }
    }
  }

  // No fusion.
  if (PrintFailedFusing && !MI->isCopy())
    dbgs() << "We failed to fuse operand " << OpNum << " in " << *MI;
  return nullptr;
}

/// Return true for all instructions that only update
/// the first 32 or 64 bits of the destination register and leave the rest
/// unmodified. This can be used to avoid folding loads if the instructions
/// only update part of the destination register, and the non-updated part is
/// not needed, e.g. cvtss2sd, sqrtss. Unfolding the load from these
/// instructions breaks the partial register dependency and can improve
/// performance, e.g.:
///
///   movss (%rdi), %xmm0
///   cvtss2sd %xmm0, %xmm0
///
/// instead of
///   cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags.
///
static bool hasPartialRegUpdate(unsigned Opcode) {
  switch (Opcode) {
  case X86::CVTSI2SSrr:
  case X86::CVTSI2SSrm:
  case X86::CVTSI2SS64rr:
  case X86::CVTSI2SS64rm:
  case X86::CVTSI2SDrr:
  case X86::CVTSI2SDrm:
  case X86::CVTSI2SD64rr:
  case X86::CVTSI2SD64rm:
  case X86::CVTSD2SSrr:
  case X86::CVTSD2SSrm:
  case X86::Int_CVTSD2SSrr:
  case X86::Int_CVTSD2SSrm:
  case X86::CVTSS2SDrr:
  case X86::CVTSS2SDrm:
  case X86::Int_CVTSS2SDrr:
  case X86::Int_CVTSS2SDrm:
  case X86::RCPSSr:
  case X86::RCPSSm:
  case X86::RCPSSr_Int:
  case X86::RCPSSm_Int:
  case X86::ROUNDSDr:
  case X86::ROUNDSDm:
  case X86::ROUNDSDr_Int:
  case X86::ROUNDSSr:
  case X86::ROUNDSSm:
  case X86::ROUNDSSr_Int:
  case X86::RSQRTSSr:
  case X86::RSQRTSSm:
  case X86::RSQRTSSr_Int:
  case X86::RSQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSm:
  case X86::SQRTSSr_Int:
  case X86::SQRTSSm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDm:
  case X86::SQRTSDr_Int:
  case X86::SQRTSDm_Int:
    return true;
  }

  return false;
}

/// Inform the ExeDepsFix pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                             const TargetRegisterInfo *TRI) const {
  if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
    return 0;

  // If MI is marked as reading Reg, the partial register update is wanted.
  const MachineOperand &MO = MI->getOperand(0);
  unsigned Reg = MO.getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    if (MO.readsReg() || MI->readsVirtualRegister(Reg))
      return 0;
  } else {
    if (MI->readsRegister(Reg, TRI))
      return 0;
  }

  // If any of the preceding 16 instructions are reading Reg, insert a
  // dependency breaking instruction. The magic number is based on a few
  // Nehalem experiments.
  return 16;
}

// Return true for any instruction that copies the high bits of the first
// source operand into the unused high bits of the destination operand.
static bool hasUndefRegUpdate(unsigned Opcode) {
  switch (Opcode) {
  case X86::VCVTSI2SSrr:
  case X86::VCVTSI2SSrm:
  case X86::Int_VCVTSI2SSrr:
  case X86::Int_VCVTSI2SSrm:
  case X86::VCVTSI2SS64rr:
  case X86::VCVTSI2SS64rm:
  case X86::Int_VCVTSI2SS64rr:
  case X86::Int_VCVTSI2SS64rm:
  case X86::VCVTSI2SDrr:
  case X86::VCVTSI2SDrm:
  case X86::Int_VCVTSI2SDrr:
  case X86::Int_VCVTSI2SDrm:
  case X86::VCVTSI2SD64rr:
  case X86::VCVTSI2SD64rm:
  case X86::Int_VCVTSI2SD64rr:
  case X86::Int_VCVTSI2SD64rm:
  case X86::VCVTSD2SSrr:
  case X86::VCVTSD2SSrm:
  case X86::Int_VCVTSD2SSrr:
  case X86::Int_VCVTSD2SSrm:
  case X86::VCVTSS2SDrr:
  case X86::VCVTSS2SDrm:
  case X86::Int_VCVTSS2SDrr:
  case X86::Int_VCVTSS2SDrm:
  case X86::VRCPSSr:
  case X86::VRCPSSm:
  case X86::VRCPSSm_Int:
  case X86::VROUNDSDr:
  case X86::VROUNDSDm:
  case X86::VROUNDSDr_Int:
  case X86::VROUNDSSr:
  case X86::VROUNDSSm:
  case X86::VROUNDSSr_Int:
  case X86::VRSQRTSSr:
  case X86::VRSQRTSSm:
  case X86::VRSQRTSSm_Int:
  case X86::VSQRTSSr:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  // AVX-512
  case X86::VCVTSD2SSZrr:
  case X86::VCVTSD2SSZrm:
  case X86::VCVTSS2SDZrr:
  case X86::VCVTSS2SDZrm:
    return true;
  }

  return false;
}

/// Inform the ExeDepsFix pass how many idle instructions we would like before
/// certain undef register reads.
///
/// This catches the VCVTSI2SD family of instructions:
///
///   vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
///
/// We should be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
///   vxorps %xmm1<undef>, %xmm1<undef>, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
unsigned X86InstrInfo::
getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
                     const TargetRegisterInfo *TRI) const {
  if (!hasUndefRegUpdate(MI->getOpcode()))
    return 0;

  // Set the OpNum parameter to the first source operand.
  OpNum = 1;

  const MachineOperand &MO = MI->getOperand(OpNum);
  if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
    // Use the same magic number as getPartialRegUpdateClearance.
    return 16;
  }
  return 0;
}

void X86InstrInfo::
breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                          const TargetRegisterInfo *TRI) const {
  unsigned Reg = MI->getOperand(OpNum).getReg();
  // If MI kills this register, the false dependence is already broken.
  if (MI->killsRegister(Reg, TRI))
    return;
  if (X86::VR128RegClass.contains(Reg)) {
    // These instructions are all floating point domain, so xorps is the best
    // choice.
    bool HasAVX = Subtarget.hasAVX();
    unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
        .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
  } else if (X86::VR256RegClass.contains(Reg)) {
    // Use vxorps to clear the full ymm register.
    // It wants to read and write the xmm sub-register.
    unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
        .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
        .addReg(Reg, RegState::ImplicitDefine);
  } else
    return;
  MI->addRegisterKilled(Reg, TRI, true);
}
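// For instance, ahead of 'cvtss2sd %xmm0, %xmm0' (where the previous value
// of %xmm0 is otherwise dead), the code above inserts
//   xorps %xmm0, %xmm0
// so that the partial write of cvtss2sd no longer depends on the last
// writer of %xmm0.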
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  ArrayRef<unsigned> Ops,
                                                  int FrameIndex) const {
  // Check switch flag.
  if (NoFusing) return nullptr;

  // Unless optimizing for size, don't fold to avoid partial
  // register update stalls.
  if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
      hasPartialRegUpdate(MI->getOpcode()))
    return nullptr;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // If the function stack isn't realigned, we don't want to fold instructions
  // that need increased alignment.
  if (!RI.needsStackRealignment(MF))
    Alignment =
        std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    unsigned RCSize = 0;
    switch (MI->getOpcode()) {
    default: return nullptr;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri;   RCSize = 1; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
    }
    // Check if it's safe to fold the load. If the size of the object is
    // narrower than the load width, then it's not.
    if (Size < RCSize)
      return nullptr;
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return nullptr;

  return foldMemoryOperandImpl(MF, MI, Ops[0],
                               MachineOperand::CreateFI(FrameIndex), Size,
                               Alignment, /*AllowCommute=*/true);
}
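// For example, when both register operands of 'TEST32rr %reg, %reg' refer
// to a spilled value, the code above first rewrites the test to
// 'CMP32ri8 %reg, 0' and then folds the frame index, yielding roughly
//   cmpl $0, (%stack.0)
// (the stack slot name is illustrative).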
static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
                                  const MachineFunction &MF) {
  unsigned Opc = LoadMI.getOpcode();
  unsigned RegSize =
      MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();

  if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm) && RegSize > 4)
    // These instructions only load 32 bits; we can't fold them if the
    // destination register is wider than 32 bits (4 bytes).
    return true;

  if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm) && RegSize > 8)
    // These instructions only load 64 bits; we can't fold them if the
    // destination register is wider than 64 bits (8 bytes).
    return true;

  return false;
}

MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  ArrayRef<unsigned> Ops,
                                                  MachineInstr *LoadMI) const {
  // If loading from a FrameIndex, fold directly from the FrameIndex.
  unsigned NumOps = LoadMI->getDesc().getNumOperands();
  int FrameIndex;
  if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
    if (isPartialRegisterLoad(*LoadMI, MF))
      return nullptr;
    return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
  }

  // Check switch flag.
  if (NoFusing) return nullptr;

  // Unless optimizing for size, don't fold to avoid partial
  // register update stalls.
  if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
      hasPartialRegUpdate(MI->getOpcode()))
    return nullptr;

  // Determine the alignment of the load.
  unsigned Alignment = 0;
  if (LoadMI->hasOneMemOperand())
    Alignment = (*LoadMI->memoperands_begin())->getAlignment();
  else
    switch (LoadMI->getOpcode()) {
    case X86::AVX2_SETALLONES:
    case X86::AVX_SET0:
      Alignment = 32;
      break;
    case X86::V_SET0:
    case X86::V_SETALLONES:
      Alignment = 16;
      break;
    case X86::FsFLD0SD:
      Alignment = 8;
      break;
    case X86::FsFLD0SS:
      Alignment = 4;
      break;
    default:
      return nullptr;
    }
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return nullptr;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri;   break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return nullptr;

  // Make sure the subregisters match.
  // Otherwise we risk changing the size of the load.
  if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
    return nullptr;

  SmallVector<MachineOperand, X86::AddrNumOperands> MOs;
  switch (LoadMI->getOpcode()) {
  case X86::V_SET0:
  case X86::V_SETALLONES:
  case X86::AVX2_SETALLONES:
  case X86::AVX_SET0:
  case X86::FsFLD0SD:
  case X86::FsFLD0SS: {
    // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
    // Create a constant-pool entry and operands to load from it.

    // Medium and large code models can't fold loads this way.
    if (MF.getTarget().getCodeModel() != CodeModel::Small &&
        MF.getTarget().getCodeModel() != CodeModel::Kernel)
      return nullptr;

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (MF.getTarget().getRelocationModel() == Reloc::PIC_) {
      if (Subtarget.is64Bit())
        PICBase = X86::RIP;
      else
        // FIXME: PICBase = getGlobalBaseReg(&MF);
        // This doesn't work for several reasons.
        // 1. GlobalBaseReg may have been spilled.
        // 2. It may not be live at MI.
        return nullptr;
    }

    // Create a constant-pool entry.
    MachineConstantPool &MCP = *MF.getConstantPool();
    Type *Ty;
    unsigned Opc = LoadMI->getOpcode();
    if (Opc == X86::FsFLD0SS)
      Ty = Type::getFloatTy(MF.getFunction()->getContext());
    else if (Opc == X86::FsFLD0SD)
      Ty = Type::getDoubleTy(MF.getFunction()->getContext());
    else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0)
      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),
                           8);
    else
      Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),
                           4);

    bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES);
    const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
                                    Constant::getNullValue(Ty);
    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);

    // Create operands to load from the constant pool entry.
    MOs.push_back(MachineOperand::CreateReg(PICBase, false));
    MOs.push_back(MachineOperand::CreateImm(1));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    break;
  }
  default: {
    if (isPartialRegisterLoad(*LoadMI, MF))
      return nullptr;

    // Folding a normal load. Just copy the load's address operands.
    MOs.append(LoadMI->operands_begin() + NumOps - X86::AddrNumOperands,
               LoadMI->operands_begin() + NumOps);
    break;
  }
  }
  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
                               /*Size=*/0, Alignment, /*AllowCommute=*/true);
}

bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                        ArrayRef<unsigned> Ops) const {
  // Check switch flag.
  if (NoFusing) return false;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    case X86::ADD32ri:
      // FIXME: AsmPrinter doesn't know how to handle
      // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
      if (MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
        return false;
      break;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
      MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it elsewhere. It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned,
                 std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) {
    if (Opc == X86::MOV32r0)
      return true;

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  } else if (OpNum == 3) {
    OpcodeTablePtr = &RegOp2MemOpTable3;
  }

  if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
    return true;
  return TargetInstrInfo::canFoldMemoryOperand(MI, Ops);
}

bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
      MemOp2RegOpTable.find(MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & TB_INDEX_MASK;
  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const MCInstrDesc &MCID = get(Opc);
  const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
  if (!MI->hasOneMemOperand() &&
      RC == &X86::VR128RegClass &&
      !Subtarget.isUnalignedMemAccessFast())
    // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
    // conservatively assume the address is unaligned. That's bad for
    // performance.
    return false;
  SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index + X86::AddrNumOperands)
      AddrOps.push_back(Op);
    else if (Op.isReg() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
        MF.extractLoadMemRefs(MI->memoperands_begin(),
                              MI->memoperands_end());
    loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isReg())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, RegState::Define);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB.addOperand(BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB.addOperand(AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(),
               getDefRegState(MO.isDef()) |
               RegState::Implicit |
               getKillRegState(MO.isKill()) |
               getDeadRegState(MO.isDead()) |
               getUndefRegState(MO.isUndef()));
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      unsigned NewOpc;
      switch (DataMI->getOpcode()) {
      default: llvm_unreachable("Unreachable!");
      case X86::CMP64ri8:
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri8:
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri8:
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr;  break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
        MF.extractStoreMemRefs(MI->memoperands_begin(),
                               MI->memoperands_end());
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second,
                   NewMIs);
  }

  return true;
}
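// As an illustration, unfolding the memory form 'ADD32rm %eax, <fi#0>' with
// UnfoldLoad set produces roughly (register and slot names illustrative):
//   %reg = MOV32rm <fi#0>       // the emitted load
//   %eax = ADD32rr %eax, %reg   // the data processing instruction
// with the actual opcodes taken from MemOp2RegOpTable.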
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isMachineOpcode())
    return false;

  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
      MemOp2RegOpTable.find(N->getMachineOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & TB_INDEX_MASK;
  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
  const MCInstrDesc &MCID = get(Opc);
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
  unsigned NumDefs = MCID.NumDefs;
  std::vector<SDValue> AddrOps;
  std::vector<SDValue> BeforeOps;
  std::vector<SDValue> AfterOps;
  SDLoc dl(N);
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDValue Op = N->getOperand(i);
    if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
      AddrOps.push_back(Op);
    else if (i < Index-NumDefs)
      BeforeOps.push_back(Op);
    else if (i > Index-NumDefs)
      AfterOps.push_back(Op);
  }
  SDValue Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = nullptr;
  if (FoldedLoad) {
    EVT VT = *RC->vt_begin();
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
        MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
                              cast<MachineSDNode>(N)->memoperands_end());
    if (!(*MMOs.first) &&
        RC == &X86::VR128RegClass &&
        !Subtarget.isUnalignedMemAccessFast())
      // Do not introduce a slow unaligned load.
      return false;
    unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
    bool isAligned = (*MMOs.first) &&
                     (*MMOs.first)->getAlignment() >= Alignment;
    Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
                              VT, MVT::Other, AddrOps);
    NewNodes.push_back(Load);

    // Preserve memory reference information.
    cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
  }

  // Emit the data processing instruction.
  std::vector<EVT> VTs;
  const TargetRegisterClass *DstRC = nullptr;
  if (MCID.getNumDefs() > 0) {
    DstRC = getRegClass(MCID, 0, &RI, MF);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDValue(Load, 0));
  BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end());
  SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDValue(NewNode, 0));
    AddrOps.push_back(Chain);
    std::pair<MachineInstr::mmo_iterator,
              MachineInstr::mmo_iterator> MMOs =
        MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
                               cast<MachineSDNode>(N)->memoperands_end());
    if (!(*MMOs.first) &&
        RC == &X86::VR128RegClass &&
        !Subtarget.isUnalignedMemAccessFast())
      // Do not introduce a slow unaligned store.
      return false;
    unsigned Alignment = RC->getSize() == 32 ? 32 : 16;
    bool isAligned = (*MMOs.first) &&
                     (*MMOs.first)->getAlignment() >= Alignment;
    SDNode *Store =
        DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
                           dl, MVT::Other, AddrOps);
    NewNodes.push_back(Store);

    // Preserve memory reference information.
    cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
  }

  return true;
}

unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex) const {
  DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
      MemOp2RegOpTable.find(Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
  bool FoldedStore = I->second.second & TB_FOLDED_STORE;
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  if (LoadRegIndex)
    *LoadRegIndex = I->second.second & TB_INDEX_MASK;
  return I->second.first;
}

bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                      int64_t &Offset1,
                                      int64_t &Offset2) const {
  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;
  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  switch (Opc1) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  // AVX load instructions
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::FsVMOVAPSrm:
  case X86::FsVMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
    break;
  }
  switch (Opc2) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  // AVX load instructions
  case X86::VMOVSSrm:
  case X86::VMOVSDrm:
  case X86::FsVMOVAPSrm:
  case X86::FsVMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPSrm:
  case X86::VMOVAPDrm:
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPSYrm:
  case X86::VMOVAPDYrm:
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm:
    break;
  }

  // Check if chain operands and base addresses match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(5) != Load2->getOperand(5))
    return false;
  // Segment operands should match as well.
  if (Load1->getOperand(4) != Load2->getOperand(4))
    return false;
  // Scale should be 1, Index should be Reg0.
  if (Load1->getOperand(1) == Load2->getOperand(1) &&
      Load1->getOperand(2) == Load2->getOperand(2)) {
    if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
      return false;

    // Now let's examine the displacements.
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {
  assert(Offset2 > Offset1);
  // Don't bother clustering loads whose displacements are far apart
  // (roughly more than 512 bytes).
  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  if (Opc1 != Opc2)
    return false; // FIXME: overly conservative?

  switch (Opc1) {
  default: break;
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return false;
  }

  EVT VT = Load1->getValueType(0);
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // XMM registers. In 64-bit mode we can be a bit more aggressive since we
    // have 16 of them to play with.
    if (Subtarget.is64Bit()) {
      if (NumLoads >= 3)
        return false;
    } else if (NumLoads) {
      return false;
    }
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f32:
  case MVT::f64:
    if (NumLoads)
      return false;
    break;
  }

  return true;
}
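// Reading of the heuristic above (hedged): scalar loads are only clustered
// pairwise (any non-zero NumLoads rejects another one), whereas XMM loads in
// 64-bit mode tolerate a couple of already-clustered loads (NumLoads <= 2)
// before clustering is refused, since twice as many vector registers are
// available there.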
bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                          MachineInstr *Second) const {
  // Check if this processor supports macro-fusion. Since this is a minor
  // heuristic, we haven't specifically reserved a feature. hasAVX is a decent
  // proxy for SandyBridge+.
  if (!Subtarget.hasAVX())
    return false;

  // Classify the branch by which kinds of flag-setting instructions may be
  // macro-fused with it.
  enum {
    FuseTest,
    FuseCmp,
    FuseInc
  } FuseKind;

  switch (Second->getOpcode()) {
  default:
    return false;
  case X86::JE_1:
  case X86::JNE_1:
  case X86::JL_1:
  case X86::JLE_1:
  case X86::JG_1:
  case X86::JGE_1:
    FuseKind = FuseInc;
    break;
  case X86::JB_1:
  case X86::JBE_1:
  case X86::JA_1:
  case X86::JAE_1:
    FuseKind = FuseCmp;
    break;
  case X86::JS_1:
  case X86::JNS_1:
  case X86::JP_1:
  case X86::JNP_1:
  case X86::JO_1:
  case X86::JNO_1:
    FuseKind = FuseTest;
    break;
  }
  switch (First->getOpcode()) {
  default:
    return false;
  case X86::TEST8rr:
  case X86::TEST16rr:
  case X86::TEST32rr:
  case X86::TEST64rr:
  case X86::TEST8ri:
  case X86::TEST16ri:
  case X86::TEST32ri:
  case X86::TEST32i32:
  case X86::TEST64i32:
  case X86::TEST64ri32:
  case X86::TEST8rm:
  case X86::TEST16rm:
  case X86::TEST32rm:
  case X86::TEST64rm:
  case X86::TEST8ri_NOREX:
  case X86::AND16i16:
  case X86::AND16ri:
  case X86::AND16ri8:
  case X86::AND16rm:
  case X86::AND16rr:
  case X86::AND32i32:
  case X86::AND32ri:
  case X86::AND32ri8:
  case X86::AND32rm:
  case X86::AND32rr:
  case X86::AND64i32:
  case X86::AND64ri32:
  case X86::AND64ri8:
  case X86::AND64rm:
  case X86::AND64rr:
  case X86::AND8i8:
  case X86::AND8ri:
  case X86::AND8rm:
  case X86::AND8rr:
    return true;
  case X86::CMP16i16:
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP16rm:
  case X86::CMP16rr:
  case X86::CMP32i32:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP32rm:
  case X86::CMP32rr:
  case X86::CMP64i32:
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP64rm:
  case X86::CMP64rr:
  case X86::CMP8i8:
  case X86::CMP8ri:
  case X86::CMP8rm:
  case X86::CMP8rr:
  case X86::ADD16i16:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri8_DB:
  case X86::ADD16ri_DB:
  case X86::ADD16rm:
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
  case X86::ADD32i32:
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri8_DB:
  case X86::ADD32ri_DB:
  case X86::ADD32rm:
  case X86::ADD32rr:
  case X86::ADD32rr_DB:
  case X86::ADD64i32:
  case X86::ADD64ri32:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8:
  case X86::ADD64ri8_DB:
  case X86::ADD64rm:
  case X86::ADD64rr:
  case X86::ADD64rr_DB:
  case X86::ADD8i8:
  case X86::ADD8mi:
  case X86::ADD8mr:
  case X86::ADD8ri:
  case X86::ADD8rm:
  case X86::ADD8rr:
  case X86::SUB16i16:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB16rm:
  case X86::SUB16rr:
  case X86::SUB32i32:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB32rm:
  case X86::SUB32rr:
  case X86::SUB64i32:
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB64rm:
  case X86::SUB64rr:
  case X86::SUB8i8:
  case X86::SUB8ri:
  case X86::SUB8rm:
  case X86::SUB8rr:
    return FuseKind == FuseCmp || FuseKind == FuseInc;
  case X86::INC16r:
  case X86::INC32r:
  case X86::INC64r:
  case X86::INC8r:
  case X86::DEC16r:
  case X86::DEC32r:
  case X86::DEC64r:
  case X86::DEC8r:
    return FuseKind == FuseInc;
  }
}
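// For illustration (hedged, following the mapping above): a sequence like
//
//   cmpl $16, %edi      ; CMP32ri8 -> fuses with FuseCmp/FuseInc branches
//   jbe  .LBB0_2        ; JBE_1    -> FuseCmp
//
// is a macro-fusion candidate on SandyBridge and later, so the scheduler
// keeps the pair adjacent, while "incl %eax; js ..." is not (INC only fuses
// with the FuseInc branches, and JS is FuseTest).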
bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
    return true;
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // FIXME: Return false for x87 stack register classes for now. We can't
  // allow any loads of these registers before FpGet_ST0_80.
  return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
           RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}

/// Return a virtual register initialized with the global base register
/// value. Output instructions required to initialize the register in the
/// function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  assert(!Subtarget.is64Bit() &&
         "X86-64 PIC uses RIP relative addressing");

  X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // Create the register. The code to initialize it is inserted
  // later, by the CGBR pass (below).
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  X86FI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}
// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
static const uint16_t ReplaceableInstrs[][3] = {
  //PackedSingle     PackedDouble    PackedInt
  { X86::MOVAPSmr,   X86::MOVAPDmr,  X86::MOVDQAmr  },
  { X86::MOVAPSrm,   X86::MOVAPDrm,  X86::MOVDQArm  },
  { X86::MOVAPSrr,   X86::MOVAPDrr,  X86::MOVDQArr  },
  { X86::MOVUPSmr,   X86::MOVUPDmr,  X86::MOVDQUmr  },
  { X86::MOVUPSrm,   X86::MOVUPDrm,  X86::MOVDQUrm  },
  { X86::MOVLPSmr,   X86::MOVLPDmr,  X86::MOVPQI2QImr },
  { X86::MOVNTPSmr,  X86::MOVNTPDmr, X86::MOVNTDQmr },
  { X86::ANDNPSrm,   X86::ANDNPDrm,  X86::PANDNrm   },
  { X86::ANDNPSrr,   X86::ANDNPDrr,  X86::PANDNrr   },
  { X86::ANDPSrm,    X86::ANDPDrm,   X86::PANDrm    },
  { X86::ANDPSrr,    X86::ANDPDrr,   X86::PANDrr    },
  { X86::ORPSrm,     X86::ORPDrm,    X86::PORrm     },
  { X86::ORPSrr,     X86::ORPDrr,    X86::PORrr     },
  { X86::XORPSrm,    X86::XORPDrm,   X86::PXORrm    },
  { X86::XORPSrr,    X86::XORPDrr,   X86::PXORrr    },
  // AVX 128-bit support
  { X86::VMOVAPSmr,  X86::VMOVAPDmr, X86::VMOVDQAmr },
  { X86::VMOVAPSrm,  X86::VMOVAPDrm, X86::VMOVDQArm },
  { X86::VMOVAPSrr,  X86::VMOVAPDrr, X86::VMOVDQArr },
  { X86::VMOVUPSmr,  X86::VMOVUPDmr, X86::VMOVDQUmr },
  { X86::VMOVUPSrm,  X86::VMOVUPDrm, X86::VMOVDQUrm },
  // TODO: Add the AVX versions of MOVLPSmr
  { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
  { X86::VANDNPSrm,  X86::VANDNPDrm, X86::VPANDNrm  },
  { X86::VANDNPSrr,  X86::VANDNPDrr, X86::VPANDNrr  },
  { X86::VANDPSrm,   X86::VANDPDrm,  X86::VPANDrm   },
  { X86::VANDPSrr,   X86::VANDPDrr,  X86::VPANDrr   },
  { X86::VORPSrm,    X86::VORPDrm,   X86::VPORrm    },
  { X86::VORPSrr,    X86::VORPDrr,   X86::VPORrr    },
  { X86::VXORPSrm,   X86::VXORPDrm,  X86::VPXORrm   },
  { X86::VXORPSrr,   X86::VXORPDrr,  X86::VPXORrr   },
  // AVX 256-bit support
  { X86::VMOVAPSYmr,  X86::VMOVAPDYmr,  X86::VMOVDQAYmr },
  { X86::VMOVAPSYrm,  X86::VMOVAPDYrm,  X86::VMOVDQAYrm },
  { X86::VMOVAPSYrr,  X86::VMOVAPDYrr,  X86::VMOVDQAYrr },
  { X86::VMOVUPSYmr,  X86::VMOVUPDYmr,  X86::VMOVDQUYmr },
  { X86::VMOVUPSYrm,  X86::VMOVUPDYrm,  X86::VMOVDQUYrm },
  { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }
};

static const uint16_t ReplaceableInstrsAVX2[][3] = {
  //PackedSingle        PackedDouble         PackedInt
  { X86::VANDNPSYrm,    X86::VANDNPDYrm,     X86::VPANDNYrm },
  { X86::VANDNPSYrr,    X86::VANDNPDYrr,     X86::VPANDNYrr },
  { X86::VANDPSYrm,     X86::VANDPDYrm,      X86::VPANDYrm  },
  { X86::VANDPSYrr,     X86::VANDPDYrr,      X86::VPANDYrr  },
  { X86::VORPSYrm,      X86::VORPDYrm,       X86::VPORYrm   },
  { X86::VORPSYrr,      X86::VORPDYrr,       X86::VPORYrr   },
  { X86::VXORPSYrm,     X86::VXORPDYrm,      X86::VPXORYrm  },
  { X86::VXORPSYrr,     X86::VXORPDYrr,      X86::VPXORYrr  },
  { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
  { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
  { X86::VINSERTF128rm,  X86::VINSERTF128rm,  X86::VINSERTI128rm },
  { X86::VINSERTF128rr,  X86::VINSERTF128rr,  X86::VINSERTI128rr },
  { X86::VPERM2F128rm,   X86::VPERM2F128rm,   X86::VPERM2I128rm },
  { X86::VPERM2F128rr,   X86::VPERM2F128rr,   X86::VPERM2I128rr },
  { X86::VBROADCASTSSrm,  X86::VBROADCASTSSrm,  X86::VPBROADCASTDrm },
  { X86::VBROADCASTSSrr,  X86::VBROADCASTSSrr,  X86::VPBROADCASTDrr },
  { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr },
  { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm },
  { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr },
  { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm }
};
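// Illustrative example: XORPSrr (PackedSingle), XORPDrr (PackedDouble), and
// PXORrr (PackedInt) all compute the same 128-bit XOR, so the row above lets
// the execution-domain fixup pass rewrite, say, a PXORrr feeding
// packed-single arithmetic into XORPSrr and avoid a domain-crossing penalty.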
// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const uint16_t *lookup(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
    if (ReplaceableInstrs[i][domain-1] == opcode)
      return ReplaceableInstrs[i];
  return nullptr;
}

static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
    if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
      return ReplaceableInstrsAVX2[i];
  return nullptr;
}

std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  bool hasAVX2 = Subtarget.hasAVX2();
  uint16_t validDomains = 0;
  // Bit i of validDomains is set if the instruction has an equivalent in
  // domain i: 0xe allows PackedSingle/PackedDouble/PackedInt, 0x6 only the
  // two floating-point domains.
  if (domain && lookup(MI->getOpcode(), domain))
    validDomains = 0xe;
  else if (domain && lookupAVX2(MI->getOpcode(), domain))
    validDomains = hasAVX2 ? 0xe : 0x6;
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");
  const uint16_t *table = lookup(MI->getOpcode(), dom);
  if (!table) { // try the other table
    assert((Subtarget.hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookupAVX2(MI->getOpcode(), dom);
  }
  assert(table && "Cannot change domain");
  MI->setDesc(get(table[Domain-1]));
}

/// Return the noop instruction to use for a noop.
void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

// This code must remain in sync with getJumpInstrTableEntryBound in this class!
// In particular, getJumpInstrTableEntryBound must always return an upper bound
// on the encoding lengths of the instructions generated by
// getUnconditionalBranch and getTrap.
void X86InstrInfo::getUnconditionalBranch(
    MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
  Branch.setOpcode(X86::JMP_1);
  Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
}

// This code must remain in sync with getJumpInstrTableEntryBound in this class!
// In particular, getJumpInstrTableEntryBound must always return an upper bound
// on the encoding lengths of the instructions generated by
// getUnconditionalBranch and getTrap.
void X86InstrInfo::getTrap(MCInst &MI) const {
  MI.setOpcode(X86::TRAP);
}

// See getTrap and getUnconditionalBranch for conditions on the value returned
// by this function.
unsigned X86InstrInfo::getJumpInstrTableEntryBound() const {
  // 5 bytes suffice: JMP_4 Symbol@PLT uses 1 byte (E9) for the opcode and 4
  // bytes for the symbol offset. And TRAP is ud2, which is two bytes (0F 0B).
  return 5;
}
bool X86InstrInfo::isHighLatencyDef(int opc) const {
  switch (opc) {
  default: return false;
  case X86::DIVSDrm:
  case X86::DIVSDrm_Int:
  case X86::DIVSDrr:
  case X86::DIVSDrr_Int:
  case X86::DIVSSrm:
  case X86::DIVSSrm_Int:
  case X86::DIVSSrr:
  case X86::DIVSSrr_Int:
  case X86::SQRTPDm:
  case X86::SQRTPDr:
  case X86::SQRTPSm:
  case X86::SQRTPSr:
  case X86::SQRTSDm:
  case X86::SQRTSDm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDr_Int:
  case X86::SQRTSSm:
  case X86::SQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSr_Int:
  // AVX instructions with high latency
  case X86::VDIVSDrm:
  case X86::VDIVSDrm_Int:
  case X86::VDIVSDrr:
  case X86::VDIVSDrr_Int:
  case X86::VDIVSSrm:
  case X86::VDIVSSrm_Int:
  case X86::VDIVSSrr:
  case X86::VDIVSSrr_Int:
  case X86::VSQRTPDm:
  case X86::VSQRTPDr:
  case X86::VSQRTPSm:
  case X86::VSQRTPSr:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSSr:
  case X86::VSQRTPDZm:
  case X86::VSQRTPDZr:
  case X86::VSQRTPSZm:
  case X86::VSQRTPSZr:
  case X86::VSQRTSDZm:
  case X86::VSQRTSDZm_Int:
  case X86::VSQRTSDZr:
  case X86::VSQRTSSZm_Int:
  case X86::VSQRTSSZr:
  case X86::VSQRTSSZm:
  case X86::VDIVSDZrm:
  case X86::VDIVSDZrr:
  case X86::VDIVSSZrm:
  case X86::VDIVSSZrr:

  case X86::VGATHERQPSZrm:
  case X86::VGATHERQPDZrm:
  case X86::VGATHERDPDZrm:
  case X86::VGATHERDPSZrm:
  case X86::VPGATHERQDZrm:
  case X86::VPGATHERQQZrm:
  case X86::VPGATHERDDZrm:
  case X86::VPGATHERDQZrm:
  case X86::VSCATTERQPDZmr:
  case X86::VSCATTERQPSZmr:
  case X86::VSCATTERDPDZmr:
  case X86::VSCATTERDPSZmr:
  case X86::VPSCATTERQDZmr:
  case X86::VPSCATTERQQZmr:
  case X86::VPSCATTERDDZmr:
  case X86::VPSCATTERDQZmr:
    return true;
  }
}

bool X86InstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  return isHighLatencyDef(DefMI->getOpcode());
}

namespace {
  /// Create Global Base Reg pass. This initializes the PIC
  /// global base register for x86-32.
  struct CGBR : public MachineFunctionPass {
    static char ID;
    CGBR() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();

      // Don't do anything if this is 64-bit as 64-bit PIC
      // uses RIP relative addressing.
      if (STI.is64Bit())
        return false;

      // Only emit a global base reg in PIC mode.
      if (TM->getRelocationModel() != Reloc::PIC_)
        return false;

      X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
      unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();

      // If we didn't need a GlobalBaseReg, don't insert code.
      if (GlobalBaseReg == 0)
        return false;

      // Insert the set of GlobalBaseReg into the first MBB of the function.
      MachineBasicBlock &FirstMBB = MF.front();
      MachineBasicBlock::iterator MBBI = FirstMBB.begin();
      DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      const X86InstrInfo *TII = STI.getInstrInfo();

      unsigned PC;
      if (STI.isPICStyleGOT())
        PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      else
        PC = GlobalBaseReg;

      // Operand of MovePCtoStack is completely ignored by asm printer. It's
      // only used in JIT code emission as displacement to pc.
      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

      // If we're using vanilla 'GOT' PIC style, we should use relative
      // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
      if (STI.isPICStyleGOT()) {
        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
          .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                        X86II::MO_GOT_ABSOLUTE_ADDRESS);
      }

      return true;
    }

    const char *getPassName() const override {
      return "X86 PIC Global Base Reg Initialization";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char CGBR::ID = 0;
FunctionPass*
llvm::createX86GlobalBaseRegPass() { return new CGBR(); }

namespace {
  struct LDTLSCleanup : public MachineFunctionPass {
    static char ID;
    LDTLSCleanup() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
      if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
        // No point folding accesses if there aren't at least two.
        return false;
      }

      MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
      return VisitNode(DT->getRootNode(), 0);
    }

    // Visit the dominator subtree rooted at Node in pre-order.
    // If TLSBaseAddrReg is non-zero, then use that to replace any
    // TLS_base_addr instructions. Otherwise, create the register
    // when the first such instruction is seen, and then use it
    // as we encounter more instructions.
    bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
      MachineBasicBlock *BB = Node->getBlock();
      bool Changed = false;

      // Traverse the current block.
      for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
           ++I) {
        switch (I->getOpcode()) {
        case X86::TLS_base_addr32:
        case X86::TLS_base_addr64:
          if (TLSBaseAddrReg)
            I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
          else
            I = SetRegister(I, &TLSBaseAddrReg);
          Changed = true;
          break;
        default:
          break;
        }
      }

      // Visit the children of this block in the dominator tree.
      for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
           I != E; ++I) {
        Changed |= VisitNode(*I, TLSBaseAddrReg);
      }

      return Changed;
    }
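    // Sketch of the rewrite performed below (hedged): given two local-dynamic
    // accesses where the first dominates the second,
    //
    //   %a = TLS_base_addr64 ...   ; calls __tls_get_addr
    //   ...
    //   %b = TLS_base_addr64 ...   ; redundant call
    //
    // the second instruction is replaced by a COPY from the virtual register
    // that captured RAX/EAX after the first call, so __tls_get_addr runs only
    // once on that path.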
    // Replace the TLS_base_addr instruction I with a copy from
    // TLSBaseAddrReg, returning the new instruction.
    MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                         unsigned TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
      const bool is64Bit = STI.is64Bit();
      const X86InstrInfo *TII = STI.getInstrInfo();

      // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
      MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   is64Bit ? X86::RAX : X86::EAX)
                               .addReg(TLSBaseAddrReg);

      // Erase the TLS_base_addr instruction.
      I->eraseFromParent();

      return Copy;
    }

    // Create a virtual register in *TLSBaseAddrReg, and populate it by
    // inserting a copy instruction after I. Returns the new instruction.
    MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
      const bool is64Bit = STI.is64Bit();
      const X86InstrInfo *TII = STI.getInstrInfo();

      // Create a virtual register for the TLS base address.
      MachineRegisterInfo &RegInfo = MF->getRegInfo();
      *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
                                                        ? &X86::GR64RegClass
                                                        : &X86::GR32RegClass);

      // Insert a copy from RAX/EAX to TLSBaseAddrReg.
      MachineInstr *Next = I->getNextNode();
      MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   *TLSBaseAddrReg)
                               .addReg(is64Bit ? X86::RAX : X86::EAX);

      return Copy;
    }

    const char *getPassName() const override {
      return "Local Dynamic TLS Access Clean-up";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
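// Note (assumption, for context): these passes are typically registered from
// the target's pass configuration, e.g. X86PassConfig::addInstSelector() in
// X86TargetMachine.cpp adding createCleanupLocalDynamicTLSPass() for ELF
// targets and createX86GlobalBaseRegPass() for 32-bit PIC.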