//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx   : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>,
                                       SDTCisVec<1>]>;

def X86umin    : SDNode<"X86ISD::UMIN",      SDTIntBinOp>;
def X86umax    : SDNode<"X86ISD::UMAX",      SDTIntBinOp>;
def X86smin    : SDNode<"X86ISD::SMIN",      SDTIntBinOp>;
def X86smax    : SDNode<"X86ISD::SMAX",      SDTIntBinOp>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc   : SDNode<"X86ISD::FMINC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc   : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;

def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
// FANDN computes (~X & Y), so it is neither commutative nor associative.
def X86fandn   : SDNode<"X86ISD::FANDN",     SDTFPBinOp>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86fgetsign : SDNode<"X86ISD::FGETSIGNx86", SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmps    : SDNode<"X86ISD::FSETCC",    SDTX86Cmps>;
//def X86cmpsd : SDNode<"X86ISD::FSETCCsd",  SDTX86Cmpsd>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
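// Illustrative only (hypothetical values, not a pattern used in this file):
// an extract node such as X86pextrw takes a vector source and a pointer-sized
// lane index and produces a zero-extended i32, so a selection pattern would
// look roughly like
//   (i32 (X86pextrw (v8i16 VR128:$src), 2))
// to match an extract of the 16-bit element in lane 2 of $src.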
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insertps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vzext   : SDNode<"X86ISD::VZEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vsext   : SDNode<"X86ISD::VSEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vtrunc  : SDNode<"X86ISD::VTRUNC",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;
def X86trunc   : SDNode<"X86ISD::TRUNC",
                        SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;

def X86vtruncm : SDNode<"X86ISD::VTRUNCM",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisVec<2>, SDTCisInt<2>,
                                             SDTCisOpSmallerThanOp<0, 2>]>>;
def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vfpround : SDNode<"X86ISD::VFPROUND",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;

def X86vshldq  : SDNode<"X86ISD::VSHLDQ",    SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ",    SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",      SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86IntCmpMask : SDTypeProfile<1, 2,
    [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>]>;
def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;

def X86CmpMaskCC :
      SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>, SDTCisVec<1>,
                           SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
def X86CmpMaskCCScalar :
      SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

def X86cmpm    : SDNode<"X86ISD::CMPM",     X86CmpMaskCC>;
def X86cmpmu   : SDNode<"X86ISD::CMPMU",    X86CmpMaskCC>;
def X86cmpms   : SDNode<"X86ISD::FSETCC",   X86CmpMaskCCScalar>;

def X86vshl    : SDNode<"X86ISD::VSHL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86subus   : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
SDNode<"X86ISD::TESTP", SDTX86CmpPTest>; 166 def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>; 167 def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>, 168 SDTCisVec<1>, 169 SDTCisSameAs<2, 1>]>>; 170 def X86testnm : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>, 171 SDTCisVec<1>, 172 SDTCisSameAs<2, 1>]>>; 173 def X86select : SDNode<"X86ISD::SELECT" , SDTSelect>; 174 175 def X86pmuludq : SDNode<"X86ISD::PMULUDQ", 176 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, 177 SDTCisSameAs<1,2>]>>; 178 def X86pmuldq : SDNode<"X86ISD::PMULDQ", 179 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, 180 SDTCisSameAs<1,2>]>>; 181 182 // Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get 183 // translated into one of the target nodes below during lowering. 184 // Note: this is a work in progress... 185 def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>; 186 def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>, 187 SDTCisSameAs<0,2>]>; 188 def SDTShuff3Op : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, 189 SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>; 190 191 def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>, 192 SDTCisSameAs<0,1>, SDTCisInt<2>]>; 193 def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, 194 SDTCisSameAs<0,2>, SDTCisInt<3>]>; 195 196 def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>; 197 def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>; 198 199 def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, 200 SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>; 201 202 def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>, 203 SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>; 204 205 def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>; 206 207 def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>; 208 def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>; 209 def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>; 210 211 def X86Shufp : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>; 212 213 def X86Movddup : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>; 214 def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>; 215 def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>; 216 217 def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>; 218 def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>; 219 220 def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>; 221 def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>; 222 def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>; 223 224 def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>; 225 def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>; 226 227 def SDTPack : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<2, 1>]>; 228 def X86Packss : SDNode<"X86ISD::PACKSS", SDTPack>; 229 def X86Packus : SDNode<"X86ISD::PACKUS", SDTPack>; 230 231 def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>; 232 def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>; 233 234 def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>; 235 def X86VPermv : SDNode<"X86ISD::VPERMV", SDTShuff2Op>; 236 def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>; 237 def X86VPermv3 : SDNode<"X86ISD::VPERMV3", SDTShuff3Op>; 238 def X86VPermiv3 : SDNode<"X86ISD::VPERMIV3", SDTShuff3Op>; 239 240 def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>; 241 242 def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>; 243 def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>; 244 def X86Vinsert : 
SDNode<"X86ISD::VINSERT", SDTypeProfile<1, 3, 245 [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>; 246 def X86Vextract : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2, 247 [SDTCisVec<1>, SDTCisPtrTy<2>]>, []>; 248 249 def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>; 250 def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>; 251 def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>; 252 def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>; 253 def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>; 254 def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>; 255 def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>; 256 257 def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, 258 SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>, 259 SDTCisVT<4, i8>]>; 260 def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, 261 SDTCisVT<2, v16i8>, SDTCisVT<3, i32>, 262 SDTCisVT<4, v16i8>, SDTCisVT<5, i32>, 263 SDTCisVT<6, i8>]>; 264 265 def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>; 266 def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>; 267 268 //===----------------------------------------------------------------------===// 269 // SSE Complex Patterns 270 //===----------------------------------------------------------------------===// 271 272 // These are 'extloads' from a scalar to the low element of a vector, zeroing 273 // the top elements. These are used for the SSE 'ss' and 'sd' instruction 274 // forms. 275 def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [], 276 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, 277 SDNPWantRoot]>; 278 def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [], 279 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, 280 SDNPWantRoot]>; 281 282 def ssmem : Operand<v4f32> { 283 let PrintMethod = "printf32mem"; 284 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm); 285 let ParserMatchClass = X86Mem32AsmOperand; 286 let OperandType = "OPERAND_MEMORY"; 287 } 288 def sdmem : Operand<v2f64> { 289 let PrintMethod = "printf64mem"; 290 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm); 291 let ParserMatchClass = X86Mem64AsmOperand; 292 let OperandType = "OPERAND_MEMORY"; 293 } 294 295 //===----------------------------------------------------------------------===// 296 // SSE pattern fragments 297 //===----------------------------------------------------------------------===// 298 299 // 128-bit load pattern fragments 300 // NOTE: all 128-bit integer vector loads are promoted to v2i64 301 def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>; 302 def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>; 303 def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>; 304 305 // 256-bit load pattern fragments 306 // NOTE: all 256-bit integer vector loads are promoted to v4i64 307 def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>; 308 def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>; 309 def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>; 310 311 // 512-bit load pattern fragments 312 def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>; 313 def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>; 314 def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>; 315 def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>; 316 317 // 128-/256-/512-bit extload pattern fragments 318 def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>; 319 def 
// 128-/256-/512-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'store', but always requires 512-bit vector alignment.
def alignedstore512 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 64;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 512-bit vector alignment.
def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 64;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// 512-bit aligned load pattern fragments
def alignedloadv16f32 : PatFrag<(ops node:$ptr),
                                (v16f32 (alignedload512 node:$ptr))>;
def alignedloadv16i32 : PatFrag<(ops node:$ptr),
                                (v16i32 (alignedload512 node:$ptr))>;
def alignedloadv8f64  : PatFrag<(ops node:$ptr),
                                (v8f64 (alignedload512 node:$ptr))>;
def alignedloadv8i64  : PatFrag<(ops node:$ptr),
                                (v8i64 (alignedload512 node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return    Subtarget->hasVectorUAMem()
         || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memop4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return    Subtarget->hasVectorUAMem()
         || cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def memop8 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return    Subtarget->hasVectorUAMem()
         || cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;
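// Illustrative only (hypothetical pattern, not a definition in this file):
// the typed memop fragments defined just below, such as memopv4f32, are what
// instruction patterns use for the folded-load operand of most SSE ops, e.g.
//   (v4f32 (X86fmin VR128:$src1, (memopv4f32 addr:$src2)))
// would match only when the load is 16-byte aligned or the subtarget
// tolerates unaligned vector memory accesses.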
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// 256-bit memop pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;

// 512-bit memop pattern fragments
def memopv16f32 : PatFrag<(ops node:$ptr), (v16f32 (memop4 node:$ptr))>;
def memopv8f64  : PatFrag<(ops node:$ptr), (v8f64 (memop8 node:$ptr))>;
def memopv16i32 : PatFrag<(ops node:$ptr), (v16i32 (memop4 node:$ptr))>;
def memopv8i64  : PatFrag<(ops node:$ptr), (v8i64 (memop8 node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
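// Illustrative only (hypothetical pattern, not a definition in this file):
// a non-temporal store fragment is used the same way as 'store' in an
// instruction pattern, e.g. a MOVNTPS-style pattern could be written as
//   (alignednontemporalstore (v4f32 VR128:$src), addr:$dst)
// so that it matches only stores flagged non-temporal with sufficient
// alignment.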
// 256-bit bitconvert pattern fragments
def bc_v32i8  : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32  : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64  : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
def bc_v8f32  : PatFrag<(ops node:$in), (v8f32 (bitconvert node:$in))>;

// 512-bit bitconvert pattern fragments
def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
def bc_v8i64  : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;
def bc_v8f64  : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>;
def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;


def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def I8Imm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;

def FROUND_NO_EXC  : ImmLeaf<i32, [{ return Imm == 8; }]>;
def FROUND_CURRENT : ImmLeaf<i32, [{ return Imm == 4; }]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;

// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT128Immediate(N));
}]>;

// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT128Immediate(N));
}]>;

// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT256Immediate(N));
}]>;

// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT256Immediate(N));
}]>;

def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     node:$index), [{
  return X86::isVEXTRACT128Index(N);
}], EXTRACT_get_vextract128_imm>;

def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  node:$index), [{
  return X86::isVINSERT128Index(N);
}], INSERT_get_vinsert128_imm>;


def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     node:$index), [{
  return X86::isVEXTRACT256Index(N);
}], EXTRACT_get_vextract256_imm>;

def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  node:$index), [{
  return X86::isVINSERT256Index(N);
}], INSERT_get_vinsert256_imm>;
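// Illustrative only (hypothetical types, not a pattern used in this file):
// the fragments above match generic subvector operations whose constant index
// lies on a 128-bit (or 256-bit) lane boundary and, via the attached
// SDNodeXForm, rewrite that element index into the instruction immediate.
// A VEXTRACTF128-style pattern could therefore be written roughly as
//   (vextract128_extract:$idx (v8f32 VR256:$src), (iPTR imm))
// where $idx binds the node whose transformed i8 immediate feeds the
// instruction's imm operand.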