; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s

; Test that vector arithmetic shifts are converted to proper SSE2 packed
; shifts when the shift amounts are the same.

; Note that x86 has a scalar arithmetic shift (sar), but SSE2 has no packed
; arithmetic shift for 64-bit elements.

; shift1a can't use a packed shift, so it falls back to scalar sarl.
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
entry:
; CHECK: shift1a:
; CHECK: sarl
  %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
  store <2 x i64> %ashr, <2 x i64>* %dst
  ret void
}

define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
entry:
; CHECK: shift2a:
; CHECK: psrad $5
  %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
  store <4 x i32> %ashr, <4 x i32>* %dst
  ret void
}

; The variable shift amount is moved from a GPR into an XMM register (movd),
; and a single psrad handles the splat.
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
entry:
; CHECK: shift2b:
; CHECK: movd
; CHECK-NEXT: psrad
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %ashr = ashr <4 x i32> %val, %3
  store <4 x i32> %ashr, <4 x i32>* %dst
  ret void
}

define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
entry:
; CHECK: shift3a:
; CHECK: psraw $5
  %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
  store <8 x i16> %ashr, <8 x i16>* %dst
  ret void
}

; The i16 shift amount is zero-extended (movzwl) before being moved into an
; XMM register for psraw.
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
entry:
; CHECK: shift3b:
; CHECK: movzwl
; CHECK: movd
; CHECK-NEXT: psraw
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %ashr = ashr <8 x i16> %val, %7
  store <8 x i16> %ashr, <8 x i16>* %dst
  ret void
}