; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -mattr=+sse41 | FileCheck %s


; In this test we check that sign-extend of the mask bit is performed by
; shifting the needed bit to the MSB, and not using shl+sra.
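; For a constant <4 x i1> mask, the CHECK lines below expect the mask element
; to be materialized directly as the sign-bit constant -2147483648 (0x80000000)
; via movl+movd, instead of an shl+sra pair.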

;CHECK: vsel_float
;CHECK: movl $-2147483648
;CHECK-NEXT: movd
;CHECK-NEXT: blendvps
;CHECK: ret
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
  %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
  ret <4 x float> %vsel
}

;CHECK: vsel_4xi8
;CHECK: movl $-2147483648
;CHECK-NEXT: movd
;CHECK-NEXT: blendvps
;CHECK: ret
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
  %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2
  ret <4 x i8> %vsel
}


; We do not have native support for v8i16 blends and we have to use the
; blendvb instruction or a sequence of NAND/OR/AND. Make sure that we do not
; reduce the mask in this case.
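; The CHECK lines below expect the mask bits to be shifted into the MSB of each
; 16-bit lane with psllw and then sign-extended across the lane with psraw,
; producing a full per-lane mask for pblendvb.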
;CHECK: vsel_8xi16
;CHECK: psllw
;CHECK: psraw
;CHECK: pblendvb
;CHECK: ret
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
  %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i16> %v1, <8 x i16> %v2
  ret <8 x i16> %vsel
}