; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -mattr=+sse41 | FileCheck %s

; In this test we check that sign-extend of the mask bit is performed by
; shifting the needed bit to the MSB, and not using shl+sra.

; A v4f32 blend with a constant mask should shift the mask bit into the MSB
; (single pslld) and feed blendvps directly.
; CHECK-LABEL: vsel_float
; CHECK: pslld
; CHECK-NEXT: blendvps
; CHECK: ret
define <4 x float> @vsel_float(<4 x float> %v1, <4 x float> %v2) {
  %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %v1, <4 x float> %v2
  ret <4 x float> %vsel
}

; v4i8 is promoted to a 32-bit-per-lane vector, so the same pslld+blendvps
; lowering applies as in the float case above.
; CHECK-LABEL: vsel_4xi8
; CHECK: pslld
; CHECK-NEXT: blendvps
; CHECK: ret
define <4 x i8> @vsel_4xi8(<4 x i8> %v1, <4 x i8> %v2) {
  %vsel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i8> %v1, <4 x i8> %v2
  ret <4 x i8> %vsel
}

; We do not have native support for v8i16 blends and we have to use the
; blendvb instruction or a sequence of NAND/OR/AND. Make sure that we do not
; reduce the mask in this case.
; CHECK-LABEL: vsel_8xi16
; CHECK: psllw
; CHECK: psraw
; CHECK: pblendvb
; CHECK: ret
define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
  %vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i16> %v1, <8 x i16> %v2
  ret <8 x i16> %vsel
}