; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=i386-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
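; These tests check that vector bitwise logic lowers to the packed logic
; instructions (xorps, andps, andnps, orps and their AVX forms).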
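; fsub from -0.0 is a negation; it should lower to an XOR with the sign-bit
; constant (xorps with a constant-pool operand) rather than a real subtraction.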
define void @t(<4 x float> %A) {
; SSE-LABEL: t:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps {{\.LCPI.*}}, %xmm0
; SSE-NEXT:    movaps %xmm0, 0
; SSE-NEXT:    retl
;
; AVX-LABEL: t:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX-NEXT:    vmovaps %xmm0, 0
; AVX-NEXT:    retl
  %tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
  store <4 x float> %tmp1277, <4 x float>* null
  ret void
}
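; XOR of bitcast <4 x float> operands should stay in the FP domain and select
; a single xorps/vxorps.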
define <4 x float> @t1(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: t1:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    xorps %xmm1, %xmm0
; SSE-NEXT:    retl
;
; AVX-LABEL: t1:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retl
entry:
  %tmp9 = bitcast <4 x float> %a to <4 x i32>
  %tmp10 = bitcast <4 x float> %b to <4 x i32>
  %tmp11 = xor <4 x i32> %tmp9, %tmp10
  %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float>
  ret <4 x float> %tmp13
}
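; Same as t1, but with <2 x double>: the AND of the bitcast values should
; become a single packed logic instruction.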
define <2 x double> @t2(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: t2:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    andps %xmm1, %xmm0
; SSE-NEXT:    retl
;
; AVX-LABEL: t2:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retl
entry:
  %tmp9 = bitcast <2 x double> %a to <2 x i64>
  %tmp10 = bitcast <2 x double> %b to <2 x i64>
  %tmp11 = and <2 x i64> %tmp9, %tmp10
  %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double>
  ret <2 x double> %tmp13
}
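; The NOT of %a folded into the AND should select andnps/vandnps, and the OR
; with the loaded value plus the store should stay in the FP domain.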
define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d) {
; SSE-LABEL: t3:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; SSE-NEXT:    andnps %xmm1, %xmm0
; SSE-NEXT:    orps (%ecx), %xmm0
; SSE-NEXT:    movaps %xmm0, (%eax)
; SSE-NEXT:    retl
;
; AVX-LABEL: t3:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; AVX-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vorps (%ecx), %xmm0, %xmm0
; AVX-NEXT:    vmovaps %xmm0, (%eax)
; AVX-NEXT:    retl
entry:
  %tmp3 = load <4 x float>, <4 x float>* %c
  %tmp11 = bitcast <4 x float> %a to <4 x i32>
  %tmp12 = bitcast <4 x float> %b to <4 x i32>
  %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 >
  %tmp14 = and <4 x i32> %tmp12, %tmp13
  %tmp27 = bitcast <4 x float> %tmp3 to <4 x i32>
  %tmp28 = or <4 x i32> %tmp14, %tmp27
  %tmp30 = bitcast <4 x i32> %tmp28 to <4 x float>
  store <4 x float> %tmp30, <4 x float>* %d
  ret void
}
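; The inverted operand of the AND is %a, so the xor of %b and %c should be
; computed first and then fed to andnps/vandnps.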
define <2 x i64> @andn_double_xor(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
; SSE-LABEL: andn_double_xor:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm2, %xmm1
; SSE-NEXT:    andnps %xmm1, %xmm0
; SSE-NEXT:    retl
;
; AVX-LABEL: andn_double_xor:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retl
  %1 = xor <2 x i64> %a, <i64 -1, i64 -1>
  %2 = xor <2 x i64> %b, %c
  %3 = and <2 x i64> %1, %2
  ret <2 x i64> %3
}