; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O3 -disable-peephole -mtriple=i686-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -O3 -disable-peephole -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X64
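
; Each test loads a <8 x float>, applies a bitwise op through <8 x i32>
; bitcasts, and stores only lane 0. The CHECK lines expect the op to stay a
; single 256-bit instruction with its constant operand folded from the
; constant pool, followed by vzeroupper before returning.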

; Function Attrs: nounwind ssp uwtable
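; test1: AND with 0x7FFFFFFF clears each lane's sign bit (the integer form
; of fabs); only element 0 of the result is stored.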
define void @test1(float* %A, float* %C) #0 {
; X86-LABEL: test1:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vandps LCPI0_0, %ymm0, %ymm0
; X86-NEXT:    vmovss %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       ## %bb.0:
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovss %xmm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %tmp1 = bitcast float* %A to <8 x float>*
  %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
  %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
  %tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  store float %tmp6, float* %C
  ret void
}

; Function Attrs: nounwind ssp uwtable
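; test2: OR with 0x7FFFFFFF sets every bit except the sign bit in each lane.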
define void @test2(float* %A, float* %C) #0 {
; X86-LABEL: test2:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vorps LCPI1_0, %ymm0, %ymm0
; X86-NEXT:    vmovss %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       ## %bb.0:
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovss %xmm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %tmp1 = bitcast float* %A to <8 x float>*
  %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
  %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
  %tmp4 = or <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  store float %tmp6, float* %C
  ret void
}

; Function Attrs: nounwind ssp uwtable
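; test3: XOR with 0x7FFFFFFF flips every bit except the sign bit in each lane.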
define void @test3(float* %A, float* %C) #0 {
; X86-LABEL: test3:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vxorps LCPI2_0, %ymm0, %ymm0
; X86-NEXT:    vmovss %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       ## %bb.0:
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vxorps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovss %xmm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %tmp1 = bitcast float* %A to <8 x float>*
  %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
  %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
  %tmp4 = xor <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
  %tmp6 = extractelement <8 x float> %tmp5, i32 0
  store float %tmp6, float* %C
  ret void
}

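; test4: NOT followed by AND with 0x7FFFFFFF should match a single vandnps,
; since x86 ANDN computes (NOT src1) AND src2.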
define void @test4(float* %A, float* %C) #0 {
; X86-LABEL: test4:
; X86:       ## %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    vmovaps (%ecx), %ymm0
; X86-NEXT:    vandnps LCPI3_0, %ymm0, %ymm0
; X86-NEXT:    vmovss %xmm0, (%eax)
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       ## %bb.0:
; X64-NEXT:    vmovaps (%rdi), %ymm0
; X64-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovss %xmm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %tmp1 = bitcast float* %A to <8 x float>*
  %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
  %tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
  %tmp4 = xor <8 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %tmp5 = and <8 x i32> %tmp4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %tmp6 = bitcast <8 x i32> %tmp5 to <8 x float>
  %tmp7 = extractelement <8 x float> %tmp6, i32 0
  store float %tmp7, float* %C
  ret void
}