; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse  | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE2

; FNEG is defined as subtraction from -0.0.
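; As an illustrative sketch (comment only, not a checked test), a scalar negation
; written with that idiom looks like:
;   %neg = fsub float -0.000000e+00, %x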

; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t1:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    xorps .LCPI0_0, %xmm0
; X32-SSE-NEXT:    retl
;
; X64-SSE-LABEL: t1:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    xorps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
  %tmp = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
  ret <4 x float> %tmp
}

; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg.
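; (Illustrative note, not a checked pattern: for a lane holding +0.0, fneg produces
;  -0.0, but "fsub float 0.0, 0.0" produces +0.0, so the sign-bit xor used in t1
;  would give the wrong result for signed zeros here.)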
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t2:
; X32-SSE:       # BB#0:
; X32-SSE-NEXT:    xorps %xmm1, %xmm1
; X32-SSE-NEXT:    subps %xmm0, %xmm1
; X32-SSE-NEXT:    movaps %xmm1, %xmm0
; X32-SSE-NEXT:    retl
;
; X64-SSE-LABEL: t2:
; X64-SSE:       # BB#0:
; X64-SSE-NEXT:    xorps %xmm1, %xmm1
; X64-SSE-NEXT:    subps %xmm0, %xmm1
; X64-SSE-NEXT:    movaps %xmm1, %xmm0
; X64-SSE-NEXT:    retq
  %tmp = fsub <4 x float> zeroinitializer, %Q
  ret <4 x float> %tmp
}

; If we're bitcasting an integer to an FP vector, we should avoid the FPU/vector unit entirely.
; Make sure that we're flipping the sign bit and only the sign bit of each float.
; So instead of something like this:
;    movd	%rdi, %xmm0
;    xorps	.LCPI2_0(%rip), %xmm0
;
; We should generate:
;    movabsq     (put sign bit mask in integer register)
;    xorq        (flip sign bits)
;    movd        (move to xmm return register)

define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-LABEL: fneg_bitcast:
; X32-SSE1:       # BB#0:
; X32-SSE1-NEXT:    pushl %ebp
; X32-SSE1-NEXT:    movl %esp, %ebp
; X32-SSE1-NEXT:    andl $-16, %esp
; X32-SSE1-NEXT:    subl $32, %esp
; X32-SSE1-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE1-NEXT:    movl 12(%ebp), %ecx
; X32-SSE1-NEXT:    xorl %eax, %ecx
; X32-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X32-SSE1-NEXT:    xorl 8(%ebp), %eax
; X32-SSE1-NEXT:    movl %eax, (%esp)
; X32-SSE1-NEXT:    movaps (%esp), %xmm0
; X32-SSE1-NEXT:    movl %ebp, %esp
; X32-SSE1-NEXT:    popl %ebp
; X32-SSE1-NEXT:    retl
;
; X32-SSE2-LABEL: fneg_bitcast:
; X32-SSE2:       # BB#0:
; X32-SSE2-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT:    xorl %eax, %ecx
; X32-SSE2-NEXT:    movd %ecx, %xmm1
; X32-SSE2-NEXT:    xorl {{[0-9]+}}(%esp), %eax
; X32-SSE2-NEXT:    movd %eax, %xmm0
; X32-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE2-NEXT:    retl
;
; X64-SSE1-LABEL: fneg_bitcast:
; X64-SSE1:       # BB#0:
; X64-SSE1-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT:    xorq %rdi, %rax
; X64-SSE1-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; X64-SSE1-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE1-NEXT:    retq
;
; X64-SSE2-LABEL: fneg_bitcast:
; X64-SSE2:       # BB#0:
; X64-SSE2-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT:    xorq %rdi, %rax
; X64-SSE2-NEXT:    movd %rax, %xmm0
; X64-SSE2-NEXT:    retq
  %bitcast = bitcast i64 %i to <2 x float>
  %fneg = fsub <2 x float> <float -0.0, float -0.0>, %bitcast
  ret <2 x float> %fneg
}