; RUN: llc < %s -mtriple=x86_64-linux -mcpu=nehalem | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=nehalem | FileCheck %s

; Full strength reduction wouldn't reduce register pressure, so LSR should
; stick with indexing here.
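;
; Concretely, the CHECK lines below expect the load and store to use scaled
; indexing off a single induction variable in %rax, (%base,%rax,4), rather
; than separate strength-reduced pointer IVs that would each tie up a
; register.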

; CHECK: movaps        (%{{rsi|rdx}},%rax,4), [[X3:%xmm[0-9]+]]
; CHECK: cvtdq2ps
; CHECK: orps          {{%xmm[0-9]+}}, [[X4:%xmm[0-9]+]]
; CHECK: movaps        [[X4]], (%{{rdi|rcx}},%rax,4)
; CHECK: addq  $4, %rax
; CHECK: cmpl  %eax, (%{{rdx|r8}})
; CHECK-NEXT: jg
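
; A rough scalar equivalent of the kernel below (a sketch for orientation;
; the test itself operates on <4 x float> at a time):
;
;   void vvfloorf(float *y, float *x, int *n) {
;     for (int i = 0; i < *n; i += 4)          // *n is reloaded each iteration
;       y[i+0..i+3] = floorf(x[i+0..i+3]);     // one 4-wide vector op
;   }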

define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
entry:
  %0 = load i32, i32* %n, align 4
  %1 = icmp sgt i32 %0, 0
  br i1 %1, label %bb, label %return

bb:
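  ; A single induction variable: %indvar counts <4 x float> blocks, and
  ; %tmp (= %indvar * 4) is the element offset shared by the x and y
  ; addresses, matching the (%base,%rax,4) operands checked above.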
  %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
  %tmp = shl i64 %indvar, 2
  %scevgep = getelementptr float, float* %y, i64 %tmp
  %scevgep9 = bitcast float* %scevgep to <4 x float>*
  %scevgep10 = getelementptr float, float* %x, i64 %tmp
  %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
  %2 = load <4 x float>, <4 x float>* %scevgep1011, align 16
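  ; Split the loaded values into magnitude (%4/%5, sign bit cleared) and
  ; sign (%6, sign bit only).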
  %3 = bitcast <4 x float> %2 to <4 x i32>
  %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %5 = bitcast <4 x i32> %4 to <4 x float>
  %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
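  ; Pred 5 is NLT: %7 is all-ones where |x| >= 2^23, i.e. where x is already
  ; integral. Inverting and masking with 0x4B000000 (= 1258291200, the bit
  ; pattern of 8388608.0 = 2^23) and OR-ing in the sign gives
  ; copysign(2^23, x) for the small lanes and +/-0.0 for the integral ones.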
  %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
  %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
  %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
  %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
  %10 = or <4 x i32> %9, %6
  %11 = bitcast <4 x i32> %10 to <4 x float>
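  ; Adding and then subtracting this bias rounds the small lanes to the
  ; nearest integer (the 2^23 trick); the +/-0.0 bias leaves integral
  ; lanes unchanged.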
  %12 = fadd <4 x float> %2, %11
  %13 = fsub <4 x float> %12, %11
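  ; Pred 1 is LT: where x < rounded(x), %15 is all-ones (-1 as i32), which
  ; cvtdq2ps turns into -1.0; adding it corrects round-to-nearest down to
  ; floor.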
  %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
  %15 = bitcast <4 x float> %14 to <4 x i32>
  %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
  %17 = fadd <4 x float> %13, %16
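  ; OR the original sign bits back in so that results such as floor(-0.0)
  ; keep their sign, then store to y.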
  %tmp.i = bitcast <4 x float> %17 to <4 x i32>
  %18 = or <4 x i32> %tmp.i, %6
  %19 = bitcast <4 x i32> %18 to <4 x float>
  store <4 x float> %19, <4 x float>* %scevgep9, align 16
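  ; Loop exit test: reload n and compare it (signed) against the number of
  ; elements processed so far, %tmp + 4, truncated to match n's i32 width.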
  %tmp12 = add i64 %tmp, 4
  %tmp13 = trunc i64 %tmp12 to i32
  %20 = load i32, i32* %n, align 4
  %21 = icmp sgt i32 %20, %tmp13
  %indvar.next = add i64 %indvar, 1
  br i1 %21, label %bb, label %return

return:
  ret void
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone