; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-apple-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64

; Verify that we are using the efficient uitofp --> sitofp lowering illustrated
; by the compiler_rt implementation of __floatundisf.
; <rdar://problem/8493982>

define float @test(i64 %a) nounwind {
; X86-LABEL: test:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl $0, 12(%ebp)
; X86-NEXT:    setns %al
; X86-NEXT:    fildll {{[0-9]+}}(%esp)
; X86-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    flds (%esp)
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: test:
; X64:       # %bb.0: # %entry
; X64-NEXT:    testq %rdi, %rdi
; X64-NEXT:    js .LBB0_1
; X64-NEXT:  # %bb.2: # %entry
; X64-NEXT:    cvtsi2ssq %rdi, %xmm0
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    shrq %rax
; X64-NEXT:    andl $1, %edi
; X64-NEXT:    orq %rax, %rdi
; X64-NEXT:    cvtsi2ssq %rdi, %xmm0
; X64-NEXT:    addss %xmm0, %xmm0
; X64-NEXT:    retq
entry:
  %b = uitofp i64 %a to float
  ret float %b
}
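
; An illustrative, unchecked sketch (hypothetical function name, not part of the
; autogenerated assertions above) of the IR-level expansion that mirrors the
; __floatundisf-style lowering verified in the X64 checks: inputs with the sign
; bit clear convert directly via sitofp, while larger inputs are halved with the
; shifted-out bit ORed back in (round to odd), converted, and then doubled.
define float @uitofp_expansion_sketch(i64 %a) nounwind {
entry:
  %small = icmp sgt i64 %a, -1            ; sign bit clear: sitofp already equals uitofp
  br i1 %small, label %direct, label %large

direct:
  %d = sitofp i64 %a to float
  ret float %d

large:
  %half = lshr i64 %a, 1                  ; halve so the value fits in a signed i64
  %lowbit = and i64 %a, 1                 ; keep the shifted-out bit sticky (round to odd)
  %rto = or i64 %half, %lowbit
  %conv = sitofp i64 %rto to float
  %res = fadd float %conv, %conv          ; scale the result back up by 2
  ret float %res
}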