; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.6.6 -mattr=+sse4.1 | FileCheck %s

%0 = type { double }
%union.anon = type { float }

define i32 @double_signbit(double %d1) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %0 = bitcast double* %__x.addr.i to i8*
  %1 = bitcast %0* %__u.i to i8*
  store double %d1, double* %__x.addr.i, align 8
  %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
  store double %d1, double* %__f.i, align 8
  %tmp = bitcast double %d1 to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

define i32 @double_add_signbit(double %d1, double %d2) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addsd %xmm1, %xmm0
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %add = fadd double %d1, %d2
  %0 = bitcast double* %__x.addr.i to i8*
  %1 = bitcast %0* %__u.i to i8*
  store double %add, double* %__x.addr.i, align 8
  %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
  store double %add, double* %__f.i, align 8
  %tmp = bitcast double %add to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

define i32 @float_signbit(float %f1) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %0 = bitcast float* %__x.addr.i to i8*
  %1 = bitcast %union.anon* %__u.i to i8*
  store float %f1, float* %__x.addr.i, align 4
  %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
  store float %f1, float* %__f.i, align 4
  %2 = bitcast float %f1 to i32
  %shr.i = lshr i32 %2, 31
  ret i32 %shr.i
}

define i32 @float_add_signbit(float %f1, float %f2) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addss %xmm1, %xmm0
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %add = fadd float %f1, %f2
  %0 = bitcast float* %__x.addr.i to i8*
  %1 = bitcast %union.anon* %__u.i to i8*
  store float %add, float* %__x.addr.i, align 4
  %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
  store float %add, float* %__f.i, align 4
  %2 = bitcast float %add to i32
  %shr.i = lshr i32 %2, 31
  ret i32 %shr.i
}

; PR11570
; FIXME: This should also use movmskpd (as @double_signbit above does); we don't
; form the FGETSIGN node for the icmp-based sign test below, though.
define void @float_call_signbit(double %n) {
; CHECK-LABEL: float_call_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movq %xmm0, %rdi
; CHECK-NEXT:    shrq $63, %rdi
; CHECK-NEXT:    ## kill: def $edi killed $edi killed $rdi
; CHECK-NEXT:    jmp _float_call_signbit_callee ## TAILCALL
entry:
  %t0 = bitcast double %n to i64
  %tobool.i.i.i.i = icmp slt i64 %t0, 0
  tail call void @float_call_signbit_callee(i1 zeroext %tobool.i.i.i.i)
  ret void
}
declare void @float_call_signbit_callee(i1 zeroext)
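
; A hedged, unchecked sketch (the function name is illustrative only and there
; are no autogenerated CHECK lines for it): the same sign test written with an
; explicit lshr/trunc of the bitcast value, which is the shape @double_signbit
; above shows being lowered through FGETSIGN to movmskpd. Whether the extra
; icmp on the truncated bit still permits that lowering is an assumption, not
; something this file currently verifies.
define void @float_call_signbit_lshr(double %n) {
entry:
  %t0 = bitcast double %n to i64
  %t1 = lshr i64 %t0, 63
  %sign = trunc i64 %t1 to i32
  %tobool = icmp ne i32 %sign, 0
  tail call void @float_call_signbit_callee(i1 zeroext %tobool)
  ret void
}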


; rdar://10247336
; movmskp{s|d} only set the low 4/2 bits; the high bits are known to be zero.

define i32 @t1(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
  %idxprom = sext i32 %0 to i64
  %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
  %1 = load i32, i32* %arrayidx, align 4
  ret i32 %1
}

define i32 @t2(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x float> %x to <2 x double>
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
  %idxprom = sext i32 %1 to i64
  %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
  %2 = load i32, i32* %arrayidx, align 4
  ret i32 %2
}
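
; A hedged, unchecked sketch (@t3_masked is an illustrative name, not part of
; the original test, and has no autogenerated CHECK lines): the explicit 'and'
; with 15 below is redundant if the known-zero high bits of movmskps are
; tracked, so the expectation is that it folds away and the code matches @t1.
; That folding is an assumption here, not something these checks verify.
define i32 @t3_masked(<4 x float> %x, i32* nocapture %indexTable) nounwind uwtable readonly ssp {
entry:
  %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
  %masked = and i32 %0, 15
  %idxprom = sext i32 %masked to i64
  %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
  %1 = load i32, i32* %arrayidx, align 4
  ret i32 %1
}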

declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone