; Home | History | Annotate | Download | only in X86
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
      3 
      4 define i32 @test_add_1_cmov_slt(i64* %p, i32 %a0, i32 %a1) #0 {
; A signed "< 0" compare of the value fetched by an atomic add of 1 should be
; folded into the flags produced by "lock incq" itself: note the CHECK lines
; contain no testq/cmpq between the increment and the cmov.
      5 ; CHECK-LABEL: test_add_1_cmov_slt:
      6 ; CHECK:       # BB#0: # %entry
      7 ; CHECK-NEXT:    lock incq (%rdi)
      8 ; CHECK-NEXT:    cmovgl %edx, %esi
      9 ; CHECK-NEXT:    movl %esi, %eax
     10 ; CHECK-NEXT:    retq
     11 entry:
     12   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
     13   %tmp1 = icmp slt i64 %tmp0, 0
     14   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
     15   ret i32 %tmp2
     16 }
     17 
     18 define i32 @test_add_1_cmov_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; Signed ">= 0" on the fetched value is likewise answered by the flags from
; "lock incq": no explicit compare is emitted, the cmov condition is simply
; inverted (cmovle selects %a1 in %edx) relative to the slt variant.
     19 ; CHECK-LABEL: test_add_1_cmov_sge:
     20 ; CHECK:       # BB#0: # %entry
     21 ; CHECK-NEXT:    lock incq (%rdi)
     22 ; CHECK-NEXT:    cmovlel %edx, %esi
     23 ; CHECK-NEXT:    movl %esi, %eax
     24 ; CHECK-NEXT:    retq
     25 entry:
     26   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
     27   %tmp1 = icmp sge i64 %tmp0, 0
     28   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
     29   ret i32 %tmp2
     30 }
     31 
     32 define i32 @test_sub_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; Signed "<= 0" on the value fetched by an atomic sub of 1 folds into the
; flags set by "lock decq"; the CHECK lines show no testq before the cmov.
     33 ; CHECK-LABEL: test_sub_1_cmov_sle:
     34 ; CHECK:       # BB#0: # %entry
     35 ; CHECK-NEXT:    lock decq (%rdi)
     36 ; CHECK-NEXT:    cmovgel %edx, %esi
     37 ; CHECK-NEXT:    movl %esi, %eax
     38 ; CHECK-NEXT:    retq
     39 entry:
     40   %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
     41   %tmp1 = icmp sle i64 %tmp0, 0
     42   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
     43   ret i32 %tmp2
     44 }
     45 
     46 define i32 @test_sub_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; Signed "> 0" on the value fetched by an atomic sub of 1 also reuses the
; "lock decq" flags directly (no testq); cmovl selects the false operand.
     47 ; CHECK-LABEL: test_sub_1_cmov_sgt:
     48 ; CHECK:       # BB#0: # %entry
     49 ; CHECK-NEXT:    lock decq (%rdi)
     50 ; CHECK-NEXT:    cmovll %edx, %esi
     51 ; CHECK-NEXT:    movl %esi, %eax
     52 ; CHECK-NEXT:    retq
     53 entry:
     54   %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
     55   %tmp1 = icmp sgt i64 %tmp0, 0
     56   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
     57   ret i32 %tmp2
     58 }
     59 
     60 ; FIXME: (setcc slt x, 0) gets combined into shr early.
     61 define i8 @test_add_1_setcc_slt(i64* %p) #0 {
; Per the FIXME above: the "slt 0" setcc is turned into a sign-bit extract
; before the atomic flag-reuse combine can run, so the current output is
; xadd + shr $63 rather than the ideal "lock incq" + sets.
     62 ; CHECK-LABEL: test_add_1_setcc_slt:
     63 ; CHECK:       # BB#0: # %entry
     64 ; CHECK-NEXT:    movl $1, %eax
     65 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
     66 ; CHECK-NEXT:    shrq $63, %rax
     67 ; CHECK-NEXT:    # kill: %AL<def> %AL<kill> %RAX<kill>
     68 ; CHECK-NEXT:    retq
     69 entry:
     70   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
     71   %tmp1 = icmp slt i64 %tmp0, 0
     72   %tmp2 = zext i1 %tmp1 to i8
     73   ret i8 %tmp2
     74 }
     75 
     76 define i8 @test_sub_1_setcc_sgt(i64* %p) #0 {
; "old > 0" after subtracting 1 is equivalent to "new >= 0" (old > 0 rules
; out wraparound), so the flags from "lock decq" feed setge directly with
; no testq and no xadd needed to keep the old value.
     77 ; CHECK-LABEL: test_sub_1_setcc_sgt:
     78 ; CHECK:       # BB#0: # %entry
     79 ; CHECK-NEXT:    lock decq (%rdi)
     80 ; CHECK-NEXT:    setge %al
     81 ; CHECK-NEXT:    retq
     82 entry:
     83   %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
     84   %tmp1 = icmp sgt i64 %tmp0, 0
     85   %tmp2 = zext i1 %tmp1 to i8
     86   ret i8 %tmp2
     87 }
     88 
     89 define i32 @test_add_1_brcond_sge(i64* %p, i32 %a0, i32 %a1) #0 {
; The flag-reuse combine also applies to branches: the conditional jump
; consumes the flags from "lock incq" directly (jle to the %f block is the
; inverse of the sge condition), with no testq in between.
     90 ; CHECK-LABEL: test_add_1_brcond_sge:
     91 ; CHECK:       # BB#0: # %entry
     92 ; CHECK-NEXT:    lock incq (%rdi)
     93 ; CHECK-NEXT:    jle .LBB6_2
     94 ; CHECK-NEXT:  # BB#1: # %t
     95 ; CHECK-NEXT:    movl %esi, %eax
     96 ; CHECK-NEXT:    retq
     97 ; CHECK-NEXT:  .LBB6_2: # %f
     98 ; CHECK-NEXT:    movl %edx, %eax
     99 ; CHECK-NEXT:    retq
    100 entry:
    101   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
    102   %tmp1 = icmp sge i64 %tmp0, 0
    103   br i1 %tmp1, label %t, label %f
    104 t:
    105   ret i32 %a0
    106 f:
    107   ret i32 %a1
    108 }
    109 
    110 ; Also make sure we don't muck with condition codes that we should ignore.
    111 ; No need to test unsigned comparisons, as they should all be simplified.
    112 
    113 define i32 @test_add_1_cmov_sle(i64* %p, i32 %a0, i32 %a1) #0 {
; Negative test: "old <= 0" after adding 1 is NOT derivable from the flags
; of the increment alone, so the old value must be materialized with xadd
; and compared explicitly with testq before the cmov.
    114 ; CHECK-LABEL: test_add_1_cmov_sle:
    115 ; CHECK:       # BB#0: # %entry
    116 ; CHECK-NEXT:    movl $1, %eax
    117 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
    118 ; CHECK-NEXT:    testq %rax, %rax
    119 ; CHECK-NEXT:    cmovgl %edx, %esi
    120 ; CHECK-NEXT:    movl %esi, %eax
    121 ; CHECK-NEXT:    retq
    122 entry:
    123   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
    124   %tmp1 = icmp sle i64 %tmp0, 0
    125   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
    126   ret i32 %tmp2
    127 }
    128 
    129 define i32 @test_add_1_cmov_sgt(i64* %p, i32 %a0, i32 %a1) #0 {
; Negative test: "old > 0" after adding 1 likewise cannot be answered by the
; increment's flags, so an explicit xadd + testq sequence is expected.
    130 ; CHECK-LABEL: test_add_1_cmov_sgt:
    131 ; CHECK:       # BB#0: # %entry
    132 ; CHECK-NEXT:    movl $1, %eax
    133 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
    134 ; CHECK-NEXT:    testq %rax, %rax
    135 ; CHECK-NEXT:    cmovlel %edx, %esi
    136 ; CHECK-NEXT:    movl %esi, %eax
    137 ; CHECK-NEXT:    retq
    138 entry:
    139   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
    140   %tmp1 = icmp sgt i64 %tmp0, 0
    141   %tmp2 = select i1 %tmp1, i32 %a0, i32 %a1
    142   ret i32 %tmp2
    143 }
    144 
    145 ; Test a result being used by more than just the comparison.
    146 
    147 define i8 @test_add_1_setcc_sgt_reuse(i64* %p, i64* %p2) #0 {
; The fetched value is stored to %p2 as well as compared, so it must be kept
; live in a register: xadd is required, and the compare becomes an explicit
; testq rather than reusing the RMW instruction's flags.
    148 ; CHECK-LABEL: test_add_1_setcc_sgt_reuse:
    149 ; CHECK:       # BB#0: # %entry
    150 ; CHECK-NEXT:    movl $1, %ecx
    151 ; CHECK-NEXT:    lock xaddq %rcx, (%rdi)
    152 ; CHECK-NEXT:    testq %rcx, %rcx
    153 ; CHECK-NEXT:    setg %al
    154 ; CHECK-NEXT:    movq %rcx, (%rsi)
    155 ; CHECK-NEXT:    retq
    156 entry:
    157   %tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
    158   %tmp1 = icmp sgt i64 %tmp0, 0
    159   %tmp2 = zext i1 %tmp1 to i8
    160   store i64 %tmp0, i64* %p2
    161   ret i8 %tmp2
    162 }
    163 
    164 define i8 @test_sub_2_setcc_sgt(i64* %p) #0 {
; The subtracted operand is 2, not 1, so the inc/dec flag-reuse trick does
; not apply: the old value is fetched with xadd (adding -2) and compared
; explicitly with testq before setg.
    165 ; CHECK-LABEL: test_sub_2_setcc_sgt:
    166 ; CHECK:       # BB#0: # %entry
    167 ; CHECK-NEXT:    movq $-2, %rax
    168 ; CHECK-NEXT:    lock xaddq %rax, (%rdi)
    169 ; CHECK-NEXT:    testq %rax, %rax
    170 ; CHECK-NEXT:    setg %al
    171 ; CHECK-NEXT:    retq
    172 entry:
    173   %tmp0 = atomicrmw sub i64* %p, i64 2 seq_cst
    174   %tmp1 = icmp sgt i64 %tmp0, 0
    175   %tmp2 = zext i1 %tmp1 to i8
    176   ret i8 %tmp2
    177 }
    178 
    179 attributes #0 = { nounwind }
    180