; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
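; This file checks that a constant shift (shl/lshr/ashr) feeding an add, sub,
; cmp or cmn is folded into the shifted-register operand of that instruction
; (e.g. "add w0, w1, w2, lsl #18") rather than emitted as a separate shift.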

@var32 = global i32 0
@var64 = global i64 0

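; The shifted-register forms of add/sub accept shift amounts of 0-31 for
; 32-bit and 0-63 for 64-bit operations, so every immediate used below is
; foldable.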
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32, i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

  %rhs2 = load volatile i32, i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32, i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative: only the second source operand of sub can
; carry a shift, so this one must not be folded.
  %rhs4 = load volatile i32, i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19

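; A sub from zero, however, is printed as the neg alias (neg is sub from the
; zero register), and that form does accept a shifted second operand.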
  %lhs4a = load volatile i32, i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15

  %rhs5 = load volatile i64, i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64, i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64, i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64, i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64, i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60

  ret void
; CHECK: ret
}

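; The same folding applies when the operand comes from a logical right shift,
; using the "lsr" operand modifier.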
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15

  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45

  ret void
; CHECK: ret
}

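; Arithmetic right shifts fold in the same way, using the "asr" operand
; modifier.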
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15

  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45

  ret void
; CHECK: ret
}

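; cmp is an alias of subs with the result discarded, so shifted operands fold
; into comparisons as well. The volatile stores keep each block, and hence
; each comparison, alive.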
define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
; CHECK-LABEL: test_cmp:

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  store volatile i32 %v, i32* @var32
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  store volatile i32 %v, i32* @var32
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  store volatile i32 %v, i32* @var32
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  store volatile i32 %v, i32* @var32
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  store volatile i32 %v, i32* @var32
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  store volatile i32 %v, i32* @var32
  br label %end

end:
  ret void
; CHECK: ret
}

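; cmn is an alias of adds with the result discarded: comparing %lhs against
; "0 - %shift" can usually become "cmn %lhs, %shift" with the shift folded.
; The exceptions below are cases where the flags would differ from the cmp
; they replace.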
define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmn:

  %shift1 = shl i32 %rhs32, 13
  %val1 = sub i32 0, %shift1
  %tst1 = icmp uge i32 %lhs32, %val1
  br i1 %tst1, label %t2, label %end
  ; Important that this isn't lowered to a cmn instruction: if %rhs32 == 0 the
  ; results differ, because "uge" reads the carry flag and subtracting zero
  ; sets carry while adding zero clears it.
; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]

t2:
  %shift2 = lshr i32 %rhs32, 20
  %val2 = sub i32 0, %shift2
  %tst2 = icmp ne i32 %lhs32, %val2
  br i1 %tst2, label %t3, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %val3 = sub i32 0, %shift3
  %tst3 = icmp eq i32 %lhs32, %val3
  br i1 %tst3, label %t4, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %val4 = sub i64 0, %shift4
  %tst4 = icmp slt i64 %lhs64, %val4
  br i1 %tst4, label %t5, label %end
  ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]

t5:
  %shift5 = lshr i64 %rhs64, 20
  %val5 = sub i64 0, %shift5
  %tst5 = icmp ne i64 %lhs64, %val5
  br i1 %tst5, label %t6, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %val6 = sub i64 0, %shift6
  %tst6 = icmp ne i64 %lhs64, %val6
  br i1 %tst6, label %t7, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1

end:
  ret i32 0
; CHECK: ret
}