; Tests that the AArch64 backend selects the expected bitfield-style
; instructions (sxt*/uxt*, and-with-mask, sbfx/ubfx, immediate shifts).
      1 
      2 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
      3 
; Volatile-store sinks used by every test below so the extended/shifted
; values stay live through codegen instead of being folded away.
@var32 = global i32 0
@var64 = global i64 0
      6 
; i8 source: sign-extensions should select sxtb (32- and 64-bit forms);
; zero-extension to i32 currently selects an AND with #0xff, to i64 uxtb.
define void @test_extendb(i8 %var) {
; CHECK-LABEL: test_extendb:

  %sxt32 = sext i8 %var to i32
  store volatile i32 %sxt32, i32* @var32
; CHECK: sxtb {{w[0-9]+}}, {{w[0-9]+}}

  %sxt64 = sext i8 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt32 = zext i8 %var to i32
  store volatile i32 %uxt32, i32* @var32
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xff

  %uxt64 = zext i8 %var to i64
  store volatile i64 %uxt64, i64* @var64
; CHECK: uxtb {{x[0-9]+}}, {{w[0-9]+}}
  ret void
}
     30 
; i16 source: mirror of test_extendb one lane wider — sxth for sign
; extension, AND with #0xffff / uxth for zero extension.
define void @test_extendh(i16 %var) {
; CHECK-LABEL: test_extendh:

  %sxt32 = sext i16 %var to i32
  store volatile i32 %sxt32, i32* @var32
; CHECK: sxth {{w[0-9]+}}, {{w[0-9]+}}

  %sxt64 = sext i16 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt32 = zext i16 %var to i32
  store volatile i32 %uxt32, i32* @var32
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff

  %uxt64 = zext i16 %var to i64
  store volatile i64 %uxt64, i64* @var64
; CHECK: uxth {{x[0-9]+}}, {{w[0-9]+}}
  ret void
}
     54 
; i32 -> i64 extensions: sxtw for the signed case; the unsigned case is
; expected to come out as a 32-bit ubfx covering the whole register.
define void @test_extendw(i32 %var) {
; CHECK-LABEL: test_extendw:

  %sxt64 = sext i32 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}

  %uxt64 = zext i32 %var to i64
  store volatile i64 %uxt64, i64* @var64
; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #0, #32
  ret void
}
     67 
; Constant-amount shifts are bitfield-move aliases on AArch64 (asr/lsr/lsl
; with an immediate). Covers 32- and 64-bit widths, including the boundary
; amounts (31 for w-regs, 63 for x-regs).
define void @test_shifts(i32 %val32, i64 %val64) {
; CHECK-LABEL: test_shifts:

  %shift1 = ashr i32 %val32, 31
  store volatile i32 %shift1, i32* @var32
; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, #31

  %shift2 = lshr i32 %val32, 8
  store volatile i32 %shift2, i32* @var32
; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #8

  %shift3 = shl i32 %val32, 1
  store volatile i32 %shift3, i32* @var32
; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #1

  %shift4 = ashr i64 %val64, 31
  store volatile i64 %shift4, i64* @var64
; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #31

  %shift5 = lshr i64 %val64, 8
  store volatile i64 %shift5, i64* @var64
; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #8

  %shift6 = shl i64 %val64, 63
  store volatile i64 %shift6, i64* @var64
; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift7 = ashr i64 %val64, 63
  store volatile i64 %shift7, i64* @var64
; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift8 = lshr i64 %val64, 63
  store volatile i64 %shift8, i64* @var64
; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift9 = lshr i32 %val32, 31
  store volatile i32 %shift9, i32* @var32
; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #31

  %shift10 = shl i32 %val32, 31
  store volatile i32 %shift10, i32* @var32
; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #31

  ret void
}
    113 
    114 ; LLVM can produce in-register extensions taking place entirely with
    115 ; 64-bit registers too.
; trunc-then-sext within a 64-bit register ("sext_inreg" after
; legalization): i1 needs a raw sbfx; i8/i16/i32 have the sxtb/sxth/sxtw
; aliases of sbfm.
define void @test_sext_inreg_64(i64 %in) {
; CHECK-LABEL: test_sext_inreg_64:

; i1 doesn't have an official alias, but crops up and is handled by
; the bitfield ops.
  %trunc_i1 = trunc i64 %in to i1
  %sext_i1 = sext i1 %trunc_i1 to i64
  store volatile i64 %sext_i1, i64* @var64
; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1

  %trunc_i8 = trunc i64 %in to i8
  %sext_i8 = sext i8 %trunc_i8 to i64
  store volatile i64 %sext_i8, i64* @var64
; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}

  %trunc_i16 = trunc i64 %in to i16
  %sext_i16 = sext i16 %trunc_i16 to i64
  store volatile i64 %sext_i16, i64* @var64
; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}

  %trunc_i32 = trunc i64 %in to i32
  %sext_i32 = sext i32 %trunc_i32 to i64
  store volatile i64 %sext_i32, i64* @var64
; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
  ret void
}
    142 
    143 ; These instructions don't actually select to official bitfield
    144 ; operations, but it's important that we select them somehow:
; trunc-then-zext within a 64-bit register: expected to select AND with
; the corresponding low-bit mask rather than a ubfm alias.
define void @test_zext_inreg_64(i64 %in) {
; CHECK-LABEL: test_zext_inreg_64:

  %trunc_i8 = trunc i64 %in to i8
  %zext_i8 = zext i8 %trunc_i8 to i64
  store volatile i64 %zext_i8, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff

  %trunc_i16 = trunc i64 %in to i16
  %zext_i16 = zext i16 %trunc_i16 to i64
  store volatile i64 %zext_i16, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff

  %trunc_i32 = trunc i64 %in to i32
  %zext_i32 = zext i32 %trunc_i32 to i64
  store volatile i64 %zext_i32, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffffffff

  ret void
}
    165 
; i1 sign-extension of a 32-bit argument all the way to i64: the result
; must be produced with the 64-bit sbfx so the high 32 bits are correct.
define i64 @test_sext_inreg_from_32(i32 %in) {
; CHECK-LABEL: test_sext_inreg_from_32:

  %small = trunc i32 %in to i1
  %ext = sext i1 %small to i64

  ; Different registers are of course, possible, though suboptimal. This is
  ; making sure that a 64-bit "(sext_inreg (anyext GPR32), i1)" uses the 64-bit
  ; sbfx rather than just 32-bits.
; CHECK: sbfx x0, x0, #0, #1
  ret i64 %ext
}
    178 
    179 
; (lshr x, 23) & 7 extracts the 3-bit field at bit 23 -> ubfx #23, #3.
define i32 @test_ubfx32(i32* %addr) {
; CHECK-LABEL: test_ubfx32:
; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3

   %fields = load i32* %addr
   %shifted = lshr i32 %fields, 23
   %masked = and i32 %shifted, 7
   ret i32 %masked
}
    189 
; 64-bit variant: (lshr x, 25) & 1023 extracts the 10-bit field at bit 25
; -> ubfx #25, #10.
define i64 @test_ubfx64(i64* %addr) {
; CHECK-LABEL: test_ubfx64:
; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10

   %fields = load i64* %addr
   %shifted = lshr i64 %fields, 25
   %masked = and i64 %shifted, 1023
   ret i64 %masked
}
    199 
; shl 23 then ashr 29 sign-extracts the (32-29)=3-bit field whose low bit
; is at position 29-23=6 -> sbfx #6, #3.
define i32 @test_sbfx32(i32* %addr) {
; CHECK-LABEL: test_sbfx32:
; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3

   %fields = load i32* %addr
   %shifted = shl i32 %fields, 23
   %extended = ashr i32 %shifted, 29
   ret i32 %extended
}
    209 
; shl 1 then ashr 1 sign-extends the low 63 bits in place -> sbfx #0, #63.
define i64 @test_sbfx64(i64* %addr) {
; CHECK-LABEL: test_sbfx64:
; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63

   %fields = load i64* %addr
   %shifted = shl i64 %fields, 1
   %extended = ashr i64 %shifted, 1
   ret i64 %extended
}
    219