; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s

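; A GEP on i16 scales the index by 2, but a byte-sized ldrb can only take an
; extended-register offset shifted by log2(1) = 0, so the scaling is expected
; to be materialised as a separate lsl #1 rather than folded into the load.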
define i8 @test_64bit_add(i16* %a, i64 %b) {
; CHECK-LABEL: test_64bit_add:
; CHECK: lsl [[REG:x[0-9]+]], x1, #1
; CHECK: ldrb w0, [x0, [[REG]]]
; CHECK: ret
  %tmp1 = getelementptr inbounds i16, i16* %a, i64 %b
  %tmp2 = load i16, i16* %tmp1
  %tmp3 = trunc i16 %tmp2 to i8
  ret i8 %tmp3
}

; These tests form SEXT and ZEXT operations that never leave i64 space, to
; make sure LLVM can still fold the extension into the offset register of
; the addressing mode.
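;
; Two idioms recur below: "(ashr (shl %off, 32), 32)" is the in-i64 form of
; "sext (trunc %off to i32) to i64", and "and %off, 4294967295" is the in-i64
; form of "zext (trunc %off to i32) to i64"; ISel should fold either into the
; sxtw/uxtw extended-register addressing mode instead of emitting the extend
; as a separate instruction.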
define void @ldst_8bit(i8* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_8bit:

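; Sign-extended offset: the shl/ashr pair should become an sxtw operand on
; the load.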
  %off32.sext.tmp = shl i64 %offset, 32
  %off32.sext = ashr i64 %off32.sext.tmp, 32
  %addr8_sxtw = getelementptr i8, i8* %base, i64 %off32.sext
  %val8_sxtw = load volatile i8, i8* %addr8_sxtw
  %val32_signed = sext i8 %val8_sxtw to i32
  store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

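; Zero-extended offset: the and-mask should become a uxtw operand.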
  %addrint_uxtw = ptrtoint i8* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
  %val8_uxtw = load volatile i8, i8* %addr_uxtw
  %newval8 = add i8 %val8_uxtw, 1
  store volatile i8 %newval8, i8* @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  ret void
}

define void @ldst_16bit(i16* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_16bit:

  %addrint_uxtw = ptrtoint i16* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
  %val16_uxtw = load volatile i16, i16* %addr_uxtw
  %newval16 = add i16 %val16_uxtw, 1
  store volatile i16 %newval16, i16* @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  %base_sxtw = ptrtoint i16* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
  %val16_sxtw = load volatile i16, i16* %addr_sxtw
  %val64_signed = sext i16 %val16_sxtw to i64
  store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

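; Scaled form: the zero-extended offset is also pre-shifted by log2 of the
; access size (#1 for i16 here, and #2/#3 in the i32/i64 tests below), which
; should fold into the "uxtw #N" addressing mode.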
  %base_uxtwN = ptrtoint i16* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 1
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
  %val32 = load volatile i32, i32* @var_32bit
  %val16_trunc32 = trunc i32 %val32 to i16
  store volatile i16 %val16_trunc32, i16* %addr_uxtwN
; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
  ret void
}

define void @ldst_32bit(i32* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_32bit:

  %addrint_uxtw = ptrtoint i32* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
  %val32_uxtw = load volatile i32, i32* %addr_uxtw
  %newval32 = add i32 %val32_uxtw, 1
  store volatile i32 %newval32, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  %base_sxtw = ptrtoint i32* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
  %val32_sxtw = load volatile i32, i32* %addr_sxtw
  %val64_signed = sext i32 %val32_sxtw to i64
  store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

  %base_uxtwN = ptrtoint i32* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 2
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
  %val32 = load volatile i32, i32* @var_32bit
  store volatile i32 %val32, i32* %addr_uxtwN
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
  ret void
}

define void @ldst_64bit(i64* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_64bit:

  %addrint_uxtw = ptrtoint i64* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
  %val64_uxtw = load volatile i64, i64* %addr_uxtw
  %newval64 = add i64 %val64_uxtw, 1
  store volatile i64 %newval64, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  %base_sxtw = ptrtoint i64* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
  %val64_sxtw = load volatile i64, i64* %addr_sxtw
  store volatile i64 %val64_sxtw, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

  %base_uxtwN = ptrtoint i64* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 3
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
  %val64 = load volatile i64, i64* @var_64bit
  store volatile i64 %val64, i64* %addr_uxtwN
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
  ret void
}

@var_8bit = global i8 0
@var_16bit = global i16 0
@var_32bit = global i32 0
@var_64bit = global i64 0