; llvm/test/CodeGen/PowerPC — FastISel load/store test (extracted from an HTML code-browser view)
      1 ; FIXME: FastISel currently returns false if it hits code that uses VSX
      2 ; registers and with -fast-isel-abort=1 turned on the test case will then fail.
      3 ; When fastisel better supports VSX fix up this test case.
      4 ;
      5 ; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel -fast-isel-abort=1 -mattr=-vsx -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64
      6 
      7 ; This test verifies that load/store instructions are properly generated,
      8 ; and that they pass MI verification.
      9 
; One global per scalar width/type so each load/store test below exercises a
; distinct PPC memory instruction (lbz/lhz/lwz/ld, lfs/lfd, stb/sth/stw/std, ...).
@a = global i8 1, align 1
@b = global i16 2, align 2
@c = global i32 4, align 4
@d = global i64 8, align 8
@e = global float 1.25, align 4
@f = global double 3.5, align 8

; Packed (<{...}>) structs: the i32/i64 field sits at byte offset 1, giving a
; misaligned, non-multiple-of-4 offset — used by t13-t15 to force the X-form
; (indexed) instructions, since DS-form displacements must be divisible by 4.
%struct.s = type<{ i8, i32 }>
%struct.t = type<{ i8, i64 }>

@g = global %struct.s <{ i8 1, i32 2 }>, align 1
@h = global %struct.t <{ i8 1, i64 2 }>, align 1

; Large array: element 5000 is at byte offset 40000, which does not fit in a
; signed 16-bit displacement — used by t16/t17 to force ldx/stdx.
@i = common global [8192 x i64] zeroinitializer, align 8
     24 
     25 ; load
     26 
; i8 load: expect lbz (load byte and zero) for the global access.
define i8 @t1() nounwind {
; ELF64: t1
  %1 = load i8, i8* @a, align 1
; ELF64: lbz
  %2 = add nsw i8 %1, 1
; ELF64: addi
  ret i8 %2
}
     35 
; i16 load: expect lhz (load halfword and zero).
define i16 @t2() nounwind {
; ELF64: t2
  %1 = load i16, i16* @b, align 2
; ELF64: lhz
  %2 = add nsw i16 %1, 1
; ELF64: addi
  ret i16 %2
}
     44 
; i32 load: expect lwz (load word and zero).
define i32 @t3() nounwind {
; ELF64: t3
  %1 = load i32, i32* @c, align 4
; ELF64: lwz
  %2 = add nsw i32 %1, 1
; ELF64: addi
  ret i32 %2
}
     53 
; i64 load: expect ld (DS-form doubleword load).
; NOTE(review): align 4 under-aligns the align-8 global @d — legal IR;
; presumably intentional (mirrors the upstream test), verify before changing.
define i64 @t4() nounwind {
; ELF64: t4
  %1 = load i64, i64* @d, align 4
; ELF64: ld
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}
     62 
; float load: expect lfs (load FP single); add checked as fadds.
define float @t5() nounwind {
; ELF64: t5
  %1 = load float, float* @e, align 4
; ELF64: lfs
  %2 = fadd float %1, 1.0
; ELF64: fadds
  ret float %2
}
     71 
; double load: expect lfd (load FP double); add checked as fadd.
define double @t6() nounwind {
; ELF64: t6
  %1 = load double, double* @f, align 8
; ELF64: lfd
  %2 = fadd double %1, 1.0
; ELF64: fadd
  ret double %2
}
     80 
     81 ; store
     82 
; i8 store: addis/addi materialize the global's address (TOC-relative),
; a second addi does the increment, then stb stores the byte.
define void @t7(i8 %v) nounwind {
; ELF64: t7
  %1 = add nsw i8 %v, 1
  store i8 %1, i8* @a, align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stb
  ret void
}
     93 
; i16 store: expect sth after address materialization and increment.
define void @t8(i16 %v) nounwind {
; ELF64: t8
  %1 = add nsw i16 %v, 1
  store i16 %1, i16* @b, align 2
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: sth
  ret void
}
    104 
; i32 store: expect stw after address materialization and increment.
define void @t9(i32 %v) nounwind {
; ELF64: t9
  %1 = add nsw i32 %v, 1
  store i32 %1, i32* @c, align 4
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stw
  ret void
}
    115 
; i64 store: expect std (DS-form doubleword store).
; NOTE(review): align 4 under-aligns the align-8 global @d — legal IR;
; presumably intentional (mirrors the upstream test), verify before changing.
define void @t10(i64 %v) nounwind {
; ELF64: t10
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* @d, align 4
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: std
  ret void
}
    126 
; float store: expect fadds for the add, stfs for the store.
define void @t11(float %v) nounwind {
; ELF64: t11
  %1 = fadd float %v, 1.0
  store float %1, float* @e, align 4
; ELF64: fadds
; ELF64: stfs
  ret void
}
    135 
; double store: expect fadd for the add, stfd for the store.
define void @t12(double %v) nounwind {
; ELF64: t12
  %1 = fadd double %v, 1.0
  store double %1, double* @f, align 8
; ELF64: fadd
; ELF64: stfd
  ret void
}
    144 
;; lwa requires an offset divisible by 4, so we need lwax here.
;; The packed-struct field @g.1 is at byte offset 1: li materializes the
;; offset into a register, then lwax does the indexed sign-extending load.
define i64 @t13() nounwind {
; ELF64: t13
  %1 = load i32, i32* getelementptr inbounds (%struct.s, %struct.s* @g, i32 0, i32 1), align 1
  %2 = sext i32 %1 to i64
; ELF64: li
; ELF64: lwax
  %3 = add nsw i64 %2, 1
; ELF64: addi
  ret i64 %3
}
    156 
;; ld requires an offset divisible by 4, so we need ldx here.
;; @h.1 sits at byte offset 1 (packed struct), forcing the indexed form.
define i64 @t14() nounwind {
; ELF64: t14
  %1 = load i64, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
; ELF64: ldx
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}
    167 
;; std requires an offset divisible by 4, so we need stdx here.
;; Store counterpart of t14: misaligned offset 1 forces the indexed store.
define void @t15(i64 %v) nounwind {
; ELF64: t15
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* getelementptr inbounds (%struct.t, %struct.t* @h, i32 0, i32 1), align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: li
; ELF64: stdx
  ret void
}
    180 
;; ld requires an offset that fits in 16 bits, so we need ldx here.
;; Element 5000 is at byte offset 40000 (> 32767): lis/ori build the
;; offset in a register, then ldx performs the indexed load.
define i64 @t16() nounwind {
; ELF64: t16
  %1 = load i64, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
; ELF64: ori
; ELF64: ldx
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}
    192 
;; std requires an offset that fits in 16 bits, so we need stdx here.
;; Store counterpart of t16: byte offset 40000 exceeds the signed 16-bit
;; displacement, so lis/ori materialize it and stdx does the indexed store.
define void @t17(i64 %v) nounwind {
; ELF64: t17
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
; ELF64: ld
; ELF64: addi
; ELF64: lis
; ELF64: ori
; ELF64: stdx
  ret void
}
    206 
    207