; LLVM CodeGen test: PowerPC VSX vector intrinsics and ceil lowering.
      1 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
      2 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
      3 
; Global operands/results shared by the test bodies below.
; Naming: vd* = <2 x double>, vf* = <4 x float>, vbll* = <2 x i64> mask,
; vbi* = <4 x i32> mask; the ...a/...b suffixes are inputs, ...r results.
@vda = common global <2 x double> zeroinitializer, align 16
@vdb = common global <2 x double> zeroinitializer, align 16
@vdr = common global <2 x double> zeroinitializer, align 16
@vfa = common global <4 x float> zeroinitializer, align 16
@vfb = common global <4 x float> zeroinitializer, align 16
@vfr = common global <4 x float> zeroinitializer, align 16
@vbllr = common global <2 x i64> zeroinitializer, align 16
@vbir = common global <4 x i32> zeroinitializer, align 16
@vblla = common global <2 x i64> zeroinitializer, align 16
@vbllb = common global <2 x i64> zeroinitializer, align 16
@vbia = common global <4 x i32> zeroinitializer, align 16
@vbib = common global <4 x i32> zeroinitializer, align 16
     16 
     17 ; Function Attrs: nounwind
     18 define void @test1() {
     19 entry:
     20   %0 = load <2 x double>, <2 x double>* @vda, align 16
     21   %1 = load <2 x double>, <2 x double>* @vdb, align 16
     22   %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
     23   store <2 x double> %2, <2 x double>* @vdr, align 16
     24   ret void
     25 ; CHECK-LABEL: @test1
     26 ; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
     27 }
     28 
     29 ; Function Attrs: nounwind
     30 define void @test2() {
     31 entry:
     32   %0 = load <4 x float>, <4 x float>* @vfa, align 16
     33   %1 = load <4 x float>, <4 x float>* @vfb, align 16
     34   %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
     35   store <4 x float> %2, <4 x float>* @vfr, align 16
     36   ret void
     37 ; CHECK-LABEL: @test2
     38 ; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
     39 }
     40 
     41 ; Function Attrs: nounwind
     42 define void @test3() {
     43 entry:
     44   %0 = load <2 x double>, <2 x double>* @vda, align 16
     45   %1 = load <2 x double>, <2 x double>* @vda, align 16
     46   %2 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %1)
     47   store <2 x double> %2, <2 x double>* @vdr, align 16
     48   ret void
     49 ; CHECK-LABEL: @test3
     50 ; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
     51 }
     52 
     53 ; Function Attrs: nounwind
     54 define void @test4() {
     55 entry:
     56   %0 = load <4 x float>, <4 x float>* @vfa, align 16
     57   %1 = load <4 x float>, <4 x float>* @vfa, align 16
     58   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %1)
     59   store <4 x float> %2, <4 x float>* @vfr, align 16
     60   ret void
     61 ; CHECK-LABEL: @test4
     62 ; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
     63 }
     64 
     65 ; Function Attrs: nounwind
     66 define void @test5() {
     67 entry:
     68   %0 = load <2 x double>, <2 x double>* @vda, align 16
     69   %1 = load <2 x double>, <2 x double>* @vdb, align 16
     70   %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
     71   store <2 x i64> %2, <2 x i64>* @vbllr, align 16
     72   ret void
     73 ; CHECK-LABEL: @test5
     74 ; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
     75 }
     76 
     77 ; Function Attrs: nounwind
     78 define void @test6() {
     79 entry:
     80   %0 = load <4 x float>, <4 x float>* @vfa, align 16
     81   %1 = load <4 x float>, <4 x float>* @vfb, align 16
     82   %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
     83   store <4 x i32> %2, <4 x i32>* @vbir, align 16
     84   ret void
     85 ; CHECK-LABEL: @test6
     86 ; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
     87 }
     88 
     89 ; Function Attrs: nounwind
     90 define void @test7() {
     91 entry:
     92   %0 = load <2 x double>, <2 x double>* @vda, align 16
     93   %1 = load <2 x double>, <2 x double>* @vdb, align 16
     94   %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
     95   store <2 x i64> %2, <2 x i64>* @vbllr, align 16
     96   ret void
     97 ; CHECK-LABEL: @test7
     98 ; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
     99 }
    100 
    101 ; Function Attrs: nounwind
    102 define void @test8() {
    103 entry:
    104   %0 = load <4 x float>, <4 x float>* @vfa, align 16
    105   %1 = load <4 x float>, <4 x float>* @vfb, align 16
    106   %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
    107   store <4 x i32> %2, <4 x i32>* @vbir, align 16
    108   ret void
    109 ; CHECK-LABEL: @test8
    110 ; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
    111 }
    112 
    113 ; Function Attrs: nounwind
    114 define void @test9() {
    115 entry:
    116   %0 = load <2 x double>, <2 x double>* @vda, align 16
    117   %1 = load <2 x double>, <2 x double>* @vdb, align 16
    118   %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
    119   store <2 x i64> %2, <2 x i64>* @vbllr, align 16
    120   ret void
    121 ; CHECK-LABEL: @test9
    122 ; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
    123 }
    124 
    125 ; Function Attrs: nounwind
    126 define void @test10() {
    127 entry:
    128   %0 = load <4 x float>, <4 x float>* @vfa, align 16
    129   %1 = load <4 x float>, <4 x float>* @vfb, align 16
    130   %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
    131   store <4 x i32> %2, <4 x i32>* @vbir, align 16
    132   ret void
    133 ; CHECK-LABEL: @test10
    134 ; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
    135 }
    136 
    137 ; Function Attrs: nounwind
    138 define <4 x float> @emit_xvresp(<4 x float> %a) {
    139 entry:
    140   %a.addr = alloca <4 x float>, align 16
    141   store <4 x float> %a, <4 x float>* %a.addr, align 16
    142   %0 = load <4 x float>, <4 x float>* %a.addr, align 16
    143   %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
    144   ret <4 x float> %1
    145 ; CHECK-LABEL: @emit_xvresp
    146 ; CHECK: xvresp {{[0-9]+}}, {{[0-9]+}}
    147 }
    148 
    149 ; Function Attrs: nounwind
    150 define <2 x double> @emit_xvredp(<2 x double> %a) {
    151 entry:
    152   %a.addr = alloca <2 x double>, align 16
    153   store <2 x double> %a, <2 x double>* %a.addr, align 16
    154   %0 = load <2 x double>, <2 x double>* %a.addr, align 16
    155   %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
    156   ret <2 x double> %1
    157 ; CHECK-LABEL: @emit_xvredp
    158 ; CHECK: xvredp {{[0-9]+}}, {{[0-9]+}}
    159 }
    160 
    161 ; Function Attrs: nounwind readnone
    162 define <4 x i32> @emit_xvcvdpsxws(<2 x double> %a) {
    163 entry:
    164   %0 = tail call <4 x i32> @llvm.ppc.vsx.xvcvdpsxws(<2 x double> %a)
    165   ret <4 x i32> %0
    166 ; CHECK-LABEL: @emit_xvcvdpsxws
    167 ; CHECK: xvcvdpsxws 34, 34
    168 }
    169 
    170 ; Function Attrs: nounwind readnone
    171 define <4 x i32> @emit_xvcvdpuxws(<2 x double> %a) {
    172 entry:
    173   %0 = tail call <4 x i32> @llvm.ppc.vsx.xvcvdpuxws(<2 x double> %a)
    174   ret <4 x i32> %0
    175 ; CHECK-LABEL: @emit_xvcvdpuxws
    176 ; CHECK: xvcvdpuxws 34, 34
    177 }
    178 
    179 ; Function Attrs: nounwind readnone
    180 define <2 x double> @emit_xvcvsxwdp(<4 x i32> %a) {
    181 entry:
    182   %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvsxwdp(<4 x i32> %a)
    183   ret <2 x double> %0
    184 ; CHECK-LABEL: @emit_xvcvsxwdp
    185 ; CHECK: xvcvsxwdp 34, 34
    186 }
    187 
    188 ; Function Attrs: nounwind readnone
    189 define <2 x double> @emit_xvcvuxwdp(<4 x i32> %a) {
    190 entry:
    191   %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvuxwdp(<4 x i32> %a)
    192   ret <2 x double> %0
    193 ; CHECK-LABEL: @emit_xvcvuxwdp
    194 ; CHECK: xvcvuxwdp 34, 34
    195 }
    196 
    197 ; Function Attrs: nounwind readnone
    198 define <2 x double> @emit_xvcvspdp(<4 x float> %a) {
    199 entry:
    200   %0 = tail call <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float> %a)
    201   ret <2 x double> %0
    202 ; CHECK-LABEL: @emit_xvcvspdp
    203 ; CHECK: xvcvspdp 34, 34
    204 }
    205 
    206 ; Function Attrs: nounwind readnone
    207 define <4 x float> @emit_xvcvsxdsp(<2 x i64> %a) {
    208 entry:
    209   %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsxdsp(<2 x i64> %a)
    210   ret <4 x float> %0
    211 ; CHECK-LABEL: @emit_xvcvsxdsp
    212 ; CHECK: xvcvsxdsp 34, 34
    213 }
    214 
    215 ; Function Attrs: nounwind readnone
    216 define <4 x float> @emit_xvcvuxdsp(<2 x i64> %a) {
    217 entry:
    218   %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvuxdsp(<2 x i64> %a)
    219   ret <4 x float> %0
    220 ; CHECK-LABEL: @emit_xvcvuxdsp
    221 ; CHECK: xvcvuxdsp 34, 34
    222 }
    223 
    224 ; Function Attrs: nounwind readnone
    225 define <4 x float> @emit_xvcvdpsp(<2 x double> %a) {
    226 entry:
    227   %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvdpsp(<2 x double> %a)
    228   ret <4 x float> %0
    229 ; CHECK-LABEL: @emit_xvcvdpsp
    230 ; CHECK: xvcvdpsp 34, 34
    231 }
    232 
; Intrinsic declarations for everything called above.

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)
; NOTE(review): the declarations below reference attribute group #1, whose
; definition (e.g. "attributes #1 = { nounwind readnone }") is not visible in
; this chunk — confirm it exists later in the file.
declare <4 x float> @llvm.ppc.vsx.xvcvdpsp(<2 x double>) #1
declare <4 x i32> @llvm.ppc.vsx.xvcvdpsxws(<2 x double>) #1
declare <4 x i32> @llvm.ppc.vsx.xvcvdpuxws(<2 x double>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvsxwdp(<4 x i32>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvuxwdp(<4 x i32>) #1
declare <2 x double> @llvm.ppc.vsx.xvcvspdp(<4 x float>) #1
declare <4 x float> @llvm.ppc.vsx.xvcvsxdsp(<2 x i64>) #1
declare <4 x float> @llvm.ppc.vsx.xvcvuxdsp(<2 x i64>) #1