; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+crypto < %s | FileCheck %s
; FIXME: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; FIXME: The original intent was to add a CHECK-NEXT for the blr after every check.
; However, this currently fails because we do not eliminate the stores to the
; unused locals, and those stores are sometimes scheduled after the crypto
; instruction.

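; The blr check would be easy to make stable if the operands reached the
; intrinsic as function arguments instead of through stores to stack locals.
; A minimal sketch of that shape (kept in a comment so it does not affect this
; test; @vpmsumb_args is a hypothetical name used only for illustration):
;
;   define <16 x i8> @vpmsumb_args(<16 x i8> %x, <16 x i8> %y) #0 {
;     %r = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %x, <16 x i8> %y)
;     ret <16 x i8> %r
;   }
;
; With no dead stores left to schedule, the vpmsumb would likely be the last
; instruction before the return, so a follow-up check for blr would be stable.
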
; Function Attrs: nounwind
define <16 x i8> @test_vpmsumb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  %0 = load <16 x i8>,  <16 x i8>* %a, align 16
  %1 = load <16 x i8>,  <16 x i8>* %b, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %2
; CHECK: vpmsumb 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpmsumh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  %0 = load <8 x i16>,  <8 x i16>* %a, align 16
  %1 = load <8 x i16>,  <8 x i16>* %b, align 16
  %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
  ret <8 x i16> %2
; CHECK: vpmsumh 2,
}

; Function Attrs: nounwind readnone
declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vpmsumw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = load <4 x i32>,  <4 x i32>* %b, align 16
  %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %2
; CHECK: vpmsumw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vpmsumd() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vpmsumd 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vsbox() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
  ret <2 x i64> %1
; CHECK: vsbox 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1

; Function Attrs: nounwind
define <16 x i8> @test_vpermxorb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  %c = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
  %0 = load <16 x i8>,  <16 x i8>* %a, align 16
  %1 = load <16 x i8>,  <16 x i8>* %b, align 16
  %2 = load <16 x i8>,  <16 x i8>* %c, align 16
  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
  ret <16 x i8> %3
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8>, <16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpermxorh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  %c = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
  %0 = load <8 x i16>,  <8 x i16>* %a, align 16
  %1 = bitcast <8 x i16> %0 to <16 x i8>
  %2 = load <8 x i16>,  <8 x i16>* %b, align 16
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = load <8 x i16>,  <8 x i16>* %c, align 16
  %5 = bitcast <8 x i16> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  ret <8 x i16> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <4 x i32> @test_vpermxorw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  %c = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = bitcast <4 x i32> %0 to <16 x i8>
  %2 = load <4 x i32>,  <4 x i32>* %b, align 16
  %3 = bitcast <4 x i32> %2 to <16 x i8>
  %4 = load <4 x i32>,  <4 x i32>* %c, align 16
  %5 = bitcast <4 x i32> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  ret <4 x i32> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vpermxord() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  %c = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = bitcast <2 x i64> %0 to <16 x i8>
  %2 = load <2 x i64>,  <2 x i64>* %b, align 16
  %3 = bitcast <2 x i64> %2 to <16 x i8>
  %4 = load <2 x i64>,  <2 x i64>* %c, align 16
  %5 = bitcast <2 x i64> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  ret <2 x i64> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vcipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vcipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vshasigmaw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
  ret <4 x i32> %1
; CHECK: vshasigmaw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32>, i32, i32) #1

; Function Attrs: nounwind
define <2 x i64> @test_vshasigmad() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
  ret <2 x i64> %1
; CHECK: vshasigmad 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64>, i32, i32) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.7.0 (trunk 230949) (llvm/trunk 230946)"}