; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+crypto < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 < %s | FileCheck %s
; FIXME: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; FIXME: The original intent was to add a check-next for the blr after every
; check. However, this currently fails because we do not yet eliminate the
; stores to the unused locals, and those stores are sometimes scheduled after
; the crypto instruction.
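;
; Illustrative sketch only (deliberately not written as live check lines, and
; with operand registers assumed rather than taken from actual codegen): once
; those dead stores are eliminated, each test body is expected to end with the
; crypto instruction immediately followed by the return, e.g. for
; test_vpmsumb:
;   vpmsumb 2, 2, 3
;   blr
; at which point each existing check can gain a check-next on the blr.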

; Function Attrs: nounwind
define <16 x i8> @test_vpmsumb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %2
; CHECK: vpmsumb 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpmsumh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
  ret <8 x i16> %2
; CHECK: vpmsumh 2,
}

; Function Attrs: nounwind readnone
declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vpmsumw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %2
; CHECK: vpmsumw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vpmsumd() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vpmsumd 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vsbox() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
  ret <2 x i64> %1
; CHECK: vsbox 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1

; Function Attrs: nounwind
define <16 x i8> @test_vpermxorb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  %c = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %2 = load <16 x i8>, <16 x i8>* %c, align 16
  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
  ret <16 x i8> %3
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8>, <16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpermxorh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  %c = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = bitcast <8 x i16> %0 to <16 x i8>
  %2 = load <8 x i16>, <8 x i16>* %b, align 16
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = load <8 x i16>, <8 x i16>* %c, align 16
  %5 = bitcast <8 x i16> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  ret <8 x i16> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <4 x i32> @test_vpermxorw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  %c = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = bitcast <4 x i32> %0 to <16 x i8>
  %2 = load <4 x i32>, <4 x i32>* %b, align 16
  %3 = bitcast <4 x i32> %2 to <16 x i8>
  %4 = load <4 x i32>, <4 x i32>* %c, align 16
  %5 = bitcast <4 x i32> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  ret <4 x i32> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vpermxord() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  %c = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = bitcast <2 x i64> %0 to <16 x i8>
  %2 = load <2 x i64>, <2 x i64>* %b, align 16
  %3 = bitcast <2 x i64> %2 to <16 x i8>
  %4 = load <2 x i64>, <2 x i64>* %c, align 16
  %5 = bitcast <2 x i64> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  ret <2 x i64> %7
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vcipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vcipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vshasigmaw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
  ret <4 x i32> %1
; CHECK: vshasigmaw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32>, i32, i32) #1

; Function Attrs: nounwind
define <2 x i64> @test_vshasigmad() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
  ret <2 x i64> %1
; CHECK: vshasigmad 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64>, i32, i32) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.7.0 (trunk 230949) (llvm/trunk 230946)"}