; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
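; The tests below check that shufflevector operations are matched to the NEON
; VEXT instruction where possible. VEXT selects a contiguous run of elements
; from the concatenation of its two source registers, starting at the offset
; given by its immediate.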
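; The mask below selects eight consecutive elements starting at index 3 of
; the concatenation {%tmp1, %tmp2}, which is exactly vext with immediate #3.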
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: test_vextd:
;CHECK: vext
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
	ret <8 x i8> %tmp3
}

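; Here the selected window starts in %tmp2 (indices 13-15) and wraps into
; %tmp1, so VEXT can still be used after swapping the operands (hence,
; presumably, the 'R' in the name); the expected immediate is #5.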
define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: test_vextRd:
;CHECK: vext
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
	ret <8 x i8> %tmp3
}

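; The same kinds of extraction across 128-bit vectors should use the
; q-register form of vext.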
define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: test_vextq:
;CHECK: vext
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
	ret <16 x i8> %tmp3
}

define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: test_vextRq:
;CHECK: vext
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
	ret <16 x i8> %tmp3
}

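; VEXT is not limited to bytes: for wider element types the shuffle indices
; are in elements and the immediate is scaled by the element size.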
define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: test_vextd16:
;CHECK: vext
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
	ret <4 x i16> %tmp3
}

define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: test_vextq32:
;CHECK: vext
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
	ret <4 x i32> %tmp3
}

; Undef shuffle indices should not prevent matching to VEXT:
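; (an undef index is a don't-care lane and should be treated as consistent
; with whatever offset the defined indices establish)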

define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: test_vextd_undef:
;CHECK: vext
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
	ret <8 x i8> %tmp3
}

define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: test_vextRq_undef:
;CHECK: vext
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
	ret <16 x i8> %tmp3
}

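; With the second operand undef, a mask such as <2, ..., 15, 0, 1> is simply
; a rotation of the first operand; vext can do this by using the same
; register for both sources.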
define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind {
;CHECK: test_vextq_undef_op2:
;CHECK: vext
entry:
  %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
  ret <16 x i8> %tmp1
}

define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind {
;CHECK: test_vextd_undef_op2:
;CHECK: vext
entry:
  %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
  ret <8 x i8> %tmp1
}


define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind {
;CHECK: test_vextq_undef_op2_undef:
;CHECK: vext
entry:
  %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
  ret <16 x i8> %tmp1
}

define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind {
;CHECK: test_vextd_undef_op2_undef:
;CHECK: vext
entry:
  %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 1>
  ret <8 x i8> %tmp1
}

; Tests for the ReconstructShuffle function. Indices have to be carefully
; chosen to reach the lowering phase as a BUILD_VECTOR.

; One vector needs a vext, the other can be handled by extract_subvector.
; Also checks that interleaving of sources is handled correctly.
; In essence: a vext is used on %A, and something saner than a stack
; load/store is used for the final result.
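; For the mask <3, 8, 5, 9>: elements 3 and 5 of %A have to be gathered with
; a vext, elements 8 and 9 (elements 0 and 1 of %B) come from a plain
; subvector extract, and a vzip interleaves the two.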
define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: test_interleaved:
;CHECK: vext.16
;CHECK-NOT: vext.16
;CHECK: vzip.16
        %tmp1 = load <8 x i16>* %A
        %tmp2 = load <8 x i16>* %B
        %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
        ret <4 x i16> %tmp3
}

; An undef in the shuffle mask should still be optimizable.
define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: test_undef:
;CHECK: vzip.16
        %tmp1 = load <8 x i16>* %A
        %tmp2 = load <8 x i16>* %B
        %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
        ret <4 x i16> %tmp3
}

; We should ignore a build_vector with more than two sources.
; Use an illegal <32 x i16> type to produce such a shuffle after legalizing types.
; Check for the fallback to stack expansion.
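; Under NEON the <32 x i16> load is legalized into four <8 x i16> parts, and
; the mask <0, 8, 16, 24> takes one element from each part, so the resulting
; build_vector has four sources and should hit the vst1.16 stack fallback.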
define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
;CHECK: test_multisource:
;CHECK: vst1.16
        %tmp1 = load <32 x i16>* %B
        %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
        ret <4 x i16> %tmp2
}

; We don't handle shuffles using more than half of a 128-bit vector.
; Again, check for the fallback to stack expansion.
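; The mask <0, 2, 4, 6> reads from both 64-bit halves of the 128-bit source,
; so this should likewise fall back to the vst1.16 expansion.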
define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
;CHECK: test_largespan:
;CHECK: vst1.16
        %tmp1 = load <8 x i16>* %B
        %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
        ret <4 x i16> %tmp2
}

; The actual shuffle code only handles some cases; make sure we check for
; this rather than blindly emitting a VECTOR_SHUFFLE (an infinite lowering
; loop can result otherwise).
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: test_illegal:
;CHECK: vst1.16
       %tmp1 = load <8 x i16>* %A
       %tmp2 = load <8 x i16>* %B
       %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
       ret <8 x i16> %tmp3
}

; PR11129
; Make sure this doesn't crash.
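; The loaded <2 x i64> and the stored <4 x i16> disagree on element size;
; presumably that mismatch is what triggered the crash tracked in PR11129.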
define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
; CHECK: test_elem_mismatch:
; CHECK: vstr
  %tmp0 = load <2 x i64>* %src, align 16
  %tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
  %tmp4 = trunc i32 %tmp2 to i16
  %tmp5 = trunc i32 %tmp3 to i16
  %tmp6 = insertelement <4 x i16> undef, i16 %tmp4, i32 0
  %tmp7 = insertelement <4 x i16> %tmp6, i16 %tmp5, i32 1
  store <4 x i16> %tmp7, <4 x i16>* %dest, align 4
  ret void
}
    186