// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN:   -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

// Test new aarch64 intrinsics and types

#include <arm_neon.h>

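// vext_<type>(a, b, n) treats a and b as one concatenated vector and extracts a
// result of the same width starting at lane n. After mem2reg, each call below
// lowers to a single shufflevector whose mask begins at the chosen lane (with
// bitcasts through <8 x i8>/<16 x i8> for non-8-bit element types), which is
// what the CHECK lines verify.
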
// CHECK-LABEL: define <8 x i8> @test_vext_s8(<8 x i8> %a, <8 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
// CHECK:   ret <8 x i8> [[VEXT]]
int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) {
  return vext_s8(a, b, 2);
}

// CHECK-LABEL: define <4 x i16> @test_vext_s16(<4 x i16> %a, <4 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
// CHECK:   ret <4 x i16> [[VEXT]]
int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) {
  return vext_s16(a, b, 3);
}

// CHECK-LABEL: define <2 x i32> @test_vext_s32(<2 x i32> %a, <2 x i32> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x i32> [[VEXT]]
int32x2_t test_vext_s32(int32x2_t a, int32x2_t b) {
  return vext_s32(a, b, 1);
}

// CHECK-LABEL: define <1 x i64> @test_vext_s64(<1 x i64> %a, <1 x i64> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
// CHECK:   [[VEXT:%.*]] = shufflevector <1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i32> zeroinitializer
// CHECK:   ret <1 x i64> [[VEXT]]
int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
  return vext_s64(a, b, 0);
}

// CHECK-LABEL: define <16 x i8> @test_vextq_s8(<16 x i8> %a, <16 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
// CHECK:   ret <16 x i8> [[VEXT]]
int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) {
  return vextq_s8(a, b, 2);
}

// CHECK-LABEL: define <8 x i16> @test_vextq_s16(<8 x i16> %a, <8 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
// CHECK:   ret <8 x i16> [[VEXT]]
int16x8_t test_vextq_s16(int16x8_t a, int16x8_t b) {
  return vextq_s16(a, b, 3);
}

// CHECK-LABEL: define <4 x i32> @test_vextq_s32(<4 x i32> %a, <4 x i32> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK:   ret <4 x i32> [[VEXT]]
int32x4_t test_vextq_s32(int32x4_t a, int32x4_t b) {
  return vextq_s32(a, b, 1);
}

// CHECK-LABEL: define <2 x i64> @test_vextq_s64(<2 x i64> %a, <2 x i64> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x i64> [[VEXT]]
int64x2_t test_vextq_s64(int64x2_t a, int64x2_t b) {
  return vextq_s64(a, b, 1);
}

// CHECK-LABEL: define <8 x i8> @test_vext_u8(<8 x i8> %a, <8 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
// CHECK:   ret <8 x i8> [[VEXT]]
uint8x8_t test_vext_u8(uint8x8_t a, uint8x8_t b) {
  return vext_u8(a, b, 2);
}

// CHECK-LABEL: define <4 x i16> @test_vext_u16(<4 x i16> %a, <4 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
// CHECK:   ret <4 x i16> [[VEXT]]
uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) {
  return vext_u16(a, b, 3);
}

// CHECK-LABEL: define <2 x i32> @test_vext_u32(<2 x i32> %a, <2 x i32> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x i32> [[VEXT]]
uint32x2_t test_vext_u32(uint32x2_t a, uint32x2_t b) {
  return vext_u32(a, b, 1);
}

// CHECK-LABEL: define <1 x i64> @test_vext_u64(<1 x i64> %a, <1 x i64> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
// CHECK:   [[VEXT:%.*]] = shufflevector <1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i32> zeroinitializer
// CHECK:   ret <1 x i64> [[VEXT]]
uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
  return vext_u64(a, b, 0);
}

// CHECK-LABEL: define <16 x i8> @test_vextq_u8(<16 x i8> %a, <16 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
// CHECK:   ret <16 x i8> [[VEXT]]
uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) {
  return vextq_u8(a, b, 2);
}

// CHECK-LABEL: define <8 x i16> @test_vextq_u16(<8 x i16> %a, <8 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
// CHECK:   ret <8 x i16> [[VEXT]]
uint16x8_t test_vextq_u16(uint16x8_t a, uint16x8_t b) {
  return vextq_u16(a, b, 3);
}

// CHECK-LABEL: define <4 x i32> @test_vextq_u32(<4 x i32> %a, <4 x i32> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK:   ret <4 x i32> [[VEXT]]
uint32x4_t test_vextq_u32(uint32x4_t a, uint32x4_t b) {
  return vextq_u32(a, b, 1);
}

// CHECK-LABEL: define <2 x i64> @test_vextq_u64(<2 x i64> %a, <2 x i64> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x i64> [[VEXT]]
uint64x2_t test_vextq_u64(uint64x2_t a, uint64x2_t b) {
  return vextq_u64(a, b, 1);
}

// CHECK-LABEL: define <2 x float> @test_vext_f32(<2 x float> %a, <2 x float> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x float> [[VEXT]]
float32x2_t test_vext_f32(float32x2_t a, float32x2_t b) {
  return vext_f32(a, b, 1);
}

// CHECK-LABEL: define <1 x double> @test_vext_f64(<1 x double> %a, <1 x double> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
// CHECK:   [[VEXT:%.*]] = shufflevector <1 x double> [[TMP2]], <1 x double> [[TMP3]], <1 x i32> zeroinitializer
// CHECK:   ret <1 x double> [[VEXT]]
float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
  return vext_f64(a, b, 0);
}

// CHECK-LABEL: define <4 x float> @test_vextq_f32(<4 x float> %a, <4 x float> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// CHECK:   ret <4 x float> [[VEXT]]
float32x4_t test_vextq_f32(float32x4_t a, float32x4_t b) {
  return vextq_f32(a, b, 1);
}

// CHECK-LABEL: define <2 x double> @test_vextq_f64(<2 x double> %a, <2 x double> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
// CHECK:   [[VEXT:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP3]], <2 x i32> <i32 1, i32 2>
// CHECK:   ret <2 x double> [[VEXT]]
float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) {
  return vextq_f64(a, b, 1);
}

// CHECK-LABEL: define <8 x i8> @test_vext_p8(<8 x i8> %a, <8 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
// CHECK:   ret <8 x i8> [[VEXT]]
poly8x8_t test_vext_p8(poly8x8_t a, poly8x8_t b) {
  return vext_p8(a, b, 2);
}

// CHECK-LABEL: define <4 x i16> @test_vext_p16(<4 x i16> %a, <4 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
// CHECK:   ret <4 x i16> [[VEXT]]
poly16x4_t test_vext_p16(poly16x4_t a, poly16x4_t b) {
  return vext_p16(a, b, 3);
}

// CHECK-LABEL: define <16 x i8> @test_vextq_p8(<16 x i8> %a, <16 x i8> %b) #0 {
// CHECK:   [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
// CHECK:   ret <16 x i8> [[VEXT]]
poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) {
  return vextq_p8(a, b, 2);
}

// CHECK-LABEL: define <8 x i16> @test_vextq_p16(<8 x i16> %a, <8 x i16> %b) #0 {
// CHECK:   [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
// CHECK:   [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK:   [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// CHECK:   [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// CHECK:   [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
// CHECK:   ret <8 x i16> [[VEXT]]
poly16x8_t test_vextq_p16(poly16x8_t a, poly16x8_t b) {
  return vextq_p16(a, b, 3);
}