; Home | History | Annotate | Download | only in X86
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW

      5 define   <16 x i32> @_inreg16xi32(i32 %a) {
      6 ; ALL-LABEL: _inreg16xi32:
      7 ; ALL:       # BB#0:
      8 ; ALL-NEXT:    vpbroadcastd %edi, %zmm0
      9 ; ALL-NEXT:    retq
     10   %b = insertelement <16 x i32> undef, i32 %a, i32 0
     11   %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
     12   ret <16 x i32> %c
     13 }
     14 
     15 define   <8 x i64> @_inreg8xi64(i64 %a) {
     16 ; ALL-LABEL: _inreg8xi64:
     17 ; ALL:       # BB#0:
     18 ; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
     19 ; ALL-NEXT:    retq
     20   %b = insertelement <8 x i64> undef, i64 %a, i32 0
     21   %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
     22   ret <8 x i64> %c
     23 }
     24 
     25 define   <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
     26 ; ALL-LABEL: _ss16xfloat_v4:
     27 ; ALL:       # BB#0:
     28 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
     29 ; ALL-NEXT:    retq
     30   %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
     31   ret <16 x float> %b
     32 }
     33 
     34 define   <16 x float> @_inreg16xfloat(float %a) {
     35 ; ALL-LABEL: _inreg16xfloat:
     36 ; ALL:       # BB#0:
     37 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
     38 ; ALL-NEXT:    retq
     39   %b = insertelement <16 x float> undef, float %a, i32 0
     40   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
     41   ret <16 x float> %c
     42 }
     43 
     44 define   <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
     45 ; ALL-LABEL: _ss16xfloat_mask:
     46 ; ALL:       # BB#0:
     47 ; ALL-NEXT:    vpxord %zmm3, %zmm3, %zmm3
     48 ; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
     49 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
     50 ; ALL-NEXT:    vmovaps %zmm1, %zmm0
     51 ; ALL-NEXT:    retq
     52   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
     53   %b = insertelement <16 x float> undef, float %a, i32 0
     54   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
     55   %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
     56   ret <16 x float> %r
     57 }
     58 
     59 define   <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
     60 ; ALL-LABEL: _ss16xfloat_maskz:
     61 ; ALL:       # BB#0:
     62 ; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
     63 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
     64 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
     65 ; ALL-NEXT:    retq
     66   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
     67   %b = insertelement <16 x float> undef, float %a, i32 0
     68   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
     69   %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
     70   ret <16 x float> %r
     71 }
     72 
     73 define   <16 x float> @_ss16xfloat_load(float* %a.ptr) {
     74 ; ALL-LABEL: _ss16xfloat_load:
     75 ; ALL:       # BB#0:
     76 ; ALL-NEXT:    vbroadcastss (%rdi), %zmm0
     77 ; ALL-NEXT:    retq
     78   %a = load float, float* %a.ptr
     79   %b = insertelement <16 x float> undef, float %a, i32 0
     80   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
     81   ret <16 x float> %c
     82 }
     83 
     84 define   <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
     85 ; ALL-LABEL: _ss16xfloat_mask_load:
     86 ; ALL:       # BB#0:
     87 ; ALL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
     88 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
     89 ; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1}
     90 ; ALL-NEXT:    retq
     91   %a = load float, float* %a.ptr
     92   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
     93   %b = insertelement <16 x float> undef, float %a, i32 0
     94   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
     95   %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
     96   ret <16 x float> %r
     97 }
     98 
     99 define   <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
    100 ; ALL-LABEL: _ss16xfloat_maskz_load:
    101 ; ALL:       # BB#0:
    102 ; ALL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
    103 ; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
    104 ; ALL-NEXT:    vbroadcastss (%rdi), %zmm0 {%k1} {z}
    105 ; ALL-NEXT:    retq
    106   %a = load float, float* %a.ptr
    107   %mask = icmp ne <16 x i32> %mask1, zeroinitializer
    108   %b = insertelement <16 x float> undef, float %a, i32 0
    109   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
    110   %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
    111   ret <16 x float> %r
    112 }
    113 
    114 define   <8 x double> @_inreg8xdouble(double %a) {
    115 ; ALL-LABEL: _inreg8xdouble:
    116 ; ALL:       # BB#0:
    117 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
    118 ; ALL-NEXT:    retq
    119   %b = insertelement <8 x double> undef, double %a, i32 0
    120   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    121   ret <8 x double> %c
    122 }
    123 
    124 define   <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
    125 ; ALL-LABEL: _sd8xdouble_mask:
    126 ; ALL:       # BB#0:
    127 ; ALL-NEXT:    # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
    128 ; ALL-NEXT:    vpxor %ymm3, %ymm3, %ymm3
    129 ; ALL-NEXT:    vpcmpneqd %zmm3, %zmm2, %k1
    130 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
    131 ; ALL-NEXT:    vmovaps %zmm1, %zmm0
    132 ; ALL-NEXT:    retq
    133   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
    134   %b = insertelement <8 x double> undef, double %a, i32 0
    135   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    136   %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
    137   ret <8 x double> %r
    138 }
    139 
    140 define   <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
    141 ; ALL-LABEL: _sd8xdouble_maskz:
    142 ; ALL:       # BB#0:
    143 ; ALL-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
    144 ; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
    145 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
    146 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
    147 ; ALL-NEXT:    retq
    148   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
    149   %b = insertelement <8 x double> undef, double %a, i32 0
    150   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    151   %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
    152   ret <8 x double> %r
    153 }
    154 
    155 define   <8 x double> @_sd8xdouble_load(double* %a.ptr) {
    156 ; ALL-LABEL: _sd8xdouble_load:
    157 ; ALL:       # BB#0:
    158 ; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0
    159 ; ALL-NEXT:    retq
    160   %a = load double, double* %a.ptr
    161   %b = insertelement <8 x double> undef, double %a, i32 0
    162   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    163   ret <8 x double> %c
    164 }
    165 
    166 define   <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
    167 ; ALL-LABEL: _sd8xdouble_mask_load:
    168 ; ALL:       # BB#0:
    169 ; ALL-NEXT:    # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
    170 ; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
    171 ; ALL-NEXT:    vpcmpneqd %zmm2, %zmm1, %k1
    172 ; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1}
    173 ; ALL-NEXT:    retq
    174   %a = load double, double* %a.ptr
    175   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
    176   %b = insertelement <8 x double> undef, double %a, i32 0
    177   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    178   %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
    179   ret <8 x double> %r
    180 }
    181 
    182 define   <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
    183 ; ALL-LABEL: _sd8xdouble_maskz_load:
    184 ; ALL:       # BB#0:
    185 ; ALL-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
    186 ; ALL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
    187 ; ALL-NEXT:    vpcmpneqd %zmm1, %zmm0, %k1
    188 ; ALL-NEXT:    vbroadcastsd (%rdi), %zmm0 {%k1} {z}
    189 ; ALL-NEXT:    retq
    190   %a = load double, double* %a.ptr
    191   %mask = icmp ne <8 x i32> %mask1, zeroinitializer
    192   %b = insertelement <8 x double> undef, double %a, i32 0
    193   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    194   %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
    195   ret <8 x double> %r
    196 }
    197 
    198 define   <16 x i32> @_xmm16xi32(<16 x i32> %a) {
    199 ; ALL-LABEL: _xmm16xi32:
    200 ; ALL:       # BB#0:
    201 ; ALL-NEXT:    vpbroadcastd %xmm0, %zmm0
    202 ; ALL-NEXT:    retq
    203   %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
    204   ret <16 x i32> %b
    205 }
    206 
    207 define   <16 x float> @_xmm16xfloat(<16 x float> %a) {
    208 ; ALL-LABEL: _xmm16xfloat:
    209 ; ALL:       # BB#0:
    210 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
    211 ; ALL-NEXT:    retq
    212   %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
    213   ret <16 x float> %b
    214 }
    215 
    216 define <16 x i32> @test_vbroadcast() {
    217 ; ALL-LABEL: test_vbroadcast:
    218 ; ALL:       # BB#0: # %entry
    219 ; ALL-NEXT:    vpxord %zmm0, %zmm0, %zmm0
    220 ; ALL-NEXT:    vcmpunordps %zmm0, %zmm0, %k1
    221 ; ALL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0
    222 ; ALL-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
    223 ; ALL-NEXT:    knotw %k1, %k1
    224 ; ALL-NEXT:    vmovdqa32 %zmm0, %zmm0 {%k1} {z}
    225 ; ALL-NEXT:    retq
    226 entry:
    227   %0 = sext <16 x i1> zeroinitializer to <16 x i32>
    228   %1 = fcmp uno <16 x float> undef, zeroinitializer
    229   %2 = sext <16 x i1> %1 to <16 x i32>
    230   %3 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %2
    231   ret <16 x i32> %3
    232 }
    233 
    234 ; We implement the set1 intrinsics with vector initializers.  Verify that the
    235 ; IR generated will produce broadcasts at the end.
    236 define <8 x double> @test_set1_pd(double %d) #2 {
    237 ; ALL-LABEL: test_set1_pd:
    238 ; ALL:       # BB#0: # %entry
    239 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
    240 ; ALL-NEXT:    retq
    241 entry:
    242   %vecinit.i = insertelement <8 x double> undef, double %d, i32 0
    243   %vecinit1.i = insertelement <8 x double> %vecinit.i, double %d, i32 1
    244   %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %d, i32 2
    245   %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %d, i32 3
    246   %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %d, i32 4
    247   %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %d, i32 5
    248   %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %d, i32 6
    249   %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %d, i32 7
    250   ret <8 x double> %vecinit7.i
    251 }
    252 
    253 define <8 x i64> @test_set1_epi64(i64 %d) #2 {
    254 ; ALL-LABEL: test_set1_epi64:
    255 ; ALL:       # BB#0: # %entry
    256 ; ALL-NEXT:    vpbroadcastq %rdi, %zmm0
    257 ; ALL-NEXT:    retq
    258 entry:
    259   %vecinit.i = insertelement <8 x i64> undef, i64 %d, i32 0
    260   %vecinit1.i = insertelement <8 x i64> %vecinit.i, i64 %d, i32 1
    261   %vecinit2.i = insertelement <8 x i64> %vecinit1.i, i64 %d, i32 2
    262   %vecinit3.i = insertelement <8 x i64> %vecinit2.i, i64 %d, i32 3
    263   %vecinit4.i = insertelement <8 x i64> %vecinit3.i, i64 %d, i32 4
    264   %vecinit5.i = insertelement <8 x i64> %vecinit4.i, i64 %d, i32 5
    265   %vecinit6.i = insertelement <8 x i64> %vecinit5.i, i64 %d, i32 6
    266   %vecinit7.i = insertelement <8 x i64> %vecinit6.i, i64 %d, i32 7
    267   ret <8 x i64> %vecinit7.i
    268 }
    269 
    270 define <16 x float> @test_set1_ps(float %f) #2 {
    271 ; ALL-LABEL: test_set1_ps:
    272 ; ALL:       # BB#0: # %entry
    273 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
    274 ; ALL-NEXT:    retq
    275 entry:
    276   %vecinit.i = insertelement <16 x float> undef, float %f, i32 0
    277   %vecinit1.i = insertelement <16 x float> %vecinit.i, float %f, i32 1
    278   %vecinit2.i = insertelement <16 x float> %vecinit1.i, float %f, i32 2
    279   %vecinit3.i = insertelement <16 x float> %vecinit2.i, float %f, i32 3
    280   %vecinit4.i = insertelement <16 x float> %vecinit3.i, float %f, i32 4
    281   %vecinit5.i = insertelement <16 x float> %vecinit4.i, float %f, i32 5
    282   %vecinit6.i = insertelement <16 x float> %vecinit5.i, float %f, i32 6
    283   %vecinit7.i = insertelement <16 x float> %vecinit6.i, float %f, i32 7
    284   %vecinit8.i = insertelement <16 x float> %vecinit7.i, float %f, i32 8
    285   %vecinit9.i = insertelement <16 x float> %vecinit8.i, float %f, i32 9
    286   %vecinit10.i = insertelement <16 x float> %vecinit9.i, float %f, i32 10
    287   %vecinit11.i = insertelement <16 x float> %vecinit10.i, float %f, i32 11
    288   %vecinit12.i = insertelement <16 x float> %vecinit11.i, float %f, i32 12
    289   %vecinit13.i = insertelement <16 x float> %vecinit12.i, float %f, i32 13
    290   %vecinit14.i = insertelement <16 x float> %vecinit13.i, float %f, i32 14
    291   %vecinit15.i = insertelement <16 x float> %vecinit14.i, float %f, i32 15
    292   ret <16 x float> %vecinit15.i
    293 }
    294 
    295 define <16 x i32> @test_set1_epi32(i32 %f) #2 {
    296 ; ALL-LABEL: test_set1_epi32:
    297 ; ALL:       # BB#0: # %entry
    298 ; ALL-NEXT:    vpbroadcastd %edi, %zmm0
    299 ; ALL-NEXT:    retq
    300 entry:
    301   %vecinit.i = insertelement <16 x i32> undef, i32 %f, i32 0
    302   %vecinit1.i = insertelement <16 x i32> %vecinit.i, i32 %f, i32 1
    303   %vecinit2.i = insertelement <16 x i32> %vecinit1.i, i32 %f, i32 2
    304   %vecinit3.i = insertelement <16 x i32> %vecinit2.i, i32 %f, i32 3
    305   %vecinit4.i = insertelement <16 x i32> %vecinit3.i, i32 %f, i32 4
    306   %vecinit5.i = insertelement <16 x i32> %vecinit4.i, i32 %f, i32 5
    307   %vecinit6.i = insertelement <16 x i32> %vecinit5.i, i32 %f, i32 6
    308   %vecinit7.i = insertelement <16 x i32> %vecinit6.i, i32 %f, i32 7
    309   %vecinit8.i = insertelement <16 x i32> %vecinit7.i, i32 %f, i32 8
    310   %vecinit9.i = insertelement <16 x i32> %vecinit8.i, i32 %f, i32 9
    311   %vecinit10.i = insertelement <16 x i32> %vecinit9.i, i32 %f, i32 10
    312   %vecinit11.i = insertelement <16 x i32> %vecinit10.i, i32 %f, i32 11
    313   %vecinit12.i = insertelement <16 x i32> %vecinit11.i, i32 %f, i32 12
    314   %vecinit13.i = insertelement <16 x i32> %vecinit12.i, i32 %f, i32 13
    315   %vecinit14.i = insertelement <16 x i32> %vecinit13.i, i32 %f, i32 14
    316   %vecinit15.i = insertelement <16 x i32> %vecinit14.i, i32 %f, i32 15
    317   ret <16 x i32> %vecinit15.i
    318 }
    319 
    320 ; We implement the scalar broadcast intrinsics with vector initializers.
    321 ; Verify that the IR generated will produce the broadcast at the end.
    322 define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
    323 ; ALL-LABEL: test_mm512_broadcastsd_pd:
    324 ; ALL:       # BB#0: # %entry
    325 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
    326 ; ALL-NEXT:    retq
    327 entry:
    328   %0 = extractelement <2 x double> %a, i32 0
    329   %vecinit.i = insertelement <8 x double> undef, double %0, i32 0
    330   %vecinit1.i = insertelement <8 x double> %vecinit.i, double %0, i32 1
    331   %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %0, i32 2
    332   %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %0, i32 3
    333   %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %0, i32 4
    334   %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %0, i32 5
    335   %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %0, i32 6
    336   %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %0, i32 7
    337   ret <8 x double> %vecinit7.i
    338 }
    339 
    340 define <16 x float> @test1(<8 x float>%a)  {
    341 ; ALL-LABEL: test1:
    342 ; ALL:       # BB#0:
    343 ; ALL-NEXT:    vbroadcastss %xmm0, %zmm0
    344 ; ALL-NEXT:    retq
    345   %res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
    346   ret <16 x float>%res
    347 }
    348 
    349 define <8 x double> @test2(<4 x double>%a)  {
    350 ; ALL-LABEL: test2:
    351 ; ALL:       # BB#0:
    352 ; ALL-NEXT:    vbroadcastsd %xmm0, %zmm0
    353 ; ALL-NEXT:    retq
    354   %res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
    355   ret <8 x double>%res
    356 }
    357 
    358 define <64 x i8> @_invec32xi8(<32 x i8>%a)  {
    359 ; AVX512F-LABEL: _invec32xi8:
    360 ; AVX512F:       # BB#0:
    361 ; AVX512F-NEXT:    vpbroadcastb %xmm0, %ymm0
    362 ; AVX512F-NEXT:    vmovaps %zmm0, %zmm1
    363 ; AVX512F-NEXT:    retq
    364 ;
    365 ; AVX512BW-LABEL: _invec32xi8:
    366 ; AVX512BW:       # BB#0:
    367 ; AVX512BW-NEXT:    vpbroadcastb %xmm0, %zmm0
    368 ; AVX512BW-NEXT:    retq
    369   %res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
    370   ret <64 x i8>%res
    371 }
    372 
    373 define <32 x i16> @_invec16xi16(<16 x i16>%a)  {
    374 ; AVX512F-LABEL: _invec16xi16:
    375 ; AVX512F:       # BB#0:
    376 ; AVX512F-NEXT:    vpbroadcastw %xmm0, %ymm0
    377 ; AVX512F-NEXT:    vmovaps %zmm0, %zmm1
    378 ; AVX512F-NEXT:    retq
    379 ;
    380 ; AVX512BW-LABEL: _invec16xi16:
    381 ; AVX512BW:       # BB#0:
    382 ; AVX512BW-NEXT:    vpbroadcastw %xmm0, %zmm0
    383 ; AVX512BW-NEXT:    retq
    384   %res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
    385   ret <32 x i16>%res
    386 }
    387 
    388 define <16 x i32> @_invec8xi32(<8 x i32>%a)  {
    389 ; ALL-LABEL: _invec8xi32:
    390 ; ALL:       # BB#0:
    391 ; ALL-NEXT:    vpbroadcastd %xmm0, %zmm0
    392 ; ALL-NEXT:    retq
    393   %res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
    394   ret <16 x i32>%res
    395 }
    396 
    397 define <8 x i64> @_invec4xi64(<4 x i64>%a)  {
    398 ; ALL-LABEL: _invec4xi64:
    399 ; ALL:       # BB#0:
    400 ; ALL-NEXT:    vpbroadcastq %xmm0, %zmm0
    401 ; ALL-NEXT:    retq
    402   %res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
    403   ret <8 x i64>%res
    404 }
    405 
    406 declare void @func_f32(float)
    407 define <16 x float> @broadcast_ss_spill(float %x) {
    408 ; ALL-LABEL: broadcast_ss_spill:
    409 ; ALL:       # BB#0:
    410 ; ALL-NEXT:    pushq %rax
    411 ; ALL-NEXT:  .Ltmp0:
    412 ; ALL-NEXT:    .cfi_def_cfa_offset 16
    413 ; ALL-NEXT:    vaddss %xmm0, %xmm0, %xmm0
    414 ; ALL-NEXT:    vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Folded Spill
    415 ; ALL-NEXT:    callq func_f32
    416 ; ALL-NEXT:    vbroadcastss {{[0-9]+}}(%rsp), %zmm0 # 4-byte Folded Reload
    417 ; ALL-NEXT:    popq %rax
    418 ; ALL-NEXT:    retq
    419   %a  = fadd float %x, %x
    420   call void @func_f32(float %a)
    421   %b = insertelement <16 x float> undef, float %a, i32 0
    422   %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
    423   ret <16 x float> %c
    424 }
    425 
    426 declare void @func_f64(double)
    427 define <8 x double> @broadcast_sd_spill(double %x) {
    428 ; ALL-LABEL: broadcast_sd_spill:
    429 ; ALL:       # BB#0:
    430 ; ALL-NEXT:    pushq %rax
    431 ; ALL-NEXT:  .Ltmp1:
    432 ; ALL-NEXT:    .cfi_def_cfa_offset 16
    433 ; ALL-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
    434 ; ALL-NEXT:    vmovsd %xmm0, (%rsp) # 8-byte Folded Spill
    435 ; ALL-NEXT:    callq func_f64
    436 ; ALL-NEXT:    vbroadcastsd (%rsp), %zmm0 # 8-byte Folded Reload
    437 ; ALL-NEXT:    popq %rax
    438 ; ALL-NEXT:    retq
    439   %a  = fadd double %x, %x
    440   call void @func_f64(double %a)
    441   %b = insertelement <8 x double> undef, double %a, i32 0
    442   %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
    443   ret <8 x double> %c
    444 }
    445