; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X32_AVX256
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64_AVX256
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=X32_AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64_AVX512

; Build a <2 x i16> from the two scalar args, bitcast it to float (both are
; 32 bits), and insert that as element 1 of a 256-bit <8 x float>.
define <8 x float> @insert_subvector_256(i16 %x0, i16 %x1, <8 x float> %v) nounwind {
; X32_AVX256-LABEL: insert_subvector_256:
; X32_AVX256:       # %bb.0:
; X32_AVX256-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX256-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X32_AVX256-NEXT:    vpbroadcastd %xmm1, %xmm1
; X32_AVX256-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; X32_AVX256-NEXT:    retl
;
; X64_AVX256-LABEL: insert_subvector_256:
; X64_AVX256:       # %bb.0:
; X64_AVX256-NEXT:    vmovd %edi, %xmm1
; X64_AVX256-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
; X64_AVX256-NEXT:    vpbroadcastd %xmm1, %xmm1
; X64_AVX256-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; X64_AVX256-NEXT:    retq
;
; X32_AVX512-LABEL: insert_subvector_256:
; X32_AVX512:       # %bb.0:
; X32_AVX512-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32_AVX512-NEXT:    vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X32_AVX512-NEXT:    vpbroadcastd %xmm1, %xmm1
; X32_AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; X32_AVX512-NEXT:    retl
;
; X64_AVX512-LABEL: insert_subvector_256:
; X64_AVX512:       # %bb.0:
; X64_AVX512-NEXT:    vmovd %edi, %xmm1
; X64_AVX512-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
; X64_AVX512-NEXT:    vpbroadcastd %xmm1, %xmm1
; X64_AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; X64_AVX512-NEXT:    retq
  %ins1 = insertelement <2 x i16> undef, i16 %x0, i32 0
  %ins2 = insertelement <2 x i16> %ins1, i16 %x1, i32 1
  %bc = bitcast <2 x i16> %ins2 to float
  %ins3 = insertelement <8 x float> %v, float %bc, i32 1
  ret <8 x float> %ins3
}

; Same pattern one size up: a <2 x i32> built from the scalar args is bitcast
; to i64 and inserted as element 2 of a 512-bit <8 x i64>.
define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind {
; X32_AVX256-LABEL: insert_subvector_512:
; X32_AVX256:       # %bb.0:
; X32_AVX256-NEXT:    pushl %ebp
; X32_AVX256-NEXT:    movl %esp, %ebp
; X32_AVX256-NEXT:    andl $-8, %esp
; X32_AVX256-NEXT:    subl $8, %esp
; X32_AVX256-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; X32_AVX256-NEXT:    vmovlps %xmm2, (%esp)
; X32_AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm2
; X32_AVX256-NEXT:    vpinsrd $0, (%esp), %xmm2, %xmm2
; X32_AVX256-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
; X32_AVX256-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
; X32_AVX256-NEXT:    movl %ebp, %esp
; X32_AVX256-NEXT:    popl %ebp
; X32_AVX256-NEXT:    retl
;
; X64_AVX256-LABEL: insert_subvector_512:
; X64_AVX256:       # %bb.0:
; X64_AVX256-NEXT:    vmovd %edi, %xmm2
; X64_AVX256-NEXT:    vpinsrd $1, %esi, %xmm2, %xmm2
; X64_AVX256-NEXT:    vmovq %xmm2, %rax
; X64_AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm2
; X64_AVX256-NEXT:    vpinsrq $0, %rax, %xmm2, %xmm2
; X64_AVX256-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
; X64_AVX256-NEXT:    retq
;
; X32_AVX512-LABEL: insert_subvector_512:
; X32_AVX512:       # %bb.0:
; X32_AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; X32_AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,1,0,8,0,3,0,4,0,5,0,6,0,7,0]
; X32_AVX512-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
; X32_AVX512-NEXT:    retl
;
; X64_AVX512-LABEL: insert_subvector_512:
; X64_AVX512:       # %bb.0:
; X64_AVX512-NEXT:    vmovd %edi, %xmm1
; X64_AVX512-NEXT:    vpinsrd $1, %esi, %xmm1, %xmm1
; X64_AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,8,3,4,5,6,7]
; X64_AVX512-NEXT:    vpermt2q %zmm1, %zmm2, %zmm0
; X64_AVX512-NEXT:    retq
  %ins1 = insertelement <2 x i32> undef, i32 %x0, i32 0
  %ins2 = insertelement <2 x i32> %ins1, i32 %x1, i32 1
  %bc = bitcast <2 x i32> %ins2 to i64
  %ins3 = insertelement <8 x i64> %v, i64 %bc, i32 2
  ret <8 x i64> %ins3
}

; PR34716 - https://bugs.llvm.org/show_bug.cgi?id=34716
; Special case: if we're inserting into an undef vector, we can optimize more.
; The inserted i64 is splatted to all 8 lanes via shufflevector.

define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
; X32_AVX256-LABEL: insert_subvector_into_undef:
; X32_AVX256:       # %bb.0:
; X32_AVX256-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32_AVX256-NEXT:    vbroadcastsd %xmm0, %ymm0
; X32_AVX256-NEXT:    vmovaps %ymm0, %ymm1
; X32_AVX256-NEXT:    retl
;
; X64_AVX256-LABEL: insert_subvector_into_undef:
; X64_AVX256:       # %bb.0:
; X64_AVX256-NEXT:    vmovd %edi, %xmm0
; X64_AVX256-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; X64_AVX256-NEXT:    vpbroadcastq %xmm0, %ymm0
; X64_AVX256-NEXT:    vmovdqa %ymm0, %ymm1
; X64_AVX256-NEXT:    retq
;
; X32_AVX512-LABEL: insert_subvector_into_undef:
; X32_AVX512:       # %bb.0:
; X32_AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32_AVX512-NEXT:    vbroadcastsd %xmm0, %zmm0
; X32_AVX512-NEXT:    retl
;
; X64_AVX512-LABEL: insert_subvector_into_undef:
; X64_AVX512:       # %bb.0:
; X64_AVX512-NEXT:    vmovd %edi, %xmm0
; X64_AVX512-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; X64_AVX512-NEXT:    vpbroadcastq %xmm0, %zmm0
; X64_AVX512-NEXT:    retq
  %ins1 = insertelement <2 x i32> undef, i32 %x0, i32 0
  %ins2 = insertelement <2 x i32> %ins1, i32 %x1, i32 1
  %bc = bitcast <2 x i32> %ins2 to i64
  %ins3 = insertelement <8 x i64> undef, i64 %bc, i32 0
  %splat = shufflevector <8 x i64> %ins3, <8 x i64> undef, <8 x i32> zeroinitializer
  ret <8 x i64> %splat
}