; (code-browser navigation header removed: "Home | History | Annotate | Download | only in X86")
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=ALL --check-prefix=AVX
      3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
      4 
      5 define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
      6 ; ALL-LABEL: insert_f32:
      7 ; ALL:       # %bb.0:
      8 ; ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
      9 ; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
     10 ; ALL-NEXT:    retq
; Insert a scalar f32 into lane 0 of a v8f32. Per the CHECK lines above, the
; scalar already sits in xmm1, so both AVX and AVX2 should lower this to a
; single vblendps taking element 0 from ymm1 and elements 1-7 from %y (ymm0).
; %x is unused — presumably only there to influence argument/register
; assignment; confirm against the original test intent.
     11   %i0 = insertelement <8 x float> %y, float %f, i32 0
     12   ret <8 x float> %i0
     13 }
     14 
     15 define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
     16 ; ALL-LABEL: insert_f64:
     17 ; ALL:       # %bb.0:
     18 ; ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
     19 ; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
     20 ; ALL-NEXT:    retq
; Insert a scalar f64 into lane 0 of a v4f64. The CHECK lines expect a single
; vblendps expressed at f32 granularity: one f64 element covers two f32 blend
; lanes, hence ymm1[0,1] (the scalar in xmm1) merged with ymm0[2..7].
     21   %i0 = insertelement <4 x double> %y, double %f, i32 0
     22   ret <4 x double> %i0
     23 }
     24 
     25 define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
     26 ; AVX-LABEL: insert_i8:
     27 ; AVX:       # %bb.0:
     28 ; AVX-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
     29 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     30 ; AVX-NEXT:    retq
     31 ;
     32 ; AVX2-LABEL: insert_i8:
     33 ; AVX2:       # %bb.0:
     34 ; AVX2-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
     35 ; AVX2-NEXT:    vpblendd $0, %edi case — see CHECK lines: the i8 arrives in a GPR (edi), so
     36 ; AVX2-NEXT:    retq
; The i8 arrives in a GPR (%edi per the CHECK lines), so lowering must first
; vpinsrb it into byte 0 of the low 128 bits, then blend that half back over
; %y. AVX (no 256-bit integer blend) uses the FP-domain vblendps; AVX2 uses
; the integer-domain vpblendd.
     37   %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
     38   ret <32 x i8> %i0
     39 }
     40 
     41 define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
     42 ; AVX-LABEL: insert_i16:
     43 ; AVX:       # %bb.0:
     44 ; AVX-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
     45 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     46 ; AVX-NEXT:    retq
     47 ;
     48 ; AVX2-LABEL: insert_i16:
     49 ; AVX2:       # %bb.0:
     50 ; AVX2-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
     51 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     52 ; AVX2-NEXT:    retq
; Same pattern as insert_i8 but at i16 granularity: vpinsrw moves the GPR
; value into word 0 of the low xmm, then the low 128 bits are blended over %y
; — vblendps on AVX (FP domain), vpblendd on AVX2 (integer domain).
     53   %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
     54   ret <16 x i16> %i0
     55 }
     56 
     57 define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
     58 ; AVX-LABEL: insert_i32:
     59 ; AVX:       # %bb.0:
     60 ; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm1
     61 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     62 ; AVX-NEXT:    retq
     63 ;
     64 ; AVX2-LABEL: insert_i32:
     65 ; AVX2:       # %bb.0:
     66 ; AVX2-NEXT:    vmovd %edi, %xmm1
     67 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
     68 ; AVX2-NEXT:    retq
; For i32 the two subtargets diverge (see CHECK lines): AVX uses vpinsrd plus
; a 128-bit-wide vblendps, while AVX2 can vmovd the GPR straight into xmm1 and
; blend only dword 0 with vpblendd — a finer-grained merge than the AVX form.
     69   %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
     70   ret <8 x i32> %i0
     71 }
     72 
     73 define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
     74 ; AVX-LABEL: insert_i64:
     75 ; AVX:       # %bb.0:
     76 ; AVX-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
     77 ; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     78 ; AVX-NEXT:    retq
     79 ;
     80 ; AVX2-LABEL: insert_i64:
     81 ; AVX2:       # %bb.0:
     82 ; AVX2-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
     83 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
     84 ; AVX2-NEXT:    retq
; Insert a scalar i64 (arriving in %rdi) into lane 0 of a v4i64: vpinsrq puts
; it in qword 0 of the low xmm, then the low 128 bits are blended over %y —
; FP-domain vblendps on AVX, integer-domain vpblendd on AVX2, matching the
; other integer cases above.
     85   %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
     86   ret <4 x i64> %i0
     87 }
     88 
     89