; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; When extracting multiple consecutive elements from a larger
; vector into a smaller one, do it efficiently. We should use
; an EXTRACT_SUBVECTOR node internally rather than a bunch of
; single-element extractions.

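; As a point of reference, each run of consecutive extract/insert pairs below
; is equivalent to a single subvector shuffle. For example, the low-half
; extraction in the first test could be written as follows (a sketch, kept as
; a comment so it stays out of the autogenerated checks):
;
;   %low = shufflevector <8 x float> %v, <8 x float> undef,
;                        <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;   store <4 x float> %low, <4 x float>* %ptr, align 16
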
; Extracting the low elements only requires using the right kind of store.
define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; CHECK-LABEL: low_v8f32_to_v4f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 0
  %ext1 = extractelement <8 x float> %v, i32 1
  %ext2 = extractelement <8 x float> %v, i32 2
  %ext3 = extractelement <8 x float> %v, i32 3
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}

; Extracting the high elements requires just one AVX instruction.
define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; CHECK-LABEL: high_v8f32_to_v4f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 4
  %ext1 = extractelement <8 x float> %v, i32 5
  %ext2 = extractelement <8 x float> %v, i32 6
  %ext3 = extractelement <8 x float> %v, i32 7
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}
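
; The high-half equivalent uses shuffle indices 4-7, which map directly onto
; the upper 128-bit lane that vextractf128 $1 stores (again a sketch, kept as
; a comment so it stays out of the autogenerated checks):
;
;   %high = shufflevector <8 x float> %v, <8 x float> undef,
;                         <4 x i32> <i32 4, i32 5, i32 6, i32 7>
;   store <4 x float> %high, <4 x float>* %ptr, align 16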

; Make sure the element type doesn't alter the codegen. Note that
; if we were actually using the vector in this function and had
; AVX2, we would generate vextracti128 (the int version).
define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; CHECK-LABEL: high_v8i32_to_v4i32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ext0 = extractelement <8 x i32> %v, i32 4
  %ext1 = extractelement <8 x i32> %v, i32 5
  %ext2 = extractelement <8 x i32> %v, i32 6
  %ext3 = extractelement <8 x i32> %v, i32 7
  %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
  %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
  %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
  store <4 x i32> %ins3, <4 x i32>* %ptr, align 16
  ret void
}

; Make sure that the element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
; CHECK-LABEL: high_v4f64_to_v2f64:
; CHECK:       # BB#0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ext0 = extractelement <4 x double> %v, i32 2
  %ext1 = extractelement <4 x double> %v, i32 3
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  store <2 x double> %ins1, <2 x double>* %ptr, align 16
  ret void
}

; PR25320: Make sure that a widened (possibly legalized) vector correctly zero-extends the upper elements.
; FIXME - Ideally these should just use VMOVD/VMOVQ/VMOVSS/VMOVSD.

define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; CHECK-LABEL: legal_vzmovl_2i32_8i32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; CHECK-NEXT:    vmovaps %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ld = load <2 x i32>, <2 x i32>* %in, align 8
  %ext = extractelement <2 x i32> %ld, i64 0
  %ins = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %ext, i64 0
  store <8 x i32> %ins, <8 x i32>* %out, align 32
  ret void
}
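
; The insertelement into a mostly-zero constant above is the "vzmovl" pattern
; (move the scalar into lane 0 and zero everything above it). An equivalent
; shuffle-based form for the i32 case would be (a sketch, kept as a comment
; so it stays out of the autogenerated checks):
;
;   %ld  = load <2 x i32>, <2 x i32>* %in, align 8
;   %ins = shufflevector <2 x i32> %ld, <2 x i32> zeroinitializer,
;                        <8 x i32> <i32 0, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
;   store <8 x i32> %ins, <8 x i32>* %out, align 32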

define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; CHECK-LABEL: legal_vzmovl_2i64_4i64:
; CHECK:       # BB#0:
; CHECK-NEXT:    vmovupd (%rdi), %xmm0
; CHECK-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; CHECK-NEXT:    vmovapd %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ld = load <2 x i64>, <2 x i64>* %in, align 8
  %ext = extractelement <2 x i64> %ld, i64 0
  %ins = insertelement <4 x i64> <i64 undef, i64 0, i64 0, i64 0>, i64 %ext, i64 0
  store <4 x i64> %ins, <4 x i64>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; CHECK-LABEL: legal_vzmovl_2f32_8f32:
; CHECK:       # BB#0:
; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; CHECK-NEXT:    vmovaps %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ld = load <2 x float>, <2 x float>* %in, align 8
  %ext = extractelement <2 x float> %ld, i64 0
  %ins = insertelement <8 x float> <float undef, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, float %ext, i64 0
  store <8 x float> %ins, <8 x float>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; CHECK-LABEL: legal_vzmovl_2f64_4f64:
; CHECK:       # BB#0:
; CHECK-NEXT:    vmovupd (%rdi), %xmm0
; CHECK-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; CHECK-NEXT:    vmovapd %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  %ld = load <2 x double>, <2 x double>* %in, align 8
  %ext = extractelement <2 x double> %ld, i64 0
  %ins = insertelement <4 x double> <double undef, double 0.0, double 0.0, double 0.0>, double %ext, i64 0
  store <4 x double> %ins, <4 x double>* %out, align 32
  ret void
}
    149