; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64

; When extracting multiple consecutive elements from a larger
; vector into a smaller one, do it efficiently. We should use
; an EXTRACT_SUBVECTOR node internally rather than a bunch of
; single element extractions.
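; For reference, a minimal sketch (not part of the autogenerated checks) of
; the single shufflevector that the extract/insert chains below are
; equivalent to, and which maps directly onto an EXTRACT_SUBVECTOR of the
; high half:
;   %high = shufflevector <8 x float> %v, <8 x float> undef,
;                         <4 x i32> <i32 4, i32 5, i32 6, i32 7>
;   store <4 x float> %high, <4 x float>* %ptr, align 16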

; Extracting the low elements only requires using the right kind of store.
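; (The low 128 bits of %ymm0 are aliased by %xmm0, so no extract instruction
; is needed here at all: a plain 128-bit vmovaps of %xmm0 is the right store,
; followed by vzeroupper to avoid an AVX-to-SSE transition penalty later.)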
define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: low_v8f32_to_v4f32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vmovaps %xmm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: low_v8f32_to_v4f32:
; X64:       # BB#0:
; X64-NEXT:    vmovaps %xmm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 0
  %ext1 = extractelement <8 x float> %v, i32 1
  %ext2 = extractelement <8 x float> %v, i32 2
  %ext3 = extractelement <8 x float> %v, i32 3
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}

; Extracting the high elements requires just one AVX instruction.
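; (vextractf128 accepts a memory destination, so the lane extract and the
; 128-bit store fold into a single instruction.)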
define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: high_v8f32_to_v4f32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v8f32_to_v4f32:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 4
  %ext1 = extractelement <8 x float> %v, i32 5
  %ext2 = extractelement <8 x float> %v, i32 6
  %ext3 = extractelement <8 x float> %v, i32 7
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}

; Make sure element type doesn't alter the codegen. Note that
; if we were actually using the vector in this function and
; had AVX2, we should generate vextracti128 (the integer version).
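; A sketch of that hoped-for AVX2 form (assuming -mattr=+avx2, not checked
; here):
;   vextracti128 $1, %ymm0, (%rdi)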
define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; X32-LABEL: high_v8i32_to_v4i32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v8i32_to_v4i32:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x i32> %v, i32 4
  %ext1 = extractelement <8 x i32> %v, i32 5
  %ext2 = extractelement <8 x i32> %v, i32 6
  %ext3 = extractelement <8 x i32> %v, i32 7
  %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
  %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
  %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
  store <4 x i32> %ins3, <4 x i32>* %ptr, align 16
  ret void
}

; Make sure that element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
; X32-LABEL: high_v4f64_to_v2f64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v4f64_to_v2f64:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <4 x double> %v, i32 2
  %ext1 = extractelement <4 x double> %v, i32 3
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  store <2 x double> %ins1, <2 x double>* %ptr, align 16
  ret void
}

; PR25320: Make sure that a widened (possibly legalized) vector correctly zero-extends the upper elements.
; FIXME - Ideally these should just use VMOVD/VMOVQ/VMOVSS/VMOVSD.
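; For example, @legal_vzmovl_2f64_4f64 could plausibly reduce to just (a
; sketch of the hoped-for output, not an autogenerated check):
;   vmovsd (%rdi), %xmm0        # the VEX scalar load zeroes the rest of %ymm0
;   vmovapd %ymm0, (%rsi)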

define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; X32-LABEL: legal_vzmovl_2i32_8i32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; X32-NEXT:    vxorps %ymm1, %ymm1, %ymm1
; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
; X64:       # BB#0:
; X64-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; X64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i32>, <2 x i32>* %in, align 8
  %ext = extractelement <2 x i32> %ld, i64 0
  %ins = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %ext, i64 0
  store <8 x i32> %ins, <8 x i32>* %out, align 32
  ret void
}

define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; X32-LABEL: legal_vzmovl_2i64_4i64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovupd (%ecx), %xmm0
; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X32-NEXT:    vmovapd %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
; X64:       # BB#0:
; X64-NEXT:    vmovupd (%rdi), %xmm0
; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X64-NEXT:    vmovapd %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i64>, <2 x i64>* %in, align 8
  %ext = extractelement <2 x i64> %ld, i64 0
  %ins = insertelement <4 x i64> <i64 undef, i64 0, i64 0, i64 0>, i64 %ext, i64 0
  store <4 x i64> %ins, <4 x i64>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; X32-LABEL: legal_vzmovl_2f32_8f32:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
; X64:       # BB#0:
; X64-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vxorps %ymm1, %ymm1, %ymm1
; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x float>, <2 x float>* %in, align 8
  %ext = extractelement <2 x float> %ld, i64 0
  %ins = insertelement <8 x float> <float undef, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, float %ext, i64 0
  store <8 x float> %ins, <8 x float>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; X32-LABEL: legal_vzmovl_2f64_4f64:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovupd (%ecx), %xmm0
; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X32-NEXT:    vmovapd %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
; X64:       # BB#0:
; X64-NEXT:    vmovupd (%rdi), %xmm0
; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; X64-NEXT:    vmovapd %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x double>, <2 x double>* %in, align 8
  %ext = extractelement <2 x double> %ld, i64 0
  %ins = insertelement <4 x double> <double undef, double 0.0, double 0.0, double 0.0>, double %ext, i64 0
  store <4 x double> %ins, <4 x double>* %out, align 32
  ret void
}