; (code-viewer navigation residue, commented out to keep the file valid IR)
; Home | History | Annotate | Download | only in X86
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s
      3 
; Extract the upper 128-bit lane (elements 4-7) of a 256-bit float vector via
; shufflevector. Indices 8 select from the undef second operand, so the upper
; half of the result is don't-care; the expected lowering is a single
; vextractf128 of lane 1.
      4 define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
      5 ; CHECK-LABEL: A:
      6 ; CHECK:       ## %bb.0: ## %entry
      7 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
      8 ; CHECK-NEXT:    retq
      9 entry:
     10   %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 8, i32 8, i32 8>
     11   ret <8 x float> %shuffle
     12 }
     13 
; Same pattern as @A for <4 x double>: elements 2-3 form the upper lane, the
; index-4 entries reference the undef operand, and the shuffle should lower to
; one vextractf128 of lane 1.
     14 define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
     15 ; CHECK-LABEL: B:
     16 ; CHECK:       ## %bb.0: ## %entry
     17 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
     18 ; CHECK-NEXT:    retq
     19 entry:
     20   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
     21   ret <4 x double> %shuffle
     22 }
     23 
; vextractf128 intrinsic (float form, lane 1) whose only use is an aligned
; store: the extract and store should fold into the memory-destination form of
; vextractf128 (no intermediate xmm register in the output).
     24 define void @t0(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
     25 ; CHECK-LABEL: t0:
     26 ; CHECK:       ## %bb.0: ## %entry
     27 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
     28 ; CHECK-NEXT:    vzeroupper
     29 ; CHECK-NEXT:    retq
     30 entry:
     31   %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 1)
     32   %1 = bitcast float* %addr to <4 x float>*
     33   store <4 x float> %0, <4 x float>* %1, align 16
     34   ret void
     35 }
     36 
; Double-precision variant of @t0: lane-1 extract intrinsic feeding an aligned
; store must fold into a single memory-form vextractf128.
     37 define void @t2(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
     38 ; CHECK-LABEL: t2:
     39 ; CHECK:       ## %bb.0: ## %entry
     40 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
     41 ; CHECK-NEXT:    vzeroupper
     42 ; CHECK-NEXT:    retq
     43 entry:
     44   %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 1)
     45   %1 = bitcast double* %addr to <2 x double>*
     46   store <2 x double> %0, <2 x double>* %1, align 16
     47   ret void
     48 }
     49 
; Integer variant of @t0: bitcasts around the .si.256 extract intrinsic
; (lane 1) should not block folding the extract+store into one memory-form
; vextractf128.
     50 define void @t4(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
     51 ; CHECK-LABEL: t4:
     52 ; CHECK:       ## %bb.0: ## %entry
     53 ; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rdi)
     54 ; CHECK-NEXT:    vzeroupper
     55 ; CHECK-NEXT:    retq
     56 entry:
     57   %0 = bitcast <4 x i64> %a to <8 x i32>
     58   %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 1)
     59   %2 = bitcast <4 x i32> %1 to <2 x i64>
     60   store <2 x i64> %2, <2 x i64>* %addr, align 16
     61   ret void
     62 }
     63 
; Extracting lane 0 is a no-op (the low xmm half is already in xmm0), so the
; extract intrinsic should disappear and the store become a plain aligned
; vmovaps.
     64 define void @t5(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
     65 ; CHECK-LABEL: t5:
     66 ; CHECK:       ## %bb.0: ## %entry
     67 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
     68 ; CHECK-NEXT:    vzeroupper
     69 ; CHECK-NEXT:    retq
     70 entry:
     71   %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
     72   %1 = bitcast float* %addr to <4 x float>*
     73   store <4 x float> %0, <4 x float>* %1, align 16
     74   ret void
     75 }
     76 
; Double-precision lane-0 variant of @t5: the extract is elided and the
; aligned store lowers to vmovaps of xmm0.
     77 define void @t6(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
     78 ; CHECK-LABEL: t6:
     79 ; CHECK:       ## %bb.0: ## %entry
     80 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
     81 ; CHECK-NEXT:    vzeroupper
     82 ; CHECK-NEXT:    retq
     83 entry:
     84   %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
     85   %1 = bitcast double* %addr to <2 x double>*
     86   store <2 x double> %0, <2 x double>* %1, align 16
     87   ret void
     88 }
     89 
; Integer lane-0 variant of @t5: bitcasts plus a no-op extract of the low half
; collapse to a single aligned vmovaps store.
     90 define void @t7(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
     91 ; CHECK-LABEL: t7:
     92 ; CHECK:       ## %bb.0: ## %entry
     93 ; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
     94 ; CHECK-NEXT:    vzeroupper
     95 ; CHECK-NEXT:    retq
     96 entry:
     97   %0 = bitcast <4 x i64> %a to <8 x i32>
     98   %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
     99   %2 = bitcast <4 x i32> %1 to <2 x i64>
    100   store <2 x i64> %2, <2 x i64>* %addr, align 16
    101   ret void
    102 }
    103 
; Same as @t7 but with an unaligned (align 1) store: must use the unaligned
; vmovups rather than vmovaps.
    104 define void @t8(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
    105 ; CHECK-LABEL: t8:
    106 ; CHECK:       ## %bb.0: ## %entry
    107 ; CHECK-NEXT:    vmovups %xmm0, (%rdi)
    108 ; CHECK-NEXT:    vzeroupper
    109 ; CHECK-NEXT:    retq
    110 entry:
    111   %0 = bitcast <4 x i64> %a to <8 x i32>
    112   %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
    113   %2 = bitcast <4 x i32> %1 to <2 x i64>
    114   store <2 x i64> %2, <2 x i64>* %addr, align 1
    115   ret void
    116 }
    117 
    118 ; PR15462
; PR15462: four consecutive i64 zero-stores (32 bytes total) should be merged
; into a single zeroed-ymm store (vxorps + vmovups %ymm0) rather than emitted
; as separate scalar stores or a buggy extract sequence.
    119 define void @t9(i64* %p) {
    120 ; CHECK-LABEL: t9:
    121 ; CHECK:       ## %bb.0:
    122 ; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
    123 ; CHECK-NEXT:    vmovups %ymm0, (%rdi)
    124 ; CHECK-NEXT:    vzeroupper
    125 ; CHECK-NEXT:    retq
    126  store i64 0, i64* %p
    127  %q = getelementptr i64, i64* %p, i64 1
    128  store i64 0, i64* %q
    129  %r = getelementptr i64, i64* %p, i64 2
    130  store i64 0, i64* %r
    131  %s = getelementptr i64, i64* %p, i64 3
    132  store i64 0, i64* %s
    133  ret void
    134 }
    135 
    136 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
    137 declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
    138 declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind readnone
    139