; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0

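; Aligned 256-bit loads and stores should stay full-width: the three 32-byte
; loads and the three matching stores below give six vmovaps in total.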
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that a load + scalar_to_vector + ins_subvec
;; + zext sequence is folded into a single vmovss, vmovsd, or vinsertps
;; from memory.
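;; Roughly, the DAG shape being matched is (operand names illustrative):
;;   (insert_subvector (zeroinitializer),
;;                     (scalar_to_vector (load %ptr)), 0)
;; which should become one zero-extending scalar load instead of a load,
;; an insert, and a separate zeroing of the upper elements.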

; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
  %val = load float, float* %ptr
; CHECK: vmovss (%
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
; CHECK: ret
}

; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
  %val = load double, double* %ptr
; CHECK: vmovsd (%
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
; CHECK: ret
}

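; The four stores below check alignment-dependent lowering: a 32-byte-aligned
; 256-bit store stays one vmovaps %ymm, while an under-aligned store is split
; with vextractf128 and unaligned vmovups %xmm halves.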
; CHECK: storev16i16
; CHECK: vmovaps  %ymm
define void @storev16i16(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

; CHECK: storev16i16_01
; CHECK: vextractf128
; CHECK: vmovups  %xmm
define void @storev16i16_01(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

; CHECK: storev32i8
; CHECK: vmovaps  %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

; CHECK: storev32i8_01
; CHECK: vextractf128
; CHECK: vmovups  %xmm
define void @storev32i8_01(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to issue two 128-bit stores if the data is already split
; across two XMM registers, for example after an integer operation.
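; The codegen to avoid here would roughly be (registers illustrative):
;   vinsertf128 $1, %xmm1, %ymm0, %ymm0
;   vmovaps     %ymm0, (%rdi)
; i.e. paying for a cross-lane insert just to use one 256-bit store; the
; CHECK-NOT lines below guard against reintroducing the vinsertf128.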
; CHECK: _double_save
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
; CHECK: vmovaps %xmm
; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind

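; At -O0 the constant mask vector (all-ones in lane 0, zero elsewhere) fed to
; the maskstore is expected to be materialized from a scalar constant-pool
; load merged into a zeroed register, per the CHECK_O0 lines below.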
; CHECK_O0: _f_f
; CHECK_O0: vmovss LCPI
; CHECK_O0: vxorps  %xmm
; CHECK_O0: vmovss %xmm
define void @f_f() nounwind {
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:                                     ; preds = %allocas
  unreachable

cif_mask_mixed:                                   ; preds = %allocas
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:                               ; preds = %cif_mask_mixed
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
  unreachable
}

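; An align-1 256-bit load/store pair is split into 16-byte vmovups accesses
; (two loads, two stores); the halves should be forwarded directly without
; being recombined through vinsertf128/vextractf128.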
; CHECK: add8i32
; CHECK: vmovups
; CHECK: vmovups
; CHECK-NOT: vinsertf128
; CHECK-NOT: vextractf128
; CHECK: vmovups
; CHECK: vmovups
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
  %b = load <8 x i32>, <8 x i32>* %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, <8 x i32>* %ret, align 1
  ret void
}

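; With 64-byte alignment the full-width aligned vmovaps can be used directly.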
; CHECK: add4i64a64
; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>, <4 x i64>* %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 64
  ret void
}

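; With only 16-byte alignment the 256-bit access is under-aligned, so it is
; split into two aligned 128-bit vmovaps loads and two vmovaps stores.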
; CHECK: add4i64a16
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps {{.*}}({{.*}}), %xmm{{.*}}
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
  %b = load <4 x i64>, <4 x i64>* %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, <4 x i64>* %ret, align 16
  ret void
}
    150