; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s -check-prefix=CHECK_O0

; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
; CHECK: vmovaps
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
  %0 = bitcast double* %d to <4 x double>*
  %tmp1.i = load <4 x double>* %0, align 32
  %1 = bitcast float* %f to <8 x float>*
  %tmp1.i17 = load <8 x float>* %1, align 32
  %tmp1.i16 = load <4 x i64>* %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, <4 x double>* %0, align 32
  store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
  store <4 x i64> %tmp1.i16, <4 x i64>* %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that a load + scalar_to_vector + insert_subvector
;; + zext sequence is folded into a single vmovss, vmovsd, or vinsertps from memory.

; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
  %val = load float* %ptr
; CHECK: vinsertps
; CHECK: vinsertf128
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
; CHECK: ret
}

; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
  %val = load double* %ptr
; CHECK: vmovlpd
; CHECK: vinsertf128
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
; CHECK: ret
}

; CHECK: vmovaps %ymm
define void @storev16i16(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 32
  unreachable
}

; CHECK: vmovups %ymm
define void @storev16i16_01(<16 x i16> %a) nounwind {
  store <16 x i16> %a, <16 x i16>* undef, align 4
  unreachable
}

; CHECK: vmovaps %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 32
  unreachable
}

; CHECK: vmovups %ymm
define void @storev32i8_01(<32 x i8> %a) nounwind {
  store <32 x i8> %a, <32 x i8>* undef, align 4
  unreachable
}

; It is faster to do two 128-bit stores when the data is already in XMM
; registers, for example after an integer operation.
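; A single 256-bit store would first have to assemble the two halves with a
; cross-lane vinsertf128, roughly (a sketch of the sequence this test wants to
; avoid; the exact registers are illustrative, not checked output):
;   vinsertf128 $1, %xmm1, %ymm0, %ymm0
;   vmovups %ymm0, (%rdi)
; The CHECK-NOT lines below verify that no vinsertf128 is emitted and that the
; two halves are instead stored with two plain 128-bit vmovaps instructions.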
; CHECK: _double_save
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
; CHECK: vmovaps %xmm
; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, <8 x i32>* %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind

; CHECK_O0: _f_f
; CHECK_O0: vmovss LCPI
; CHECK_O0: vxorps %xmm
; CHECK_O0: vmovss %xmm
define void @f_f() nounwind {
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:                                     ; preds = %allocas
  unreachable

cif_mask_mixed:                                   ; preds = %allocas
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:                               ; preds = %cif_mask_mixed
  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> <float 0xFFFFFFFFE0000000, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
  unreachable
}