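; Check that splatting a scalar into every lane of a vector selects a single
; shuffle or broadcast instead of a chain of scalar inserts. Each RUN line
; compiles this file with llc for the given CPU/feature set and verifies the
; output with FileCheck under the matching prefix.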
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse2 | FileCheck %s -check-prefix=SSE2
; RUN: llc < %s -march=x86 -mcpu=pentium4 -mattr=+sse3 | FileCheck %s -check-prefix=SSE3
; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=AVX

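; Splat a float into all four lanes of a <4 x float> and multiply. On both
; SSE2 and SSE3 the splat should select a single pshufd $0.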
define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) nounwind {
  %tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0
  %tmp2 = insertelement <4 x float> %tmp, float %X, i32 1
  %tmp4 = insertelement <4 x float> %tmp2, float %X, i32 2
  %tmp6 = insertelement <4 x float> %tmp4, float %X, i32 3
  %tmp8 = load <4 x float>, <4 x float>* %Q
  %tmp10 = fmul <4 x float> %tmp8, %tmp6
  store <4 x float> %tmp10, <4 x float>* %P
  ret void

; SSE2-LABEL: test_v4sf:
; SSE2: pshufd $0

; SSE3-LABEL: test_v4sf:
; SSE3: pshufd $0
}

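; Splat a double into both lanes of a <2 x double>. SSE2 has to synthesize the
; splat with shufpd $0; SSE3 can use movddup, its dedicated double-splat
; instruction.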
define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
  %tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0
  %tmp2 = insertelement <2 x double> %tmp, double %X, i32 1
  %tmp4 = load <2 x double>, <2 x double>* %Q
  %tmp6 = fmul <2 x double> %tmp4, %tmp2
  store <2 x double> %tmp6, <2 x double>* %P
  ret void

; SSE2-LABEL: test_v2sd:
; SSE2: shufpd $0

; SSE3-LABEL: test_v2sd:
; SSE3: movddup
}

; Fold the extract of a load into the load's address computation. This avoids
; a spill to the stack.
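; With AVX the whole splat should collapse to a single vbroadcastss from
; memory; the AVX-NOT: rsp check below guards against a round trip through
; the stack.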
define <4 x float> @load_extract_splat(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
  %1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 %i
  %2 = load <4 x float>, <4 x float>* %1, align 16
  %3 = trunc i64 %j to i32
  %4 = extractelement <4 x float> %2, i32 %3
  %5 = insertelement <4 x float> undef, float %4, i32 0
  %6 = insertelement <4 x float> %5, float %4, i32 1
  %7 = insertelement <4 x float> %6, float %4, i32 2
  %8 = insertelement <4 x float> %7, float %4, i32 3
  ret <4 x float> %8

; AVX-LABEL: load_extract_splat
; AVX-NOT: rsp
; AVX: vbroadcastss
}

; Same fold as above, but the extract index stays i64 instead of being
; truncated to i32.
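; The AVX-NOT: movs check guards against a scalar movss/movsd load followed
; by a shuffle; the splat should again be a single vbroadcastss.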
define <4 x float> @load_extract_splat1(<4 x float>* nocapture readonly %ptr, i64 %i, i64 %j) nounwind {
  %1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 %i
  %2 = load <4 x float>, <4 x float>* %1, align 16
  %3 = extractelement <4 x float> %2, i64 %j
  %4 = insertelement <4 x float> undef, float %3, i32 0
  %5 = insertelement <4 x float> %4, float %3, i32 1
  %6 = insertelement <4 x float> %5, float %3, i32 2
  %7 = insertelement <4 x float> %6, float %3, i32 3
  ret <4 x float> %7

; AVX-LABEL: load_extract_splat1
; AVX-NOT: movs
; AVX: vbroadcastss
}
     69