; Codegen test: X86 SSE4.1 lowering of vector integer multiplies.
; RUN: llc < %s -march=x86 -mattr=sse41 -stack-alignment=16 -join-physregs > %t
; RUN: grep pmul %t | count 12
; RUN: grep mov %t | count 11

; The f() arguments in %xmm0 and %xmm1 cause an extra movdqa without -join-physregs.

; Multiply a <4 x i32> argument by a splat constant (117).
; Expected to lower to a pmul* instruction (counted by the "grep pmul" RUN line).
define <4 x i32> @a(<4 x i32> %i) nounwind  {
        %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
        ret <4 x i32> %A
}
; Multiply a <2 x i64> argument by a splat constant (117).
; NOTE(review): presumably expanded into a multi-instruction pmuludq sequence
; (no single packed i64 multiply in SSE4.1) — the expansion contributes to the
; pmul/mov counts in the RUN lines above.
define <2 x i64> @b(<2 x i64> %i) nounwind  {
        %A = mul <2 x i64> %i, < i64 117, i64 117 >
        ret <2 x i64> %A
}
; Multiply two <4 x i32> register arguments (variable x variable case).
define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind  {
        %A = mul <4 x i32> %i, %j
        ret <4 x i32> %A
}
; Multiply two <2 x i64> register arguments (variable x variable case).
define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind  {
        %A = mul <2 x i64> %i, %j
        ret <2 x i64> %A
}
; Use a call to force spills.
declare void @foo()
; <4 x i32> multiply with a call in between: the call to @foo forces the
; vector arguments to be spilled and reloaded around it, exercising the
; spill/reload path (reflected in the "grep mov" count).
define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind  {
        call void @foo()
        %A = mul <4 x i32> %i, %j
        ret <4 x i32> %A
}
; <2 x i64> multiply with a call forcing spills, like @e but for the i64 case.
; Per the comment at the top of the file, the arguments arriving in
; %xmm0/%xmm1 here would cause an extra movdqa without -join-physregs.
define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind  {
        call void @foo()
        %A = mul <2 x i64> %i, %j
        ret <2 x i64> %A
}
     35