; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | FileCheck %s --check-prefix=X86-32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86-64
;
; On Darwin x86-32, v8i8, v4i16, and v2i32 values are passed in MM[0-2].
; On Darwin x86-32, v1i64 values are passed in memory.  In this example, they
;                   are never moved into an MM register at all.
; On Darwin x86-64, v8i8, v4i16, and v2i32 values are passed in XMM[0-7].
; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
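; A sketch of the v2i32 case, which the tests below do not exercise, appears
; at the end of this file.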

@u1 = external global x86_mmx

define void @t1(x86_mmx %v1) nounwind {
; X86-32-LABEL: t1:
; X86-32:       ## BB#0:
; X86-32-NEXT:    movl L_u1$non_lazy_ptr, %eax
; X86-32-NEXT:    movq %mm0, (%eax)
; X86-32-NEXT:    retl
;
; X86-64-LABEL: t1:
; X86-64:       ## BB#0:
; X86-64-NEXT:    movdq2q %xmm0, %mm0
; X86-64-NEXT:    movq _u1@{{.*}}(%rip), %rax
; X86-64-NEXT:    movq %mm0, (%rax)
; X86-64-NEXT:    retq
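  ; Store the incoming MMX argument (in %mm0 on x86-32, %xmm0 on x86-64) to @u1.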
  store x86_mmx %v1, x86_mmx* @u1, align 8
  ret void
}

@u2 = external global x86_mmx

define void @t2(<1 x i64> %v1) nounwind {
; X86-32-LABEL: t2:
; X86-32:       ## BB#0:
; X86-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-32-NEXT:    movl L_u2$non_lazy_ptr, %edx
; X86-32-NEXT:    movl %ecx, 4(%edx)
; X86-32-NEXT:    movl %eax, (%edx)
; X86-32-NEXT:    retl
;
; X86-64-LABEL: t2:
; X86-64:       ## BB#0:
; X86-64-NEXT:    movq _u2@{{.*}}(%rip), %rax
; X86-64-NEXT:    movq %rdi, (%rax)
; X86-64-NEXT:    retq
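  ; Bitcast the <1 x i64> argument (passed in memory on x86-32, in %rdi on
  ; x86-64) to x86_mmx so it can be stored to @u2.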
  %tmp = bitcast <1 x i64> %v1 to x86_mmx
  store x86_mmx %tmp, x86_mmx* @u2, align 8
  ret void
}
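
; The header notes that v8i8, v4i16, and v2i32 values are passed in MM[0-2]
; on Darwin x86-32 and in XMM[0-7] on Darwin x86-64, but neither @t1 nor @t2
; exercises those vector types directly. The function below is a minimal,
; untested sketch of the v2i32 case; @u3 and @t3 are hypothetical names, and
; check lines are omitted because they would have to be regenerated with
; utils/update_llc_test_checks.py.

@u3 = external global x86_mmx

define void @t3(<2 x i32> %v1) nounwind {
  ; Reinterpret the 64-bit <2 x i32> vector as an MMX value, then store it,
  ; mirroring the bitcast-and-store pattern of @t2.
  %tmp = bitcast <2 x i32> %v1 to x86_mmx
  store x86_mmx %tmp, x86_mmx* @u3, align 8
  ret void
}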