; Viewer header (extraction artifact): Home | History | Annotate | Download | only in X86
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X32
      3 ; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X64
      4 
      5 ; MMX insertelement is not available; these are promoted to XMM.
      6 ; (Without SSE they are split to two ints, and the code is much better.)
      7 
; Checks that an insertelement into an MMX-typed value (no native MMX insert
; instruction exists) is promoted through XMM registers: on x86-32 the value
; round-trips through a stack slot around the XMM insert/blend, while on
; x86-64 it moves via movdq2q. Lane 0 is set to 32 and lane 1 is zeroed.
; NOTE(review): the X32/X64 blocks below are FileCheck assertions generated
; by update_llc_test_checks.py — regenerate rather than hand-edit them.
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-LABEL: mmx_movzl:
; X32:       ## BB#0:
; X32-NEXT:    subl $20, %esp
; X32-NEXT:    movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; X32-NEXT:    movl $32, %eax
; X32-NEXT:    pinsrd $0, %eax, %xmm0
; X32-NEXT:    pxor %xmm1, %xmm1
; X32-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; X32-NEXT:    movq %xmm1, (%esp)
; X32-NEXT:    movq (%esp), %mm0
; X32-NEXT:    addl $20, %esp
; X32-NEXT:    retl
;
; X64-LABEL: mmx_movzl:
; X64:       ## BB#0:
; X64-NEXT:    movdq2q %xmm0, %mm0
; X64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; X64-NEXT:    movl $32, %eax
; X64-NEXT:    pinsrq $0, %rax, %xmm1
; X64-NEXT:    pxor %xmm0, %xmm0
; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; X64-NEXT:    retq
  %tmp = bitcast x86_mmx %x to <2 x i32>              ; reinterpret the MMX input as two i32 lanes
  %tmp3 = insertelement <2 x i32> %tmp, i32 32, i32 0 ; lane 0 := 32 (the insert being tested)
  %tmp8 = insertelement <2 x i32> %tmp3, i32 0, i32 1 ; lane 1 := 0, i.e. a "movzl"-style zeroing
  %tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx          ; back to the MMX type for the return
  ret x86_mmx %tmp9
}
     39