; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64

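; The pshufw immediate -18 (0xEE) encodes the word shuffle [2,3,2,3]. On
; x86-64 the <1 x i64> load folds directly into the pshufw memory operand;
; on x86-32 the i64 is loaded as two 32-bit halves and reassembled in a
; stack slot before the shuffle.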
define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-LABEL: test0:
; X32:       # %bb.0: # %entry
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    movl 8(%ebp), %eax
; X32-NEXT:    movl (%eax), %ecx
; X32-NEXT:    movl 4(%eax), %eax
; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X32-NEXT:    movl %ecx, (%esp)
; X32-NEXT:    pshufw $238, (%esp), %mm0 # mm0 = mem[2,3,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    addl $32, %eax
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: test0:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    addl $32, %eax
; X64-NEXT:    retq
entry:
  %v5 = load <1 x i64>, <1 x i64>* %v4, align 8
  %v12 = bitcast <1 x i64> %v5 to <4 x i16>
  %v13 = bitcast <4 x i16> %v12 to x86_mmx
  %v14 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %v13, i8 -18)
  %v15 = bitcast x86_mmx %v14 to <4 x i16>
  %v16 = bitcast <4 x i16> %v15 to <1 x i64>
  %v17 = extractelement <1 x i64> %v16, i32 0
  %v18 = bitcast i64 %v17 to <2 x i32>
  %v19 = extractelement <2 x i32> %v18, i32 0
  %v20 = add i32 %v19, 32
  ret i32 %v20
}

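; The shuffle input is built from a scalar load: the i32 goes into lane 0 of
; a <2 x i32> with lane 1 zeroed, so codegen uses movd (which zero-extends
; from memory) to materialize %mm0 before the pshufw. Immediate -24 (0xE8)
; encodes the word shuffle [0,2,2,3].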
define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test1:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd (%eax), %mm0
; X32-NEXT:    pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    emms
; X32-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd (%rdi), %mm0
; X64-NEXT:    pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %0 = load i32, i32* %ptr, align 4
  %1 = insertelement <2 x i32> undef, i32 %0, i32 0
  %2 = insertelement <2 x i32> %1, i32 0, i32 1
  %3 = bitcast <2 x i32> %2 to x86_mmx
  %4 = bitcast x86_mmx %3 to i64
  %5 = bitcast i64 %4 to <4 x i16>
  %6 = bitcast <4 x i16> %5 to x86_mmx
  %7 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %6, i8 -24)
  %8 = bitcast x86_mmx %7 to <4 x i16>
  %9 = bitcast <4 x i16> %8 to <1 x i64>
  %10 = extractelement <1 x i64> %9, i32 0
  %11 = bitcast i64 %10 to <2 x i32>
  %12 = extractelement <2 x i32> %11, i32 0
  tail call void @llvm.x86.mmx.emms()
  ret i32 %12
}

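; Same shuffle as test1, but the source is loaded as x86_mmx directly, so the
; load folds into the pshufw memory operand on both targets.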
define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test2:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    emms
; X32-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufw $232, (%rdi), %mm0 # mm0 = mem[0,2,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %0 = bitcast i32* %ptr to x86_mmx*
  %1 = load x86_mmx, x86_mmx* %0, align 8
  %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -24)
  %3 = bitcast x86_mmx %2 to <4 x i16>
  %4 = bitcast <4 x i16> %3 to <1 x i64>
  %5 = extractelement <1 x i64> %4, i32 0
  %6 = bitcast i64 %5 to <2 x i32>
  %7 = extractelement <2 x i32> %6, i32 0
  tail call void @llvm.x86.mmx.emms()
  ret i32 %7
}

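; An extractelement of lane 0 from an MMX-backed <2 x i32> should lower to a
; single movd, with no shuffle or stack traffic.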
define i32 @test3(x86_mmx %a) nounwind {
; X32-LABEL: test3:
; X32:       # %bb.0:
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0:
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    retq
  %tmp0 = bitcast x86_mmx %a to <2 x i32>
  %tmp1 = extractelement <2 x i32> %tmp0, i32 0
  ret i32 %tmp1
}

; Verify we don't muck with extractelts from the upper lane.
define i32 @test4(x86_mmx %a) nounwind {
; X32-LABEL: test4:
; X32:       # %bb.0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $8, %esp
; X32-NEXT:    movq %mm0, (%esp)
; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
; X32-NEXT:    movd %xmm0, %eax
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # %bb.0:
; X64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
; X64-NEXT:    movd %xmm0, %eax
; X64-NEXT:    retq
  %tmp0 = bitcast x86_mmx %a to <2 x i32>
  %tmp1 = extractelement <2 x i32> %tmp0, i32 1
  ret i32 %tmp1
}

declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
declare void @llvm.x86.mmx.emms()