; Home | History | Annotate | Download | only in X86
      1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
      2 ; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
      3 ; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s
      4 
      5 ; If there is no explicit MMX type usage, always promote to XMM.
      6 
      7 define void @test0(<1 x i64>* %x) {
      8 ; X32-LABEL: test0:
      9 ; X32:       ## %bb.0: ## %entry
     10 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
     11 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
     12 ; X32-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
     13 ; X32-NEXT:    movlps %xmm0, (%eax)
     14 ; X32-NEXT:    retl
     15 ;
     16 ; X64-LABEL: test0:
     17 ; X64:       ## %bb.0: ## %entry
     18 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
     19 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
     20 ; X64-NEXT:    movq %xmm0, (%rdi)
     21 ; X64-NEXT:    retq
     22 entry:
; Load a 64-bit value, reinterpret it as <2 x i32>, splat lane 1 into both
; lanes, and store it back.  No x86_mmx type appears here, so the CHECK
; lines above assert the splat lowers to XMM shuffles (shufps/pshufd),
; never to MMX registers.
     23   %tmp2 = load <1 x i64>, <1 x i64>* %x
     24   %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
     25   %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
     26   %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
     27   store <1 x i64> %tmp10, <1 x i64>* %x
     28   ret void
     29 }
     30 
     31 define void @test1() {
     32 ; X32-LABEL: test1:
     33 ; X32:       ## %bb.0: ## %entry
     34 ; X32-NEXT:    pushl %edi
     35 ; X32-NEXT:    .cfi_def_cfa_offset 8
     36 ; X32-NEXT:    .cfi_offset %edi, -8
     37 ; X32-NEXT:    pxor %mm0, %mm0
     38 ; X32-NEXT:    movq LCPI1_0, %mm1 ## mm1 = 0x7070606040400000
     39 ; X32-NEXT:    xorl %edi, %edi
     40 ; X32-NEXT:    maskmovq %mm1, %mm0
     41 ; X32-NEXT:    popl %edi
     42 ; X32-NEXT:    retl
     43 ;
     44 ; X64-LABEL: test1:
     45 ; X64:       ## %bb.0: ## %entry
     46 ; X64-NEXT:    pxor %mm0, %mm0
     47 ; X64-NEXT:    movq {{.*}}(%rip), %mm1 ## mm1 = 0x7070606040400000
     48 ; X64-NEXT:    xorl %edi, %edi
     49 ; X64-NEXT:    maskmovq %mm1, %mm0
     50 ; X64-NEXT:    retq
     51 entry:
; This test uses the x86_mmx type explicitly (via bitcasts feeding the
; llvm.x86.mmx.maskmovq intrinsic), so unlike test0 the lowering is
; expected to use MMX registers.  The and/add chain below operates on a
; zero vector, so per the CHECK lines it constant-folds into a single
; movq of the folded constant (0x7070606040400000) plus pxor.
     52   %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
     53   %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
     54   %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>
     55   %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >
     56   %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>
     57   %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
     58   %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
; maskmovq(data=%tmp557 (zero), mask=%tmp556, addr=null): with a zero
; data operand nothing is stored, but the intrinsic call must still be
; emitted as a real maskmovq instruction.
     59   tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null)
     60   ret void
     61 }
     62 
     63 @tmp_V2i = common global <2 x i32> zeroinitializer
     64 
     65 define void @test2() nounwind {
     66 ; X32-LABEL: test2:
     67 ; X32:       ## %bb.0: ## %entry
     68 ; X32-NEXT:    movl L_tmp_V2i$non_lazy_ptr, %eax
     69 ; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
     70 ; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
     71 ; X32-NEXT:    movlps %xmm0, (%eax)
     72 ; X32-NEXT:    retl
     73 ;
     74 ; X64-LABEL: test2:
     75 ; X64:       ## %bb.0: ## %entry
     76 ; X64-NEXT:    movq _tmp_V2i@{{.*}}(%rip), %rax
     77 ; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
     78 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
     79 ; X64-NEXT:    movq %xmm0, (%rax)
     80 ; X64-NEXT:    retq
     81 entry:
; Splat lane 0 of the global <2 x i32> @tmp_V2i into both lanes and store
; it back.  As in test0 there is no x86_mmx usage, so the CHECK lines
; assert an XMM lowering (unpcklps/pshufd); the load of the global goes
; through the Darwin non-lazy pointer / GOT as shown above.
     82   %0 = load <2 x i32>, <2 x i32>* @tmp_V2i, align 8
     83   %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
     84   store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
     85   ret void
     86 }
     87 
     88 define <4 x float> @pr35869() nounwind {
     89 ; X32-LABEL: pr35869:
     90 ; X32:       ## %bb.0:
     91 ; X32-NEXT:    movl $64, %eax
     92 ; X32-NEXT:    movd %eax, %mm0
     93 ; X32-NEXT:    pxor %mm1, %mm1
     94 ; X32-NEXT:    punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
     95 ; X32-NEXT:    pcmpgtw %mm0, %mm1
     96 ; X32-NEXT:    movq %mm0, %mm2
     97 ; X32-NEXT:    punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
     98 ; X32-NEXT:    xorps %xmm0, %xmm0
     99 ; X32-NEXT:    cvtpi2ps %mm2, %xmm0
    100 ; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
    101 ; X32-NEXT:    punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
    102 ; X32-NEXT:    cvtpi2ps %mm0, %xmm0
    103 ; X32-NEXT:    retl
    104 ;
    105 ; X64-LABEL: pr35869:
    106 ; X64:       ## %bb.0:
    107 ; X64-NEXT:    movl $64, %eax
    108 ; X64-NEXT:    movd %eax, %mm0
    109 ; X64-NEXT:    pxor %mm1, %mm1
    110 ; X64-NEXT:    punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
    111 ; X64-NEXT:    pcmpgtw %mm0, %mm1
    112 ; X64-NEXT:    movq %mm0, %mm2
    113 ; X64-NEXT:    punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
    114 ; X64-NEXT:    xorps %xmm0, %xmm0
    115 ; X64-NEXT:    cvtpi2ps %mm2, %xmm0
    116 ; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
    117 ; X64-NEXT:    punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
    118 ; X64-NEXT:    cvtpi2ps %mm0, %xmm0
    119 ; X64-NEXT:    retq
; Regression test (presumably for llvm.org PR35869 — the function name
; suggests it; the original report is not visible here).  The whole
; computation is a chain of explicit MMX intrinsics on constant inputs
; feeding cvtpi2ps, so the CHECK lines assert it stays in MMX registers
; end-to-end rather than being constant-folded or promoted to XMM.
    120   %1 = tail call x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx bitcast (<8 x i8> <i8 64, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0> to x86_mmx), x86_mmx bitcast (<8 x i8> zeroinitializer to x86_mmx))
    121   %2 = tail call x86_mmx @llvm.x86.mmx.pcmpgt.w(x86_mmx bitcast (<4 x i16> zeroinitializer to x86_mmx), x86_mmx %1)
    122   %3 = tail call x86_mmx @llvm.x86.mmx.punpckhwd(x86_mmx %1, x86_mmx %2)
; Convert the high pair of i32s to floats, duplicate the low two float
; lanes into the high lanes, then convert the low pair into lanes 0-1.
    123   %4 = tail call <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float> zeroinitializer, x86_mmx %3)
    124   %5 = shufflevector <4 x float> %4, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
    125   %6 = tail call x86_mmx @llvm.x86.mmx.punpcklwd(x86_mmx %1, x86_mmx %2)
    126   %7 = tail call <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float> %5, x86_mmx %6)
    127   ret <4 x float> %7
    128 }
    129 
    130 declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)
    131 declare x86_mmx @llvm.x86.mmx.pcmpgt.w(x86_mmx, x86_mmx)
    132 declare x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx, x86_mmx)
    133 declare x86_mmx @llvm.x86.mmx.punpcklwd(x86_mmx, x86_mmx)
    134 declare x86_mmx @llvm.x86.mmx.punpckhwd(x86_mmx, x86_mmx)
    135 declare <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float>, x86_mmx)
    136