; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s --check-prefix=X64

; This is not an MMX operation; promoted to XMM.
define x86_mmx @t0(i32 %A) nounwind {
; X32-LABEL: t0:
; X32:       ## BB#0:
; X32-NEXT:    subl $12, %esp
; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; X32-NEXT:    movq %xmm0, (%esp)
; X32-NEXT:    movq (%esp), %mm0
; X32-NEXT:    addl $12, %esp
; X32-NEXT:    retl
;
; X64-LABEL: t0:
; X64:       ## BB#0:
; X64-NEXT:    ## kill: %EDI<def> %EDI<kill> %RDI<def>
; X64-NEXT:    movd %rdi, %xmm0
; X64-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT:    retq
  %tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1
  %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
  ret x86_mmx %tmp4
}

define <8 x i8> @t1(i8 zeroext %x) nounwind {
; X32-LABEL: t1:
; X32:       ## BB#0:
; X32-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:    retl
;
; X64-LABEL: t1:
; X64:       ## BB#0:
; X64-NEXT:    movd %edi, %xmm0
; X64-NEXT:    retq
  %r = insertelement <8 x i8> undef, i8 %x, i32 0
  ret <8 x i8> %r
}

; PR2574
define <2 x float> @t2(<2 x float> %a0) {
; X32-LABEL: t2:
; X32:       ## BB#0:
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       ## BB#0:
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    retq
  %v1 = insertelement <2 x float> %a0, float 0.000000e+00, i32 0
  %v2 = insertelement <2 x float> %v1, float 0.000000e+00, i32 1
  ret <2 x float> %v2
}

@g0 = external global i16
@g1 = external global <4 x i16>

; PR2562
define void @t3() {
; X32-LABEL: t3:
; X32:       ## BB#0:
; X32-NEXT:    movl L_g0$non_lazy_ptr, %eax
; X32-NEXT:    movl L_g1$non_lazy_ptr, %ecx
; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X32-NEXT:    movzwl (%eax), %eax
; X32-NEXT:    movd %eax, %xmm1
; X32-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X32-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X32-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-NEXT:    movq %xmm0, (%ecx)
; X32-NEXT:    retl
;
; X64-LABEL: t3:
; X64:       ## BB#0:
; X64-NEXT:    movq _g0@{{.*}}(%rip), %rax
; X64-NEXT:    movq _g1@{{.*}}(%rip), %rcx
; X64-NEXT:    pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-NEXT:    movzwl (%rax), %eax
; X64-NEXT:    pinsrd $0, %eax, %xmm0
; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-NEXT:    movq %xmm0, (%rcx)
; X64-NEXT:    retq
  load i16, i16* @g0
  load <4 x i16>, <4 x i16>* @g1
  insertelement <4 x i16> %2, i16 %1, i32 0
  store <4 x i16> %3, <4 x i16>* @g1
  ret void
}