; Regression test for operations on widened (sub-128-bit) vector types on X86.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X64

      5 define i32 @mul_f(<4 x i8>* %A) {
      6 ; X86-LABEL: mul_f:
      7 ; X86:       # %bb.0: # %entry
      8 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
      9 ; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
     10 ; X86-NEXT:    pmaddwd %xmm0, %xmm0
     11 ; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
     12 ; X86-NEXT:    movd %xmm0, (%eax)
     13 ; X86-NEXT:    xorl %eax, %eax
     14 ; X86-NEXT:    retl
     15 ;
     16 ; X64-LABEL: mul_f:
     17 ; X64:       # %bb.0: # %entry
     18 ; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
     19 ; X64-NEXT:    pmaddwd %xmm0, %xmm0
     20 ; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
     21 ; X64-NEXT:    movd %xmm0, (%rax)
     22 ; X64-NEXT:    xorl %eax, %eax
     23 ; X64-NEXT:    retq
     24 entry:
     25   %0 = load <4 x i8>, <4 x i8>* %A, align 8
     26   %mul = mul <4 x i8> %0, %0
     27   store <4 x i8> %mul, <4 x i8>* undef
     28   ret i32 0
     29 }
     31 define i32 @shuff_f(<4 x i8>* %A) {
     32 ; X86-LABEL: shuff_f:
     33 ; X86:       # %bb.0: # %entry
     34 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
     35 ; X86-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
     36 ; X86-NEXT:    paddd %xmm0, %xmm0
     37 ; X86-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
     38 ; X86-NEXT:    movd %xmm0, (%eax)
     39 ; X86-NEXT:    xorl %eax, %eax
     40 ; X86-NEXT:    retl
     41 ;
     42 ; X64-LABEL: shuff_f:
     43 ; X64:       # %bb.0: # %entry
     44 ; X64-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
     45 ; X64-NEXT:    paddd %xmm0, %xmm0
     46 ; X64-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
     47 ; X64-NEXT:    movd %xmm0, (%rax)
     48 ; X64-NEXT:    xorl %eax, %eax
     49 ; X64-NEXT:    retq
     50 entry:
     51   %0 = load <4 x i8>, <4 x i8>* %A, align 8
     52   %add = add <4 x i8> %0, %0
     53   store <4 x i8> %add, <4 x i8>* undef
     54   ret i32 0
     55 }
     57 define <2 x float> @bitcast_widen(<4 x i32> %in) nounwind readnone {
     58 ; X86-LABEL: bitcast_widen:
     59 ; X86:       # %bb.0: # %entry
     60 ; X86-NEXT:    retl
     61 ;
     62 ; X64-LABEL: bitcast_widen:
     63 ; X64:       # %bb.0: # %entry
     64 ; X64-NEXT:    retq
     65 entry:
     66  %x = shufflevector <4 x i32> %in, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
     67  %y = bitcast <2 x i32> %x to <2 x float>
     68  ret <2 x float> %y
     69 }