; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE41

; Widen a v5i16 to v8i16 to do a vector sub and multiply.
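;
; Rough sketch (illustration only, not a FileCheck assertion) of the widened
; form the type legalizer is expected to produce: the <5 x i16> value is
; padded with undef lanes to <8 x i16>, so the sub/mul constants become
; <271,271,271,271,271,u,u,u> and <2,4,2,2,2,u,u,u> (matching the constant
; pools in the checks below) and the arithmetic runs as full 8-lane ops.
; The names %wide.src, %wide.sub, %wide.mul are illustrative, not taken from
; this test:
;   %wide.sub = sub <8 x i16> %wide.src, <i16 271, i16 271, i16 271, i16 271, i16 271, i16 undef, i16 undef, i16 undef>
;   %wide.mul = mul <8 x i16> %wide.sub, <i16 2, i16 4, i16 2, i16 2, i16 2, i16 undef, i16 undef, i16 undef>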

define void @update(<5 x i16>* %dst, <5 x i16>* %src, i32 %n) nounwind {
; SSE2-LABEL: update:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movq {{.*}}(%rip), %rax
; SSE2-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movw $0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = <271,271,271,271,271,u,u,u>
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = <2,4,2,2,2,u,u,u>
; SSE2-NEXT:    jmp .LBB0_1
; SSE2-NEXT:    .p2align 4, 0x90
; SSE2-NEXT:  .LBB0_2: # %forbody
; SSE2-NEXT:    # in Loop: Header=BB0_1 Depth=1
; SSE2-NEXT:    movslq -{{[0-9]+}}(%rsp), %rax
; SSE2-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT:    shlq $4, %rax
; SSE2-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
; SSE2-NEXT:    movdqa (%rdx,%rax), %xmm2
; SSE2-NEXT:    psubw %xmm0, %xmm2
; SSE2-NEXT:    pmullw %xmm1, %xmm2
; SSE2-NEXT:    movq %xmm2, (%rcx,%rax)
; SSE2-NEXT:    pextrw $4, %xmm2, %edx
; SSE2-NEXT:    movw %dx, 8(%rcx,%rax)
; SSE2-NEXT:    incl -{{[0-9]+}}(%rsp)
; SSE2-NEXT:  .LBB0_1: # %forcond
; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE2-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT:    cmpl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT:    jl .LBB0_2
; SSE2-NEXT:  # %bb.3: # %afterfor
; SSE2-NEXT:    retq
;
; SSE41-LABEL: update:
; SSE41:       # %bb.0: # %entry
; SSE41-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movq {{.*}}(%rip), %rax
; SSE41-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movw $0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movl $0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = <271,271,271,271,271,u,u,u>
; SSE41-NEXT:    jmp .LBB0_1
; SSE41-NEXT:    .p2align 4, 0x90
; SSE41-NEXT:  .LBB0_2: # %forbody
; SSE41-NEXT:    # in Loop: Header=BB0_1 Depth=1
; SSE41-NEXT:    movslq -{{[0-9]+}}(%rsp), %rax
; SSE41-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT:    shlq $4, %rax
; SSE41-NEXT:    movq -{{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT:    movdqa (%rdx,%rax), %xmm1
; SSE41-NEXT:    psubw %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm1, %xmm2
; SSE41-NEXT:    psllw $2, %xmm2
; SSE41-NEXT:    psllw $1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
; SSE41-NEXT:    pextrw $4, %xmm1, 8(%rcx,%rax)
; SSE41-NEXT:    movq %xmm2, (%rcx,%rax)
; SSE41-NEXT:    incl -{{[0-9]+}}(%rsp)
; SSE41-NEXT:  .LBB0_1: # %forcond
; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
; SSE41-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
; SSE41-NEXT:    cmpl -{{[0-9]+}}(%rsp), %eax
; SSE41-NEXT:    jl .LBB0_2
; SSE41-NEXT:  # %bb.3: # %afterfor
; SSE41-NEXT:    retq
entry:
	%dst.addr = alloca <5 x i16>*
	%src.addr = alloca <5 x i16>*
	%n.addr = alloca i32
	%v = alloca <5 x i16>, align 16
	%i = alloca i32, align 4
	store <5 x i16>* %dst, <5 x i16>** %dst.addr
	store <5 x i16>* %src, <5 x i16>** %src.addr
	store i32 %n, i32* %n.addr
	store <5 x i16> < i16 1, i16 1, i16 1, i16 0, i16 0 >, <5 x i16>* %v
	store i32 0, i32* %i
	br label %forcond

forcond:
	%tmp = load i32, i32* %i
	%tmp1 = load i32, i32* %n.addr
	%cmp = icmp slt i32 %tmp, %tmp1
	br i1 %cmp, label %forbody, label %afterfor

forbody:
	%tmp2 = load i32, i32* %i
	%tmp3 = load <5 x i16>*, <5 x i16>** %dst.addr
	%arrayidx = getelementptr <5 x i16>, <5 x i16>* %tmp3, i32 %tmp2
	%tmp4 = load i32, i32* %i
	%tmp5 = load <5 x i16>*, <5 x i16>** %src.addr
	%arrayidx6 = getelementptr <5 x i16>, <5 x i16>* %tmp5, i32 %tmp4
	%tmp7 = load <5 x i16>, <5 x i16>* %arrayidx6
	%sub = sub <5 x i16> %tmp7, < i16 271, i16 271, i16 271, i16 271, i16 271 >
	%mul = mul <5 x i16> %sub, < i16 2, i16 4, i16 2, i16 2, i16 2 >
	store <5 x i16> %mul, <5 x i16>* %arrayidx
	br label %forinc

forinc:
	%tmp8 = load i32, i32* %i
	%inc = add i32 %tmp8, 1
	store i32 %inc, i32* %i
	br label %forcond

afterfor:
	ret void
}