; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7-avx | FileCheck %s

define <4 x i3> @test1(<4 x i3>* %in) nounwind {
; CHECK-LABEL: test1:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzwl (%rdi), %eax
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $3, %ecx
; CHECK-NEXT:    andl $7, %ecx
; CHECK-NEXT:    movl %eax, %edx
; CHECK-NEXT:    andl $7, %edx
; CHECK-NEXT:    vmovd %edx, %xmm0
; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $6, %ecx
; CHECK-NEXT:    andl $7, %ecx
; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    shrl $9, %eax
; CHECK-NEXT:    andl $7, %eax
; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %ret = load <4 x i3>, <4 x i3>* %in, align 1
  ret <4 x i3> %ret
}

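; test2: same pattern for <4 x i1>. All four bits fit in one byte, so the
; expected code is a movzbl followed by single-bit shift/and-1 extractions
; inserted via vpinsrd.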
define <4 x i1> @test2(<4 x i1>* %in) nounwind {
; CHECK-LABEL: test2:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzbl (%rdi), %eax
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl %ecx
; CHECK-NEXT:    andl $1, %ecx
; CHECK-NEXT:    movl %eax, %edx
; CHECK-NEXT:    andl $1, %edx
; CHECK-NEXT:    vmovd %edx, %xmm0
; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $2, %ecx
; CHECK-NEXT:    andl $1, %ecx
; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    shrl $3, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %ret = load <4 x i1>, <4 x i1>* %in, align 1
  ret <4 x i1> %ret
}

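; test3: load of <4 x i1> sign-extended to <4 x i64>. Each bit is expected to
; be sign-extended with a shl/sar-by-63 pair before being gathered with
; vpinsrd and widened to 64-bit lanes.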
define <4 x i64> @test3(<4 x i1>* %in) nounwind {
; CHECK-LABEL: test3:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzbl (%rdi), %eax
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shlq $62, %rcx
; CHECK-NEXT:    sarq $63, %rcx
; CHECK-NEXT:    movq %rax, %rdx
; CHECK-NEXT:    shlq $63, %rdx
; CHECK-NEXT:    sarq $63, %rdx
; CHECK-NEXT:    vmovd %edx, %xmm0
; CHECK-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shlq $61, %rcx
; CHECK-NEXT:    sarq $63, %rcx
; CHECK-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    shlq $60, %rax
; CHECK-NEXT:    sarq $63, %rax
; CHECK-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
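; Widen the four sign-extended 32-bit lanes to <4 x i64>: each xmm half is
; sign-extended with vpmovsxdq and the halves are recombined with vinsertf128.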
; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm1
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT:    retq
  %wide.load35 = load <4 x i1>, <4 x i1>* %in, align 1
  %sext = sext <4 x i1> %wide.load35 to <4 x i64>
  ret <4 x i64> %sext
}

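; test4: load of <16 x i4>. All sixteen nibbles live in one 64-bit word, so
; the expected code is a single movq followed by shift/and-15 extractions
; inserted lane by lane with vpinsrb.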
define <16 x i4> @test4(<16 x i4>* %in) nounwind {
; CHECK-LABEL: test4:
; CHECK:       # BB#0:
; CHECK-NEXT:    movq (%rdi), %rax
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $4, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    movl %eax, %edx
; CHECK-NEXT:    andl $15, %edx
; CHECK-NEXT:    vmovd %edx, %xmm0
; CHECK-NEXT:    vpinsrb $1, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $8, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $2, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $12, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $16, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $4, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $20, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $5, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $24, %ecx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $6, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    shrl $28, %ecx
; CHECK-NEXT:    vpinsrb $7, %ecx, %xmm0, %xmm0
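; Nibbles 8-15 sit above bit 31, so they are extracted with 64-bit shrq; as
; with the shrl $28 above, the final shrq $60 needs no mask because the shift
; already isolates the top four bits.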
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $32, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $8, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $36, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $9, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $40, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $10, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $44, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $11, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $48, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $52, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    movq %rax, %rcx
; CHECK-NEXT:    shrq $56, %rcx
; CHECK-NEXT:    andl $15, %ecx
; CHECK-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
; CHECK-NEXT:    shrq $60, %rax
; CHECK-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %ret = load <16 x i4>, <16 x i4>* %in, align 1
  ret <16 x i4> %ret
}