; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

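; urem of a value by itself is zero whenever the operand is nonzero, and both
; lanes here are the nonzero constant -1, so the whole computation should
; constant-fold away and every target just zeroes the return register.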
define <2 x i16> @test_urem_unary_v2i16() nounwind {
; SSE-LABEL: test_urem_unary_v2i16:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_urem_unary_v2i16:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %I8 = insertelement <2 x i16> zeroinitializer, i16 -1, i32 0
  %I9 = insertelement <2 x i16> %I8, i16 -1, i32 1
  %B9 = urem <2 x i16> %I9, %I9
  ret <2 x i16> %B9
}

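; PR20355: sdiv by the splat constant 3 should lower to the multiply-by-magic-
; constant trick rather than scalarized idiv: take the high 32 bits of the
; product with 0x55555556 (1431655766), then add the sign bit (psrld $31 +
; paddd) so the quotient truncates toward zero. For example, with x = -7:
; high32(-7 * 0x55555556) = -3, plus sign bit 1, gives -2 = -7 sdiv 3. The
; extra psrad/pand/psubd sequence in the SSE2 output fixes up the unsigned
; pmuludq high halves into a signed high multiply, since pmuldq is SSE4.1+.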
define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
; SSE2-LABEL: PR20355:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    pand %xmm0, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    psrad $31, %xmm3
; SSE2-NEXT:    pand %xmm1, %xmm3
; SSE2-NEXT:    paddd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
; SSE2-NEXT:    psubd %xmm3, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm0
; SSE2-NEXT:    psrld $31, %xmm0
; SSE2-NEXT:    paddd %xmm4, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: PR20355:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT:    pmuldq %xmm2, %xmm3
; SSE41-NEXT:    pmuldq %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT:    movdqa %xmm1, %xmm0
; SSE41-NEXT:    psrld $31, %xmm0
; SSE41-NEXT:    paddd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: PR20355:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR20355:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
; AVX2-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT:    vpsrld $31, %xmm0, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
entry:
  %sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %sdiv
}