/external/llvm/test/CodeGen/X86/
vector-shuffle-256-v8.ll
    10 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    15 ; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
    26 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    32 ; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
    43 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    49 ; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
    60 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    [all...]
vector-shuffle-256-v4.ll
    10 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    15 ; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
    25 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    30 ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,0,1]
    39 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
    42 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
    47 ; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,0
    [all...]
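A minimal intrinsics sketch (illustrative function names, not taken from the
test files) of what the splat checks above encode: AVX2 has a true 256-bit
broadcast, while plain AVX has to build the splat by duplicating within the
low 128-bit lane and then mirroring that lane with vinsertf128.

    #include <immintrin.h>

    // AVX2: a single vbroadcastsd %xmm0, %ymm0, as the AVX2-NEXT lines expect.
    __m256d splat_avx2(__m128d lo) {
      return _mm256_broadcastsd_pd(lo);
    }

    // AVX1-style expansion: duplicate within the low lane, then mirror it
    // into the high lane; this is the vinsertf128 the AVX1-NEXT lines check.
    __m256d splat_avx1(__m128d lo) {
      __m128d d = _mm_movedup_pd(lo);         // xmm = [a,a]
      __m256d v = _mm256_castpd128_pd256(d);  // low 128-bit lane = [a,a]
      return _mm256_insertf128_pd(v, d, 1);   // high lane = [a,a]
    }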
avx2-vector-shifts.ll
    12 ; CHECK-NOT: vpsllw $0, %ymm0, %ymm0
    22 ; CHECK: vpaddw %ymm0, %ymm0, %ymm0
    32 ; CHECK: vpsllw $15, %ymm0, %ymm0
    42 ; CHECK-NOT: vpslld $0, %ymm0, %ymm0
    52 ; CHECK: vpaddd %ymm0, %ymm0, %ymm
    [all...]
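A hedged sketch of the fold these CHECK/CHECK-NOT lines pin down: a vector
shift left by zero must not survive to the output at all, and a shift left
by one is expected to lower to an add of the value with itself (vpaddw /
vpaddd), since x << 1 == x + x. The function name below is illustrative.

    #include <immintrin.h>

    // Written as the add form the tests expect; _mm256_slli_epi16(x, 1)
    // lowers to the same vpaddw.
    __m256i shl16_by_one(__m256i x) {
      return _mm256_add_epi16(x, x);
    }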
vector-shuffle-512-v8.ll
    19 ; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,0]
    20 ; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
    30 ; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,0,0]
    31 ; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
    41 ; ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,0,0,0]
    42 ; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
    53 ; ALL-NEXT: vbroadcastsd %xmm0, %ymm0
    [all...]
vector-shuffle-256-v16.ll
    10 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    15 ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
    28 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    35 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    46 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    53 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    64 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    71 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    82 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    [all...]
lower-vec-shuffle-bug.ll
    6 ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
    16 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    26 ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
    36 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
pr17764.ll
    9 ; CHECK: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
avx-vperm2x128.ll
    7 ; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
    17 ; ALL-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
    27 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    37 ; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
    47 ; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3
    [all...]
avx-insertelt.ll
    6 ; ALL: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
    14 ; ALL: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
    24 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
    30 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
    40 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7
    [all...]
vector-shuffle-256-v32.ll
    11 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
    16 ; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
    28 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    35 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    47 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    54 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    66 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    73 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
    85 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    [all...]
avx-logic.ll
    6 ; CHECK-NEXT: vandpd %ymm0, %ymm1, %ymm0
    8 ; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
    23 ; CHECK-NEXT: vandpd {{.*}}(%rip), %ymm0, %ymm0
    25 ; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
    39 ; CHECK-NEXT: vandps %ymm0, %ymm1, %ymm0
    [all...]
2012-01-12-extract-sv.ll
    6 ; CHECK-NEXT: vmovaps (%eax), %ymm0
    7 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    10 ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
    14 ; CHECK-NEXT: vblendps $1, %ymm0, %ymm2, %ymm0 # ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7]
    15 ; CHECK-NEXT: vmovaps %ymm0, (%eax)
avx-shift.ll
    8 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    10 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    21 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    23 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    33 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    35 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    46 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    48 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    59 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
    61 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    [all...]
avx-cvt.ll
    6 ; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
    15 ; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
    24 ; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
    36 ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
    37 ; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
    46 ; CHECK-NEXT: vcvttpd2dqy %ymm0, %xmm0
    56 ; CHECK-NEXT: vcvtpd2psy %ymm0, %xmm
    [all...]
avx1-logical-load-folding.ll
    17 ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0
    31 ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0
    45 ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0
    59 ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0
x86-upgrade-avx2-vbroadcast.ll
    10 ; CHECK: vinsertf128 $1, %xmm0, %ymm0, %ymm0
avx-cast.ll
    11 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
    16 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
    27 ; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
    32 ; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
    45 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7
    [all...]
avx-cvt-2.ll
    12 ; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
    13 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
    29 ; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
    30 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
    46 ; CHECK-NEXT: vcvttps2dq %ymm0, %ymm0
    47 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
    64 ; CHECK-NEXT: vcvttps2dq %ymm0, %ymm
    [all...]
avx2-intrinsics-x86-upgrade.ll
    36 ; CHECK: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8],zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24]
    44 ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,ymm0[23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero,zero
    52 ; CHECK: vpslldq {{.*#+}} ymm0 = zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
    60 ; CHECK: vpsrldq {{.*#+}} ymm0 = ymm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,ymm0[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 (…)
    [all...]
avx2-pmovxrm-intrinsics.ll
    5 ; CHECK: vpmovsxbw (%rdi), %ymm0
    13 ; CHECK: vpmovsxbd (%rdi), %ymm0
    21 ; CHECK: vpmovsxbq (%rdi), %ymm0
    29 ; CHECK: vpmovsxwd (%rdi), %ymm0
    37 ; CHECK: vpmovsxwq (%rdi), %ymm0
    45 ; CHECK: vpmovsxdq (%rdi), %ymm0
    53 ; CHECK: vpmovzxbw (%rdi), %ymm0
    61 ; CHECK: vpmovzxbd (%rdi), %ymm0
    69 ; CHECK: vpmovzxbq (%rdi), %ymm0
    77 ; CHECK: vpmovzxwd (%rdi), %ymm0
    [all...]
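What these checks verify is load folding: the memory operand becomes part of
the extend itself rather than a separate load instruction. A hedged sketch
with an illustrative name, assuming AVX2 codegen:

    #include <immintrin.h>

    // Should compile to a single vpmovsxbw (%rdi), %ymm0.
    __m256i sext_bytes_to_words(const __m128i* p) {
      return _mm256_cvtepi8_epi16(_mm_loadu_si128(p));
    }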
commute-blend-avx2.ll
    20 ;CHECK: vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5,6,7],ymm0[8],mem[9,10,11],ymm0[12],mem[13,14,15]
    42 ;CHECK: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6],ymm0[7]
    64 ;CHECK: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3,4,5,6,7
    [all...]
/external/llvm/test/MC/X86/
x86_64-fma4-encoding.s
    56 // CHECK: vfmaddps (%rcx), %ymm1, %ymm0, %ymm0
    58 vfmaddps (%rcx), %ymm1, %ymm0, %ymm0
    60 // CHECK: vfmaddps %ymm1, (%rcx), %ymm0, %ymm0
    62 vfmaddps %ymm1, (%rcx),%ymm0, %ymm0
    64 // CHECK: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0
    [all...]
shuffle-comments.s
    33 vpalignr $8, %ymm0, %ymm1, %ymm2
    34 # CHECK: ymm2 = ymm0[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
    38 vpalignr $16, %ymm0, %ymm1, %ymm2
    43 vpalignr $0, %ymm0, %ymm1, %ymm2
    44 # CHECK: ymm2 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
    58 vpshufd $27, %ymm0, %ymm1
    59 # CHECK: ymm1 = ymm0[3,2,1,0,7,6,5,4]
    73 vpunpcklbw %ymm0, %ymm1, %ymm2
    74 # CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6] (…)
    [all...]
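The bracketed byte lists in the CHECK comments are the assembler's decoding
of each shuffle, printed per 128-bit lane. As a hedged C++ rendering
(illustrative name), vpalignr $8 corresponds to _mm256_alignr_epi8 with a
byte shift of 8; because it works on each lane independently, the
ymm0[8..15],ymm1[0..7] pattern repeats once per lane in the comment.

    #include <immintrin.h>

    // Per 128-bit lane: bytes 8..15 of lo, then bytes 0..7 of hi, matching
    // the "ymm2 = ymm0[8,...,15],ymm1[0,...,7],..." comment above.
    __m256i alignr8(__m256i hi, __m256i lo) {
      return _mm256_alignr_epi8(hi, lo, 8);
    }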
intel-syntax-unsized-memory.s
    22 // CHECK: vmovdqa %ymm0, (%rax)
    23 vmovdqa [rax], ymm0
/external/libvpx/libvpx/third_party/libyuv/source/
compare_win.cc
    69 vpxor ymm0, ymm0, ymm0  // sum
    86 vpaddd ymm0, ymm0, ymm1
    87 vpaddd ymm0, ymm0, ymm2
    90 vpshufd ymm1, ymm0, 0xee  // 3, 2 + 1, 0 both lanes.
    91 vpaddd ymm0, ymm0, ymm
    92 vpshufd ymm1, ymm0, 0x01  // 1 + 0 both lanes.
    93 vpaddd ymm0, ymm0, ymm1
    94 vpermq ymm1, ymm0, 0x02  // high + low lane.
    95 vpaddd ymm0, ymm0, ymm1
    [all...]
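libyuv's code here is MSVC inline assembly; as a hedged intrinsics rendering
of the reduction shown above (illustrative name), the three shuffle/add
steps collapse the eight dword partial sums in ymm0 into one scalar:

    #include <immintrin.h>
    #include <stdint.h>

    static uint32_t SumLanes(__m256i sum) {
      // 3,2 + 1,0 within each 128-bit lane.
      sum = _mm256_add_epi32(sum, _mm256_shuffle_epi32(sum, 0xee));
      // 1 + 0 within each lane.
      sum = _mm256_add_epi32(sum, _mm256_shuffle_epi32(sum, 0x01));
      // High lane + low lane.
      sum = _mm256_add_epi32(sum, _mm256_permute4x64_epi64(sum, 0x02));
      return (uint32_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum));
    }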