/external/llvm/test/CodeGen/X86/
vec_compare.ll
    48  ; CHECK: pshufd $177
    59  ; CHECK: pshufd $177
    80  ; CHECK: pshufd $160
    82  ; CHECK: pshufd $245
    84  ; CHECK: pshufd $245
    97  ; CHECK: pshufd $160
    99  ; CHECK: pshufd $245
   101  ; CHECK: pshufd $245
   114  ; CHECK: pshufd $160
   116  ; CHECK: pshufd $24 [all...]
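These immediates decode by the pshufd rule dst[i] = src[(imm >> 2*i) & 3]: $177 (0xB1) selects src[1,0,3,2], swapping adjacent dwords so the two halves of each 64-bit lane can be combined, while $160 (0xA0) = src[0,0,2,2] and $245 (0xF5) = src[1,1,3,3] duplicate the even and odd dwords — apparently serving the v2i64 compare lowerings these tests check. A minimal C sketch of the encoding (illustrative, not taken from the test):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v = _mm_set_epi32(3, 2, 1, 0);   /* lane i holds the value i */
        __m128i s = _mm_shuffle_epi32(v, 0xB1);  /* 0xB1 = 10,11,00,01b -> src[1,0,3,2] */
        int out[4];
        _mm_storeu_si128((__m128i *)out, s);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* prints: 1 0 3 2 */
        return 0;
    }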
pshufd-combine-crash.ll
     6  ; v4f32 X86ISD::PSHUFD nodes with a single PSHUFD.
avx-trunc.ll
     5  ; CHECK: pshufd
     6  ; CHECK: pshufd
lower-bitcast.ll
    13  ; pshufd+paddq+pshufd. This is fixed with the widening legalization.
    17  ; CHECK: pshufd
    19  ; CHECK-NEXT: pshufd
    53  ; CHECK-NOT: pshufd
    55  ; CHECK-NOT: pshufd
    59  ; CHECK-WIDE-NOT: pshufd
    61  ; CHECK-WIDE-NOT: pshufd
    71  ; FIXME: At the moment we still produce the sequence pshufd+paddd+pshufd [all...]
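The comments describe a pshufd+paddq+pshufd round-trip for arithmetic on a bitcast v2i32. A plausible reconstruction of that shape in intrinsics (function name and masks are mine, not from the test):

    #include <emmintrin.h>

    /* pshufd + paddq + pshufd: spread the two i32s into the qword low halves,
       add as 64-bit (the low 32 bits of each sum equal the i32 sum),
       then gather dwords 0 and 2 back into the low half. */
    static __m128i add_v2i32(__m128i a, __m128i b) {
        __m128i as = _mm_shuffle_epi32(a, _MM_SHUFFLE(1, 1, 0, 0)); /* a[0,0,1,1] */
        __m128i bs = _mm_shuffle_epi32(b, _MM_SHUFFLE(1, 1, 0, 0));
        __m128i s  = _mm_add_epi64(as, bs);
        return _mm_shuffle_epi32(s, _MM_SHUFFLE(3, 3, 2, 0));       /* s[0,2,3,3] */
    }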
SwizzleShuff.ll
    17  ; CHECK: pshufd
    18  ; CHECK-NEXT: pshufd
    20  ; CHECK-NEXT: pshufd
    21  ; CHECK-NEXT: pshufd
    49  ; CHECK-NOT: pshufd
    61  ; CHECK: pshufd
2011-05-09-loaduse.ll
     4  ;CHECK-NOT: pshufd
pmul.ll
     8  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    10  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    12  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
    48  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
    50  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    51  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    53  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
    96  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
    99  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
   100  ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3 [all...]
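The SSE2 run here checks the standard emulation of a 32-bit lane multiply (pmulld does not exist before SSE4.1): pshufd [1,1,3,3] moves the odd lanes into pmuludq position, and pshufd [0,2,2,3] gathers the low dwords of the 64-bit products. A sketch under those assumptions:

    #include <emmintrin.h>

    static __m128i mullo_epi32_sse2(__m128i a, __m128i b) {
        __m128i a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 1, 1)); /* a[1,1,3,3] */
        __m128i b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(3, 3, 1, 1));
        __m128i p02 = _mm_mul_epu32(a, b);       /* 64-bit products of lanes 0,2 */
        __m128i p13 = _mm_mul_epu32(a13, b13);   /* 64-bit products of lanes 1,3 */
        __m128i l02 = _mm_shuffle_epi32(p02, _MM_SHUFFLE(3, 2, 2, 0)); /* p[0,2,2,3] */
        __m128i l13 = _mm_shuffle_epi32(p13, _MM_SHUFFLE(3, 2, 2, 0));
        return _mm_unpacklo_epi32(l02, l13);     /* interleave the four low dwords */
    }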
pr23246.ll
    10  ; CHECK-NEXT: pshufd {{.*}} xmm0 = xmm0[0,1,0,1]
vector-idiv.ll
    11  ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
    12  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    15  ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    28  ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
    29  ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
    30  ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
    32  ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
    62  ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
    63  ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
    67  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3 [all...]
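vector-idiv.ll exercises division by constants, which lowers to a multiply by a magic number followed by shifts; the same [1,1,3,3] trick as in pmul.ll is used, except the high dwords of the products are kept. A hedged sketch of that building block (names mine), with one magic constant I can vouch for:

    #include <emmintrin.h>

    /* high 32 bits of each unsigned 32x32 product */
    static __m128i mulhi_epu32(__m128i a, __m128i b) {
        __m128i a13 = _mm_shuffle_epi32(a, _MM_SHUFFLE(3, 3, 1, 1)); /* a[1,1,3,3] */
        __m128i b13 = _mm_shuffle_epi32(b, _MM_SHUFFLE(3, 3, 1, 1));
        __m128i h02 = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);       /* highs -> lanes 0,2 */
        __m128i h13 = _mm_srli_epi64(_mm_mul_epu32(a13, b13), 32);
        __m128i g02 = _mm_shuffle_epi32(h02, _MM_SHUFFLE(3, 2, 2, 0));
        __m128i g13 = _mm_shuffle_epi32(h13, _MM_SHUFFLE(3, 2, 2, 0));
        return _mm_unpacklo_epi32(g02, g13);
    }

    /* x / 3 for each unsigned lane: mulhi by 0xAAAAAAAB, then shift right by 1 */
    static __m128i udiv3_epu32(__m128i x) {
        return _mm_srli_epi32(mulhi_epu32(x, _mm_set1_epi32((int)0xAAAAAAABu)), 1);
    }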
vector-shuffle-mmx.ll
    11  ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
    18  ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
    46  ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    62  ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
    95  ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
fold-load-vec.ll
     4  ; We should not fold movss into pshufd since pshufd expects m128 while movss
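The truncated comment is about operand widths: the memory form of pshufd always performs a 16-byte load, while movss loads only 4 bytes, so folding the movss into the pshufd would read 12 bytes the program never dereferenced. Illustrative C for the safe shape (names are mine):

    #include <emmintrin.h>

    /* movss (4-byte load) followed by a register-to-register pshufd.
       Folding the load into pshufd would widen it to a 16-byte access,
       which can fault if p is the last float on a mapped page. */
    static __m128i splat_lane0(const float *p) {
        __m128 s = _mm_load_ss(p);
        return _mm_shuffle_epi32(_mm_castps_si128(s), _MM_SHUFFLE(0, 0, 0, 0));
    }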
trunc-ext-ld-st.ll
    23  ;CHECK: pshufd
    36  ;CHECK: pshufd
vector-sext.ll
    77  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
    81  ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
    86  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
    90  ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
   100  ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
   104  ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
   109  ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
   113  ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
   131  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
   164  ; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3 [all...]
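The SSE2/SSSE3 runs are full of pshufd because those targets lack pmovsx: sign extension has to be synthesized from shuffles plus arithmetic shifts, while the SSE41 run needs far fewer. The tests' exact shuffle sequence differs, but one correct SSE2 widening of v4i32 to two v2i64 looks like this (a sketch, not the test's codegen):

    #include <emmintrin.h>

    static void sext_v4i32_to_v2i64x2(__m128i x, __m128i *lo, __m128i *hi) {
        __m128i sign = _mm_srai_epi32(x, 31);  /* each lane -> 0 or -1 */
        *lo = _mm_unpacklo_epi32(x, sign);     /* [x0, s0, x1, s1] */
        *hi = _mm_unpackhi_epi32(x, sign);     /* [x2, s2, x3, s3] */
    }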
pointer-vector.ll
     8  ;CHECK: pshufd
    17  ;CHECK: pshufd
    26  ;CHECK: pshufd
    45  ;CHECK: pshufd
ret-mmx.ll
    37  ; CHECK-NOT: pshufd
/external/boringssl/win-x86_64/crypto/aes/

bsaes-x86_64.asm
   332  pshufd xmm7,xmm15,0x93
   333  pshufd xmm8,xmm0,0x93
   335  pshufd xmm9,xmm3,0x93
   337  pshufd xmm10,xmm5,0x93
   339  pshufd xmm11,xmm2,0x93
   341  pshufd xmm12,xmm6,0x93
   343  pshufd xmm13,xmm1,0x93
   345  pshufd xmm14,xmm4,0x93
   352  pshufd xmm15,xmm15,0x4E
   354  pshufd xmm0,xmm0,0x4 [all...]
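Two immediates do all the work in this block: 0x93 = src[3,0,1,2] rotates the four dwords by one lane, and 0x4E = src[2,3,0,1] swaps the 64-bit halves — apparently the word rotations the bit-sliced AES rounds need across all eight state registers. In intrinsics:

    #include <emmintrin.h>

    static __m128i rot_dwords(__m128i v)  { return _mm_shuffle_epi32(v, 0x93); } /* v[3,0,1,2] */
    static __m128i swap_qwords(__m128i v) { return _mm_shuffle_epi32(v, 0x4E); } /* v[2,3,0,1] */

The linux-x86_64 and mac-x86_64 listings further down are the same generated code in AT&T syntax, with the immediates printed in decimal ($147 and $78).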
/external/libvpx/libvpx/third_party/libyuv/source/

compare_win.cc
    50  pshufd xmm1, xmm0, 0xee
    52  pshufd xmm1, xmm0, 0x01
   176  pshufd xmm2, xmm1, 0x0e // upper 2 dwords
   178  pshufd xmm2, xmm1, 0x01
   214  pshufd xmm2, xmm1, 0x0e // upper 2 dwords
   216  pshufd xmm2, xmm1, 0x01
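These pairs implement a horizontal sum of the four dword accumulator lanes (e.g. in the file's SumSquareError kernel): 0xee — or 0x0e, since the upper result lanes are don't-care — folds the upper two dwords onto the lower two, then 0x01 folds lane 1 onto lane 0, leaving the total where movd can read it. The same reduction in C:

    #include <emmintrin.h>

    static unsigned hsum_epu32(__m128i v) {
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0xEE)); /* v[2,3,2,3]: fold upper half */
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0x01)); /* v[1,0,0,0]: fold lane 1 */
        return (unsigned)_mm_cvtsi128_si32(v);            /* movd */
    }

compare_posix.cc below is the same code as GCC inline assembly.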
compare_posix.cc
    46  "pshufd $0xee,%%xmm0,%%xmm1 \n"
    48  "pshufd $0x1,%%xmm0,%%xmm1 \n"
   129  "pshufd $0xe,%%xmm1,%%xmm2 \n"
   131  "pshufd $0x1,%%xmm1,%%xmm2 \n"
/external/libvpx/libvpx/vp8/common/x86/

idctllm_sse2.asm
   151  pshufd xmm0, xmm0, 11011000b
   152  pshufd xmm1, xmm4, 11011000b
   158  pshufd xmm2, xmm2, 11011000b
   159  pshufd xmm3, xmm4, 11011000b
   225  pshufd xmm0, xmm2, 11011000b
   226  pshufd xmm2, xmm1, 11011000b
   228  pshufd xmm1, xmm5, 11011000b
   229  pshufd xmm3, xmm7, 11011000b
   303  pshufd xmm0, xmm2, 11011000b
   304  pshufd xmm2, xmm1, 11011000 [all...]
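11011000b is 0xD8 = src[0,2,1,3], which swaps the middle two dwords — here apparently re-pairing packed 16-bit coefficients between the row and column passes of the IDCT. Decoded in C:

    #include <emmintrin.h>

    /* [0,2,1,3]: lanes 1 and 2 trade places */
    static __m128i swap_middle_dwords(__m128i v) {
        return _mm_shuffle_epi32(v, 0xD8);
    }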
iwalsh_sse2.asm
    30  pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
    57  pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
    60  pshufd xmm0, xmm0, 0 ;03 03 03 03 03 03 03 03
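4eh is the same half-swap seen in the bsaes listings (src[2,3,0,1], here pairing ip[8..11] with ip[12..15] for the butterfly), and immediate 0 is the degenerate shuffle src[0,0,0,0], broadcasting dword 0 — per the "03 03 ..." comment, splatting a packed pair of 16-bit rounding constants:

    #include <emmintrin.h>

    static __m128i splat_dword0(__m128i v) {
        return _mm_shuffle_epi32(v, 0x00); /* all four lanes = v[0] */
    }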
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/

idctllm_sse2.asm
   151  pshufd xmm0, xmm0, 11011000b
   152  pshufd xmm1, xmm4, 11011000b
   158  pshufd xmm2, xmm2, 11011000b
   159  pshufd xmm3, xmm4, 11011000b
   225  pshufd xmm0, xmm2, 11011000b
   226  pshufd xmm2, xmm1, 11011000b
   228  pshufd xmm1, xmm5, 11011000b
   229  pshufd xmm3, xmm7, 11011000b
   303  pshufd xmm0, xmm2, 11011000b
   304  pshufd xmm2, xmm1, 11011000 [all...]

iwalsh_sse2.asm
    30  pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
    57  pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
    60  pshufd xmm0, xmm0, 0 ;03 03 03 03 03 03 03 03
/external/boringssl/linux-x86_64/crypto/aes/

bsaes-x86_64.S
   330  pshufd $147,%xmm15,%xmm7
   331  pshufd $147,%xmm0,%xmm8
   333  pshufd $147,%xmm3,%xmm9
   335  pshufd $147,%xmm5,%xmm10
   337  pshufd $147,%xmm2,%xmm11
   339  pshufd $147,%xmm6,%xmm12
   341  pshufd $147,%xmm1,%xmm13
   343  pshufd $147,%xmm4,%xmm14
   350  pshufd $78,%xmm15,%xmm15
   352  pshufd $78,%xmm0,%xmm [all...]
/external/boringssl/mac-x86_64/crypto/aes/

bsaes-x86_64.S
   328  pshufd $147,%xmm15,%xmm7
   329  pshufd $147,%xmm0,%xmm8
   331  pshufd $147,%xmm3,%xmm9
   333  pshufd $147,%xmm5,%xmm10
   335  pshufd $147,%xmm2,%xmm11
   337  pshufd $147,%xmm6,%xmm12
   339  pshufd $147,%xmm1,%xmm13
   341  pshufd $147,%xmm4,%xmm14
   348  pshufd $78,%xmm15,%xmm15
   350  pshufd $78,%xmm0,%xmm [all...]
/external/libvpx/libvpx/vp8/encoder/x86/

fwalsh_sse2.asm
    84  pshufd xmm4, xmm0, 0xd8 ; d11 d10 a11 a10
    85  pshufd xmm5, xmm2, 0xd8 ; c11 c10 b11 b10
    86  pshufd xmm6, xmm1, 0x72 ; d13 d12 a13 a12
    87  pshufd xmm7, xmm3, 0x72 ; c13 c12 b13 b12
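Here 0xd8 selects src[0,2,1,3] and 0x72 selects src[2,0,3,1]: both gather the same dword pairs, in the two possible orders within each 64-bit half — per the lane comments, collecting the a/d and b/c half-rows after the first butterfly pass of the forward Walsh-Hadamard transform. Decoded:

    #include <emmintrin.h>

    static __m128i pick_0213(__m128i v) { return _mm_shuffle_epi32(v, 0xD8); } /* v[0,2,1,3] */
    static __m128i pick_2031(__m128i v) { return _mm_shuffle_epi32(v, 0x72); } /* v[2,0,3,1] */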