/external/llvm/test/CodeGen/X86/
vec_compare.ll
  48 ; CHECK: pshufd $177
  59 ; CHECK: pshufd $177
  80 ; CHECK: pshufd $160
  82 ; CHECK: pshufd $245
  84 ; CHECK: pshufd $245
  97 ; CHECK: pshufd $160
  99 ; CHECK: pshufd $245
  101 ; CHECK: pshufd $245
  114 ; CHECK: pshufd $160
  116 ; CHECK: pshufd $24 [all...]
pshufd-combine-crash.ll
  6 ; v4f32 X86ISD::PSHUFD nodes with a single PSHUFD.
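pshufd-combine-crash.ll covers the DAG combine that replaces a chain of PSHUFD nodes with a single one. This works because the 8-bit immediate packs four 2-bit lane selectors (result dword i comes from source dword (imm >> 2*i) & 3), so two shuffles compose into one immediate. A minimal scalar sketch of that composition, assuming nothing beyond the immediate encoding (the model and function names are mine, not from the test):

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar model of pshufd: result dword i = src dword ((imm >> 2*i) & 3). */
    static void pshufd_model(uint32_t dst[4], const uint32_t src[4], uint8_t imm) {
        for (int i = 0; i < 4; i++)
            dst[i] = src[(imm >> (2 * i)) & 3];
    }

    /* Fold two immediates: shuffling by m1 then m2 equals one shuffle by this. */
    static uint8_t compose(uint8_t m1, uint8_t m2) {
        uint8_t r = 0;
        for (int i = 0; i < 4; i++) {
            int outer = (m2 >> (2 * i)) & 3;     /* lane the second shuffle reads */
            int inner = (m1 >> (2 * outer)) & 3; /* lane the first shuffle put there */
            r |= (uint8_t)(inner << (2 * i));
        }
        return r;
    }

    int main(void) {
        uint32_t a[4] = {10, 11, 12, 13}, t[4], chained[4], fused[4];
        pshufd_model(t, a, 0xB1);                    /* $177: [1,0,3,2] */
        pshufd_model(chained, t, 0xB1);              /* two shuffles...   */
        pshufd_model(fused, a, compose(0xB1, 0xB1)); /* ...fused into one */
        printf("%u %u\n", chained[0], fused[0]);     /* identical results */
        return 0;
    }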
avx-trunc.ll
  5 ; CHECK: pshufd
  6 ; CHECK: pshufd
lower-bitcast.ll
  13 ; pshufd+paddq+pshufd. This is fixed with the widening legalization.
  17 ; CHECK: pshufd
  19 ; CHECK-NEXT: pshufd
  53 ; CHECK-NOT: pshufd
  55 ; CHECK-NOT: pshufd
  59 ; CHECK-WIDE-NOT: pshufd
  61 ; CHECK-WIDE-NOT: pshufd
  71 ; FIXME: At the moment we still produce the sequence pshufd+paddd+pshufd [all...]
SwizzleShuff.ll
  17 ; CHECK: pshufd
  18 ; CHECK-NEXT: pshufd
  20 ; CHECK-NEXT: pshufd
  21 ; CHECK-NEXT: pshufd
  49 ; CHECK-NOT: pshufd
  61 ; CHECK: pshufd
2011-05-09-loaduse.ll
  4 ;CHECK-NOT: pshufd
widen_conv-2.ll
  4 ; CHECK: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
combine-multiplies.ll
  77 ; pshufd $245, %xmm0, %xmm3 # xmm3 = xmm0[1,1,3,3]
  79 ; pshufd $232, %xmm0, %xmm0 # xmm0 = xmm0[0,2,2,3]
  81 ; pshufd $232, %xmm3, %xmm2 # xmm2 = xmm3[0,2,2,3]
  100 ; CHECK-NEXT: pshufd $245, %xmm0, [[T1:%xmm[0-9]]]
  102 ; CHECK-NEXT: pshufd $232, [[T2]], [[T3:%xmm[0-9]]]
  104 ; CHECK-NEXT: pshufd $232, [[T4]], [[T5:%xmm[0-9]]]
  138 ; CHECK-NEXT: pshufd $245, %xmm0, [[T1:%xmm[0-9]]]
  140 ; CHECK-NEXT: pshufd $232, [[T2]], [[T3:%xmm[0-9]]]
  141 ; CHECK-NEXT: pshufd $245, [[C22]], [[T7:%xmm[0-9]]]
  143 ; CHECK-NEXT: pshufd $232, [[T7]], [[T5:%xmm[0-9]] [all...]
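The $245/$232 pairing that combine-multiplies.ll checks is the stock SSE2 lowering of a four-lane 32-bit multiply: pmuludq only multiplies the even dwords, so the odd dwords are shuffled down ($245 = [1,1,3,3]), multiplied separately, and the low halves of both 64-bit products packed back together ($232 = [0,2,2,3]). A hedged C intrinsics sketch of the same idiom (the function name is mine):

    #include <emmintrin.h>

    /* Four-lane 32-bit multiply on plain SSE2, which has no pmulld. */
    static __m128i mullo_epi32_sse2(__m128i a, __m128i b) {
        __m128i even = _mm_mul_epu32(a, b);             /* 64-bit products of lanes 0,2 */
        __m128i odd  = _mm_mul_epu32(_mm_shuffle_epi32(a, 0xF5),  /* $245: [1,1,3,3] */
                                     _mm_shuffle_epi32(b, 0xF5));
        even = _mm_shuffle_epi32(even, 0xE8);           /* $232: [0,2,2,3], keep low dwords */
        odd  = _mm_shuffle_epi32(odd,  0xE8);
        return _mm_unpacklo_epi32(even, odd);
    }

vector-idiv.ll below exercises the same pmuludq/pshufd dance, there to recover the high halves of the products for division by a constant.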
pr23246.ll
  10 ; CHECK-NEXT: pshufd {{.*}} xmm0 = xmm0[0,1,0,1]
vector-idiv.ll
  12 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
  13 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
  16 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  29 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
  30 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
  31 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
  33 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
  63 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
  64 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
  68 ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3 [all...]
vector-shuffle-mmx.ll
  12 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
  19 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
  47 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
  63 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
  96 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
vec_cmp_sint-128.ll
  20 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
  116 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
  236 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
  238 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
  240 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
  253 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
  255 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
  257 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
  370 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
  372 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3 [all...]
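The recurring [1,0,3,2] shuffle in vec_cmp_sint-128.ll (and the $177 lines in vec_compare.ll above, since 177 = 0xB1 encodes exactly those lanes) is how SSE2 fakes a 64-bit equality compare: pcmpeqd compares dword-wise, then each dword is ANDed with its neighbour so a quadword lane is all-ones only when both of its halves matched. A short sketch under those assumptions (function name mine):

    #include <emmintrin.h>

    /* 64-bit lane equality without SSE4.1's pcmpeqq. */
    static __m128i cmpeq_epi64_sse2(__m128i a, __m128i b) {
        __m128i eq = _mm_cmpeq_epi32(a, b);
        return _mm_and_si128(eq, _mm_shuffle_epi32(eq, 0xB1)); /* $177: [1,0,3,2] */
    }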
fold-load-vec.ll
  4 ; We should not fold movss into pshufd since pshufd expects m128 while movss
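The point fold-load-vec.ll pins down is an operand-width mismatch: movss reads exactly 4 bytes, while pshufd with a memory operand performs a full 16-byte (m128) access, so folding the load into the shuffle would widen the read and could touch unmapped memory past a 4-byte buffer. A small illustration of the safe shape, keeping the shuffle register-only (names mine):

    #include <emmintrin.h>

    /* Load 4 bytes with movss, then broadcast in registers; folding the load
     * into pshufd would instead demand a 16-byte memory access. */
    static __m128i splat_low_dword(const float *p) {
        __m128 s = _mm_load_ss(p);                           /* 4-byte read */
        return _mm_shuffle_epi32(_mm_castps_si128(s), 0x00); /* register-only pshufd */
    }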
trunc-ext-ld-st.ll
  23 ;CHECK: pshufd
  36 ;CHECK: pshufd
/external/gemmlowp/internal/
kernel_SSE.h
  93 "pshufd $0x00,%%xmm1,%%xmm2 \n\t"
  96 "pshufd $0x55,%%xmm1,%%xmm3 \n\t"
  102 "pshufd $0xaa,%%xmm1,%%xmm2 \n\t"
  105 "pshufd $0xff,%%xmm1,%%xmm3 \n\t"
  117 "pshufd $0x00,%%xmm1,%%xmm2 \n\t"
  120 "pshufd $0x55,%%xmm1,%%xmm3 \n\t"
  124 "pshufd $0xaa,%%xmm1,%%xmm2 \n\t"
  127 "pshufd $0xff,%%xmm1,%%xmm3 \n\t"
  149 "pshufd $0x00,%%xmm1,%%xmm2 \n\t"
  152 "pshufd $0x55,%%xmm1,%%xmm3 \n\t [all...]
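The gemmlowp kernel's $0x00/$0x55/$0xaa/$0xff ladder is the standard rank-one-update trick: each immediate replicates one 32-bit lane of the RHS vector into all four positions so it can be multiplied against a whole LHS column. The same splats in C intrinsics (a sketch; the helper is mine, not gemmlowp's API):

    #include <emmintrin.h>

    /* Broadcast each 32-bit lane of r in turn, as the inline-asm kernel does. */
    static void splat_all_lanes(__m128i r, __m128i out[4]) {
        out[0] = _mm_shuffle_epi32(r, 0x00); /* [0,0,0,0] */
        out[1] = _mm_shuffle_epi32(r, 0x55); /* [1,1,1,1] */
        out[2] = _mm_shuffle_epi32(r, 0xAA); /* [2,2,2,2] */
        out[3] = _mm_shuffle_epi32(r, 0xFF); /* [3,3,3,3] */
    }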
/external/libvpx/libvpx/vpx_dsp/x86/
inv_wht_sse2.asm
  54 pshufd m1, m0, 0x0e
  55 pshufd m3, m2, 0x0e
  66 pshufd m1, m0, 0x0e
  67 pshufd m3, m2, 0x0e
/external/boringssl/win-x86_64/crypto/aes/
bsaes-x86_64.asm
  332 pshufd xmm7,xmm15,0x93
  333 pshufd xmm8,xmm0,0x93
  335 pshufd xmm9,xmm3,0x93
  337 pshufd xmm10,xmm5,0x93
  339 pshufd xmm11,xmm2,0x93
  341 pshufd xmm12,xmm6,0x93
  343 pshufd xmm13,xmm1,0x93
  345 pshufd xmm14,xmm4,0x93
  352 pshufd xmm15,xmm15,0x4E
  354 pshufd xmm0,xmm0,0x4 [all...]
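The two immediates the bitsliced AES code cycles through are pure lane permutations: 0x93 = [3,0,1,2] rotates the four dwords by one position, and 0x4e = [2,3,0,1] swaps the 64-bit halves; reading them as row rotations of the bitsliced state is my interpretation of the surrounding code. Decoded as C intrinsics (helper names mine):

    #include <emmintrin.h>

    static __m128i rotate_dwords(__m128i v) { return _mm_shuffle_epi32(v, 0x93); } /* [3,0,1,2] */
    static __m128i swap_qwords(__m128i v)   { return _mm_shuffle_epi32(v, 0x4E); } /* [2,3,0,1] */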
/external/libvpx/libvpx/third_party/libyuv/source/
compare_win.cc
  51 pshufd xmm1, xmm0, 0xee
  53 pshufd xmm1, xmm0, 0x01
  174 pshufd xmm2, xmm1, 0x0e // upper 2 dwords
  176 pshufd xmm2, xmm1, 0x01
  211 pshufd xmm2, xmm1, 0x0e // upper 2 dwords
  213 pshufd xmm2, xmm1, 0x01
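The 0xee/0x0e and 0x01 pairs in compare_win.cc are a horizontal-add tail: fold the upper two dwords onto the lower two, then fold lane 1 onto lane 0, leaving the total in the bottom lane, which matches the "upper 2 dwords" comments above. The same reduction with intrinsics (function name mine):

    #include <emmintrin.h>

    /* Sum the four dwords of v, mirroring the pshufd/paddd tail above. */
    static int hsum_epi32(__m128i v) {
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0x0E)); /* upper 2 dwords down */
        v = _mm_add_epi32(v, _mm_shuffle_epi32(v, 0x01)); /* lane 1 down */
        return _mm_cvtsi128_si32(v);
    }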
/external/libvpx/libvpx/vp8/common/x86/
idctllm_sse2.asm
  151 pshufd xmm0, xmm0, 11011000b
  152 pshufd xmm1, xmm4, 11011000b
  158 pshufd xmm2, xmm2, 11011000b
  159 pshufd xmm3, xmm4, 11011000b
  225 pshufd xmm0, xmm2, 11011000b
  226 pshufd xmm2, xmm1, 11011000b
  228 pshufd xmm1, xmm5, 11011000b
  229 pshufd xmm3, xmm7, 11011000b
  303 pshufd xmm0, xmm2, 11011000b
  304 pshufd xmm2, xmm1, 11011000 [all...]
iwalsh_sse2.asm
  30 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
  57 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
  60 pshufd xmm0, xmm0, 0 ;03 03 03 03 03 03 03 03
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/ |
idctllm_sse2.asm
  151 pshufd xmm0, xmm0, 11011000b
  152 pshufd xmm1, xmm4, 11011000b
  158 pshufd xmm2, xmm2, 11011000b
  159 pshufd xmm3, xmm4, 11011000b
  225 pshufd xmm0, xmm2, 11011000b
  226 pshufd xmm2, xmm1, 11011000b
  228 pshufd xmm1, xmm5, 11011000b
  229 pshufd xmm3, xmm7, 11011000b
  303 pshufd xmm0, xmm2, 11011000b
  304 pshufd xmm2, xmm1, 11011000 [all...]
iwalsh_sse2.asm
  30 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
  57 pshufd xmm2, xmm1, 4eh ;ip[8] ip[12]
  60 pshufd xmm0, xmm0, 0 ;03 03 03 03 03 03 03 03
/prebuilts/go/darwin-x86/src/crypto/aes/
asm_amd64.s
  223 PSHUFD $0xff, X1, X1
  236 PSHUFD $0x55, X1, X1
  246 PSHUFD $0xff, X0, X3
  259 PSHUFD $0x55, X1, X1
  268 PSHUFD $0xff, X0, X3
  280 PSHUFD $0xaa, X1, X1
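In Go's AES key expansion, PSHUFD $0xff broadcasts the top dword of the AESKEYGENASSIST result (the rotated, S-boxed word XORed with the round constant) to every lane before it is folded into the shifted previous round key; the $0x55 and $0xaa variants appear to serve the 192- and 256-bit schedules the same way. A sketch of one AES-128 expansion step following the well-known AES-NI pattern (function name mine; the linux-x86 copy of asm_amd64.s below is identical):

    #include <emmintrin.h>
    #include <wmmintrin.h>

    /* One AES-128 key-expansion step; `assist` is the aeskeygenassist output. */
    static __m128i expand_step(__m128i key, __m128i assist) {
        assist = _mm_shuffle_epi32(assist, 0xFF);         /* broadcast top dword */
        key = _mm_xor_si128(key, _mm_slli_si128(key, 4)); /* fold in shifted words */
        key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
        key = _mm_xor_si128(key, _mm_slli_si128(key, 4));
        return _mm_xor_si128(key, assist);
    }
    /* e.g. round 1: expand_step(k0, _mm_aeskeygenassist_si128(k0, 0x01)) */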
/prebuilts/go/linux-x86/src/crypto/aes/
asm_amd64.s
  223 PSHUFD $0xff, X1, X1
  236 PSHUFD $0x55, X1, X1
  246 PSHUFD $0xff, X0, X3
  259 PSHUFD $0x55, X1, X1
  268 PSHUFD $0xff, X0, X3
  280 PSHUFD $0xaa, X1, X1
/external/boringssl/linux-x86_64/crypto/aes/
bsaes-x86_64.S
  330 pshufd $147,%xmm15,%xmm7
  331 pshufd $147,%xmm0,%xmm8
  333 pshufd $147,%xmm3,%xmm9
  335 pshufd $147,%xmm5,%xmm10
  337 pshufd $147,%xmm2,%xmm11
  339 pshufd $147,%xmm6,%xmm12
  341 pshufd $147,%xmm1,%xmm13
  343 pshufd $147,%xmm4,%xmm14
  350 pshufd $78,%xmm15,%xmm15
  352 pshufd $78,%xmm0,%xmm [all...]