/external/llvm/test/CodeGen/X86/
sse2.ll:
    9: ; CHECK-NEXT: movapd (%ecx), %xmm0
    10: ; CHECK-NEXT: movlpd {{[0-9]+}}(%esp), %xmm0
    11: ; CHECK-NEXT: movapd %xmm0, (%eax)
    25: ; CHECK-NEXT: movapd (%ecx), %xmm0
    26: ; CHECK-NEXT: movhpd {{[0-9]+}}(%esp), %xmm0
    27: ; CHECK-NEXT: movapd %xmm0, (%eax)
    43: ; CHECK-NEXT: movaps (%edx), %xmm0
    44: ; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
    [all...]
commute-clmul.ll:
    8: ;SSE: pclmulqdq $0, (%rdi), %xmm0
    12: ;AVX: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
    22: ;SSE: pclmulqdq $1, (%rdi), %xmm0
    26: ;AVX: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
    36: ;SSE: pclmulqdq $16, (%rdi), %xmm0
    40: ;AVX: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
    50: ;SSE: pclmulqdq $17, (%rdi), %xmm0
    [all...]
pr22774.ll:
    9: ; CHECK-NEXT: vmovq %xmm0, %xmm0
    10: ; CHECK-NEXT: vmovdqa %xmm0, out(%rip)
pr23246.ll:
    9: ; CHECK: movq2dq %mm0, %xmm0
    10: ; CHECK-NEXT: pshufd {{.*}} xmm0 = xmm0[0,1,0,1]
sse41.ll:
    9: ; X32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
    14: ; X64-NEXT: pinsrd $1, %edi, %xmm0
    23: ; X32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
    28: ; X64-NEXT: pinsrb $1, %edi, %xmm0
    38: ; X32-NEXT: pmovsxbd (%eax), %xmm0
    43: ; X64-NEXT: pmovsxbd (%rdi), %xmm0
    61: ; X32-NEXT: pmovsxwd (%eax), %xmm0
    66: ; X64-NEXT: pmovsxwd (%rdi), %xmm0
    81: ; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
    87: ; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
    [all...]
fma_patterns.ll:
    6: ; CHECK: vfmadd213ps %xmm2, %xmm1, %xmm0
    9: ; CHECK_FMA4: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
    18: ; CHECK: fmsub213ps %xmm2, %xmm1, %xmm0
    21: ; CHECK_FMA4: vfmsubps %xmm2, %xmm1, %xmm0, %xmm0
    30: ; CHECK: fnmadd213ps %xmm2, %xmm1, %xmm0
    33: ; CHECK_FMA4: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
    42: ; CHECK: fnmsub213ps %xmm2, %xmm1, %xmm0
    [all...]
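
For context on the fma_patterns.ll checks above: they verify that a separate vector multiply and add are contracted into a single FMA instruction. A minimal C sketch of the kind of source pattern involved (the function name and the plain loop are illustrative assumptions, not taken from the test):

    /* Illustrative only: built with FMA enabled and contraction allowed
     * (e.g. -mfma -ffp-contract=fast), the multiply-add below is a candidate
     * for a single vfmadd* instruction, which is the pattern checked above. */
    #include <stddef.h>

    void mul_add(float *a, const float *b, const float *c, size_t n) {
        for (size_t i = 0; i < n; i++)
            a[i] = a[i] * b[i] + c[i];   /* a*b + c -> fused multiply-add */
    }
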
lower-vec-shift-2.ll:
    10: ; SSE2-NEXT: psllw %xmm1, %xmm0
    17: ; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
    30: ; SSE2-NEXT: pslld %xmm2, %xmm0
    37: ; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
    48: ; SSE2-NEXT: psllq %xmm1, %xmm0
    53: ; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
    67: ; SSE2-NEXT: psrlw %xmm1, %xmm0
    [all...]
sse41-pmovxrm-intrinsics.ll:
    6: ; SSE41: pmovsxbw (%rdi), %xmm0
    7: ; AVX: vpmovsxbw (%rdi), %xmm0
    15: ; SSE41: pmovsxbd (%rdi), %xmm0
    16: ; AVX: vpmovsxbd (%rdi), %xmm0
    24: ; SSE41: pmovsxbq (%rdi), %xmm0
    25: ; AVX: vpmovsxbq (%rdi), %xmm0
    33: ; SSE41: pmovsxwd (%rdi), %xmm0
    34: ; AVX: vpmovsxwd (%rdi), %xmm0
    42: ; SSE41: pmovsxwq (%rdi), %xmm0
    43: ; AVX: vpmovsxwq (%rdi), %xmm0
    [all...]
vec_set-3.ll:
    5: ; CHECK: insertps $29, {{.*}}, %xmm0
    17: ; CHECK: movd {{.*}}, %xmm0
    18: ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
    30: ; CHECK: insertps {{.*#+}} xmm0 = zero,xmm0[0],zero,zero
2007-03-24-InlineAsmVectorOp.ll:
    6: ; CHECK: {{cmpltsd %xmm0, %xmm0}}
commute-blend-sse41.ll:
    9: ;CHECK: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
    20: ;CHECK: blendps {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3]
    31: ;CHECK: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
fold-load-unops.ll:
    10: ; CHECK: vrcpss (%rdi), %xmm0, %xmm0
    21: ; CHECK: vrsqrtss (%rdi), %xmm0, %xmm0
    32: ; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
    43: ; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
sse2-intrinsics-x86-upgrade.ll:
    4: ; CHECK: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
    12: ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
    19: ; CHECK: pslldq {{.*#+}} xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
    27: ; CHECK: psrldq {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
sse-align-12.ll:
    6: ; CHECK-NEXT: movups (%rdi), %xmm0
    7: ; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
    25: ; CHECK-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
    42: ; CHECK-NEXT: movupd (%rdi), %xmm0
    43: ; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
    57: ; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
    [all...]
break-false-dep.ll:
    8: ; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
    9: ; SSE: cvtss2sd %xmm0, %xmm0
    19: ; SSE: cvtsd2ss ([[A0]]), %xmm0
    28: ; SSE: movss ([[A0]]), %xmm0
    29: ; SSE: sqrtss %xmm0, %xmm0
    38: ; SSE: movsd ([[A0]]), %xmm0
    39: ; SSE: sqrtsd %xmm0, %xmm0
    [all...]
/external/boringssl/win-x86_64/crypto/aes/
aesni-x86_64.asm:
    14: movups xmm0,XMMWORD[r8]
    17: xorps xmm2,xmm0
    25: pxor xmm0,xmm0
    38: movups xmm0,XMMWORD[r8]
    41: xorps xmm2,xmm0
    49: pxor xmm0,xmm0
    58: movups xmm0,XMMWORD[rcx]
    61: xorps xmm2,xmm0
    [all...]
/bionic/libc/arch-x86_64/string/
sse2-strlcpy-slm.S:
    84: pxor %xmm0, %xmm0
    87: pcmpeqb %xmm1, %xmm0
    88: pmovmskb %xmm0, %rdx
    97: pcmpeqb %xmm1, %xmm0
    98: pmovmskb %xmm0, %rdx
    114: pcmpeqb %xmm1, %xmm0
    115: pmovmskb %xmm0, %rdx
    179: pxor %xmm0, %xmm0
    [all...]
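
The sse2-strlcpy-slm.S matches above show the usual SSE2 terminator scan: zero a register with pxor, compare 16 loaded bytes against it with pcmpeqb, then extract a bitmask with pmovmskb. A small C intrinsics sketch of that idea (not the bionic code itself; the function name is made up):

    #include <emmintrin.h>   /* SSE2 intrinsics */
    #include <stddef.h>

    /* Return the index of the first NUL byte in a 16-byte block, or 16 if
     * the block contains no NUL. Assumes p points at 16 readable bytes. */
    static size_t first_nul_in_block(const char *p) {
        __m128i zero = _mm_setzero_si128();                        /* pxor */
        __m128i data = _mm_loadu_si128((const __m128i *)p);
        int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(data, zero));  /* pcmpeqb + pmovmskb */
        return mask ? (size_t)__builtin_ctz(mask) : 16;            /* first set bit */
    }

__builtin_ctz is the GCC/Clang builtin; a portable variant would scan the mask bit by bit.
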
/bionic/libc/arch-x86/atom/string/
ssse3-wcscpy-atom.S:
    112: pxor %xmm0, %xmm0
    113: pcmpeqd (%esi), %xmm0
    117: pmovmskb %xmm0, %eax
    144: pcmpeqd %xmm2, %xmm0
    145: pmovmskb %xmm0, %eax
    153: pcmpeqd %xmm3, %xmm0
    154: pmovmskb %xmm0, %eax
    162: pcmpeqd %xmm4, %xmm0
    163: pmovmskb %xmm0, %eax
    [all...]
sse2-wcscmp-atom.S:
    125: pxor %xmm0, %xmm0 /* clear %xmm0 for null char checks */
    177: pcmpeqd %xmm1, %xmm0 /* Any null double_word? */
    179: psubb %xmm0, %xmm1 /* packed sub of comparison results*/
    186: pcmpeqd %xmm1, %xmm0 /* Any null double_word? */
    188: psubb %xmm0, %xmm1 /* packed sub of comparison results*/
    195: pcmpeqd %xmm1, %xmm0 /* Any null double_word? */
    197: psubb %xmm0, %xmm1 /* packed sub of comparison results*/
    244: pcmpeqd %xmm1, %xmm0 /* Any null double_word? */
    [all...]
/external/compiler-rt/lib/builtins/i386/
ashrdi3.S:
    17: movd 4(%esp), %xmm0
    19: punpckldq %xmm1, %xmm0 // Load input
    21: movq 4(%esp), %xmm0 // Load input
    24: psrlq %xmm2, %xmm0 // unsigned shift input by count
    37: por %xmm1, %xmm0
    40: 1: movd %xmm0, %eax
    41: psrlq $32, %xmm0
    42: movd %xmm0, %edx
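
ashrdi3.S is the compiler-rt helper __ashrdi3 (64-bit arithmetic shift right on i386), and the matches above show its SSE2 path: psrlq does the logical shift and por merges in a sign-fill term. A rough C model of that decomposition (a sketch under that reading, not the compiler-rt source):

    #include <stdint.h>

    /* Arithmetic shift right of a 64-bit value, built from a logical shift
     * plus an ORed-in block of sign bits, mirroring the psrlq + por path. */
    int64_t ashr64_model(int64_t x, unsigned count) {   /* count in 0..63 */
        uint64_t logical  = (uint64_t)x >> count;                  /* psrlq */
        uint64_t signfill = (x < 0 && count != 0)
                                ? ~0ULL << (64 - count)            /* top 'count' bits */
                                : 0;
        return (int64_t)(logical | signfill);                      /* por   */
    }
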
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
quantize_ssse3.asm:
    55: movdqa xmm0, [rax]
    62: movdqa xmm1, xmm0
    66: psraw xmm0, 15
    86: pxor xmm1, xmm0
    88: psubw xmm1, xmm0
    94: movdqa xmm0, [rdi]
    97: pmullw xmm0, xmm1
    108: movdqa [rcx], xmm0 ;store dqcoeff
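
The quantize_ssse3.asm lines above use the standard sign-mask trick: psraw by 15 turns each 16-bit lane into 0 or 0xFFFF depending on the coefficient's sign, and pxor followed by psubw with that mask conditionally negates the quantized lanes, restoring the original signs before the pmullw dequantization multiply. A scalar C sketch of the per-lane operation (illustrative only; the function name is made up):

    #include <stdint.h>

    /* Conditionally negate 'value' so it carries the sign of 'original':
     * the scalar equivalent of psraw $15 + pxor + psubw on one lane. */
    static int16_t apply_sign(int16_t value, int16_t original) {
        int16_t sign = (int16_t)(original >> 15);   /* 0 or -1 (0xFFFF) */
        return (int16_t)((value ^ sign) - sign);    /* xor then subtract */
    }
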
/external/boringssl/src/crypto/aes/asm/
vpaes-x86_64.pl:
    71: ## AES-encrypt %xmm0.
    74: ## %xmm0 = input
    78: ## Output in %xmm0
    91: pandn %xmm0, %xmm1
    94: pand %xmm9, %xmm0
    95: pshufb %xmm0, %xmm2
    96: movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
    97: pshufb %xmm1, %xmm0
    100: pxor %xmm2, %xmm0
    108: movdqa %xmm12, %xmm0 # 0 : sb1
    [all...]
/external/boringssl/linux-x86/crypto/modes/
ghash-x86.S:
    938: movdqa %xmm2,%xmm0
    939: movdqa %xmm0,%xmm1
    940: pshufd $78,%xmm0,%xmm3
    942: pxor %xmm0,%xmm3
    947: xorps %xmm0,%xmm3
    953: pxor %xmm4,%xmm0
    954: movdqa %xmm0,%xmm4
    955: movdqa %xmm0,%xmm3
    956: psllq $5,%xmm0
    957: pxor %xmm0,%xmm3
    [all...]
/external/boringssl/mac-x86/crypto/modes/
ghash-x86.S:
    929: movdqa %xmm2,%xmm0
    930: movdqa %xmm0,%xmm1
    931: pshufd $78,%xmm0,%xmm3
    933: pxor %xmm0,%xmm3
    938: xorps %xmm0,%xmm3
    944: pxor %xmm4,%xmm0
    945: movdqa %xmm0,%xmm4
    946: movdqa %xmm0,%xmm3
    947: psllq $5,%xmm0
    948: pxor %xmm0,%xmm3
    [all...]
/external/boringssl/win-x86/crypto/modes/
ghash-x86.asm:
    937: movdqa xmm0,xmm2
    938: movdqa xmm1,xmm0
    939: pshufd xmm3,xmm0,78
    941: pxor xmm3,xmm0
    946: xorps xmm3,xmm0
    952: pxor xmm0,xmm4
    953: movdqa xmm4,xmm0
    954: movdqa xmm3,xmm0
    955: psllq xmm0,5
    956: pxor xmm3,xmm0
    [all...]