    Searched full:xmm2 (Results 1 - 25 of 435)

  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_variance_impl_sse2.asm 37 movdqa xmm2, [rax+32]
41 pmaddwd xmm2, xmm2
45 paddd xmm2, xmm3
47 paddd xmm4, xmm2
129 movdqu xmm2, XMMWORD PTR [rdi]
135 movdqa xmm4, xmm2
141 punpcklbw xmm2, xmm0
145 psubw xmm1, xmm2
176 movdqa xmm2, xmm
    [all...]
vp9_sad_sse4.asm 19 movq xmm2, MMWORD PTR [rdi+16]
21 punpcklqdq xmm3, xmm2
23 movdqa xmm2, xmm1
25 mpsadbw xmm2, xmm0, 0x5
33 paddw xmm1, xmm2
40 movq xmm2, MMWORD PTR [rdi+16]
42 punpcklqdq xmm3, xmm2
44 movdqa xmm2, xmm5
46 mpsadbw xmm2, xmm0, 0x5
54 paddw xmm5, xmm2
    [all...]
  /art/runtime/arch/x86_64/
memcmp16_x86_64.S 57 movdqu (%rdi), %xmm2
58 pxor %xmm1, %xmm2
59 ptest %xmm2, %xmm0
76 movdqu (%rdi), %xmm2
77 pxor (%rsi), %xmm2
78 ptest %xmm2, %xmm0
81 movdqu 16(%rdi), %xmm2
82 pxor 16(%rsi), %xmm2
83 ptest %xmm2, %xmm0
86 movdqu 32(%rdi), %xmm2
    [all...]
  /bionic/libc/arch-x86_64/string/
sse4-memcmp-slm.S 109 movdqu (%rdi), %xmm2
110 pxor %xmm1, %xmm2
111 ptest %xmm2, %xmm0
128 movdqu (%rdi), %xmm2
129 pxor (%rsi), %xmm2
130 ptest %xmm2, %xmm0
133 movdqu 16(%rdi), %xmm2
134 pxor 16(%rsi), %xmm2
135 ptest %xmm2, %xmm0
138 movdqu 32(%rdi), %xmm2
    [all...]
  /external/llvm/test/MC/X86/
x86-32-avx.s 3 // CHECK: vaddss %xmm4, %xmm6, %xmm2
5 vaddss %xmm4, %xmm6, %xmm2
7 // CHECK: vmulss %xmm4, %xmm6, %xmm2
9 vmulss %xmm4, %xmm6, %xmm2
11 // CHECK: vsubss %xmm4, %xmm6, %xmm2
13 vsubss %xmm4, %xmm6, %xmm2
15 // CHECK: vdivss %xmm4, %xmm6, %xmm2
17 vdivss %xmm4, %xmm6, %xmm2
19 // CHECK: vaddsd %xmm4, %xmm6, %xmm2
21 vaddsd %xmm4, %xmm6, %xmm2
    [all...]
  /external/llvm/test/CodeGen/X86/
vector-zext.ll 11 ; SSE2-NEXT: pxor %xmm2, %xmm2
13 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
21 ; SSSE3-NEXT: pxor %xmm2, %xmm2
23 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3
    [all...]
bswap-vector.ll 17 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
18 ; CHECK-NOSSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15
    [all...]
lower-vec-shift-2.ll 15 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
16 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
28 ; SSE2-NEXT: xorps %xmm2, %xmm2
29 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
30 ; SSE2-NEXT: pslld %xmm2, %xmm0
35 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
variance_impl_ssse3.asm 69 movdqa xmm2, xmm0
72 punpckhbw xmm2, xmm1
74 pmaddubsw xmm2, [rax]
77 paddw xmm2, [GLOBAL(xmm_bi_rd)]
79 psraw xmm2, xmm_filter_shift
81 packuswb xmm0, xmm2
93 movdqu xmm2, XMMWORD PTR [rsi+1]
96 punpcklbw xmm1, xmm2
97 punpckhbw xmm3, xmm2
107 movdqa xmm2, xmm
    [all...]
mfqe_sse2.asm 49 movdqa xmm2, [rax]
54 movdqa xmm3, xmm2
55 punpcklbw xmm2, xmm6
57 pmullw xmm2, xmm0
68 paddw xmm2, xmm4
70 paddw xmm2, [GLOBAL(tMFQE_round)]
72 psrlw xmm2, 4
75 packuswb xmm2, xmm3
76 movdqa [rdx], xmm2
126 movq xmm2, [rax
    [all...]
idctllm_sse2.asm 61 movq xmm2, [rax+2*rdx]
66 punpcklbw xmm2, xmm5
73 paddw xmm2, xmm4
79 packuswb xmm2, xmm5
88 movq [rax], xmm2
126 ; note the transpose of xmm1 and xmm2, necessary for shuffle
129 movdqa xmm2, [rax+16]
141 pmullw xmm2, [rdx+16]
154 movdqa xmm4, xmm2
155 punpckldq xmm2, xmm
    [all...]
variance_impl_sse2.asm 39 movdqa xmm2, [rax+32]
43 pmaddwd xmm2, xmm2
47 paddd xmm2, xmm3
49 paddd xmm4, xmm2
131 movdqu xmm2, XMMWORD PTR [rdi]
137 movdqa xmm4, xmm2
143 punpcklbw xmm2, xmm0
147 psubw xmm1, xmm2
178 movdqa xmm2, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
variance_impl_ssse3.asm 69 movdqa xmm2, xmm0
72 punpckhbw xmm2, xmm1
74 pmaddubsw xmm2, [rax]
77 paddw xmm2, [GLOBAL(xmm_bi_rd)]
79 psraw xmm2, xmm_filter_shift
81 packuswb xmm0, xmm2
93 movdqu xmm2, XMMWORD PTR [rsi+1]
96 punpcklbw xmm1, xmm2
97 punpckhbw xmm3, xmm2
107 movdqa xmm2, xmm
    [all...]
mfqe_sse2.asm 49 movdqa xmm2, [rax]
54 movdqa xmm3, xmm2
55 punpcklbw xmm2, xmm6
57 pmullw xmm2, xmm0
68 paddw xmm2, xmm4
70 paddw xmm2, [GLOBAL(tMFQE_round)]
72 psrlw xmm2, 4
75 packuswb xmm2, xmm3
76 movdqa [rdx], xmm2
126 movq xmm2, [rax
    [all...]
idctllm_sse2.asm 61 movq xmm2, [rax+2*rdx]
66 punpcklbw xmm2, xmm5
73 paddw xmm2, xmm4
79 packuswb xmm2, xmm5
88 movq [rax], xmm2
126 ; note the transpose of xmm1 and xmm2, necessary for shuffle
129 movdqa xmm2, [rax+16]
141 pmullw xmm2, [rdx+16]
154 movdqa xmm4, xmm2
155 punpckldq xmm2, xmm
    [all...]
  /external/libvpx/libvpx/third_party/libyuv/source/
compare_win.cc 34 movdqa xmm2, [edx]
38 psubusb xmm1, xmm2
39 psubusb xmm2, xmm3
40 por xmm1, xmm2
41 movdqa xmm2, xmm1
43 punpckhbw xmm2, xmm5
45 pmaddwd xmm2, xmm2
47 paddd xmm0, xmm2
133 // 72: 66 0F 38 40 D5 pmulld xmm2,xmm
154 movdqa xmm2, xmm1
155 punpcklbw xmm2, xmm7 // src[0-7]
165 movdqa xmm2, xmm1
166 punpcklwd xmm2, xmm7 // src[8-11]
176 pshufd xmm2, xmm1, 0x0e // upper 2 dwords
178 pshufd xmm2, xmm1, 0x01
204 vpmovzxbd xmm2, dword ptr [eax + 8] // src[8-11]
207 pmulld xmm2, kHashMul2
214 pshufd xmm2, xmm1, 0x0e // upper 2 dwords
216 pshufd xmm2, xmm1, 0x01
    [all...]
compare_posix.cc 30 "movdqa " MEMACCESS(1) ",%%xmm2 \n"
34 "psubusb %%xmm2,%%xmm1 \n"
35 "psubusb %%xmm3,%%xmm2 \n"
36 "por %%xmm2,%%xmm1 \n"
37 "movdqa %%xmm1,%%xmm2 \n"
39 "punpckhbw %%xmm5,%%xmm2 \n"
41 "pmaddwd %%xmm2,%%xmm2 \n"
43 "paddd %%xmm2,%%xmm0 \n"
59 , "xmm0", "xmm1", "xmm2", "xmm3", "xmm5
    [all...]
  /bionic/libc/arch-x86/atom/string/
ssse3-strcpy-atom.S 271 movaps 16(%ecx), %xmm2
273 pcmpeqb %xmm2, %xmm0
284 movaps %xmm2, (%edx, %esi)
319 movaps 16(%ecx, %esi), %xmm2
321 pcmpeqb %xmm2, %xmm0
332 movaps %xmm2, (%edx, %esi)
355 movaps (%ecx), %xmm2
357 movaps %xmm2, %xmm4
361 pminub %xmm5, %xmm2
363 pminub %xmm2, %xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_variance_impl_sse2.asm 37 movdqa xmm2, [rax+32]
41 pmaddwd xmm2, xmm2
45 paddd xmm2, xmm3
47 paddd xmm4, xmm2
129 movdqu xmm2, XMMWORD PTR [rdi]
135 movdqa xmm4, xmm2
141 punpcklbw xmm2, xmm0
145 psubw xmm1, xmm2
176 movdqa xmm2, xmm
    [all...]
vp9_sad_sse4.asm 19 movq xmm2, MMWORD PTR [rdi+16]
21 punpcklqdq xmm3, xmm2
23 movdqa xmm2, xmm1
25 mpsadbw xmm2, xmm0, 0x5
33 paddw xmm1, xmm2
40 movq xmm2, MMWORD PTR [rdi+16]
42 punpcklqdq xmm3, xmm2
44 movdqa xmm2, xmm5
46 mpsadbw xmm2, xmm0, 0x5
54 paddw xmm5, xmm2
    [all...]
  /external/boringssl/win-x86/crypto/aes/
aesni-x86.asm 24 movups xmm2,[eax]
30 xorps xmm2,xmm0
40 movups [eax],xmm2
41 pxor xmm2,xmm2
49 movups xmm2,[eax]
55 xorps xmm2,xmm0
65 movups [eax],xmm2
66 pxor xmm2,xmm2
    [all...]
  /external/libvpx/libvpx/vp8/encoder/x86/
fwalsh_sse2.asm 34 movq xmm2, MMWORD PTR [rsi]
38 punpcklwd xmm2, xmm3
41 punpckldq xmm0, xmm2 ; ip[1] ip[0]
42 punpckhdq xmm1, xmm2 ; ip[3] ip[2]
44 movdqa xmm2, xmm0
46 psubw xmm2, xmm1
49 psllw xmm2, 2 ; c1 b1
52 punpcklqdq xmm0, xmm2 ; b1 a1
53 punpckhqdq xmm1, xmm2 ; c1 d1
61 movdqa xmm2, xmm
    [all...]
subtract_sse2.asm 99 movdqa xmm2, xmm0
103 pxor xmm2, xmm4
104 pcmpgtb xmm1, xmm2 ; obtain sign information
106 movdqa xmm2, xmm0
108 punpckhbw xmm2, xmm1 ; put sign back to subtraction
117 movdqa [rdi +16], xmm2
172 movq xmm2, [rsi+rdx] ; src -- next line
178 punpcklqdq xmm0, xmm2
181 movdqa xmm2, xmm0
185 pxor xmm2, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
fwalsh_sse2.asm 34 movq xmm2, MMWORD PTR [rsi]
38 punpcklwd xmm2, xmm3
41 punpckldq xmm0, xmm2 ; ip[1] ip[0]
42 punpckhdq xmm1, xmm2 ; ip[3] ip[2]
44 movdqa xmm2, xmm0
46 psubw xmm2, xmm1
49 psllw xmm2, 2 ; c1 b1
52 punpcklqdq xmm0, xmm2 ; b1 a1
53 punpckhqdq xmm1, xmm2 ; c1 d1
61 movdqa xmm2, xmm
    [all...]
subtract_sse2.asm 99 movdqa xmm2, xmm0
103 pxor xmm2, xmm4
104 pcmpgtb xmm1, xmm2 ; obtain sign information
106 movdqa xmm2, xmm0
108 punpckhbw xmm2, xmm1 ; put sign back to subtraction
117 movdqa [rdi +16], xmm2
172 movq xmm2, [rsi+rdx] ; src -- next line
178 punpcklqdq xmm0, xmm2
181 movdqa xmm2, xmm0
185 pxor xmm2, xmm
    [all...]

