/external/libvpx/vp8/common/x86/ |
recon_sse2.asm | 145 movdqu xmm0, [rsi] 150 movdqu xmm1, [rsi+rax] 151 movdqu xmm2, [rsi+rax*2] 163 movdqu xmm3, [rsi] 166 movdqu xmm4, [rsi+rax] 168 movdqu xmm5, [rsi+rax*2] 178 movdqu xmm0, [rsi] 181 movdqu xmm1, [rsi+rax] 183 movdqu xmm2, [rsi+rax*2] 192 movdqu xmm3, [rsi [all...] |
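The recon loads above walk three rows of the source at once; rsi holds the row pointer and rax the byte stride. A minimal C-intrinsics sketch of that pattern (names are illustrative, not from the source):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Three unaligned 16-byte row loads at stride intervals, mirroring
       [rsi], [rsi+rax], [rsi+rax*2] above. */
    static void load_three_rows(const uint8_t *src, ptrdiff_t stride,
                                __m128i *r0, __m128i *r1, __m128i *r2) {
        *r0 = _mm_loadu_si128((const __m128i *)src);                /* movdqu [rsi]       */
        *r1 = _mm_loadu_si128((const __m128i *)(src + stride));     /* movdqu [rsi+rax]   */
        *r2 = _mm_loadu_si128((const __m128i *)(src + stride * 2)); /* movdqu [rsi+rax*2] */
    }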
subpixel_sse2.asm | [all...] |
iwalsh_sse2.asm | 51 ;; movdqu [rdi + 0], xmm4 52 ;; movdqu [rdi + 16], xmm3
|
subpixel_ssse3.asm | 288 movdqu xmm1, XMMWORD PTR [rsi - 2] 295 movdqu xmm3, XMMWORD PTR [rsi + 6] 376 movdqu xmm0, XMMWORD PTR [rsi - 2] 427 movdqu xmm1, XMMWORD PTR [rsi - 2] [all...] |
loopfilter_sse2.asm | [all...] |
postproc_sse2.asm | 397 movdqu xmm4, [rax + rcx*2] ;vp8_rv[rcx*2] 400 movdqu xmm4, [r8 + rcx*2] ;vp8_rv[rcx*2] 402 movdqu xmm4, [sym(vp8_rv) + rcx*2] 661 movdqu xmm1,[rsi+rax] ; get the source 667 movdqu xmm2,[rdi+rax] ; get the noise for this line 669 movdqu [rsi+rax],xmm1 ; store the result
|
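The postproc_sse2.asm hits form a load/modify/store triple: fetch a line of pixels, fetch a line of noise, write the combined line back in place. A sketch, assuming the combining step is a saturating signed byte add (the listing shows only the moves):

    #include <emmintrin.h>
    #include <stdint.h>

    /* One 16-pixel step of the add-noise loop above. */
    static void add_noise_16(uint8_t *pix, const int8_t *noise) {
        __m128i p = _mm_loadu_si128((const __m128i *)pix);   /* movdqu xmm1,[rsi+rax] */
        __m128i n = _mm_loadu_si128((const __m128i *)noise); /* movdqu xmm2,[rdi+rax] */
        p = _mm_adds_epi8(p, n);                             /* assumed: paddsb       */
        _mm_storeu_si128((__m128i *)pix, p);                 /* movdqu [rsi+rax],xmm1 */
    }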
postproc_mmx.c | 1002 movdqu xmm4, vp8_rv[ecx*2] 1128 movdqu xmm1, [esi+eax] // get the source 1134 movdqu xmm2, [edi+eax] // get the noise for this line 1136 movdqu [esi+eax], xmm1 // store the result 1498 movdqu [esi+eax], xmm1 // store the result [all...] |
/external/llvm/test/CodeGen/X86/ |
sse-align-6.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movdqu | count 1
|
sse-align-12.ll | 4 ; CHECK: movdqu
|
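Both tests pin the backend to exactly one unaligned vector move. The .ll files contain LLVM IR; a hypothetical C analog of the source they model is simply an unaligned load, which must lower to movdqu rather than movdqa:

    #include <emmintrin.h>

    /* An unaligned 128-bit load; the compiler may not assume 16-byte
       alignment, so it selects movdqu. */
    __m128i load_unaligned(const void *p) {
        return _mm_loadu_si128((const __m128i *)p);
    }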
/external/libvpx/vp8/encoder/x86/ |
variance_impl_ssse3.asm | 67 movdqu xmm0, XMMWORD PTR [rsi] 68 movdqu xmm1, XMMWORD PTR [rsi+1] 92 movdqu xmm1, XMMWORD PTR [rsi] 93 movdqu xmm2, XMMWORD PTR [rsi+1] 163 movdqu xmm1, XMMWORD PTR [rsi] 173 movdqu xmm3, XMMWORD PTR [rsi] 264 movdqu xmm1, XMMWORD PTR [rsi] 265 movdqu xmm2, XMMWORD PTR [rsi+1]
|
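The paired [rsi]/[rsi+1] loads in variance_impl_ssse3.asm (and in variance_impl_sse2.asm below) fetch the same row shifted by one pixel, the first step of a horizontal half-pel filter. A sketch, assuming a rounding byte average as the combining step:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Horizontal half-pel sample: average each pixel with its right
       neighbour, per the [rsi] / [rsi+1] load pair above. */
    static __m128i halfpel_h(const uint8_t *src) {
        __m128i a = _mm_loadu_si128((const __m128i *)src);       /* movdqu [rsi]   */
        __m128i b = _mm_loadu_si128((const __m128i *)(src + 1)); /* movdqu [rsi+1] */
        return _mm_avg_epu8(a, b);                               /* assumed: pavgb */
    }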
variance_impl_sse2.asm | 129 movdqu xmm1, XMMWORD PTR [rsi] 130 movdqu xmm2, XMMWORD PTR [rdi] 245 movdqu xmm1, XMMWORD PTR [rsi] 246 movdqu xmm2, XMMWORD PTR [rdi] 945 movdqu xmm5, XMMWORD PTR [rsi] 946 movdqu xmm3, XMMWORD PTR [rsi+1] 952 movdqu xmm1, XMMWORD PTR [rsi] ; 953 movdqu xmm2, XMMWORD PTR [rsi+1] ; [all...] |
ssim_opt.asm | 89 movdqu xmm5, [rsi] 90 movdqu xmm6, [rdi]
|
sad_sse3.asm | 583 ;%define lddqu movdqu 594 movdqu xmm1, XMMWORD PTR [ref_ptr] 596 movdqu xmm3, XMMWORD PTR [ref_ptr+ref_stride] 602 movdqu xmm5, XMMWORD PTR [ref_ptr] 607 movdqu xmm1, XMMWORD PTR [ref_ptr+ref_stride]
|
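sad_sse3.asm keeps movdqu on ref_ptr because the reference block sits at an arbitrary offset; the commented-out %define shows lddqu was once considered as a drop-in. A sketch of one SAD row, assuming the encoder's own source row is aligned:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sum of absolute differences for one 16-byte row. psadbw yields two
       16-bit partial sums, one per 64-bit half. */
    static __m128i sad_row16(const uint8_t *src, const uint8_t *ref) {
        __m128i s = _mm_load_si128((const __m128i *)src);  /* assumed aligned  */
        __m128i r = _mm_loadu_si128((const __m128i *)ref); /* movdqu [ref_ptr] */
        return _mm_sad_epu8(s, r);                         /* psadbw           */
    }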
/external/v8/src/ia32/ |
codegen-ia32.cc | 106 __ movdqu(xmm0, Operand(src, 0)); 107 __ movdqu(Operand(dst, 0), xmm0); 155 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 156 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0); 177 __ movdqu(xmm0, Operand(src, 0x00)); 178 __ movdqu(xmm1, Operand(src, 0x10)); 193 __ movdqu(xmm0, Operand(src, 0)); 201 __ movdqu(xmm0, Operand(src, count, times_1, -0x10)); 202 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
|
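The V8 stub's Operand(src, count, times_1, -0x10) addresses the last 16 bytes of the buffer: the final chunk is copied with an unaligned load/store pair that may overlap bytes already written, which avoids a scalar tail loop. A sketch of that tail trick (assumes count >= 16):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Copy the final 16 bytes with movdqu, overlapping the previous
       chunk instead of falling back to byte copies. */
    static void copy_tail16(uint8_t *dst, const uint8_t *src, size_t count) {
        __m128i last = _mm_loadu_si128((const __m128i *)(src + count - 16));
        _mm_storeu_si128((__m128i *)(dst + count - 16), last);
    }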
/bionic/libc/arch-x86/string/ |
ssse3-memcpy5.S | 204 movdqu (%eax), %xmm0 239 movdqu %xmm0, (%esi) 443 movdqu %xmm0, (%esi) 488 movdqu %xmm0, (%esi) 533 movdqu %xmm0, (%esi) 578 movdqu %xmm0, (%esi) 623 movdqu %xmm0, (%esi) 668 movdqu %xmm0, (%esi) 713 movdqu %xmm0, (%esi) 758 movdqu %xmm0, (%esi [all...] |
/external/zlib/contrib/amd64/ |
amd64-match.S | 293 movdqu (%windowbestlen, %rdx), %xmm1 294 movdqu (%prev, %rdx), %xmm2 296 movdqu 16(%windowbestlen, %rdx), %xmm3 297 movdqu 16(%prev, %rdx), %xmm4 299 movdqu 32(%windowbestlen, %rdx), %xmm5 300 movdqu 32(%prev, %rdx), %xmm6 302 movdqu 48(%windowbestlen, %rdx), %xmm7 303 movdqu 48(%prev, %rdx), %xmm8
|
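amd64-match.S pulls 64 bytes from each of the two candidate strings per iteration with interleaved movdqu pairs. The listing stops at the loads; presumably each pair is then compared with pcmpeqb/pmovmskb or similar to locate the first differing byte, along these lines:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Compare 16 bytes of two strings; return the index of the first
       mismatch, or 16 if they are equal. __builtin_ctz is GCC/Clang. */
    static int first_mismatch16(const uint8_t *a, const uint8_t *b) {
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        int eq = _mm_movemask_epi8(_mm_cmpeq_epi8(va, vb));
        return (eq == 0xFFFF) ? 16 : __builtin_ctz(~eq);
    }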
/external/libyuv/files/source/ |
convert.cc | 380 movdqu xmm3, XMMWORD PTR [eax] ;in1 384 movdqu XMMWORD PTR [esi], xmm1 ;write to out1 386 movdqu xmm5, XMMWORD PTR [ecx] ;in2 390 movdqu XMMWORD PTR [edi], xmm2 ;write to out2 393 movdqu XMMWORD PTR [esi+16], xmm3 ;write to out1 again 398 movdqu XMMWORD PTR [edi+16], xmm5 ;write to out2 again 494 movdqu xmm3, XMMWORD PTR [eax] ;in1 497 movdqu XMMWORD PTR [esi], xmm1 ;write to out1 499 movdqu xmm5, XMMWORD PTR [ecx] ;in2 502 movdqu XMMWORD PTR [edi], xmm2 ;write to out [all...] |
scale.cc | 536 // Note that movdqa+palign may be better than movdqu. 586 // Note that movdqa+palign may be better than movdqu. 614 movdqu xmm0, [esi+8] // pixels 8..15 615 movdqu xmm1, [esi+ebx+8] 643 // Note that movdqa+palign may be better than movdqu. 672 movdqu xmm0, [esi+8] // pixels 8..15 673 movdqu xmm1, [esi+ebx+8] 1072 // Note that movdqa+palign may be better than movdqu. [all...] |
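The repeated scale.cc comment points at a real trade-off: on CPUs where movdqu is slow, an unaligned access can be synthesized from two aligned movdqa loads glued together with palignr (SSSE3). A sketch for the fixed +8 offset used in the pixel loops above (aligned_base is assumed 16-byte aligned):

    #include <stdint.h>
    #include <tmmintrin.h> /* SSSE3: _mm_alignr_epi8 */

    /* Equivalent of a movdqu at (aligned_base + 8), built from two
       movdqa loads and one palignr. */
    static __m128i load_at_offset8(const uint8_t *aligned_base) {
        __m128i lo = _mm_load_si128((const __m128i *)aligned_base);
        __m128i hi = _mm_load_si128((const __m128i *)(aligned_base + 16));
        return _mm_alignr_epi8(hi, lo, 8); /* bytes 8..23 of hi:lo */
    }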
/external/valgrind/main/exp-bbv/tests/amd64-linux/ |
rep_prefix.S | 22 movdqu %xmm1,%xmm2 23 movdqu %xmm2,%xmm1
|
/external/valgrind/main/memcheck/tests/amd64/ |
sse_memory.stdout.exp | [all...] |
/external/valgrind/main/memcheck/tests/x86/ |
sse2_memory.stdout.exp | [all...] |
/external/v8/test/cctest/ |
test-disasm-ia32.cc | 393 __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000)); 394 __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
|
/external/llvm/test/MC/Disassembler/X86/ |
simple-tests.txt | 243 # CHECK: movdqu %xmm1, %xmm0 246 # CHECK: movdqu %xmm0, %xmm1
|
x86-32.txt | 274 # CHECK: movdqu %xmm1, %xmm0 277 # CHECK: movdqu %xmm0, %xmm1
|
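Both disassembler tests exercise the two opcodes behind the one mnemonic: movdqu is F3 0F 6F /r in its load/register form and F3 0F 7F /r in its store form, which is why each file checks the instruction in both operand orders. Byte pairs along these lines would decode as shown (the exact bytes in the tests may differ):

    f3 0f 6f c1    movdqu %xmm1, %xmm0    ; 6F: xmm <- xmm/m128
    f3 0f 7f c1    movdqu %xmm0, %xmm1    ; 7F: xmm/m128 <- xmm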
/packages/apps/Camera/jni/feature_stab/db_vlvm/ |
db_feature_matching.cpp | 893 movdqu [ecx+1*22],xmm7 /* move short values to patch */ 894 movdqu [ecx+1*22+16],xmm6 /* move short values to patch */ 914 movdqu [ecx+2*22],xmm7 /* move short values to patch */ 915 movdqu [ecx+2*22+16],xmm6 /* move short values to patch */ 935 movdqu [ecx+3*22],xmm7 /* move short values to patch */ 936 movdqu [ecx+3*22+16],xmm6 /* move short values to patch */ 956 movdqu [ecx+4*22],xmm7 /* move short values to patch */ 957 movdqu [ecx+4*22+16],xmm6 /* move short values to patch */ 977 movdqu [ecx+5*22],xmm7 /* move short values to patch */ 978 movdqu [ecx+5*22+16],xmm6 /* move short values to patch */ 998 movdqu [ecx+6*22],xmm7 /* move short values to patch */ 999 movdqu [ecx+6*22+16],xmm6 /* move short values to patch */ 1019 movdqu [ecx+7*22],xmm7 /* move short values to patch */ 1020 movdqu [ecx+7*22+16],xmm6 /* move short values to patch */ 1061 movdqu [ecx+9*22],xmm7 /* move short values to patch */ 1062 movdqu [ecx+9*22+16],xmm6 /* move short values to patch */ 1082 movdqu [ecx+10*22],xmm7 /* move short values to patch */ 1083 movdqu [ecx+10*22+16],xmm6 /* move short values to patch */ [all...] |
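Every store in db_feature_matching.cpp is unaligned by construction: patch rows sit 22 bytes (11 shorts) apart, and since 22 is not a multiple of 16, consecutive rows cannot all be 16-byte aligned, so movdqu is mandatory. A sketch of one row's store pair (names are illustrative):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Write one 32-byte row of short values into a patch whose rows are
       22 bytes apart, per [ecx+row*22] / [ecx+row*22+16] above. */
    static void store_patch_row(int16_t *patch, int row, __m128i lo, __m128i hi) {
        uint8_t *dst = (uint8_t *)patch + row * 22;
        _mm_storeu_si128((__m128i *)dst, lo);        /* movdqu [ecx+row*22],xmm7    */
        _mm_storeu_si128((__m128i *)(dst + 16), hi); /* movdqu [ecx+row*22+16],xmm6 */
    }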