/external/libvpx/libvpx/vp8/common/x86/
subpixel_ssse3.asm
    322  movdqu xmm0, XMMWORD PTR [rsi - 2]
    374  movdqu xmm1, XMMWORD PTR [rsi - 2]
    [all...]

/external/chromium_org/third_party/openssl/openssl/crypto/bn/asm/ |
x86_64-mont5.S
    692  movdqu (%rsi),%xmm1
    694  movdqu %xmm1,(%rdi)
    698  movdqu 16(%rsi,%r14,1),%xmm2
    699  movdqu 32(%rsi,%r14,1),%xmm1
    701  movdqu %xmm2,16(%rdi,%r14,1)
    703  movdqu %xmm1,32(%rdi,%r14,1)
    709  movdqu 16(%rsi,%r14,1),%xmm2
    711  movdqu %xmm2,16(%rdi,%r14,1)

modexp512-x86_64.S
    1490  movdqu 0(%rsi),%xmm0
    1491  movdqu 16(%rsi),%xmm1
    1492  movdqu 32(%rsi),%xmm2
    1493  movdqu 48(%rsi),%xmm3
    1504  movdqu 0(%rdx),%xmm0
    1505  movdqu 16(%rdx),%xmm1
    1506  movdqu 32(%rdx),%xmm2
    1507  movdqu 48(%rdx),%xmm3
    1697  movdqu 0(%rdx),%xmm0
    1698  movdqu 16(%rdx),%xmm
    [all...]

modexp512-x86_64.pl
    1133  movdqu (+16*0)(%rsi), %xmm0
    1134  movdqu (+16*1)(%rsi), %xmm1
    1135  movdqu (+16*2)(%rsi), %xmm2
    1136  movdqu (+16*3)(%rsi), %xmm3
    1147  movdqu (+16*0)(%rdx), %xmm0
    1148  movdqu (+16*1)(%rdx), %xmm1
    1149  movdqu (+16*2)(%rdx), %xmm2
    1150  movdqu (+16*3)(%rdx), %xmm3
    1298  movdqu (+16*0)(%rdx), %xmm0
    1299  movdqu (+16*1)(%rdx), %xmm
    [all...]

x86_64-gf2m.S
    226  movdqu %xmm2,0(%rdi)
    227  movdqu %xmm0,16(%rdi)

x86_64-mont5.pl
    787  movdqu ($ap),%xmm1
    789  movdqu %xmm1,($rp)
    793  movdqu 16($ap,$i),%xmm2
    794  movdqu 32($ap,$i),%xmm1
    796  movdqu %xmm2,16($rp,$i)
    798  movdqu %xmm1,32($rp,$i)
    804  movdqu 16($ap,$i),%xmm2
    806  movdqu %xmm2,16($rp,$i)

/external/libvpx/libvpx/vp8/encoder/x86/ |
ssim_opt.asm
    90  movdqu xmm5, [rsi]
    91  movdqu xmm6, [rdi]

/external/libvpx/libvpx/vp9/encoder/x86/ |
vp9_ssim_opt.asm
    90  movdqu xmm5, [rsi]
    91  movdqu xmm6, [rdi]

/external/openssl/crypto/bn/asm/ |
x86_64-mont5.S
    692  movdqu (%rsi),%xmm1
    694  movdqu %xmm1,(%rdi)
    698  movdqu 16(%rsi,%r14,1),%xmm2
    699  movdqu 32(%rsi,%r14,1),%xmm1
    701  movdqu %xmm2,16(%rdi,%r14,1)
    703  movdqu %xmm1,32(%rdi,%r14,1)
    709  movdqu 16(%rsi,%r14,1),%xmm2
    711  movdqu %xmm2,16(%rdi,%r14,1)

modexp512-x86_64.S
    1490  movdqu 0(%rsi),%xmm0
    1491  movdqu 16(%rsi),%xmm1
    1492  movdqu 32(%rsi),%xmm2
    1493  movdqu 48(%rsi),%xmm3
    1504  movdqu 0(%rdx),%xmm0
    1505  movdqu 16(%rdx),%xmm1
    1506  movdqu 32(%rdx),%xmm2
    1507  movdqu 48(%rdx),%xmm3
    1697  movdqu 0(%rdx),%xmm0
    1698  movdqu 16(%rdx),%xmm
    [all...]

modexp512-x86_64.pl
    1133  movdqu (+16*0)(%rsi), %xmm0
    1134  movdqu (+16*1)(%rsi), %xmm1
    1135  movdqu (+16*2)(%rsi), %xmm2
    1136  movdqu (+16*3)(%rsi), %xmm3
    1147  movdqu (+16*0)(%rdx), %xmm0
    1148  movdqu (+16*1)(%rdx), %xmm1
    1149  movdqu (+16*2)(%rdx), %xmm2
    1150  movdqu (+16*3)(%rdx), %xmm3
    1298  movdqu (+16*0)(%rdx), %xmm0
    1299  movdqu (+16*1)(%rdx), %xmm
    [all...]

x86_64-gf2m.S
    226  movdqu %xmm2,0(%rdi)
    227  movdqu %xmm0,16(%rdi)

x86_64-mont5.pl
    787  movdqu ($ap),%xmm1
    789  movdqu %xmm1,($rp)
    793  movdqu 16($ap,$i),%xmm2
    794  movdqu 32($ap,$i),%xmm1
    796  movdqu %xmm2,16($rp,$i)
    798  movdqu %xmm1,32($rp,$i)
    804  movdqu 16($ap,$i),%xmm2
    806  movdqu %xmm2,16($rp,$i)

/external/stressapptest/src/ |
adler32memcpy.cc
    287  "movdqu 48(" rAX "), %%xmm6;"
    297  "movdqu 4(" rSI "), %%xmm1;"  // Be careful to use unaligned move here.
    299  "movdqu 20(" rSI "), %%xmm3;"
    301  "movdqu 36(" rSI "), %%xmm5;"
    360  "movdqa 0(" rSI "), %%xmm5;"  // Accomplish movdqu 4(%rSI) without

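The comment on line 297 above is the whole point of movdqu in this routine: the checksum loop reads the buffer at a 4-byte offset, so the load address is not 16-byte aligned. A minimal sketch of the aligned/unaligned distinction, using SSE2 intrinsics rather than stressapptest's inline assembly (the buffer and names are illustrative, not the library's):

    /*
     * Minimal sketch, not stressapptest code: why line 297 above insists
     * on an unaligned move. src is 16-byte aligned, src + 4 is not, so
     * the second load must be movdqu (_mm_loadu_si128); movdqa
     * (_mm_load_si128) on src + 4 would fault.
     */
    #include <emmintrin.h>  /* SSE2: _mm_load_si128, _mm_loadu_si128 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        _Alignas(16) uint8_t src[32];
        for (int i = 0; i < 32; i++) src[i] = (uint8_t)i;

        __m128i a = _mm_load_si128((const __m128i *)src);        /* movdqa: aligned      */
        __m128i u = _mm_loadu_si128((const __m128i *)(src + 4)); /* movdqu: src+4 is not */

        uint8_t out[16];
        _mm_storeu_si128((__m128i *)out, u);
        printf("byte at offset 4: %u\n", out[0]); /* prints 4 */
        (void)a;
        return 0;
    }
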
/external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/ |
ssewidth.asm
    220  movdqu xmm1, xmm2
    221  movdqu xmm1, dqword [rbx]
    222  movdqu dqword [rbx], xmm2

/external/libyuv/files/source/ |
scale.cc
    288  movdqu xmm0, [eax]
    289  movdqu xmm1, [eax + 16]
    295  movdqu [edx], xmm0
    319  movdqu xmm0, [eax]
    320  movdqu xmm1, [eax + 16]
    321  movdqu xmm2, [eax + esi]
    322  movdqu xmm3, [eax + esi + 16]
    338  movdqu [edx], xmm0
    551  // Note that movdqa+palign may be better than movdqu.
    600  // Note that movdqa+palign may be better than movdqu
    [all...]
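
The note on lines 551/600 refers to an SSSE3 trick: on cores where movdqu is slow, two aligned movdqa loads fused with palignr can synthesize one unaligned load. A hedged sketch with intrinsics; load_shifted and OFFSET are illustrative names, not libyuv's:

    /*
     * Sketch of the "movdqa+palign may be better than movdqu" note above;
     * illustrative only, not libyuv code. Requires SSSE3 and a byte offset
     * known at compile time (palignr takes an immediate).
     */
    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 (pulls in SSE2 too) */
    #include <stdint.h>

    #define OFFSET 4  /* fixed misalignment within the aligned stream */

    /* Read 16 bytes from (aligned_base + OFFSET) with no unaligned load. */
    static inline __m128i load_shifted(const uint8_t *aligned_base) {
        __m128i lo = _mm_load_si128((const __m128i *)aligned_base);        /* movdqa */
        __m128i hi = _mm_load_si128((const __m128i *)(aligned_base + 16)); /* movdqa */
        /* palignr concatenates hi:lo and shifts right by OFFSET bytes,
         * yielding bytes OFFSET..OFFSET+15 of the 32-byte window. */
        return _mm_alignr_epi8(hi, lo, OFFSET);
    }
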
/external/llvm/test/CodeGen/X86/ |
fast-isel-store.ll
    21  ; CHECK: movdqu

sse-align-12.ll
    4  ; CHECK: movdqu

/external/chromium_org/third_party/openssl/openssl/crypto/rc4/asm/ |
rc4-md5-x86_64.pl
    221  $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
    261  $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
    301  $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15);
    340  $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15);
    388  #rc4# movdqu %xmm2,($out,$in0)  # write RC4 output
    389  #rc4# movdqu %xmm3,16($out,$in0)
    390  #rc4# movdqu %xmm4,32($out,$in0)
    391  #rc4# movdqu %xmm5,48($out,$in0)

rc4-md5-x86_64.S
    324   movdqu (%r13),%xmm2
    618   movdqu 16(%r13),%xmm3
    902   movdqu 32(%r13),%xmm4
    1195  movdqu 48(%r13),%xmm5
    1230  movdqu %xmm2,(%r14,%r13,1)
    1231  movdqu %xmm3,16(%r14,%r13,1)
    1232  movdqu %xmm4,32(%r14,%r13,1)
    1233  movdqu %xmm5,48(%r14,%r13,1)

/external/openssl/crypto/rc4/asm/ |
rc4-md5-x86_64.pl
    221  $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
    261  $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
    301  $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15);
    340  $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15);
    388  #rc4# movdqu %xmm2,($out,$in0)  # write RC4 output
    389  #rc4# movdqu %xmm3,16($out,$in0)
    390  #rc4# movdqu %xmm4,32($out,$in0)
    391  #rc4# movdqu %xmm5,48($out,$in0)

rc4-md5-x86_64.S
    324   movdqu (%r13),%xmm2
    618   movdqu 16(%r13),%xmm3
    902   movdqu 32(%r13),%xmm4
    1195  movdqu 48(%r13),%xmm5
    1230  movdqu %xmm2,(%r14,%r13,1)
    1231  movdqu %xmm3,16(%r14,%r13,1)
    1232  movdqu %xmm4,32(%r14,%r13,1)
    1233  movdqu %xmm5,48(%r14,%r13,1)

/bionic/libc/arch-x86/string/ |
ssse3-wcscpy-atom.S
    114  movdqu (%ecx), %xmm1
    115  movdqu %xmm1, (%edx)
    597  movdqu (%ecx), %xmm0
    598  movdqu %xmm0, (%edx)
    645  movdqu (%ecx), %xmm0
    646  movdqu %xmm0, (%edx)

/external/libvpx/libvpx/vp9/common/x86/ |
vp9_postproc_sse2.asm
    397  movdqu xmm4, [rax + rcx*2]          ;vp9_rv[rcx*2]
    400  movdqu xmm4, [r8 + rcx*2]           ;vp9_rv[rcx*2]
    402  movdqu xmm4, [sym(vp9_rv) + rcx*2]
    661  movdqu xmm1,[rsi+rax]               ; get the source
    667  movdqu xmm2,[rdi+rax]               ; get the noise for this line
    669  movdqu [rsi+rax],xmm1               ; store the result

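Lines 661-669 above are a classic read-modify-write loop: fetch a row of pixels, combine it with a row of noise, and store the result, all through movdqu because neither pointer is guaranteed to be 16-byte aligned. A rough C sketch of that pattern only; the names are ours and a saturating byte add stands in for vp9's actual clamping logic:

    /*
     * Rough sketch of the pattern at lines 661-669; not the vp9 kernel.
     * Both pointers go through movdqu (_mm_loadu_si128/_mm_storeu_si128)
     * since neither is guaranteed to be 16-byte aligned.
     */
    #include <emmintrin.h>
    #include <stdint.h>
    #include <stddef.h>

    static void add_noise_row(uint8_t *src, const uint8_t *noise, size_t w) {
        for (size_t i = 0; i + 16 <= w; i += 16) {
            __m128i s = _mm_loadu_si128((const __m128i *)(src + i));   /* get the source   */
            __m128i n = _mm_loadu_si128((const __m128i *)(noise + i)); /* get the noise    */
            s = _mm_adds_epu8(s, n);                                   /* paddusb, clamped */
            _mm_storeu_si128((__m128i *)(src + i), s);                 /* store the result */
        }
    }
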
/external/valgrind/main/exp-bbv/tests/amd64-linux/ |
rep_prefix.S
    22  movdqu %xmm1,%xmm2
    23  movdqu %xmm2,%xmm1