/external/chromium_org/third_party/libyuv/source/
scale_posix.cc
    208  "movdqu " MEMACCESS(0) ",%%xmm0 \n"
    209  "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
    214  "movdqu %%xmm0," MEMACCESS(1) " \n"
    238  "movdqu " MEMACCESS(0) ",%%xmm0 \n"
    239  "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
    250  "movdqu %%xmm0," MEMACCESS(1) " \n"
    274  "movdqu " MEMACCESS(0) ",%%xmm0 \n"
    275  "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
    276  MEMOPREG(movdqu,0x00,0,3,1,xmm2)  // movdqu (%0,%3,1),%%xmm [all...]
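These scale_posix.cc hits are the unaligned row kernels: each iteration reads two 16-byte blocks of source pixels with movdqu (wrapped in the MEMACCESS macros) and writes one block back. Below is a minimal C-intrinsics sketch of that access pattern, a 2:1 horizontal downscale that keeps every other byte; the function name and the even-byte selection are illustrative rather than libyuv's exact kernel (SSE2 only, dst_width assumed to be a multiple of 16).

    #include <emmintrin.h>  /* SSE2: _mm_loadu_si128 / _mm_storeu_si128 compile to movdqu */
    #include <stdint.h>

    /* Illustrative 2:1 byte downscale: 32 unaligned source bytes -> 16 dest bytes. */
    static void half_row_sse2(const uint8_t *src, uint8_t *dst, int dst_width) {
      const __m128i mask = _mm_set1_epi16(0x00ff);      /* keep the low byte of each pair */
      for (int x = 0; x < dst_width; x += 16) {
        __m128i a = _mm_loadu_si128((const __m128i *)(src + 0));   /* movdqu load  */
        __m128i b = _mm_loadu_si128((const __m128i *)(src + 16));  /* movdqu load  */
        a = _mm_and_si128(a, mask);
        b = _mm_and_si128(b, mask);
        __m128i out = _mm_packus_epi16(a, b);           /* repack 16-bit lanes to bytes */
        _mm_storeu_si128((__m128i *)(dst + x), out);    /* movdqu store */
        src += 32;
      }
    }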
/external/stressapptest/src/ |
adler32memcpy.cc
    287  "movdqu 48(" rAX "), %%xmm6;"
    297  "movdqu 4(" rSI "), %%xmm1;"   // Be careful to use unaligned move here.
    299  "movdqu 20(" rSI "), %%xmm3;"
    301  "movdqu 36(" rSI "), %%xmm5;"
    360  "movdqa 0(" rSI "), %%xmm5;"   // Accomplish movdqu 4(%rSI) without
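The comments preserved above (lines 297 and 360) are about alignment: movdqa faults if its memory operand is not 16-byte aligned, so a read that starts 4 bytes into an aligned buffer must use movdqu, or an aligned load plus shifting as the code at line 360 goes on to do. A hedged intrinsics illustration of the distinction, not taken from adler32memcpy.cc:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Reading 16 bytes starting 4 bytes past an aligned base: only the
     * unaligned load is safe; _mm_load_si128 (movdqa) would fault here. */
    __m128i read_offset4(const uint8_t *aligned_base) {
      return _mm_loadu_si128((const __m128i *)(aligned_base + 4));  /* movdqu */
    }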
/external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/ |
ssewidth.asm
    220  movdqu xmm1, xmm2
    221  movdqu xmm1, dqword [rbx]
    222  movdqu dqword [rbx], xmm2
/external/libyuv/files/source/ |
scale.cc
    288  movdqu xmm0, [eax]
    289  movdqu xmm1, [eax + 16]
    295  movdqu [edx], xmm0
    319  movdqu xmm0, [eax]
    320  movdqu xmm1, [eax + 16]
    321  movdqu xmm2, [eax + esi]
    322  movdqu xmm3, [eax + esi + 16]
    338  movdqu [edx], xmm0
    551  // Note that movdqa+palign may be better than movdqu.
    600  // Note that movdqa+palign may be better than movdqu [all...]
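The notes at lines 551 and 600 refer to a known alternative: on some older cores an unaligned movdqu load was slower than two aligned movdqa loads combined with the SSSE3 palignr instruction. A minimal sketch of that substitution, assuming the misalignment (here 4 bytes) is a compile-time constant, that base is 16-byte aligned, and that 32 bytes are readable at base; compile with -mssse3:

    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 (palignr) */
    #include <stdint.h>

    /* Equivalent of movdqu (base + 4) when base is 16-byte aligned:
     * two movdqa loads and one palignr instead of one unaligned load. */
    static __m128i loadu_offset4(const uint8_t *base) {
      __m128i lo = _mm_load_si128((const __m128i *)(base + 0));   /* movdqa */
      __m128i hi = _mm_load_si128((const __m128i *)(base + 16));  /* movdqa */
      return _mm_alignr_epi8(hi, lo, 4);                          /* bytes base[4..19] */
    }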
/external/llvm/test/CodeGen/X86/ |
fast-isel-store.ll | 21 ; CHECK: movdqu
sse-align-12.ll | 4 ; CHECK: movdqu
unaligned-spill-folding.ll | 37 ; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
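These tests check that the x86 backend, including fast-isel, lowers 16-byte memory accesses whose known alignment is below 16 to movdqu, and that such accesses can still participate in spill folding. A tiny C illustration of the kind of source that exercises this (the function name is made up for the example); the store below is normally emitted as movdqu because p carries no alignment guarantee:

    #include <emmintrin.h>

    /* With p only known to be byte-aligned, this 16-byte store is
     * typically compiled to movdqu rather than movdqa. */
    void store16(void *p, __m128i v) {
      _mm_storeu_si128((__m128i *)p, v);
    }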
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/x86/ |
vp9_variance_impl_sse2.asm
    128  movdqu xmm1, XMMWORD PTR [rsi]
    129  movdqu xmm2, XMMWORD PTR [rdi]
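The variance kernel loads one row each from the source and reference blocks with movdqu since neither pointer is guaranteed to be 16-byte aligned. A simplified C sketch of the same access pattern that only computes a sum of absolute differences with psadbw; the real kernel also accumulates sums and squared sums for the variance:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sum of absolute differences over a 16xH block; src/ref may be unaligned. */
    static unsigned sad16xh(const uint8_t *src, int src_stride,
                            const uint8_t *ref, int ref_stride, int h) {
      __m128i acc = _mm_setzero_si128();
      for (int y = 0; y < h; ++y) {
        __m128i s = _mm_loadu_si128((const __m128i *)src);  /* movdqu */
        __m128i r = _mm_loadu_si128((const __m128i *)ref);  /* movdqu */
        acc = _mm_add_epi64(acc, _mm_sad_epu8(s, r));       /* psadbw: two 64-bit partial sums */
        src += src_stride;
        ref += ref_stride;
      }
      acc = _mm_add_epi64(acc, _mm_srli_si128(acc, 8));     /* fold the two partial sums */
      return (unsigned)_mm_cvtsi128_si32(acc);
    }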
/external/chromium_org/third_party/openssl/openssl/crypto/rc4/asm/ |
rc4-md5-x86_64.pl
    221  $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
    261  $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
    301  $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15);
    340  $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15);
    388  #rc4# movdqu %xmm2,($out,$in0) # write RC4 output
    389  #rc4# movdqu %xmm3,16($out,$in0)
    390  #rc4# movdqu %xmm4,32($out,$in0)
    391  #rc4# movdqu %xmm5,48($out,$in0)
rc4-md5-x86_64.S
    324   movdqu (%r13),%xmm2
    618   movdqu 16(%r13),%xmm3
    902   movdqu 32(%r13),%xmm4
    1195  movdqu 48(%r13),%xmm5
    1230  movdqu %xmm2,(%r14,%r13,1)
    1231  movdqu %xmm3,16(%r14,%r13,1)
    1232  movdqu %xmm4,32(%r14,%r13,1)
    1233  movdqu %xmm5,48(%r14,%r13,1)
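In the fused RC4/MD5 routine, movdqu loads 16-byte chunks of the input (lines 221/324 and following) and later stores the RC4 output, i.e. input XOR keystream, to an arbitrarily aligned destination (lines 388-391 / 1230-1233). A hedged sketch of that load-xor-store step in isolation; producing the keystream itself is the RC4 state machine and is omitted:

    #include <emmintrin.h>
    #include <stdint.h>

    /* XOR one 16-byte keystream block into the message; in/out may be unaligned. */
    static void xor_block16(const uint8_t *in, const uint8_t *keystream, uint8_t *out) {
      __m128i m = _mm_loadu_si128((const __m128i *)in);         /* movdqu load  */
      __m128i k = _mm_loadu_si128((const __m128i *)keystream);
      _mm_storeu_si128((__m128i *)out, _mm_xor_si128(m, k));    /* movdqu store */
    }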
/external/openssl/crypto/rc4/asm/ |
rc4-md5-x86_64.pl
    221  $code.=" movdqu ($in0),%xmm2\n" if ($rc4 && $j==15);
    261  $code.=" movdqu 16($in0),%xmm3\n" if ($rc4 && $j==15);
    301  $code.=" movdqu 32($in0),%xmm4\n" if ($rc4 && $j==15);
    340  $code.=" movdqu 48($in0),%xmm5\n" if ($rc4 && $j==15);
    388  #rc4# movdqu %xmm2,($out,$in0) # write RC4 output
    389  #rc4# movdqu %xmm3,16($out,$in0)
    390  #rc4# movdqu %xmm4,32($out,$in0)
    391  #rc4# movdqu %xmm5,48($out,$in0)
/bionic/libc/arch-x86/atom/string/ |
ssse3-wcscpy-atom.S
    114  movdqu (%ecx), %xmm1
    115  movdqu %xmm1, (%edx)
    597  movdqu (%ecx), %xmm0
    598  movdqu %xmm0, (%edx)
    645  movdqu (%ecx), %xmm0
    646  movdqu %xmm0, (%edx)
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/ |
vp9_postproc_sse2.asm
    397  movdqu xmm4, [rax + rcx*2]          ;vp9_rv[rcx*2]
    400  movdqu xmm4, [r8 + rcx*2]           ;vp9_rv[rcx*2]
    402  movdqu xmm4, [sym(vp9_rv) + rcx*2]
    660  movdqu xmm1,[rsi+rax]               ; get the source
    666  movdqu xmm2,[rdi+rax]               ; get the noise for this line
    668  movdqu [rsi+rax],xmm1               ; store the result
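The matches around line 660 are the add-noise pass: read a row of pixels, add a row of precomputed noise, and write the row back, all through movdqu because frame rows need not be 16-byte aligned. A simplified in-place C sketch; the real kernel clamps the pixels against black/white limits before adding so the byte add cannot wrap, which is omitted here (width assumed to be a multiple of 16):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Add one row of precomputed noise to one row of pixels in place. */
    static void add_noise_row(uint8_t *pix, const int8_t *noise, int width) {
      for (int x = 0; x < width; x += 16) {
        __m128i p = _mm_loadu_si128((const __m128i *)(pix + x));    /* get the source */
        __m128i n = _mm_loadu_si128((const __m128i *)(noise + x));  /* get the noise  */
        _mm_storeu_si128((__m128i *)(pix + x), _mm_add_epi8(p, n)); /* store the result */
      }
    }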
/external/libvpx/libvpx/vp9/common/x86/ |
vp9_postproc_sse2.asm
    397  movdqu xmm4, [rax + rcx*2]          ;vp9_rv[rcx*2]
    400  movdqu xmm4, [r8 + rcx*2]           ;vp9_rv[rcx*2]
    402  movdqu xmm4, [sym(vp9_rv) + rcx*2]
    661  movdqu xmm1,[rsi+rax]               ; get the source
    667  movdqu xmm2,[rdi+rax]               ; get the noise for this line
    669  movdqu [rsi+rax],xmm1               ; store the result
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/ |
vp9_postproc_sse2.asm
    397  movdqu xmm4, [rax + rcx*2]          ;vp9_rv[rcx*2]
    400  movdqu xmm4, [r8 + rcx*2]           ;vp9_rv[rcx*2]
    402  movdqu xmm4, [sym(vp9_rv) + rcx*2]
    661  movdqu xmm1,[rsi+rax]               ; get the source
    667  movdqu xmm2,[rdi+rax]               ; get the noise for this line
    669  movdqu [rsi+rax],xmm1               ; store the result
/external/openssl/crypto/modes/asm/ |
ghash-x86.S
    920   movdqu (%eax),%xmm2
    967   movdqu %xmm2,(%edx)
    968   movdqu %xmm0,16(%edx)
    982   movdqu (%eax),%xmm0
    1021  movdqu %xmm0,(%eax)
    1041  movdqu (%eax),%xmm0
    1043  movdqu (%edx),%xmm2
    1047  movdqu (%esi),%xmm3
    1048  movdqu 16(%esi),%xmm6
    1087  movdqu (%esi),%xmm [all...]
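In ghash-x86.S the unaligned loads and stores move the 128-bit hash state and key material between memory and xmm registers; in the PCLMULQDQ-based path the state is also byte-reversed, since GHASH treats it as a big-endian value. A hedged SSSE3 sketch of just that load-and-byte-swap step, not of the GHASH multiplication itself (compile with -mssse3):

    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 (pshufb) */
    #include <stdint.h>

    /* Load a 16-byte field element from memory (any alignment) and reverse
     * its byte order, as done around the carry-less multiply. */
    static __m128i load_be128(const uint8_t *p) {
      const __m128i bswap = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                          7, 6, 5, 4, 3, 2, 1, 0);
      return _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *)p), bswap);
    }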
/bionic/libc/arch-x86/silvermont/string/ |
sse2-memset-slm.S
    207  movdqu %xmm0, (%edx)
    208  movdqu %xmm0, -16(%edx, %ecx)
    211  movdqu %xmm0, 16(%edx)
    212  movdqu %xmm0, -32(%edx, %ecx)
    221  movdqu %xmm0, (%edx)
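The memset hits show the small-size strategy: splat the fill byte into %xmm0 and cover the buffer with unaligned 16-byte stores issued from both ends, e.g. (%edx) and -16(%edx,%ecx), which may overlap in the middle so no scalar tail loop is needed. A C sketch of that trick for fills of 16 to 32 bytes; the function name is illustrative:

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stddef.h>

    /* memset for 16 <= n <= 32: two unaligned stores that may overlap in the
     * middle cover the whole range, regardless of dst's alignment. */
    static void memset16to32(uint8_t *dst, int c, size_t n) {
      __m128i v = _mm_set1_epi8((char)c);              /* splat the fill byte        */
      _mm_storeu_si128((__m128i *)dst, v);             /* movdqu %xmm0, (dst)        */
      _mm_storeu_si128((__m128i *)(dst + n - 16), v);  /* movdqu %xmm0, -16(dst, n)  */
    }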
/external/chromium_org/third_party/openssl/openssl/patches/ |
mac_ia32_assembly.patch | [all...]
/external/chromium_org/third_party/openssl/patches.chromium/ |
0010-mac_ia32_assembly.patch | [all...]
/external/chromium_org/third_party/openssl/openssl/crypto/bn/asm/ |
modexp512-x86_64.S
    1490  movdqu 0(%rsi),%xmm0
    1491  movdqu 16(%rsi),%xmm1
    1492  movdqu 32(%rsi),%xmm2
    1493  movdqu 48(%rsi),%xmm3
    1504  movdqu 0(%rdx),%xmm0
    1505  movdqu 16(%rdx),%xmm1
    1506  movdqu 32(%rdx),%xmm2
    1507  movdqu 48(%rdx),%xmm3
    1697  movdqu 0(%rdx),%xmm0
    1698  movdqu 16(%rdx),%xmm [all...]
modexp512-x86_64.pl
    1133  movdqu (+16*0)(%rsi), %xmm0
    1134  movdqu (+16*1)(%rsi), %xmm1
    1135  movdqu (+16*2)(%rsi), %xmm2
    1136  movdqu (+16*3)(%rsi), %xmm3
    1147  movdqu (+16*0)(%rdx), %xmm0
    1148  movdqu (+16*1)(%rdx), %xmm1
    1149  movdqu (+16*2)(%rdx), %xmm2
    1150  movdqu (+16*3)(%rdx), %xmm3
    1298  movdqu (+16*0)(%rdx), %xmm0
    1299  movdqu (+16*1)(%rdx), %xmm [all...]
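In modexp512, movdqu is used to move 512-bit (64-byte) operands sixteen bytes at a time without assuming any alignment, as in the four back-to-back loads into %xmm0-%xmm3 above. A minimal C sketch of a full 64-byte copy in the same style; the function name is illustrative:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Copy a 64-byte (512-bit) operand; neither pointer needs to be aligned. */
    static void copy512(const uint8_t *src, uint8_t *dst) {
      __m128i x0 = _mm_loadu_si128((const __m128i *)(src +  0));
      __m128i x1 = _mm_loadu_si128((const __m128i *)(src + 16));
      __m128i x2 = _mm_loadu_si128((const __m128i *)(src + 32));
      __m128i x3 = _mm_loadu_si128((const __m128i *)(src + 48));
      _mm_storeu_si128((__m128i *)(dst +  0), x0);
      _mm_storeu_si128((__m128i *)(dst + 16), x1);
      _mm_storeu_si128((__m128i *)(dst + 32), x2);
      _mm_storeu_si128((__m128i *)(dst + 48), x3);
    }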
x86_64-gf2m.S
    226  movdqu %xmm2,0(%rdi)
    227  movdqu %xmm0,16(%rdi)
x86_64-mont5.pl
    787  movdqu ($ap),%xmm1
    789  movdqu %xmm1,($rp)
    793  movdqu 16($ap,$i),%xmm2
    794  movdqu 32($ap,$i),%xmm1
    796  movdqu %xmm2,16($rp,$i)
    798  movdqu %xmm1,32($rp,$i)
    804  movdqu 16($ap,$i),%xmm2
    806  movdqu %xmm2,16($rp,$i)
/external/openssl/crypto/bn/asm/ |
modexp512-x86_64.S
    1490  movdqu 0(%rsi),%xmm0
    1491  movdqu 16(%rsi),%xmm1
    1492  movdqu 32(%rsi),%xmm2
    1493  movdqu 48(%rsi),%xmm3
    1504  movdqu 0(%rdx),%xmm0
    1505  movdqu 16(%rdx),%xmm1
    1506  movdqu 32(%rdx),%xmm2
    1507  movdqu 48(%rdx),%xmm3
    1697  movdqu 0(%rdx),%xmm0
    1698  movdqu 16(%rdx),%xmm [all...]
modexp512-x86_64.pl
    1133  movdqu (+16*0)(%rsi), %xmm0
    1134  movdqu (+16*1)(%rsi), %xmm1
    1135  movdqu (+16*2)(%rsi), %xmm2
    1136  movdqu (+16*3)(%rsi), %xmm3
    1147  movdqu (+16*0)(%rdx), %xmm0
    1148  movdqu (+16*1)(%rdx), %xmm1
    1149  movdqu (+16*2)(%rdx), %xmm2
    1150  movdqu (+16*3)(%rdx), %xmm3
    1298  movdqu (+16*0)(%rdx), %xmm0
    1299  movdqu (+16*1)(%rdx), %xmm [all...]