/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/sad_sse3.asm
  169  movdqa xmm0, XMMWORD PTR [%2]
  178  movdqa xmm0, XMMWORD PTR [%2]
  191  movdqa xmm0, XMMWORD PTR [%2+%4]
  263  movdqa xmm0, XMMWORD PTR [%2]
  274  movdqa xmm0, XMMWORD PTR [%2]
  291  movdqa xmm0, XMMWORD PTR [%2+%7]
  601  movdqa xmm0, XMMWORD PTR [src_ptr]
  603  movdqa xmm2, XMMWORD PTR [src_ptr+src_stride]
  609  movdqa xmm4, XMMWORD PTR [src_ptr]
  611  movdqa xmm6, XMMWORD PTR [src_ptr+src_stride [all...]
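These SAD (sum of absolute differences) helpers use movdqa to pull 16 aligned source pixels per row ahead of psadbw. A minimal sketch of the same pattern with SSE2 intrinsics; the function name and the assumption that both buffers are 16-byte aligned are mine, while the real code keeps only the source side aligned:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sum of absolute differences over a 16x16 block.
     * movdqa requires 16-byte alignment, hence the aligned-load intrinsic. */
    static uint32_t sad16x16(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride) {
        __m128i acc = _mm_setzero_si128();
        for (int row = 0; row < 16; ++row) {
            __m128i s = _mm_load_si128((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_load_si128((const __m128i *)(ref + row * ref_stride));
            acc = _mm_add_epi64(acc, _mm_sad_epu8(s, r));  /* psadbw */
        }
        /* psadbw leaves two 64-bit partial sums; fold them together. */
        return (uint32_t)_mm_cvtsi128_si32(
            _mm_add_epi64(acc, _mm_srli_si128(acc, 8)));
    }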
/external/llvm/test/CodeGen/X86/vector-zext.ll
  40  ; SSE2-NEXT: movdqa %xmm0, %xmm1
  48  ; SSSE3-NEXT: movdqa %xmm0, %xmm1
  56  ; SSE41-NEXT: movdqa %xmm0, %xmm1
  112  ; SSE2-NEXT: movdqa %xmm0, %xmm1
  115  ; SSE2-NEXT: movdqa %xmm1, %xmm0
  122  ; SSSE3-NEXT: movdqa %xmm0, %xmm1
  125  ; SSSE3-NEXT: movdqa %xmm1, %xmm0
  135  ; SSE41-NEXT: movdqa %xmm2, %xmm0
  192  ; SSE2-NEXT: movdqa %xmm0, %xmm1
  196  ; SSE2-NEXT: movdqa %xmm1, %xmm [all...]
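The register-to-register movdqa copies these FileCheck lines test for exist because classic SSE instructions are destructive: punpcklbw overwrites its first operand, so the compiler must copy any value it still needs. A hedged sketch of the SSE2 zero-extension the test exercises (the helper name is mine):

    #include <emmintrin.h>

    /* Zero-extend 16 unsigned bytes to 16 words. The compiler inserts a
     * "movdqa %xmm0, %xmm1" style copy before the destructive unpack. */
    static void zext_u8x16_to_u16x16(__m128i v, __m128i *lo, __m128i *hi) {
        const __m128i zero = _mm_setzero_si128();
        *lo = _mm_unpacklo_epi8(v, zero);  /* punpcklbw */
        *hi = _mm_unpackhi_epi8(v, zero);  /* punpckhbw */
    }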
/external/llvm/test/CodeGen/X86/vector-idiv.ll
  11  ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
  26  ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
  27  ; SSE-NEXT: movdqa %xmm0, %xmm2
  62  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
  66  ; SSE41-NEXT: movdqa %xmm0, %xmm5
  87  ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
  88  ; SSE-NEXT: movdqa %xmm0, %xmm3
  133  ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
  143  ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
  166  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363 [all...]
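Here movdqa materializes magic constants for division by a loop invariant: 613566757 is 0x24924925, the multiplier for unsigned 32-bit division by 7, and 9363 (0x2493) plays the same role for 16-bit lanes. A scalar sketch of the 32-bit recipe; the vector code does the equivalent per lane with pmuludq/pmulhuw:

    #include <stdint.h>

    /* Unsigned division by 7 without a divide instruction: multiply by
     * 0x24924925, keep the high 32 bits, then an add-and-shift fixup. */
    static uint32_t udiv7(uint32_t n) {
        uint32_t hi = (uint32_t)(((uint64_t)n * 0x24924925u) >> 32);
        return (((n - hi) >> 1) + hi) >> 2;
    }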
/external/llvm/test/CodeGen/X86/sse-domains.ll
  21  ; CHECK: movdqa
  26  ; CHECK: movdqa
  27  ; CHECK: movdqa
/external/llvm/test/CodeGen/X86/palignr.ll
  23  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  39  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  87  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  104  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  121  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  142  ; CHECK-NEXT: movdqa %xmm1, %xmm0
  147  ; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
  151  ; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
/external/llvm/test/CodeGen/X86/pr14161.ll
  8  ; CHECK-NEXT: movdqa (%rdi), %xmm0
  27  ; CHECK-NEXT: movdqa (%rdi), %xmm0
/external/llvm/test/CodeGen/X86/vec_cmp_uint-128.ll
  231  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
  234  ; SSE2-NEXT: movdqa %xmm1, %xmm2
  248  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
  251  ; SSE41-NEXT: movdqa %xmm1, %xmm2
  265  ; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
  295  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
  394  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
  397  ; SSE2-NEXT: movdqa %xmm0, %xmm2
  409  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
  412  ; SSE41-NEXT: movdqa %xmm0, %xmm [all...]
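2147483648 is 2^31 and 9223372036854775808 is 2^63: SSE offers only signed vector compares, so unsigned comparisons are lowered by flipping the sign bit of both operands (the movdqa-loaded bias vector) and then using pcmpgtd or, on SSE4.2, pcmpgtq. A sketch for 32-bit lanes:

    #include <emmintrin.h>

    /* Unsigned a > b per 32-bit lane via the signed pcmpgtd: XORing in
     * the sign bit (2^31) maps unsigned order onto signed order. */
    static __m128i cmpgt_epu32(__m128i a, __m128i b) {
        const __m128i bias = _mm_set1_epi32((int)0x80000000);
        return _mm_cmpgt_epi32(_mm_xor_si128(a, bias),
                               _mm_xor_si128(b, bias));
    }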
/external/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
  17  ; CHECK: movdqa
/external/llvm/test/CodeGen/X86/vec_cmp_sint-128.ll
  231  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
  234  ; SSE2-NEXT: movdqa %xmm1, %xmm2
  248  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
  251  ; SSE41-NEXT: movdqa %xmm1, %xmm2
  365  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
  368  ; SSE2-NEXT: movdqa %xmm0, %xmm2
  380  ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
  383  ; SSE41-NEXT: movdqa %xmm0, %xmm2
  479  ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
  482  ; SSE2-NEXT: movdqa %xmm0, %xmm [all...]
/external/llvm/test/CodeGen/X86/unaligned-spill-folding.ll
  42  ; ALIGNED: movdqa {{.*}} # 16-byte Spill
  47  ; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
/external/boringssl/src/crypto/aes/asm/aesni-x86_64.pl
  972  movdqa .Lincrement64(%rip),$increment
  973  movdqa .Lbswap_mask(%rip),$bswap_mask
  979  movdqa $iv,$inout0
  1014  movdqa $iv,$inout0
  1063  movdqa .Lincrement64(%rip),$increment
  1064  movdqa .Lbswap_mask(%rip),$bswap_mask [all...]
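This is a counter-mode path of the AES-NI module: movdqa loads the .Lincrement64 and .Lbswap_mask constants once and copies the working counter out of $iv before each block encryption. Counters are kept big-endian, so the code swaps bytes, adds, and swaps back. A sketch of that step, assuming SSSE3 for the shuffle; the function name and the exact placement of the counter within the block are mine:

    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

    /* Advance a big-endian counter block: byte-swap each 64-bit half,
     * paddq the low half, byte-swap back. */
    static __m128i ctr_increment64(__m128i iv) {
        const __m128i bswap = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15,
                                           0, 1, 2, 3, 4, 5, 6, 7);
        __m128i le = _mm_shuffle_epi8(iv, bswap);
        le = _mm_add_epi64(le, _mm_set_epi64x(0, 1));
        return _mm_shuffle_epi8(le, bswap);
    }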
/bionic/libc/arch-x86/atom/string/sse2-memrchr-atom.S
  125  movdqa 48(%ecx), %xmm0
  131  movdqa 32(%ecx), %xmm2
  137  movdqa 16(%ecx), %xmm3
  143  movdqa (%ecx), %xmm4
  153  movdqa 48(%ecx), %xmm0
  159  movdqa 32(%ecx), %xmm2
  165  movdqa 16(%ecx), %xmm3
  171  movdqa (%ecx), %xmm3
  193  movdqa (%ecx), %xmm0
  194  movdqa 16(%ecx), %xmm [all...]
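memrchr searches backwards, so the aligned loads walk each 64-byte chunk from offset 48 down to 0; each 16-byte vector is compared against the target byte with pcmpeqb and reduced to a bitmask with pmovmskb. A minimal sketch of one such step (the real routine unrolls four and handles unaligned heads and tails):

    #include <emmintrin.h>
    #include <stddef.h>

    /* Find the last occurrence of byte c in one aligned 16-byte block,
     * or return NULL if absent. */
    static const char *rscan16(const char *p16 /* 16-byte aligned */, int c) {
        __m128i blk = _mm_load_si128((const __m128i *)p16);         /* movdqa */
        __m128i eq  = _mm_cmpeq_epi8(blk, _mm_set1_epi8((char)c));  /* pcmpeqb */
        unsigned mask = (unsigned)_mm_movemask_epi8(eq);            /* pmovmskb */
        if (mask == 0)
            return NULL;
        return p16 + (31 - __builtin_clz(mask));  /* highest set bit wins */
    }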
/external/boringssl/win-x86_64/crypto/sha/sha1-x86_64.asm
  1300  movdqa xmm6,XMMWORD[64+r11]
  1301  movdqa xmm9,XMMWORD[((-64))+r11]
  1314  movdqa XMMWORD[rsp],xmm0
  1316  movdqa XMMWORD[16+rsp],xmm1
  1318  movdqa XMMWORD[32+rsp],xmm2
  1326  movdqa xmm8,xmm3
  1347  movdqa XMMWORD[48+rsp],xmm9
  1350  movdqa xmm10,xmm4
  1354  movdqa xmm8,xmm4
  1364  movdqa xmm9,xmm1 [all...]
/bionic/libc/arch-x86_64/string/sse4-memcmp-slm.S
  371  movdqa %xmm2, %xmm1
  405  movdqa %xmm2, %xmm1
  441  movdqa (%rdi), %xmm2
  446  movdqa 16(%rdi), %xmm2
  451  movdqa 32(%rdi), %xmm2
  456  movdqa 48(%rdi), %xmm2
  463  movdqa 64(%rdi), %xmm2
  468  movdqa 80(%rdi), %xmm2
  491  movdqa (%rdi), %xmm2
  496  movdqa 16(%rdi), %xmm [all...]
/bionic/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
  190  movdqa %xmm4, (%edi)
  213  movdqa %xmm0, (%edi)
  420  movdqa %xmm4, -16(%edi)
  421  movdqa %xmm5, -32(%edi)
  422  movdqa %xmm6, -48(%edi)
  423  movdqa %xmm7, -64(%edi)
  444  movdqa %xmm0, -64(%edi)
  445  movdqa %xmm1, -48(%edi)
  446  movdqa %xmm2, -32(%edi)
  447  movdqa %xmm3, -16(%edi [all...]
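Here movdqa sits on the store side: the backward-copy path of this memmove writes four 16-byte registers per iteration at descending offsets from the destination cursor. A sketch of the equivalent forward inner loop, assuming both pointers are 16-byte aligned and the length is a multiple of 64:

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Copy 64 bytes per iteration with aligned vector loads and stores. */
    static void copy64(uint8_t *dst, const uint8_t *src, size_t len) {
        for (size_t i = 0; i < len; i += 64) {
            __m128i a = _mm_load_si128((const __m128i *)(src + i));
            __m128i b = _mm_load_si128((const __m128i *)(src + i + 16));
            __m128i c = _mm_load_si128((const __m128i *)(src + i + 32));
            __m128i d = _mm_load_si128((const __m128i *)(src + i + 48));
            _mm_store_si128((__m128i *)(dst + i),      a);  /* movdqa store */
            _mm_store_si128((__m128i *)(dst + i + 16), b);
            _mm_store_si128((__m128i *)(dst + i + 32), c);
            _mm_store_si128((__m128i *)(dst + i + 48), d);
        }
    }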
/external/boringssl/src/crypto/modes/asm/ghash-x86.pl
  846  &movdqa ($Xhi,$Xi); #
  859  &movdqa ($T2,$T1); #
  874  &movdqa ($T1,$Xi); #
  875  &movdqa ($Xhi,$Xi);
  886  &movdqa ($T3,$T2); #
  907  &movdqa ($T2,$Xi); #
  908  &movdqa ($T1,$Xi);
  914  &movdqa ($T1,$Xi); #
  921  &movdqa ($T2,$Xi);
  945  &movdqa ($T1,$Hkey) [all...]
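In the PCLMULQDQ path of GHASH these movdqa copies stage $Xi and $Hkey, because each pclmulqdq overwrites a source and the combination step needs every input more than once. A sketch of the underlying 128x128-bit carry-less multiply with intrinsics, in schoolbook form; the reduction modulo the GHASH polynomial is omitted:

    #include <wmmintrin.h>  /* PCLMUL: _mm_clmulepi64_si128 */

    /* 128x128 -> 256-bit carry-less product: four 64x64 multiplies,
     * with the cross terms XORed into the middle 128 bits. */
    static void clmul128(__m128i a, __m128i b, __m128i *lo, __m128i *hi) {
        __m128i t0  = _mm_clmulepi64_si128(a, b, 0x00);  /* a.lo * b.lo */
        __m128i t3  = _mm_clmulepi64_si128(a, b, 0x11);  /* a.hi * b.hi */
        __m128i mid = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x01),
                                    _mm_clmulepi64_si128(a, b, 0x10));
        *lo = _mm_xor_si128(t0, _mm_slli_si128(mid, 8));
        *hi = _mm_xor_si128(t3, _mm_srli_si128(mid, 8));
    }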
/external/libvpx/libvpx/third_party/libyuv/source/row_gcc.cc
  249  "movdqa %%xmm0,%%xmm1 \n"
  272  "movdqa %3,%%xmm4 \n"
  279  "movdqa %%xmm3,%%xmm2 \n"
  310  "movdqa %3,%%xmm4 \n"
  317  "movdqa %%xmm3,%%xmm2 \n"
  364  "movdqa %%xmm0,%%xmm1 \n"
  365  "movdqa %%xmm0,%%xmm2 \n"
  375  "movdqa %%xmm1,%%xmm2 \n"
  402  "movdqa %%xmm3,%%xmm4 \n"
  411  "movdqa %%xmm0,%%xmm1 \n [all...]
/external/boringssl/src/crypto/sha/asm/sha512-586.pl
  471  &movdqa (@X[1],&QWP(80*8,$K512)); # byte swap mask
  475  &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
  476  &movdqa (@X[3],&QWP(16*($j%8),$K512));
  477  &movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
  479  &movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7); # restore @X[0]
  482  &movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i]
  490  &movdqa (@X[2],&QWP(16*(($j+1)%4),$frame)); # pre-restore @X[1]
  491  &movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]); # off-load @X[3]
  572  &movdqa ($t2,@X[5]);
  573  &movdqa (@X[1],$t0); # restore @X[1 [all...]
/external/boringssl/src/crypto/sha/asm/sha256-586.pl
  532  &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
  547  &movdqa (&QWP(16,"esp"),$CDGH); # offload
  549  &movdqa ($Wi,&QWP(0*16-0x80,$K256));
  555  &movdqa (&QWP(0,"esp"),$ABEF); # offload
  558  &movdqa ($Wi,&QWP(1*16-0x80,$K256));
  567  &movdqa ($Wi,&QWP(2*16-0x80,$K256));
  572  &movdqa ($TMP,@MSG[3]);
  579  &movdqa ($Wi,&QWP(3*16-0x80,$K256));
  584  &movdqa ($TMP,@MSG[0]);
  592  &movdqa ($Wi,&QWP($i*16-0x80,$K256)) [all...]
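The $ABEF/$CDGH names mark this as the SHA-extension path: the eight state words live in two xmm registers, movdqa offloads them to the stack around the round loop, and each $Wi load fetches four round constants from the $K256 table. A sketch of the characteristic round step using SHA-NI intrinsics; this is the canonical pattern, not this file's exact code:

    #include <immintrin.h>  /* SHA-NI: _mm_sha256rnds2_epu32 */

    /* Four SHA-256 rounds. state0 holds ABEF and state1 holds CDGH;
     * msg is W[i..i+3], k the matching four constants from K256. */
    static void sha256_rounds4(__m128i *state0, __m128i *state1,
                               __m128i msg, __m128i k) {
        __m128i wk = _mm_add_epi32(msg, k);                    /* W + K */
        *state1 = _mm_sha256rnds2_epu32(*state1, *state0, wk); /* rounds i, i+1 */
        wk = _mm_shuffle_epi32(wk, 0x0E);                      /* upper two W+K */
        *state0 = _mm_sha256rnds2_epu32(*state0, *state1, wk); /* rounds i+2, i+3 */
    }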
/external/boringssl/linux-x86_64/crypto/bn/x86_64-mont.S
  594  movdqa %xmm5,(%rsp,%r14,1)
  595  movdqa %xmm5,16(%rsp,%r14,1)
  680  movdqa %xmm0,0(%r11)
  681  movdqa %xmm1,16(%r11)
  682  movdqa %xmm3,32(%r11)
  683  movdqa %xmm4,48(%r11)
  702  movdqa %xmm0,0(%rax)
  703  movdqa %xmm0,16(%rax)
  704  movdqa %xmm0,32(%rax)
  705  movdqa %xmm0,48(%rax [all...]
/external/boringssl/mac-x86_64/crypto/bn/x86_64-mont.S
  593  movdqa %xmm5,(%rsp,%r14,1)
  594  movdqa %xmm5,16(%rsp,%r14,1)
  678  movdqa %xmm0,0(%r11)
  679  movdqa %xmm1,16(%r11)
  680  movdqa %xmm3,32(%r11)
  681  movdqa %xmm4,48(%r11)
  700  movdqa %xmm0,0(%rax)
  701  movdqa %xmm0,16(%rax)
  702  movdqa %xmm0,32(%rax)
  703  movdqa %xmm0,48(%rax [all...]
/external/boringssl/win-x86_64/crypto/bn/x86_64-mont.asm
  622  movdqa XMMWORD[r14*1+rsp],xmm5
  623  movdqa XMMWORD[16+r14*1+rsp],xmm5
  721  movdqa XMMWORD[r11],xmm0
  722  movdqa XMMWORD[16+r11],xmm1
  723  movdqa XMMWORD[32+r11],xmm3
  724  movdqa XMMWORD[48+r11],xmm4
  743  movdqa XMMWORD[rax],xmm0
  744  movdqa XMMWORD[16+rax],xmm0
  745  movdqa XMMWORD[32+rax],xmm0
  746  movdqa XMMWORD[48+rax],xmm [all...]
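The three x86_64-mont listings are the same perlasm output built for Linux, macOS, and Windows. The runs that store a single register (xmm5, then xmm0) at 16-byte strides initialize and later clear the on-stack temporary; my reading is that this is the constant-time wipe of the Montgomery scratch area, sketched under that assumption:

    #include <emmintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Zero an aligned scratch buffer 64 bytes per iteration, mirroring
     * the unrolled movdqa stores of a pxor-cleared register. */
    static void wipe64(uint8_t *buf, size_t len /* multiple of 64 */) {
        const __m128i zero = _mm_setzero_si128();
        for (size_t i = 0; i < len; i += 64) {
            _mm_store_si128((__m128i *)(buf + i),      zero);
            _mm_store_si128((__m128i *)(buf + i + 16), zero);
            _mm_store_si128((__m128i *)(buf + i + 32), zero);
            _mm_store_si128((__m128i *)(buf + i + 48), zero);
        }
    }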
/external/boringssl/linux-x86_64/crypto/sha/sha1-x86_64.S
  1271  movdqa 64(%r11),%xmm6
  1272  movdqa -64(%r11),%xmm9
  1285  movdqa %xmm0,0(%rsp)
  1287  movdqa %xmm1,16(%rsp)
  1289  movdqa %xmm2,32(%rsp)
  1297  movdqa %xmm3,%xmm8
  1318  movdqa %xmm9,48(%rsp)
  1321  movdqa %xmm4,%xmm10
  1325  movdqa %xmm4,%xmm8
  1335  movdqa %xmm10,%xmm [all...]
/external/boringssl/mac-x86_64/crypto/sha/sha1-x86_64.S
  1270  movdqa 64(%r11),%xmm6
  1271  movdqa -64(%r11),%xmm9
  1284  movdqa %xmm0,0(%rsp)
  1286  movdqa %xmm1,16(%rsp)
  1288  movdqa %xmm2,32(%rsp)
  1296  movdqa %xmm3,%xmm8
  1317  movdqa %xmm9,48(%rsp)
  1320  movdqa %xmm4,%xmm10
  1324  movdqa %xmm4,%xmm8
  1334  movdqa %xmm10,%xmm [all...]
/external/libyuv/files/source/convert_from.cc
  254  movdqa xmm0, [eax] // Y
  256  movdqa xmm1, xmm0
  259  movdqa [edi], xmm0
  260  movdqa [edi + 16], xmm1
  293  movdqa xmm0, [eax] // Y
  294  movdqa xmm1, xmm2
  298  movdqa [edi], xmm1
  299  movdqa [edi + 16], xmm2
  323  "movdqa (%0),%%xmm0 \n"
  325  "movdqa %%xmm0,%%xmm1 \n [all...]
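These are the I420-to-YUY2 style packers, in both MSVC inline-asm and GCC string-asm form: 16 luma bytes are loaded, copied, and interleaved byte-wise with packed chroma before two aligned 16-byte stores. A sketch of the interleave, assuming uv already holds U0 V0 U1 V1 ... (the helper name is mine):

    #include <emmintrin.h>

    /* Interleave 16 Y bytes with 16 packed U/V bytes into 32 bytes of
     * YUY2 (Y0 U0 Y1 V0 Y2 U1 Y3 V1 ...). */
    static void pack_yuy2(__m128i y, __m128i uv, __m128i out[2]) {
        out[0] = _mm_unpacklo_epi8(y, uv);  /* punpcklbw */
        out[1] = _mm_unpackhi_epi8(y, uv);  /* punpckhbw */
    }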